repo_name (stringlengths 5-100) | path (stringlengths 4-375) | copies (stringclasses, 991 values) | size (stringlengths 4-7) | content (stringlengths 666-1M) | license (stringclasses, 15 values) |
---|---|---|---|---|---|
zhanghenry/stocks | django/conf/locale/sk/formats.py | 115 | 1173 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j. F Y'
TIME_FORMAT = 'G:i'
DATETIME_FORMAT = 'j. F Y G:i'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j. F'
SHORT_DATE_FORMAT = 'd.m.Y'
SHORT_DATETIME_FORMAT = 'd.m.Y G:i'
FIRST_DAY_OF_WEEK = 1 # Monday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = (
'%d.%m.%Y', '%d.%m.%y', # '25.10.2006', '25.10.06'
'%y-%m-%d', # '06-10-25'
# '%d. %B %Y', '%d. %b. %Y', # '25. October 2006', '25. Oct. 2006'
)
DATETIME_INPUT_FORMATS = (
'%d.%m.%Y %H:%M:%S', # '25.10.2006 14:30:59'
'%d.%m.%Y %H:%M:%S.%f', # '25.10.2006 14:30:59.000200'
'%d.%m.%Y %H:%M', # '25.10.2006 14:30'
'%d.%m.%Y', # '25.10.2006'
)
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '\xa0' # non-breaking space
NUMBER_GROUPING = 3
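# Illustrative check of the input formats above (a minimal sketch using only
# the standard library):
#
#   from datetime import datetime
#   datetime.strptime('25.10.2006 14:30', '%d.%m.%Y %H:%M')
#   # -> datetime.datetime(2006, 10, 25, 14, 30)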
| bsd-3-clause |
blckshrk/Weboob | weboob/tools/value.py | 1 | 7675 | # -*- coding: utf-8 -*-
# Copyright(C) 2010-2011 Romain Bignon
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
import re
from .ordereddict import OrderedDict
from .misc import to_unicode
__all__ = ['ValuesDict', 'Value', 'ValueBackendPassword', 'ValueInt', 'ValueFloat', 'ValueBool']
class ValuesDict(OrderedDict):
"""
Ordered dictionary which can take values in its constructor.
>>> ValuesDict(Value('a', label='Test'), ValueInt('b', label='Test2'))
"""
def __init__(self, *values):
OrderedDict.__init__(self)
for v in values:
self[v.id] = v
class Value(object):
"""
Value.
:param label: human readable description of a value
:type label: str
:param required: if ``True``, the backend can't load if the key isn't found in its configuration
:type required: bool
:param default: an optional default value, used when the key is not in config. If there is no default value and the key
is not found in configuration, the **required** parameter is implicitly set
:param masked: if ``True``, the value is masked. It is useful for applications to know if this key is a password
:type masked: bool
:param regexp: if specified, on load the specified value is checked against this regexp, and an error is raised if it doesn't match
:type regexp: str
:param choices: if this parameter is set, the value must be in the list
:param tiny: the values of choices can be entered by a user (as they are small)
:type choices: (list,dict)
"""
def __init__(self, *args, **kwargs):
if len(args) > 0:
self.id = args[0]
else:
self.id = ''
self.label = kwargs.get('label', kwargs.get('description', None))
self.description = kwargs.get('description', kwargs.get('label', None))
self.default = kwargs.get('default', None)
self.regexp = kwargs.get('regexp', None)
self.choices = kwargs.get('choices', None)
if isinstance(self.choices, (list, tuple)):
self.choices = dict(((v, v) for v in self.choices))
self.tiny = kwargs.get('tiny', None)
self.masked = kwargs.get('masked', False)
self.required = kwargs.get('required', self.default is None)
self._value = kwargs.get('value', None)
def check_valid(self, v):
"""
Check if the given value is valid.
:raises: ValueError
"""
if self.default is not None and v == self.default:
return
if v == '' and self.default != '':
raise ValueError('Value can\'t be empty')
if self.regexp is not None and not re.match(self.regexp, unicode(v)):
raise ValueError('Value "%s" does not match regexp "%s"' % (v, self.regexp))
if self.choices is not None and not v in self.choices.iterkeys():
raise ValueError('Value "%s" is not in list: %s' % (
v, ', '.join(unicode(s) for s in self.choices.iterkeys())))
def load(self, domain, v, callbacks):
"""
Load value.
:param domain: what is the domain of this value
:type domain: str
:param v: value to load
:param callbacks: list of weboob callbacks
:type callbacks: dict
"""
return self.set(v)
def set(self, v):
"""
Set a value.
"""
self.check_valid(v)
if isinstance(v, str):
v = to_unicode(v)
self._value = v
def dump(self):
"""
Dump value to be stored.
"""
return self.get()
def get(self):
"""
Get the value.
"""
return self._value
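# Illustrative usage of Value (a minimal sketch; the names and values below
# are made up for the example):
#
#   name = Value('name', label='Backend name', regexp='^\w+$', default='demo')
#   name.set('mybackend')    # check_valid() passes, the value is stored
#   assert name.get() == 'mybackend'
#   name.set('not valid!')   # raises ValueError: does not match regexp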
class ValueBackendPassword(Value):
_domain = None
_callbacks = {}
_stored = True
def __init__(self, *args, **kwargs):
kwargs['masked'] = kwargs.pop('masked', True)
self.noprompt = kwargs.pop('noprompt', False)
Value.__init__(self, *args, **kwargs)
def load(self, domain, password, callbacks):
self.check_valid(password)
self._domain = domain
self._value = to_unicode(password)
self._callbacks = callbacks
def check_valid(self, passwd):
if passwd == '':
# always allow empty passwords
return True
return Value.check_valid(self, passwd)
def set(self, passwd):
self.check_valid(passwd)
if passwd is None:
# no change
return
self._value = ''
if passwd == '':
return
if self._domain is None:
self._value = to_unicode(passwd)
return
try:
raise ImportError('Keyrings are disabled (see #706)')
import keyring
keyring.set_password(self._domain, self.id, passwd)
except Exception:
self._value = to_unicode(passwd)
else:
self._value = ''
def dump(self):
if self._stored:
return self._value
else:
return ''
def get(self):
if self._value != '' or self._domain is None:
return self._value
try:
raise ImportError('Keyrings are disabled (see #706)')
import keyring
except ImportError:
passwd = None
else:
passwd = keyring.get_password(self._domain, self.id)
if passwd is not None:
# Password has been read in the keyring.
return to_unicode(passwd)
# Prompt user to enter password by hand.
if not self.noprompt and 'login' in self._callbacks:
self._value = to_unicode(self._callbacks['login'](self._domain, self))
if self._value is None:
self._value = ''
else:
self._stored = False
return self._value
class ValueInt(Value):
def __init__(self, *args, **kwargs):
kwargs['regexp'] = '^\d+$'
Value.__init__(self, *args, **kwargs)
def get(self):
return int(self._value)
class ValueFloat(Value):
def __init__(self, *args, **kwargs):
kwargs['regexp'] = '^[\d\.]+$'
Value.__init__(self, *args, **kwargs)
def check_valid(self, v):
try:
float(v)
except ValueError:
raise ValueError('Value "%s" is not a float value')
def get(self):
return float(self._value)
class ValueBool(Value):
def __init__(self, *args, **kwargs):
kwargs['choices'] = {'y': 'True', 'n': 'False'}
Value.__init__(self, *args, **kwargs)
def check_valid(self, v):
if not isinstance(v, bool) and \
not unicode(v).lower() in ('y', 'yes', '1', 'true', 'on',
'n', 'no', '0', 'false', 'off'):
raise ValueError('Value "%s" is not a boolean (y/n)' % v)
def get(self):
return (isinstance(self._value, bool) and self._value) or \
unicode(self._value).lower() in ('y', 'yes', '1', 'true', 'on')
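# Illustrative usage of the typed subclasses (a minimal sketch; names and
# values are made up):
#
#   age = ValueInt('age', label='Age', default=18)
#   age.set('42'); age.get()           # -> 42 (int)
#   ssl = ValueBool('ssl', label='Use SSL', default=True)
#   ssl.set('y'); ssl.get()            # -> True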
| agpl-3.0 |
seanwestfall/django | tests/contenttypes_tests/test_models.py | 249 | 12059 | from __future__ import unicode_literals
import warnings
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes.views import shortcut
from django.contrib.sites.shortcuts import get_current_site
from django.db.utils import IntegrityError, OperationalError, ProgrammingError
from django.http import Http404, HttpRequest
from django.test import TestCase, mock, override_settings
from django.utils import six
from .models import (
ConcreteModel, FooWithBrokenAbsoluteUrl, FooWithoutUrl, FooWithUrl,
ProxyModel,
)
class ContentTypesTests(TestCase):
def setUp(self):
ContentType.objects.clear_cache()
def tearDown(self):
ContentType.objects.clear_cache()
def test_lookup_cache(self):
"""
Make sure that the content type cache (see ContentTypeManager)
works correctly. Lookups for a particular content type -- by model, ID
or natural key -- should hit the database only on the first lookup.
"""
# At this point, a lookup for a ContentType should hit the DB
with self.assertNumQueries(1):
ContentType.objects.get_for_model(ContentType)
# A second hit, though, won't hit the DB, nor will a lookup by ID
# or natural key
with self.assertNumQueries(0):
ct = ContentType.objects.get_for_model(ContentType)
with self.assertNumQueries(0):
ContentType.objects.get_for_id(ct.id)
with self.assertNumQueries(0):
ContentType.objects.get_by_natural_key('contenttypes',
'contenttype')
# Once we clear the cache, another lookup will again hit the DB
ContentType.objects.clear_cache()
with self.assertNumQueries(1):
ContentType.objects.get_for_model(ContentType)
# The same should happen with a lookup by natural key
ContentType.objects.clear_cache()
with self.assertNumQueries(1):
ContentType.objects.get_by_natural_key('contenttypes',
'contenttype')
# And a second hit shouldn't hit the DB
with self.assertNumQueries(0):
ContentType.objects.get_by_natural_key('contenttypes',
'contenttype')
def test_get_for_models_empty_cache(self):
# Empty cache.
with self.assertNumQueries(1):
cts = ContentType.objects.get_for_models(ContentType, FooWithUrl)
self.assertEqual(cts, {
ContentType: ContentType.objects.get_for_model(ContentType),
FooWithUrl: ContentType.objects.get_for_model(FooWithUrl),
})
def test_get_for_models_partial_cache(self):
# Partial cache
ContentType.objects.get_for_model(ContentType)
with self.assertNumQueries(1):
cts = ContentType.objects.get_for_models(ContentType, FooWithUrl)
self.assertEqual(cts, {
ContentType: ContentType.objects.get_for_model(ContentType),
FooWithUrl: ContentType.objects.get_for_model(FooWithUrl),
})
def test_get_for_models_full_cache(self):
# Full cache
ContentType.objects.get_for_model(ContentType)
ContentType.objects.get_for_model(FooWithUrl)
with self.assertNumQueries(0):
cts = ContentType.objects.get_for_models(ContentType, FooWithUrl)
self.assertEqual(cts, {
ContentType: ContentType.objects.get_for_model(ContentType),
FooWithUrl: ContentType.objects.get_for_model(FooWithUrl),
})
def test_get_for_concrete_model(self):
"""
Make sure the `for_concrete_model` kwarg correctly works
with concrete, proxy and deferred models
"""
concrete_model_ct = ContentType.objects.get_for_model(ConcreteModel)
self.assertEqual(concrete_model_ct,
ContentType.objects.get_for_model(ProxyModel))
self.assertEqual(concrete_model_ct,
ContentType.objects.get_for_model(ConcreteModel,
for_concrete_model=False))
proxy_model_ct = ContentType.objects.get_for_model(ProxyModel,
for_concrete_model=False)
self.assertNotEqual(concrete_model_ct, proxy_model_ct)
# Make sure deferred models are correctly handled
ConcreteModel.objects.create(name="Concrete")
DeferredConcreteModel = ConcreteModel.objects.only('pk').get().__class__
DeferredProxyModel = ProxyModel.objects.only('pk').get().__class__
self.assertEqual(concrete_model_ct,
ContentType.objects.get_for_model(DeferredConcreteModel))
self.assertEqual(concrete_model_ct,
ContentType.objects.get_for_model(DeferredConcreteModel,
for_concrete_model=False))
self.assertEqual(concrete_model_ct,
ContentType.objects.get_for_model(DeferredProxyModel))
self.assertEqual(proxy_model_ct,
ContentType.objects.get_for_model(DeferredProxyModel,
for_concrete_model=False))
def test_get_for_concrete_models(self):
"""
Make sure the `for_concrete_models` kwarg correctly works
with concrete, proxy and deferred models.
"""
concrete_model_ct = ContentType.objects.get_for_model(ConcreteModel)
cts = ContentType.objects.get_for_models(ConcreteModel, ProxyModel)
self.assertEqual(cts, {
ConcreteModel: concrete_model_ct,
ProxyModel: concrete_model_ct,
})
proxy_model_ct = ContentType.objects.get_for_model(ProxyModel,
for_concrete_model=False)
cts = ContentType.objects.get_for_models(ConcreteModel, ProxyModel,
for_concrete_models=False)
self.assertEqual(cts, {
ConcreteModel: concrete_model_ct,
ProxyModel: proxy_model_ct,
})
# Make sure deferred models are correctly handled
ConcreteModel.objects.create(name="Concrete")
DeferredConcreteModel = ConcreteModel.objects.only('pk').get().__class__
DeferredProxyModel = ProxyModel.objects.only('pk').get().__class__
cts = ContentType.objects.get_for_models(DeferredConcreteModel,
DeferredProxyModel)
self.assertEqual(cts, {
DeferredConcreteModel: concrete_model_ct,
DeferredProxyModel: concrete_model_ct,
})
cts = ContentType.objects.get_for_models(DeferredConcreteModel,
DeferredProxyModel,
for_concrete_models=False)
self.assertEqual(cts, {
DeferredConcreteModel: concrete_model_ct,
DeferredProxyModel: proxy_model_ct,
})
@override_settings(ALLOWED_HOSTS=['example.com'])
def test_shortcut_view(self):
"""
Check that the shortcut view (used for the admin "view on site"
functionality) returns a complete URL regardless of whether the sites
framework is installed
"""
request = HttpRequest()
request.META = {
"SERVER_NAME": "Example.com",
"SERVER_PORT": "80",
}
user_ct = ContentType.objects.get_for_model(FooWithUrl)
obj = FooWithUrl.objects.create(name="john")
with self.modify_settings(INSTALLED_APPS={'append': 'django.contrib.sites'}):
response = shortcut(request, user_ct.id, obj.id)
self.assertEqual("http://%s/users/john/" % get_current_site(request).domain,
response._headers.get("location")[1])
with self.modify_settings(INSTALLED_APPS={'remove': 'django.contrib.sites'}):
response = shortcut(request, user_ct.id, obj.id)
self.assertEqual("http://Example.com/users/john/",
response._headers.get("location")[1])
def test_shortcut_view_without_get_absolute_url(self):
"""
Check that the shortcut view (used for the admin "view on site"
functionality) returns 404 when get_absolute_url is not defined.
"""
request = HttpRequest()
request.META = {
"SERVER_NAME": "Example.com",
"SERVER_PORT": "80",
}
user_ct = ContentType.objects.get_for_model(FooWithoutUrl)
obj = FooWithoutUrl.objects.create(name="john")
self.assertRaises(Http404, shortcut, request, user_ct.id, obj.id)
def test_shortcut_view_with_broken_get_absolute_url(self):
"""
Check that the shortcut view does not catch an AttributeError raised
by the model's get_absolute_url method.
Refs #8997.
"""
request = HttpRequest()
request.META = {
"SERVER_NAME": "Example.com",
"SERVER_PORT": "80",
}
user_ct = ContentType.objects.get_for_model(FooWithBrokenAbsoluteUrl)
obj = FooWithBrokenAbsoluteUrl.objects.create(name="john")
self.assertRaises(AttributeError, shortcut, request, user_ct.id, obj.id)
def test_missing_model(self):
"""
Ensures that displaying content types in admin (or anywhere) doesn't
break on leftover content type records in the DB for which no model
is defined anymore.
"""
ct = ContentType.objects.create(
app_label='contenttypes',
model='OldModel',
)
self.assertEqual(six.text_type(ct), 'OldModel')
self.assertIsNone(ct.model_class())
# Make sure stale ContentTypes can be fetched like any other object.
# Before Django 1.6 this caused a NoneType error in the caching mechanism.
# Instead, just return the ContentType object and let the app detect stale states.
ct_fetched = ContentType.objects.get_for_id(ct.pk)
self.assertIsNone(ct_fetched.model_class())
def test_name_deprecation(self):
"""
ContentType.name has been removed. Test that a warning is emitted when
creating a ContentType with a `name`, but the creation should not fail.
"""
with warnings.catch_warnings(record=True) as warns:
warnings.simplefilter('always')
ContentType.objects.create(
name='Name',
app_label='contenttypes',
model='OldModel',
)
self.assertEqual(len(warns), 1)
self.assertEqual(
str(warns[0].message),
"ContentType.name field doesn't exist any longer. Please remove it from your code."
)
self.assertTrue(ContentType.objects.filter(model='OldModel').exists())
@mock.patch('django.contrib.contenttypes.models.ContentTypeManager.get_or_create')
@mock.patch('django.contrib.contenttypes.models.ContentTypeManager.get')
def test_message_if_get_for_model_fails(self, mocked_get, mocked_get_or_create):
"""
Check that `RuntimeError` with nice error message is raised if
`get_for_model` fails because of database errors.
"""
def _test_message(mocked_method):
for ExceptionClass in (IntegrityError, OperationalError, ProgrammingError):
mocked_method.side_effect = ExceptionClass
with self.assertRaisesMessage(
RuntimeError,
"Error creating new content types. Please make sure contenttypes "
"is migrated before trying to migrate apps individually."
):
ContentType.objects.get_for_model(ContentType)
_test_message(mocked_get)
mocked_get.side_effect = ContentType.DoesNotExist
_test_message(mocked_get_or_create)
| bsd-3-clause |
saukrIppl/seahub | seahub/base/accounts.py | 1 | 22247 | # encoding: utf-8
from django import forms
from django.core.mail import send_mail
from django.utils import translation
from django.utils.translation import ugettext_lazy as _
from django.conf import settings
from django.contrib.sites.models import RequestSite
from django.contrib.sites.models import Site
from seahub.auth import login
from registration import signals
import seaserv
from seaserv import ccnet_threaded_rpc, unset_repo_passwd, is_passwd_set, \
seafile_api
from seahub.profile.models import Profile, DetailedProfile
from seahub.utils import is_valid_username, is_user_password_strong, \
clear_token, get_system_admins
from seahub.utils.mail import send_html_email_with_dj_template, MAIL_PRIORITY
try:
from seahub.settings import CLOUD_MODE
except ImportError:
CLOUD_MODE = False
try:
from seahub.settings import MULTI_TENANCY
except ImportError:
MULTI_TENANCY = False
from constance import config
UNUSABLE_PASSWORD = '!' # This will never be a valid hash
class UserManager(object):
def create_user(self, email, password=None, is_staff=False, is_active=False):
"""
Creates and saves a User with the given email and password.
"""
# Lowercasing email address to avoid confusion.
email = email.lower()
user = User(email=email)
user.is_staff = is_staff
user.is_active = is_active
user.set_password(password)
user.save()
return self.get(email=email)
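# Illustrative call (a minimal sketch; the email and password are made up):
#
#   user = User.objects.create_user('[email protected]', 's3cret', is_active=True)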
def update_role(self, email, role):
"""
If the user has a role, update it; otherwise create a role for the user.
"""
ccnet_threaded_rpc.update_role_emailuser(email, role)
return self.get(email=email)
def create_superuser(self, email, password):
u = self.create_user(email, password, is_staff=True, is_active=True)
return u
def get_superusers(self):
"""Return a list of admins.
"""
emailusers = ccnet_threaded_rpc.get_superusers()
user_list = []
for e in emailusers:
user = User(e.email)
user.id = e.id
user.is_staff = e.is_staff
user.is_active = e.is_active
user.ctime = e.ctime
user_list.append(user)
return user_list
def get(self, email=None, id=None):
if not email and not id:
raise User.DoesNotExist, 'User matching query does not exist.'
if email:
emailuser = ccnet_threaded_rpc.get_emailuser(email)
if id:
emailuser = ccnet_threaded_rpc.get_emailuser_by_id(id)
if not emailuser:
raise User.DoesNotExist, 'User matching query does not exist.'
user = User(emailuser.email)
user.id = emailuser.id
user.enc_password = emailuser.password
user.is_staff = emailuser.is_staff
user.is_active = emailuser.is_active
user.ctime = emailuser.ctime
user.org = emailuser.org
user.source = emailuser.source
user.role = emailuser.role
return user
class UserPermissions(object):
def __init__(self, user):
self.user = user
def can_add_repo(self):
return True
def can_add_group(self):
return True
def can_generate_shared_link(self):
return True
def can_use_global_address_book(self):
return True
def can_view_org(self):
if MULTI_TENANCY:
return True if self.user.org is not None else False
return False if CLOUD_MODE else True
class User(object):
is_staff = False
is_active = False
is_superuser = False
groups = []
org = None
objects = UserManager()
class DoesNotExist(Exception):
pass
def __init__(self, email):
self.username = email
self.email = email
self.permissions = UserPermissions(self)
def __unicode__(self):
return self.username
def is_anonymous(self):
"""
Always returns False. This is a way of comparing User objects to
anonymous users.
"""
return False
def is_authenticated(self):
"""
Always return True. This is a way to tell if the user has been
authenticated in templates.
"""
return True
def save(self):
emailuser = ccnet_threaded_rpc.get_emailuser(self.username)
if emailuser:
if not hasattr(self, 'password'):
self.set_unusable_password()
if emailuser.source == "DB":
source = "DB"
else:
source = "LDAP"
result_code = ccnet_threaded_rpc.update_emailuser(source,
emailuser.id,
self.password,
int(self.is_staff),
int(self.is_active))
else:
result_code = ccnet_threaded_rpc.add_emailuser(self.username,
self.password,
int(self.is_staff),
int(self.is_active))
# -1 stands for failed; 0 stands for success
return result_code
def delete(self):
"""
When deleting a user, we should also delete group relationships.
"""
if self.source == "DB":
source = "DB"
else:
source = "LDAP"
owned_repos = []
orgs = ccnet_threaded_rpc.get_orgs_by_user(self.username)
if orgs:
for org in orgs:
owned_repos += seafile_api.get_org_owned_repo_list(org.org_id,
self.username)
else:
owned_repos += seafile_api.get_owned_repo_list(self.username)
for r in owned_repos:
seafile_api.remove_repo(r.id)
clear_token(self.username)
ccnet_threaded_rpc.remove_emailuser(source, self.username)
Profile.objects.delete_profile_by_user(self.username)
def get_and_delete_messages(self):
messages = []
return messages
def set_password(self, raw_password):
if raw_password is None:
self.set_unusable_password()
else:
self.password = '%s' % raw_password
def check_password(self, raw_password):
"""
Returns a boolean of whether the raw_password was correct. Handles
encryption formats behind the scenes.
"""
# Backwards-compatibility check. Older passwords won't include the
# algorithm or salt.
# if '$' not in self.password:
# is_correct = (self.password == \
# get_hexdigest('sha1', '', raw_password))
# return is_correct
return (ccnet_threaded_rpc.validate_emailuser(self.username, raw_password) == 0)
def set_unusable_password(self):
# Sets a value that will never be a valid hash
self.password = UNUSABLE_PASSWORD
def email_user(self, subject, message, from_email=None):
"Sends an e-mail to this User."
send_mail(subject, message, from_email, [self.email])
def freeze_user(self, notify_admins=False):
self.is_active = False
self.save()
if notify_admins:
admins = get_system_admins()
for u in admins:
# save current language
cur_language = translation.get_language()
# get and active user language
user_language = Profile.objects.get_user_language(u.email)
translation.activate(user_language)
send_html_email_with_dj_template(
u.email, dj_template='sysadmin/user_freeze_email.html',
subject=_('Account %(account)s froze on %(site)s.') % {
"account": self.email,
"site": settings.SITE_NAME,
},
context={'user': self.email},
priority=MAIL_PRIORITY.now
)
# restore current language
translation.activate(cur_language)
def remove_repo_passwds(self):
"""
Remove all repo decryption passwords stored on server.
"""
from seahub.utils import get_user_repos
owned_repos, shared_repos, groups_repos, public_repos = get_user_repos(self.email)
def has_repo(repos, repo):
for r in repos:
if repo.id == r.id:
return True
return False
passwd_setted_repos = []
for r in owned_repos + shared_repos + groups_repos + public_repos:
if not has_repo(passwd_setted_repos, r) and r.encrypted and \
is_passwd_set(r.id, self.email):
passwd_setted_repos.append(r)
for r in passwd_setted_repos:
unset_repo_passwd(r.id, self.email)
def remove_org_repo_passwds(self, org_id):
"""
Remove all org repo decryption passwords stored on server.
"""
from seahub.utils import get_user_repos
owned_repos, shared_repos, groups_repos, public_repos = get_user_repos(self.email, org_id=org_id)
def has_repo(repos, repo):
for r in repos:
if repo.id == r.id:
return True
return False
passwd_setted_repos = []
for r in owned_repos + shared_repos + groups_repos + public_repos:
if not has_repo(passwd_setted_repos, r) and r.encrypted and \
is_passwd_set(r.id, self.email):
passwd_setted_repos.append(r)
for r in passwd_setted_repos:
unset_repo_passwd(r.id, self.email)
class AuthBackend(object):
def get_user_with_import(self, username):
emailuser = seaserv.get_emailuser_with_import(username)
if not emailuser:
raise User.DoesNotExist, 'User matching query does not exist.'
user = User(emailuser.email)
user.id = emailuser.id
user.enc_password = emailuser.password
user.is_staff = emailuser.is_staff
user.is_active = emailuser.is_active
user.ctime = emailuser.ctime
user.org = emailuser.org
user.source = emailuser.source
user.role = emailuser.role
return user
def get_user(self, username):
try:
user = self.get_user_with_import(username)
except User.DoesNotExist:
user = None
return user
def authenticate(self, username=None, password=None):
user = self.get_user(username)
if not user:
return None
if user.check_password(password):
return user
########## Register related
class RegistrationBackend(object):
"""
A registration backend which follows a simple workflow:
1. User signs up, inactive account is created.
2. Email is sent to user with activation link.
3. User clicks activation link, account is now active.
Using this backend requires that
* ``registration`` be listed in the ``INSTALLED_APPS`` setting
(since this backend makes use of models defined in this
application).
* The setting ``ACCOUNT_ACTIVATION_DAYS`` be supplied, specifying
(as an integer) the number of days from registration during
which a user may activate their account (after that period
expires, activation will be disallowed).
* The creation of the templates
``registration/activation_email_subject.txt`` and
``registration/activation_email.txt``, which will be used for
the activation email. See the notes for this backend's
``register`` method for details regarding these templates.
Additionally, registration can be temporarily closed by adding the
setting ``REGISTRATION_OPEN`` and setting it to
``False``. Omitting this setting, or setting it to ``True``, will
be interpreted as meaning that registration is currently open and
permitted.
Internally, this is accomplished via storing an activation key in
an instance of ``registration.models.RegistrationProfile``. See
that model and its custom manager for full documentation of its
fields and supported operations.
"""
def register(self, request, **kwargs):
"""
Given a username, email address and password, register a new
user account, which will initially be inactive.
Along with the new ``User`` object, a new
``registration.models.RegistrationProfile`` will be created,
tied to that ``User``, containing the activation key which
will be used for this account.
An email will be sent to the supplied email address; this
email should contain an activation link. The email will be
rendered using two templates. See the documentation for
``RegistrationProfile.send_activation_email()`` for
information about these templates and the contexts provided to
them.
After the ``User`` and ``RegistrationProfile`` are created and
the activation email is sent, the signal
``registration.signals.user_registered`` will be sent, with
the new ``User`` as the keyword argument ``user`` and the
class of this backend as the sender.
"""
email, password = kwargs['email'], kwargs['password1']
username = email
if Site._meta.installed:
site = Site.objects.get_current()
else:
site = RequestSite(request)
from registration.models import RegistrationProfile
if bool(config.ACTIVATE_AFTER_REGISTRATION) is True:
# since user will be activated after registration,
# so we will not use email sending, just create an activated user
new_user = RegistrationProfile.objects.create_active_user(username, email,
password, site,
send_email=False)
# login the user
new_user.backend=settings.AUTHENTICATION_BACKENDS[0]
login(request, new_user)
else:
# create inactive user, user can be activated by admin, or through activated email
new_user = RegistrationProfile.objects.create_inactive_user(username, email,
password, site,
send_email=config.REGISTRATION_SEND_MAIL)
# userid = kwargs['userid']
# if userid:
# ccnet_threaded_rpc.add_binding(new_user.username, userid)
if settings.REQUIRE_DETAIL_ON_REGISTRATION:
name = kwargs.get('name', '')
department = kwargs.get('department', '')
telephone = kwargs.get('telephone', '')
note = kwargs.get('note', '')
Profile.objects.add_or_update(new_user.username, name, note)
DetailedProfile.objects.add_detailed_profile(new_user.username,
department,
telephone)
signals.user_registered.send(sender=self.__class__,
user=new_user,
request=request)
return new_user
def activate(self, request, activation_key):
"""
Given an activation key, look up and activate the user
account corresponding to that key (if possible).
After successful activation, the signal
``registration.signals.user_activated`` will be sent, with the
newly activated ``User`` as the keyword argument ``user`` and
the class of this backend as the sender.
"""
from registration.models import RegistrationProfile
activated = RegistrationProfile.objects.activate_user(activation_key)
if activated:
signals.user_activated.send(sender=self.__class__,
user=activated,
request=request)
# login the user
activated.backend=settings.AUTHENTICATION_BACKENDS[0]
login(request, activated)
return activated
def registration_allowed(self, request):
"""
Indicate whether account registration is currently permitted,
based on the value of the setting ``REGISTRATION_OPEN``. This
is determined as follows:
* If ``REGISTRATION_OPEN`` is not specified in settings, or is
set to ``True``, registration is permitted.
* If ``REGISTRATION_OPEN`` is both specified and set to
``False``, registration is not permitted.
"""
return getattr(settings, 'REGISTRATION_OPEN', True)
def get_form_class(self, request):
"""
Return the default form class used for user registration.
"""
return RegistrationForm
def post_registration_redirect(self, request, user):
"""
Return the name of the URL to redirect to after successful
user registration.
"""
return ('registration_complete', (), {})
def post_activation_redirect(self, request, user):
"""
Return the name of the URL to redirect to after successful
account activation.
"""
return ('libraries', (), {})
class RegistrationForm(forms.Form):
"""
Form for registering a new user account.
Validates that the requested email is not already in use, and
requires the password to be entered twice to catch typos.
"""
attrs_dict = { 'class': 'input' }
email = forms.CharField(widget=forms.TextInput(attrs=dict(attrs_dict,
maxlength=75)),
label=_("Email address"))
userid = forms.RegexField(regex=r'^\w+$',
max_length=40,
required=False,
widget=forms.TextInput(),
label=_("Username"),
error_messages={ 'invalid': _("This value must be of length 40") })
password1 = forms.CharField(widget=forms.PasswordInput(attrs=attrs_dict, render_value=False),
label=_("Password"))
password2 = forms.CharField(widget=forms.PasswordInput(attrs=attrs_dict, render_value=False),
label=_("Password (again)"))
def clean_email(self):
email = self.cleaned_data['email']
if not is_valid_username(email):
raise forms.ValidationError(_("Enter a valid email address."))
emailuser = ccnet_threaded_rpc.get_emailuser(email)
if not emailuser:
return self.cleaned_data['email']
else:
raise forms.ValidationError(_("A user with this email already"))
def clean_userid(self):
if self.cleaned_data['userid'] and len(self.cleaned_data['userid']) != 40:
raise forms.ValidationError(_("Invalid user id."))
return self.cleaned_data['userid']
def clean_password1(self):
if 'password1' in self.cleaned_data:
pwd = self.cleaned_data['password1']
if bool(config.USER_STRONG_PASSWORD_REQUIRED) is True:
if bool(is_user_password_strong(pwd)) is True:
return pwd
else:
raise forms.ValidationError(
_(("%(pwd_len)s characters or more, include "
"%(num_types)s types or more of these: "
"letters(case sensitive), numbers, and symbols")) %
{'pwd_len': config.USER_PASSWORD_MIN_LENGTH,
'num_types': config.USER_PASSWORD_STRENGTH_LEVEL})
else:
return pwd
def clean_password2(self):
"""
Verify that the values entered into the two password fields
match. Note that an error here will end up in
``non_field_errors()`` because it doesn't apply to a single
field.
"""
if 'password1' in self.cleaned_data and 'password2' in self.cleaned_data:
if self.cleaned_data['password1'] != self.cleaned_data['password2']:
raise forms.ValidationError(_("The two password fields didn't match."))
return self.cleaned_data
class DetailedRegistrationForm(RegistrationForm):
attrs_dict = { 'class': 'input' }
try:
from seahub.settings import REGISTRATION_DETAILS_MAP
except:
REGISTRATION_DETAILS_MAP = None
if REGISTRATION_DETAILS_MAP:
name_required = REGISTRATION_DETAILS_MAP.get('name', False)
dept_required = REGISTRATION_DETAILS_MAP.get('department', False)
tele_required = REGISTRATION_DETAILS_MAP.get('telephone', False)
note_required = REGISTRATION_DETAILS_MAP.get('note', False)
else:
# Backward compatible
name_required = dept_required = tele_required = note_required = True
name = forms.CharField(widget=forms.TextInput(
attrs=dict(attrs_dict, maxlength=64)), label=_("name"),
required=name_required)
department = forms.CharField(widget=forms.TextInput(
attrs=dict(attrs_dict, maxlength=512)), label=_("department"),
required=dept_required)
telephone = forms.CharField(widget=forms.TextInput(
attrs=dict(attrs_dict, maxlength=100)), label=_("telephone"),
required=tele_required)
note = forms.CharField(widget=forms.TextInput(
attrs=dict(attrs_dict, maxlength=100)), label=_("note"),
required=note_required)
| apache-2.0 |
jredrejo/web2pyreactpoc | languages/zh.py | 152 | 10080 | # coding: utf8
{
'!langcode!': 'zh-tw',
'!langname!': '中文',
'"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN': '"更新" 是選擇性的條件式, 格式就像 "欄位1=\'值\'". 但是 JOIN 的資料不可以使用 update 或是 delete"',
'%s %%{row} deleted': '已刪除 %s 筆',
'%s %%{row} updated': '已更新 %s 筆',
'%s selected': '%s 已選擇',
'%Y-%m-%d': '%Y-%m-%d',
'%Y-%m-%d %H:%M:%S': '%Y-%m-%d %H:%M:%S',
'(something like "it-it")': '(格式類似 "zh-tw")',
'A new version of web2py is available': '新版的 web2py 已發行',
'A new version of web2py is available: %s': '新版的 web2py 已發行: %s',
'about': '關於',
'About': '關於',
'About application': '關於本應用程式',
'Access Control': 'Access Control',
'Admin is disabled because insecure channel': '管理功能(Admin)在不安全連線環境下自動關閉',
'Admin is disabled because unsecure channel': '管理功能(Admin)在不安全連線環境下自動關閉',
'Administrative Interface': 'Administrative Interface',
'Administrative interface': '點此處進入管理介面',
'Administrator Password:': '管理員密碼:',
'Ajax Recipes': 'Ajax Recipes',
'appadmin is disabled because insecure channel': '因為來自非安全通道,管理介面關閉',
'Are you sure you want to delete file "%s"?': '確定要刪除檔案"%s"?',
'Are you sure you want to delete this object?': 'Are you sure you want to delete this object?',
'Are you sure you want to uninstall application "%s"': '確定要移除應用程式 "%s"',
'Are you sure you want to uninstall application "%s"?': '確定要移除應用程式 "%s"',
'ATTENTION: Login requires a secure (HTTPS) connection or running on localhost.': '注意: 登入管理帳號需要安全連線(HTTPS)或是在本機連線(localhost).',
'ATTENTION: TESTING IS NOT THREAD SAFE SO DO NOT PERFORM MULTIPLE TESTS CONCURRENTLY.': '注意: 因為在測試模式不保證多執行緒安全性,也就是說不可以同時執行多個測試案例',
'ATTENTION: you cannot edit the running application!': '注意:不可編輯正在執行的應用程式!',
'Authentication': '驗證',
'Available Databases and Tables': '可提供的資料庫和資料表',
'Buy this book': 'Buy this book',
'cache': '快取記憶體',
'Cache': 'Cache',
'Cache Keys': 'Cache Keys',
'Cannot be empty': '不可空白',
'Cannot compile: there are errors in your app. Debug it, correct errors and try again.': '無法編譯:應用程式中含有錯誤,請除錯後再試一次.',
'Change Password': '變更密碼',
'change password': '變更密碼',
'Check to delete': '打勾代表刪除',
'Check to delete:': '點選以示刪除:',
'Clear CACHE?': 'Clear CACHE?',
'Clear DISK': 'Clear DISK',
'Clear RAM': 'Clear RAM',
'Client IP': '客戶端網址(IP)',
'Community': 'Community',
'Components and Plugins': 'Components and Plugins',
'Controller': '控件',
'Controllers': '控件',
'Copyright': '版權所有',
'Create new application': '創建應用程式',
'Current request': '目前網路資料要求(request)',
'Current response': '目前網路資料回應(response)',
'Current session': '目前網路連線資訊(session)',
'customize me!': '請調整我!',
'data uploaded': '資料已上傳',
'Database': '資料庫',
'Database %s select': '已選擇 %s 資料庫',
'Date and Time': '日期和時間',
'db': 'db',
'DB Model': '資料庫模組',
'Delete': '刪除',
'Delete:': '刪除:',
'Demo': 'Demo',
'Deploy on Google App Engine': '配置到 Google App Engine',
'Deployment Recipes': 'Deployment Recipes',
'Description': '描述',
'DESIGN': '設計',
'design': '設計',
'Design for': '設計為了',
'DISK': 'DISK',
'Disk Cache Keys': 'Disk Cache Keys',
'Disk Cleared': 'Disk Cleared',
'Documentation': 'Documentation',
"Don't know what to do?": "Don't know what to do?",
'done!': '完成!',
'Download': 'Download',
'E-mail': '電子郵件',
'EDIT': '編輯',
'Edit': '編輯',
'Edit application': '編輯應用程式',
'Edit current record': '編輯當前紀錄',
'edit profile': '編輯設定檔',
'Edit Profile': '編輯設定檔',
'Edit This App': '編輯本應用程式',
'Editing file': '編輯檔案',
'Editing file "%s"': '編輯檔案"%s"',
'Email and SMS': 'Email and SMS',
'Error logs for "%(app)s"': '"%(app)s"的錯誤紀錄',
'Errors': 'Errors',
'export as csv file': '以逗號分隔檔(csv)格式匯出',
'FAQ': 'FAQ',
'First name': '名',
'Forms and Validators': 'Forms and Validators',
'Free Applications': 'Free Applications',
'Functions with no doctests will result in [passed] tests.': '沒有 doctests 的函式會顯示 [passed].',
'Group ID': '群組編號',
'Groups': 'Groups',
'Hello World': '嗨! 世界',
'Home': 'Home',
'How did you get here?': 'How did you get here?',
'import': 'import',
'Import/Export': '匯入/匯出',
'Index': '索引',
'insert new': '插入新資料',
'insert new %s': '插入新資料 %s',
'Installed applications': '已安裝應用程式',
'Internal State': '內部狀態',
'Introduction': 'Introduction',
'Invalid action': '不合法的動作(action)',
'Invalid email': '不合法的電子郵件',
'Invalid Query': '不合法的查詢',
'invalid request': '不合法的網路要求(request)',
'Key': 'Key',
'Language files (static strings) updated': '語言檔已更新',
'Languages': '各國語言',
'Last name': '姓',
'Last saved on:': '最後儲存時間:',
'Layout': '網頁配置',
'Layout Plugins': 'Layout Plugins',
'Layouts': 'Layouts',
'License for': '軟體版權為',
'Live Chat': 'Live Chat',
'login': '登入',
'Login': '登入',
'Login to the Administrative Interface': '登入到管理員介面',
'logout': '登出',
'Logout': '登出',
'Lost Password': '密碼遺忘',
'Main Menu': '主選單',
'Manage Cache': 'Manage Cache',
'Menu Model': '選單模組(menu)',
'Models': '資料模組',
'Modules': '程式模組',
'My Sites': 'My Sites',
'Name': '名字',
'New Record': '新紀錄',
'new record inserted': '已插入新紀錄',
'next 100 rows': '往後 100 筆',
'NO': '否',
'No databases in this application': '這應用程式不含資料庫',
'Online examples': '點此處進入線上範例',
'or import from csv file': '或是從逗號分隔檔(CSV)匯入',
'Origin': '原文',
'Original/Translation': '原文/翻譯',
'Other Plugins': 'Other Plugins',
'Other Recipes': 'Other Recipes',
'Overview': 'Overview',
'Password': '密碼',
"Password fields don't match": '密碼欄不匹配',
'Peeking at file': '選擇檔案',
'Plugins': 'Plugins',
'Powered by': '基於以下技術構建:',
'Preface': 'Preface',
'previous 100 rows': '往前 100 筆',
'Python': 'Python',
'Query:': '查詢:',
'Quick Examples': 'Quick Examples',
'RAM': 'RAM',
'RAM Cache Keys': 'RAM Cache Keys',
'Ram Cleared': 'Ram Cleared',
'Recipes': 'Recipes',
'Record': '紀錄',
'record does not exist': '紀錄不存在',
'Record ID': '紀錄編號',
'Record id': '紀錄編號',
'Register': '註冊',
'register': '註冊',
'Registration key': '註冊金鑰',
'Remember me (for 30 days)': '記住我(30 天)',
'Reset Password key': '重設密碼',
'Resolve Conflict file': '解決衝突檔案',
'Role': '角色',
'Rows in Table': '在資料表裏的資料',
'Rows selected': '筆資料被選擇',
'Saved file hash:': '檔案雜湊值已紀錄:',
'Semantic': 'Semantic',
'Services': 'Services',
'Size of cache:': 'Size of cache:',
'state': '狀態',
'Static files': '靜態檔案',
'Statistics': 'Statistics',
'Stylesheet': '網頁風格檔',
'submit': 'submit',
'Submit': '傳送',
'Support': 'Support',
'Sure you want to delete this object?': '確定要刪除此物件?',
'Table': '資料表',
'Table name': '資料表名稱',
'Testing application': '測試中的應用程式',
'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1==db.table2.field2" results in a SQL JOIN.': '"查詢"是一個像 "db.表1.欄位1==\'值\'" 的條件式. 以"db.表1.欄位1==db.表2.欄位2"方式則相當於執行 JOIN SQL.',
'The Core': 'The Core',
'The output of the file is a dictionary that was rendered by the view %s': 'The output of the file is a dictionary that was rendered by the view %s',
'The Views': 'The Views',
'There are no controllers': '沒有控件(controllers)',
'There are no models': '沒有資料庫模組(models)',
'There are no modules': '沒有程式模組(modules)',
'There are no static files': '沒有靜態檔案',
'There are no translators, only default language is supported': '沒有翻譯檔,只支援原始語言',
'There are no views': '沒有視圖',
'This App': 'This App',
'This is the %(filename)s template': '這是%(filename)s檔案的樣板(template)',
'Ticket': '問題單',
'Time in Cache (h:m:s)': 'Time in Cache (h:m:s)',
'Timestamp': '時間標記',
'Twitter': 'Twitter',
'Unable to check for upgrades': '無法做升級檢查',
'Unable to download': '無法下載',
'Unable to download app': '無法下載應用程式',
'unable to parse csv file': '無法解析逗號分隔檔(csv)',
'Update:': '更新:',
'Upload existing application': '更新存在的應用程式',
'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.': '使用下列方式來組合更複雜的條件式, (...)&(...) 代表同時存在的條件, (...)|(...) 代表擇一的條件, ~(...)則代表反向條件.',
'User %(id)s Logged-in': '使用者 %(id)s 已登入',
'User %(id)s Registered': '使用者 %(id)s 已註冊',
'User ID': '使用者編號',
'Verify Password': '驗證密碼',
'Videos': 'Videos',
'View': '視圖',
'Views': '視圖',
'Welcome %s': '歡迎 %s',
'Welcome to web2py': '歡迎使用 web2py',
'Welcome to web2py!': 'Welcome to web2py!',
'Which called the function %s located in the file %s': 'Which called the function %s located in the file %s',
'YES': '是',
'You are successfully running web2py': 'You are successfully running web2py',
'You can modify this application and adapt it to your needs': 'You can modify this application and adapt it to your needs',
'You visited the url %s': 'You visited the url %s',
}
| gpl-3.0 |
wshallum/ansible | lib/ansible/compat/tests/unittest.py | 375 | 1147 | # (c) 2014, Toshio Kuratomi <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
'''
Compat module for Python2.7's unittest module
'''
import sys
# Python 2.6
if sys.version_info < (2, 7):
try:
# Need unittest2 on python2.6
from unittest2 import *
except ImportError:
print('You need unittest2 installed on python2.6.x to run tests')
else:
from unittest import *
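# Typical consumption (a minimal sketch): test code imports this shim instead
# of the standard library module so the same tests run on Python 2.6 and later:
#
#   from ansible.compat.tests.unittest import TestCase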
| gpl-3.0 |
gohin/django | django/utils/regex_helper.py | 432 | 12673 | """
Functions for reversing a regular expression (used in reverse URL resolving).
Used internally by Django and not intended for external use.
This is not, and is not intended to be, a complete reg-exp decompiler. It
should be good enough for a large class of URLs, however.
"""
from __future__ import unicode_literals
from django.utils import six
from django.utils.six.moves import zip
# Mapping of an escape character to a representative of that class. So, e.g.,
# "\w" is replaced by "x" in a reverse URL. A value of None means to ignore
# this sequence. Any missing key is mapped to itself.
ESCAPE_MAPPINGS = {
"A": None,
"b": None,
"B": None,
"d": "0",
"D": "x",
"s": " ",
"S": "x",
"w": "x",
"W": "!",
"Z": None,
}
class Choice(list):
"""
Used to represent multiple possibilities at this point in a pattern string.
We use a distinguished type, rather than a list, so that the usage in the
code is clear.
"""
class Group(list):
"""
Used to represent a capturing group in the pattern string.
"""
class NonCapture(list):
"""
Used to represent a non-capturing group in the pattern string.
"""
def normalize(pattern):
"""
Given a reg-exp pattern, normalizes it to an iterable of forms that
suffice for reverse matching. This does the following:
(1) For any repeating sections, keeps the minimum number of occurrences
permitted (this means zero for optional groups).
(2) If an optional group includes parameters, include one occurrence of
that group (along with the zero occurrence case from step (1)).
(3) Select the first (essentially an arbitrary) element from any character
class. Select an arbitrary character for any unordered class (e.g. '.'
or '\w') in the pattern.
(4) Ignore comments, look-ahead and look-behind assertions, and any of the
reg-exp flags that won't change what we construct ("iLmsu"). "(?x)" is
an error, however.
(5) Raise an error on any disjunctive ('|') constructs.
Django's URLs for forward resolving are either all positional arguments or
all keyword arguments. That is assumed here, as well. Although reverse
resolving can be done using positional args when keyword args are
specified, the two cannot be mixed in the same reverse() call.
"""
# Do a linear scan to work out the special features of this pattern. The
# idea is that we scan once here and collect all the information we need to
# make future decisions.
result = []
non_capturing_groups = []
consume_next = True
pattern_iter = next_char(iter(pattern))
num_args = 0
# A "while" loop is used here because later on we need to be able to peek
# at the next character and possibly go around without consuming another
# one at the top of the loop.
try:
ch, escaped = next(pattern_iter)
except StopIteration:
return [('', [])]
try:
while True:
if escaped:
result.append(ch)
elif ch == '.':
# Replace "any character" with an arbitrary representative.
result.append(".")
elif ch == '|':
# FIXME: One day we should do this, but not in 1.0.
raise NotImplementedError('Awaiting Implementation')
elif ch == "^":
pass
elif ch == '$':
break
elif ch == ')':
# This can only be the end of a non-capturing group, since all
# other unescaped parentheses are handled by the grouping
# section later (and the full group is handled there).
#
# We regroup everything inside the capturing group so that it
# can be quantified, if necessary.
start = non_capturing_groups.pop()
inner = NonCapture(result[start:])
result = result[:start] + [inner]
elif ch == '[':
# Replace ranges with the first character in the range.
ch, escaped = next(pattern_iter)
result.append(ch)
ch, escaped = next(pattern_iter)
while escaped or ch != ']':
ch, escaped = next(pattern_iter)
elif ch == '(':
# Some kind of group.
ch, escaped = next(pattern_iter)
if ch != '?' or escaped:
# A positional group
name = "_%d" % num_args
num_args += 1
result.append(Group((("%%(%s)s" % name), name)))
walk_to_end(ch, pattern_iter)
else:
ch, escaped = next(pattern_iter)
if ch in "iLmsu#!=<":
# All of these are ignorable. Walk to the end of the
# group.
walk_to_end(ch, pattern_iter)
elif ch == ':':
# Non-capturing group
non_capturing_groups.append(len(result))
elif ch != 'P':
# Anything else, other than a named group, is something
# we cannot reverse.
raise ValueError("Non-reversible reg-exp portion: '(?%s'" % ch)
else:
ch, escaped = next(pattern_iter)
if ch not in ('<', '='):
raise ValueError("Non-reversible reg-exp portion: '(?P%s'" % ch)
# We are in a named capturing group. Extract the name and
# then skip to the end.
if ch == '<':
terminal_char = '>'
# We are in a named backreference.
else:
terminal_char = ')'
name = []
ch, escaped = next(pattern_iter)
while ch != terminal_char:
name.append(ch)
ch, escaped = next(pattern_iter)
param = ''.join(name)
# Named backreferences have already consumed the
# parenthesis.
if terminal_char != ')':
result.append(Group((("%%(%s)s" % param), param)))
walk_to_end(ch, pattern_iter)
else:
result.append(Group((("%%(%s)s" % param), None)))
elif ch in "*?+{":
# Quantifiers affect the previous item in the result list.
count, ch = get_quantifier(ch, pattern_iter)
if ch:
# We had to look ahead, but it wasn't needed to compute the
# quantifier, so use this character next time around the
# main loop.
consume_next = False
if count == 0:
if contains(result[-1], Group):
# If we are quantifying a capturing group (or
# something containing such a group) and the minimum is
# zero, we must also handle the case of one occurrence
# being present. All the quantifiers (except {0,0},
# which we conveniently ignore) that have a 0 minimum
# also allow a single occurrence.
result[-1] = Choice([None, result[-1]])
else:
result.pop()
elif count > 1:
result.extend([result[-1]] * (count - 1))
else:
# Anything else is a literal.
result.append(ch)
if consume_next:
ch, escaped = next(pattern_iter)
else:
consume_next = True
except StopIteration:
pass
except NotImplementedError:
# A case of using the disjunctive form. No results for you!
return [('', [])]
return list(zip(*flatten_result(result)))
def next_char(input_iter):
"""
An iterator that yields the next character from "pattern_iter", respecting
escape sequences. An escaped character is replaced by a representative of
its class (e.g. \w -> "x"). If the escaped character is one that is
skipped, it is not returned (the next character is returned instead).
Yields the next character, along with a boolean indicating whether it is a
raw (unescaped) character or not.
"""
for ch in input_iter:
if ch != '\\':
yield ch, False
continue
ch = next(input_iter)
representative = ESCAPE_MAPPINGS.get(ch, ch)
if representative is None:
continue
yield representative, True
def walk_to_end(ch, input_iter):
"""
The iterator is currently inside a capturing group. We want to walk to the
close of this group, skipping over any nested groups and handling escaped
parentheses correctly.
"""
if ch == '(':
nesting = 1
else:
nesting = 0
for ch, escaped in input_iter:
if escaped:
continue
elif ch == '(':
nesting += 1
elif ch == ')':
if not nesting:
return
nesting -= 1
def get_quantifier(ch, input_iter):
"""
Parse a quantifier from the input, where "ch" is the first character in the
quantifier.
Returns the minimum number of occurrences permitted by the quantifier and
either None or the next character from the input_iter if the next character
is not part of the quantifier.
"""
if ch in '*?+':
try:
ch2, escaped = next(input_iter)
except StopIteration:
ch2 = None
if ch2 == '?':
ch2 = None
if ch == '+':
return 1, ch2
return 0, ch2
quant = []
while ch != '}':
ch, escaped = next(input_iter)
quant.append(ch)
quant = quant[:-1]
values = ''.join(quant).split(',')
# Consume the trailing '?', if necessary.
try:
ch, escaped = next(input_iter)
except StopIteration:
ch = None
if ch == '?':
ch = None
return int(values[0]), ch
def contains(source, inst):
"""
Returns True if the "source" contains an instance of "inst". False,
otherwise.
"""
if isinstance(source, inst):
return True
if isinstance(source, NonCapture):
for elt in source:
if contains(elt, inst):
return True
return False
def flatten_result(source):
"""
Turns the given source sequence into a list of reg-exp possibilities and
their arguments. Returns a list of strings and a list of argument lists.
Each of the two lists will be of the same length.
"""
if source is None:
return [''], [[]]
if isinstance(source, Group):
if source[1] is None:
params = []
else:
params = [source[1]]
return [source[0]], [params]
result = ['']
result_args = [[]]
pos = last = 0
for pos, elt in enumerate(source):
if isinstance(elt, six.string_types):
continue
piece = ''.join(source[last:pos])
if isinstance(elt, Group):
piece += elt[0]
param = elt[1]
else:
param = None
last = pos + 1
for i in range(len(result)):
result[i] += piece
if param:
result_args[i].append(param)
if isinstance(elt, (Choice, NonCapture)):
if isinstance(elt, NonCapture):
elt = [elt]
inner_result, inner_args = [], []
for item in elt:
res, args = flatten_result(item)
inner_result.extend(res)
inner_args.extend(args)
new_result = []
new_args = []
for item, args in zip(result, result_args):
for i_item, i_args in zip(inner_result, inner_args):
new_result.append(item + i_item)
new_args.append(args[:] + i_args)
result = new_result
result_args = new_args
if pos >= last:
piece = ''.join(source[last:])
for i in range(len(result)):
result[i] += piece
return result, result_args
| bsd-3-clause |
mozilla/treeherder | treeherder/etl/taskcluster_pulse/parse_route.py | 2 | 1512 | # Code imported from https://github.com/taskcluster/taskcluster/blob/32629c562f8d6f5a6b608a3141a8ee2e0984619f/services/treeherder/src/util/route_parser.js
# A Taskcluster routing key will be in the form:
# treeherder.<version>.<user/project>|<project>.<revision>.<pushLogId/pullRequestId>
# [0] Routing key prefix used for listening to only treeherder relevant messages
# [1] Routing key version
# [2] In the form of user/project for github repos and just project for hg.mozilla.org
# [3] Top level revision for the push
# [4] Pull Request ID (github) or Push Log ID (hg.mozilla.org) of the push
# Note: pushes on a branch on Github would not have a PR ID
# Function extracted from
# https://github.com/taskcluster/taskcluster/blob/32629c562f8d6f5a6b608a3141a8ee2e0984619f/services/treeherder/src/util/route_parser.js
def parseRoute(route):
id = None
owner = None
parsedProject = None
parsedRoute = route.split('.')
project = parsedRoute[2]
if len(project.split('/')) == 2:
[owner, parsedProject] = project.split('/')
else:
parsedProject = project
if len(parsedRoute) == 5:
id = parsedRoute[4]
pushInfo = {
"destination": parsedRoute[0],
"id": int(id) if id else 0,
"project": parsedProject,
"revision": parsedRoute[3],
}
if owner and parsedProject:
pushInfo["owner"] = owner
pushInfo["origin"] = 'github.com'
else:
pushInfo["origin"] = 'hg.mozilla.org'
return pushInfo
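# Illustrative call (a minimal sketch; the route string is made up):
#
#   parseRoute("tc-treeherder.v2.autoland.abcdef123456.12345")
#   # -> {"destination": "tc-treeherder", "id": 12345, "project": "autoland",
#   #     "revision": "abcdef123456", "origin": "hg.mozilla.org"}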
| mpl-2.0 |
jdreaver/vispy | vispy/visuals/shaders/compiler.py | 20 | 7684 | # -*- coding: utf-8 -*-
# Copyright (c) 2015, Vispy Development Team.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
from __future__ import division
import re
from ... import gloo
class Compiler(object):
"""
Compiler is used to convert Function and Variable instances into
ready-to-use GLSL code. This class handles name mangling to ensure that
there are no name collisions amongst global objects. The final name of
each object may be retrieved using ``Compiler.__getitem__(obj)``.
Accepts multiple root Functions as keyword arguments. ``compile()`` then
returns a dict of GLSL strings with the same keys.
Example::
# initialize with two main functions
compiler = Compiler(vert=v_func, frag=f_func)
# compile and extract shaders
code = compiler.compile()
v_code = code['vert']
f_code = code['frag']
# look up name of some object
name = compiler[obj]
"""
def __init__(self, namespace=None, **shaders):
# cache of compilation results for each function and variable
if namespace is None:
namespace = {}
self._object_names = namespace # {object: name}
self.shaders = shaders
def __getitem__(self, item):
"""
Return the name of the specified object, if it has been assigned one.
"""
return self._object_names[item]
def compile(self, pretty=True):
""" Compile all code and return a dict {name: code} where the keys
are determined by the keyword arguments passed to __init__().
Parameters
----------
pretty : bool
If True, use a slower method to mangle object names. This produces
GLSL that is more readable.
If False, then the output is mostly unreadable GLSL, but is about
10x faster to compile.
"""
# Authoritative mapping of {obj: name}
self._object_names = {}
#
# 1. collect list of dependencies for each shader
#
# maps {shader_name: [deps]}
self._shader_deps = {}
for shader_name, shader in self.shaders.items():
this_shader_deps = []
self._shader_deps[shader_name] = this_shader_deps
dep_set = set()
for dep in shader.dependencies(sort=True):
# visit each object no more than once per shader
if dep.name is None or dep in dep_set:
continue
this_shader_deps.append(dep)
dep_set.add(dep)
#
# 2. Assign names to all objects.
#
if pretty:
self._rename_objects_pretty()
else:
self._rename_objects_fast()
#
# 3. Now we have a complete namespace; concatenate all definitions
# together in topological order.
#
compiled = {}
obj_names = self._object_names
for shader_name, shader in self.shaders.items():
code = []
for dep in self._shader_deps[shader_name]:
dep_code = dep.definition(obj_names)
if dep_code is not None:
# strip out version pragma if present;
regex = r'#version (\d+)'
m = re.search(regex, dep_code)
if m is not None:
# check requested version
if m.group(1) != '120':
raise RuntimeError("Currently only GLSL #version "
"120 is supported.")
dep_code = re.sub(regex, '', dep_code)
code.append(dep_code)
compiled[shader_name] = '\n'.join(code)
self.code = compiled
return compiled
def _rename_objects_fast(self):
""" Rename all objects quickly to guaranteed-unique names using the
id() of each object.
This produces mostly unreadable GLSL, but is about 10x faster to
compile.
"""
for shader_name, deps in self._shader_deps.items():
for dep in deps:
name = dep.name
if name != 'main':
ext = '_%x' % id(dep)
name = name[:32-len(ext)] + ext
self._object_names[dep] = name
def _rename_objects_pretty(self):
""" Rename all objects like "name_1" to avoid conflicts. Objects are
only renamed if necessary.
This method produces more readable GLSL, but is rather slow.
"""
#
# 1. For each object, add its static names to the global namespace
# and make a list of the shaders used by the object.
#
# {name: obj} mapping for finding unique names
# initialize with reserved keywords.
self._global_ns = dict([(kwd, None) for kwd in gloo.util.KEYWORDS])
# functions are local per-shader
self._shader_ns = dict([(shader, {}) for shader in self.shaders])
# for each object, keep a list of shaders the object appears in
obj_shaders = {}
for shader_name, deps in self._shader_deps.items():
for dep in deps:
# Add static names to namespace
for name in dep.static_names():
self._global_ns[name] = None
obj_shaders.setdefault(dep, []).append(shader_name)
#
# 2. Assign new object names
#
name_index = {}
for obj, shaders in obj_shaders.items():
name = obj.name
if self._name_available(obj, name, shaders):
# hooray, we get to keep this name
self._assign_name(obj, name, shaders)
else:
# boo, find a new name
while True:
index = name_index.get(name, 0) + 1
name_index[name] = index
ext = '_%d' % index
new_name = name[:32-len(ext)] + ext
if self._name_available(obj, new_name, shaders):
self._assign_name(obj, new_name, shaders)
break
def _is_global(self, obj):
""" Return True if *obj* should be declared in the global namespace.
Some objects need to be declared only in per-shader namespaces:
functions, static variables, and const variables may all be given
different definitions in each shader.
"""
# todo: right now we assume all Variables are global, and all
# Functions are local. Is this actually correct? Are there any
# global functions? Are there any local variables?
from .variable import Variable
return isinstance(obj, Variable)
def _name_available(self, obj, name, shaders):
""" Return True if *name* is available for *obj* in *shaders*.
"""
if name in self._global_ns:
return False
shaders = self.shaders if self._is_global(obj) else shaders
for shader in shaders:
if name in self._shader_ns[shader]:
return False
return True
def _assign_name(self, obj, name, shaders):
""" Assign *name* to *obj* in *shaders*.
"""
if self._is_global(obj):
assert name not in self._global_ns
self._global_ns[name] = obj
else:
for shader in shaders:
ns = self._shader_ns[shader]
assert name not in ns
ns[name] = obj
self._object_names[obj] = name
| bsd-3-clause |
RDXT/django-guardian | guardian/south_migrations/0002_auto__add_field_groupobjectpermission_object_pk__add_field_userobjectp.py | 85 | 5650 | # encoding: utf-8
from south.db import db
from south.v2 import SchemaMigration
from guardian.compat import user_model_label
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'GroupObjectPermission.object_pk'
db.add_column('guardian_groupobjectpermission', 'object_pk', self.gf('django.db.models.fields.TextField')(default=''), keep_default=False)
# Adding field 'UserObjectPermission.object_pk'
db.add_column('guardian_userobjectpermission', 'object_pk', self.gf('django.db.models.fields.TextField')(default=''), keep_default=False)
def backwards(self, orm):
# Deleting field 'GroupObjectPermission.object_pk'
db.delete_column('guardian_groupobjectpermission', 'object_pk')
# Deleting field 'UserObjectPermission.object_pk'
db.delete_column('guardian_userobjectpermission', 'object_pk')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
user_model_label: {
'Meta': {'object_name': user_model_label.split('.')[-1]},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'guardian.groupobjectpermission': {
'Meta': {'unique_together': "(['group', 'permission', 'content_type', 'object_id'],)", 'object_name': 'GroupObjectPermission'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'object_pk': ('django.db.models.fields.TextField', [], {'default': "''"}),
'permission': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.Permission']"})
},
'guardian.userobjectpermission': {
'Meta': {'unique_together': "(['user', 'permission', 'content_type', 'object_id'],)", 'object_name': 'UserObjectPermission'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'object_pk': ('django.db.models.fields.TextField', [], {'default': "''"}),
'permission': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.Permission']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['%s']" % user_model_label})
}
}
complete_apps = ['guardian']
| bsd-2-clause |
ed-/solum | solum/tests/deployer/handlers/test_noop.py | 1 | 1963 | # Copyright 2014 - Rackspace Hosting
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from solum.deployer.handlers import noop as noop_handler
from solum.openstack.common.gettextutils import _
from solum.tests import base
from solum.tests import fakes
from solum.tests import utils
class HandlerTest(base.BaseTestCase):
def setUp(self):
super(HandlerTest, self).setUp()
self.ctx = utils.dummy_context()
@mock.patch('solum.deployer.handlers.noop.LOG')
def test_echo(self, fake_LOG):
noop_handler.Handler().echo({}, 'foo')
fake_LOG.debug.assert_called_once_with(_('%s') % 'foo')
@mock.patch('solum.deployer.handlers.noop.LOG')
def test_deploy(self, fake_LOG):
args = [77, 'created_image_id']
noop_handler.Handler().deploy(self.ctx, *args)
message = 'Deploy %s %s' % tuple(args)
fake_LOG.debug.assert_called_once_with(_("%s") % message)
@mock.patch('solum.objects.registry')
@mock.patch('solum.deployer.handlers.noop.LOG')
def test_destroy(self, fake_LOG, fake_registry):
fake_assembly = fakes.FakeAssembly()
fake_registry.Assembly.get_by_id.return_value = fake_assembly
args = [fake_assembly.id]
noop_handler.Handler().destroy(self.ctx, *args)
fake_assembly.destroy.assert_called_once_with(self.ctx)
message = 'Destroy %s' % tuple(args)
fake_LOG.debug.assert_called_once_with(_("%s") % message)
| apache-2.0 |
jessie935513/omaha | plugins/update/generate_plugin_idls.py | 67 | 3325 | #!/usr/bin/python2.4
#
# Copyright 2007-2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========================================================================
"""
Generates IDL file for the OneClick ActiveX control from the passed-in IDL
template. The input template is a complete IDL file in all but one respect:
it has one replaceable entry for the CLSID for GoopdateOneClickControl.
We generate a GUID using UUIDGEN.EXE, and write out an IDL with a new CLSID.
"""
import sys
import os
import getopt
import commands
def _GetStatusOutput(cmd):
"""Return (status, output) of executing cmd in a shell."""
if os.name == "nt":
pipe = os.popen(cmd + " 2>&1", 'r')
text = pipe.read()
sts = pipe.close()
if sts is None: sts = 0
if text[-1:] == '\n': text = text[:-1]
return sts, text
else:
return commands.getstatusoutput(cmd)
def _GenerateIDLText(idl_template):
(status, guid) = _GetStatusOutput("uuidgen.exe")
if status != 0:
raise SystemExit("Failed to get GUID: %s" % guid)
return idl_template % guid
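# Illustrative example of the substitution above (the template text is made
# up; the real template is supplied via --idl_template_file):
#   _GenerateIDLText('[ uuid(%s) ] coclass GoopdateOneClickControl;')
# returns the same IDL text with a freshly generated GUID from UUIDGEN.EXE in
# place of the single '%s' placeholder.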
def _GenerateIDLFile(idl_template_filename, idl_output_filename):
f_in = open(idl_template_filename, 'r')
idl_template = f_in.read()
f_in.close()
idl_output = _GenerateIDLText(idl_template)
f_out = open(idl_output_filename, 'w')
f_out.write("""
// ** AUTOGENERATED FILE. DO NOT HAND-EDIT **
""")
f_out.write(idl_output)
f_out.close()
def _Usage():
"""Prints out script usage information."""
print """
generate_oneclick_idl.py: Write out the given IDL file.
Usage:
generate_oneclick_idl.py [--help
| --idl_template_file filename
--idl_output_file filename]
Options:
--help Show this information.
--idl_output_file filename Path/name of output IDL filename.
--idl_template_file filename Path/name of input IDL template.
"""
def _Main():
"""Generates IDL file."""
# use getopt to parse the option and argument list; this may raise, but
# don't catch it
_ARGUMENT_LIST = ["help", "idl_template_file=", "idl_output_file="]
(opts, args) = getopt.getopt(sys.argv[1:], "", _ARGUMENT_LIST)
if not opts or ("--help", "") in opts:
_Usage()
sys.exit()
idl_template_filename = ""
idl_output_filename = ""
for (o, v) in opts:
if o == "--idl_template_file":
idl_template_filename = v
if o == "--idl_output_file":
idl_output_filename = v
# make sure we have work to do
if not idl_template_filename:
raise SystemExit("no idl_template_filename specified")
if not idl_output_filename:
raise SystemExit("no idl_output_filename specified")
_GenerateIDLFile(idl_template_filename, idl_output_filename)
sys.exit()
if __name__ == "__main__":
_Main()
| apache-2.0 |
imsparsh/python-for-android | python3-alpha/extra_modules/bs4/tests/test_tree.py | 46 | 59853 | # -*- coding: utf-8 -*-
"""Tests for Beautiful Soup's tree traversal methods.
The tree traversal methods are the main advantage of using Beautiful
Soup over just using a parser.
Different parsers will build different Beautiful Soup trees given the
same markup, but all Beautiful Soup trees can be traversed with the
methods tested here.
"""
import copy
import pickle
import re
import warnings
from bs4 import BeautifulSoup
from bs4.builder import (
builder_registry,
HTMLParserTreeBuilder,
)
from bs4.element import (
CData,
Doctype,
NavigableString,
SoupStrainer,
Tag,
)
from bs4.testing import (
SoupTest,
skipIf,
)
XML_BUILDER_PRESENT = (builder_registry.lookup("xml") is not None)
LXML_PRESENT = (builder_registry.lookup("lxml") is not None)
class TreeTest(SoupTest):
def assertSelects(self, tags, should_match):
"""Make sure that the given tags have the correct text.
This is used in tests that define a bunch of tags, each
containing a single string, and then select certain strings by
some mechanism.
"""
self.assertEqual([tag.string for tag in tags], should_match)
def assertSelectsIDs(self, tags, should_match):
"""Make sure that the given tags have the correct IDs.
This is used in tests that define a bunch of tags, each
containing a single string, and then select certain strings by
some mechanism.
"""
self.assertEqual([tag['id'] for tag in tags], should_match)
class TestFind(TreeTest):
"""Basic tests of the find() method.
find() just calls find_all() with limit=1, so it's not tested all
    that thoroughly here.
"""
def test_find_tag(self):
soup = self.soup("<a>1</a><b>2</b><a>3</a><b>4</b>")
self.assertEqual(soup.find("b").string, "2")
def test_unicode_text_find(self):
soup = self.soup('<h1>Räksmörgås</h1>')
self.assertEqual(soup.find(text='Räksmörgås'), 'Räksmörgås')
class TestFindAll(TreeTest):
"""Basic tests of the find_all() method."""
def test_find_all_text_nodes(self):
"""You can search the tree for text nodes."""
soup = self.soup("<html>Foo<b>bar</b>\xbb</html>")
# Exact match.
self.assertEqual(soup.find_all(text="bar"), ["bar"])
# Match any of a number of strings.
self.assertEqual(
soup.find_all(text=["Foo", "bar"]), ["Foo", "bar"])
# Match a regular expression.
self.assertEqual(soup.find_all(text=re.compile('.*')),
["Foo", "bar", '\xbb'])
# Match anything.
self.assertEqual(soup.find_all(text=True),
["Foo", "bar", '\xbb'])
def test_find_all_limit(self):
"""You can limit the number of items returned by find_all."""
soup = self.soup("<a>1</a><a>2</a><a>3</a><a>4</a><a>5</a>")
self.assertSelects(soup.find_all('a', limit=3), ["1", "2", "3"])
self.assertSelects(soup.find_all('a', limit=1), ["1"])
self.assertSelects(
soup.find_all('a', limit=10), ["1", "2", "3", "4", "5"])
# A limit of 0 means no limit.
self.assertSelects(
soup.find_all('a', limit=0), ["1", "2", "3", "4", "5"])
def test_calling_a_tag_is_calling_findall(self):
soup = self.soup("<a>1</a><b>2<a id='foo'>3</a></b>")
self.assertSelects(soup('a', limit=1), ["1"])
self.assertSelects(soup.b(id="foo"), ["3"])
class TestFindAllBasicNamespaces(TreeTest):
def test_find_by_namespaced_name(self):
soup = self.soup('<mathml:msqrt>4</mathml:msqrt><a svg:fill="red">')
self.assertEqual("4", soup.find("mathml:msqrt").string)
self.assertEqual("a", soup.find(attrs= { "svg:fill" : "red" }).name)
class TestFindAllByName(TreeTest):
"""Test ways of finding tags by tag name."""
def setUp(self):
super(TreeTest, self).setUp()
self.tree = self.soup("""<a>First tag.</a>
<b>Second tag.</b>
<c>Third <a>Nested tag.</a> tag.</c>""")
def test_find_all_by_tag_name(self):
# Find all the <a> tags.
self.assertSelects(
self.tree.find_all('a'), ['First tag.', 'Nested tag.'])
def test_find_all_on_non_root_element(self):
# You can call find_all on any node, not just the root.
self.assertSelects(self.tree.c.find_all('a'), ['Nested tag.'])
def test_calling_element_invokes_find_all(self):
self.assertSelects(self.tree('a'), ['First tag.', 'Nested tag.'])
def test_find_all_by_tag_strainer(self):
self.assertSelects(
self.tree.find_all(SoupStrainer('a')),
['First tag.', 'Nested tag.'])
def test_find_all_by_tag_names(self):
self.assertSelects(
self.tree.find_all(['a', 'b']),
['First tag.', 'Second tag.', 'Nested tag.'])
def test_find_all_by_tag_dict(self):
self.assertSelects(
self.tree.find_all({'a' : True, 'b' : True}),
['First tag.', 'Second tag.', 'Nested tag.'])
def test_find_all_by_tag_re(self):
self.assertSelects(
self.tree.find_all(re.compile('^[ab]$')),
['First tag.', 'Second tag.', 'Nested tag.'])
def test_find_all_with_tags_matching_method(self):
# You can define an oracle method that determines whether
# a tag matches the search.
def id_matches_name(tag):
return tag.name == tag.get('id')
tree = self.soup("""<a id="a">Match 1.</a>
<a id="1">Does not match.</a>
<b id="b">Match 2.</a>""")
self.assertSelects(
tree.find_all(id_matches_name), ["Match 1.", "Match 2."])
class TestFindAllByAttribute(TreeTest):
def test_find_all_by_attribute_name(self):
# You can pass in keyword arguments to find_all to search by
# attribute.
tree = self.soup("""
<a id="first">Matching a.</a>
<a id="second">
Non-matching <b id="first">Matching b.</b>a.
</a>""")
self.assertSelects(tree.find_all(id='first'),
["Matching a.", "Matching b."])
def test_find_all_by_attribute_dict(self):
# You can pass in a dictionary as the argument 'attrs'. This
# lets you search for attributes like 'name' (a fixed argument
# to find_all) and 'class' (a reserved word in Python.)
tree = self.soup("""
<a name="name1" class="class1">Name match.</a>
<a name="name2" class="class2">Class match.</a>
<a name="name3" class="class3">Non-match.</a>
<name1>A tag called 'name1'.</name1>
""")
# This doesn't do what you want.
self.assertSelects(tree.find_all(name='name1'),
["A tag called 'name1'."])
# This does what you want.
self.assertSelects(tree.find_all(attrs={'name' : 'name1'}),
["Name match."])
# Passing class='class2' would cause a syntax error.
self.assertSelects(tree.find_all(attrs={'class' : 'class2'}),
["Class match."])
def test_find_all_by_class(self):
# Passing in a string to 'attrs' will search the CSS class.
tree = self.soup("""
<a class="1">Class 1.</a>
<a class="2">Class 2.</a>
<b class="1">Class 1.</b>
<c class="3 4">Class 3 and 4.</c>
""")
self.assertSelects(tree.find_all('a', '1'), ['Class 1.'])
self.assertSelects(tree.find_all(attrs='1'), ['Class 1.', 'Class 1.'])
self.assertSelects(tree.find_all('c', '3'), ['Class 3 and 4.'])
self.assertSelects(tree.find_all('c', '4'), ['Class 3 and 4.'])
def test_find_by_class_when_multiple_classes_present(self):
tree = self.soup("<gar class='foo bar'>Found it</gar>")
attrs = { 'class' : re.compile("o") }
f = tree.find_all("gar", attrs=attrs)
self.assertSelects(f, ["Found it"])
f = tree.find_all("gar", re.compile("a"))
self.assertSelects(f, ["Found it"])
# Since the class is not the string "foo bar", but the two
# strings "foo" and "bar", this will not find anything.
attrs = { 'class' : re.compile("o b") }
f = tree.find_all("gar", attrs=attrs)
self.assertSelects(f, [])
def test_find_all_with_non_dictionary_for_attrs_finds_by_class(self):
soup = self.soup("<a class='bar'>Found it</a>")
self.assertSelects(soup.find_all("a", re.compile("ba")), ["Found it"])
def big_attribute_value(value):
return len(value) > 3
self.assertSelects(soup.find_all("a", big_attribute_value), [])
def small_attribute_value(value):
return len(value) <= 3
self.assertSelects(
soup.find_all("a", small_attribute_value), ["Found it"])
def test_find_all_with_string_for_attrs_finds_multiple_classes(self):
soup = self.soup('<a class="foo bar"></a><a class="foo"></a>')
a, a2 = soup.find_all("a")
self.assertEqual([a, a2], soup.find_all("a", "foo"))
self.assertEqual([a], soup.find_all("a", "bar"))
# If you specify the attribute as a string that contains a
# space, only that specific value will be found.
self.assertEqual([a], soup.find_all("a", "foo bar"))
self.assertEqual([], soup.find_all("a", "bar foo"))
def test_find_all_by_attribute_soupstrainer(self):
tree = self.soup("""
<a id="first">Match.</a>
<a id="second">Non-match.</a>""")
strainer = SoupStrainer(attrs={'id' : 'first'})
self.assertSelects(tree.find_all(strainer), ['Match.'])
    def test_find_all_with_missing_attribute(self):
# You can pass in None as the value of an attribute to find_all.
# This will match tags that do not have that attribute set.
tree = self.soup("""<a id="1">ID present.</a>
<a>No ID present.</a>
<a id="">ID is empty.</a>""")
self.assertSelects(tree.find_all('a', id=None), ["No ID present."])
def test_find_all_with_defined_attribute(self):
# You can pass in None as the value of an attribute to find_all.
# This will match tags that have that attribute set to any value.
tree = self.soup("""<a id="1">ID present.</a>
<a>No ID present.</a>
<a id="">ID is empty.</a>""")
self.assertSelects(
tree.find_all(id=True), ["ID present.", "ID is empty."])
def test_find_all_with_numeric_attribute(self):
# If you search for a number, it's treated as a string.
tree = self.soup("""<a id=1>Unquoted attribute.</a>
<a id="1">Quoted attribute.</a>""")
expected = ["Unquoted attribute.", "Quoted attribute."]
self.assertSelects(tree.find_all(id=1), expected)
self.assertSelects(tree.find_all(id="1"), expected)
def test_find_all_with_list_attribute_values(self):
# You can pass a list of attribute values instead of just one,
# and you'll get tags that match any of the values.
tree = self.soup("""<a id="1">1</a>
<a id="2">2</a>
<a id="3">3</a>
<a>No ID.</a>""")
self.assertSelects(tree.find_all(id=["1", "3", "4"]),
["1", "3"])
def test_find_all_with_regular_expression_attribute_value(self):
# You can pass a regular expression as an attribute value, and
# you'll get tags whose values for that attribute match the
# regular expression.
tree = self.soup("""<a id="a">One a.</a>
<a id="aa">Two as.</a>
<a id="ab">Mixed as and bs.</a>
<a id="b">One b.</a>
<a>No ID.</a>""")
self.assertSelects(tree.find_all(id=re.compile("^a+$")),
["One a.", "Two as."])
def test_find_by_name_and_containing_string(self):
soup = self.soup("<b>foo</b><b>bar</b><a>foo</a>")
a = soup.a
self.assertEqual([a], soup.find_all("a", text="foo"))
self.assertEqual([], soup.find_all("a", text="bar"))
self.assertEqual([], soup.find_all("a", text="bar"))
def test_find_by_name_and_containing_string_when_string_is_buried(self):
soup = self.soup("<a>foo</a><a><b><c>foo</c></b></a>")
self.assertEqual(soup.find_all("a"), soup.find_all("a", text="foo"))
def test_find_by_attribute_and_containing_string(self):
soup = self.soup('<b id="1">foo</b><a id="2">foo</a>')
a = soup.a
self.assertEqual([a], soup.find_all(id=2, text="foo"))
self.assertEqual([], soup.find_all(id=1, text="bar"))
class TestIndex(TreeTest):
"""Test Tag.index"""
def test_index(self):
tree = self.soup("""<wrap>
<a>Identical</a>
<b>Not identical</b>
<a>Identical</a>
<c><d>Identical with child</d></c>
<b>Also not identical</b>
<c><d>Identical with child</d></c>
</wrap>""")
wrap = tree.wrap
for i, element in enumerate(wrap.contents):
self.assertEqual(i, wrap.index(element))
self.assertRaises(ValueError, tree.index, 1)
class TestParentOperations(TreeTest):
"""Test navigation and searching through an element's parents."""
def setUp(self):
super(TestParentOperations, self).setUp()
self.tree = self.soup('''<ul id="empty"></ul>
<ul id="top">
<ul id="middle">
<ul id="bottom">
<b>Start here</b>
</ul>
</ul>''')
self.start = self.tree.b
def test_parent(self):
self.assertEqual(self.start.parent['id'], 'bottom')
self.assertEqual(self.start.parent.parent['id'], 'middle')
self.assertEqual(self.start.parent.parent.parent['id'], 'top')
def test_parent_of_top_tag_is_soup_object(self):
top_tag = self.tree.contents[0]
self.assertEqual(top_tag.parent, self.tree)
def test_soup_object_has_no_parent(self):
self.assertEqual(None, self.tree.parent)
def test_find_parents(self):
self.assertSelectsIDs(
self.start.find_parents('ul'), ['bottom', 'middle', 'top'])
self.assertSelectsIDs(
self.start.find_parents('ul', id="middle"), ['middle'])
def test_find_parent(self):
self.assertEqual(self.start.find_parent('ul')['id'], 'bottom')
def test_parent_of_text_element(self):
text = self.tree.find(text="Start here")
self.assertEqual(text.parent.name, 'b')
def test_text_element_find_parent(self):
text = self.tree.find(text="Start here")
self.assertEqual(text.find_parent('ul')['id'], 'bottom')
def test_parent_generator(self):
parents = [parent['id'] for parent in self.start.parents
if parent is not None and 'id' in parent.attrs]
self.assertEqual(parents, ['bottom', 'middle', 'top'])
class ProximityTest(TreeTest):
def setUp(self):
super(TreeTest, self).setUp()
self.tree = self.soup(
'<html id="start"><head></head><body><b id="1">One</b><b id="2">Two</b><b id="3">Three</b></body></html>')
class TestNextOperations(ProximityTest):
def setUp(self):
super(TestNextOperations, self).setUp()
self.start = self.tree.b
def test_next(self):
self.assertEqual(self.start.next_element, "One")
self.assertEqual(self.start.next_element.next_element['id'], "2")
def test_next_of_last_item_is_none(self):
last = self.tree.find(text="Three")
self.assertEqual(last.next_element, None)
def test_next_of_root_is_none(self):
# The document root is outside the next/previous chain.
self.assertEqual(self.tree.next_element, None)
def test_find_all_next(self):
self.assertSelects(self.start.find_all_next('b'), ["Two", "Three"])
self.start.find_all_next(id=3)
self.assertSelects(self.start.find_all_next(id=3), ["Three"])
def test_find_next(self):
self.assertEqual(self.start.find_next('b')['id'], '2')
self.assertEqual(self.start.find_next(text="Three"), "Three")
def test_find_next_for_text_element(self):
text = self.tree.find(text="One")
self.assertEqual(text.find_next("b").string, "Two")
self.assertSelects(text.find_all_next("b"), ["Two", "Three"])
def test_next_generator(self):
start = self.tree.find(text="Two")
successors = [node for node in start.next_elements]
# There are two successors: the final <b> tag and its text contents.
tag, contents = successors
self.assertEqual(tag['id'], '3')
self.assertEqual(contents, "Three")
class TestPreviousOperations(ProximityTest):
def setUp(self):
super(TestPreviousOperations, self).setUp()
self.end = self.tree.find(text="Three")
def test_previous(self):
self.assertEqual(self.end.previous_element['id'], "3")
self.assertEqual(self.end.previous_element.previous_element, "Two")
def test_previous_of_first_item_is_none(self):
first = self.tree.find('html')
self.assertEqual(first.previous_element, None)
def test_previous_of_root_is_none(self):
# The document root is outside the next/previous chain.
# XXX This is broken!
#self.assertEqual(self.tree.previous_element, None)
pass
def test_find_all_previous(self):
# The <b> tag containing the "Three" node is the predecessor
# of the "Three" node itself, which is why "Three" shows up
# here.
self.assertSelects(
self.end.find_all_previous('b'), ["Three", "Two", "One"])
self.assertSelects(self.end.find_all_previous(id=1), ["One"])
def test_find_previous(self):
self.assertEqual(self.end.find_previous('b')['id'], '3')
self.assertEqual(self.end.find_previous(text="One"), "One")
def test_find_previous_for_text_element(self):
text = self.tree.find(text="Three")
self.assertEqual(text.find_previous("b").string, "Three")
self.assertSelects(
text.find_all_previous("b"), ["Three", "Two", "One"])
def test_previous_generator(self):
start = self.tree.find(text="One")
predecessors = [node for node in start.previous_elements]
# There are four predecessors: the <b> tag containing "One"
# the <body> tag, the <head> tag, and the <html> tag.
b, body, head, html = predecessors
self.assertEqual(b['id'], '1')
self.assertEqual(body.name, "body")
self.assertEqual(head.name, "head")
self.assertEqual(html.name, "html")
class SiblingTest(TreeTest):
def setUp(self):
super(SiblingTest, self).setUp()
markup = '''<html>
<span id="1">
<span id="1.1"></span>
</span>
<span id="2">
<span id="2.1"></span>
</span>
<span id="3">
<span id="3.1"></span>
</span>
<span id="4"></span>
</html>'''
# All that whitespace looks good but makes the tests more
# difficult. Get rid of it.
markup = re.compile("\n\s*").sub("", markup)
self.tree = self.soup(markup)
class TestNextSibling(SiblingTest):
def setUp(self):
super(TestNextSibling, self).setUp()
self.start = self.tree.find(id="1")
def test_next_sibling_of_root_is_none(self):
self.assertEqual(self.tree.next_sibling, None)
def test_next_sibling(self):
self.assertEqual(self.start.next_sibling['id'], '2')
self.assertEqual(self.start.next_sibling.next_sibling['id'], '3')
# Note the difference between next_sibling and next_element.
self.assertEqual(self.start.next_element['id'], '1.1')
def test_next_sibling_may_not_exist(self):
self.assertEqual(self.tree.html.next_sibling, None)
nested_span = self.tree.find(id="1.1")
self.assertEqual(nested_span.next_sibling, None)
last_span = self.tree.find(id="4")
self.assertEqual(last_span.next_sibling, None)
def test_find_next_sibling(self):
self.assertEqual(self.start.find_next_sibling('span')['id'], '2')
def test_next_siblings(self):
self.assertSelectsIDs(self.start.find_next_siblings("span"),
['2', '3', '4'])
self.assertSelectsIDs(self.start.find_next_siblings(id='3'), ['3'])
def test_next_sibling_for_text_element(self):
soup = self.soup("Foo<b>bar</b>baz")
start = soup.find(text="Foo")
self.assertEqual(start.next_sibling.name, 'b')
self.assertEqual(start.next_sibling.next_sibling, 'baz')
self.assertSelects(start.find_next_siblings('b'), ['bar'])
self.assertEqual(start.find_next_sibling(text="baz"), "baz")
self.assertEqual(start.find_next_sibling(text="nonesuch"), None)
class TestPreviousSibling(SiblingTest):
def setUp(self):
super(TestPreviousSibling, self).setUp()
self.end = self.tree.find(id="4")
def test_previous_sibling_of_root_is_none(self):
self.assertEqual(self.tree.previous_sibling, None)
def test_previous_sibling(self):
self.assertEqual(self.end.previous_sibling['id'], '3')
self.assertEqual(self.end.previous_sibling.previous_sibling['id'], '2')
# Note the difference between previous_sibling and previous_element.
self.assertEqual(self.end.previous_element['id'], '3.1')
def test_previous_sibling_may_not_exist(self):
self.assertEqual(self.tree.html.previous_sibling, None)
nested_span = self.tree.find(id="1.1")
self.assertEqual(nested_span.previous_sibling, None)
first_span = self.tree.find(id="1")
self.assertEqual(first_span.previous_sibling, None)
def test_find_previous_sibling(self):
self.assertEqual(self.end.find_previous_sibling('span')['id'], '3')
def test_previous_siblings(self):
self.assertSelectsIDs(self.end.find_previous_siblings("span"),
['3', '2', '1'])
self.assertSelectsIDs(self.end.find_previous_siblings(id='1'), ['1'])
def test_previous_sibling_for_text_element(self):
soup = self.soup("Foo<b>bar</b>baz")
start = soup.find(text="baz")
self.assertEqual(start.previous_sibling.name, 'b')
self.assertEqual(start.previous_sibling.previous_sibling, 'Foo')
self.assertSelects(start.find_previous_siblings('b'), ['bar'])
self.assertEqual(start.find_previous_sibling(text="Foo"), "Foo")
self.assertEqual(start.find_previous_sibling(text="nonesuch"), None)
class TestTagCreation(SoupTest):
"""Test the ability to create new tags."""
def test_new_tag(self):
soup = self.soup("")
new_tag = soup.new_tag("foo", bar="baz")
self.assertTrue(isinstance(new_tag, Tag))
self.assertEqual("foo", new_tag.name)
self.assertEqual(dict(bar="baz"), new_tag.attrs)
self.assertEqual(None, new_tag.parent)
def test_tag_inherits_self_closing_rules_from_builder(self):
if XML_BUILDER_PRESENT:
xml_soup = BeautifulSoup("", "xml")
xml_br = xml_soup.new_tag("br")
xml_p = xml_soup.new_tag("p")
# Both the <br> and <p> tag are empty-element, just because
# they have no contents.
self.assertEqual(b"<br/>", xml_br.encode())
self.assertEqual(b"<p/>", xml_p.encode())
html_soup = BeautifulSoup("", "html")
html_br = html_soup.new_tag("br")
html_p = html_soup.new_tag("p")
        # The HTML builder uses HTML's rules about which tags are
# empty-element tags, and the new tags reflect these rules.
self.assertEqual(b"<br/>", html_br.encode())
self.assertEqual(b"<p></p>", html_p.encode())
def test_new_string_creates_navigablestring(self):
soup = self.soup("")
s = soup.new_string("foo")
self.assertEqual("foo", s)
self.assertTrue(isinstance(s, NavigableString))
class TestTreeModification(SoupTest):
def test_attribute_modification(self):
soup = self.soup('<a id="1"></a>')
soup.a['id'] = 2
self.assertEqual(soup.decode(), self.document_for('<a id="2"></a>'))
del(soup.a['id'])
self.assertEqual(soup.decode(), self.document_for('<a></a>'))
soup.a['id2'] = 'foo'
self.assertEqual(soup.decode(), self.document_for('<a id2="foo"></a>'))
def test_new_tag_creation(self):
builder = builder_registry.lookup('html')()
soup = self.soup("<body></body>", builder=builder)
a = Tag(soup, builder, 'a')
ol = Tag(soup, builder, 'ol')
a['href'] = 'http://foo.com/'
soup.body.insert(0, a)
soup.body.insert(1, ol)
self.assertEqual(
soup.body.encode(),
b'<body><a href="http://foo.com/"></a><ol></ol></body>')
def test_append_to_contents_moves_tag(self):
doc = """<p id="1">Don't leave me <b>here</b>.</p>
<p id="2">Don\'t leave!</p>"""
soup = self.soup(doc)
second_para = soup.find(id='2')
bold = soup.b
# Move the <b> tag to the end of the second paragraph.
soup.find(id='2').append(soup.b)
# The <b> tag is now a child of the second paragraph.
self.assertEqual(bold.parent, second_para)
self.assertEqual(
soup.decode(), self.document_for(
'<p id="1">Don\'t leave me .</p>\n'
'<p id="2">Don\'t leave!<b>here</b></p>'))
def test_replace_with_returns_thing_that_was_replaced(self):
text = "<a></a><b><c></c></b>"
soup = self.soup(text)
a = soup.a
new_a = a.replace_with(soup.c)
self.assertEqual(a, new_a)
def test_replace_with_children_returns_thing_that_was_replaced(self):
text = "<a><b></b><c></c></a>"
soup = self.soup(text)
a = soup.a
new_a = a.replace_with_children()
self.assertEqual(a, new_a)
def test_replace_tag_with_itself(self):
text = "<a><b></b><c>Foo<d></d></c></a><a><e></e></a>"
soup = self.soup(text)
c = soup.c
soup.c.replace_with(c)
self.assertEqual(soup.decode(), self.document_for(text))
def test_replace_tag_with_its_parent_raises_exception(self):
text = "<a><b></b></a>"
soup = self.soup(text)
self.assertRaises(ValueError, soup.b.replace_with, soup.a)
def test_insert_tag_into_itself_raises_exception(self):
text = "<a><b></b></a>"
soup = self.soup(text)
self.assertRaises(ValueError, soup.a.insert, 0, soup.a)
def test_replace_with_maintains_next_element_throughout(self):
soup = self.soup('<p><a>one</a><b>three</b></p>')
a = soup.a
b = a.contents[0]
# Make it so the <a> tag has two text children.
a.insert(1, "two")
# Now replace each one with the empty string.
left, right = a.contents
left.replaceWith('')
right.replaceWith('')
# The <b> tag is still connected to the tree.
self.assertEqual("three", soup.b.string)
def test_replace_final_node(self):
soup = self.soup("<b>Argh!</b>")
soup.find(text="Argh!").replace_with("Hooray!")
new_text = soup.find(text="Hooray!")
b = soup.b
self.assertEqual(new_text.previous_element, b)
self.assertEqual(new_text.parent, b)
self.assertEqual(new_text.previous_element.next_element, new_text)
self.assertEqual(new_text.next_element, None)
def test_consecutive_text_nodes(self):
# A builder should never create two consecutive text nodes,
# but if you insert one next to another, Beautiful Soup will
# handle it correctly.
soup = self.soup("<a><b>Argh!</b><c></c></a>")
soup.b.insert(1, "Hooray!")
self.assertEqual(
soup.decode(), self.document_for(
"<a><b>Argh!Hooray!</b><c></c></a>"))
new_text = soup.find(text="Hooray!")
self.assertEqual(new_text.previous_element, "Argh!")
self.assertEqual(new_text.previous_element.next_element, new_text)
self.assertEqual(new_text.previous_sibling, "Argh!")
self.assertEqual(new_text.previous_sibling.next_sibling, new_text)
self.assertEqual(new_text.next_sibling, None)
self.assertEqual(new_text.next_element, soup.c)
def test_insert_string(self):
soup = self.soup("<a></a>")
soup.a.insert(0, "bar")
soup.a.insert(0, "foo")
        # The strings were added to the tag.
self.assertEqual(["foo", "bar"], soup.a.contents)
# And they were converted to NavigableStrings.
self.assertEqual(soup.a.contents[0].next_element, "bar")
def test_insert_tag(self):
builder = self.default_builder
soup = self.soup(
"<a><b>Find</b><c>lady!</c><d></d></a>", builder=builder)
magic_tag = Tag(soup, builder, 'magictag')
magic_tag.insert(0, "the")
soup.a.insert(1, magic_tag)
self.assertEqual(
soup.decode(), self.document_for(
"<a><b>Find</b><magictag>the</magictag><c>lady!</c><d></d></a>"))
# Make sure all the relationships are hooked up correctly.
b_tag = soup.b
self.assertEqual(b_tag.next_sibling, magic_tag)
self.assertEqual(magic_tag.previous_sibling, b_tag)
find = b_tag.find(text="Find")
self.assertEqual(find.next_element, magic_tag)
self.assertEqual(magic_tag.previous_element, find)
c_tag = soup.c
self.assertEqual(magic_tag.next_sibling, c_tag)
self.assertEqual(c_tag.previous_sibling, magic_tag)
the = magic_tag.find(text="the")
self.assertEqual(the.parent, magic_tag)
self.assertEqual(the.next_element, c_tag)
self.assertEqual(c_tag.previous_element, the)
def test_insert_works_on_empty_element_tag(self):
# This is a little strange, since most HTML parsers don't allow
# markup like this to come through. But in general, we don't
# know what the parser would or wouldn't have allowed, so
# I'm letting this succeed for now.
soup = self.soup("<br/>")
soup.br.insert(1, "Contents")
self.assertEqual(str(soup.br), "<br>Contents</br>")
def test_insert_before(self):
soup = self.soup("<a>foo</a><b>bar</b>")
soup.b.insert_before("BAZ")
soup.a.insert_before("QUUX")
self.assertEqual(
soup.decode(), self.document_for("QUUX<a>foo</a>BAZ<b>bar</b>"))
soup.a.insert_before(soup.b)
self.assertEqual(
soup.decode(), self.document_for("QUUX<b>bar</b><a>foo</a>BAZ"))
def test_insert_after(self):
soup = self.soup("<a>foo</a><b>bar</b>")
soup.b.insert_after("BAZ")
soup.a.insert_after("QUUX")
self.assertEqual(
soup.decode(), self.document_for("<a>foo</a>QUUX<b>bar</b>BAZ"))
soup.b.insert_after(soup.a)
self.assertEqual(
soup.decode(), self.document_for("QUUX<b>bar</b><a>foo</a>BAZ"))
def test_insert_after_raises_valueerror_if_after_has_no_meaning(self):
soup = self.soup("")
tag = soup.new_tag("a")
string = soup.new_string("")
self.assertRaises(ValueError, string.insert_after, tag)
self.assertRaises(ValueError, soup.insert_after, tag)
self.assertRaises(ValueError, tag.insert_after, tag)
def test_insert_before_raises_valueerror_if_before_has_no_meaning(self):
soup = self.soup("")
tag = soup.new_tag("a")
string = soup.new_string("")
self.assertRaises(ValueError, string.insert_before, tag)
self.assertRaises(ValueError, soup.insert_before, tag)
self.assertRaises(ValueError, tag.insert_before, tag)
def test_replace_with(self):
soup = self.soup(
"<p>There's <b>no</b> business like <b>show</b> business</p>")
no, show = soup.find_all('b')
show.replace_with(no)
self.assertEqual(
soup.decode(),
self.document_for(
"<p>There's business like <b>no</b> business</p>"))
self.assertEqual(show.parent, None)
self.assertEqual(no.parent, soup.p)
self.assertEqual(no.next_element, "no")
self.assertEqual(no.next_sibling, " business")
def test_nested_tag_replace_with(self):
soup = self.soup(
"""<a>We<b>reserve<c>the</c><d>right</d></b></a><e>to<f>refuse</f><g>service</g></e>""")
# Replace the entire <b> tag and its contents ("reserve the
# right") with the <f> tag ("refuse").
remove_tag = soup.b
move_tag = soup.f
remove_tag.replace_with(move_tag)
self.assertEqual(
soup.decode(), self.document_for(
"<a>We<f>refuse</f></a><e>to<g>service</g></e>"))
# The <b> tag is now an orphan.
self.assertEqual(remove_tag.parent, None)
self.assertEqual(remove_tag.find(text="right").next_element, None)
self.assertEqual(remove_tag.previous_element, None)
self.assertEqual(remove_tag.next_sibling, None)
self.assertEqual(remove_tag.previous_sibling, None)
# The <f> tag is now connected to the <a> tag.
self.assertEqual(move_tag.parent, soup.a)
self.assertEqual(move_tag.previous_element, "We")
self.assertEqual(move_tag.next_element.next_element, soup.e)
self.assertEqual(move_tag.next_sibling, None)
# The gap where the <f> tag used to be has been mended, and
# the word "to" is now connected to the <g> tag.
to_text = soup.find(text="to")
g_tag = soup.g
self.assertEqual(to_text.next_element, g_tag)
self.assertEqual(to_text.next_sibling, g_tag)
self.assertEqual(g_tag.previous_element, to_text)
self.assertEqual(g_tag.previous_sibling, to_text)
def test_replace_with_children(self):
tree = self.soup("""
<p>Unneeded <em>formatting</em> is unneeded</p>
""")
tree.em.replace_with_children()
self.assertEqual(tree.em, None)
self.assertEqual(tree.p.text, "Unneeded formatting is unneeded")
def test_extract(self):
soup = self.soup(
'<html><body>Some content. <div id="nav">Nav crap</div> More content.</body></html>')
self.assertEqual(len(soup.body.contents), 3)
extracted = soup.find(id="nav").extract()
self.assertEqual(
soup.decode(), "<html><body>Some content. More content.</body></html>")
self.assertEqual(extracted.decode(), '<div id="nav">Nav crap</div>')
# The extracted tag is now an orphan.
self.assertEqual(len(soup.body.contents), 2)
self.assertEqual(extracted.parent, None)
self.assertEqual(extracted.previous_element, None)
self.assertEqual(extracted.next_element.next_element, None)
# The gap where the extracted tag used to be has been mended.
content_1 = soup.find(text="Some content. ")
content_2 = soup.find(text=" More content.")
self.assertEqual(content_1.next_element, content_2)
self.assertEqual(content_1.next_sibling, content_2)
self.assertEqual(content_2.previous_element, content_1)
self.assertEqual(content_2.previous_sibling, content_1)
def test_extract_distinguishes_between_identical_strings(self):
soup = self.soup("<a>foo</a><b>bar</b>")
foo_1 = soup.a.string
bar_1 = soup.b.string
foo_2 = soup.new_string("foo")
bar_2 = soup.new_string("bar")
soup.a.append(foo_2)
soup.b.append(bar_2)
# Now there are two identical strings in the <a> tag, and two
# in the <b> tag. Let's remove the first "foo" and the second
# "bar".
foo_1.extract()
bar_2.extract()
self.assertEqual(foo_2, soup.a.string)
self.assertEqual(bar_2, soup.b.string)
def test_clear(self):
"""Tag.clear()"""
soup = self.soup("<p><a>String <em>Italicized</em></a> and another</p>")
# clear using extract()
a = soup.a
soup.p.clear()
self.assertEqual(len(soup.p.contents), 0)
self.assertTrue(hasattr(a, "contents"))
# clear using decompose()
em = a.em
a.clear(decompose=True)
self.assertFalse(hasattr(em, "contents"))
def test_string_set(self):
"""Tag.string = 'string'"""
soup = self.soup("<a></a> <b><c></c></b>")
soup.a.string = "foo"
self.assertEqual(soup.a.contents, ["foo"])
soup.b.string = "bar"
self.assertEqual(soup.b.contents, ["bar"])
class TestElementObjects(SoupTest):
"""Test various features of element objects."""
def test_len(self):
"""The length of an element is its number of children."""
soup = self.soup("<top>1<b>2</b>3</top>")
# The BeautifulSoup object itself contains one element: the
# <top> tag.
self.assertEqual(len(soup.contents), 1)
self.assertEqual(len(soup), 1)
# The <top> tag contains three elements: the text node "1", the
# <b> tag, and the text node "3".
self.assertEqual(len(soup.top), 3)
self.assertEqual(len(soup.top.contents), 3)
def test_member_access_invokes_find(self):
"""Accessing a Python member .foo invokes find('foo')"""
soup = self.soup('<b><i></i></b>')
self.assertEqual(soup.b, soup.find('b'))
self.assertEqual(soup.b.i, soup.find('b').find('i'))
self.assertEqual(soup.a, None)
def test_deprecated_member_access(self):
soup = self.soup('<b><i></i></b>')
with warnings.catch_warnings(record=True) as w:
tag = soup.bTag
self.assertEqual(soup.b, tag)
self.assertEqual(
'.bTag is deprecated, use .find("b") instead.',
str(w[0].message))
def test_has_attr(self):
"""has_attr() checks for the presence of an attribute.
        Please note: has_attr() is different from the 'in' operator.
        has_attr() checks the tag's attributes, while 'in' checks the
        tag's children.
"""
soup = self.soup("<foo attr='bar'>")
self.assertTrue(soup.foo.has_attr('attr'))
self.assertFalse(soup.foo.has_attr('attr2'))
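        # By contrast, the 'in' operator looks at the tag's children, so
        # 'attr' in soup.foo is False here even though the attribute exists.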
def test_attributes_come_out_in_alphabetical_order(self):
markup = '<b a="1" z="5" m="3" f="2" y="4"></b>'
self.assertSoupEquals(markup, '<b a="1" f="2" m="3" y="4" z="5"></b>')
def test_string(self):
# A tag that contains only a text node makes that node
# available as .string.
soup = self.soup("<b>foo</b>")
self.assertEqual(soup.b.string, 'foo')
def test_empty_tag_has_no_string(self):
        # A tag with no children has no .string.
soup = self.soup("<b></b>")
self.assertEqual(soup.b.string, None)
def test_tag_with_multiple_children_has_no_string(self):
# A tag with no children has no .string.
soup = self.soup("<a>foo<b></b><b></b></b>")
self.assertEqual(soup.b.string, None)
soup = self.soup("<a>foo<b></b>bar</b>")
self.assertEqual(soup.b.string, None)
# Even if all the children are strings, due to trickery,
# it won't work--but this would be a good optimization.
soup = self.soup("<a>foo</b>")
soup.a.insert(1, "bar")
self.assertEqual(soup.a.string, None)
def test_tag_with_recursive_string_has_string(self):
# A tag with a single child which has a .string inherits that
# .string.
soup = self.soup("<a><b>foo</b></a>")
self.assertEqual(soup.a.string, "foo")
self.assertEqual(soup.string, "foo")
def test_lack_of_string(self):
"""Only a tag containing a single text node has a .string."""
soup = self.soup("<b>f<i>e</i>o</b>")
self.assertFalse(soup.b.string)
soup = self.soup("<b></b>")
self.assertFalse(soup.b.string)
def test_all_text(self):
"""Tag.text and Tag.get_text(sep=u"") -> all child text, concatenated"""
soup = self.soup("<a>a<b>r</b> <r> t </r></a>")
self.assertEqual(soup.a.text, "ar t ")
self.assertEqual(soup.a.get_text(strip=True), "art")
self.assertEqual(soup.a.get_text(","), "a,r, , t ")
self.assertEqual(soup.a.get_text(",", strip=True), "a,r,t")
class TestCDataListAttributes(SoupTest):
"""Testing cdata-list attributes like 'class'.
"""
def test_single_value_becomes_list(self):
soup = self.soup("<a class='foo'>")
self.assertEqual(["foo"],soup.a['class'])
def test_multiple_values_becomes_list(self):
soup = self.soup("<a class='foo bar'>")
self.assertEqual(["foo", "bar"], soup.a['class'])
def test_multiple_values_separated_by_weird_whitespace(self):
soup = self.soup("<a class='foo\tbar\nbaz'>")
self.assertEqual(["foo", "bar", "baz"],soup.a['class'])
def test_attributes_joined_into_string_on_output(self):
soup = self.soup("<a class='foo\tbar'>")
self.assertEqual(b'<a class="foo bar"></a>', soup.a.encode())
def test_accept_charset(self):
soup = self.soup('<form accept-charset="ISO-8859-1 UTF-8">')
self.assertEqual(['ISO-8859-1', 'UTF-8'], soup.form['accept-charset'])
def test_cdata_attribute_applying_only_to_one_tag(self):
data = '<a accept-charset="ISO-8859-1 UTF-8"></a>'
soup = self.soup(data)
# We saw in another test that accept-charset is a cdata-list
# attribute for the <form> tag. But it's not a cdata-list
# attribute for any other tag.
self.assertEqual('ISO-8859-1 UTF-8', soup.a['accept-charset'])
class TestPersistence(SoupTest):
"Testing features like pickle and deepcopy."
def setUp(self):
super(TestPersistence, self).setUp()
self.page = """<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN"
"http://www.w3.org/TR/REC-html40/transitional.dtd">
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
<title>Beautiful Soup: We called him Tortoise because he taught us.</title>
<link rev="made" href="mailto:[email protected]">
<meta name="Description" content="Beautiful Soup: an HTML parser optimized for screen-scraping.">
<meta name="generator" content="Markov Approximation 1.4 (module: leonardr)">
<meta name="author" content="Leonard Richardson">
</head>
<body>
<a href="foo">foo</a>
<a href="foo"><b>bar</b></a>
</body>
</html>"""
self.tree = self.soup(self.page)
def test_pickle_and_unpickle_identity(self):
# Pickling a tree, then unpickling it, yields a tree identical
# to the original.
dumped = pickle.dumps(self.tree, 2)
loaded = pickle.loads(dumped)
self.assertEqual(loaded.__class__, BeautifulSoup)
self.assertEqual(loaded.decode(), self.tree.decode())
def test_deepcopy_identity(self):
# Making a deepcopy of a tree yields an identical tree.
copied = copy.deepcopy(self.tree)
self.assertEqual(copied.decode(), self.tree.decode())
def test_unicode_pickle(self):
# A tree containing Unicode characters can be pickled.
html = "<b>\N{SNOWMAN}</b>"
soup = self.soup(html)
dumped = pickle.dumps(soup, pickle.HIGHEST_PROTOCOL)
loaded = pickle.loads(dumped)
self.assertEqual(loaded.decode(), soup.decode())
class TestSubstitutions(SoupTest):
def test_default_formatter_is_minimal(self):
markup = "<b><<Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!>></b>"
soup = self.soup(markup)
decoded = soup.decode(formatter="minimal")
        # The < is converted back into &lt; but the e-with-acute is left alone.
        self.assertEqual(
            decoded,
            self.document_for(
                "<b>&lt;&lt;Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!&gt;&gt;</b>"))
def test_formatter_html(self):
markup = "<b><<Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!>></b>"
soup = self.soup(markup)
decoded = soup.decode(formatter="html")
self.assertEqual(
decoded,
            self.document_for("<b>&lt;&lt;Sacr&eacute; bleu!&gt;&gt;</b>"))
def test_formatter_minimal(self):
markup = "<b><<Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!>></b>"
soup = self.soup(markup)
decoded = soup.decode(formatter="minimal")
        # The < is converted back into &lt; but the e-with-acute is left alone.
        self.assertEqual(
            decoded,
            self.document_for(
                "<b>&lt;&lt;Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!&gt;&gt;</b>"))
def test_formatter_null(self):
markup = "<b><<Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!>></b>"
soup = self.soup(markup)
decoded = soup.decode(formatter=None)
# Neither the angle brackets nor the e-with-acute are converted.
# This is not valid HTML, but it's what the user wanted.
self.assertEqual(decoded,
self.document_for("<b><<Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!>></b>"))
def test_formatter_custom(self):
markup = "<b><foo></b><b>bar</b>"
soup = self.soup(markup)
decoded = soup.decode(formatter = lambda x: x.upper())
# Instead of normal entity conversion code, the custom
# callable is called on every string.
self.assertEqual(
decoded,
self.document_for("<b><FOO></b><b>BAR</b>"))
def test_prettify_accepts_formatter(self):
soup = BeautifulSoup("<html><body>foo</body></html>")
pretty = soup.prettify(formatter = lambda x: x.upper())
self.assertTrue("FOO" in pretty)
def test_prettify_outputs_unicode_by_default(self):
soup = self.soup("<a></a>")
self.assertEqual(str, type(soup.prettify()))
def test_prettify_can_encode_data(self):
soup = self.soup("<a></a>")
self.assertEqual(bytes, type(soup.prettify("utf-8")))
def test_html_entity_substitution_off_by_default(self):
markup = "<b>Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!</b>"
soup = self.soup(markup)
encoded = soup.b.encode("utf-8")
self.assertEqual(encoded, markup.encode('utf-8'))
def test_encoding_substitution(self):
# Here's the <meta> tag saying that a document is
# encoded in Shift-JIS.
meta_tag = ('<meta content="text/html; charset=x-sjis" '
'http-equiv="Content-type"/>')
soup = self.soup(meta_tag)
# Parse the document, and the charset is replaced with a
# generic value.
self.assertEqual(soup.meta['content'],
'text/html; charset=%SOUP-ENCODING%')
# Encode the document into some encoding, and the encoding is
# substituted into the meta tag.
utf_8 = soup.encode("utf-8")
self.assertTrue(b"charset=utf-8" in utf_8)
euc_jp = soup.encode("euc_jp")
self.assertTrue(b"charset=euc_jp" in euc_jp)
shift_jis = soup.encode("shift-jis")
self.assertTrue(b"charset=shift-jis" in shift_jis)
utf_16_u = soup.encode("utf-16").decode("utf-16")
self.assertTrue("charset=utf-16" in utf_16_u)
def test_encoding_substitution_doesnt_happen_if_tag_is_strained(self):
markup = ('<head><meta content="text/html; charset=x-sjis" '
'http-equiv="Content-type"/></head><pre>foo</pre>')
# Beautiful Soup used to try to rewrite the meta tag even if the
# meta tag got filtered out by the strainer. This test makes
# sure that doesn't happen.
strainer = SoupStrainer('pre')
soup = self.soup(markup, parse_only=strainer)
self.assertEqual(soup.contents[0].name, 'pre')
class TestEncoding(SoupTest):
"""Test the ability to encode objects into strings."""
def test_unicode_string_can_be_encoded(self):
html = "<b>\N{SNOWMAN}</b>"
soup = self.soup(html)
self.assertEqual(soup.b.string.encode("utf-8"),
"\N{SNOWMAN}".encode("utf-8"))
def test_tag_containing_unicode_string_can_be_encoded(self):
html = "<b>\N{SNOWMAN}</b>"
soup = self.soup(html)
self.assertEqual(
soup.b.encode("utf-8"), html.encode("utf-8"))
def test_encoding_substitutes_unrecognized_characters_by_default(self):
html = "<b>\N{SNOWMAN}</b>"
soup = self.soup(html)
self.assertEqual(soup.b.encode("ascii"), b"<b>☃</b>")
def test_encoding_can_be_made_strict(self):
html = "<b>\N{SNOWMAN}</b>"
soup = self.soup(html)
self.assertRaises(
UnicodeEncodeError, soup.encode, "ascii", errors="strict")
class TestNavigableStringSubclasses(SoupTest):
def test_cdata(self):
# None of the current builders turn CDATA sections into CData
# objects, but you can create them manually.
soup = self.soup("")
cdata = CData("foo")
soup.insert(1, cdata)
self.assertEqual(str(soup), "<![CDATA[foo]]>")
self.assertEqual(soup.find(text="foo"), "foo")
self.assertEqual(soup.contents[0], "foo")
def test_doctype_ends_in_newline(self):
# Unlike other NavigableString subclasses, a DOCTYPE always ends
# in a newline.
doctype = Doctype("foo")
soup = self.soup("")
soup.insert(1, doctype)
self.assertEqual(soup.encode(), b"<!DOCTYPE foo>\n")
class TestSoupSelector(TreeTest):
HTML = """
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN"
"http://www.w3.org/TR/html4/strict.dtd">
<html>
<head>
<title>The title</title>
<link rel="stylesheet" href="blah.css" type="text/css" id="l1">
</head>
<body>
<div id="main">
<div id="inner">
<h1 id="header1">An H1</h1>
<p>Some text</p>
<p class="onep" id="p1">Some more text</p>
<h2 id="header2">An H2</h2>
<p class="class1 class2 class3" id="pmulti">Another</p>
<a href="http://bob.example.org/" rel="friend met" id="bob">Bob</a>
<h2 id="header3">Another H2</h2>
<a id="me" href="http://simonwillison.net/" rel="me">me</a>
<span class="s1">
<a href="#" id="s1a1">span1a1</a>
<a href="#" id="s1a2">span1a2 <span id="s1a2s1">test</span></a>
<span class="span2">
<a href="#" id="s2a1">span2a1</a>
</span>
<span class="span3"></span>
</span>
</div>
<p lang="en" id="lang-en">English</p>
<p lang="en-gb" id="lang-en-gb">English UK</p>
<p lang="en-us" id="lang-en-us">English US</p>
<p lang="fr" id="lang-fr">French</p>
</div>
<div id="footer">
</div>
"""
def setUp(self):
self.soup = BeautifulSoup(self.HTML)
def assertSelects(self, selector, expected_ids):
el_ids = [el['id'] for el in self.soup.select(selector)]
el_ids.sort()
expected_ids.sort()
self.assertEqual(expected_ids, el_ids,
"Selector %s, expected [%s], got [%s]" % (
selector, ', '.join(expected_ids), ', '.join(el_ids)
)
)
assertSelect = assertSelects
def assertSelectMultiple(self, *tests):
for selector, expected_ids in tests:
self.assertSelect(selector, expected_ids)
def test_one_tag_one(self):
els = self.soup.select('title')
self.assertEqual(len(els), 1)
self.assertEqual(els[0].name, 'title')
self.assertEqual(els[0].contents, ['The title'])
def test_one_tag_many(self):
els = self.soup.select('div')
self.assertEqual(len(els), 3)
for div in els:
self.assertEqual(div.name, 'div')
def test_tag_in_tag_one(self):
els = self.soup.select('div div')
self.assertSelects('div div', ['inner'])
def test_tag_in_tag_many(self):
for selector in ('html div', 'html body div', 'body div'):
self.assertSelects(selector, ['main', 'inner', 'footer'])
def test_tag_no_match(self):
self.assertEqual(len(self.soup.select('del')), 0)
def test_invalid_tag(self):
self.assertEqual(len(self.soup.select('tag%t')), 0)
def test_header_tags(self):
self.assertSelectMultiple(
('h1', ['header1']),
('h2', ['header2', 'header3']),
)
def test_class_one(self):
for selector in ('.onep', 'p.onep', 'html p.onep'):
els = self.soup.select(selector)
self.assertEqual(len(els), 1)
self.assertEqual(els[0].name, 'p')
self.assertEqual(els[0]['class'], ['onep'])
def test_class_mismatched_tag(self):
els = self.soup.select('div.onep')
self.assertEqual(len(els), 0)
def test_one_id(self):
for selector in ('div#inner', '#inner', 'div div#inner'):
self.assertSelects(selector, ['inner'])
def test_bad_id(self):
els = self.soup.select('#doesnotexist')
self.assertEqual(len(els), 0)
def test_items_in_id(self):
els = self.soup.select('div#inner p')
self.assertEqual(len(els), 3)
for el in els:
self.assertEqual(el.name, 'p')
self.assertEqual(els[1]['class'], ['onep'])
self.assertFalse('class' in els[0])
def test_a_bunch_of_emptys(self):
for selector in ('div#main del', 'div#main div.oops', 'div div#main'):
self.assertEqual(len(self.soup.select(selector)), 0)
def test_multi_class_support(self):
for selector in ('.class1', 'p.class1', '.class2', 'p.class2',
'.class3', 'p.class3', 'html p.class2', 'div#inner .class2'):
self.assertSelects(selector, ['pmulti'])
def test_multi_class_selection(self):
for selector in ('.class1.class3', '.class3.class2',
'.class1.class2.class3'):
self.assertSelects(selector, ['pmulti'])
def test_child_selector(self):
self.assertSelects('.s1 > a', ['s1a1', 's1a2'])
self.assertSelects('.s1 > a span', ['s1a2s1'])
def test_attribute_equals(self):
self.assertSelectMultiple(
('p[class="onep"]', ['p1']),
('p[id="p1"]', ['p1']),
('[class="onep"]', ['p1']),
('[id="p1"]', ['p1']),
('link[rel="stylesheet"]', ['l1']),
('link[type="text/css"]', ['l1']),
('link[href="blah.css"]', ['l1']),
('link[href="no-blah.css"]', []),
('[rel="stylesheet"]', ['l1']),
('[type="text/css"]', ['l1']),
('[href="blah.css"]', ['l1']),
('[href="no-blah.css"]', []),
('p[href="no-blah.css"]', []),
('[href="no-blah.css"]', []),
)
def test_attribute_tilde(self):
self.assertSelectMultiple(
('p[class~="class1"]', ['pmulti']),
('p[class~="class2"]', ['pmulti']),
('p[class~="class3"]', ['pmulti']),
('[class~="class1"]', ['pmulti']),
('[class~="class2"]', ['pmulti']),
('[class~="class3"]', ['pmulti']),
('a[rel~="friend"]', ['bob']),
('a[rel~="met"]', ['bob']),
('[rel~="friend"]', ['bob']),
('[rel~="met"]', ['bob']),
)
def test_attribute_startswith(self):
self.assertSelectMultiple(
('[rel^="style"]', ['l1']),
('link[rel^="style"]', ['l1']),
('notlink[rel^="notstyle"]', []),
('[rel^="notstyle"]', []),
('link[rel^="notstyle"]', []),
('link[href^="bla"]', ['l1']),
('a[href^="http://"]', ['bob', 'me']),
('[href^="http://"]', ['bob', 'me']),
('[id^="p"]', ['pmulti', 'p1']),
('[id^="m"]', ['me', 'main']),
('div[id^="m"]', ['main']),
('a[id^="m"]', ['me']),
)
def test_attribute_endswith(self):
self.assertSelectMultiple(
('[href$=".css"]', ['l1']),
('link[href$=".css"]', ['l1']),
('link[id$="1"]', ['l1']),
('[id$="1"]', ['l1', 'p1', 'header1', 's1a1', 's2a1', 's1a2s1']),
('div[id$="1"]', []),
('[id$="noending"]', []),
)
def test_attribute_contains(self):
self.assertSelectMultiple(
# From test_attribute_startswith
('[rel*="style"]', ['l1']),
('link[rel*="style"]', ['l1']),
('notlink[rel*="notstyle"]', []),
('[rel*="notstyle"]', []),
('link[rel*="notstyle"]', []),
('link[href*="bla"]', ['l1']),
('a[href*="http://"]', ['bob', 'me']),
('[href*="http://"]', ['bob', 'me']),
('[id*="p"]', ['pmulti', 'p1']),
('div[id*="m"]', ['main']),
('a[id*="m"]', ['me']),
# From test_attribute_endswith
('[href*=".css"]', ['l1']),
('link[href*=".css"]', ['l1']),
('link[id*="1"]', ['l1']),
('[id*="1"]', ['l1', 'p1', 'header1', 's1a1', 's1a2', 's2a1', 's1a2s1']),
('div[id*="1"]', []),
('[id*="noending"]', []),
# New for this test
('[href*="."]', ['bob', 'me', 'l1']),
('a[href*="."]', ['bob', 'me']),
('link[href*="."]', ['l1']),
('div[id*="n"]', ['main', 'inner']),
('div[id*="nn"]', ['inner']),
)
    def test_attribute_exact_or_hyphen(self):
self.assertSelectMultiple(
('p[lang|="en"]', ['lang-en', 'lang-en-gb', 'lang-en-us']),
('[lang|="en"]', ['lang-en', 'lang-en-gb', 'lang-en-us']),
('p[lang|="fr"]', ['lang-fr']),
('p[lang|="gb"]', []),
)
def test_attribute_exists(self):
self.assertSelectMultiple(
('[rel]', ['l1', 'bob', 'me']),
('link[rel]', ['l1']),
('a[rel]', ['bob', 'me']),
('[lang]', ['lang-en', 'lang-en-gb', 'lang-en-us', 'lang-fr']),
('p[class]', ['p1', 'pmulti']),
('[blah]', []),
('p[blah]', []),
)
def test_select_on_element(self):
# Other tests operate on the tree; this operates on an element
# within the tree.
inner = self.soup.find("div", id="main")
selected = inner.select("div")
# The <div id="inner"> tag was selected. The <div id="footer">
# tag was not.
self.assertSelectsIDs(selected, ['inner'])
| apache-2.0 |
darjeeling/django | tests/utils_tests/test_encoding.py | 53 | 6599 | import datetime
import unittest
from unittest import mock
from urllib.parse import quote_plus
from django.test import SimpleTestCase
from django.utils.encoding import (
DjangoUnicodeDecodeError, escape_uri_path, filepath_to_uri, force_bytes,
force_text, get_system_encoding, iri_to_uri, smart_bytes, smart_text,
uri_to_iri,
)
from django.utils.functional import SimpleLazyObject
from django.utils.translation import gettext_lazy
class TestEncodingUtils(SimpleTestCase):
def test_force_text_exception(self):
"""
Broken __str__ actually raises an error.
"""
class MyString:
def __str__(self):
return b'\xc3\xb6\xc3\xa4\xc3\xbc'
# str(s) raises a TypeError if the result is not a text type.
with self.assertRaises(TypeError):
force_text(MyString())
def test_force_text_lazy(self):
s = SimpleLazyObject(lambda: 'x')
        self.assertIs(type(force_text(s)), str)
def test_force_text_DjangoUnicodeDecodeError(self):
msg = (
"'utf-8' codec can't decode byte 0xff in position 0: invalid "
"start byte. You passed in b'\\xff' (<class 'bytes'>)"
)
with self.assertRaisesMessage(DjangoUnicodeDecodeError, msg):
force_text(b'\xff')
def test_force_bytes_exception(self):
"""
force_bytes knows how to convert to bytes an exception
containing non-ASCII characters in its args.
"""
error_msg = "This is an exception, voilà"
exc = ValueError(error_msg)
self.assertEqual(force_bytes(exc), error_msg.encode())
self.assertEqual(force_bytes(exc, encoding='ascii', errors='ignore'), b'This is an exception, voil')
def test_force_bytes_strings_only(self):
today = datetime.date.today()
self.assertEqual(force_bytes(today, strings_only=True), today)
def test_force_bytes_encoding(self):
error_msg = 'This is an exception, voilà'.encode()
result = force_bytes(error_msg, encoding='ascii', errors='ignore')
self.assertEqual(result, b'This is an exception, voil')
def test_force_bytes_memory_view(self):
self.assertEqual(force_bytes(memoryview(b'abc')), b'abc')
def test_smart_bytes(self):
class Test:
def __str__(self):
return 'ŠĐĆŽćžšđ'
lazy_func = gettext_lazy('x')
self.assertIs(smart_bytes(lazy_func), lazy_func)
self.assertEqual(smart_bytes(Test()), b'\xc5\xa0\xc4\x90\xc4\x86\xc5\xbd\xc4\x87\xc5\xbe\xc5\xa1\xc4\x91')
self.assertEqual(smart_bytes(1), b'1')
self.assertEqual(smart_bytes('foo'), b'foo')
def test_smart_text(self):
class Test:
def __str__(self):
return 'ŠĐĆŽćžšđ'
lazy_func = gettext_lazy('x')
self.assertIs(smart_text(lazy_func), lazy_func)
self.assertEqual(smart_text(Test()), '\u0160\u0110\u0106\u017d\u0107\u017e\u0161\u0111')
self.assertEqual(smart_text(1), '1')
self.assertEqual(smart_text('foo'), 'foo')
def test_get_default_encoding(self):
with mock.patch('locale.getdefaultlocale', side_effect=Exception):
self.assertEqual(get_system_encoding(), 'ascii')
class TestRFC3987IEncodingUtils(unittest.TestCase):
def test_filepath_to_uri(self):
self.assertEqual(filepath_to_uri(None), None)
self.assertEqual(filepath_to_uri('upload\\чубака.mp4'), 'upload/%D1%87%D1%83%D0%B1%D0%B0%D0%BA%D0%B0.mp4')
def test_iri_to_uri(self):
cases = [
# Valid UTF-8 sequences are encoded.
('red%09rosé#red', 'red%09ros%C3%A9#red'),
('/blog/for/Jürgen Münster/', '/blog/for/J%C3%BCrgen%20M%C3%BCnster/'),
('locations/%s' % quote_plus('Paris & Orléans'), 'locations/Paris+%26+Orl%C3%A9ans'),
# Reserved chars remain unescaped.
('%&', '%&'),
('red&♥ros%#red', 'red&%E2%99%A5ros%#red'),
(gettext_lazy('red&♥ros%#red'), 'red&%E2%99%A5ros%#red'),
]
for iri, uri in cases:
self.assertEqual(iri_to_uri(iri), uri)
# Test idempotency.
self.assertEqual(iri_to_uri(iri_to_uri(iri)), uri)
def test_uri_to_iri(self):
cases = [
(None, None),
# Valid UTF-8 sequences are decoded.
('/%e2%89%Ab%E2%99%a5%E2%89%aB/', '/≫♥≫/'),
('/%E2%99%A5%E2%99%A5/?utf8=%E2%9C%93', '/♥♥/?utf8=✓'),
('/%41%5a%6B/', '/AZk/'),
# Reserved and non-URL valid ASCII chars are not decoded.
('/%25%20%02%41%7b/', '/%25%20%02A%7b/'),
# Broken UTF-8 sequences remain escaped.
('/%AAd%AAj%AAa%AAn%AAg%AAo%AA/', '/%AAd%AAj%AAa%AAn%AAg%AAo%AA/'),
('/%E2%99%A5%E2%E2%99%A5/', '/♥%E2♥/'),
('/%E2%99%A5%E2%99%E2%99%A5/', '/♥%E2%99♥/'),
('/%E2%E2%99%A5%E2%99%A5%99/', '/%E2♥♥%99/'),
('/%E2%99%A5%E2%99%A5/?utf8=%9C%93%E2%9C%93%9C%93', '/♥♥/?utf8=%9C%93✓%9C%93'),
]
for uri, iri in cases:
self.assertEqual(uri_to_iri(uri), iri)
# Test idempotency.
self.assertEqual(uri_to_iri(uri_to_iri(uri)), iri)
def test_complementarity(self):
cases = [
('/blog/for/J%C3%BCrgen%20M%C3%BCnster/', '/blog/for/J\xfcrgen%20M\xfcnster/'),
('%&', '%&'),
('red&%E2%99%A5ros%#red', 'red&♥ros%#red'),
('/%E2%99%A5%E2%99%A5/', '/♥♥/'),
('/%E2%99%A5%E2%99%A5/?utf8=%E2%9C%93', '/♥♥/?utf8=✓'),
('/%25%20%02%7b/', '/%25%20%02%7b/'),
('/%AAd%AAj%AAa%AAn%AAg%AAo%AA/', '/%AAd%AAj%AAa%AAn%AAg%AAo%AA/'),
('/%E2%99%A5%E2%E2%99%A5/', '/♥%E2♥/'),
('/%E2%99%A5%E2%99%E2%99%A5/', '/♥%E2%99♥/'),
('/%E2%E2%99%A5%E2%99%A5%99/', '/%E2♥♥%99/'),
('/%E2%99%A5%E2%99%A5/?utf8=%9C%93%E2%9C%93%9C%93', '/♥♥/?utf8=%9C%93✓%9C%93'),
]
for uri, iri in cases:
self.assertEqual(iri_to_uri(uri_to_iri(uri)), uri)
self.assertEqual(uri_to_iri(iri_to_uri(iri)), iri)
def test_escape_uri_path(self):
self.assertEqual(
escape_uri_path('/;some/=awful/?path/:with/@lots/&of/+awful/chars'),
'/%3Bsome/%3Dawful/%3Fpath/:with/@lots/&of/+awful/chars'
)
self.assertEqual(escape_uri_path('/foo#bar'), '/foo%23bar')
self.assertEqual(escape_uri_path('/foo?bar'), '/foo%3Fbar')
| bsd-3-clause |
colejohnson66/distorm | disOps/x86header.py | 29 | 5884 | #
# x86header.py
#
# Copyright (C) 2009 Gil Dabah, http://ragestorm.net/disops/
#
class OperandType:
""" Types of possible operands in an opcode.
Refer to the diStorm's documentation or diStorm's instructions.h
for more explanation about every one of them. """
(NONE,
IMM8,
IMM16,
IMM_FULL,
IMM32,
SEIMM8,
IMM16_1, # NEW
IMM8_1, # NEW
IMM8_2, # NEW
REG8,
REG16,
REG_FULL,
REG32,
REG32_64,
FREG32_64_RM,
RM8,
RM16,
RM_FULL,
RM32_64,
RM16_32,
FPUM16,
FPUM32,
FPUM64,
FPUM80,
R32_M8,
R32_M16,
R32_64_M8,
R32_64_M16,
RFULL_M16,
CREG,
DREG,
SREG,
SEG,
ACC8,
ACC16,
ACC_FULL,
ACC_FULL_NOT64,
MEM16_FULL,
PTR16_FULL,
MEM16_3264,
RELCB,
RELC_FULL,
MEM,
MEM_OPT, # NEW
MEM32,
MEM32_64, # NEW
MEM64,
MEM128,
MEM64_128,
MOFFS8,
MOFFS_FULL,
CONST1,
REGCL,
IB_RB,
IB_R_FULL,
REGI_ESI,
REGI_EDI,
REGI_EBXAL,
REGI_EAX,
REGDX,
REGECX,
FPU_SI,
FPU_SSI,
FPU_SIS,
MM,
MM_RM,
MM32,
MM64,
XMM,
XMM_RM,
XMM16,
XMM32,
XMM64,
XMM128,
REGXMM0,
# Below new for AVX:
RM32,
REG32_64_M8,
REG32_64_M16,
WREG32_64,
WRM32_64,
WXMM32_64,
VXMM,
XMM_IMM,
YXMM,
YXMM_IMM,
YMM,
YMM256,
VYMM,
VYXMM,
YXMM64_256,
YXMM128_256,
LXMM64_128,
LMEM128_256) = range(93)
class OpcodeLength:
""" The length of the opcode in bytes.
Where a suffix of '3' means we have to read the REG field of the ModR/M byte (REG size is 3 bits).
Suffix of 'd' means it's a Divided instruction (see documentation),
tells the disassembler to read the REG field or the whole next byte.
OL_33 and OL_4 are used in raw opcode bytes, they include the mandatory prefix,
therefore when they are defined in the instruction tables, the mandatory prefix table is added,
and they become OL_23 and OL_3 correspondingly. There is no effective opcode which is more than 3 bytes. """
(OL_1, # 0
OL_13, # 1
OL_1d, # 2 - Can be prefixed (only by WAIT/9b)
OL_2, # 3 - Can be prefixed
OL_23, # 4 - Can be prefixed
OL_2d, # 5
OL_3, # 6 - Can be prefixed
OL_33, # 7 - Internal only
OL_4 # 8 - Internal only
) = range(9)
""" Next-Opcode-Length dictionary is used in order to recursively build the instructions' tables dynamically.
It is used in such a way that it indicates how many more nested tables
we have to build and link starting from a given OL. """
NextOL = {OL_13: OL_1, OL_1d: OL_1, OL_2: OL_1, OL_23: OL_13,
OL_2d: OL_1d, OL_3: OL_2, OL_33: OL_23, OL_4: OL_3}
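# Added illustrative note (not part of the original disOps sources): one way to
# read the mapping is that each lookup peels off one more nested table before a
# leaf opcode length is reached, e.g.
#   depth = 0
#   ol = OL_3
#   while ol != OL_1:
#       ol = NextOL[ol]     # OL_3 -> OL_2 -> OL_1
#       depth += 1
#   # depth == 2: two more nested tables hang below an OL_3 entry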
class InstFlag:
""" Instruction Flag contains all bit mask constants for describing an instruction.
You can bitwise-or the flags. See diStorm's documentation for more explanation.
The GEN_BLOCK is a special flag, it is used in the tables generator only;
See GenBlock class inside x86db.py. """
FLAGS_EX_START_INDEX = 32
INST_FLAGS_NONE = 0
(MODRM_REQUIRED, # 0
NOT_DIVIDED, # 1
_16BITS, # 2
_32BITS, # 3
PRE_LOCK, # 4
PRE_REPNZ, # 5
PRE_REP, # 6
PRE_CS, # 7
PRE_SS, # 8
PRE_DS, # 9
PRE_ES, # 10
PRE_FS, # 11
PRE_GS, # 12
PRE_OP_SIZE, # 13
PRE_ADDR_SIZE, # 14
NATIVE, # 15
USE_EXMNEMONIC, # 16
USE_OP3, # 17
USE_OP4, # 18
MNEMONIC_MODRM_BASED, # 19
MODRR_REQUIRED, # 20
_3DNOW_FETCH, # 21
PSEUDO_OPCODE, # 22
INVALID_64BITS, # 23
_64BITS, # 24
PRE_REX, # 25
USE_EXMNEMONIC2, # 26
_64BITS_FETCH, # 27
FORCE_REG0, # 28
PRE_VEX, # 29
MODRM_INCLUDED, # 30
DST_WR, # 31
VEX_L, # 32 From here on: flagsEx.
VEX_W, # 33
MNEMONIC_VEXW_BASED, # 34
MNEMONIC_VEXL_BASED, # 35
FORCE_VEXL, # 36
MODRR_BASED, # 37
VEX_V_UNUSED, # 38
GEN_BLOCK, # 39 From here on: internal to disOps.
EXPORTED # 40
) = [1 << i for i in xrange(41)]
# Nodes are extended if they have any of the following flags:
EXTENDED = (PRE_VEX | USE_EXMNEMONIC | USE_EXMNEMONIC2 | USE_OP3 | USE_OP4)
	SEGMENTS = (PRE_CS | PRE_SS | PRE_DS | PRE_ES | PRE_FS | PRE_GS)
class ISetClass:
""" Instruction-Set-Class indicates to which set the instruction belongs.
These types are taken from the documentation of Intel/AMD. """
(INTEGER,
FPU,
P6,
MMX,
SSE,
SSE2,
SSE3,
SSSE3,
SSE4_1,
SSE4_2,
SSE4_A,
_3DNOW,
_3DNOWEXT,
VMX,
SVM,
AVX,
FMA,
CLMUL,
AES) = range(1, 20)
class FlowControl:
""" The flow control instruction will be flagged in the lo nibble of the 'meta' field in _InstInfo of diStorm.
They are used to distinguish between flow control instructions (such as: ret, call, jmp, jz, etc) to normal ones. """
(CALL,
RET,
SYS,
UNC_BRANCH,
CND_BRANCH,
INT,
CMOV) = range(1, 8)
class NodeType:
""" A node can really be an object holder for an instruction-info object or
another table (list) with a different size.
GROUP - 8 entries in the table
FULL - 256 entries in the table.
Divided - 72 entries in the table (ranges: 0x0-0x7, 0xc0-0xff).
Prefixed - 12 entries in the table (none, 0x66, 0xf2, 0xf3). """
(NONE, # 0
INFO, # 1
INFOEX, # 2
LIST_GROUP, # 3
LIST_FULL, # 4
LIST_DIVIDED, # 5
LIST_PREFIXED # 6
) = range(0, 7)
class CPUFlags:
""" Specifies all the flags that the x86/x64 CPU supports, in a special compact order. """
(CF, # 0
IF, # 1
PF, # 2
DF, # 3
AF, # 4
OF, # 5
ZF, # 6
SF # 7
) = [1 << i for i in xrange(8)]
| gpl-3.0 |
chirilo/mozillians | vendor-local/src/mimeparse/setup.py | 43 | 1807 | # -*- coding: utf-8 -*-
#old way
from distutils.core import setup
#new way
#from setuptools import setup, find_packages
setup(name='mimeparse',
version='0.1.3',
description='A module provides basic functions for parsing mime-type names and matching them against a list of media-ranges.',
long_description="""
This module provides basic functions for handling mime-types. It can handle
matching mime-types against a list of media-ranges. See section 14.1 of
the HTTP specification [RFC 2616] for a complete explanation.
http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.1
Contents:
- parse_mime_type(): Parses a mime-type into its component parts.
- parse_media_range(): Media-ranges are mime-types with wild-cards and a 'q' quality parameter.
- quality(): Determines the quality ('q') of a mime-type when compared against a list of media-ranges.
- quality_parsed(): Just like quality() except the second parameter must be pre-parsed.
- best_match(): Choose the mime-type with the highest quality ('q') from a list of candidates.
""",
classifiers=[
# Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Software Development :: Libraries :: Python Modules',
],
keywords='mime-type',
author='Joe Gregorio',
author_email='[email protected]',
maintainer='Joe Gregorio',
maintainer_email='[email protected]',
url='http://code.google.com/p/mimeparse/',
license='MIT',
py_modules=['mimeparse'],
zip_safe=True,
)
| bsd-3-clause |
pimier15/PyGUI | Kivy/Kivy/Bk_Interractive/Kivy-Interractive application/sample/Chapter_04_code/python3/08 - Behaviours - enhancing widget functionality/comicwidgets.py | 22 | 1999 | # File name: comicwidgets.py
import kivy
kivy.require('1.9.0')
from kivy.uix.scatter import Scatter
from kivy.graphics import Line
class DraggableWidget(Scatter):
def __init__(self, **kwargs):
self.selected = None
self.touched = False
super(DraggableWidget, self).__init__(**kwargs)
def on_touch_down(self, touch):
if self.collide_point(touch.x, touch.y):
self.touched = True
self.select()
super(DraggableWidget, self).on_touch_down(touch)
return True
return super(DraggableWidget, self).on_touch_down(touch)
def select(self):
if not self.selected:
self.ix = self.center_x
self.iy = self.center_y
with self.canvas:
self.selected = Line(rectangle=(0,0,self.width,self.height), dash_offset=2)
def on_pos(self, instance, value):
if self.selected and self.touched:
go = self.parent.general_options
go.translation = (self.center_x- self.ix, self.center_y - self.iy)
self.ix = self.center_x
self.iy = self.center_y
def on_rotation(self, instance, value):
if self.selected and self.touched:
go = self.parent.general_options
go.rotation = value
def on_scale(self, instance, value):
if self.selected and self.touched:
go = self.parent.general_options
go.scale = value
def translate(self, x, y):
self.center_x = self.ix = self.ix + x
self.center_y = self.iy = self.iy + y
def on_touch_up(self, touch):
self.touched = False
if self.selected:
if not self.parent.general_options.group_mode:
self.unselect()
return super(DraggableWidget, self).on_touch_up(touch)
def unselect(self):
if self.selected:
self.canvas.remove(self.selected)
self.selected = None
class StickMan(DraggableWidget):
pass
| mit |
jamiefolsom/edx-platform | common/lib/xmodule/xmodule/imageannotation_module.py | 107 | 7163 | """
Module for Image annotations using annotator.
"""
from lxml import etree
from pkg_resources import resource_string
from xmodule.x_module import XModule
from xmodule.raw_module import RawDescriptor
from xblock.core import Scope, String
from xmodule.annotator_mixin import get_instructions, html_to_text
from xmodule.annotator_token import retrieve_token
from xblock.fragment import Fragment
import textwrap
# Make '_' a no-op so we can scrape strings
_ = lambda text: text
class AnnotatableFields(object):
""" Fields for `ImageModule` and `ImageDescriptor`. """
data = String(
help=_("XML data for the annotation"),
scope=Scope.content,
default=textwrap.dedent("""\
<annotatable>
<instructions>
<p>
Add the instructions to the assignment here.
</p>
</instructions>
<p>
Lorem ipsum dolor sit amet, at amet animal petentium nec. Id augue nemore postulant mea. Ex eam dicant noluisse expetenda, alia admodum abhorreant qui et. An ceteros expetenda mea, tale natum ipsum quo no, ut pro paulo alienum noluisse.
</p>
<json>
navigatorSizeRatio: 0.25,
wrapHorizontal: false,
showNavigator: true,
navigatorPosition: "BOTTOM_LEFT",
showNavigationControl: true,
tileSources: [{"profile": "http://library.stanford.edu/iiif/image-api/1.1/compliance.html#level2", "scale_factors": [1, 2, 4, 8, 16, 32, 64], "tile_height": 1024, "height": 3466, "width": 113793, "tile_width": 1024, "qualities": ["native", "bitonal", "grey", "color"], "formats": ["jpg", "png", "gif"], "@context": "http://library.stanford.edu/iiif/image-api/1.1/context.json", "@id": "http://54.187.32.48/loris/suzhou_orig.jp2"}],
</json>
</annotatable>
"""))
display_name = String(
display_name=_("Display Name"),
help=_("Display name for this module"),
scope=Scope.settings,
default=_('Image Annotation'),
)
instructor_tags = String(
display_name=_("Tags for Assignments"),
help=_("Add tags that automatically highlight in a certain color using the comma-separated form, i.e. imagery:red,parallelism:blue"),
scope=Scope.settings,
default='professor:green,teachingAssistant:blue',
)
annotation_storage_url = String(
help=_("Location of Annotation backend"),
scope=Scope.settings,
default="http://your_annotation_storage.com",
display_name=_("Url for Annotation Storage")
)
annotation_token_secret = String(
help=_("Secret string for annotation storage"),
scope=Scope.settings,
default="xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx",
display_name=_("Secret Token String for Annotation")
)
default_tab = String(
display_name=_("Default Annotations Tab"),
help=_("Select which tab will be the default in the annotations table: myNotes, Instructor, or Public."),
scope=Scope.settings,
default="myNotes",
)
# currently only supports one instructor, will build functionality for multiple later
instructor_email = String(
display_name=_("Email for 'Instructor' Annotations"),
help=_("Email of the user that will be attached to all annotations that will be found in 'Instructor' tab."),
scope=Scope.settings,
default="",
)
annotation_mode = String(
display_name=_("Mode for Annotation Tool"),
help=_("Type in number corresponding to following modes: 'instructor' or 'everyone'"),
scope=Scope.settings,
default="everyone",
)
class ImageAnnotationModule(AnnotatableFields, XModule):
'''Image Annotation Module'''
js = {
'coffee': [
resource_string(__name__, 'js/src/javascript_loader.coffee'),
resource_string(__name__, 'js/src/html/display.coffee'),
resource_string(__name__, 'js/src/annotatable/display.coffee'),
],
'js': [
resource_string(__name__, 'js/src/collapsible.js'),
]
}
css = {'scss': [resource_string(__name__, 'css/annotatable/display.scss')]}
icon_class = 'imageannotation'
def __init__(self, *args, **kwargs):
super(ImageAnnotationModule, self).__init__(*args, **kwargs)
xmltree = etree.fromstring(self.data)
self.instructions = self._extract_instructions(xmltree)
self.openseadragonjson = html_to_text(etree.tostring(xmltree.find('json'), encoding='unicode'))
self.user_email = ""
self.is_course_staff = False
if self.runtime.get_user_role() in ['instructor', 'staff']:
self.is_course_staff = True
if self.runtime.get_real_user is not None:
try:
self.user_email = self.runtime.get_real_user(self.runtime.anonymous_student_id).email
except Exception: # pylint: disable=broad-except
self.user_email = _("No email address found.")
def _extract_instructions(self, xmltree):
""" Removes <instructions> from the xmltree and returns them as a string, otherwise None. """
return get_instructions(xmltree)
def student_view(self, context):
""" Renders parameters to template. """
context = {
'display_name': self.display_name_with_default,
'instructions_html': self.instructions,
'token': retrieve_token(self.user_email, self.annotation_token_secret),
'tag': self.instructor_tags,
'openseadragonjson': self.openseadragonjson,
'annotation_storage': self.annotation_storage_url,
'default_tab': self.default_tab,
'instructor_email': self.instructor_email,
'annotation_mode': self.annotation_mode,
'is_course_staff': self.is_course_staff,
}
fragment = Fragment(self.system.render_template('imageannotation.html', context))
# TinyMCE already exists in Studio so we should not load the files again
# get_real_user always returns "None" in Studio since its runtimes contains no anonymous ids
if self.runtime.get_real_user is not None:
fragment.add_javascript_url(self.runtime.STATIC_URL + "js/vendor/tinymce/js/tinymce/tinymce.full.min.js")
fragment.add_javascript_url(self.runtime.STATIC_URL + "js/vendor/tinymce/js/tinymce/jquery.tinymce.min.js")
return fragment
class ImageAnnotationDescriptor(AnnotatableFields, RawDescriptor): # pylint: disable=abstract-method
''' Image annotation descriptor '''
module_class = ImageAnnotationModule
mako_template = "widgets/raw-edit.html"
@property
def non_editable_metadata_fields(self):
non_editable_fields = super(ImageAnnotationDescriptor, self).non_editable_metadata_fields
non_editable_fields.extend([
ImageAnnotationDescriptor.annotation_storage_url,
ImageAnnotationDescriptor.annotation_token_secret,
])
return non_editable_fields
| agpl-3.0 |
ChanderG/scikit-learn | sklearn/metrics/tests/test_regression.py | 272 | 6066 | from __future__ import division, print_function
import numpy as np
from itertools import product
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.metrics import explained_variance_score
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_squared_error
from sklearn.metrics import median_absolute_error
from sklearn.metrics import r2_score
from sklearn.metrics.regression import _check_reg_targets
def test_regression_metrics(n_samples=50):
y_true = np.arange(n_samples)
y_pred = y_true + 1
assert_almost_equal(mean_squared_error(y_true, y_pred), 1.)
assert_almost_equal(mean_absolute_error(y_true, y_pred), 1.)
assert_almost_equal(median_absolute_error(y_true, y_pred), 1.)
assert_almost_equal(r2_score(y_true, y_pred), 0.995, 2)
assert_almost_equal(explained_variance_score(y_true, y_pred), 1.)
def test_multioutput_regression():
y_true = np.array([[1, 0, 0, 1], [0, 1, 1, 1], [1, 1, 0, 1]])
y_pred = np.array([[0, 0, 0, 1], [1, 0, 1, 1], [0, 0, 0, 1]])
error = mean_squared_error(y_true, y_pred)
assert_almost_equal(error, (1. / 3 + 2. / 3 + 2. / 3) / 4.)
# mean_absolute_error and mean_squared_error are equal because
# it is a binary problem.
error = mean_absolute_error(y_true, y_pred)
assert_almost_equal(error, (1. / 3 + 2. / 3 + 2. / 3) / 4.)
error = r2_score(y_true, y_pred, multioutput='variance_weighted')
assert_almost_equal(error, 1. - 5. / 2)
error = r2_score(y_true, y_pred, multioutput='uniform_average')
assert_almost_equal(error, -.875)
def test_regression_metrics_at_limits():
assert_almost_equal(mean_squared_error([0.], [0.]), 0.00, 2)
assert_almost_equal(mean_absolute_error([0.], [0.]), 0.00, 2)
assert_almost_equal(median_absolute_error([0.], [0.]), 0.00, 2)
assert_almost_equal(explained_variance_score([0.], [0.]), 1.00, 2)
assert_almost_equal(r2_score([0., 1], [0., 1]), 1.00, 2)
def test__check_reg_targets():
# All of length 3
EXAMPLES = [
("continuous", [1, 2, 3], 1),
("continuous", [[1], [2], [3]], 1),
("continuous-multioutput", [[1, 1], [2, 2], [3, 1]], 2),
("continuous-multioutput", [[5, 1], [4, 2], [3, 1]], 2),
("continuous-multioutput", [[1, 3, 4], [2, 2, 2], [3, 1, 1]], 3),
]
for (type1, y1, n_out1), (type2, y2, n_out2) in product(EXAMPLES,
repeat=2):
if type1 == type2 and n_out1 == n_out2:
y_type, y_check1, y_check2, multioutput = _check_reg_targets(
y1, y2, None)
assert_equal(type1, y_type)
if type1 == 'continuous':
assert_array_equal(y_check1, np.reshape(y1, (-1, 1)))
assert_array_equal(y_check2, np.reshape(y2, (-1, 1)))
else:
assert_array_equal(y_check1, y1)
assert_array_equal(y_check2, y2)
else:
assert_raises(ValueError, _check_reg_targets, y1, y2, None)
def test_regression_multioutput_array():
y_true = [[1, 2], [2.5, -1], [4.5, 3], [5, 7]]
y_pred = [[1, 1], [2, -1], [5, 4], [5, 6.5]]
mse = mean_squared_error(y_true, y_pred, multioutput='raw_values')
mae = mean_absolute_error(y_true, y_pred, multioutput='raw_values')
r = r2_score(y_true, y_pred, multioutput='raw_values')
evs = explained_variance_score(y_true, y_pred, multioutput='raw_values')
assert_array_almost_equal(mse, [0.125, 0.5625], decimal=2)
assert_array_almost_equal(mae, [0.25, 0.625], decimal=2)
assert_array_almost_equal(r, [0.95, 0.93], decimal=2)
assert_array_almost_equal(evs, [0.95, 0.93], decimal=2)
# mean_absolute_error and mean_squared_error are equal because
# it is a binary problem.
y_true = [[0, 0]]*4
y_pred = [[1, 1]]*4
mse = mean_squared_error(y_true, y_pred, multioutput='raw_values')
mae = mean_absolute_error(y_true, y_pred, multioutput='raw_values')
r = r2_score(y_true, y_pred, multioutput='raw_values')
assert_array_almost_equal(mse, [1., 1.], decimal=2)
assert_array_almost_equal(mae, [1., 1.], decimal=2)
assert_array_almost_equal(r, [0., 0.], decimal=2)
r = r2_score([[0, -1], [0, 1]], [[2, 2], [1, 1]], multioutput='raw_values')
assert_array_almost_equal(r, [0, -3.5], decimal=2)
assert_equal(np.mean(r), r2_score([[0, -1], [0, 1]], [[2, 2], [1, 1]],
multioutput='uniform_average'))
evs = explained_variance_score([[0, -1], [0, 1]], [[2, 2], [1, 1]],
multioutput='raw_values')
assert_array_almost_equal(evs, [0, -1.25], decimal=2)
# Checking for the condition in which both numerator and denominator is
# zero.
y_true = [[1, 3], [-1, 2]]
y_pred = [[1, 4], [-1, 1]]
r2 = r2_score(y_true, y_pred, multioutput='raw_values')
assert_array_almost_equal(r2, [1., -3.], decimal=2)
assert_equal(np.mean(r2), r2_score(y_true, y_pred,
multioutput='uniform_average'))
evs = explained_variance_score(y_true, y_pred, multioutput='raw_values')
assert_array_almost_equal(evs, [1., -3.], decimal=2)
assert_equal(np.mean(evs), explained_variance_score(y_true, y_pred))
def test_regression_custom_weights():
y_true = [[1, 2], [2.5, -1], [4.5, 3], [5, 7]]
y_pred = [[1, 1], [2, -1], [5, 4], [5, 6.5]]
msew = mean_squared_error(y_true, y_pred, multioutput=[0.4, 0.6])
maew = mean_absolute_error(y_true, y_pred, multioutput=[0.4, 0.6])
rw = r2_score(y_true, y_pred, multioutput=[0.4, 0.6])
evsw = explained_variance_score(y_true, y_pred, multioutput=[0.4, 0.6])
assert_almost_equal(msew, 0.39, decimal=2)
assert_almost_equal(maew, 0.475, decimal=3)
assert_almost_equal(rw, 0.94, decimal=2)
assert_almost_equal(evsw, 0.94, decimal=2)
| bsd-3-clause |
andreshp/Algorithms | MachineLearning/Clustering/SingleLinkedClustering/SLClusteringDistance.py | 1 | 12524 | #!/usr/bin/python
######################################################################
# Author: Andrés Herrera Poyatos
# Universidad de Granada, March, 2015
# Single-Linkage Clustering Algorithm
#######################################################################
# This program read the values asociated to the vertices from a file.
# The distance between 2 vertices is calculated with those values.
# For example, if the values are a string of 0s and 1s with a fixed size,
# the Hamming distance is the number of bit positions at which the two strings differ.
# (This is the distance considered in the code but it is easy to change).
#
# Given a positive integer k, the program executes the Single-Linkage
# Clustering Algorithm to find the k clusters that maximize:
# min d(x, y)
# x, y are in a different cluster
import sys
import time
from gmpy2 import popcount
#------------- MINHEAP IMPLEMENTATION --------------#
# Swap two components of an array
def swap(array, i, j):
copy = array[i]
array[i] = array[j]
array[j] = copy
array[i].index = i
array[j].index = j
# MinHeap Class.
# A heap is a representation of a complete binary tree as an array.
# The array has the Breadth-First Order of the nodes. Consecuently,
# the following equitities are true:
# leftChild(index) = 2*index+1
# rightChild(index) = 2*index+2
# parent(index) = (index-1) // 2
#
# A MinHeap is a heap where there is a total order relation and verifies the following property:
# "heap[i] >= heap[parent(i)] for all i in range(0, size())"
# analogously:
# "Each children is greater or equal than its parent."
#
# Consecuently, heap[0] is the minimum of the elements of the heap.
# A MinHeap supports the following operations:
# - Get the minimum in O(1) (return heap[0])
# - Insert an element in O(log n)
# - Delete an element in O(log n)
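# Added illustrative example: for the array [2, 5, 3, 9, 7] read in
# Breadth-First Order,
#   leftChild(0) = 1 -> 5,  rightChild(0) = 2 -> 3,  parent(4) = 1 -> 5,
# and the MinHeap property holds because every child (5, 3, 9, 7) is greater
# than or equal to its parent.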
class MinHeap(object):
# Init method
def __init__(self):
self.heap = []
# Check if the Heap is empty
def empty(self):
return (not self.heap)
# Return the min of the Heap.
# Precondition: The Heap must be not empty.
def min(self):
return self.heap[0] # A MinHeap keeps the min in the first position.
# Size of the Heap
def size(self):
return len(self.heap)
# Insert Method
def insert(self, element):
element.index = len(self.heap)
self.heap.append(element)
self._repairUp(len(self.heap)-1)
# Insert the elements of an array
def insertArray(self, array):
for number in array:
self.insert(number)
# Delete an element from the Heap
# Precondition: The Heap must be not empty.
def delete(self, index):
swap(self.heap, index, len(self.heap)-1)
self.heap.pop()
self._repairDown(index)
# Delete min from the Heap.
# Precondition: The Heap must be not empty.
def deleteMin(self):
swap(self.heap, 0, len(self.heap)-1)
self.heap.pop()
self._repairDown(0)
# Change the value of an element and repair the MinHeap Structure.
def changeElement(self, index, value):
self.heap[index] = value
        self._repairHeap(index)
# Execute HeapSort to the elements of the heap.
def heapSort(self):
sorted_array = []
while(not self.empty()):
sorted_array.append(self.min())
self.deleteMin()
return sorted_array
# Print Heap by levels
def printHeap(self):
elements_level = 1
print("Heap:")
for i in range(0, len(self.heap)):
if i == elements_level:
elements_level += elements_level+1; print()
print(self.heap[i], " ", end="")
print(); print()
# Check that it is a MinHeap.
# The invariant is checked.
def _checkHeap(self):
is_heap = True; fail = -1
for i in range(1, len(self.heap)):
if self.heap[i] < self.heap[(i-1) // 2]:
is_heap = False; fail = i; break
return is_heap, fail
# Repair the Min Heap invariant:
# Each parent key is less or equal than their children keys.
def _repairHeap(self, index):
self._repairUp(index)
self._repairDown(index)
# Go up in the Heap repairing its invariant
def _repairUp(self, index):
parent = (index-1) // 2
while index > 0:
if self.heap[index] < self.heap[parent]:
swap(self.heap, index, parent)
else: break
index = parent
parent = (index-1) // 2
# Go down in the Heap repairing its invariant
def _repairDown(self, index):
child = 2 * index + 1
while child < len(self.heap):
if child + 1 < len(self.heap) and self.heap[child] > self.heap[child+1]:
child += 1
if self.heap[index] > self.heap[child]:
swap(self.heap, child, index)
else: break
index = child
child = 2 * index +1
#------------- VERTEX IMPLEMENTATION --------------#
# Vertex Class.
# It keeps the vertex value and the cluster
# asociated with the vertex.
class Vertex(object):
# Contructor
def __init__(self, key, value):
self.key = key
self.value = value
self.cluster = Cluster(self)
self.edge = -1 # Used in the clustering algorithm
self.distance = float("inf") # Used in the clustering algorithm
# Overloading comparisons operators
def __lt__(self, other):
return (self.distance < other.distance)
def __le__(self, other):
return(self.distance <= other.distance)
def __gt__(self, other):
return(self.distance > other.distance)
def __ge__(self, other):
return(self.distance >= other.distance)
# Hash function
def __hash__(self):
return self.key.__hash__()
# Distance between two vertices.
# In this case, it is the Hamming Distance.
def hammingDistance(self, vertex):
return popcount(self.value ^ vertex.value)
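# Added illustrative example: for values 0b1011 and 0b0010,
# 0b1011 ^ 0b0010 == 0b1001 and popcount(0b1001) == 2, so the Hamming distance
# between the two vertices is 2.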
#------------- CLUSTER IMPLEMENTATION --------------#
# Union - Find Data Structure
# Each vertex has an asociated cluster. We can:
# - Get the asociated cluster of a vertex in O(1)
# - Join two clusters of size r and s in O(min{r,s})
class Cluster(object):
# Number of active clusters (class attribute)
n_clusters = 0
# Initializes a cluster
def __init__(self, vertex):
self.index = Cluster.n_clusters
Cluster.n_clusters += 1
self.members = [vertex]
# Adds a vertex to the cluster
def add(self, vertex):
self.members.append(vertex)
vertex.cluster = self
# Size of the cluster
def size(self):
return len(self.members)
# Get the cluster of a given vertex. It is a class method
def getSet(vertex):
return vertex.cluster
# Returns True if both nodes are in the same cluster.
# Returns False otherwise.
def sameCluster(node1, node2):
return node1.cluster is node2.cluster
# Class method to join nodes' cluster in just one
def join(node1, node2):
node1.cluster._join(node2.cluster)
# Both clusters are joined in one of them
def _join(self, other):
if self.size() < other.size():
self.__join(other)
else:
other.__join(self)
# Private method to accomplish the join
def __join(self, other):
for vertex in self.members:
other.add(vertex)
self.members = []
Cluster.n_clusters -= 1
# Hash function
def __hash__(self):
return self.index.__hash__()
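# Added illustrative note: after Cluster.join(v1, v2) both vertices reference
# the same cluster object, so Cluster.sameCluster(v1, v2) is True and
# Cluster.n_clusters has decreased by one; the smaller member list is merged
# into the larger one, which bounds the cost of a join by the size of the
# smaller cluster.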
#------------- SINGLE-LINKAGE CLUSTERING --------------#
# Single-Linkage Clustering Algorithm
# Parameters:
# - edges :
# - k : Number of clusters
def SLClustering(vertices, k):
# For each vertex, we find the one to which there is less distance (and not used yet).
for i in range(1, len(vertices)-1):
for j in range(i+1, len(vertices)):
new_distance = vertices[i].hammingDistance(vertices[j])
if new_distance < vertices[i].distance:
vertices[i].distance = new_distance
vertices[i].edge = j
for i in range(2, len(vertices)):
for j in range(1, i):
if vertices[j].edge != i:
new_distance = vertices[i].hammingDistance(vertices[j])
if new_distance < vertices[i].distance:
vertices[i].distance = new_distance
vertices[i].edge = j
# Build a min heap with all the vertices:
heap = MinHeap()
for i in range(1, len(vertices)):
heap.insert(vertices[i])
# Add the max_edges times the edge between separated clusters
# that has the minimum cost and join the respective clusters.
max_edges = len(vertices) - k - 1
added_edges = 0
while added_edges < max_edges:
# Next vertex of the heap
next_vertex = heap.min(); heap.deleteMin()
# If it has a valid edge (an edge between two different clusters)
# join those clusters and count it.
if not Cluster.sameCluster(next_vertex, vertices[next_vertex.edge]):
Cluster.join(next_vertex, vertices[next_vertex.edge])
added_edges += 1
# Put the vertex again in the heap with the edge with minimum cost
# from those which go to a different cluster.
next_vertex.distance = float("inf")
next_vertex.edge = -1
for j in range(1, len(vertices)):
if not Cluster.sameCluster(next_vertex, vertices[j]):
new_distance = next_vertex.hammingDistance(vertices[j])
if new_distance < next_vertex.distance:
next_vertex.distance = new_distance
next_vertex.edge = j
if next_vertex.distance < float("inf"):
heap.insert(next_vertex)
if added_edges % 10 == 0:
print("Completed: ", (added_edges / max_edges) * 100.0)
# Find the maximum spacing distance between k clusters
max_spacing = float("inf")
while Cluster.sameCluster(heap.min(), vertices[heap.min().edge]):
heap.deleteMin()
max_spacing = heap.min().distance
return max_spacing
# Read the vertices from a file.
# It initializes the vertices, the clusters (one per vertex)
# and returns a list with the vertices.
def readVertices(distances_file):
data = open(distances_file, "r")
# Build the vertices
first_line = data.readline()
num_vertices = int(first_line.split()[0])
num_bits = int(first_line.split()[1])
vertices = [None] * (num_vertices+1)
# Each line corresponds to a vertex
# It contains the value of the vertex, a string with num_bits bits of 0s and 1s,
# such as: 1 1 1 0 0 0 0 0 1 1 0 1 0 0 1 1 1 1 0 0 1 1 1 1.
# We represent it as an integer in base 2.
i = 1
for line in data:
vertices[i] = Vertex(i, int(line.replace(" ", ""), 2))
i += 1
return vertices
######################## MAIN ##########################
# See if arguments are correct
if len(sys.argv) < 3 or len(sys.argv) > 4:
print("Sintax: SLClusteringDistance.py <options> distances.txt k \n The option -n don't print the clusters.")
sys.exit()
print_clusters = True
if len(sys.argv) > 3:
if sys.argv[1] == "-n":
print_clusters = False
# Read the distances between the vertices and the value of k
try:
distances_file = sys.argv[1 if len(sys.argv) == 3 else 2]
vertices = readVertices(distances_file)
k = int(sys.argv[2 if len(sys.argv) == 3 else 3])
except IOError:
print("Error: The file", distances_file, "can\'t be read.")
sys.exit()
# Execute clustering algorithm and compute the time wasted
start_time = time.time()
try:
maximum_spacing_distance = SLClustering(vertices, k)
except RuntimeError as element:
print("Error:", element.args[0] , "is not a vertex.")
sys.exit()
print("--- %f seconds ---" % (time.time() - start_time))
# Print the result
print("Maximum Spacing of a", k, "-Clustering:", maximum_spacing_distance)
# If chosen, print the clusters
if print_clusters:
print("Clusters:")
clusters = set()
for j in range(1,len(vertices)):
clusters.add(vertices[j].cluster)
i = 1
for cluster in clusters:
print("Cluster", i, ":")
for vertex in cluster.members:
print(vertex.key, end=" ")
print()
i += 1
| gpl-2.0 |
IZSVenezie/VetEpiGIS-Stat | plugin/local_dialog.py | 1 | 5920 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'local_dialog_base.ui'
#
# Created: Sat Jan 7 15:11:01 2017
# by: PyQt4 UI code generator 4.10.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_Dialog(object):
def setupUi(self, Dialog):
Dialog.setObjectName(_fromUtf8("Dialog"))
Dialog.resize(588, 551)
self.gridLayout_3 = QtGui.QGridLayout(Dialog)
self.gridLayout_3.setObjectName(_fromUtf8("gridLayout_3"))
self.splitter = QtGui.QSplitter(Dialog)
self.splitter.setOrientation(QtCore.Qt.Horizontal)
self.splitter.setObjectName(_fromUtf8("splitter"))
self.label = QtGui.QLabel(self.splitter)
self.label.setObjectName(_fromUtf8("label"))
self.comboBox = QtGui.QComboBox(self.splitter)
self.comboBox.setMinimumSize(QtCore.QSize(251, 0))
self.comboBox.setObjectName(_fromUtf8("comboBox"))
self.gridLayout_3.addWidget(self.splitter, 0, 0, 1, 6)
self.label_5 = QtGui.QLabel(Dialog)
self.label_5.setObjectName(_fromUtf8("label_5"))
self.gridLayout_3.addWidget(self.label_5, 1, 0, 1, 1)
self.comboBox_5 = QtGui.QComboBox(Dialog)
self.comboBox_5.setObjectName(_fromUtf8("comboBox_5"))
self.gridLayout_3.addWidget(self.comboBox_5, 1, 1, 1, 1)
self.lineEdit = QtGui.QLineEdit(Dialog)
self.lineEdit.setInputMethodHints(QtCore.Qt.ImhNone)
self.lineEdit.setInputMask(_fromUtf8(""))
self.lineEdit.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.lineEdit.setObjectName(_fromUtf8("lineEdit"))
self.gridLayout_3.addWidget(self.lineEdit, 1, 3, 1, 1)
self.comboBox_6 = QtGui.QComboBox(Dialog)
self.comboBox_6.setObjectName(_fromUtf8("comboBox_6"))
self.gridLayout_3.addWidget(self.comboBox_6, 1, 4, 1, 1)
spacerItem = QtGui.QSpacerItem(21, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.gridLayout_3.addItem(spacerItem, 1, 5, 1, 1)
self.gridLayout = QtGui.QGridLayout()
self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
self.comboBox_2 = QtGui.QComboBox(Dialog)
self.comboBox_2.setObjectName(_fromUtf8("comboBox_2"))
self.gridLayout.addWidget(self.comboBox_2, 0, 1, 1, 1)
self.label_2 = QtGui.QLabel(Dialog)
self.label_2.setObjectName(_fromUtf8("label_2"))
self.gridLayout.addWidget(self.label_2, 0, 0, 1, 1)
self.gridLayout_3.addLayout(self.gridLayout, 2, 0, 1, 2)
spacerItem1 = QtGui.QSpacerItem(39, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.gridLayout_3.addItem(spacerItem1, 2, 2, 1, 1)
self.label_4 = QtGui.QLabel(Dialog)
self.label_4.setObjectName(_fromUtf8("label_4"))
self.gridLayout_3.addWidget(self.label_4, 2, 3, 1, 1)
self.comboBox_4 = QtGui.QComboBox(Dialog)
self.comboBox_4.setObjectName(_fromUtf8("comboBox_4"))
self.gridLayout_3.addWidget(self.comboBox_4, 2, 4, 1, 1)
self.gridLayout_2 = QtGui.QGridLayout()
self.gridLayout_2.setObjectName(_fromUtf8("gridLayout_2"))
self.label_3 = QtGui.QLabel(Dialog)
self.label_3.setObjectName(_fromUtf8("label_3"))
self.gridLayout_2.addWidget(self.label_3, 0, 0, 1, 1)
self.comboBox_3 = QtGui.QComboBox(Dialog)
self.comboBox_3.setObjectName(_fromUtf8("comboBox_3"))
self.gridLayout_2.addWidget(self.comboBox_3, 0, 1, 1, 1)
self.gridLayout_3.addLayout(self.gridLayout_2, 3, 0, 1, 2)
spacerItem2 = QtGui.QSpacerItem(233, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.gridLayout_3.addItem(spacerItem2, 3, 3, 1, 2)
self.toolButton = QtGui.QToolButton(Dialog)
self.toolButton.setIconSize(QtCore.QSize(30, 30))
self.toolButton.setObjectName(_fromUtf8("toolButton"))
self.gridLayout_3.addWidget(self.toolButton, 3, 5, 1, 1)
self.tableView = QtGui.QTableView(Dialog)
self.tableView.setEditTriggers(QtGui.QAbstractItemView.NoEditTriggers)
self.tableView.setObjectName(_fromUtf8("tableView"))
self.gridLayout_3.addWidget(self.tableView, 4, 0, 1, 6)
self.buttonBox = QtGui.QDialogButtonBox(Dialog)
self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
self.buttonBox.setStandardButtons(QtGui.QDialogButtonBox.Close|QtGui.QDialogButtonBox.Save)
self.buttonBox.setObjectName(_fromUtf8("buttonBox"))
self.gridLayout_3.addWidget(self.buttonBox, 5, 3, 1, 3)
self.retranslateUi(Dialog)
QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8("accepted()")), Dialog.accept)
QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8("rejected()")), Dialog.reject)
QtCore.QMetaObject.connectSlotsByName(Dialog)
def retranslateUi(self, Dialog):
Dialog.setWindowTitle(_translate("Dialog", "Dialog", None))
self.label.setText(_translate("Dialog", "Data field:", None))
self.label_5.setText(_translate("Dialog", "Neighbouring method:", None))
self.label_2.setText(_translate("Dialog", "Weighting scheme:", None))
self.label_4.setText(_translate("Dialog", "Variance assumption:", None))
self.label_3.setText(_translate("Dialog", "Alternative hypothesis:", None))
self.toolButton.setText(_translate("Dialog", "...", None))
| gpl-3.0 |
luomiao/docker-volume-vsphere | esx_service/utils/auth_api.py | 2 | 39067 | # Copyright 2016 VMware, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License
"""APIs for tenant management.
Note that externally used functions are named _function() and internally are function(),
which contradicts python module function naming. It will be fixed later (issue #1153) """
import auth
import auth_data_const
import convert
import auth_data
import vmdk_utils
import error_code
import log_config
import logging
from error_code import ErrorCode
from error_code import ErrorInfo
from error_code import generate_error_info
from error_code import error_code_to_message
import re
# regex for valid tenant name
VALID_TENANT_NAME_REGEXP = "[a-zA-Z0-9_][a-zA-Z0-9_.-]*"
VALID_TENANT_NAMES = 'rename of vmgroups other than _DEFAULT'
global valid_tenant_name_reg
valid_tenant_name_reg = re.compile("^" + VALID_TENANT_NAME_REGEXP + "$")
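# Added illustrative examples: "team_1" and "dev.group-2" match the pattern
# above, while names starting with '.' or '-' (e.g. ".hidden") do not.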
def get_auth_mgr_object():
""" Get a auth_mgr object which needed to connect to auth DB. """
# auth.get_auth_mgr will not throw an Exception
# it will return err_msg when it fails
err_msg, auth_mgr = auth.get_auth_mgr()
if err_msg:
error_info = error_code.generate_error_info(ErrorCode.INTERNAL_ERROR, err_msg)
return error_info, None
return None, auth_mgr
def only_when_configured(ret_obj=False):
"""
Decorator to check if the DB was already inited.
Serves functions which return ErrInfo (when ret_obj=False), and the ones
returning (ErrInfo, None) when ret_obj=True.
Makes sure the decorated function is called only when DB is connected,
otherwise a proper ErrInfo is returned.
"""
def real_decorator(func):
'The actual logic for decorator.'
def not_inited():
'Returns err code for not initialized'
return generate_error_info(ErrorCode.INIT_NEEDED)
def internal_error():
'Returns error code for internal errors'
return generate_error_info(ErrorCode.INTERNAL_ERROR,
"@only_when_configured: %s" % func.__name__)
def check_config(*args, **kwargs):
'call func() if DB is configured and issue an error if not.'
error_info, auth_mgr = get_auth_mgr_object()
if error_info:
if ret_obj:
return internal_error(), None
else:
return internal_error()
if auth_mgr.allow_all_access():
if ret_obj:
return not_inited(), None
else:
return not_inited()
# No error, we can go ahead and call the function
return func(*args, **kwargs)
# this function will actually do the checks for connection
return check_config
# @only_when_configured just handles the ret_obj param and returns real_decorator to be called
return real_decorator
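# Added illustrative sketch (the decorated function below is hypothetical, not
# part of this module): a handler that returns the pair (error_info, result)
# would be wrapped with ret_obj=True, e.g.
#
#   @only_when_configured(ret_obj=True)
#   def _get_something(name):
#       ...
#       return None, result
#
# so callers receive (an INIT_NEEDED error, None) while the config DB has not
# been initialized, instead of the handler running against a missing DB.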
def get_tenant_from_db(name):
"""
Get a tenant object with given name
Return value:
-- error_code: return None on success or error info on failure
-- tenant: return tenant object on success or None on failure
"""
error_info, auth_mgr = get_auth_mgr_object()
if error_info:
return error_info, None
logging.debug("auth_api.get_tenant_from_db name=%s", name)
error_msg, tenant = auth_mgr.get_tenant(name)
if error_msg:
error_info = generate_error_info(error_msg)
return error_info, None
return None, tenant
def get_tenant_name(tenant_uuid):
"""
Get tenant name with given tenant_uuid
Return value:
-- error_info: return None on success or error info on failure
-- tenant_name: return tenant name on success or None on failure
"""
error_info, auth_mgr = get_auth_mgr_object()
if error_info:
return error_info, None
error_msg, tenant_name = auth_mgr.get_tenant_name(tenant_uuid)
if error_msg:
error_info = generate_error_info(ErrorCode.INTERNAL_ERROR, error_msg)
return error_info, tenant_name
def check_tenant_exist(name):
""" Check tenant with @param name exist or not
Return value:
-- Return None if tenant with given name does not exist
-- Return error_info on failure or the tenant with given name exists
"""
error_info, auth_mgr = get_auth_mgr_object()
if error_info:
return error_info
error_msg, exist_tenant = auth_mgr.get_tenant(name)
if error_msg:
error_info = generate_error_info(ErrorCode.INTERNAL_ERROR, error_msg)
return error_info
if exist_tenant:
error_info = generate_error_info(ErrorCode.TENANT_ALREADY_EXIST, name)
return error_info
def create_tenant_in_db(name, description, vms, privileges):
"""
Create a tenant object in DB
Return value:
-- error_info: return None on success or error info on failure
-- tenant: return tenant object on success or None on failure
"""
error_info = check_tenant_exist(name)
if error_info:
return error_info, None
error_info, auth_mgr = get_auth_mgr_object()
if error_info:
return error_info, None
error_msg, tenant = auth_mgr.create_tenant(name=name,
description=description,
vms=vms,
privileges=privileges)
if error_msg:
error_info = generate_error_info(ErrorCode.INTERNAL_ERROR, error_msg)
return error_info, tenant
def get_tenant_list_from_db(name=None):
"""
List all tenants or tenant with the name specified
Params:
-- name: if "name" is specified, return a list of one tenant with name specified
if "name" is not specified, return a list of all tenants
Return value:
-- error_info: return None on success or error info on failure
-- tenant_list: return a list of tenant objects on success or None on failure
"""
error_info, auth_mgr = get_auth_mgr_object()
if error_info:
return error_info, None
if not name:
error_msg, tenant_list = auth_mgr.list_tenants()
if error_msg:
error_info = generate_error_info(ErrorCode.INTERNAL_ERROR, error_msg)
else:
error_msg, tenant = auth_mgr.get_tenant(name)
if error_msg:
error_info = generate_error_info(ErrorCode.INTERNAL_ERROR, error_msg)
if error_msg or not tenant:
tenant_list = []
else:
tenant_list = [tenant]
return error_info, tenant_list
def generate_tuple_from_vm_list(vm_list):
""" Generate a list of (vm_uuid, vm_name) pair """
if not vm_list:
return None, [], []
vms = []
error_msg = ""
not_found_vms = []
for vm_name in vm_list:
vm_uuid = vmdk_utils.get_vm_uuid_by_name(vm_name)
if not vm_uuid:
err = "Cannot find vm_uuid for vm {0} ".format(vm_name)
if err:
error_msg = error_msg + err
not_found_vms.append(vm_name)
vms.append((vm_uuid, vm_name))
if error_msg:
return error_msg, vms, not_found_vms
return None, vms, not_found_vms
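# Added illustrative example (VM names are made up): for vm_list == ["vm1", "vm2"]
# where only "vm1" resolves to a UUID, the function returns
#   ("Cannot find vm_uuid for vm vm2 ", [(uuid1, "vm1"), (None, "vm2")], ["vm2"])
# i.e. unresolved VMs still appear in the (vm_uuid, vm_name) list with a None uuid.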
def default_privileges():
""" Return a privilege object with default value """
privileges = [{'datastore': '',
'allow_create': 0,
'max_volume_size': 0,
'usage_quota': 0}]
return privileges
def set_privileges(allow_create, privileges, value):
""" set or unset allow_create privileges based on input param """
logging.debug("set_privileges: allow_create=%s, privileges=%s, value=%d", allow_create,
privileges, value)
privileges[auth_data_const.COL_ALLOW_CREATE] = value
return privileges
def validate_string_to_bool(allow_create):
"""
Validating case insensitive true, false strings
    Return the boolean value of the argument if it is valid,
    else return the original value. Also return whether the argument is valid or not
"""
is_valid = True
# If already bool, return
if type(allow_create) is bool:
return allow_create, is_valid
allow_create = str(allow_create).lower()
if allow_create == "true":
return True, is_valid
elif allow_create == "false":
return False, is_valid
else:
is_valid = False
return allow_create, is_valid
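# Added illustrative examples: validate_string_to_bool("TRUE") returns
# (True, True) and validate_string_to_bool("no") returns ("no", False), i.e. an
# unrecognized string comes back unchanged together with is_valid == False.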
def generate_privileges(datastore_url, allow_create, volume_maxsize_in_MB, volume_totalsize_in_MB):
""" Generate privileges based on input params """
logging.debug("generate_privileges: datastore_url=%s allow_create=%s"
"volume_maxsize_in_MB=%s volume_totalsize_in_MB=%s",
datastore_url, allow_create, volume_maxsize_in_MB, volume_totalsize_in_MB)
privileges = default_privileges()[0]
privileges[auth_data_const.COL_DATASTORE_URL] = datastore_url
if allow_create is True:
privileges = set_privileges(allow_create, privileges, 1)
if volume_maxsize_in_MB:
privileges[auth_data_const.COL_MAX_VOLUME_SIZE] = volume_maxsize_in_MB
if volume_totalsize_in_MB:
privileges[auth_data_const.COL_USAGE_QUOTA] = volume_totalsize_in_MB
logging.debug("generate_privileges: privileges=%s", privileges)
return privileges
def modify_privileges(privileges, allow_create, volume_maxsize_in_MB, volume_totalsize_in_MB):
""" Modify privileges based on input params """
logging.debug("modify_privileges: allow_create=%s, volume_maxsize_in_MB=%s, volume_totalsize_in_MB=%s",
allow_create, volume_maxsize_in_MB, volume_totalsize_in_MB)
# If None, don't change the privilege
# If not None, change accordingly
if allow_create is not None:
# allow_create has been validated. It is either True or False
if allow_create is True:
privileges = set_privileges(allow_create, privileges, 1)
else:
privileges = set_privileges(allow_create, privileges, 0)
if volume_maxsize_in_MB:
privileges[auth_data_const.COL_MAX_VOLUME_SIZE] = volume_maxsize_in_MB
if volume_totalsize_in_MB:
privileges[auth_data_const.COL_USAGE_QUOTA] = volume_totalsize_in_MB
return privileges
def generate_privileges_dict(privileges):
# privileges is a list which is read from auth DB
# it has the following format
# (tenant_uuid, datastore_url, allow_create, max_volume_size, usage_quota)
privileges_dict = {}
privileges_dict[auth_data_const.COL_DATASTORE_URL] = privileges.datastore_url
privileges_dict[auth_data_const.COL_ALLOW_CREATE] = privileges.allow_create
privileges_dict[auth_data_const.COL_MAX_VOLUME_SIZE] = privileges.max_volume_size
privileges_dict[auth_data_const.COL_USAGE_QUOTA] = privileges.usage_quota
return privileges_dict
def get_default_datastore_url(name):
"""
Get default_datastore url for given tenant
Return value:
--- error_info: return None on success or error info on failure
--- default_datastore: return name of default_datastore on success or None on failure
"""
logging.debug("auth_api.get_default_datastore_url: for tenant with name=%s", name)
error_info, auth_mgr = get_auth_mgr_object()
if error_info:
return error_info, None
if auth_mgr.allow_all_access():
if name == auth_data_const.DEFAULT_TENANT:
return None, auth_data_const.VM_DS_URL
else:
return generate_error_info(ErrorCode.INIT_NEEDED), None
error_info, tenant = get_tenant_from_db(name)
if error_info:
return error_info, None
if not tenant:
error_info = generate_error_info(ErrorCode.TENANT_NOT_EXIST, name)
return error_info, None
# if default_datastore is not set for this tenant, default_datastore will be None
error_msg, default_datastore_url = tenant.get_default_datastore(auth_mgr.conn)
if error_msg:
error_info = generate_error_info(ErrorCode.INTERNAL_ERROR, error_msg)
logging.debug("returning url %s", default_datastore_url)
return error_info, default_datastore_url
def is_tenant_name_valid(name):
""" Check given tenant name is valid or not """
if valid_tenant_name_reg.match(name):
return True
else:
return False
def is_vm_duplicate(vm_list):
"""
Check if vm names in vm_list contain duplicates
"""
if len(vm_list) != len(set(vm_list)):
error_info = error_code.generate_error_info(ErrorCode.VM_DUPLICATE, vm_list)
logging.error(error_info.msg)
return error_info
return None
def check_default_datastore(datastore_name):
"""
Check datastore with given name is a valid value for default_datastore
Returns None for success and err message for errors
"""
# The valid default_datastore name are:
# named datastore existing on the host
# hard coded datastore name "_VM_DS"
# "_ALL_DS" is not a valid value to set as "default_datastore"
if datastore_name == auth_data_const.VM_DS:
return None
if datastore_name == auth_data_const.ALL_DS:
return generate_error_info(ErrorCode.DS_DEFAULT_CANNOT_USE_ALL_DS)
if not vmdk_utils.validate_datastore(datastore_name):
error_info = generate_error_info(ErrorCode.DS_NOT_EXIST, datastore_name)
return error_info
return None
def set_default_ds(tenant, default_datastore, check_existing):
"""
Set "default_datastore" for given tenant and create a full access privilege
to "default_datastore" if entry does not exist
    Need to check whether the default_datastore to be set is the same as the existing
default_datastore when @Param check_existing is set to True
"""
# @Param tenant is a DockerVolumeTenant object
logging.debug("set_default_ds: tenant_name=%s default_datastore=%s check_existing=%d",
tenant.name, default_datastore, check_existing)
error_info, auth_mgr = get_auth_mgr_object()
if error_info:
return error_info
datastore_url = vmdk_utils.get_datastore_url(default_datastore)
# datastore_url will be set to "None" by "vmdk_utils.get_datastore_url" is "default_datastore"
# is not a valid datastore
if datastore_url is None:
error_info = generate_error_info(ErrorCode.DS_DEFAULT_NAME_INVALID, default_datastore)
return error_info
existing_default_ds_url = None
if check_existing:
error_msg, existing_default_ds_url = tenant.get_default_datastore(auth_mgr.conn)
if error_msg:
error_info = generate_error_info(ErrorCode.INTERNAL_ERROR, error_msg)
return error_info
# the "default_datastore" to be set is the same as existing "default_datastore" for this tenant
if datastore_url == existing_default_ds_url:
return None
error_msg = tenant.set_default_datastore(auth_mgr.conn, datastore_url)
if error_msg:
error_info = generate_error_info(ErrorCode.INTERNAL_ERROR, error_msg)
return error_info
existing_default_ds = vmdk_utils.get_datastore_name(existing_default_ds_url) if existing_default_ds_url is not None else None
logging.info("Existing default_datastore %s is being changed to %s for tenant %s", existing_default_ds,
default_datastore, tenant)
# create full access privilege to default_datastore
error_info = _tenant_access_add(name=tenant.name,
datastore=default_datastore,
allow_create=True)
# privilege to default_datastore already exist, no need to create
if error_info and error_info.code == ErrorCode.PRIVILEGE_ALREADY_EXIST:
logging.info(error_info.msg + " not overwriting the existing access privilege")
error_info = None
return error_info
@only_when_configured(ret_obj=True)
def _tenant_create(name, default_datastore, description="", vm_list=None, privileges=None):
""" API to create a tenant . Returns (ErrInfo, Tenant) """
logging.debug("_tenant_create: name=%s description=%s vm_list=%s privileges=%s default_ds=%s",
name, description, vm_list, privileges, default_datastore)
if not is_tenant_name_valid(name):
error_info = generate_error_info(ErrorCode.TENANT_NAME_INVALID, name, VALID_TENANT_NAME_REGEXP)
return error_info, None
# if param "description" is not set by caller, the default value is empty string
if not description:
description = ""
# VM list can be empty during tenant create. Validate only if it exists
vms = None
if vm_list:
error_info = is_vm_duplicate(vm_list)
if error_info:
return error_info, None
error_msg, vms, not_found_vms = generate_tuple_from_vm_list(vm_list)
if error_msg:
not_found_vm_list = ",".join(not_found_vms)
error_info = generate_error_info(ErrorCode.VM_NOT_FOUND, not_found_vm_list)
return error_info, None
error_info = vm_in_any_tenant(vms)
if error_info:
return error_info, None
error_info = vmdk_utils.check_volumes_mounted(vms)
if error_info:
error_info.msg = "Cannot add VM to vmgroup " + error_info.msg
logging.error(error_info.msg)
return error_info, None
logging.debug("_tenant_create: vms=%s", vms)
error_info = check_default_datastore(default_datastore)
if error_info:
return error_info, None
error_info, tenant = create_tenant_in_db(
name=name,
description=description,
vms=vms,
privileges=privileges)
if error_info:
return error_info, None
error_info = set_default_ds(tenant=tenant,
default_datastore=default_datastore,
check_existing=False)
if error_info:
return error_info, None
return None, tenant
@only_when_configured()
def _tenant_update(name, new_name=None, description=None, default_datastore=None):
""" API to update a tenant """
logging.debug("_tenant_update: name=%s, new_name=%s, descrption=%s, default_datastore=%s",
name, new_name, description, default_datastore)
error_info, tenant = get_tenant_from_db(name)
if error_info:
return error_info
if not tenant:
error_info = generate_error_info(ErrorCode.TENANT_NOT_EXIST, name)
return error_info
error_info, auth_mgr = get_auth_mgr_object()
if error_info:
return error_info
if default_datastore:
error_info = check_default_datastore(default_datastore)
if error_info:
return error_info
error_info = set_default_ds(tenant=tenant,
default_datastore=default_datastore,
check_existing=True)
if error_info:
return error_info
if new_name:
if name == auth_data_const.DEFAULT_TENANT:
error_info = generate_error_info(ErrorCode.TENANT_NAME_INVALID, name, VALID_TENANT_NAMES)
return error_info
# check whether tenant with new_name already exist or not
error_info = check_tenant_exist(new_name)
if error_info:
return error_info
        if not is_tenant_name_valid(new_name):
            error_info = generate_error_info(ErrorCode.TENANT_NAME_INVALID, new_name, VALID_TENANT_NAME_REGEXP)
return error_info
error_msg = tenant.set_name(auth_mgr.conn, name, new_name)
if error_msg:
error_info = generate_error_info(ErrorCode.INTERNAL_ERROR, error_msg)
return error_info
if description:
error_msg = tenant.set_description(auth_mgr.conn, description)
if error_msg:
error_info = generate_error_info(ErrorCode.INTERNAL_ERROR, error_msg)
return error_info
return None
@only_when_configured()
def _tenant_rm(name, remove_volumes=False):
""" API to remove a tenant """
logging.debug("_tenant_rm: name=%s remove_volumes=%s", name, remove_volumes)
error_info, tenant = get_tenant_from_db(name)
if error_info:
return error_info
if not tenant:
error_info = generate_error_info(ErrorCode.TENANT_NOT_EXIST, name)
return error_info
if tenant.vms:
error_info = generate_error_info(ErrorCode.TENANT_NOT_EMPTY, name)
logging.error(error_info.msg)
return error_info
error_info, auth_mgr = get_auth_mgr_object()
if error_info:
return error_info
error_msg = auth_mgr.remove_tenant(tenant.id, remove_volumes)
if error_msg:
error_info = generate_error_info(ErrorCode.INTERNAL_ERROR, error_msg)
return error_info
def _tenant_ls(name=None):
""" API to list all tenants """
logging.debug("_tenant_ls: name=%s", name)
error_info, tenant_list = get_tenant_list_from_db(name)
return error_info, tenant_list
def vm_already_in_tenant(name, vms):
"""
Check whether any vm in @param "vms" already exists in tenant @param "name"
"""
error_info, existing_vms = _tenant_vm_ls(name)
if error_info:
return error_info
for vm_id, vm_name in vms:
if vm_id in existing_vms:
error_info = generate_error_info(ErrorCode.VM_ALREADY_IN_TENANT,
vm_name, name)
logging.error(error_info.msg)
return error_info
return None
def vm_not_exist(name, vms):
"""
Check whether any vm in @param "vms" does not exist in tenant @param "name"
"""
error_info, existing_vms = _tenant_vm_ls(name)
if error_info:
return error_info
existing_vm_uuids = [vm_id for (vm_id, _) in existing_vms]
for vm_id, vm_name in vms:
        if vm_id not in existing_vm_uuids:
error_info = error_code.generate_error_info(ErrorCode.VM_NOT_IN_TENANT, vm_name, name)
logging.error(error_info.msg)
return error_info
return None
def vm_in_any_tenant(vms):
"""
Check if any vm in @param "vms" is a part of another tenant
"""
error_info, tenant_list = get_tenant_list_from_db()
if error_info:
return error_info
for tenant in tenant_list:
for vm_id, vm_name in vms:
if vm_id in dict(tenant.vms):
error_info = error_code.generate_error_info(ErrorCode.VM_IN_ANOTHER_TENANT,
vm_name, tenant.name)
logging.error(error_info.msg)
return error_info
return None
def named_tenant(func):
"""
Decorator to check whether the function is called by a named tenant.
Return error 'feature is not supported' if called by _DEFAULT tenant
"""
def not_supported():
return generate_error_info(ErrorCode.FEATURE_NOT_SUPPORTED, auth_data_const.DEFAULT_TENANT)
def check_name(name, vm_list):
if name == auth_data_const.DEFAULT_TENANT:
return not_supported()
return func(name, vm_list)
return check_name
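# Illustrative application of the decorator (the function name below is hypothetical):
#
#   @named_tenant
#   def _example_vm_op(name, vm_list):
#       ...  # body only runs for tenants other than _DEFAULT
#
# _example_vm_op(auth_data_const.DEFAULT_TENANT, ['vm1']) short-circuits and returns
# the FEATURE_NOT_SUPPORTED error instead of calling the wrapped function.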
@only_when_configured()
@named_tenant
def _tenant_vm_add(name, vm_list):
""" API to add vms for a tenant """
logging.debug("_tenant_vm_add: name=%s vm_list=%s", name, vm_list)
error_info, tenant = get_tenant_from_db(name)
if error_info:
return error_info
if not tenant:
error_info = generate_error_info(ErrorCode.TENANT_NOT_EXIST, name)
return error_info
if not vm_list:
error_info = generate_error_info(ErrorCode.VM_LIST_EMPTY)
return error_info
error_info = is_vm_duplicate(vm_list)
if error_info:
return error_info
error_msg, vms, not_found_vms = generate_tuple_from_vm_list(vm_list)
if error_msg:
not_found_vm_list = ",".join(not_found_vms)
error_info = generate_error_info(ErrorCode.VM_NOT_FOUND, not_found_vm_list)
return error_info
error_info = vm_already_in_tenant(name, vms)
if error_info:
return error_info
error_info = vm_in_any_tenant(vms)
if error_info:
return error_info
error_info = vmdk_utils.check_volumes_mounted(vms)
if error_info:
error_info.msg = "Cannot add VM to vmgroup " + error_info.msg
logging.error(error_info.msg)
return error_info
error_info, auth_mgr = get_auth_mgr_object()
if error_info:
return error_info
logging.debug("_tenant_vm_add: vms=%s", vms)
error_msg = tenant.add_vms(auth_mgr.conn, vms)
if error_msg:
error_info = generate_error_info(ErrorCode.INTERNAL_ERROR, error_msg)
return error_info
@only_when_configured()
@named_tenant
def _tenant_vm_rm(name, vm_list):
""" API to remove vms for a tenant """
logging.debug("_tenant_vm_rm: name=%s vm_list=%s", name, vm_list)
error_info, tenant = get_tenant_from_db(name)
if error_info:
return error_info
if not tenant:
error_info = generate_error_info(ErrorCode.TENANT_NOT_EXIST, name)
return error_info
if not vm_list:
error_info = generate_error_info(ErrorCode.VM_LIST_EMPTY)
return error_info
error_info = is_vm_duplicate(vm_list)
if error_info:
return error_info
error_msg, vms, not_found_vms = generate_tuple_from_vm_list(vm_list)
if error_msg:
not_found_vm_list = ",".join(not_found_vms)
error_info = generate_error_info(ErrorCode.VM_NOT_FOUND, not_found_vm_list)
return error_info
# check if vms to be removed have any volumes mounted.
error_info = vmdk_utils.check_volumes_mounted(vms)
if error_info:
error_info.msg = "Cannot complete vmgroup vm rm. " + error_info.msg
logging.error(error_info.msg)
return error_info
logging.debug("_tenant_vm_rm: vms=%s", vms)
error_info = vm_not_exist(name, vms)
if error_info:
return error_info
error_info, auth_mgr = get_auth_mgr_object()
if error_info:
return error_info
error_msg = tenant.remove_vms(auth_mgr.conn, vms)
if error_msg:
error_info = generate_error_info(ErrorCode.INTERNAL_ERROR, error_msg)
return error_info
def _tenant_vm_ls(name):
""" API to get vms for a tenant """
logging.debug("_tenant_vm_ls: name=%s", name)
error_info, tenant = get_tenant_from_db(name)
if error_info:
return error_info, None
if not tenant:
error_info = generate_error_info(ErrorCode.TENANT_NOT_EXIST, name)
return error_info, None
# tenant.vms is a list of vm_uuid of vms which belong to this tenant
return None, tenant.vms
@only_when_configured()
@named_tenant
def _tenant_vm_replace(name, vm_list):
""" API to replace vms for a tenant """
logging.debug("_tenant_vm_replace: name=%s vm_list=%s", name, vm_list)
error_info, tenant = get_tenant_from_db(name)
if error_info:
return error_info
if not tenant:
error_info = generate_error_info(ErrorCode.TENANT_NOT_EXIST, name)
return error_info
if not vm_list:
error_info = generate_error_info(ErrorCode.REPLACE_VM_EMPTY)
return error_info
error_info = is_vm_duplicate(vm_list)
if error_info:
return error_info
error_msg, vms, not_found_vms = generate_tuple_from_vm_list(vm_list)
if error_msg:
not_found_vm_list = ",".join(not_found_vms)
error_info = generate_error_info(ErrorCode.VM_NOT_FOUND, not_found_vm_list)
return error_info
error_info = vm_already_in_tenant(name, vms)
if error_info:
return error_info
error_info = vm_in_any_tenant(vms)
if error_info:
return error_info
# check if vms that would be replaced out have any volumes mounted
error_info, existing_vms = _tenant_vm_ls(name)
if error_info:
return error_info
error_info = vmdk_utils.check_volumes_mounted(existing_vms)
if error_info:
error_info.msg = "Cannot complete vmgroup vm replace. " + error_info.msg
logging.error(error_info.msg)
return error_info
logging.debug("_tenant_vm_replace: vms=%s", vms)
error_info, auth_mgr = get_auth_mgr_object()
if error_info:
return error_info
error_msg = tenant.replace_vms(auth_mgr.conn, vms)
if error_msg:
error_info = generate_error_info(ErrorCode.INTERNAL_ERROR, error_msg)
return error_info
def check_datastore(datastore_name):
""" Check datastore with given name is a valid datastore or not """
if datastore_name == auth_data_const.VM_DS:
return None
if datastore_name == auth_data_const.ALL_DS:
return None
if not vmdk_utils.validate_datastore(datastore_name):
error_info = generate_error_info(ErrorCode.DS_NOT_EXIST, datastore_name)
return error_info
return None
def privilege_exist(privileges, datastore_url):
""" Check whether a entry with given datastore_name exists in privileges """
for p in privileges:
if datastore_url == p.datastore_url:
return True
return False
def check_privilege_parameters(privilege):
"""
    Validate the privilege parameters.
Params:
-- privilege: privilege is a dictionary that contains privilege properties
Return value:
-- error_info: return None on success or error info on failure
"""
volume_maxsize = privilege[auth_data_const.COL_MAX_VOLUME_SIZE]
volume_totalsize = privilege[auth_data_const.COL_USAGE_QUOTA]
# If both volume max size and volume total size are set,
# volume max size should not exceed volume total size
if (volume_maxsize and volume_totalsize and (volume_maxsize > volume_totalsize)):
error_info = generate_error_info(ErrorCode.PRIVILEGE_INVALID_VOLUME_SIZE, volume_maxsize, volume_totalsize)
return error_info
return None
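# Illustrative check (hypothetical numbers): a privilege whose max single-volume size
# exceeds its total usage quota is rejected; otherwise None is returned.
#
#   check_privilege_parameters({auth_data_const.COL_MAX_VOLUME_SIZE: 2048,
#                               auth_data_const.COL_USAGE_QUOTA: 1024})
#   -> ErrorInfo for PRIVILEGE_INVALID_VOLUME_SIZE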
def check_usage_quota(datastore, volume_totalsize_in_MB):
"""
Check if the requested quota is valid in the given datastore
Return None if the usage_quota is valid
Return error_info if the usage_quota is invalid
"""
# usage_quota on "_VM_DS" and "_ALL_DS" should be "Unset"
if datastore == auth_data_const.VM_DS or datastore == auth_data_const.ALL_DS:
if volume_totalsize_in_MB is not None:
error_info = generate_error_info(ErrorCode.PRIVILEGE_SET_TOTAL_VOLUME_SIZE_LIMIT_NOT_ALLOWED,
datastore)
return error_info
@only_when_configured()
def _tenant_access_add(name, datastore, allow_create=None,
volume_maxsize_in_MB=None, volume_totalsize_in_MB=None):
""" API to add datastore access for a tenant """
logging.debug("_tenant_access_add: name=%s datastore=%s, allow_create=%s "
"volume_maxsize(MB)=%s volume_totalsize(MB)=%s", name, datastore, allow_create,
volume_maxsize_in_MB, volume_totalsize_in_MB)
error_info, tenant = get_tenant_from_db(name)
if error_info:
return error_info
if not tenant:
error_info = generate_error_info(ErrorCode.TENANT_NOT_EXIST, name)
return error_info
error_info = check_datastore(datastore)
if error_info:
return error_info
error_info = check_usage_quota(datastore, volume_totalsize_in_MB)
if error_info:
return error_info
datastore_url = vmdk_utils.get_datastore_url(datastore)
error_info, existing_privileges = _tenant_access_ls(name)
if error_info:
return error_info
if privilege_exist(existing_privileges, datastore_url):
error_info = generate_error_info(ErrorCode.PRIVILEGE_ALREADY_EXIST, name, datastore)
return error_info
# Possible value:
# None - no change required
# True/False (boolean or string) - change to corresponding True/False
if allow_create is not None:
# validate to boolean value if it is a string
allow_create_val, valid = validate_string_to_bool(allow_create)
if not valid:
err_code = ErrorCode.PRIVILEGE_INVALID_ALLOW_CREATE_VALUE
err_msg = error_code_to_message[err_code].format(allow_create)
logging.error(err_msg)
return ErrorInfo(err_code, err_msg)
allow_create = allow_create_val
privileges = generate_privileges(datastore_url=datastore_url,
allow_create=allow_create,
volume_maxsize_in_MB=volume_maxsize_in_MB,
volume_totalsize_in_MB=volume_totalsize_in_MB)
logging.debug("_tenant_access_add: privileges=%s", privileges)
error_info = check_privilege_parameters(privilege=privileges)
if error_info:
return error_info
error_info, auth_mgr = get_auth_mgr_object()
if error_info:
return error_info
error_msg = tenant.set_datastore_access_privileges(auth_mgr.conn, [privileges])
if error_msg:
error_info = generate_error_info(ErrorCode.INTERNAL_ERROR, error_msg)
return error_info
return error_info
@only_when_configured()
def _tenant_access_set(name, datastore, allow_create=None, volume_maxsize_in_MB=None, volume_totalsize_in_MB=None):
""" API to modify datastore access for a tenant """
logging.debug("_tenant_access_set: name=%s datastore=%s, allow_create=%s "
"volume_maxsize(MB)=%s volume_totalsize(MB)=%s", name, datastore, allow_create,
volume_maxsize_in_MB, volume_totalsize_in_MB)
error_info, tenant = get_tenant_from_db(name)
if error_info:
return error_info
if not tenant:
error_info = generate_error_info(ErrorCode.TENANT_NOT_EXIST, name)
return error_info
error_info = check_datastore(datastore)
if error_info:
return error_info
error_info = check_usage_quota(datastore, volume_totalsize_in_MB)
if error_info:
return error_info
datastore_url = vmdk_utils.get_datastore_url(datastore)
error_info, existing_privileges = _tenant_access_ls(name)
if error_info:
return error_info
if not privilege_exist(existing_privileges, datastore_url):
error_info = generate_error_info(ErrorCode.PRIVILEGE_NOT_FOUND, name, datastore)
return error_info
logging.debug("_tenant_access_set: datastore_url=%s", datastore_url)
privileges = [d for d in tenant.privileges if d.datastore_url == datastore_url]
if not privileges:
err_code = ErrorCode.PRIVILEGE_NOT_FOUND
err_msg = error_code_to_message[err_code].format(name, datastore)
error_info = ErrorInfo(err_code, err_msg)
return error_info
if allow_create is not None:
allow_create_val, valid = validate_string_to_bool(allow_create)
if not valid:
err_code = ErrorCode.PRIVILEGE_INVALID_ALLOW_CREATE_VALUE
err_msg = error_code_to_message[err_code].format(allow_create)
logging.error(err_msg)
return ErrorInfo(err_code, err_msg)
allow_create = allow_create_val
privileges_dict = generate_privileges_dict(privileges[0])
logging.debug("_tenant_access_set: originial privileges_dict=%s", privileges_dict)
privileges_dict = modify_privileges(privileges=privileges_dict,
allow_create=allow_create,
volume_maxsize_in_MB=volume_maxsize_in_MB,
volume_totalsize_in_MB=volume_totalsize_in_MB)
logging.debug("_tenant_access_set: modified privileges_dict=%s", privileges_dict)
error_info = check_privilege_parameters(privilege=privileges_dict)
if error_info:
return error_info
error_info, auth_mgr = get_auth_mgr_object()
if error_info:
return error_info
error_msg = tenant.set_datastore_access_privileges(auth_mgr.conn, [privileges_dict])
if error_msg:
error_info = generate_error_info(ErrorCode.INTERNAL_ERROR, error_msg)
return error_info
@only_when_configured()
def _tenant_access_rm(name, datastore):
""" API to remove datastore access for a tenant """
logging.debug("_tenant_access_rm: name=%s datastore=%s", name, datastore)
error_info, tenant = get_tenant_from_db(name)
if error_info:
return error_info
if not tenant:
error_info = generate_error_info(ErrorCode.TENANT_NOT_EXIST, name)
return error_info
error_info = check_datastore(datastore)
if error_info:
return error_info
datastore_url = vmdk_utils.get_datastore_url(datastore)
error_info, existing_privileges = _tenant_access_ls(name)
if error_info:
return error_info
if not privilege_exist(existing_privileges, datastore_url):
error_info = generate_error_info(ErrorCode.PRIVILEGE_NOT_FOUND, name, datastore)
return error_info
error_info, auth_mgr = get_auth_mgr_object()
if error_info:
return error_info
    # get default_datastore for this tenant
# if the default_datastore is equal to param "datastore", which means
# we are trying to remove a row in "privilege" table with datastore which is
# marked as default_datastore of this tenant, should return with error
error_info, default_datastore_url = get_default_datastore_url(name)
if error_info:
return error_info
if default_datastore_url == datastore_url:
error_info = generate_error_info(ErrorCode.PRIVILEGE_REMOVE_NOT_ALLOWED)
return error_info
logging.debug("_tenant_access_rm: datastore_url=%s", datastore_url)
error_msg = tenant.remove_datastore_access_privileges(auth_mgr.conn, datastore_url)
if error_msg:
error_info = generate_error_info(ErrorCode.INTERNAL_ERROR, error_msg)
return error_info
return None
@only_when_configured(ret_obj=True)
def _tenant_access_ls(name):
""" Handle tenant access ls command. Returns (ErrInfo, [list of privileges]) """
logging.debug("_tenant_access_ls: name=%s", name)
error_info, tenant = get_tenant_from_db(name)
if error_info:
return error_info, None
if not tenant:
error_info = generate_error_info(ErrorCode.TENANT_NOT_EXIST, name)
return error_info, None
return None, tenant.privileges
| apache-2.0 |
SpatialMetabolomics/SM_distributed | tests/test_imzml_txt_converter_db.py | 2 | 1137 | import numpy as np
from unittest.mock import patch
from sm.engine.ms_txt_converter import MsTxtConverter
from sm.engine.util import SMConfig
from sm.engine.tests.util import sm_config, ds_config
@patch('sm.engine.ms_txt_converter.MsTxtConverter._parser_factory')
def test_convert(MockImzMLParser, sm_config):
mock_parser = MockImzMLParser.return_value
mock_parser.coordinates = [(1, 1), (1, 2)]
mock_parser.getspectrum.side_effect = [(np.array([100., 200.]), np.array([100., 10.])),
(np.array([100., 200.]), np.array([100., 10.]))]
SMConfig._config_dict = sm_config
converter = MsTxtConverter('imzml_path', 'txt_path', 'coord_path')
with patch('sm.engine.ms_txt_converter.open', create=True) as mock_open:
converter.convert()
mock_open_write_args = [args[0] for _, args, kw_args in mock_open.mock_calls if args]
assert '0|100.0 200.0|100.0 10.0\n' in mock_open_write_args
assert '1|100.0 200.0|100.0 10.0\n' in mock_open_write_args
assert '0,1,1\n' in mock_open_write_args
assert '1,1,2\n' in mock_open_write_args
| apache-2.0 |
EduPepperPD/pepper2013 | cms/djangoapps/contentstore/tests/test_import_export.py | 7 | 3135 | """
Unit tests for course import and export
"""
import os
import shutil
import tarfile
import tempfile
import copy
from uuid import uuid4
from pymongo import MongoClient
from .utils import CourseTestCase
from django.core.urlresolvers import reverse
from django.test.utils import override_settings
from django.conf import settings
from xmodule.contentstore.django import _CONTENTSTORE
TEST_DATA_CONTENTSTORE = copy.deepcopy(settings.CONTENTSTORE)
TEST_DATA_CONTENTSTORE['OPTIONS']['db'] = 'test_xcontent_%s' % uuid4().hex
@override_settings(CONTENTSTORE=TEST_DATA_CONTENTSTORE)
class ImportTestCase(CourseTestCase):
"""
Unit tests for importing a course
"""
def setUp(self):
super(ImportTestCase, self).setUp()
self.url = reverse("import_course", kwargs={
'org': self.course.location.org,
'course': self.course.location.course,
'name': self.course.location.name,
})
self.content_dir = tempfile.mkdtemp()
def touch(name):
""" Equivalent to shell's 'touch'"""
with file(name, 'a'):
os.utime(name, None)
# Create tar test files -----------------------------------------------
# OK course:
good_dir = tempfile.mkdtemp(dir=self.content_dir)
os.makedirs(os.path.join(good_dir, "course"))
with open(os.path.join(good_dir, "course.xml"), "w+") as f:
f.write('<course url_name="2013_Spring" org="EDx" course="0.00x"/>')
with open(os.path.join(good_dir, "course", "2013_Spring.xml"), "w+") as f:
f.write('<course></course>')
self.good_tar = os.path.join(self.content_dir, "good.tar.gz")
with tarfile.open(self.good_tar, "w:gz") as gtar:
gtar.add(good_dir)
# Bad course (no 'course.xml' file):
bad_dir = tempfile.mkdtemp(dir=self.content_dir)
touch(os.path.join(bad_dir, "bad.xml"))
self.bad_tar = os.path.join(self.content_dir, "bad.tar.gz")
with tarfile.open(self.bad_tar, "w:gz") as btar:
btar.add(bad_dir)
def tearDown(self):
shutil.rmtree(self.content_dir)
MongoClient().drop_database(TEST_DATA_CONTENTSTORE['OPTIONS']['db'])
_CONTENTSTORE.clear()
def test_no_coursexml(self):
"""
Check that the response for a tar.gz import without a course.xml is
correct.
"""
with open(self.bad_tar) as btar:
resp = self.client.post(
self.url,
{
"name": self.bad_tar,
"course-data": [btar]
})
self.assertEquals(resp.status_code, 415)
def test_with_coursexml(self):
"""
Check that the response for a tar.gz import with a course.xml is
correct.
"""
with open(self.good_tar) as gtar:
resp = self.client.post(
self.url,
{
"name": self.good_tar,
"course-data": [gtar]
})
self.assertEquals(resp.status_code, 200)
| agpl-3.0 |
google-research/rigl | rigl/imagenet_resnet/utils.py | 1 | 4331 | # coding=utf-8
# Copyright 2021 RigL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helped functions to concatenate subset of noisy images to batch."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
from tensorflow.compat.v2 import summary
IMG_SUMMARY_PREFIX = '_img_'
def format_tensors(*dicts):
"""Format metrics to be callable as tf.summary scalars on tpu's.
Args:
*dicts: A set of metric dictionaries, containing metric name + value tensor.
Returns:
A single formatted dictionary that holds all tensors.
Raises:
ValueError: if any tensor is not a scalar.
"""
merged_summaries = {}
for d in dicts:
for metric_name, value in d.items():
shape = value.shape.as_list()
if metric_name.startswith(IMG_SUMMARY_PREFIX):
# If image, shape it into 2d.
merged_summaries[metric_name] = tf.reshape(value,
(1, -1, value.shape[-1], 1))
elif not shape:
merged_summaries[metric_name] = tf.expand_dims(value, axis=0)
elif shape == [1]:
merged_summaries[metric_name] = value
else:
raise ValueError(
            'Metric {} has value {} that is not reconcilable'.format(
metric_name, value))
return merged_summaries
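# Illustrative call (tensor values are hypothetical): plain scalars are expanded to
# shape [1], and entries whose names start with IMG_SUMMARY_PREFIX are reshaped into
# 4-D image tensors, so every value can be shipped through the TPU host call.
#
#   summaries = format_tensors({'loss': tf.constant(0.5)},
#                              {IMG_SUMMARY_PREFIX + 'mask/conv1': tf.zeros([3, 3])})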
def host_call_fn(model_dir, **kwargs):
"""host_call function used for creating training summaries when using TPU.
Args:
model_dir: String indicating the output_dir to save summaries in.
**kwargs: Set of metric names and tensor values for all desired summaries.
Returns:
Summary op to be passed to the host_call arg of the estimator function.
"""
gs = kwargs.pop('global_step')[0]
with summary.create_file_writer(model_dir).as_default():
# Always record summaries.
with summary.record_if(True):
for name, tensor in kwargs.items():
if name.startswith(IMG_SUMMARY_PREFIX):
summary.image(name.replace(IMG_SUMMARY_PREFIX, ''), tensor,
max_images=1)
else:
summary.scalar(name, tensor[0], step=gs)
# Following function is under tf:1x, so we use it.
return tf.summary.all_v2_summary_ops()
def mask_summaries(masks, with_img=False):
metrics = {}
for mask in masks:
metrics['pruning/{}/sparsity'.format(
mask.op.name)] = tf.nn.zero_fraction(mask)
if with_img:
metrics[IMG_SUMMARY_PREFIX + 'mask/' + mask.op.name] = mask
return metrics
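# Sketch of how these helpers are typically wired into a TPUEstimator spec (the
# flag/variable names here are illustrative, not part of this module):
#
#   summaries = format_tensors({'global_step': tf.train.get_or_create_global_step()},
#                              mask_summaries(masks, with_img=True))
#   host_call = (functools.partial(host_call_fn, model_dir), summaries)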
def initialize_parameters_from_ckpt(ckpt_path, model_dir, param_suffixes):
"""Load parameters from an existing checkpoint.
Args:
ckpt_path: str, loads the mask variables from this checkpoint.
model_dir: str, if checkpoint exists in this folder no-op.
    param_suffixes: list or str, suffix of parameters to be loaded from
checkpoint.
"""
already_has_ckpt = model_dir and tf.train.latest_checkpoint(
model_dir) is not None
if already_has_ckpt:
tf.logging.info(
'Training already started on this model, not loading masks from'
'previously trained model')
return
reader = tf.train.NewCheckpointReader(ckpt_path)
param_names = reader.get_variable_to_shape_map().keys()
param_names = [x for x in param_names if x.endswith(param_suffixes)]
variable_map = {}
for var in tf.global_variables():
var_name = var.name.split(':')[0]
if var_name in param_names:
tf.logging.info('Loading parameter variable from checkpoint: %s',
var_name)
variable_map[var_name] = var
elif var_name.endswith(param_suffixes):
tf.logging.info(
'Cannot find parameter variable in checkpoint, skipping: %s',
var_name)
tf.train.init_from_checkpoint(ckpt_path, variable_map)
| apache-2.0 |
dsajkl/reqiop | common/test/acceptance/pages/studio/container.py | 9 | 14433 | """
Container page in Studio
"""
from bok_choy.page_object import PageObject
from bok_choy.promise import Promise, EmptyPromise
from . import BASE_URL
from utils import click_css, confirm_prompt
class ContainerPage(PageObject):
"""
Container page in Studio
"""
NAME_SELECTOR = '.page-header-title'
NAME_INPUT_SELECTOR = '.page-header .xblock-field-input'
NAME_FIELD_WRAPPER_SELECTOR = '.page-header .wrapper-xblock-field'
ADD_MISSING_GROUPS_SELECTOR = '.notification-action-button[data-notification-action="add-missing-groups"]'
def __init__(self, browser, locator):
super(ContainerPage, self).__init__(browser)
self.locator = locator
@property
def url(self):
"""URL to the container page for an xblock."""
return "{}/container/{}".format(BASE_URL, self.locator)
@property
def name(self):
titles = self.q(css=self.NAME_SELECTOR).text
if titles:
return titles[0]
else:
return None
def is_browser_on_page(self):
def _xblock_count(class_name, request_token):
return len(self.q(css='{body_selector} .xblock.{class_name}[data-request-token="{request_token}"]'.format(
body_selector=XBlockWrapper.BODY_SELECTOR, class_name=class_name, request_token=request_token
)).results)
def _is_finished_loading():
is_done = False
# Get the request token of the first xblock rendered on the page and assume it is correct.
data_request_elements = self.q(css='[data-request-token]')
if len(data_request_elements) > 0:
request_token = data_request_elements.first.attrs('data-request-token')[0]
# Then find the number of Studio xblock wrappers on the page with that request token.
num_wrappers = len(self.q(css='{} [data-request-token="{}"]'.format(XBlockWrapper.BODY_SELECTOR, request_token)).results)
# Wait until all components have been loaded and marked as either initialized or failed.
# See:
# - common/static/coffee/src/xblock/core.coffee which adds the class "xblock-initialized"
# at the end of initializeBlock.
# - common/static/js/views/xblock.js which adds the class "xblock-initialization-failed"
# if the xblock threw an error while initializing.
num_initialized_xblocks = _xblock_count('xblock-initialized', request_token)
num_failed_xblocks = _xblock_count('xblock-initialization-failed', request_token)
is_done = num_wrappers == (num_initialized_xblocks + num_failed_xblocks)
return (is_done, is_done)
# First make sure that an element with the view-container class is present on the page,
# and then wait for the loading spinner to go away and all the xblocks to be initialized.
return (
self.q(css='body.view-container').present and
self.q(css='div.ui-loading.is-hidden').present and
Promise(_is_finished_loading, 'Finished rendering the xblock wrappers.').fulfill()
)
def wait_for_component_menu(self):
"""
Waits until the menu bar of components is present on the page.
"""
EmptyPromise(
lambda: self.q(css='div.add-xblock-component').present,
'Wait for the menu of components to be present'
).fulfill()
@property
def xblocks(self):
"""
Return a list of xblocks loaded on the container page.
"""
return self._get_xblocks()
@property
def inactive_xblocks(self):
"""
Return a list of inactive xblocks loaded on the container page.
"""
return self._get_xblocks(".is-inactive ")
@property
def active_xblocks(self):
"""
Return a list of active xblocks loaded on the container page.
"""
return self._get_xblocks(".is-active ")
@property
def publish_title(self):
"""
Returns the title as displayed on the publishing sidebar component.
"""
return self.q(css='.pub-status').first.text[0]
@property
def release_title(self):
"""
Returns the title before the release date in the publishing sidebar component.
"""
return self.q(css='.wrapper-release .title').first.text[0]
@property
def release_date(self):
"""
Returns the release date of the unit (with ancestor inherited from), as displayed
in the publishing sidebar component.
"""
return self.q(css='.wrapper-release .copy').first.text[0]
@property
def last_saved_text(self):
"""
Returns the last saved message as displayed in the publishing sidebar component.
"""
return self.q(css='.wrapper-last-draft').first.text[0]
@property
def last_published_text(self):
"""
Returns the last published message as displayed in the sidebar.
"""
return self.q(css='.wrapper-last-publish').first.text[0]
@property
def currently_visible_to_students(self):
"""
Returns True if the unit is marked as currently visible to students
(meaning that a warning is being displayed).
"""
warnings = self.q(css='.container-message .warning')
if not warnings.is_present():
return False
warning_text = warnings.first.text[0]
return warning_text == "Caution: The last published version of this unit is live. By publishing changes you will change the student experience."
def shows_inherited_staff_lock(self, parent_type=None, parent_name=None):
"""
Returns True if the unit inherits staff lock from a section or subsection.
"""
return self.q(css='.bit-publishing .wrapper-visibility .copy .inherited-from').visible
@property
def publish_action(self):
"""
Returns the link for publishing a unit.
"""
return self.q(css='.action-publish').first
def discard_changes(self):
"""
Discards draft changes (which will then re-render the page).
"""
click_css(self, 'a.action-discard', 0, require_notification=False)
confirm_prompt(self)
self.wait_for_ajax()
@property
def is_staff_locked(self):
""" Returns True if staff lock is currently enabled, False otherwise """
return 'icon-check' in self.q(css='a.action-staff-lock>i').attrs('class')
def toggle_staff_lock(self, inherits_staff_lock=False):
"""
Toggles "hide from students" which enables or disables a staff-only lock.
Returns True if the lock is now enabled, else False.
"""
was_locked_initially = self.is_staff_locked
if not was_locked_initially:
self.q(css='a.action-staff-lock').first.click()
else:
click_css(self, 'a.action-staff-lock', 0, require_notification=False)
if not inherits_staff_lock:
confirm_prompt(self)
self.wait_for_ajax()
return not was_locked_initially
def view_published_version(self):
"""
Clicks "View Live Version", which will open the published version of the unit page in the LMS.
Switches the browser to the newly opened LMS window.
"""
self.q(css='.button-view').first.click()
self._switch_to_lms()
def preview(self):
"""
Clicks "Preview Changes", which will open the draft version of the unit page in the LMS.
Switches the browser to the newly opened LMS window.
"""
self.q(css='.button-preview').first.click()
self._switch_to_lms()
def _switch_to_lms(self):
"""
Assumes LMS has opened-- switches to that window.
"""
browser_window_handles = self.browser.window_handles
# Switch to browser window that shows HTML Unit in LMS
# The last handle represents the latest windows opened
self.browser.switch_to_window(browser_window_handles[-1])
def _get_xblocks(self, prefix=""):
return self.q(css=prefix + XBlockWrapper.BODY_SELECTOR).map(
lambda el: XBlockWrapper(self.browser, el.get_attribute('data-locator'))).results
def duplicate(self, source_index):
"""
Duplicate the item with index source_index (based on vertical placement in page).
"""
click_css(self, 'a.duplicate-button', source_index)
def delete(self, source_index):
"""
Delete the item with index source_index (based on vertical placement in page).
Only visible items are counted in the source_index.
The index of the first item is 0.
"""
# Click the delete button
click_css(self, 'a.delete-button', source_index, require_notification=False)
# Click the confirmation dialog button
confirm_prompt(self)
def edit(self):
"""
Clicks the "edit" button for the first component on the page.
"""
return _click_edit(self)
def add_missing_groups(self):
"""
Click the "add missing groups" link.
Note that this does an ajax call.
"""
self.q(css=self.ADD_MISSING_GROUPS_SELECTOR).first.click()
self.wait_for_ajax()
# Wait until all xblocks rendered.
self.wait_for_page()
def missing_groups_button_present(self):
"""
Returns True if the "add missing groups" button is present.
"""
return self.q(css=self.ADD_MISSING_GROUPS_SELECTOR).present
def get_xblock_information_message(self):
"""
Returns an information message for the container page.
"""
return self.q(css=".xblock-message.information").first.text[0]
def is_inline_editing_display_name(self):
"""
Return whether this container's display name is in its editable form.
"""
return "is-editing" in self.q(css=self.NAME_FIELD_WRAPPER_SELECTOR).first.attrs("class")[0]
class XBlockWrapper(PageObject):
"""
A PageObject representing a wrapper around an XBlock child shown on the Studio container page.
"""
url = None
BODY_SELECTOR = '.studio-xblock-wrapper'
NAME_SELECTOR = '.xblock-display-name'
COMPONENT_BUTTONS = {
'basic_tab': '.editor-tabs li.inner_tab_wrap:nth-child(1) > a',
'advanced_tab': '.editor-tabs li.inner_tab_wrap:nth-child(2) > a',
'save_settings': '.action-save',
}
def __init__(self, browser, locator):
super(XBlockWrapper, self).__init__(browser)
self.locator = locator
def is_browser_on_page(self):
return self.q(css='{}[data-locator="{}"]'.format(self.BODY_SELECTOR, self.locator)).present
def _bounded_selector(self, selector):
"""
Return `selector`, but limited to this particular `CourseOutlineChild` context
"""
return '{}[data-locator="{}"] {}'.format(
self.BODY_SELECTOR,
self.locator,
selector
)
@property
def student_content(self):
"""
Returns the text content of the xblock as displayed on the container page.
"""
return self.q(css=self._bounded_selector('.xblock-student_view'))[0].text
@property
def name(self):
titles = self.q(css=self._bounded_selector(self.NAME_SELECTOR)).text
if titles:
return titles[0]
else:
return None
@property
def children(self):
"""
Will return any first-generation descendant xblocks of this xblock.
"""
descendants = self.q(css=self._bounded_selector(self.BODY_SELECTOR)).map(
lambda el: XBlockWrapper(self.browser, el.get_attribute('data-locator'))).results
# Now remove any non-direct descendants.
grandkids = []
for descendant in descendants:
grandkids.extend(descendant.children)
grand_locators = [grandkid.locator for grandkid in grandkids]
        return [descendant for descendant in descendants if descendant.locator not in grand_locators]
@property
def preview_selector(self):
return self._bounded_selector('.xblock-student_view,.xblock-author_view')
def go_to_container(self):
"""
Open the container page linked to by this xblock, and return
an initialized :class:`.ContainerPage` for that xblock.
"""
return ContainerPage(self.browser, self.locator).visit()
def edit(self):
"""
Clicks the "edit" button for this xblock.
"""
return _click_edit(self, self._bounded_selector)
def open_advanced_tab(self):
"""
Click on Advanced Tab.
"""
self._click_button('advanced_tab')
def open_basic_tab(self):
"""
Click on Basic Tab.
"""
self._click_button('basic_tab')
def save_settings(self):
"""
Click on settings Save button.
"""
self._click_button('save_settings')
@property
def editor_selector(self):
return '.xblock-studio_view'
def _click_button(self, button_name):
"""
Click on a button as specified by `button_name`
Arguments:
button_name (str): button name
"""
self.q(css=self.COMPONENT_BUTTONS[button_name]).first.click()
self.wait_for_ajax()
def go_to_group_configuration_page(self):
"""
Go to the Group Configuration used by the component.
"""
self.q(css=self._bounded_selector('span.message-text a')).first.click()
@property
def group_configuration_link_name(self):
"""
Get Group Configuration name from link.
"""
return self.q(css=self._bounded_selector('span.message-text a')).first.text[0]
def _click_edit(page_object, bounded_selector=lambda(x): x):
"""
Click on the first edit button found and wait for the Studio editor to be present.
"""
page_object.q(css=bounded_selector('.edit-button')).first.click()
EmptyPromise(
lambda: page_object.q(css='.xblock-studio_view').present,
'Wait for the Studio editor to be present'
).fulfill()
return page_object
| agpl-3.0 |
aroche/django | django/template/context.py | 105 | 9348 | import warnings
from contextlib import contextmanager
from copy import copy
from django.utils.deprecation import RemovedInDjango110Warning
# Hard-coded processor for easier use of CSRF protection.
_builtin_context_processors = ('django.template.context_processors.csrf',)
_current_app_undefined = object()
class ContextPopException(Exception):
"pop() has been called more times than push()"
pass
class ContextDict(dict):
def __init__(self, context, *args, **kwargs):
super(ContextDict, self).__init__(*args, **kwargs)
context.dicts.append(self)
self.context = context
def __enter__(self):
return self
def __exit__(self, *args, **kwargs):
self.context.pop()
class BaseContext(object):
def __init__(self, dict_=None):
self._reset_dicts(dict_)
def _reset_dicts(self, value=None):
builtins = {'True': True, 'False': False, 'None': None}
self.dicts = [builtins]
if value is not None:
self.dicts.append(value)
def __copy__(self):
duplicate = copy(super(BaseContext, self))
duplicate.dicts = self.dicts[:]
return duplicate
def __repr__(self):
return repr(self.dicts)
def __iter__(self):
for d in reversed(self.dicts):
yield d
def push(self, *args, **kwargs):
return ContextDict(self, *args, **kwargs)
def pop(self):
if len(self.dicts) == 1:
raise ContextPopException
return self.dicts.pop()
def __setitem__(self, key, value):
"Set a variable in the current context"
self.dicts[-1][key] = value
def __getitem__(self, key):
"Get a variable's value, starting at the current context and going upward"
for d in reversed(self.dicts):
if key in d:
return d[key]
raise KeyError(key)
def __delitem__(self, key):
"Delete a variable from the current context"
del self.dicts[-1][key]
def has_key(self, key):
for d in self.dicts:
if key in d:
return True
return False
def __contains__(self, key):
return self.has_key(key)
def get(self, key, otherwise=None):
for d in reversed(self.dicts):
if key in d:
return d[key]
return otherwise
def setdefault(self, key, default=None):
try:
return self[key]
except KeyError:
self[key] = default
return default
def new(self, values=None):
"""
Returns a new context with the same properties, but with only the
values given in 'values' stored.
"""
new_context = copy(self)
new_context._reset_dicts(values)
return new_context
def flatten(self):
"""
Returns self.dicts as one dictionary
"""
flat = {}
for d in self.dicts:
flat.update(d)
return flat
def __eq__(self, other):
"""
        Compares two contexts by comparing their 'dicts' attributes.
"""
if isinstance(other, BaseContext):
# because dictionaries can be put in different order
# we have to flatten them like in templates
return self.flatten() == other.flatten()
# if it's not comparable return false
return False
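# Minimal sketch of the stack behaviour described above (values are illustrative):
#
#   c = BaseContext({'x': 1})
#   with c.push(x=2):
#       c['x']      # -> 2, the innermost dict wins
#   c['x']          # -> 1, the pushed dict is popped on exiting the block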
class Context(BaseContext):
"A stack container for variable context"
def __init__(self, dict_=None, autoescape=True,
current_app=_current_app_undefined,
use_l10n=None, use_tz=None):
if current_app is not _current_app_undefined:
warnings.warn(
"The current_app argument of Context is deprecated. Use "
"RequestContext and set the current_app attribute of its "
"request instead.", RemovedInDjango110Warning, stacklevel=2)
self.autoescape = autoescape
self._current_app = current_app
self.use_l10n = use_l10n
self.use_tz = use_tz
self.template_name = "unknown"
self.render_context = RenderContext()
# Set to the original template -- as opposed to extended or included
# templates -- during rendering, see bind_template.
self.template = None
super(Context, self).__init__(dict_)
@property
def current_app(self):
return None if self._current_app is _current_app_undefined else self._current_app
@property
def is_current_app_set(self):
return self._current_app is not _current_app_undefined
@contextmanager
def bind_template(self, template):
if self.template is not None:
raise RuntimeError("Context is already bound to a template")
self.template = template
try:
yield
finally:
self.template = None
def __copy__(self):
duplicate = super(Context, self).__copy__()
duplicate.render_context = copy(self.render_context)
return duplicate
def update(self, other_dict):
"Pushes other_dict to the stack of dictionaries in the Context"
if not hasattr(other_dict, '__getitem__'):
raise TypeError('other_dict must be a mapping (dictionary-like) object.')
return ContextDict(self, other_dict)
class RenderContext(BaseContext):
"""
A stack container for storing Template state.
RenderContext simplifies the implementation of template Nodes by providing a
safe place to store state between invocations of a node's `render` method.
The RenderContext also provides scoping rules that are more sensible for
'template local' variables. The render context stack is pushed before each
template is rendered, creating a fresh scope with nothing in it. Name
resolution fails if a variable is not found at the top of the RequestContext
stack. Thus, variables are local to a specific template and don't affect the
rendering of other templates as they would if they were stored in the normal
template context.
"""
def __iter__(self):
for d in self.dicts[-1]:
yield d
def has_key(self, key):
return key in self.dicts[-1]
def get(self, key, otherwise=None):
return self.dicts[-1].get(key, otherwise)
def __getitem__(self, key):
return self.dicts[-1][key]
class RequestContext(Context):
"""
This subclass of template.Context automatically populates itself using
the processors defined in the engine's configuration.
Additional processors can be specified as a list of callables
using the "processors" keyword argument.
"""
def __init__(self, request, dict_=None, processors=None,
current_app=_current_app_undefined,
use_l10n=None, use_tz=None):
# current_app isn't passed here to avoid triggering the deprecation
# warning in Context.__init__.
super(RequestContext, self).__init__(
dict_, use_l10n=use_l10n, use_tz=use_tz)
if current_app is not _current_app_undefined:
warnings.warn(
"The current_app argument of RequestContext is deprecated. "
"Set the current_app attribute of its request instead.",
RemovedInDjango110Warning, stacklevel=2)
self._current_app = current_app
self.request = request
self._processors = () if processors is None else tuple(processors)
self._processors_index = len(self.dicts)
# placeholder for context processors output
self.update({})
# empty dict for any new modifications
# (so that context processors don't overwrite them)
self.update({})
@contextmanager
def bind_template(self, template):
if self.template is not None:
raise RuntimeError("Context is already bound to a template")
self.template = template
# Set context processors according to the template engine's settings.
processors = (template.engine.template_context_processors +
self._processors)
updates = {}
for processor in processors:
updates.update(processor(self.request))
self.dicts[self._processors_index] = updates
try:
yield
finally:
self.template = None
# Unset context processors.
self.dicts[self._processors_index] = {}
def new(self, values=None):
new_context = super(RequestContext, self).new(values)
# This is for backwards-compatibility: RequestContexts created via
# Context.new don't include values from context processors.
if hasattr(new_context, '_processors_index'):
del new_context._processors_index
return new_context
def make_context(context, request=None):
"""
Create a suitable Context from a plain dict and optionally an HttpRequest.
"""
if request is None:
context = Context(context)
else:
# The following pattern is required to ensure values from
# context override those from template context processors.
original_context = context
context = RequestContext(request)
if original_context:
context.push(original_context)
return context
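# Illustrative usage (the request object is hypothetical): passing a request yields a
# RequestContext, and values supplied here override context-processor output.
#
#   ctx = make_context({'title': 'Home'})             # plain Context
#   ctx = make_context({'title': 'Home'}, request)    # RequestContext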
| bsd-3-clause |
stuckj/dupeguru | core_pe/tests/cache_test.py | 1 | 4134 | # Created By: Virgil Dupras
# Created On: 2006/09/14
# Copyright 2015 Hardcoded Software (http://www.hardcoded.net)
#
# This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
# http://www.gnu.org/licenses/gpl-3.0.html
import logging
from pytest import raises, skip
from hscommon.testutil import eq_
try:
from ..cache import Cache, colors_to_string, string_to_colors
except ImportError:
skip("Can't import the cache module, probably hasn't been compiled.")
class TestCasecolors_to_string:
def test_no_color(self):
eq_('',colors_to_string([]))
def test_single_color(self):
eq_('000000',colors_to_string([(0,0,0)]))
eq_('010101',colors_to_string([(1,1,1)]))
eq_('0a141e',colors_to_string([(10,20,30)]))
def test_two_colors(self):
eq_('000102030405',colors_to_string([(0,1,2),(3,4,5)]))
class TestCasestring_to_colors:
def test_empty(self):
eq_([],string_to_colors(''))
def test_single_color(self):
eq_([(0,0,0)],string_to_colors('000000'))
eq_([(2,3,4)],string_to_colors('020304'))
eq_([(10,20,30)],string_to_colors('0a141e'))
def test_two_colors(self):
eq_([(10,20,30),(40,50,60)],string_to_colors('0a141e28323c'))
def test_incomplete_color(self):
# don't return anything if it's not a complete color
eq_([],string_to_colors('102'))
class TestCaseCache:
def test_empty(self):
c = Cache()
eq_(0,len(c))
with raises(KeyError):
c['foo']
def test_set_then_retrieve_blocks(self):
c = Cache()
b = [(0,0,0),(1,2,3)]
c['foo'] = b
eq_(b,c['foo'])
def test_delitem(self):
c = Cache()
c['foo'] = ''
del c['foo']
assert 'foo' not in c
with raises(KeyError):
del c['foo']
def test_persistance(self, tmpdir):
DBNAME = tmpdir.join('hstest.db')
c = Cache(str(DBNAME))
c['foo'] = [(1,2,3)]
del c
c = Cache(str(DBNAME))
eq_([(1,2,3)],c['foo'])
def test_filter(self):
c = Cache()
c['foo'] = ''
c['bar'] = ''
c['baz'] = ''
c.filter(lambda p:p != 'bar') #only 'bar' is removed
eq_(2,len(c))
assert 'foo' in c
assert 'baz' in c
assert 'bar' not in c
def test_clear(self):
c = Cache()
c['foo'] = ''
c['bar'] = ''
c['baz'] = ''
c.clear()
eq_(0,len(c))
assert 'foo' not in c
assert 'baz' not in c
assert 'bar' not in c
def test_corrupted_db(self, tmpdir, monkeypatch):
# If we don't do this monkeypatching, we get a weird exception about trying to flush a
# closed file. I've tried setting logging level and stuff, but nothing worked. So, there we
# go, a dirty monkeypatch.
monkeypatch.setattr(logging, 'warning', lambda *args, **kw: None)
dbname = str(tmpdir.join('foo.db'))
fp = open(dbname, 'w')
fp.write('invalid sqlite content')
fp.close()
c = Cache(dbname) # should not raise a DatabaseError
c['foo'] = [(1, 2, 3)]
del c
c = Cache(dbname)
eq_(c['foo'], [(1, 2, 3)])
def test_by_id(self):
# it's possible to use the cache by referring to the files by their row_id
c = Cache()
b = [(0,0,0),(1,2,3)]
c['foo'] = b
foo_id = c.get_id('foo')
eq_(c[foo_id], b)
class TestCaseCacheSQLEscape:
def test_contains(self):
c = Cache()
assert "foo'bar" not in c
def test_getitem(self):
c = Cache()
with raises(KeyError):
c["foo'bar"]
def test_setitem(self):
c = Cache()
c["foo'bar"] = []
def test_delitem(self):
c = Cache()
c["foo'bar"] = []
try:
del c["foo'bar"]
except KeyError:
assert False
| gpl-3.0 |
geekboxzone/mmallow_prebuilts_gcc_darwin-x86_x86_x86_64-linux-android-4.9 | share/gdb/python/gdb/command/type_printers.py | 126 | 4424 | # Type printer commands.
# Copyright (C) 2010-2014 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import copy
import gdb
"""GDB commands for working with type-printers."""
class InfoTypePrinter(gdb.Command):
"""GDB command to list all registered type-printers.
Usage: info type-printers
"""
def __init__ (self):
super(InfoTypePrinter, self).__init__("info type-printers",
gdb.COMMAND_DATA)
def list_type_printers(self, type_printers):
"""Print a list of type printers."""
# A potential enhancement is to provide an option to list printers in
# "lookup order" (i.e. unsorted).
sorted_type_printers = sorted (copy.copy(type_printers),
key = lambda x: x.name)
for printer in sorted_type_printers:
if printer.enabled:
enabled = ''
else:
enabled = " [disabled]"
print (" %s%s" % (printer.name, enabled))
def invoke(self, arg, from_tty):
"""GDB calls this to perform the command."""
sep = ''
for objfile in gdb.objfiles():
if objfile.type_printers:
print ("%sType printers for %s:" % (sep, objfile.name))
self.list_type_printers(objfile.type_printers)
sep = '\n'
if gdb.current_progspace().type_printers:
print ("%sType printers for program space:" % sep)
self.list_type_printers(gdb.current_progspace().type_printers)
sep = '\n'
if gdb.type_printers:
print ("%sGlobal type printers:" % sep)
self.list_type_printers(gdb.type_printers)
class _EnableOrDisableCommand(gdb.Command):
def __init__(self, setting, name):
super(_EnableOrDisableCommand, self).__init__(name, gdb.COMMAND_DATA)
self.setting = setting
def set_some(self, name, printers):
result = False
for p in printers:
if name == p.name:
p.enabled = self.setting
result = True
return result
def invoke(self, arg, from_tty):
"""GDB calls this to perform the command."""
for name in arg.split():
ok = False
for objfile in gdb.objfiles():
if self.set_some(name, objfile.type_printers):
ok = True
if self.set_some(name, gdb.current_progspace().type_printers):
ok = True
if self.set_some(name, gdb.type_printers):
ok = True
if not ok:
print ("No type printer named '%s'" % name)
def add_some(self, result, word, printers):
for p in printers:
if p.name.startswith(word):
result.append(p.name)
def complete(self, text, word):
result = []
for objfile in gdb.objfiles():
self.add_some(result, word, objfile.type_printers)
self.add_some(result, word, gdb.current_progspace().type_printers)
self.add_some(result, word, gdb.type_printers)
return result
class EnableTypePrinter(_EnableOrDisableCommand):
"""GDB command to enable the specified type printer.
Usage: enable type-printer NAME
NAME is the name of the type-printer.
"""
def __init__(self):
super(EnableTypePrinter, self).__init__(True, "enable type-printer")
class DisableTypePrinter(_EnableOrDisableCommand):
"""GDB command to disable the specified type-printer.
Usage: disable type-printer NAME
NAME is the name of the type-printer.
"""
def __init__(self):
super(DisableTypePrinter, self).__init__(False, "disable type-printer")
InfoTypePrinter()
EnableTypePrinter()
DisableTypePrinter()
| gpl-2.0 |
symmetricapi/django-symmetric | symmetric/management/generatemodels.py | 1 | 11905 | from importlib import import_module
from optparse import make_option
import os
from django.apps import apps
from django.conf import settings
from django.core.management.base import CommandError
from django.db.models.fields import NOT_PROVIDED, TimeField, DateField
from django.db.models.fields.related import ForeignKey
from django.template import Template, Context
import symmetric.management.overrides
from symmetric.functions import _ApiModel, underscore_to_camel_case
from symmetric.management.functions import get_base_classes, get_base_models, get_base_model, get_field, has_field
from symmetric.management.translate import translate_code
from symmetric.models import get_related_model
from symmetric.views import ApiAction, ApiRequirement, BasicApiView, api_view
get_model = apps.get_model
class GenerateModelsCommand(object):
option_list = (
make_option(
'--prefix',
type='string',
dest='prefix',
default='',
help='Prefix to add to each class name and file name.',
),
make_option(
'--dest',
type='string',
dest='dest',
        help='Output all models detected from api endpoints and render them into this destination directory.',
),
make_option(
'--exclude',
type='string',
dest='exclude',
action='append',
help='Do not output anything for the models specified.',
),
make_option(
'--indent',
dest='indent',
type='int',
default=2,
help='Each tab should instead indent with this number of spaces or 0 for hard tabs.',
),
)
def get_include_related_models(self, model):
related_models = set()
if hasattr(model, 'API') and hasattr(model.API, 'include_related'):
include_related = model.API.include_related
for field in model._meta.fields:
if field.name in include_related:
related_models.add(get_related_model(field))
related_models |= self.get_include_related_models(get_related_model(field))
return related_models
def post_render(self, output):
if self.indent:
return output.replace('\t', ' ' * self.indent)
return output
def base_extra_context(self, model, api_model):
has_date = False
has_bool = False
datetime_fields = []
primary_field = None
if api_model.id_field:
primary_field = api_model.id_field[1]
base = get_base_model(model)
base_name = None
if base:
base_name = base.__name__
for decoded_name, encoded_name, encode, decode in api_model.fields:
if has_field(base, decoded_name):
continue
field = get_field(model, decoded_name)
field_type = field.__class__.__name__
if field_type == 'DateTimeField' or field_type == 'DateField':
has_date = True
datetime_fields.append((encoded_name, encoded_name[0].upper() + encoded_name[1:]))
elif field_type == 'BooleanField':
has_bool = True
if not primary_field and field.primary_key:
primary_field = encoded_name
return {'prefix': self.prefix, 'base_name': base_name, 'name': model.__name__, 'name_lower': model.__name__[0].lower() + model.__name__[1:], 'has_date': has_date, 'has_bool': has_bool, 'primary_field': primary_field, 'datetime_fields': datetime_fields}
def perform_mapping(self, mapping, format_context):
if callable(mapping):
# callable method
return mapping(format_context)
elif isinstance(mapping, Template):
# django template
return mapping.render(Context(format_context, autoescape=False))
else:
# normal python string formatting
return mapping.format(**format_context)
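    # Illustrative note (not from the original source): subclasses are expected
    # to define self.mappings, and each mapped value may take any of the three
    # forms handled by perform_mapping above. A hypothetical sketch, with
    # made-up field names and target-language snippets:
    #
    #   self.mappings = {
    #       'fields': {
    #           # plain python format string
    #           'CharField': 'var {name}: String? = null',
    #           # django Template rendered with the same context
    #           'BooleanField': Template('var {{ name }} = false'),
    #           # callable receiving the format context dict
    #           'DateTimeField': lambda ctx: 'var %s: Date? = null' % ctx['name'],
    #       },
    #   }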
def get_context(self, model):
api_model = _ApiModel(model)
context = self.base_extra_context(model, api_model)
if hasattr(self, 'extra_context'):
context.update(self.extra_context(model, api_model))
# Loop over the mappings
for mapping_name in self.mappings:
mapping = self.mappings[mapping_name]
write_only = False
if isinstance(mapping, dict):
write_only = mapping.get('WriteOnly', False)
lines = []
for decoded_name, encoded_name, encode, decode in api_model.fields:
field = get_field(model, decoded_name)
# Skip any field that is not directly on model and is not the primary id field (which could be on the base too)
if field.model is not model and encoded_name != context['primary_field']:
continue
# Skip any ptr field to base models
if decoded_name.endswith('_ptr_id'):
continue
include_related = hasattr(model, 'API') and hasattr(model.API, 'include_related') and field.name in model.API.include_related
included_readonly = False
included_obj_name = ''
if write_only and encoded_name not in api_model.encoded_fields:
# Skip readonly fields, but make an exception for included foreign keys, see Included Objects in the documentation
if isinstance(field, ForeignKey) and include_related:
included_obj_name = encoded_name
encoded_name += 'Id' if self.camelcase else '_id'
included_readonly = True
else:
continue
line = None
classes = [field.__class__] + get_base_classes(field.__class__)
for cls in classes:
field_type = cls.__name__
if callable(mapping):
line = mapping(model, encoded_name, field)
elif mapping.has_key(field_type):
format_context = {'name': encoded_name, 'null': field.null}
if field.default is not NOT_PROVIDED and not isinstance(field, (TimeField, DateField)):
# Only supply default values for non-date/time fields, it will be easier to just add these after manually
format_context['default'] = field.default
if include_related:
format_context['included'] = True
format_context['included_readonly'] = included_readonly
format_context['included_obj_name'] = included_obj_name
format_context['included_name'] = get_related_model(field).__name__
line = self.perform_mapping(mapping[field_type], format_context)
if line is not None:
break
if line is None:
raise CommandError("No such mapping for %s in %s." % (field_type, mapping_name))
elif line:
lines += line.split('\n')
context[mapping_name] = lines
# Translate the api properties
if hasattr(self, 'property_declarations') or hasattr(self, 'property_implementations'):
decl_lines = []
impl_lines = []
property_transformer = getattr(self, 'property_transformer', None)
for name in model.__dict__:
attr = model.__dict__[name]
if type(attr) is property and attr.fget and hasattr(attr.fget, 'api_code'):
if getattr(attr.fget, 'api_translations', None) and attr.fget.api_translations.has_key(self.lang):
code = attr.fget.api_translations[self.lang]
else:
code = translate_code(attr.fget.api_code, self.lang, (property_transformer(model) if property_transformer else None))
format_context = {'name': name if not self.camelcase else underscore_to_camel_case(name), 'type': self.property_types[attr.fget.api_type], 'code': code}
format_context['name_upper'] = format_context['name'][0].upper() + format_context['name'][1:]
if hasattr(self, 'property_declarations'):
line = self.perform_mapping(self.property_declarations, format_context)
decl_lines += line.split('\n')
if hasattr(self, 'property_implementations'):
line = self.perform_mapping(self.property_implementations, format_context)
impl_lines += line.split('\n')
if decl_lines:
context['property_declarations'] = decl_lines
if impl_lines:
context['property_implementations'] = impl_lines
return context
def enum_patterns(self, patterns):
for pattern in patterns:
if pattern.callback:
if isinstance(pattern.callback, (api_view, BasicApiView)) and pattern.callback.model:
self.models.add(pattern.callback.model)
self.models |= self.get_include_related_models(pattern.callback.model)
else:
self.enum_patterns(pattern.url_patterns)
def expand_mappings(self, field, *expanded_fields):
for mapping in self.mappings.values():
for key, value in mapping.items():
if key == field:
for expanded_field in expanded_fields:
if not mapping.has_key(expanded_field):
mapping[expanded_field] = mapping[field]
break
def render(self, *args, **options):
self.camelcase = getattr(settings, 'API_CAMELCASE', True)
self.prefix = options['prefix']
self.indent = options['indent']
if not hasattr(self, 'templates'):
raise CommandError('No templates set!')
if options and options['dest']:
try:
os.makedirs(options['dest'])
except:
print 'Warning: Overwriting any contents in %s' % options['dest']
self.models = set()
module = import_module(settings.ROOT_URLCONF)
self.enum_patterns(module.urlpatterns)
# Add any base models to the set
base_models = set()
for model in self.models:
base_models |= set(get_base_models(model))
self.models |= base_models
for model in self.models:
if options['exclude'] and model.__name__ in options['exclude']:
continue
context = self.get_context(model)
for i in range(len(self.templates)):
template = self.templates[i]
template_extension = self.template_extensions[i]
path = os.path.join(options['dest'], '%s%s.%s' % (self.prefix, model.__name__, template_extension))
print 'Rendering %s' % path
with open(path, 'w') as f:
f.write(self.post_render(template.render(Context(context, autoescape=False))))
elif args:
for model_name in args:
model = model_name.split('.')
model = get_model(model[0], model[1])
context = self.get_context(model)
for template in self.templates:
print self.post_render(template.render(Context(context, autoescape=False)))
else:
raise CommandError("No model or destination directory specified.")
| mit |
annarev/tensorflow | tensorflow/python/summary/writer/writer.py | 11 | 16949 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Provides an API for generating Event protocol buffers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
import time
import warnings
from tensorflow.core.framework import graph_pb2
from tensorflow.core.framework import summary_pb2
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.core.util import event_pb2
from tensorflow.python.eager import context
from tensorflow.python.framework import meta_graph
from tensorflow.python.framework import ops
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import plugin_asset
from tensorflow.python.summary.writer.event_file_writer import EventFileWriter
from tensorflow.python.summary.writer.event_file_writer_v2 import EventFileWriterV2
from tensorflow.python.util.tf_export import tf_export
_PLUGINS_DIR = "plugins"
class SummaryToEventTransformer(object):
"""Abstractly implements the SummaryWriter API.
This API basically implements a number of endpoints (add_summary,
add_session_log, etc). The endpoints all generate an event protobuf, which is
passed to the contained event_writer.
"""
def __init__(self, event_writer, graph=None, graph_def=None):
"""Creates a `SummaryWriter` and an event file.
On construction the summary writer creates a new event file in `logdir`.
This event file will contain `Event` protocol buffers constructed when you
call one of the following functions: `add_summary()`, `add_session_log()`,
`add_event()`, or `add_graph()`.
If you pass a `Graph` to the constructor it is added to
the event file. (This is equivalent to calling `add_graph()` later).
TensorBoard will pick the graph from the file and display it graphically so
you can interactively explore the graph you built. You will usually pass
the graph from the session in which you launched it:
```python
...create a graph...
# Launch the graph in a session.
sess = tf.compat.v1.Session()
# Create a summary writer, add the 'graph' to the event file.
writer = tf.compat.v1.summary.FileWriter(<some-directory>, sess.graph)
```
Args:
event_writer: An EventWriter. Implements add_event and get_logdir.
graph: A `Graph` object, such as `sess.graph`.
graph_def: DEPRECATED: Use the `graph` argument instead.
"""
self.event_writer = event_writer
# For storing used tags for session.run() outputs.
self._session_run_tags = {}
if graph is not None or graph_def is not None:
# Calling it with both graph and graph_def for backward compatibility.
self.add_graph(graph=graph, graph_def=graph_def)
# Also export the meta_graph_def in this case.
# graph may itself be a graph_def due to positional arguments
maybe_graph_as_def = (graph.as_graph_def(add_shapes=True)
if isinstance(graph, ops.Graph) else graph)
self.add_meta_graph(
meta_graph.create_meta_graph_def(graph_def=graph_def or
maybe_graph_as_def))
# This set contains tags of Summary Values that have been encountered
# already. The motivation here is that the SummaryWriter only keeps the
# metadata property (which is a SummaryMetadata proto) of the first Summary
# Value encountered for each tag. The SummaryWriter strips away the
# SummaryMetadata for all subsequent Summary Values with tags seen
# previously. This saves space.
self._seen_summary_tags = set()
def add_summary(self, summary, global_step=None):
"""Adds a `Summary` protocol buffer to the event file.
This method wraps the provided summary in an `Event` protocol buffer
and adds it to the event file.
You can pass the result of evaluating any summary op, using
`tf.Session.run` or
`tf.Tensor.eval`, to this
function. Alternatively, you can pass a `tf.compat.v1.Summary` protocol
buffer that you populate with your own data. The latter is
commonly done to report evaluation results in event files.
Args:
summary: A `Summary` protocol buffer, optionally serialized as a string.
global_step: Number. Optional global step value to record with the
summary.
"""
if isinstance(summary, bytes):
summ = summary_pb2.Summary()
summ.ParseFromString(summary)
summary = summ
# We strip metadata from values with tags that we have seen before in order
# to save space - we just store the metadata on the first value with a
# specific tag.
for value in summary.value:
if not value.metadata:
continue
if value.tag in self._seen_summary_tags:
# This tag has been encountered before. Strip the metadata.
value.ClearField("metadata")
continue
# We encounter a value with a tag we have not encountered previously. And
# it has metadata. Remember to strip metadata from future values with this
# tag string.
self._seen_summary_tags.add(value.tag)
event = event_pb2.Event(summary=summary)
self._add_event(event, global_step)
def add_session_log(self, session_log, global_step=None):
"""Adds a `SessionLog` protocol buffer to the event file.
This method wraps the provided session in an `Event` protocol buffer
and adds it to the event file.
Args:
session_log: A `SessionLog` protocol buffer.
global_step: Number. Optional global step value to record with the
summary.
"""
event = event_pb2.Event(session_log=session_log)
self._add_event(event, global_step)
def _add_graph_def(self, graph_def, global_step=None):
graph_bytes = graph_def.SerializeToString()
event = event_pb2.Event(graph_def=graph_bytes)
self._add_event(event, global_step)
def add_graph(self, graph, global_step=None, graph_def=None):
"""Adds a `Graph` to the event file.
The graph described by the protocol buffer will be displayed by
TensorBoard. Most users pass a graph in the constructor instead.
Args:
graph: A `Graph` object, such as `sess.graph`.
global_step: Number. Optional global step counter to record with the
graph.
graph_def: DEPRECATED. Use the `graph` parameter instead.
Raises:
ValueError: If both graph and graph_def are passed to the method.
"""
if graph is not None and graph_def is not None:
raise ValueError("Please pass only graph, or graph_def (deprecated), "
"but not both.")
if isinstance(graph, ops.Graph) or isinstance(graph_def, ops.Graph):
# The user passed a `Graph`.
# Check if the user passed it via the graph or the graph_def argument and
# correct for that.
if not isinstance(graph, ops.Graph):
logging.warning("When passing a `Graph` object, please use the `graph`"
" named argument instead of `graph_def`.")
graph = graph_def
# Serialize the graph with additional info.
true_graph_def = graph.as_graph_def(add_shapes=True)
self._write_plugin_assets(graph)
elif (isinstance(graph, graph_pb2.GraphDef) or
isinstance(graph_def, graph_pb2.GraphDef)):
# The user passed a `GraphDef`.
logging.warning("Passing a `GraphDef` to the SummaryWriter is deprecated."
" Pass a `Graph` object instead, such as `sess.graph`.")
# Check if the user passed it via the graph or the graph_def argument and
# correct for that.
if isinstance(graph, graph_pb2.GraphDef):
true_graph_def = graph
else:
true_graph_def = graph_def
else:
# The user passed neither `Graph`, nor `GraphDef`.
raise TypeError("The passed graph must be an instance of `Graph` "
"or the deprecated `GraphDef`")
# Finally, add the graph_def to the summary writer.
self._add_graph_def(true_graph_def, global_step)
def _write_plugin_assets(self, graph):
plugin_assets = plugin_asset.get_all_plugin_assets(graph)
logdir = self.event_writer.get_logdir()
for asset_container in plugin_assets:
plugin_name = asset_container.plugin_name
plugin_dir = os.path.join(logdir, _PLUGINS_DIR, plugin_name)
gfile.MakeDirs(plugin_dir)
assets = asset_container.assets()
for (asset_name, content) in assets.items():
asset_path = os.path.join(plugin_dir, asset_name)
with gfile.Open(asset_path, "w") as f:
f.write(content)
def add_meta_graph(self, meta_graph_def, global_step=None):
"""Adds a `MetaGraphDef` to the event file.
The `MetaGraphDef` allows running the given graph via
`saver.import_meta_graph()`.
Args:
meta_graph_def: A `MetaGraphDef` object, often as returned by
`saver.export_meta_graph()`.
global_step: Number. Optional global step counter to record with the
graph.
Raises:
      TypeError: If `meta_graph_def` is not an instance of `MetaGraphDef`.
"""
if not isinstance(meta_graph_def, meta_graph_pb2.MetaGraphDef):
raise TypeError("meta_graph_def must be type MetaGraphDef, saw type: %s" %
type(meta_graph_def))
meta_graph_bytes = meta_graph_def.SerializeToString()
event = event_pb2.Event(meta_graph_def=meta_graph_bytes)
self._add_event(event, global_step)
def add_run_metadata(self, run_metadata, tag, global_step=None):
"""Adds a metadata information for a single session.run() call.
Args:
run_metadata: A `RunMetadata` protobuf object.
tag: The tag name for this metadata.
global_step: Number. Optional global step counter to record with the
StepStats.
Raises:
ValueError: If the provided tag was already used for this type of event.
"""
if tag in self._session_run_tags:
raise ValueError("The provided tag was already used for this event type")
self._session_run_tags[tag] = True
tagged_metadata = event_pb2.TaggedRunMetadata()
tagged_metadata.tag = tag
# Store the `RunMetadata` object as bytes in order to have postponed
# (lazy) deserialization when used later.
tagged_metadata.run_metadata = run_metadata.SerializeToString()
event = event_pb2.Event(tagged_run_metadata=tagged_metadata)
self._add_event(event, global_step)
def _add_event(self, event, step):
event.wall_time = time.time()
if step is not None:
event.step = int(step)
self.event_writer.add_event(event)
@tf_export(v1=["summary.FileWriter"])
class FileWriter(SummaryToEventTransformer):
"""Writes `Summary` protocol buffers to event files.
The `FileWriter` class provides a mechanism to create an event file in a
given directory and add summaries and events to it. The class updates the
file contents asynchronously. This allows a training program to call methods
to add data to the file directly from the training loop, without slowing down
training.
When constructed with a `tf.compat.v1.Session` parameter, a `FileWriter`
instead forms a compatibility layer over new graph-based summaries
to facilitate the use of new summary writing with
pre-existing code that expects a `FileWriter` instance.
This class is not thread-safe.
"""
def __init__(self,
logdir,
graph=None,
max_queue=10,
flush_secs=120,
graph_def=None,
filename_suffix=None,
session=None):
"""Creates a `FileWriter`, optionally shared within the given session.
Typically, constructing a file writer creates a new event file in `logdir`.
This event file will contain `Event` protocol buffers constructed when you
call one of the following functions: `add_summary()`, `add_session_log()`,
`add_event()`, or `add_graph()`.
If you pass a `Graph` to the constructor it is added to
the event file. (This is equivalent to calling `add_graph()` later).
TensorBoard will pick the graph from the file and display it graphically so
you can interactively explore the graph you built. You will usually pass
the graph from the session in which you launched it:
```python
...create a graph...
# Launch the graph in a session.
sess = tf.compat.v1.Session()
# Create a summary writer, add the 'graph' to the event file.
writer = tf.compat.v1.summary.FileWriter(<some-directory>, sess.graph)
```
The `session` argument to the constructor makes the returned `FileWriter` a
compatibility layer over new graph-based summaries (`tf.summary`).
Crucially, this means the underlying writer resource and events file will
be shared with any other `FileWriter` using the same `session` and `logdir`.
In either case, ops will be added to `session.graph` to control the
underlying file writer resource.
Args:
logdir: A string. Directory where event file will be written.
graph: A `Graph` object, such as `sess.graph`.
max_queue: Integer. Size of the queue for pending events and summaries.
flush_secs: Number. How often, in seconds, to flush the
pending events and summaries to disk.
graph_def: DEPRECATED: Use the `graph` argument instead.
filename_suffix: A string. Every event file's name is suffixed with
`suffix`.
session: A `tf.compat.v1.Session` object. See details above.
Raises:
RuntimeError: If called with eager execution enabled.
@compatibility(eager)
`v1.summary.FileWriter` is not compatible with eager execution.
To write TensorBoard summaries under eager execution,
use `tf.summary.create_file_writer` or
a `with v1.Graph().as_default():` context.
@end_compatibility
"""
if context.executing_eagerly():
raise RuntimeError(
"v1.summary.FileWriter is not compatible with eager execution. "
"Use `tf.summary.create_file_writer`,"
"or a `with v1.Graph().as_default():` context")
if session is not None:
event_writer = EventFileWriterV2(
session, logdir, max_queue, flush_secs, filename_suffix)
else:
event_writer = EventFileWriter(logdir, max_queue, flush_secs,
filename_suffix)
self._closed = False
super(FileWriter, self).__init__(event_writer, graph, graph_def)
def __enter__(self):
"""Make usable with "with" statement."""
return self
def __exit__(self, unused_type, unused_value, unused_traceback):
"""Make usable with "with" statement."""
self.close()
def get_logdir(self):
"""Returns the directory where event file will be written."""
return self.event_writer.get_logdir()
def _warn_if_event_writer_is_closed(self):
if self._closed:
warnings.warn("Attempting to use a closed FileWriter. "
"The operation will be a noop unless the FileWriter "
"is explicitly reopened.")
def _add_event(self, event, step):
self._warn_if_event_writer_is_closed()
super(FileWriter, self)._add_event(event, step)
def add_event(self, event):
"""Adds an event to the event file.
Args:
event: An `Event` protocol buffer.
"""
self._warn_if_event_writer_is_closed()
self.event_writer.add_event(event)
def flush(self):
"""Flushes the event file to disk.
Call this method to make sure that all pending events have been written to
disk.
"""
# Flushing a closed EventFileWriterV2 raises an exception. It is,
# however, a noop for EventFileWriter.
self._warn_if_event_writer_is_closed()
self.event_writer.flush()
def close(self):
"""Flushes the event file to disk and close the file.
Call this method when you do not need the summary writer anymore.
"""
self.event_writer.close()
self._closed = True
def reopen(self):
"""Reopens the EventFileWriter.
Can be called after `close()` to add more events in the same directory.
The events will go into a new events file.
Does nothing if the EventFileWriter was not closed.
"""
self.event_writer.reopen()
self._closed = False
| apache-2.0 |
mxOBS/deb-pkg_trusty_chromium-browser | tools/memory_inspector/memory_inspector/classification/rules.py | 109 | 5039 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""This module defines the core structure of the classification rules.
This module does NOT specify how the rules filter the data: this responsibility
is of to the concrete classifiers, which have to override the Rule class herein
defined and know how to do the math.
This module, instead, defines the format of the rules and the way they are
encoded and loaded (in a python-style dictionary file).
Rules are organized in a tree, where the root is always represented by a 'Total'
node, and the leaves are arbitrarily defined by the user, according to the
following principles:
- Order of sibling rules matters: what is caught by a rule will not be caught
  by the next ones, but it is propagated to its children rules if any.
- Every non-leaf node X gets an implicit extra child named X-other. This
  catch-all child catches everything (within the parent rule scope) that is
  not caught by the other siblings. This is to guarantee that, when doing the
  math (the aggregation), at any level, the sum of the values in the leaves
  matches the value of their parent.
The format of a rule dictionary is the following:
[
{
'name': 'Name of the rule',
'filter-X': 'The embedder will know how to interpret this value and will use
it to filter the data'
'filter-Y': 'Idem'
children: [
{
'name': 'Name of the sub-rule 1'
... and so on recursively ,
},
]
},
]
And a typical resulting rule tree looks like this:
+----------------------+
| Total |
|----------------------|
+------------------+ Match all. +--------------------+
| +----------+-----------+ |
| | |
+-----v-----+ +-----v-----+ +------v----+
| Foo | | Bar | |Total-other|
|-----------| |-----------| |-----------|
|File: foo* | +---+File: bar* +-----+ | Match all |
+-----------+ | +-----------+ | +-----------+
| |
+------v------+ +------v----+
| Bar::Coffee | | Bar-other |
|-------------| |-----------|
|File: bar*cof| | Match all |
+-------------+ +-----------+
"""
import ast
def Load(content, rule_builder):
"""Construct a rule tree from a python-style dict representation.
Args:
content: a string containing the dict (i.e. content of the rule file).
rule_builder: a method which takes two arguments (rule_name, filters_dict)
and returns a subclass of |Rule|. |filters_dict| is a dict of the keys
(filter-foo, filter-bar in the example above) for the rule node.
"""
rules_dict = ast.literal_eval(content)
root = Rule('Total')
_MakeRuleNodeFromDictNode(root, rules_dict, rule_builder)
return root
class Rule(object):
""" An abstract class representing a rule node in the rules tree.
Embedders must override the Match method when deriving this class.
"""
def __init__(self, name):
self.name = name
self.children = []
def Match(self, _): # pylint: disable=R0201
""" The rationale of this default implementation is modeling the root
('Total') and the catch-all (*-other) rules that every |RuleTree| must have,
regardless of the embedder-specific children rules. This is to guarantee
that the totals match at any level of the tree.
"""
return True
def AppendChild(self, child_rule):
assert(isinstance(child_rule, Rule))
duplicates = filter(lambda x: x.name == child_rule.name, self.children)
assert(not duplicates), 'Duplicate rule ' + child_rule.name
self.children.append(child_rule)
def _MakeRuleNodeFromDictNode(rule_node, dict_nodes, rule_builder):
"""Recursive rule tree builder for traversing the rule dict."""
for dict_node in dict_nodes:
assert('name' in dict_node)
# Extract the filter keys (e.g., mmap-file, mmap-prot) that will be passed
# to the |rule_builder|
filter_keys = set(dict_node.keys()) - set(('name', 'children'))
filters = dict((k, dict_node[k]) for k in filter_keys)
child_rule = rule_builder(dict_node['name'], filters)
rule_node.AppendChild(child_rule)
dict_children = dict_node.get('children', {})
_MakeRuleNodeFromDictNode(child_rule, dict_children, rule_builder)
# If the rule_node isn't a leaf, add the 'name-other' catch-all sibling to
# catch all the entries that matched this node but none of its children.
if len(rule_node.children):
rule_node.AppendChild(Rule(rule_node.name + '-other')) | bsd-3-clause |
hogarthj/ansible | hacking/fix_test_syntax.py | 135 | 3563 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# (c) 2017, Matt Martz <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Purpose:
# The purpose of this script is to convert uses of tests as filters to proper jinja test syntax
# as part of https://github.com/ansible/proposals/issues/83
# Notes:
# This script is imperfect, but was close enough to "fix" all integration tests
# with the exception of:
#
# 1. One file needed manual remediation, where \\\\ was ultimately replace with \\ in 8 locations.
# 2. Multiple filter pipeline is unsupported. Example:
# var|string|search('foo')
# Which should be converted to:
# var|string is search('foo')
import argparse
import os
import re
from ansible.plugins.test import core, files, mathstuff
TESTS = list(core.TestModule().tests().keys()) + list(files.TestModule().tests().keys()) + list(mathstuff.TestModule().tests().keys())
TEST_MAP = {
'version_compare': 'version',
'is_dir': 'directory',
'is_file': 'file',
'is_link': 'link',
'is_abs': 'abs',
'is_same_file': 'same_file',
'is_mount': 'mount',
'issubset': 'subset',
'issuperset': 'superset',
'isnan': 'nan',
'succeeded': 'successful',
'success': 'successful',
'change': 'changed',
'skip': 'skipped',
}
FILTER_RE = re.compile(r'((.+?)\s*([\w \.\'"]+)(\s*)\|(\s*)(\w+))')
NOT_RE = re.compile(r'( ?)not ')
ASSERT_SPACE_RE = re.compile(r'- ([\'"])\s+')
parser = argparse.ArgumentParser()
parser.add_argument(
'path',
help='Path to a directory that will be recursively walked. All .yml and .yaml files will be evaluated '
    'and uses of tests as filters will be converted to proper jinja test syntax so that the files have their '
    'test syntax fixed'
)
args = parser.parse_args()
for root, dirs, filenames in os.walk(args.path):
for name in filenames:
if os.path.splitext(name)[1] not in ('.yml', '.yaml'):
continue
path = os.path.join(root, name)
print(path)
with open(path) as f:
text = f.read()
for match in FILTER_RE.findall(text):
filter_name = match[5]
is_not = match[2].strip(' "\'').startswith('not ')
try:
test_name = TEST_MAP[filter_name]
except KeyError:
test_name = filter_name
if test_name not in TESTS:
continue
if is_not:
before = NOT_RE.sub(r'\1', match[2]).rstrip()
text = re.sub(
re.escape(match[0]),
'%s %s is not %s' % (match[1], before, test_name,),
text
)
else:
text = re.sub(
re.escape(match[0]),
'%s %s is %s' % (match[1], match[2].rstrip(), test_name,),
text
)
with open(path, 'w+') as f:
f.write(text)
| gpl-3.0 |
liberorbis/libernext | env/lib/python2.7/site-packages/pip/exceptions.py | 123 | 1125 | """Exceptions used throughout package"""
from __future__ import absolute_import
class PipError(Exception):
"""Base pip exception"""
class InstallationError(PipError):
"""General exception during installation"""
class UninstallationError(PipError):
"""General exception during uninstallation"""
class DistributionNotFound(InstallationError):
"""Raised when a distribution cannot be found to satisfy a requirement"""
class BestVersionAlreadyInstalled(PipError):
"""Raised when the most up-to-date version of a package is already
installed. """
class BadCommand(PipError):
"""Raised when virtualenv or a command is not found"""
class CommandError(PipError):
"""Raised when there is an error in command-line arguments"""
class PreviousBuildDirError(PipError):
"""Raised when there's a previous conflicting build directory"""
class HashMismatch(InstallationError):
"""Distribution file hash values don't match."""
class InvalidWheelFilename(InstallationError):
"""Invalid wheel filename."""
class UnsupportedWheel(InstallationError):
"""Unsupported wheel."""
| gpl-2.0 |
ketjow4/NOV | Lib/site-packages/scipy/stats/info.py | 55 | 8282 | """
Statistical Functions
=====================
This module contains a large number of probability distributions as
well as a growing library of statistical functions.
Each included distribution is an instance of the class rv_continous.
For each given name the following methods are available. See docstring
for rv_continuous for more information
:rvs:
random variates with the distribution
:pdf:
probability density function
:cdf:
cumulative distribution function
:sf:
survival function (1.0 - cdf)
:ppf:
percent-point function (inverse of cdf)
:isf:
inverse survival function
:stats:
mean, variance, and optionally skew and kurtosis
Calling the instance as a function returns a frozen pdf whose shape,
location, and scale parameters are fixed.
Distributions
---------------
The distributions available with the above methods are:
Continuous (Total == 81 distributions)
---------------------------------------
.. autosummary::
:toctree: generated/
norm Normal (Gaussian)
alpha Alpha
anglit Anglit
arcsine Arcsine
beta Beta
betaprime Beta Prime
bradford Bradford
burr Burr
cauchy Cauchy
chi Chi
chi2 Chi-squared
cosine Cosine
dgamma Double Gamma
dweibull Double Weibull
erlang Erlang
expon Exponential
exponweib Exponentiated Weibull
exponpow Exponential Power
f                 F (Snedecor F)
fatiguelife Fatigue Life (Birnbaum-Sanders)
fisk Fisk
foldcauchy Folded Cauchy
foldnorm Folded Normal
frechet_r Frechet Right Sided, Extreme Value Type II (Extreme LB) or weibull_min
frechet_l Frechet Left Sided, Weibull_max
genlogistic Generalized Logistic
genpareto Generalized Pareto
genexpon Generalized Exponential
genextreme Generalized Extreme Value
gausshyper Gauss Hypergeometric
gamma Gamma
gengamma Generalized gamma
genhalflogistic Generalized Half Logistic
gompertz Gompertz (Truncated Gumbel)
gumbel_r Right Sided Gumbel, Log-Weibull, Fisher-Tippett, Extreme Value Type I
gumbel_l Left Sided Gumbel, etc.
halfcauchy Half Cauchy
halflogistic Half Logistic
halfnorm Half Normal
hypsecant Hyperbolic Secant
invgamma Inverse Gamma
invnorm Inverse Normal
invgauss Inverse Gaussian
invweibull Inverse Weibull
johnsonsb Johnson SB
johnsonsu Johnson SU
ksone Kolmogorov-Smirnov one-sided (no stats)
kstwobign Kolmogorov-Smirnov two-sided test for Large N (no stats)
laplace Laplace
logistic Logistic
loggamma Log-Gamma
loglaplace Log-Laplace (Log Double Exponential)
lognorm Log-Normal
gilbrat Gilbrat
lomax Lomax (Pareto of the second kind)
maxwell Maxwell
mielke Mielke's Beta-Kappa
nakagami Nakagami
ncx2 Non-central chi-squared
ncf Non-central F
nct Non-central Student's T
pareto Pareto
powerlaw Power-function
powerlognorm Power log normal
powernorm Power normal
rdist R distribution
reciprocal Reciprocal
rayleigh Rayleigh
rice Rice
recipinvgauss Reciprocal Inverse Gaussian
semicircular Semicircular
t Student's T
triang Triangular
truncexpon Truncated Exponential
truncnorm Truncated Normal
tukeylambda Tukey-Lambda
uniform Uniform
vonmises Von-Mises (Circular)
wald Wald
weibull_min Minimum Weibull (see Frechet)
weibull_max Maximum Weibull (see Frechet)
wrapcauchy Wrapped Cauchy
=============== ==============================================================
Discrete (Total == 10 distributions)
==============================================================================
binom Binomial
bernoulli Bernoulli
nbinom Negative Binomial
geom Geometric
hypergeom Hypergeometric
logser Logarithmic (Log-Series, Series)
poisson Poisson
planck Planck (Discrete Exponential)
boltzmann Boltzmann (Truncated Discrete Exponential)
randint Discrete Uniform
zipf Zipf
dlaplace Discrete Laplacian
=============== ==============================================================
Statistical Functions (adapted from Gary Strangman)
-----------------------------------------------------
================= ==============================================================
gmean Geometric mean
hmean Harmonic mean
mean Arithmetic mean
cmedian Computed median
median Median
mode Modal value
tmean Truncated arithmetic mean
tvar Truncated variance
tmin _
tmax _
tstd _
tsem _
moment Central moment
variation Coefficient of variation
skew Skewness
kurtosis Fisher or Pearson kurtosis
describe Descriptive statistics
skewtest _
kurtosistest _
normaltest _
================= ==============================================================
================= ==============================================================
itemfreq _
scoreatpercentile _
percentileofscore _
histogram2 _
histogram _
cumfreq _
relfreq _
================= ==============================================================
================= ==============================================================
obrientransform _
signaltonoise _
bayes_mvs _
sem _
zmap _
================= ==============================================================
================= ==============================================================
threshold _
trimboth _
trim1 _
================= ==============================================================
================= ==============================================================
f_oneway _
paired _
pearsonr _
spearmanr _
pointbiserialr _
kendalltau _
linregress _
================= ==============================================================
================= ==============================================================
ttest_1samp _
ttest_ind _
ttest_rel _
kstest _
chisquare _
ks_2samp _
mannwhitneyu      _
tiecorrect _
ranksums _
wilcoxon _
kruskal _
friedmanchisquare _
================= ==============================================================
================= ==============================================================
ansari _
bartlett _
levene _
shapiro _
anderson _
binom_test _
fligner _
mood _
oneway _
================= ==============================================================
================= ==============================================================
glm _
================= ==============================================================
================= ==============================================================
Plot-tests
================================================================================
probplot _
ppcc_max _
ppcc_plot _
================= ==============================================================
For many more stat related functions install the software R and the
interface package rpy.
"""
postpone_import = 1
global_symbols = ['find_repeats']
depends = ['linalg','special']
ignore = False # importing stats causes a segfault
| gpl-3.0 |
LiquidSmooth-Devices/android_kernel_samsung_smdk4412 | tools/perf/scripts/python/syscall-counts.py | 11181 | 1522 | # system call counts
# (c) 2010, Tom Zanussi <[email protected]>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide system call totals, broken down by syscall.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import syscall_name
usage = "perf script -s syscall-counts.py [comm]\n";
for_comm = None
if len(sys.argv) > 2:
sys.exit(usage)
if len(sys.argv) > 1:
for_comm = sys.argv[1]
syscalls = autodict()
def trace_begin():
print "Press control+C to stop and show the summary"
def trace_end():
print_syscall_totals()
def raw_syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, args):
if for_comm is not None:
if common_comm != for_comm:
return
try:
syscalls[id] += 1
except TypeError:
syscalls[id] = 1
def print_syscall_totals():
if for_comm is not None:
print "\nsyscall events for %s:\n\n" % (for_comm),
else:
print "\nsyscall events:\n\n",
print "%-40s %10s\n" % ("event", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"-----------"),
for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
reverse = True):
print "%-40s %10d\n" % (syscall_name(id), val),
| gpl-2.0 |
Vutshi/qutip | qutip/tests/test_subsystem_apply.py | 1 | 5219 | # This file is part of QuTiP.
#
# QuTiP is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# QuTiP is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with QuTiP. If not, see <http://www.gnu.org/licenses/>.
#
# Copyright (C) 2011 and later, Paul D. Nation & Robert J. Johansson
#
###########################################################################
from numpy.linalg import norm
from numpy.testing import assert_, run_module_suite
from qutip.random_objects import rand_dm, rand_unitary, rand_kraus_map
from qutip.subsystem_apply import subsystem_apply
from qutip.superop_reps import kraus_to_super
from qutip.superoperator import mat2vec, vec2mat
from qutip.tensor import tensor
from qutip.qobj import Qobj
class TestSubsystemApply(object):
"""
A test class for the QuTiP function for applying superoperators to
subsystems.
The four tests below determine whether efficient numerics, naive numerics
and semi-analytic results are identical.
"""
def test_SimpleSingleApply(self):
"""
Non-composite system, operator on Hilbert space.
"""
rho_3 = rand_dm(3)
single_op = rand_unitary(3)
analytic_result = single_op * rho_3 * single_op.dag()
naive_result = subsystem_apply(rho_3, single_op, [True],
reference=True)
efficient_result = subsystem_apply(rho_3, single_op, [True])
naive_diff = (analytic_result - naive_result).data.todense()
efficient_diff = (efficient_result - analytic_result).data.todense()
assert_(norm(naive_diff) < 1e-12 and norm(efficient_diff) < 1e-12)
def test_SimpleSuperApply(self):
"""
Non-composite system, operator on Liouville space.
"""
rho_3 = rand_dm(3)
superop = kraus_to_super(rand_kraus_map(3))
analytic_result = vec2mat(superop.data.todense() *
mat2vec(rho_3.data.todense()))
naive_result = subsystem_apply(rho_3, superop, [True],
reference=True)
naive_diff = (analytic_result - naive_result).data.todense()
assert_(norm(naive_diff) < 1e-12)
efficient_result = subsystem_apply(rho_3, superop, [True])
efficient_diff = (efficient_result - analytic_result).data.todense()
assert_(norm(efficient_diff) < 1e-12)
def test_ComplexSingleApply(self):
"""
Composite system, operator on Hilbert space.
"""
rho_list = list(map(rand_dm, [2, 3, 2, 3, 2]))
rho_input = tensor(rho_list)
single_op = rand_unitary(3)
analytic_result = rho_list
analytic_result[1] = single_op * analytic_result[1] * single_op.dag()
analytic_result[3] = single_op * analytic_result[3] * single_op.dag()
analytic_result = tensor(analytic_result)
naive_result = subsystem_apply(rho_input, single_op,
[False, True, False, True, False],
reference=True)
naive_diff = (analytic_result - naive_result).data.todense()
assert_(norm(naive_diff) < 1e-12)
efficient_result = subsystem_apply(rho_input, single_op,
[False, True, False, True, False])
efficient_diff = (efficient_result - analytic_result).data.todense()
assert_(norm(efficient_diff) < 1e-12)
def test_ComplexSuperApply(self):
"""
Superoperator: Efficient numerics and reference return same result,
        acting on a composite system
"""
rho_list = list(map(rand_dm, [2, 3, 2, 3, 2]))
rho_input = tensor(rho_list)
superop = kraus_to_super(rand_kraus_map(3))
analytic_result = rho_list
analytic_result[1] = Qobj(vec2mat(superop.data.todense() *
mat2vec(analytic_result[1].data.todense())))
analytic_result[3] = Qobj(vec2mat(superop.data.todense() *
mat2vec(analytic_result[3].data.todense())))
analytic_result = tensor(analytic_result)
naive_result = subsystem_apply(rho_input, superop,
[False, True, False, True, False],
reference=True)
naive_diff = (analytic_result - naive_result).data.todense()
assert_(norm(naive_diff) < 1e-12)
efficient_result = subsystem_apply(rho_input, superop,
[False, True, False, True, False])
efficient_diff = (efficient_result - analytic_result).data.todense()
assert_(norm(efficient_diff) < 1e-12)
if __name__ == "__main__":
run_module_suite()
| gpl-3.0 |
jgcaaprom/android_external_chromium_org | components/test/data/password_manager/run_tests.py | 43 | 4038 | # -*- coding: utf-8 -*-
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""This file allows the bots to be easily configure and run the tests."""
import argparse
import os
import tempfile
from environment import Environment
import tests
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Password Manager automated tests runner help.")
parser.add_argument(
"--chrome-path", action="store", dest="chrome_path",
help="Set the chrome path (required).", nargs=1, required=True)
parser.add_argument(
"--chromedriver-path", action="store", dest="chromedriver_path",
help="Set the chromedriver path (required).", nargs=1, required=True)
parser.add_argument(
"--profile-path", action="store", dest="profile_path",
help="Set the profile path (required). You just need to choose a "
"temporary empty folder. If the folder is not empty all its content "
"is going to be removed.",
nargs=1, required=True)
parser.add_argument(
"--passwords-path", action="store", dest="passwords_path",
help="Set the usernames/passwords path (required).", nargs=1,
required=True)
parser.add_argument("--save-path", action="store", nargs=1, dest="save_path",
help="Write the results in a file.", required=True)
args = parser.parse_args()
environment = Environment('', '', '', None, False)
tests.Tests(environment)
xml = open(args.save_path[0],"w")
xml.write("<xml>")
try:
results = tempfile.NamedTemporaryFile(
dir=os.path.join(tempfile.gettempdir()), delete=False)
results_path = results.name
results.close()
full_path = os.path.realpath(__file__)
tests_dir = os.path.dirname(full_path)
tests_path = os.path.join(tests_dir, "tests.py")
for websitetest in environment.websitetests:
# The tests can be flaky. This is why we try to rerun up to 3 times.
for x in range(0, 3):
# TODO(rchtara): Using "pkill" is just temporary until a better,
# platform-independent solution is found.
os.system("pkill chrome")
try:
os.remove(results_path)
except Exception:
pass
# TODO(rchtara): Using "timeout is just temporary until a better,
# platform-independent solution is found.
# The website test runs in two passes, each pass has an internal
# timeout of 200s for waiting (see |remaining_time_to_wait| and
# Wait() in websitetest.py). Accounting for some more time spent on
# the non-waiting execution, 300 seconds should be the upper bound on
# the runtime of one pass, thus 600 seconds for the whole test.
os.system("timeout 600 python %s %s --chrome-path %s "
"--chromedriver-path %s --passwords-path %s --profile-path %s "
"--save-path %s" %
(tests_path, websitetest.name, args.chrome_path[0],
args.chromedriver_path[0], args.passwords_path[0],
args.profile_path[0], results_path))
if os.path.isfile(results_path):
results = open(results_path, "r")
count = 0 # Count the number of successful tests.
for line in results:
xml.write(line)
count += line.count("successful='True'")
results.close()
          # There are only two tests running for every website: the prompt and
# the normal test. If both of the tests were successful, the tests
# would be stopped for the current website.
if count == 2:
break
else:
xml.write("<result><test name='%s' type='prompt' successful='false'>"
"</test><test name='%s' type='normal' successful='false'></test>"
"</result>" % (websitetest.name, websitetest.name))
finally:
try:
os.remove(results_path)
except Exception:
pass
xml.write("</xml>")
xml.close()
| bsd-3-clause |
manazhao/tf_recsys | tensorflow/contrib/predictor/saved_model_predictor_test.py | 93 | 6114 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for predictor.saved_model_predictor."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.predictor import saved_model_predictor
from tensorflow.core.framework import tensor_shape_pb2
from tensorflow.core.framework import types_pb2
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.python.framework import ops
from tensorflow.python.platform import test
from tensorflow.python.saved_model import signature_def_utils
KEYS_AND_OPS = (('sum', lambda x, y: x + y),
('product', lambda x, y: x * y,),
('difference', lambda x, y: x - y))
MODEL_DIR_NAME = 'contrib/predictor/test_export_dir'
class SavedModelPredictorTest(test.TestCase):
@classmethod
def setUpClass(cls):
# Load a saved model exported from the arithmetic `Estimator`.
# See `testing_common.py`.
cls._export_dir = test.test_src_dir_path(MODEL_DIR_NAME)
def testDefault(self):
"""Test prediction with default signature."""
np.random.seed(1111)
x = np.random.rand()
y = np.random.rand()
predictor = saved_model_predictor.SavedModelPredictor(
export_dir=self._export_dir)
output = predictor({'x': x, 'y': y})['outputs']
self.assertAlmostEqual(output, x + y, places=3)
def testSpecifiedSignatureKey(self):
"""Test prediction with spedicified signature key."""
np.random.seed(1234)
for signature_def_key, op in KEYS_AND_OPS:
x = np.random.rand()
y = np.random.rand()
expected_output = op(x, y)
predictor = saved_model_predictor.SavedModelPredictor(
export_dir=self._export_dir,
signature_def_key=signature_def_key)
output_tensor_name = predictor.fetch_tensors['outputs'].name
self.assertRegexpMatches(
output_tensor_name,
signature_def_key,
msg='Unexpected fetch tensor.')
output = predictor({'x': x, 'y': y})['outputs']
self.assertAlmostEqual(
expected_output, output, places=3,
msg='Failed for signature "{}." '
'Got output {} for x = {} and y = {}'.format(
signature_def_key, output, x, y))
def testSpecifiedSignature(self):
"""Test prediction with spedicified signature definition."""
np.random.seed(4444)
for key, op in KEYS_AND_OPS:
x = np.random.rand()
y = np.random.rand()
expected_output = op(x, y)
inputs = {
'x': meta_graph_pb2.TensorInfo(
name='inputs/x:0',
dtype=types_pb2.DT_FLOAT,
tensor_shape=tensor_shape_pb2.TensorShapeProto()),
'y': meta_graph_pb2.TensorInfo(
name='inputs/y:0',
dtype=types_pb2.DT_FLOAT,
tensor_shape=tensor_shape_pb2.TensorShapeProto())}
outputs = {
key: meta_graph_pb2.TensorInfo(
name='outputs/{}:0'.format(key),
dtype=types_pb2.DT_FLOAT,
tensor_shape=tensor_shape_pb2.TensorShapeProto())}
signature_def = signature_def_utils.build_signature_def(
inputs=inputs,
outputs=outputs,
method_name='tensorflow/serving/regress')
predictor = saved_model_predictor.SavedModelPredictor(
export_dir=self._export_dir,
signature_def=signature_def)
output_tensor_name = predictor.fetch_tensors[key].name
self.assertRegexpMatches(
output_tensor_name,
key,
msg='Unexpected fetch tensor.')
output = predictor({'x': x, 'y': y})[key]
self.assertAlmostEqual(
expected_output, output, places=3,
msg='Failed for signature "{}". '
'Got output {} for x = {} and y = {}'.format(key, output, x, y))
def testSpecifiedTensors(self):
"""Test prediction with spedicified `Tensor`s."""
np.random.seed(987)
for key, op in KEYS_AND_OPS:
x = np.random.rand()
y = np.random.rand()
expected_output = op(x, y)
input_names = {'x': 'inputs/x:0',
'y': 'inputs/y:0'}
output_names = {key: 'outputs/{}:0'.format(key)}
predictor = saved_model_predictor.SavedModelPredictor(
export_dir=self._export_dir,
input_names=input_names,
output_names=output_names)
output_tensor_name = predictor.fetch_tensors[key].name
self.assertRegexpMatches(
output_tensor_name,
key,
msg='Unexpected fetch tensor.')
output = predictor({'x': x, 'y': y})[key]
self.assertAlmostEqual(
expected_output, output, places=3,
msg='Failed for signature "{}". '
'Got output {} for x = {} and y = {}'.format(key, output, x, y))
def testBadTagsFail(self):
"""Test that predictor construction fails for bad tags."""
bad_tags_regex = ('.* could not be found in SavedModel')
with self.assertRaisesRegexp(RuntimeError, bad_tags_regex):
_ = saved_model_predictor.SavedModelPredictor(
export_dir=self._export_dir,
tags=('zomg, bad, tags'))
def testSpecifiedGraph(self):
"""Test that the predictor remembers a specified `Graph`."""
g = ops.Graph()
predictor = saved_model_predictor.SavedModelPredictor(
export_dir=self._export_dir,
graph=g)
self.assertEqual(predictor.graph, g)
if __name__ == '__main__':
test.main()
| apache-2.0 |
googyanas/Googy-Max2-Kernel | scripts/rt-tester/rt-tester.py | 11005 | 5307 | #!/usr/bin/python
#
# rt-mutex tester
#
# (C) 2006 Thomas Gleixner <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
import os
import sys
import getopt
import shutil
import string
# Globals
quiet = 0
test = 0
comments = 0
sysfsprefix = "/sys/devices/system/rttest/rttest"
statusfile = "/status"
commandfile = "/command"
# Command opcodes
cmd_opcodes = {
"schedother" : "1",
"schedfifo" : "2",
"lock" : "3",
"locknowait" : "4",
"lockint" : "5",
"lockintnowait" : "6",
"lockcont" : "7",
"unlock" : "8",
"signal" : "11",
"resetevent" : "98",
"reset" : "99",
}
test_opcodes = {
"prioeq" : ["P" , "eq" , None],
"priolt" : ["P" , "lt" , None],
"priogt" : ["P" , "gt" , None],
"nprioeq" : ["N" , "eq" , None],
"npriolt" : ["N" , "lt" , None],
"npriogt" : ["N" , "gt" , None],
"unlocked" : ["M" , "eq" , 0],
"trylock" : ["M" , "eq" , 1],
"blocked" : ["M" , "eq" , 2],
"blockedwake" : ["M" , "eq" , 3],
"locked" : ["M" , "eq" , 4],
"opcodeeq" : ["O" , "eq" , None],
"opcodelt" : ["O" , "lt" , None],
"opcodegt" : ["O" , "gt" , None],
"eventeq" : ["E" , "eq" , None],
"eventlt" : ["E" , "lt" , None],
"eventgt" : ["E" , "gt" , None],
}
# Print usage information
def usage():
print "rt-tester.py <-c -h -q -t> <testfile>"
print " -c display comments after first command"
print " -h help"
print " -q quiet mode"
print " -t test mode (syntax check)"
print " testfile: read test specification from testfile"
print " otherwise from stdin"
return
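# Illustrative test specification sketch (not shipped with this script): each
# line has the form "cmd:opcode:threadid:data", where 'c' issues a command and
# 't'/'w' test or wait for a status value. Opcodes and data values below are
# examples only.
#
#   C: schedfifo:  0: 80
#   C: lock:       0: 0
#   T: locked:     0: 0
#   C: unlock:     0: 0
#   T: unlocked:   0: 0
#   C: reset:      0: 0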
# Print progress when not in quiet mode
def progress(str):
if not quiet:
print str
# Analyse a status value
def analyse(val, top, arg):
intval = int(val)
if top[0] == "M":
intval = intval / (10 ** int(arg))
intval = intval % 10
argval = top[2]
elif top[0] == "O":
argval = int(cmd_opcodes.get(arg, arg))
else:
argval = int(arg)
# progress("%d %s %d" %(intval, top[1], argval))
if top[1] == "eq" and intval == argval:
return 1
if top[1] == "lt" and intval < argval:
return 1
if top[1] == "gt" and intval > argval:
return 1
return 0
# Parse the commandline
try:
(options, arguments) = getopt.getopt(sys.argv[1:],'chqt')
except getopt.GetoptError, ex:
usage()
sys.exit(1)
# Parse commandline options
for option, value in options:
if option == "-c":
comments = 1
elif option == "-q":
quiet = 1
elif option == "-t":
test = 1
elif option == '-h':
usage()
sys.exit(0)
# Select the input source
if arguments:
try:
fd = open(arguments[0])
except Exception,ex:
sys.stderr.write("File not found %s\n" %(arguments[0]))
sys.exit(1)
else:
fd = sys.stdin
linenr = 0
# Read the test patterns
while 1:
linenr = linenr + 1
line = fd.readline()
if not len(line):
break
line = line.strip()
parts = line.split(":")
if not parts or len(parts) < 1:
continue
if len(parts[0]) == 0:
continue
if parts[0].startswith("#"):
if comments > 1:
progress(line)
continue
if comments == 1:
comments = 2
progress(line)
cmd = parts[0].strip().lower()
opc = parts[1].strip().lower()
tid = parts[2].strip()
dat = parts[3].strip()
try:
# Test or wait for a status value
if cmd == "t" or cmd == "w":
testop = test_opcodes[opc]
fname = "%s%s%s" %(sysfsprefix, tid, statusfile)
if test:
print fname
continue
while 1:
query = 1
fsta = open(fname, 'r')
status = fsta.readline().strip()
fsta.close()
stat = status.split(",")
for s in stat:
s = s.strip()
if s.startswith(testop[0]):
# Separate status value
val = s[2:].strip()
query = analyse(val, testop, dat)
break
if query or cmd == "t":
break
progress(" " + status)
if not query:
sys.stderr.write("Test failed in line %d\n" %(linenr))
sys.exit(1)
# Issue a command to the tester
elif cmd == "c":
cmdnr = cmd_opcodes[opc]
# Build command string and sys filename
cmdstr = "%s:%s" %(cmdnr, dat)
fname = "%s%s%s" %(sysfsprefix, tid, commandfile)
if test:
print fname
continue
fcmd = open(fname, 'w')
fcmd.write(cmdstr)
fcmd.close()
except Exception,ex:
sys.stderr.write(str(ex))
sys.stderr.write("\nSyntax error in line %d\n" %(linenr))
if not test:
fd.close()
sys.exit(1)
# Normal exit pass
print "Pass"
sys.exit(0)
| gpl-2.0 |
zahodi/ansible | test/units/vars/test_variable_manager.py | 32 | 14137 | # (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from collections import defaultdict
from ansible.compat.six import iteritems
from ansible.compat.six.moves import builtins
from ansible.compat.tests import unittest
from ansible.compat.tests.mock import MagicMock, mock_open, patch
from ansible.inventory import Inventory
from ansible.playbook.play import Play
from units.mock.loader import DictDataLoader
from units.mock.path import mock_unfrackpath_noop
from ansible.vars import VariableManager
class TestVariableManager(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_basic_manager(self):
fake_loader = DictDataLoader({})
v = VariableManager()
vars = v.get_vars(loader=fake_loader, use_cache=False)
#FIXME: not sure why we remove all and only test playbook_dir
for remove in ['omit', 'vars', 'ansible_version', 'ansible_check_mode', 'ansible_playbook_python']:
if remove in vars:
del vars[remove]
self.assertEqual(vars, dict(playbook_dir='.'))
def test_variable_manager_extra_vars(self):
fake_loader = DictDataLoader({})
extra_vars = dict(a=1, b=2, c=3)
v = VariableManager()
v.extra_vars = extra_vars
vars = v.get_vars(loader=fake_loader, use_cache=False)
for (key, val) in iteritems(extra_vars):
self.assertEqual(vars.get(key), val)
self.assertIsNot(v.extra_vars, extra_vars)
def test_variable_manager_host_vars_file(self):
fake_loader = DictDataLoader({
"host_vars/hostname1.yml": """
foo: bar
""",
"other_path/host_vars/hostname1.yml": """
foo: bam
baa: bat
""",
"host_vars/host.name.yml": """
host_with_dots: true
""",
})
v = VariableManager()
v.add_host_vars_file("host_vars/hostname1.yml", loader=fake_loader)
v.add_host_vars_file("other_path/host_vars/hostname1.yml", loader=fake_loader)
self.assertIn("hostname1", v._host_vars_files)
self.assertEqual(v._host_vars_files["hostname1"], [dict(foo="bar"), dict(foo="bam", baa="bat")])
mock_host = MagicMock()
mock_host.get_name.return_value = "hostname1"
mock_host.get_vars.return_value = dict()
mock_host.get_groups.return_value = ()
mock_host.get_group_vars.return_value = dict()
self.assertEqual(v.get_vars(loader=fake_loader, host=mock_host, use_cache=False).get("foo"), "bam")
self.assertEqual(v.get_vars(loader=fake_loader, host=mock_host, use_cache=False).get("baa"), "bat")
v.add_host_vars_file("host_vars/host.name", loader=fake_loader)
self.assertEqual(v._host_vars_files["host.name"], [dict(host_with_dots=True)])
def test_variable_manager_group_vars_file(self):
fake_loader = DictDataLoader({
"group_vars/all.yml": """
foo: bar
""",
"group_vars/somegroup.yml": """
bam: baz
""",
"other_path/group_vars/somegroup.yml": """
baa: bat
""",
"group_vars/some.group.yml": """
group_with_dots: true
""",
})
v = VariableManager()
v.add_group_vars_file("group_vars/all.yml", loader=fake_loader)
v.add_group_vars_file("group_vars/somegroup.yml", loader=fake_loader)
v.add_group_vars_file("other_path/group_vars/somegroup.yml", loader=fake_loader)
self.assertIn("somegroup", v._group_vars_files)
self.assertEqual(v._group_vars_files["all"], [dict(foo="bar")])
self.assertEqual(v._group_vars_files["somegroup"], [dict(bam="baz"), dict(baa="bat")])
mock_group = MagicMock()
mock_group.name = "somegroup"
mock_group.get_ancestors.return_value = ()
mock_group.get_vars.return_value = dict()
mock_host = MagicMock()
mock_host.get_name.return_value = "hostname1"
mock_host.get_vars.return_value = dict()
mock_host.get_groups.return_value = (mock_group,)
mock_host.get_group_vars.return_value = dict()
vars = v.get_vars(loader=fake_loader, host=mock_host, use_cache=False)
self.assertEqual(vars.get("foo"), "bar")
self.assertEqual(vars.get("baa"), "bat")
v.add_group_vars_file("group_vars/some.group", loader=fake_loader)
self.assertEqual(v._group_vars_files["some.group"], [dict(group_with_dots=True)])
def test_variable_manager_play_vars(self):
fake_loader = DictDataLoader({})
mock_play = MagicMock()
mock_play.get_vars.return_value = dict(foo="bar")
mock_play.get_roles.return_value = []
mock_play.get_vars_files.return_value = []
v = VariableManager()
self.assertEqual(v.get_vars(loader=fake_loader, play=mock_play, use_cache=False).get("foo"), "bar")
def test_variable_manager_play_vars_files(self):
fake_loader = DictDataLoader({
"/path/to/somefile.yml": """
foo: bar
"""
})
mock_play = MagicMock()
mock_play.get_vars.return_value = dict()
mock_play.get_roles.return_value = []
mock_play.get_vars_files.return_value = ['/path/to/somefile.yml']
v = VariableManager()
self.assertEqual(v.get_vars(loader=fake_loader, play=mock_play, use_cache=False).get("foo"), "bar")
def test_variable_manager_task_vars(self):
fake_loader = DictDataLoader({})
mock_task = MagicMock()
mock_task._role = None
mock_task.loop = None
mock_task.get_vars.return_value = dict(foo="bar")
mock_task.get_include_params.return_value = dict()
v = VariableManager()
self.assertEqual(v.get_vars(loader=fake_loader, task=mock_task, use_cache=False).get("foo"), "bar")
@patch.object(Inventory, 'basedir')
@patch('ansible.playbook.role.definition.unfrackpath', mock_unfrackpath_noop)
def test_variable_manager_precedence(self, mock_basedir):
'''
Tests complex variations and combinations of get_vars() with different
objects to modify the context under which variables are merged.
'''
v = VariableManager()
v._fact_cache = defaultdict(dict)
inventory1_filedata = """
[group2:children]
group1
[group1]
host1 host_var=host_var_from_inventory_host1
[group1:vars]
group_var = group_var_from_inventory_group1
[group2:vars]
group_var = group_var_from_inventory_group2
"""
fake_loader = DictDataLoader({
# inventory1
'/etc/ansible/inventory1': inventory1_filedata,
# role defaults_only1
'/etc/ansible/roles/defaults_only1/defaults/main.yml': """
default_var: "default_var_from_defaults_only1"
host_var: "host_var_from_defaults_only1"
group_var: "group_var_from_defaults_only1"
group_var_all: "group_var_all_from_defaults_only1"
extra_var: "extra_var_from_defaults_only1"
""",
'/etc/ansible/roles/defaults_only1/tasks/main.yml': """
- debug: msg="here i am"
""",
# role defaults_only2
'/etc/ansible/roles/defaults_only2/defaults/main.yml': """
default_var: "default_var_from_defaults_only2"
host_var: "host_var_from_defaults_only2"
group_var: "group_var_from_defaults_only2"
group_var_all: "group_var_all_from_defaults_only2"
extra_var: "extra_var_from_defaults_only2"
""",
})
mock_basedir.return_value = './'
with patch.object(builtins, 'open', mock_open(read_data=inventory1_filedata)):
inv1 = Inventory(loader=fake_loader, variable_manager=v, host_list='/etc/ansible/inventory1')
inv1.set_playbook_basedir('./')
play1 = Play.load(dict(
hosts=['all'],
roles=['defaults_only1', 'defaults_only2'],
), loader=fake_loader, variable_manager=v)
# first we assert that the defaults as viewed as a whole are the merged results
# of the defaults from each role, with the last role defined "winning" when
# there is a variable naming conflict
res = v.get_vars(loader=fake_loader, play=play1)
self.assertEqual(res['default_var'], 'default_var_from_defaults_only2')
# next, we assert that when vars are viewed from the context of a task within a
# role, that task will see its own role defaults before any other role's
blocks = play1.compile()
task = blocks[1].block[0]
res = v.get_vars(loader=fake_loader, play=play1, task=task)
self.assertEqual(res['default_var'], 'default_var_from_defaults_only1')
        # next we assert the precedence of inventory variables
v.set_inventory(inv1)
h1 = inv1.get_host('host1')
res = v.get_vars(loader=fake_loader, play=play1, host=h1)
self.assertEqual(res['group_var'], 'group_var_from_inventory_group1')
self.assertEqual(res['host_var'], 'host_var_from_inventory_host1')
# next we test with group_vars/ files loaded
fake_loader.push("/etc/ansible/group_vars/all", """
group_var_all: group_var_all_from_group_vars_all
""")
fake_loader.push("/etc/ansible/group_vars/group1", """
group_var: group_var_from_group_vars_group1
""")
fake_loader.push("/etc/ansible/group_vars/group3", """
# this is a dummy, which should not be used anywhere
group_var: group_var_from_group_vars_group3
""")
fake_loader.push("/etc/ansible/host_vars/host1", """
host_var: host_var_from_host_vars_host1
""")
fake_loader.push("group_vars/group1", """
playbook_group_var: playbook_group_var
""")
fake_loader.push("host_vars/host1", """
playbook_host_var: playbook_host_var
""")
v.add_group_vars_file("/etc/ansible/group_vars/all", loader=fake_loader)
v.add_group_vars_file("/etc/ansible/group_vars/group1", loader=fake_loader)
v.add_group_vars_file("/etc/ansible/group_vars/group2", loader=fake_loader)
v.add_group_vars_file("group_vars/group1", loader=fake_loader)
v.add_host_vars_file("/etc/ansible/host_vars/host1", loader=fake_loader)
v.add_host_vars_file("host_vars/host1", loader=fake_loader)
res = v.get_vars(loader=fake_loader, play=play1, host=h1)
self.assertEqual(res['group_var'], 'group_var_from_group_vars_group1')
self.assertEqual(res['group_var_all'], 'group_var_all_from_group_vars_all')
self.assertEqual(res['playbook_group_var'], 'playbook_group_var')
self.assertEqual(res['host_var'], 'host_var_from_host_vars_host1')
self.assertEqual(res['playbook_host_var'], 'playbook_host_var')
# add in the fact cache
v._fact_cache['host1'] = dict(fact_cache_var="fact_cache_var_from_fact_cache")
res = v.get_vars(loader=fake_loader, play=play1, host=h1)
self.assertEqual(res['fact_cache_var'], 'fact_cache_var_from_fact_cache')
@patch('ansible.playbook.role.definition.unfrackpath', mock_unfrackpath_noop)
def test_variable_manager_role_vars_dependencies(self):
'''
Tests vars from role dependencies with duplicate dependencies.
'''
v = VariableManager()
v._fact_cache = defaultdict(dict)
fake_loader = DictDataLoader({
# role common-role
'/etc/ansible/roles/common-role/tasks/main.yml': """
- debug: msg="{{role_var}}"
""",
# We do not need allow_duplicates: yes for this role
# because eliminating duplicates is done by the execution
# strategy, which we do not test here.
# role role1
'/etc/ansible/roles/role1/vars/main.yml': """
role_var: "role_var_from_role1"
""",
'/etc/ansible/roles/role1/meta/main.yml': """
dependencies:
- { role: common-role }
""",
# role role2
'/etc/ansible/roles/role2/vars/main.yml': """
role_var: "role_var_from_role2"
""",
'/etc/ansible/roles/role2/meta/main.yml': """
dependencies:
- { role: common-role }
""",
})
play1 = Play.load(dict(
hosts=['all'],
roles=['role1', 'role2'],
), loader=fake_loader, variable_manager=v)
# The task defined by common-role exists twice because role1
# and role2 depend on common-role. Check that the tasks see
# different values of role_var.
blocks = play1.compile()
task = blocks[1].block[0]
res = v.get_vars(loader=fake_loader, play=play1, task=task)
self.assertEqual(res['role_var'], 'role_var_from_role1')
task = blocks[2].block[0]
res = v.get_vars(loader=fake_loader, play=play1, task=task)
self.assertEqual(res['role_var'], 'role_var_from_role2')
| gpl-3.0 |
kenshay/ImageScripter | ProgramData/SystemFiles/Python/Lib/site-packages/pandas/io/stata.py | 7 | 82769 | """
Module contains tools for processing Stata files into DataFrames
The StataReader below was originally written by Joe Presbrey as part of PyDTA.
It has been extended and improved by Skipper Seabold from the Statsmodels
project who also developed the StataWriter and was finally added to pandas in
a once again improved version.
You can find more information on http://presbrey.mit.edu/PyDTA and
http://www.statsmodels.org/devel/
"""
import numpy as np
import sys
import struct
from dateutil.relativedelta import relativedelta
from pandas.types.common import (is_categorical_dtype, is_datetime64_dtype,
_ensure_object)
from pandas.core.base import StringMixin
from pandas.core.categorical import Categorical
from pandas.core.frame import DataFrame
from pandas.core.series import Series
import datetime
from pandas import compat, to_timedelta, to_datetime, isnull, DatetimeIndex
from pandas.compat import lrange, lmap, lzip, text_type, string_types, range, \
zip, BytesIO
from pandas.util.decorators import Appender
import pandas as pd
from pandas.io.common import get_filepath_or_buffer, BaseIterator
from pandas.lib import max_len_string_array, infer_dtype
from pandas.tslib import NaT, Timestamp
_version_error = ("Version of given Stata file is not 104, 105, 108, "
"111 (Stata 7SE), 113 (Stata 8/9), 114 (Stata 10/11), "
"115 (Stata 12), 117 (Stata 13), or 118 (Stata 14)")
_statafile_processing_params1 = """\
convert_dates : boolean, defaults to True
Convert date variables to DataFrame time values
convert_categoricals : boolean, defaults to True
Read value labels and convert columns to Categorical/Factor variables"""
_encoding_params = """\
encoding : string, None or encoding
Encoding used to parse the files. None defaults to iso-8859-1."""
_statafile_processing_params2 = """\
index : identifier of index column
identifier of column that should be used as index of the DataFrame
convert_missing : boolean, defaults to False
Flag indicating whether to convert missing values to their Stata
representations. If False, missing values are replaced with nans.
If True, columns containing missing values are returned with
object data types and missing values are represented by
StataMissingValue objects.
preserve_dtypes : boolean, defaults to True
Preserve Stata datatypes. If False, numeric data are upcast to pandas
default types for foreign data (float64 or int64)
columns : list or None
Columns to retain. Columns will be returned in the given order. None
returns all columns
order_categoricals : boolean, defaults to True
Flag indicating whether converted categorical data are ordered."""
_chunksize_params = """\
chunksize : int, default None
Return StataReader object for iterations, returns chunks with
given number of lines"""
_iterator_params = """\
iterator : boolean, default False
Return StataReader object"""
_read_stata_doc = """Read Stata file into DataFrame
Parameters
----------
filepath_or_buffer : string or file-like object
Path to .dta file or object implementing a binary read() functions
%s
%s
%s
%s
%s
Returns
-------
DataFrame or StataReader
Examples
--------
Read a Stata dta file:
>>> df = pandas.read_stata('filename.dta')
Read a Stata dta file in 10,000 line chunks:
>>> itr = pandas.read_stata('filename.dta', chunksize=10000)
>>> for chunk in itr:
>>> do_something(chunk)
""" % (_statafile_processing_params1, _encoding_params,
_statafile_processing_params2, _chunksize_params,
_iterator_params)
_data_method_doc = """Reads observations from Stata file, converting them into a dataframe
This is a legacy method. Use `read` in new code.
Parameters
----------
%s
%s
Returns
-------
DataFrame
""" % (_statafile_processing_params1, _statafile_processing_params2)
_read_method_doc = """\
Reads observations from Stata file, converting them into a dataframe
Parameters
----------
nrows : int
Number of lines to read from data file, if None read whole file.
%s
%s
Returns
-------
DataFrame
""" % (_statafile_processing_params1, _statafile_processing_params2)
_stata_reader_doc = """\
Class for reading Stata dta files.
Parameters
----------
path_or_buf : string or file-like object
Path to .dta file or object implementing a binary read() functions
%s
%s
%s
%s
""" % (_statafile_processing_params1, _statafile_processing_params2,
_encoding_params, _chunksize_params)
@Appender(_read_stata_doc)
def read_stata(filepath_or_buffer, convert_dates=True,
convert_categoricals=True, encoding=None, index=None,
convert_missing=False, preserve_dtypes=True, columns=None,
order_categoricals=True, chunksize=None, iterator=False):
reader = StataReader(filepath_or_buffer,
convert_dates=convert_dates,
convert_categoricals=convert_categoricals,
index=index, convert_missing=convert_missing,
preserve_dtypes=preserve_dtypes,
columns=columns,
order_categoricals=order_categoricals,
chunksize=chunksize, encoding=encoding)
if iterator or chunksize:
data = reader
else:
data = reader.read()
reader.close()
return data
_date_formats = ["%tc", "%tC", "%td", "%d", "%tw", "%tm", "%tq", "%th", "%ty"]
stata_epoch = datetime.datetime(1960, 1, 1)
def _stata_elapsed_date_to_datetime_vec(dates, fmt):
"""
Convert from SIF to datetime. http://www.stata.com/help.cgi?datetime
Parameters
----------
dates : Series
The Stata Internal Format date to convert to datetime according to fmt
fmt : str
The format to convert to. Can be, tc, td, tw, tm, tq, th, ty
    Returns
-------
converted : Series
The converted dates
Examples
--------
>>> import pandas as pd
>>> dates = pd.Series([52])
>>> _stata_elapsed_date_to_datetime_vec(dates , "%tw")
0 1961-01-01
dtype: datetime64[ns]
Notes
-----
datetime/c - tc
milliseconds since 01jan1960 00:00:00.000, assuming 86,400 s/day
datetime/C - tC - NOT IMPLEMENTED
milliseconds since 01jan1960 00:00:00.000, adjusted for leap seconds
date - td
days since 01jan1960 (01jan1960 = 0)
weekly date - tw
weeks since 1960w1
This assumes 52 weeks in a year, then adds 7 * remainder of the weeks.
The datetime value is the start of the week in terms of days in the
year, not ISO calendar weeks.
monthly date - tm
months since 1960m1
quarterly date - tq
quarters since 1960q1
half-yearly date - th
        half-years since 1960h1
    yearly date - ty
years since 0000
If you don't have pandas with datetime support, then you can't do
milliseconds accurately.
"""
MIN_YEAR, MAX_YEAR = Timestamp.min.year, Timestamp.max.year
MAX_DAY_DELTA = (Timestamp.max - datetime.datetime(1960, 1, 1)).days
MIN_DAY_DELTA = (Timestamp.min - datetime.datetime(1960, 1, 1)).days
MIN_MS_DELTA = MIN_DAY_DELTA * 24 * 3600 * 1000
MAX_MS_DELTA = MAX_DAY_DELTA * 24 * 3600 * 1000
def convert_year_month_safe(year, month):
"""
Convert year and month to datetimes, using pandas vectorized versions
        when the date range falls within the range supported by pandas.
        Otherwise it falls back to a slower but more robust method using
        datetime.
"""
if year.max() < MAX_YEAR and year.min() > MIN_YEAR:
return to_datetime(100 * year + month, format='%Y%m')
else:
index = getattr(year, 'index', None)
return Series(
[datetime.datetime(y, m, 1) for y, m in zip(year, month)],
index=index)
def convert_year_days_safe(year, days):
"""
Converts year (e.g. 1999) and days since the start of the year to a
datetime or datetime64 Series
"""
if year.max() < (MAX_YEAR - 1) and year.min() > MIN_YEAR:
return (to_datetime(year, format='%Y') +
to_timedelta(days, unit='d'))
else:
index = getattr(year, 'index', None)
value = [datetime.datetime(y, 1, 1) + relativedelta(days=int(d))
for y, d in zip(year, days)]
return Series(value, index=index)
def convert_delta_safe(base, deltas, unit):
"""
Convert base dates and deltas to datetimes, using pandas vectorized
versions if the deltas satisfy restrictions required to be expressed
as dates in pandas.
"""
index = getattr(deltas, 'index', None)
if unit == 'd':
if deltas.max() > MAX_DAY_DELTA or deltas.min() < MIN_DAY_DELTA:
values = [base + relativedelta(days=int(d)) for d in deltas]
return Series(values, index=index)
elif unit == 'ms':
if deltas.max() > MAX_MS_DELTA or deltas.min() < MIN_MS_DELTA:
values = [base + relativedelta(microseconds=(int(d) * 1000))
for d in deltas]
return Series(values, index=index)
else:
raise ValueError('format not understood')
base = to_datetime(base)
deltas = to_timedelta(deltas, unit=unit)
return base + deltas
# TODO: If/when pandas supports more than datetime64[ns], this should be
# improved to use correct range, e.g. datetime[Y] for yearly
bad_locs = np.isnan(dates)
has_bad_values = False
if bad_locs.any():
has_bad_values = True
data_col = Series(dates)
data_col[bad_locs] = 1.0 # Replace with NaT
dates = dates.astype(np.int64)
if fmt in ["%tc", "tc"]: # Delta ms relative to base
base = stata_epoch
ms = dates
conv_dates = convert_delta_safe(base, ms, 'ms')
elif fmt in ["%tC", "tC"]:
from warnings import warn
warn("Encountered %tC format. Leaving in Stata Internal Format.")
conv_dates = Series(dates, dtype=np.object)
if has_bad_values:
conv_dates[bad_locs] = pd.NaT
return conv_dates
elif fmt in ["%td", "td", "%d", "d"]: # Delta days relative to base
base = stata_epoch
days = dates
conv_dates = convert_delta_safe(base, days, 'd')
elif fmt in ["%tw", "tw"]: # does not count leap days - 7 days is a week
year = stata_epoch.year + dates // 52
days = (dates % 52) * 7
conv_dates = convert_year_days_safe(year, days)
elif fmt in ["%tm", "tm"]: # Delta months relative to base
year = stata_epoch.year + dates // 12
month = (dates % 12) + 1
conv_dates = convert_year_month_safe(year, month)
elif fmt in ["%tq", "tq"]: # Delta quarters relative to base
year = stata_epoch.year + dates // 4
month = (dates % 4) * 3 + 1
conv_dates = convert_year_month_safe(year, month)
elif fmt in ["%th", "th"]: # Delta half-years relative to base
year = stata_epoch.year + dates // 2
month = (dates % 2) * 6 + 1
conv_dates = convert_year_month_safe(year, month)
elif fmt in ["%ty", "ty"]: # Years -- not delta
year = dates
month = np.ones_like(dates)
conv_dates = convert_year_month_safe(year, month)
else:
raise ValueError("Date fmt %s not understood" % fmt)
if has_bad_values: # Restore NaT for bad values
conv_dates[bad_locs] = NaT
return conv_dates
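# Illustrative sketch (editor addition, not part of the original module):
# a monthly SIF value counts months since 1960m1, so a value of 1 maps to
# 1960-02-01.
#   >>> _stata_elapsed_date_to_datetime_vec(pd.Series([1]), "%tm")
#   0   1960-02-01
#   dtype: datetime64[ns]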
def _datetime_to_stata_elapsed_vec(dates, fmt):
"""
Convert from datetime to SIF. http://www.stata.com/help.cgi?datetime
Parameters
----------
dates : Series
Series or array containing datetime.datetime or datetime64[ns] to
convert to the Stata Internal Format given by fmt
fmt : str
The format to convert to. Can be, tc, td, tw, tm, tq, th, ty
"""
index = dates.index
NS_PER_DAY = 24 * 3600 * 1000 * 1000 * 1000
US_PER_DAY = NS_PER_DAY / 1000
def parse_dates_safe(dates, delta=False, year=False, days=False):
d = {}
if is_datetime64_dtype(dates.values):
if delta:
delta = dates - stata_epoch
d['delta'] = delta.values.astype(
np.int64) // 1000 # microseconds
if days or year:
dates = DatetimeIndex(dates)
d['year'], d['month'] = dates.year, dates.month
if days:
days = (dates.astype(np.int64) -
to_datetime(d['year'], format='%Y').astype(np.int64))
d['days'] = days // NS_PER_DAY
elif infer_dtype(dates) == 'datetime':
if delta:
delta = dates.values - stata_epoch
f = lambda x: \
US_PER_DAY * x.days + 1000000 * x.seconds + x.microseconds
v = np.vectorize(f)
d['delta'] = v(delta)
if year:
year_month = dates.apply(lambda x: 100 * x.year + x.month)
d['year'] = year_month.values // 100
d['month'] = (year_month.values - d['year'] * 100)
if days:
f = lambda x: (x - datetime.datetime(x.year, 1, 1)).days
v = np.vectorize(f)
d['days'] = v(dates)
else:
raise ValueError('Columns containing dates must contain either '
'datetime64, datetime.datetime or null values.')
return DataFrame(d, index=index)
bad_loc = isnull(dates)
index = dates.index
if bad_loc.any():
dates = Series(dates)
if is_datetime64_dtype(dates):
dates[bad_loc] = to_datetime(stata_epoch)
else:
dates[bad_loc] = stata_epoch
if fmt in ["%tc", "tc"]:
d = parse_dates_safe(dates, delta=True)
conv_dates = d.delta / 1000
elif fmt in ["%tC", "tC"]:
from warnings import warn
warn("Stata Internal Format tC not supported.")
conv_dates = dates
elif fmt in ["%td", "td"]:
d = parse_dates_safe(dates, delta=True)
conv_dates = d.delta // US_PER_DAY
elif fmt in ["%tw", "tw"]:
d = parse_dates_safe(dates, year=True, days=True)
conv_dates = (52 * (d.year - stata_epoch.year) + d.days // 7)
elif fmt in ["%tm", "tm"]:
d = parse_dates_safe(dates, year=True)
conv_dates = (12 * (d.year - stata_epoch.year) + d.month - 1)
elif fmt in ["%tq", "tq"]:
d = parse_dates_safe(dates, year=True)
conv_dates = 4 * (d.year - stata_epoch.year) + (d.month - 1) // 3
elif fmt in ["%th", "th"]:
d = parse_dates_safe(dates, year=True)
conv_dates = 2 * (d.year - stata_epoch.year) + \
(d.month > 6).astype(np.int)
elif fmt in ["%ty", "ty"]:
d = parse_dates_safe(dates, year=True)
conv_dates = d.year
else:
raise ValueError("Format %s is not a known Stata date format" % fmt)
conv_dates = Series(conv_dates, dtype=np.float64)
missing_value = struct.unpack('<d', b'\x00\x00\x00\x00\x00\x00\xe0\x7f')[0]
conv_dates[bad_loc] = missing_value
return Series(conv_dates, index=index)
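# Illustrative sketch of the inverse conversion (editor addition): writing
# the same month back to SIF yields 1.0, since conv_dates is returned as
# float64.
#   >>> _datetime_to_stata_elapsed_vec(
#   ...     pd.Series([datetime.datetime(1960, 2, 1)]), "%tm")
#   0    1.0
#   dtype: float64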
excessive_string_length_error = """
Fixed width strings in Stata .dta files are limited to 244 (or fewer)
characters. Column '%s' does not satisfy this restriction.
"""
class PossiblePrecisionLoss(Warning):
pass
precision_loss_doc = """
Column converted from %s to %s, and some data are outside of the lossless
conversion range. This may result in a loss of precision in the saved data.
"""
class ValueLabelTypeMismatch(Warning):
pass
value_label_mismatch_doc = """
Stata value labels (pandas categories) must be strings. Column {0} contains
non-string labels which will be converted to strings. Please check that the
Stata data file created has not lost information due to duplicate labels.
"""
class InvalidColumnName(Warning):
pass
invalid_name_doc = """
Not all pandas column names were valid Stata variable names.
The following replacements have been made:
{0}
If this is not what you expect, please make sure you have Stata-compliant
column names in your DataFrame (strings only, max 32 characters, only
alphanumerics and underscores, no Stata reserved words)
"""
def _cast_to_stata_types(data):
"""Checks the dtypes of the columns of a pandas DataFrame for
compatibility with the data types and ranges supported by Stata, and
converts if necessary.
Parameters
----------
data : DataFrame
The DataFrame to check and convert
Notes
-----
Numeric columns in Stata must be one of int8, int16, int32, float32 or
float64, with some additional value restrictions. int8 and int16 columns
are checked for violations of the value restrictions and upcast if needed.
int64 data is not usable in Stata, and so it is downcast to int32 whenever
    the values are in the int32 range, and cast to float64 when larger than
this range. If the int64 values are outside of the range of those
perfectly representable as float64 values, a warning is raised.
    bool columns are cast to int8. uint columns are converted to int of the
    same size if there is no loss in precision, otherwise they are upcast to
    a larger type. uint64 is currently not supported since it is converted
    to object in a DataFrame.
"""
ws = ''
# original, if small, if large
conversion_data = ((np.bool, np.int8, np.int8),
(np.uint8, np.int8, np.int16),
(np.uint16, np.int16, np.int32),
(np.uint32, np.int32, np.int64))
float32_max = struct.unpack('<f', b'\xff\xff\xff\x7e')[0]
float64_max = struct.unpack('<d', b'\xff\xff\xff\xff\xff\xff\xdf\x7f')[0]
for col in data:
dtype = data[col].dtype
# Cast from unsupported types to supported types
for c_data in conversion_data:
if dtype == c_data[0]:
if data[col].max() <= np.iinfo(c_data[1]).max:
dtype = c_data[1]
else:
dtype = c_data[2]
if c_data[2] == np.float64: # Warn if necessary
if data[col].max() >= 2 ** 53:
ws = precision_loss_doc % ('uint64', 'float64')
data[col] = data[col].astype(dtype)
# Check values and upcast if necessary
if dtype == np.int8:
if data[col].max() > 100 or data[col].min() < -127:
data[col] = data[col].astype(np.int16)
elif dtype == np.int16:
if data[col].max() > 32740 or data[col].min() < -32767:
data[col] = data[col].astype(np.int32)
elif dtype == np.int64:
if (data[col].max() <= 2147483620 and
data[col].min() >= -2147483647):
data[col] = data[col].astype(np.int32)
else:
data[col] = data[col].astype(np.float64)
if data[col].max() >= 2 ** 53 or data[col].min() <= -2 ** 53:
ws = precision_loss_doc % ('int64', 'float64')
elif dtype in (np.float32, np.float64):
value = data[col].max()
if np.isinf(value):
msg = 'Column {0} has a maximum value of infinity which is ' \
'outside the range supported by Stata.'
raise ValueError(msg.format(col))
if dtype == np.float32 and value > float32_max:
data[col] = data[col].astype(np.float64)
elif dtype == np.float64:
if value > float64_max:
msg = 'Column {0} has a maximum value ({1}) outside the ' \
'range supported by Stata ({1})'
raise ValueError(msg.format(col, value, float64_max))
if ws:
import warnings
warnings.warn(ws, PossiblePrecisionLoss)
return data
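# Illustrative sketch (editor addition): the casting rules described in the
# docstring above applied to a small frame; bool becomes int8 and an
# in-range int64 column is downcast to int32.
#   >>> df = pd.DataFrame({'flag': [True, False],
#   ...                    'n': np.array([1, 2], dtype=np.int64)})
#   >>> _cast_to_stata_types(df).dtypes
#   flag     int8
#   n       int32
#   dtype: object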
class StataValueLabel(object):
"""
Parse a categorical column and prepare formatted output
Parameters
-----------
value : int8, int16, int32, float32 or float64
The Stata missing value code
Attributes
----------
string : string
String representation of the Stata missing value
value : int8, int16, int32, float32 or float64
The original encoded missing value
Methods
-------
generate_value_label
"""
def __init__(self, catarray):
self.labname = catarray.name
categories = catarray.cat.categories
self.value_labels = list(zip(np.arange(len(categories)), categories))
self.value_labels.sort(key=lambda x: x[0])
self.text_len = np.int32(0)
self.off = []
self.val = []
self.txt = []
self.n = 0
# Compute lengths and setup lists of offsets and labels
for vl in self.value_labels:
category = vl[1]
if not isinstance(category, string_types):
category = str(category)
import warnings
warnings.warn(value_label_mismatch_doc.format(catarray.name),
ValueLabelTypeMismatch)
self.off.append(self.text_len)
self.text_len += len(category) + 1 # +1 for the padding
self.val.append(vl[0])
self.txt.append(category)
self.n += 1
if self.text_len > 32000:
raise ValueError('Stata value labels for a single variable must '
'have a combined length less than 32,000 '
'characters.')
# Ensure int32
self.off = np.array(self.off, dtype=np.int32)
self.val = np.array(self.val, dtype=np.int32)
# Total length
self.len = 4 + 4 + 4 * self.n + 4 * self.n + self.text_len
def _encode(self, s):
"""
        Python 3 compatibility shim
"""
if compat.PY3:
return s.encode(self._encoding)
else:
return s
def generate_value_label(self, byteorder, encoding):
"""
Parameters
----------
byteorder : str
Byte order of the output
encoding : str
File encoding
Returns
-------
value_label : bytes
Bytes containing the formatted value label
"""
self._encoding = encoding
bio = BytesIO()
null_string = '\x00'
null_byte = b'\x00'
# len
bio.write(struct.pack(byteorder + 'i', self.len))
# labname
labname = self._encode(_pad_bytes(self.labname[:32], 33))
bio.write(labname)
# padding - 3 bytes
for i in range(3):
bio.write(struct.pack('c', null_byte))
# value_label_table
# n - int32
bio.write(struct.pack(byteorder + 'i', self.n))
# textlen - int32
bio.write(struct.pack(byteorder + 'i', self.text_len))
# off - int32 array (n elements)
for offset in self.off:
bio.write(struct.pack(byteorder + 'i', offset))
# val - int32 array (n elements)
for value in self.val:
bio.write(struct.pack(byteorder + 'i', value))
# txt - Text labels, null terminated
for text in self.txt:
bio.write(self._encode(text + null_string))
bio.seek(0)
return bio.read()
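# Illustrative sketch (editor addition): a StataValueLabel built from a
# categorical column records the integer codes and their text labels, which
# generate_value_label() then serializes.
#   >>> s = pd.Series(['low', 'high', 'low'], dtype='category', name='grade')
#   >>> lbl = StataValueLabel(s)
#   >>> lbl.val, lbl.txt
#   (array([0, 1], dtype=int32), ['high', 'low'])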
class StataMissingValue(StringMixin):
"""
An observation's missing value.
Parameters
-----------
value : int8, int16, int32, float32 or float64
The Stata missing value code
Attributes
----------
string : string
String representation of the Stata missing value
value : int8, int16, int32, float32 or float64
The original encoded missing value
Notes
-----
More information: <http://www.stata.com/help.cgi?missing>
    Integer missing values map the code '.', '.a', ..., '.z' to the ranges
101 ... 127 (for int8), 32741 ... 32767 (for int16) and 2147483621 ...
2147483647 (for int32). Missing values for floating point data types are
more complex but the pattern is simple to discern from the following table.
np.float32 missing values (float in Stata)
0000007f .
0008007f .a
0010007f .b
...
00c0007f .x
00c8007f .y
00d0007f .z
np.float64 missing values (double in Stata)
000000000000e07f .
000000000001e07f .a
000000000002e07f .b
...
000000000018e07f .x
000000000019e07f .y
00000000001ae07f .z
"""
# Construct a dictionary of missing values
MISSING_VALUES = {}
bases = (101, 32741, 2147483621)
for b in bases:
# Conversion to long to avoid hash issues on 32 bit platforms #8968
MISSING_VALUES[compat.long(b)] = '.'
for i in range(1, 27):
MISSING_VALUES[compat.long(i + b)] = '.' + chr(96 + i)
float32_base = b'\x00\x00\x00\x7f'
increment = struct.unpack('<i', b'\x00\x08\x00\x00')[0]
for i in range(27):
value = struct.unpack('<f', float32_base)[0]
MISSING_VALUES[value] = '.'
if i > 0:
MISSING_VALUES[value] += chr(96 + i)
int_value = struct.unpack('<i', struct.pack('<f', value))[
0] + increment
float32_base = struct.pack('<i', int_value)
float64_base = b'\x00\x00\x00\x00\x00\x00\xe0\x7f'
increment = struct.unpack('q', b'\x00\x00\x00\x00\x00\x01\x00\x00')[0]
for i in range(27):
value = struct.unpack('<d', float64_base)[0]
MISSING_VALUES[value] = '.'
if i > 0:
MISSING_VALUES[value] += chr(96 + i)
int_value = struct.unpack('q', struct.pack('<d', value))[0] + increment
float64_base = struct.pack('q', int_value)
BASE_MISSING_VALUES = {'int8': 101,
'int16': 32741,
'int32': 2147483621,
'float32': struct.unpack('<f', float32_base)[0],
'float64': struct.unpack('<d', float64_base)[0]}
def __init__(self, value):
self._value = value
# Conversion to long to avoid hash issues on 32 bit platforms #8968
value = compat.long(value) if value < 2147483648 else float(value)
self._str = self.MISSING_VALUES[value]
string = property(lambda self: self._str,
doc="The Stata representation of the missing value: "
"'.', '.a'..'.z'")
value = property(lambda self: self._value,
doc='The binary representation of the missing value.')
def __unicode__(self):
return self.string
def __repr__(self):
# not perfect :-/
return "%s(%s)" % (self.__class__, self)
def __eq__(self, other):
return (isinstance(other, self.__class__) and
self.string == other.string and self.value == other.value)
@classmethod
def get_base_missing_value(cls, dtype):
if dtype == np.int8:
value = cls.BASE_MISSING_VALUES['int8']
elif dtype == np.int16:
value = cls.BASE_MISSING_VALUES['int16']
elif dtype == np.int32:
value = cls.BASE_MISSING_VALUES['int32']
elif dtype == np.float32:
value = cls.BASE_MISSING_VALUES['float32']
elif dtype == np.float64:
value = cls.BASE_MISSING_VALUES['float64']
else:
raise ValueError('Unsupported dtype')
return value
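# Illustrative sketch (editor addition): decoding an integer missing-value
# code using the table in the class docstring. For int8 data, code 101 is
# '.' and code 102 is '.a'.
#   >>> StataMissingValue(102).string
#   '.a'
#   >>> StataMissingValue.get_base_missing_value(np.int8)
#   101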
class StataParser(object):
_default_encoding = 'iso-8859-1'
def __init__(self, encoding):
self._encoding = encoding
# type code.
# --------------------
# str1 1 = 0x01
# str2 2 = 0x02
# ...
# str244 244 = 0xf4
# byte 251 = 0xfb (sic)
# int 252 = 0xfc
# long 253 = 0xfd
# float 254 = 0xfe
# double 255 = 0xff
# --------------------
# NOTE: the byte type seems to be reserved for categorical variables
# with a label, but the underlying variable is -127 to 100
# we're going to drop the label and cast to int
self.DTYPE_MAP = \
dict(
lzip(range(1, 245), ['a' + str(i) for i in range(1, 245)]) +
[
(251, np.int8),
(252, np.int16),
(253, np.int32),
(254, np.float32),
(255, np.float64)
]
)
self.DTYPE_MAP_XML = \
dict(
[
(32768, np.uint8), # Keys to GSO
(65526, np.float64),
(65527, np.float32),
(65528, np.int32),
(65529, np.int16),
(65530, np.int8)
]
)
self.TYPE_MAP = lrange(251) + list('bhlfd')
self.TYPE_MAP_XML = \
dict(
[
# Not really a Q, unclear how to handle byteswap
(32768, 'Q'),
(65526, 'd'),
(65527, 'f'),
(65528, 'l'),
(65529, 'h'),
(65530, 'b')
]
)
# NOTE: technically, some of these are wrong. there are more numbers
# that can be represented. it's the 27 ABOVE and BELOW the max listed
# numeric data type in [U] 12.2.2 of the 11.2 manual
float32_min = b'\xff\xff\xff\xfe'
float32_max = b'\xff\xff\xff\x7e'
float64_min = b'\xff\xff\xff\xff\xff\xff\xef\xff'
float64_max = b'\xff\xff\xff\xff\xff\xff\xdf\x7f'
self.VALID_RANGE = {
'b': (-127, 100),
'h': (-32767, 32740),
'l': (-2147483647, 2147483620),
'f': (np.float32(struct.unpack('<f', float32_min)[0]),
np.float32(struct.unpack('<f', float32_max)[0])),
'd': (np.float64(struct.unpack('<d', float64_min)[0]),
np.float64(struct.unpack('<d', float64_max)[0]))
}
self.OLD_TYPE_MAPPING = {
98: 251, # byte
105: 252, # int
108: 253, # long
102: 254 # float
# don't know old code for double
}
# These missing values are the generic '.' in Stata, and are used
# to replace nans
self.MISSING_VALUES = {
'b': 101,
'h': 32741,
'l': 2147483621,
'f': np.float32(struct.unpack('<f', b'\x00\x00\x00\x7f')[0]),
'd': np.float64(
struct.unpack('<d', b'\x00\x00\x00\x00\x00\x00\xe0\x7f')[0])
}
self.NUMPY_TYPE_MAP = {
'b': 'i1',
'h': 'i2',
'l': 'i4',
'f': 'f4',
'd': 'f8',
'Q': 'u8'
}
# Reserved words cannot be used as variable names
self.RESERVED_WORDS = ('aggregate', 'array', 'boolean', 'break',
'byte', 'case', 'catch', 'class', 'colvector',
'complex', 'const', 'continue', 'default',
'delegate', 'delete', 'do', 'double', 'else',
'eltypedef', 'end', 'enum', 'explicit',
'export', 'external', 'float', 'for', 'friend',
'function', 'global', 'goto', 'if', 'inline',
'int', 'local', 'long', 'NULL', 'pragma',
'protected', 'quad', 'rowvector', 'short',
'typedef', 'typename', 'virtual')
class StataReader(StataParser, BaseIterator):
__doc__ = _stata_reader_doc
def __init__(self, path_or_buf, convert_dates=True,
convert_categoricals=True, index=None,
convert_missing=False, preserve_dtypes=True,
columns=None, order_categoricals=True,
encoding='iso-8859-1', chunksize=None):
super(StataReader, self).__init__(encoding)
self.col_sizes = ()
# Arguments to the reader (can be temporarily overridden in
# calls to read).
self._convert_dates = convert_dates
self._convert_categoricals = convert_categoricals
self._index = index
self._convert_missing = convert_missing
self._preserve_dtypes = preserve_dtypes
self._columns = columns
self._order_categoricals = order_categoricals
self._encoding = encoding
self._chunksize = chunksize
# State variables for the file
self._has_string_data = False
self._missing_values = False
self._can_read_value_labels = False
self._column_selector_set = False
self._value_labels_read = False
self._data_read = False
self._dtype = None
self._lines_read = 0
self._native_byteorder = _set_endianness(sys.byteorder)
if isinstance(path_or_buf, str):
path_or_buf, encoding, _ = get_filepath_or_buffer(
path_or_buf, encoding=self._default_encoding
)
if isinstance(path_or_buf, (str, compat.text_type, bytes)):
self.path_or_buf = open(path_or_buf, 'rb')
else:
# Copy to BytesIO, and ensure no encoding
contents = path_or_buf.read()
try:
contents = contents.encode(self._default_encoding)
except:
pass
self.path_or_buf = BytesIO(contents)
self._read_header()
def __enter__(self):
""" enter context manager """
return self
def __exit__(self, exc_type, exc_value, traceback):
""" exit context manager """
self.close()
def close(self):
""" close the handle if its open """
try:
self.path_or_buf.close()
except IOError:
pass
def _read_header(self):
first_char = self.path_or_buf.read(1)
if struct.unpack('c', first_char)[0] == b'<':
self._read_new_header(first_char)
else:
self._read_old_header(first_char)
self.has_string_data = len([x for x in self.typlist
if type(x) is int]) > 0
# calculate size of a data record
self.col_sizes = lmap(lambda x: self._calcsize(x), self.typlist)
# remove format details from %td
self.fmtlist = ["%td" if x.startswith("%td") else x
for x in self.fmtlist]
def _read_new_header(self, first_char):
# The first part of the header is common to 117 and 118.
self.path_or_buf.read(27) # stata_dta><header><release>
self.format_version = int(self.path_or_buf.read(3))
if self.format_version not in [117, 118]:
raise ValueError(_version_error)
self.path_or_buf.read(21) # </release><byteorder>
self.byteorder = self.path_or_buf.read(3) == "MSF" and '>' or '<'
self.path_or_buf.read(15) # </byteorder><K>
self.nvar = struct.unpack(self.byteorder + 'H',
self.path_or_buf.read(2))[0]
self.path_or_buf.read(7) # </K><N>
self.nobs = self._get_nobs()
self.path_or_buf.read(11) # </N><label>
self.data_label = self._get_data_label()
self.path_or_buf.read(19) # </label><timestamp>
self.time_stamp = self._get_time_stamp()
self.path_or_buf.read(26) # </timestamp></header><map>
self.path_or_buf.read(8) # 0x0000000000000000
self.path_or_buf.read(8) # position of <map>
self._seek_vartypes = struct.unpack(
self.byteorder + 'q', self.path_or_buf.read(8))[0] + 16
self._seek_varnames = struct.unpack(
self.byteorder + 'q', self.path_or_buf.read(8))[0] + 10
self._seek_sortlist = struct.unpack(
self.byteorder + 'q', self.path_or_buf.read(8))[0] + 10
self._seek_formats = struct.unpack(
self.byteorder + 'q', self.path_or_buf.read(8))[0] + 9
self._seek_value_label_names = struct.unpack(
self.byteorder + 'q', self.path_or_buf.read(8))[0] + 19
# Requires version-specific treatment
self._seek_variable_labels = self._get_seek_variable_labels()
self.path_or_buf.read(8) # <characteristics>
self.data_location = struct.unpack(
self.byteorder + 'q', self.path_or_buf.read(8))[0] + 6
self.seek_strls = struct.unpack(
self.byteorder + 'q', self.path_or_buf.read(8))[0] + 7
self.seek_value_labels = struct.unpack(
self.byteorder + 'q', self.path_or_buf.read(8))[0] + 14
self.typlist, self.dtyplist = self._get_dtypes(self._seek_vartypes)
self.path_or_buf.seek(self._seek_varnames)
self.varlist = self._get_varlist()
self.path_or_buf.seek(self._seek_sortlist)
self.srtlist = struct.unpack(
self.byteorder + ('h' * (self.nvar + 1)),
self.path_or_buf.read(2 * (self.nvar + 1))
)[:-1]
self.path_or_buf.seek(self._seek_formats)
self.fmtlist = self._get_fmtlist()
self.path_or_buf.seek(self._seek_value_label_names)
self.lbllist = self._get_lbllist()
self.path_or_buf.seek(self._seek_variable_labels)
self._variable_labels = self._get_variable_labels()
# Get data type information, works for versions 117-118.
def _get_dtypes(self, seek_vartypes):
self.path_or_buf.seek(seek_vartypes)
raw_typlist = [struct.unpack(self.byteorder + 'H',
self.path_or_buf.read(2))[0]
for i in range(self.nvar)]
def f(typ):
if typ <= 2045:
return typ
try:
return self.TYPE_MAP_XML[typ]
except KeyError:
raise ValueError("cannot convert stata types [{0}]".
format(typ))
typlist = [f(x) for x in raw_typlist]
def f(typ):
if typ <= 2045:
return str(typ)
try:
return self.DTYPE_MAP_XML[typ]
except KeyError:
raise ValueError("cannot convert stata dtype [{0}]"
.format(typ))
dtyplist = [f(x) for x in raw_typlist]
return typlist, dtyplist
def _get_varlist(self):
if self.format_version == 117:
b = 33
elif self.format_version == 118:
b = 129
return [self._null_terminate(self.path_or_buf.read(b))
for i in range(self.nvar)]
# Returns the format list
def _get_fmtlist(self):
if self.format_version == 118:
b = 57
elif self.format_version > 113:
b = 49
elif self.format_version > 104:
b = 12
else:
b = 7
return [self._null_terminate(self.path_or_buf.read(b))
for i in range(self.nvar)]
# Returns the label list
def _get_lbllist(self):
if self.format_version >= 118:
b = 129
elif self.format_version > 108:
b = 33
else:
b = 9
return [self._null_terminate(self.path_or_buf.read(b))
for i in range(self.nvar)]
def _get_variable_labels(self):
if self.format_version == 118:
vlblist = [self._decode(self.path_or_buf.read(321))
for i in range(self.nvar)]
elif self.format_version > 105:
vlblist = [self._null_terminate(self.path_or_buf.read(81))
for i in range(self.nvar)]
else:
vlblist = [self._null_terminate(self.path_or_buf.read(32))
for i in range(self.nvar)]
return vlblist
def _get_nobs(self):
if self.format_version == 118:
return struct.unpack(self.byteorder + 'Q',
self.path_or_buf.read(8))[0]
else:
return struct.unpack(self.byteorder + 'I',
self.path_or_buf.read(4))[0]
def _get_data_label(self):
if self.format_version == 118:
strlen = struct.unpack(self.byteorder + 'H',
self.path_or_buf.read(2))[0]
return self._decode(self.path_or_buf.read(strlen))
elif self.format_version == 117:
strlen = struct.unpack('b', self.path_or_buf.read(1))[0]
return self._null_terminate(self.path_or_buf.read(strlen))
elif self.format_version > 105:
return self._null_terminate(self.path_or_buf.read(81))
else:
return self._null_terminate(self.path_or_buf.read(32))
def _get_time_stamp(self):
if self.format_version == 118:
strlen = struct.unpack('b', self.path_or_buf.read(1))[0]
return self.path_or_buf.read(strlen).decode("utf-8")
elif self.format_version == 117:
strlen = struct.unpack('b', self.path_or_buf.read(1))[0]
return self._null_terminate(self.path_or_buf.read(strlen))
elif self.format_version > 104:
return self._null_terminate(self.path_or_buf.read(18))
else:
raise ValueError()
def _get_seek_variable_labels(self):
if self.format_version == 117:
self.path_or_buf.read(8) # <variable_lables>, throw away
# Stata 117 data files do not follow the described format. This is
            # a workaround that uses the previous label, 33 bytes for each
# variable, 20 for the closing tag and 17 for the opening tag
return self._seek_value_label_names + (33 * self.nvar) + 20 + 17
elif self.format_version == 118:
return struct.unpack(self.byteorder + 'q',
self.path_or_buf.read(8))[0] + 17
else:
raise ValueError()
def _read_old_header(self, first_char):
self.format_version = struct.unpack('b', first_char)[0]
if self.format_version not in [104, 105, 108, 111, 113, 114, 115]:
raise ValueError(_version_error)
self.byteorder = struct.unpack('b', self.path_or_buf.read(1))[
0] == 0x1 and '>' or '<'
self.filetype = struct.unpack('b', self.path_or_buf.read(1))[0]
self.path_or_buf.read(1) # unused
self.nvar = struct.unpack(self.byteorder + 'H',
self.path_or_buf.read(2))[0]
self.nobs = self._get_nobs()
self.data_label = self._get_data_label()
self.time_stamp = self._get_time_stamp()
# descriptors
if self.format_version > 108:
typlist = [ord(self.path_or_buf.read(1))
for i in range(self.nvar)]
else:
buf = self.path_or_buf.read(self.nvar)
typlistb = np.frombuffer(buf, dtype=np.uint8)
typlist = []
for tp in typlistb:
if tp in self.OLD_TYPE_MAPPING:
typlist.append(self.OLD_TYPE_MAPPING[tp])
else:
typlist.append(tp - 127) # py2 string, py3 bytes
try:
self.typlist = [self.TYPE_MAP[typ] for typ in typlist]
except:
raise ValueError("cannot convert stata types [{0}]"
.format(','.join(str(x) for x in typlist)))
try:
self.dtyplist = [self.DTYPE_MAP[typ] for typ in typlist]
except:
raise ValueError("cannot convert stata dtypes [{0}]"
.format(','.join(str(x) for x in typlist)))
if self.format_version > 108:
self.varlist = [self._null_terminate(self.path_or_buf.read(33))
for i in range(self.nvar)]
else:
self.varlist = [self._null_terminate(self.path_or_buf.read(9))
for i in range(self.nvar)]
self.srtlist = struct.unpack(
self.byteorder + ('h' * (self.nvar + 1)),
self.path_or_buf.read(2 * (self.nvar + 1))
)[:-1]
self.fmtlist = self._get_fmtlist()
self.lbllist = self._get_lbllist()
self._variable_labels = self._get_variable_labels()
# ignore expansion fields (Format 105 and later)
# When reading, read five bytes; the last four bytes now tell you
# the size of the next read, which you discard. You then continue
# like this until you read 5 bytes of zeros.
if self.format_version > 104:
while True:
data_type = struct.unpack(self.byteorder + 'b',
self.path_or_buf.read(1))[0]
if self.format_version > 108:
data_len = struct.unpack(self.byteorder + 'i',
self.path_or_buf.read(4))[0]
else:
data_len = struct.unpack(self.byteorder + 'h',
self.path_or_buf.read(2))[0]
if data_type == 0:
break
self.path_or_buf.read(data_len)
# necessary data to continue parsing
self.data_location = self.path_or_buf.tell()
def _calcsize(self, fmt):
return (type(fmt) is int and fmt or
struct.calcsize(self.byteorder + fmt))
def _decode(self, s):
s = s.partition(b"\0")[0]
return s.decode('utf-8')
def _null_terminate(self, s):
if compat.PY3 or self._encoding is not None:
# have bytes not strings, so must decode
s = s.partition(b"\0")[0]
return s.decode(self._encoding or self._default_encoding)
else:
null_byte = "\0"
try:
return s.lstrip(null_byte)[:s.index(null_byte)]
except:
return s
def _read_value_labels(self):
if self.format_version <= 108:
# Value labels are not supported in version 108 and earlier.
return
if self._value_labels_read:
# Don't read twice
return
if self.format_version >= 117:
self.path_or_buf.seek(self.seek_value_labels)
else:
offset = self.nobs * self._dtype.itemsize
self.path_or_buf.seek(self.data_location + offset)
self._value_labels_read = True
self.value_label_dict = dict()
while True:
if self.format_version >= 117:
if self.path_or_buf.read(5) == b'</val': # <lbl>
break # end of value label table
slength = self.path_or_buf.read(4)
if not slength:
break # end of value label table (format < 117)
if self.format_version <= 117:
labname = self._null_terminate(self.path_or_buf.read(33))
else:
labname = self._decode(self.path_or_buf.read(129))
self.path_or_buf.read(3) # padding
n = struct.unpack(self.byteorder + 'I',
self.path_or_buf.read(4))[0]
txtlen = struct.unpack(self.byteorder + 'I',
self.path_or_buf.read(4))[0]
off = np.frombuffer(self.path_or_buf.read(4 * n),
dtype=self.byteorder + "i4",
count=n)
val = np.frombuffer(self.path_or_buf.read(4 * n),
dtype=self.byteorder + "i4",
count=n)
ii = np.argsort(off)
off = off[ii]
val = val[ii]
txt = self.path_or_buf.read(txtlen)
self.value_label_dict[labname] = dict()
for i in range(n):
end = off[i + 1] if i < n - 1 else txtlen
if self.format_version <= 117:
self.value_label_dict[labname][val[i]] = (
self._null_terminate(txt[off[i]:end]))
else:
self.value_label_dict[labname][val[i]] = (
self._decode(txt[off[i]:end]))
if self.format_version >= 117:
self.path_or_buf.read(6) # </lbl>
self._value_labels_read = True
def _read_strls(self):
self.path_or_buf.seek(self.seek_strls)
self.GSO = {0: ''}
while True:
if self.path_or_buf.read(3) != b'GSO':
break
if self.format_version == 117:
v_o = struct.unpack(self.byteorder + 'Q',
self.path_or_buf.read(8))[0]
else:
buf = self.path_or_buf.read(12)
# Only tested on little endian file on little endian machine.
if self.byteorder == '<':
buf = buf[0:2] + buf[4:10]
else:
buf = buf[0:2] + buf[6:]
v_o = struct.unpack('Q', buf)[0]
typ = struct.unpack('B', self.path_or_buf.read(1))[0]
length = struct.unpack(self.byteorder + 'I',
self.path_or_buf.read(4))[0]
va = self.path_or_buf.read(length)
if typ == 130:
encoding = 'utf-8'
if self.format_version == 117:
encoding = self._encoding or self._default_encoding
va = va[0:-1].decode(encoding)
self.GSO[v_o] = va
# legacy
@Appender('DEPRECATED: ' + _data_method_doc)
def data(self, **kwargs):
import warnings
warnings.warn("'data' is deprecated, use 'read' instead")
if self._data_read:
raise Exception("Data has already been read.")
self._data_read = True
return self.read(None, **kwargs)
def __next__(self):
return self.read(nrows=self._chunksize or 1)
def get_chunk(self, size=None):
"""
Reads lines from Stata file and returns as dataframe
Parameters
----------
size : int, defaults to None
Number of lines to read. If None, reads whole file.
Returns
-------
DataFrame
"""
if size is None:
size = self._chunksize
return self.read(nrows=size)
@Appender(_read_method_doc)
def read(self, nrows=None, convert_dates=None,
convert_categoricals=None, index=None,
convert_missing=None, preserve_dtypes=None,
columns=None, order_categoricals=None):
# Handle empty file or chunk. If reading incrementally raise
# StopIteration. If reading the whole thing return an empty
# data frame.
if (self.nobs == 0) and (nrows is None):
self._can_read_value_labels = True
self._data_read = True
self.close()
return DataFrame(columns=self.varlist)
# Handle options
if convert_dates is None:
convert_dates = self._convert_dates
if convert_categoricals is None:
convert_categoricals = self._convert_categoricals
if convert_missing is None:
convert_missing = self._convert_missing
if preserve_dtypes is None:
preserve_dtypes = self._preserve_dtypes
if columns is None:
columns = self._columns
if order_categoricals is None:
order_categoricals = self._order_categoricals
if nrows is None:
nrows = self.nobs
if (self.format_version >= 117) and (self._dtype is None):
self._can_read_value_labels = True
self._read_strls()
# Setup the dtype.
if self._dtype is None:
dtype = [] # Convert struct data types to numpy data type
for i, typ in enumerate(self.typlist):
if typ in self.NUMPY_TYPE_MAP:
dtype.append(('s' + str(i), self.byteorder +
self.NUMPY_TYPE_MAP[typ]))
else:
dtype.append(('s' + str(i), 'S' + str(typ)))
dtype = np.dtype(dtype)
self._dtype = dtype
# Read data
dtype = self._dtype
max_read_len = (self.nobs - self._lines_read) * dtype.itemsize
read_len = nrows * dtype.itemsize
read_len = min(read_len, max_read_len)
if read_len <= 0:
# Iterator has finished, should never be here unless
# we are reading the file incrementally
if convert_categoricals:
self._read_value_labels()
self.close()
raise StopIteration
offset = self._lines_read * dtype.itemsize
self.path_or_buf.seek(self.data_location + offset)
read_lines = min(nrows, self.nobs - self._lines_read)
data = np.frombuffer(self.path_or_buf.read(read_len), dtype=dtype,
count=read_lines)
self._lines_read += read_lines
if self._lines_read == self.nobs:
self._can_read_value_labels = True
self._data_read = True
# if necessary, swap the byte order to native here
if self.byteorder != self._native_byteorder:
data = data.byteswap().newbyteorder()
if convert_categoricals:
self._read_value_labels()
if len(data) == 0:
data = DataFrame(columns=self.varlist, index=index)
else:
data = DataFrame.from_records(data, index=index)
data.columns = self.varlist
# If index is not specified, use actual row number rather than
# restarting at 0 for each chunk.
if index is None:
ix = np.arange(self._lines_read - read_lines, self._lines_read)
data = data.set_index(ix)
if columns is not None:
try:
data = self._do_select_columns(data, columns)
except ValueError:
self.close()
raise
# Decode strings
for col, typ in zip(data, self.typlist):
if type(typ) is int:
data[col] = data[col].apply(
self._null_terminate, convert_dtype=True)
data = self._insert_strls(data)
cols_ = np.where(self.dtyplist)[0]
# Convert columns (if needed) to match input type
index = data.index
requires_type_conversion = False
data_formatted = []
for i in cols_:
if self.dtyplist[i] is not None:
col = data.columns[i]
dtype = data[col].dtype
if dtype != np.dtype(object) and dtype != self.dtyplist[i]:
requires_type_conversion = True
data_formatted.append(
(col, Series(data[col], index, self.dtyplist[i])))
else:
data_formatted.append((col, data[col]))
if requires_type_conversion:
data = DataFrame.from_items(data_formatted)
del data_formatted
self._do_convert_missing(data, convert_missing)
if convert_dates:
cols = np.where(lmap(lambda x: x in _date_formats,
self.fmtlist))[0]
for i in cols:
col = data.columns[i]
try:
data[col] = _stata_elapsed_date_to_datetime_vec(
data[col],
self.fmtlist[i])
except ValueError:
self.close()
raise
if convert_categoricals and self.format_version > 108:
data = self._do_convert_categoricals(data,
self.value_label_dict,
self.lbllist,
order_categoricals)
if not preserve_dtypes:
retyped_data = []
convert = False
for col in data:
dtype = data[col].dtype
if dtype in (np.float16, np.float32):
dtype = np.float64
convert = True
elif dtype in (np.int8, np.int16, np.int32):
dtype = np.int64
convert = True
retyped_data.append((col, data[col].astype(dtype)))
if convert:
data = DataFrame.from_items(retyped_data)
return data
def _do_convert_missing(self, data, convert_missing):
# Check for missing values, and replace if found
for i, colname in enumerate(data):
fmt = self.typlist[i]
if fmt not in self.VALID_RANGE:
continue
nmin, nmax = self.VALID_RANGE[fmt]
series = data[colname]
missing = np.logical_or(series < nmin, series > nmax)
if not missing.any():
continue
if convert_missing: # Replacement follows Stata notation
missing_loc = np.argwhere(missing)
umissing, umissing_loc = np.unique(series[missing],
return_inverse=True)
replacement = Series(series, dtype=np.object)
for j, um in enumerate(umissing):
missing_value = StataMissingValue(um)
loc = missing_loc[umissing_loc == j]
replacement.iloc[loc] = missing_value
else: # All replacements are identical
dtype = series.dtype
if dtype not in (np.float32, np.float64):
dtype = np.float64
replacement = Series(series, dtype=dtype)
replacement[missing] = np.nan
data[colname] = replacement
def _insert_strls(self, data):
if not hasattr(self, 'GSO') or len(self.GSO) == 0:
return data
for i, typ in enumerate(self.typlist):
if typ != 'Q':
continue
data.iloc[:, i] = [self.GSO[k] for k in data.iloc[:, i]]
return data
def _do_select_columns(self, data, columns):
if not self._column_selector_set:
column_set = set(columns)
if len(column_set) != len(columns):
raise ValueError('columns contains duplicate entries')
unmatched = column_set.difference(data.columns)
if unmatched:
raise ValueError('The following columns were not found in the '
'Stata data set: ' +
', '.join(list(unmatched)))
# Copy information for retained columns for later processing
dtyplist = []
typlist = []
fmtlist = []
lbllist = []
for col in columns:
i = data.columns.get_loc(col)
dtyplist.append(self.dtyplist[i])
typlist.append(self.typlist[i])
fmtlist.append(self.fmtlist[i])
lbllist.append(self.lbllist[i])
self.dtyplist = dtyplist
self.typlist = typlist
self.fmtlist = fmtlist
self.lbllist = lbllist
self._column_selector_set = True
return data[columns]
def _do_convert_categoricals(self, data, value_label_dict, lbllist,
order_categoricals):
"""
Converts categorical columns to Categorical type.
"""
value_labels = list(compat.iterkeys(value_label_dict))
cat_converted_data = []
for col, label in zip(data, lbllist):
if label in value_labels:
# Explicit call with ordered=True
cat_data = Categorical(data[col], ordered=order_categoricals)
categories = []
for category in cat_data.categories:
if category in value_label_dict[label]:
categories.append(value_label_dict[label][category])
else:
categories.append(category) # Partially labeled
try:
cat_data.categories = categories
except ValueError:
vc = Series(categories).value_counts()
repeats = list(vc.index[vc > 1])
repeats = '\n' + '-' * 80 + '\n'.join(repeats)
msg = 'Value labels for column {0} are not unique. The ' \
'repeated labels are:\n{1}'.format(col, repeats)
raise ValueError(msg)
# TODO: is the next line needed above in the data(...) method?
cat_data = Series(cat_data, index=data.index)
cat_converted_data.append((col, cat_data))
else:
cat_converted_data.append((col, data[col]))
data = DataFrame.from_items(cat_converted_data)
return data
def data_label(self):
"""Returns data label of Stata file"""
return self.data_label
def variable_labels(self):
"""Returns variable labels as a dict, associating each variable name
with corresponding label
"""
return dict(zip(self.varlist, self._variable_labels))
def value_labels(self):
"""Returns a dict, associating each variable name a dict, associating
each value its corresponding label
"""
if not self._value_labels_read:
self._read_value_labels()
return self.value_label_dict
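def _example_read_stata_metadata(path):
    """Illustrative sketch only, not part of the module's API: shows how the
    metadata accessors above might be used, assuming the reader class defined
    earlier in this module is named ``StataReader`` and ``path`` points to an
    existing .dta file.
    """
    reader = StataReader(path)
    first_rows = reader.read(nrows=100)   # DataFrame with the first 100 obs
    labels = reader.variable_labels()     # {variable name: variable label}
    value_maps = reader.value_labels()    # {label name: {value: label}}
    reader.close()
    return first_rows, labels, value_maps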
def _open_file_binary_write(fname, encoding):
if hasattr(fname, 'write'):
# if 'b' not in fname.mode:
return fname
return open(fname, "wb")
def _set_endianness(endianness):
if endianness.lower() in ["<", "little"]:
return "<"
elif endianness.lower() in [">", "big"]:
return ">"
else: # pragma : no cover
raise ValueError("Endianness %s not understood" % endianness)
def _pad_bytes(name, length):
"""
    Takes a char string and pads it with null bytes until it is `length` characters long
"""
return name + "\x00" * (length - len(name))
def _convert_datetime_to_stata_type(fmt):
"""
Converts from one of the stata date formats to a type in TYPE_MAP
"""
if fmt in ["tc", "%tc", "td", "%td", "tw", "%tw", "tm", "%tm", "tq",
"%tq", "th", "%th", "ty", "%ty"]:
return np.float64 # Stata expects doubles for SIFs
else:
raise NotImplementedError("Format %s not implemented" % fmt)
def _maybe_convert_to_int_keys(convert_dates, varlist):
new_dict = {}
for key in convert_dates:
if not convert_dates[key].startswith("%"): # make sure proper fmts
convert_dates[key] = "%" + convert_dates[key]
if key in varlist:
new_dict.update({varlist.index(key): convert_dates[key]})
else:
if not isinstance(key, int):
raise ValueError("convert_dates key must be a "
"column or an integer")
new_dict.update({key: convert_dates[key]})
return new_dict
def _dtype_to_stata_type(dtype, column):
"""
Converts dtype types to stata types. Returns the byte of the given ordinal.
See TYPE_MAP and comments for an explanation. This is also explained in
the dta spec.
    1 - 244 are strings of this length
                         Pandas    Stata
    251 - chr(251) - for int8      byte
    252 - chr(252) - for int16     int
    253 - chr(253) - for int32     long
    254 - chr(254) - for float32   float
    255 - chr(255) - for double    double
If there are dates to convert, then dtype will already have the correct
type inserted.
"""
# TODO: expand to handle datetime to integer conversion
if dtype.type == np.string_:
return chr(dtype.itemsize)
elif dtype.type == np.object_: # try to coerce it to the biggest string
# not memory efficient, what else could we
# do?
itemsize = max_len_string_array(_ensure_object(column.values))
return chr(max(itemsize, 1))
elif dtype == np.float64:
return chr(255)
elif dtype == np.float32:
return chr(254)
elif dtype == np.int32:
return chr(253)
elif dtype == np.int16:
return chr(252)
elif dtype == np.int8:
return chr(251)
else: # pragma : no cover
raise NotImplementedError("Data type %s not supported." % dtype)
def _dtype_to_default_stata_fmt(dtype, column):
"""
Maps numpy dtype to stata's default format for this type. Not terribly
important since users can change this in Stata. Semantics are
object -> "%DDs" where DD is the length of the string. If not a string,
raise ValueError
float64 -> "%10.0g"
float32 -> "%9.0g"
int64 -> "%9.0g"
int32 -> "%12.0g"
int16 -> "%8.0g"
int8 -> "%8.0g"
"""
# TODO: Refactor to combine type with format
# TODO: expand this to handle a default datetime format?
if dtype.type == np.object_:
inferred_dtype = infer_dtype(column.dropna())
if not (inferred_dtype in ('string', 'unicode') or
len(column) == 0):
raise ValueError('Writing general object arrays is not supported')
itemsize = max_len_string_array(_ensure_object(column.values))
if itemsize > 244:
raise ValueError(excessive_string_length_error % column.name)
return "%" + str(max(itemsize, 1)) + "s"
elif dtype == np.float64:
return "%10.0g"
elif dtype == np.float32:
return "%9.0g"
elif dtype == np.int32:
return "%12.0g"
elif dtype == np.int8 or dtype == np.int16:
return "%8.0g"
else: # pragma : no cover
raise NotImplementedError("Data type %s not supported." % dtype)
class StataWriter(StataParser):
"""
A class for writing Stata binary dta files
Parameters
----------
fname : str or buffer
String path of file-like object
data : DataFrame
Input to save
convert_dates : dict
Dictionary mapping columns containing datetime types to stata internal
    format to use when writing the dates. Options are 'tc', 'td', 'tm',
'tw', 'th', 'tq', 'ty'. Column can be either an integer or a name.
Datetime columns that do not have a conversion type specified will be
converted to 'tc'. Raises NotImplementedError if a datetime column has
timezone information
write_index : bool
Write the index to Stata dataset.
encoding : str
Default is latin-1. Unicode is not supported
byteorder : str
Can be ">", "<", "little", or "big". default is `sys.byteorder`
time_stamp : datetime
A datetime to use as file creation date. Default is the current time
dataset_label : str
A label for the data set. Must be 80 characters or smaller.
variable_labels : dict
Dictionary containing columns as keys and variable labels as values.
Each label must be 80 characters or smaller.
.. versionadded:: 0.19.0
Returns
-------
writer : StataWriter instance
The StataWriter instance has a write_file method, which will
write the file to the given `fname`.
Raises
------
NotImplementedError
* If datetimes contain timezone information
ValueError
        * Columns listed in convert_dates are neither datetime64[ns]
          nor datetime.datetime
* Column dtype is not representable in Stata
* Column listed in convert_dates is not in DataFrame
* Categorical label contains more than 32,000 characters
Examples
--------
>>> import pandas as pd
>>> data = pd.DataFrame([[1.0, 1]], columns=['a', 'b'])
>>> writer = StataWriter('./data_file.dta', data)
>>> writer.write_file()
Or with dates
>>> from datetime import datetime
>>> data = pd.DataFrame([[datetime(2000,1,1)]], columns=['date'])
>>> writer = StataWriter('./date_data_file.dta', data, {'date' : 'tw'})
>>> writer.write_file()
"""
def __init__(self, fname, data, convert_dates=None, write_index=True,
encoding="latin-1", byteorder=None, time_stamp=None,
data_label=None, variable_labels=None):
super(StataWriter, self).__init__(encoding)
self._convert_dates = {} if convert_dates is None else convert_dates
self._write_index = write_index
self._time_stamp = time_stamp
self._data_label = data_label
self._variable_labels = variable_labels
# attach nobs, nvars, data, varlist, typlist
self._prepare_pandas(data)
if byteorder is None:
byteorder = sys.byteorder
self._byteorder = _set_endianness(byteorder)
self._fname = fname
self.type_converters = {253: np.int32, 252: np.int16, 251: np.int8}
def _write(self, to_write):
"""
Helper to call encode before writing to file for Python 3 compat.
"""
if compat.PY3:
self._file.write(to_write.encode(self._encoding or
self._default_encoding))
else:
self._file.write(to_write)
def _prepare_categoricals(self, data):
"""Check for categorical columns, retain categorical information for
Stata file and convert categorical data to int"""
is_cat = [is_categorical_dtype(data[col]) for col in data]
self._is_col_cat = is_cat
self._value_labels = []
if not any(is_cat):
return data
get_base_missing_value = StataMissingValue.get_base_missing_value
index = data.index
data_formatted = []
for col, col_is_cat in zip(data, is_cat):
if col_is_cat:
self._value_labels.append(StataValueLabel(data[col]))
dtype = data[col].cat.codes.dtype
if dtype == np.int64:
raise ValueError('It is not possible to export '
'int64-based categorical data to Stata.')
values = data[col].cat.codes.values.copy()
# Upcast if needed so that correct missing values can be set
if values.max() >= get_base_missing_value(dtype):
if dtype == np.int8:
dtype = np.int16
elif dtype == np.int16:
dtype = np.int32
else:
dtype = np.float64
values = np.array(values, dtype=dtype)
# Replace missing values with Stata missing value for type
values[values == -1] = get_base_missing_value(dtype)
data_formatted.append((col, values, index))
else:
data_formatted.append((col, data[col]))
return DataFrame.from_items(data_formatted)
def _replace_nans(self, data):
# return data
"""Checks floating point data columns for nans, and replaces these with
the generic Stata for missing value (.)"""
for c in data:
dtype = data[c].dtype
if dtype in (np.float32, np.float64):
if dtype == np.float32:
replacement = self.MISSING_VALUES['f']
else:
replacement = self.MISSING_VALUES['d']
data[c] = data[c].fillna(replacement)
return data
def _check_column_names(self, data):
"""
Checks column names to ensure that they are valid Stata column names.
This includes checks for:
* Non-string names
* Stata keywords
* Variables that start with numbers
* Variables with names that are too long
When an illegal variable name is detected, it is converted, and if
dates are exported, the variable name is propagated to the date
conversion dictionary
"""
converted_names = []
columns = list(data.columns)
original_columns = columns[:]
duplicate_var_id = 0
for j, name in enumerate(columns):
orig_name = name
if not isinstance(name, string_types):
name = text_type(name)
for c in name:
if (c < 'A' or c > 'Z') and (c < 'a' or c > 'z') and \
(c < '0' or c > '9') and c != '_':
name = name.replace(c, '_')
# Variable name must not be a reserved word
if name in self.RESERVED_WORDS:
name = '_' + name
# Variable name may not start with a number
if name[0] >= '0' and name[0] <= '9':
name = '_' + name
name = name[:min(len(name), 32)]
if not name == orig_name:
# check for duplicates
while columns.count(name) > 0:
# prepend ascending number to avoid duplicates
name = '_' + str(duplicate_var_id) + name
name = name[:min(len(name), 32)]
duplicate_var_id += 1
                # need to possibly encode the orig name if it's unicode
try:
orig_name = orig_name.encode('utf-8')
except:
pass
converted_names.append(
'{0} -> {1}'.format(orig_name, name))
columns[j] = name
data.columns = columns
# Check date conversion, and fix key if needed
if self._convert_dates:
for c, o in zip(columns, original_columns):
if c != o:
self._convert_dates[c] = self._convert_dates[o]
del self._convert_dates[o]
if converted_names:
import warnings
ws = invalid_name_doc.format('\n '.join(converted_names))
warnings.warn(ws, InvalidColumnName)
return data
def _prepare_pandas(self, data):
# NOTE: we might need a different API / class for pandas objects so
# we can set different semantics - handle this with a PR to pandas.io
data = data.copy()
if self._write_index:
data = data.reset_index()
# Ensure column names are strings
data = self._check_column_names(data)
# Check columns for compatibility with stata, upcast if necessary
# Raise if outside the supported range
data = _cast_to_stata_types(data)
# Replace NaNs with Stata missing values
data = self._replace_nans(data)
# Convert categoricals to int data, and strip labels
data = self._prepare_categoricals(data)
self.nobs, self.nvar = data.shape
self.data = data
self.varlist = data.columns.tolist()
dtypes = data.dtypes
# Ensure all date columns are converted
for col in data:
if col in self._convert_dates:
continue
if is_datetime64_dtype(data[col]):
self._convert_dates[col] = 'tc'
self._convert_dates = _maybe_convert_to_int_keys(self._convert_dates,
self.varlist)
for key in self._convert_dates:
new_type = _convert_datetime_to_stata_type(
self._convert_dates[key]
)
dtypes[key] = np.dtype(new_type)
self.typlist = []
self.fmtlist = []
for col, dtype in dtypes.iteritems():
self.fmtlist.append(_dtype_to_default_stata_fmt(dtype, data[col]))
self.typlist.append(_dtype_to_stata_type(dtype, data[col]))
# set the given format for the datetime cols
if self._convert_dates is not None:
for key in self._convert_dates:
self.fmtlist[key] = self._convert_dates[key]
def write_file(self):
self._file = _open_file_binary_write(
self._fname, self._encoding or self._default_encoding
)
try:
self._write_header(time_stamp=self._time_stamp,
data_label=self._data_label)
self._write_descriptors()
self._write_variable_labels()
# write 5 zeros for expansion fields
self._write(_pad_bytes("", 5))
self._prepare_data()
self._write_data()
self._write_value_labels()
finally:
self._file.close()
def _write_value_labels(self):
for vl in self._value_labels:
self._file.write(vl.generate_value_label(self._byteorder,
self._encoding))
def _write_header(self, data_label=None, time_stamp=None):
byteorder = self._byteorder
# ds_format - just use 114
self._file.write(struct.pack("b", 114))
# byteorder
self._write(byteorder == ">" and "\x01" or "\x02")
# filetype
self._write("\x01")
# unused
self._write("\x00")
# number of vars, 2 bytes
self._file.write(struct.pack(byteorder + "h", self.nvar)[:2])
# number of obs, 4 bytes
self._file.write(struct.pack(byteorder + "i", self.nobs)[:4])
# data label 81 bytes, char, null terminated
if data_label is None:
self._file.write(self._null_terminate(_pad_bytes("", 80)))
else:
self._file.write(
self._null_terminate(_pad_bytes(data_label[:80], 80))
)
# time stamp, 18 bytes, char, null terminated
# format dd Mon yyyy hh:mm
if time_stamp is None:
time_stamp = datetime.datetime.now()
elif not isinstance(time_stamp, datetime.datetime):
raise ValueError("time_stamp should be datetime type")
self._file.write(
self._null_terminate(time_stamp.strftime("%d %b %Y %H:%M"))
)
def _write_descriptors(self, typlist=None, varlist=None, srtlist=None,
fmtlist=None, lbllist=None):
nvar = self.nvar
# typlist, length nvar, format byte array
for typ in self.typlist:
self._write(typ)
# varlist names are checked by _check_column_names
# varlist, requires null terminated
for name in self.varlist:
name = self._null_terminate(name, True)
name = _pad_bytes(name[:32], 33)
self._write(name)
# srtlist, 2*(nvar+1), int array, encoded by byteorder
srtlist = _pad_bytes("", 2 * (nvar + 1))
self._write(srtlist)
# fmtlist, 49*nvar, char array
for fmt in self.fmtlist:
self._write(_pad_bytes(fmt, 49))
# lbllist, 33*nvar, char array
for i in range(nvar):
# Use variable name when categorical
if self._is_col_cat[i]:
name = self.varlist[i]
name = self._null_terminate(name, True)
name = _pad_bytes(name[:32], 33)
self._write(name)
else: # Default is empty label
self._write(_pad_bytes("", 33))
def _write_variable_labels(self):
# Missing labels are 80 blank characters plus null termination
blank = _pad_bytes('', 81)
if self._variable_labels is None:
for i in range(self.nvar):
self._write(blank)
return
for col in self.data:
if col in self._variable_labels:
label = self._variable_labels[col]
if len(label) > 80:
raise ValueError('Variable labels must be 80 characters '
'or fewer')
is_latin1 = all(ord(c) < 256 for c in label)
if not is_latin1:
raise ValueError('Variable labels must contain only '
'characters that can be encoded in '
'Latin-1')
self._write(_pad_bytes(label, 81))
else:
self._write(blank)
def _prepare_data(self):
data = self.data
typlist = self.typlist
convert_dates = self._convert_dates
# 1. Convert dates
if self._convert_dates is not None:
for i, col in enumerate(data):
if i in convert_dates:
data[col] = _datetime_to_stata_elapsed_vec(data[col],
self.fmtlist[i])
# 2. Convert bad string data to '' and pad to correct length
dtype = []
data_cols = []
has_strings = False
for i, col in enumerate(data):
typ = ord(typlist[i])
if typ <= 244:
has_strings = True
data[col] = data[col].fillna('').apply(_pad_bytes, args=(typ,))
stype = 'S%d' % typ
dtype.append(('c' + str(i), stype))
string = data[col].str.encode(self._encoding)
data_cols.append(string.values.astype(stype))
else:
dtype.append(('c' + str(i), data[col].dtype))
data_cols.append(data[col].values)
dtype = np.dtype(dtype)
if has_strings:
self.data = np.fromiter(zip(*data_cols), dtype=dtype)
else:
self.data = data.to_records(index=False)
def _write_data(self):
data = self.data
data.tofile(self._file)
def _null_terminate(self, s, as_string=False):
null_byte = '\x00'
if compat.PY3 and not as_string:
s += null_byte
return s.encode(self._encoding)
else:
s += null_byte
return s
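def _example_write_labeled_dta(path):
    """Illustrative sketch only: writes a tiny DataFrame with a date column and
    a variable label using the StataWriter class above. The file path, column
    names and label text are made-up example values.
    """
    import datetime as dt
    import pandas as pd
    df = pd.DataFrame({'when': [dt.datetime(2000, 1, 1)], 'value': [1.5]})
    writer = StataWriter(path, df, convert_dates={'when': 'td'},
                         variable_labels={'value': 'An example measurement'})
    writer.write_file()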
| gpl-3.0 |
Snailed/group-generator | gruppeapp/views.py | 1 | 10629 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.utils.datastructures import MultiValueDictKeyError
from django.shortcuts import render, redirect
from django.http import HttpResponse, Http404
from django.views import View
from random import shuffle, randint
from django.contrib.auth import authenticate, login
from django.contrib.auth.forms import AuthenticationForm
from django.contrib.auth.password_validation import validate_password, password_validators_help_texts
from django.core.exceptions import ValidationError, ObjectDoesNotExist
from .models import Gruppe, GruppeElev, Klasse, Elev, Advertisement
from .forms import UserForm, LoginForm
import uuid
import operator
# Create your views here.
#Here, users can enter student names etc. and submit.
def makegroup(request, selectedclassid=0):
loginform = LoginForm(None)
error = False
errormessage = ""
classes = None
selectedclass = None
if request.user.is_authenticated:
classes = Klasse.objects.filter(user=request.user)
if selectedclassid != 0:
selectedclass = Klasse.objects.filter(id=selectedclassid).first()
context = {"error": error, "errormessage": errormessage, "loginform": loginform, "classes":classes, "selectedclass":selectedclass}
return render(request, "gruppeapp/welcome.html", context)
#Here, users can view the newly generated group!
class Creategroup(View):
def post(self, request):
numberofgroups = 1
students = []
studentCounter = request.POST["studentcounter"]
numberofgroups = int(request.POST["numberofgroupsinput"])
currentStudent=""
"""if int(request.POST["createfromclass"]) == 1:
for i in range(0, int(studentCounter)+1):
if int(request.POST["studentexists"+str(i)])==1:
if request.POST["student"+str(i)]:
students.append(request.POST["student"+str(i)])
else:"""
print(str(request.POST))
for i in range(0, int(studentCounter)+1):
print("trying to find student "+str(i))
try:
if request.POST.get("student"+str(i),0) is not 0:
print("Added student "+str(i))
currentStudent = request.POST["student"+str(i)]
                    if currentStudent != "":
students.append(currentStudent)
except MultiValueDictKeyError:
error = True
errormessage = "No students added"
print("Tried to find student"+str(i))
print(str(request.POST))
context = {"error": error, "errormessage": errormessage}
return render(request, "gruppeapp/welcome.html", context)
except ValueError:
error = True
errormessage = "You didn't choose how many groups should be made"
context = {"error": error, "errormessage": errormessage}
return render(request, "gruppeapp/welcome.html", context)
shuffle(students)
linkhash=uuid.uuid4().hex
gruppe = Gruppe(link=linkhash, antalgrupper=numberofgroups)
if request.user.is_authenticated():
gruppe.user = request.user
gruppe.save()
for number, iterator in enumerate(students):
student = GruppeElev(navn=iterator, position=number, gruppe=gruppe)
student.save()
return redirect("gruppeapp:viewgroup", linkhash=linkhash)
def get(self,request):
raise Http404("Page not found")
class Creategroupfromclass(View):
def get(self,request):
return redirect("gruppeapp:makegroup")
def post(self,request):
classid=request.POST["classid"]
return redirect("gruppeapp:makegroupwithclassid", selectedclassid=classid)
class About(View):
def get(self,request):
return render(request, "gruppeapp/about.html", {"loginform":LoginForm(None)})
def post(self, request):
raise Http404("Page not found")
def viewgroup(request, linkhash):
loginform = LoginForm(None)
gruppe = Gruppe.objects.get(link=linkhash)
students = []
for student in GruppeElev.objects.filter(gruppe=gruppe):
students.append(student)
smallqueryset = Advertisement.objects.filter(size="small").order_by('?')
bigqueryset = Advertisement.objects.filter(size="large").order_by('?')
print(str(bigqueryset))
smalloverhead = smallqueryset.first()
bigoverhead = bigqueryset.first()
try:
smallunderhead = smallqueryset[1]
bigunderhead = bigqueryset[1]
except IndexError:
smallunderhead = smalloverhead
bigunderhead = bigoverhead
context = {
"students": students,
"numberofgroups": gruppe.antalgrupper,
"numberofgroupsrange": range(0,gruppe.antalgrupper),
"loginform": loginform,
"smalloverhead": smalloverhead,
"bigoverhead": bigoverhead,
"smallunderhead": smallunderhead,
"bigunderhead": bigunderhead,
}
return render(request, "gruppeapp/viewgroup.html", context)
class SignUpView(View):
form_class=UserForm
template_name="gruppeapp/registration_form.html"
def post(self, request):
form = self.form_class(request.POST)
loginform = LoginForm(None)
if form.is_valid():
user = form.save(commit=False)
user.username = form.cleaned_data["username"]
user.email = form.cleaned_data["email"]
password = form.cleaned_data["password1"]
try:
validate_password(password)
except(ValidationError):
return render(request, self.template_name, {"form": form, "errorhelp": password_validators_help_texts(), "loginform": loginform,})
user.set_password(password)
user.save()
user = authenticate(username=form.cleaned_data["username"], password=password)
if user is not None:
if user.is_active:
login(request, user)
return redirect("gruppeapp:makegroup")
return render(request, self.template_name, {"form": form,"errorhelp": password_validators_help_texts(), "loginform": loginform,})
def get(self, request):
form = self.form_class(None)
loginform = LoginForm(None)
return render(request, self.template_name, {"form": form,"errorhelp": password_validators_help_texts(), "loginform": loginform,})
class LoginView(View):
def post(self, request):
form = LoginForm(request.POST)
if form.is_valid():
username = form.cleaned_data['username']
password = form.cleaned_data['password']
user = authenticate(username=username, password=password)
if user is not None:
login(request, user)
if request.POST.get('remember_me', None):
print("remember_me!")
request.session.set_expiry(60*60*24*30)
else:
print("No remember_me!")
request.session.set_expiry(360)
return redirect("gruppeapp:makegroup")
else:
return redirect("gruppeapp:makegroup")
else:
return redirect("gruppeapp:makegroup")
def get(self, request):
return redirect("gruppeapp:makegroup")
class MyClassesView(View):
template_name="gruppeapp/myclasses.html"
def post(self, request):
if request.user.is_authenticated:
classid = 0
#print("Post: "+str(sorted(request.POST, key=operator.itemgetter(0))))
for key in request.POST: #Gets class id and deletes every student of that class
if key.endswith("classid"):
classid = request.POST[key]
currentclass = Klasse.objects.filter(id=classid)[0]
currentclass.elev_set.all().delete()
for key in sorted(request.POST):
if key.endswith("name"): #gets the name of a student and creates it.
currentstudentname = request.POST[key]
currentclass = Klasse.objects.filter(id=classid)[0]
student = Elev(navn=currentstudentname, klasse=currentclass)
student.save()
elif key.endswith("newstudentname"):
currentstudentname = request.POST[key]
currentclass = Klasse.objects.filter(id=classid)[0]
student = Elev(navn=currentstudentname, klasse=currentclass)
student.save()
classes = Klasse.objects.filter(user=request.user)
classfromquery = classes.filter(pk=classid).first()
return render(request, self.template_name,{"classes": classes, "loginform": LoginForm(None), "currentclass":classfromquery})
def get(self, request, currentclass=0):
if request.user.is_authenticated:
classes = Klasse.objects.filter(user=request.user)
# print("Thing!"+str(classes.first().id))
print("Currentclass="+str(currentclass))
        if currentclass != 0:
classfromquery = classes.filter(pk=currentclass).first()
else:
classfromquery = classes.first()
print("Class from query:"+str(classfromquery))
context = {"classes": classes, "loginform": LoginForm(None), "currentclass": classfromquery}
return render(request, self.template_name, context)
else:
context = {"loginerror": True, "loginform":LoginForm(None)}
return render(request, self.template_name, context)
class CreateNewClass(View):
def post(self, request):
if request.user.is_authenticated:
classname=request.POST["classname"]
description = request.POST["classdescription"]
newclass = Klasse(navn=classname, description=description, user=request.user)
newclass.save()
return redirect("gruppeapp:myclasses")
else:
raise Http404("Page not found")
def get(self, request):
return redirect("gruppeapp:myclasses")
class DeleteClass(View):
def post(self, request):
classid=request.POST["classid"]
Klasse.objects.filter(id=classid).delete()
return redirect("gruppeapp:myclasses")
def get(self, request):
return redirect("gruppeapp:myclasses")
def privacypolicy(request):
return render(request, "gruppeapp/privacypolicy.htm") | mit |
mozilla/captain | vendor/lib/python/django/contrib/staticfiles/finders.py | 102 | 9658 | import os
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.core.files.storage import default_storage, Storage, FileSystemStorage
from django.utils.datastructures import SortedDict
from django.utils.functional import empty, memoize, LazyObject
from django.utils.importlib import import_module
from django.utils._os import safe_join
from django.utils import six
from django.contrib.staticfiles import utils
from django.contrib.staticfiles.storage import AppStaticStorage
_finders = SortedDict()
class BaseFinder(object):
"""
A base file finder to be used for custom staticfiles finder classes.
"""
def find(self, path, all=False):
"""
Given a relative file path this ought to find an
absolute file path.
If the ``all`` parameter is ``False`` (default) only
the first found file path will be returned; if set
to ``True`` a list of all found files paths is returned.
"""
raise NotImplementedError()
def list(self, ignore_patterns):
"""
Given an optional list of paths to ignore, this should return
a two item iterable consisting of the relative path and storage
instance.
"""
raise NotImplementedError()
class FileSystemFinder(BaseFinder):
"""
A static files finder that uses the ``STATICFILES_DIRS`` setting
to locate files.
"""
def __init__(self, apps=None, *args, **kwargs):
# List of locations with static files
self.locations = []
# Maps dir paths to an appropriate storage instance
self.storages = SortedDict()
if not isinstance(settings.STATICFILES_DIRS, (list, tuple)):
raise ImproperlyConfigured(
"Your STATICFILES_DIRS setting is not a tuple or list; "
"perhaps you forgot a trailing comma?")
for root in settings.STATICFILES_DIRS:
if isinstance(root, (list, tuple)):
prefix, root = root
else:
prefix = ''
if os.path.abspath(settings.STATIC_ROOT) == os.path.abspath(root):
raise ImproperlyConfigured(
"The STATICFILES_DIRS setting should "
"not contain the STATIC_ROOT setting")
if (prefix, root) not in self.locations:
self.locations.append((prefix, root))
for prefix, root in self.locations:
filesystem_storage = FileSystemStorage(location=root)
filesystem_storage.prefix = prefix
self.storages[root] = filesystem_storage
super(FileSystemFinder, self).__init__(*args, **kwargs)
def find(self, path, all=False):
"""
Looks for files in the extra locations
as defined in ``STATICFILES_DIRS``.
"""
matches = []
for prefix, root in self.locations:
matched_path = self.find_location(root, path, prefix)
if matched_path:
if not all:
return matched_path
matches.append(matched_path)
return matches
def find_location(self, root, path, prefix=None):
"""
Finds a requested static file in a location, returning the found
absolute path (or ``None`` if no match).
"""
if prefix:
prefix = '%s%s' % (prefix, os.sep)
if not path.startswith(prefix):
return None
path = path[len(prefix):]
path = safe_join(root, path)
if os.path.exists(path):
return path
def list(self, ignore_patterns):
"""
List all files in all locations.
"""
for prefix, root in self.locations:
storage = self.storages[root]
for path in utils.get_files(storage, ignore_patterns):
yield path, storage
class AppDirectoriesFinder(BaseFinder):
"""
A static files finder that looks in the directory of each app as
specified in the source_dir attribute of the given storage class.
"""
storage_class = AppStaticStorage
def __init__(self, apps=None, *args, **kwargs):
# The list of apps that are handled
self.apps = []
# Mapping of app module paths to storage instances
self.storages = SortedDict()
if apps is None:
apps = settings.INSTALLED_APPS
for app in apps:
app_storage = self.storage_class(app)
if os.path.isdir(app_storage.location):
self.storages[app] = app_storage
if app not in self.apps:
self.apps.append(app)
super(AppDirectoriesFinder, self).__init__(*args, **kwargs)
def list(self, ignore_patterns):
"""
List all files in all app storages.
"""
for storage in six.itervalues(self.storages):
if storage.exists(''): # check if storage location exists
for path in utils.get_files(storage, ignore_patterns):
yield path, storage
def find(self, path, all=False):
"""
Looks for files in the app directories.
"""
matches = []
for app in self.apps:
match = self.find_in_app(app, path)
if match:
if not all:
return match
matches.append(match)
return matches
def find_in_app(self, app, path):
"""
Find a requested static file in an app's static locations.
"""
storage = self.storages.get(app, None)
if storage:
if storage.prefix:
prefix = '%s%s' % (storage.prefix, os.sep)
if not path.startswith(prefix):
return None
path = path[len(prefix):]
# only try to find a file if the source dir actually exists
if storage.exists(path):
matched_path = storage.path(path)
if matched_path:
return matched_path
class BaseStorageFinder(BaseFinder):
"""
    A base static files finder intended to be extended
    with a custom storage class.
"""
storage = None
def __init__(self, storage=None, *args, **kwargs):
if storage is not None:
self.storage = storage
if self.storage is None:
raise ImproperlyConfigured("The staticfiles storage finder %r "
"doesn't have a storage class "
"assigned." % self.__class__)
        # Make sure we have a storage instance here.
if not isinstance(self.storage, (Storage, LazyObject)):
self.storage = self.storage()
super(BaseStorageFinder, self).__init__(*args, **kwargs)
def find(self, path, all=False):
"""
Looks for files in the default file storage, if it's local.
"""
try:
self.storage.path('')
except NotImplementedError:
pass
else:
if self.storage.exists(path):
match = self.storage.path(path)
if all:
match = [match]
return match
return []
def list(self, ignore_patterns):
"""
List all files of the storage.
"""
for path in utils.get_files(self.storage, ignore_patterns):
yield path, self.storage
class DefaultStorageFinder(BaseStorageFinder):
"""
A static files finder that uses the default storage backend.
"""
storage = default_storage
def __init__(self, *args, **kwargs):
super(DefaultStorageFinder, self).__init__(*args, **kwargs)
base_location = getattr(self.storage, 'base_location', empty)
if not base_location:
raise ImproperlyConfigured("The storage backend of the "
"staticfiles finder %r doesn't have "
"a valid location." % self.__class__)
def find(path, all=False):
"""
Find a static file with the given path using all enabled finders.
If ``all`` is ``False`` (default), return the first matching
absolute path (or ``None`` if no match). Otherwise return a list.
"""
matches = []
for finder in get_finders():
result = finder.find(path, all=all)
if not all and result:
return result
if not isinstance(result, (list, tuple)):
result = [result]
matches.extend(result)
if matches:
return matches
# No match.
return all and [] or None
def get_finders():
for finder_path in settings.STATICFILES_FINDERS:
yield get_finder(finder_path)
def _get_finder(import_path):
"""
Imports the staticfiles finder class described by import_path, where
import_path is the full Python path to the class.
"""
module, attr = import_path.rsplit('.', 1)
try:
mod = import_module(module)
except ImportError as e:
raise ImproperlyConfigured('Error importing module %s: "%s"' %
(module, e))
try:
Finder = getattr(mod, attr)
except AttributeError:
raise ImproperlyConfigured('Module "%s" does not define a "%s" '
'class.' % (module, attr))
if not issubclass(Finder, BaseFinder):
raise ImproperlyConfigured('Finder "%s" is not a subclass of "%s"' %
(Finder, BaseFinder))
return Finder()
get_finder = memoize(_get_finder, _finders, 1)
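def _example_list_static_files():
    """Illustrative sketch only: typical use of the finder API above. Assumes
    Django settings are already configured; 'css/base.css' is a made-up path.
    """
    first_match = find('css/base.css')        # absolute path or None
    all_files = []
    for finder in get_finders():
        for path, storage in finder.list(ignore_patterns=[]):
            all_files.append(path)
    return first_match, all_files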
| mpl-2.0 |
datadesk/panda | config/settings.py | 4 | 7153 | #!/usr/bin/env python
import datetime
import os
import django
from django.utils.translation import ugettext_lazy as _
# Which settings are we using?
# Useful for debugging.
SETTINGS = 'base'
# Base paths
DJANGO_ROOT = os.path.dirname(os.path.realpath(django.__file__))
SITE_ROOT = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
# Debugging
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', '[email protected]'),
)
MANAGERS = ADMINS
LOGIN_URL = '/admin/login/'
LOGOUT_URL = '/admin/logout/'
LOGIN_REDIRECT_URL = '/admin/'
SITE_ID = 1
# Default connection to socket
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'HOST': 'localhost',
'PORT': '5432',
'NAME': 'panda',
'USER': 'panda',
'PASSWORD': 'panda'
}
}
TIME_ZONE = 'Etc/UTC'
USE_TZ = True
LANGUAGE_CODE = 'en-us'
USE_I18N = True
USE_L10N = False
LOCALE_PATHS = (os.path.join(SITE_ROOT, 'locale'),)
# Media
STATIC_ROOT = os.path.join(SITE_ROOT, 'media')
STATIC_URL = '/site_media/'
ADMIN_MEDIA_PREFIX = '/site_media/admin/'
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'compressor.finders.CompressorFinder',
)
# Uploads
MEDIA_ROOT = '/tmp/panda'
EXPORT_ROOT = '/tmp/panda_exports'
# Make this unique, and don't share it with anybody.
SECRET_KEY = '-lyd+@8@=9oni01+gjvb(txz3%hh_7a9m5*n0q^ce5+&c1fkm('
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
'django.template.loaders.eggs.Loader',
)
TEMPLATE_CONTEXT_PROCESSORS = (
'django.core.context_processors.media',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'django.core.context_processors.csrf',
'django.core.context_processors.i18n'
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'panda.middleware.CsrfCookieUsedMiddleware'
)
ROOT_URLCONF = 'config.urls'
TEMPLATE_DIRS = (
os.path.join(SITE_ROOT, 'templates')
)
INSTALLED_APPS = (
'longerusername',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.admin',
'django.contrib.humanize',
'django.contrib.sites',
'django.contrib.staticfiles',
'south',
'tastypie',
'djcelery',
'compressor',
'livesettings',
'jumpstart',
'panda',
'client'
)
SESSION_COOKIE_AGE = 2592000 # 30 days
AUTH_PROFILE_MODULE = 'panda.UserProfile'
# Django-compressor
COMPRESS_ENABLED = False
# Celery
import djcelery
djcelery.setup_loader()
BROKER_TRANSPORT = 'sqlalchemy'
BROKER_URL = 'postgresql://%(USER)s:%(PASSWORD)s@%(HOST)s/%(NAME)s' % DATABASES['default']
CELERY_RESULT_DBURI = 'postgresql://%(USER)s:%(PASSWORD)s@%(HOST)s/%(NAME)s' % DATABASES['default']
CELERYD_HIJACK_ROOT_LOGGER = False
CELERYD_CONCURRENCY = 1
CELERY_IGNORE_RESULT = True
CELERY_STORE_ERRORS_EVEN_IF_IGNORED = True
CELERYBEAT_SCHEDULE_FILENAME = 'celerybeat-schedule'
from celery.schedules import crontab
CELERYBEAT_SCHEDULE = {
'purge_orphaned_uploads': {
'task': 'panda.tasks.cron.purge_orphaned_uploads',
'schedule': crontab(minute=0, hour=2),
'kwargs': { 'fake': False }
},
'run_subscriptions': {
'task': 'panda.tasks.cron.run_subscriptions',
'schedule': crontab(minute=30, hour=2)
},
'run_admin_alerts': {
'task': 'panda.tasks.cron.run_admin_alerts',
'schedule': crontab(minute=0, hour=4)
}
}
# South
SOUTH_TESTS_MIGRATE = False
# Hack, see: http://stackoverflow.com/questions/3898239/souths-syncdb-migrate-creates-pages-of-output
import south.logger
# Logging
LOGGING = {
'version': 1,
'disable_existing_loggers': True,
'formatters': {
'standard': {
'format': '%(asctime)s [%(levelname)s] %(name)s: %(message)s'
},
},
'handlers': {
'console': {
'level':'DEBUG',
'class':'logging.StreamHandler',
'formatter': 'standard'
},
'default': {
'level':'INFO',
'class':'loghandlers.GroupWriteRotatingFileHandler',
'filename': '/var/log/panda/panda.log',
'maxBytes': 1024*1024*5, # 5 MB
'backupCount': 5,
'formatter':'standard',
},
'request_handler': {
'level':'INFO',
'class':'loghandlers.GroupWriteRotatingFileHandler',
'filename': '/var/log/panda/requests.log',
'maxBytes': 1024*1024*5, # 5 MB
'backupCount': 5,
'formatter':'standard',
},
'backend_handler': {
'level':'DEBUG',
'class':'django.utils.log.NullHandler',
},
},
'loggers': {
'': {
'handlers': ['default', 'console'],
'level': 'DEBUG',
'propagate': True
},
'django.request': {
'handlers': ['request_handler', 'console'],
'level': 'DEBUG',
'propagate': False
},
'django.db': {
'handlers': ['backend_handler'],
'level': 'DEBUG',
'propagate': False
},
'south': {
'handlers': ['console'],
'level': 'INFO',
            'propagate': False
},
'keyedcache': {
'handlers': ['console'],
'level': 'ERROR',
            'propagate': False
},
'requests.packages.urllib3.connectionpool': {
'handlers': ['console'],
'level': 'ERROR',
            'propagate': False
}
}
}
# Solr
SOLR_ENDPOINT = 'http://localhost:8983/solr'
SOLR_DATA_CORE = 'data'
SOLR_DATASETS_CORE = 'datasets'
SOLR_DIRECTORY = '/var/solr'
# Miscellaneous configuration
PANDA_VERSION = '1.1.2'
PANDA_DEFAULT_SEARCH_GROUPS = 10
PANDA_DEFAULT_SEARCH_ROWS_PER_GROUP = 5
PANDA_DEFAULT_SEARCH_ROWS = 50
PANDA_SNIFFER_MAX_SAMPLE_SIZE = 1024 * 100 # 100 KB
PANDA_SAMPLE_DATA_ROWS = 5
PANDA_SCHEMA_SAMPLE_ROWS = 100
PANDA_ACTIVATION_PERIOD = datetime.timedelta(days=30)
PANDA_AVAILABLE_SPACE_WARN = 1024 * 1024 * 1024 * 2 # 2GB
PANDA_AVAILABLE_SPACE_CRITICAL = 1024 * 1024 * 1024 * 1 # 1GB
PANDA_NOTIFICATIONS_TO_SHOW = 50
PANDA_UNCATEGORIZED_ID = 0
PANDA_UNCATEGORIZED_SLUG = 'uncategorized'
# running this through gettext causes file uploads not to work, so disabled until solved!
PANDA_UNCATEGORIZED_NAME = _('Uncategorized')
MOMENT_LANGUAGE_MAPPING = {
'en': None,
'es': 'es',
'de': 'de'
}
# Allow for local (per-user) override
try:
from local_settings import *
except ImportError:
pass
| mit |
hmpf/nav | python/nav/statemon/event.py | 2 | 1484 | #
# Copyright (C) 2018 Uninett AS
#
# This file is part of Network Administration Visualized (NAV)
#
# NAV is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# NAV is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NAV; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
class Event(object):
"""
Class representing a NAV Event
"""
UP = 'UP'
DOWN = 'DOWN'
boxState = 'boxState'
serviceState = 'serviceState'
def __init__(self, serviceid, netboxid, deviceid,
eventtype, source, status, info='', version=''):
self.serviceid = serviceid
self.netboxid = netboxid
self.deviceid = deviceid
self.info = info
self.eventtype = eventtype
self.status = status
self.version = version
self.source = source
def __repr__(self):
return "Service: %s, netbox: %s, eventtype: %s, status: %s" % \
(self.serviceid, self.netboxid, self.eventtype, self.status)
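if __name__ == '__main__':
    # Minimal illustrative sketch: construct an Event by hand. The numeric ids
    # and the source name below are made-up example values.
    example = Event(serviceid=1, netboxid=2, deviceid=3,
                    eventtype=Event.serviceState, source='serviceping',
                    status=Event.UP, info='example event')
    print(example)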
| gpl-3.0 |
cbrentharris/bricklayer | bricklayer/tests/doctor/config_test.py | 1 | 2313 | from unittest import TestCase
from bricklayer.doctor.config import Configurator
import uuid
import os
import shutil
import tempfile
import ConfigParser
class ConfiguratorTest(TestCase):
def setUp(self):
self.random_dir = tempfile.gettempdir() + '/.' + uuid.uuid4().hex
os.makedirs(self.random_dir)
def tearDown(self):
if self.random_dir is not None:
shutil.rmtree(self.random_dir)
def test_it_creates_a_config_file_with_a_uuid_if_one_doesnt_exist(self):
os.chdir(self.random_dir)
Configurator.create_config_if_doesnt_exist()
self.assertTrue(os.path.exists(self.random_dir + '/.bricklayer' + '/settings.cfg'))
def test_it_adds_the_uuid_to_the_config_file(self):
os.chdir(self.random_dir)
Configurator.create_config_if_doesnt_exist()
config = ConfigParser.RawConfigParser()
config.read([self.random_dir + '/.bricklayer/settings.cfg'])
self.assertIsInstance(config.get('General', 'uuid'), str)
def test_it_returns_the_uuid_stored(self):
os.chdir(self.random_dir)
random_uuid = Configurator.get('uuid')
config = ConfigParser.RawConfigParser()
config.read([self.random_dir + '/.bricklayer/settings.cfg'])
self.assertEqual(config.get('General', 'uuid'), random_uuid)
def test_it_doesnt_overwrite_the_config_file(self):
os.chdir(self.random_dir)
Configurator.create_config_if_doesnt_exist()
config = ConfigParser.RawConfigParser()
config.read([self.random_dir + '/.bricklayer/settings.cfg'])
generated_uuid = config.get('General', 'uuid')
Configurator.create_config_if_doesnt_exist()
config2 = ConfigParser.RawConfigParser()
config2.read([self.random_dir + '/.bricklayer/settings.cfg'])
        self.assertEqual(generated_uuid, config2.get('General', 'uuid'))
def test_it_adds_to_the_config_file(self):
os.chdir(self.random_dir)
Configurator.create_config_if_doesnt_exist()
Configurator.set('name', 'chris')
self.assertIsNotNone(Configurator.get('name'))
def test_it_gets_from_the_config_file(self):
os.chdir(self.random_dir)
Configurator.create_config_if_doesnt_exist()
self.assertIsNotNone(Configurator.get('uuid'))
| mit |
rich-digi/wp-xml-transformer | cts-import.py | 1 | 4100 | # ------------------------------------------------------------------------------------------------
# Split Wordpress XML (using LXML)
# ------------------------------------------------------------------------------------------------
import sys, os, re, pprint, codecs, datetime, subprocess
# sys.path.append('/usr/local/lib/python2.7/site-packages/')
# from lxml import etree as ET
# from phpserialize import serialize, unserialize
class trml:
BLACK = '\033[30m'
RED = '\033[31m'
GREEN = '\033[32m'
BOLD = '\033[1m'
NORMAL = '\033[0;0m'
# Wordpress XML namespaces
namespaces = {
'wp' : 'http://wordpress.org/export/1.2/',
'excerpt' : 'http://wordpress.org/export/1.2/excerpt/',
'content' : 'http://purl.org/rss/1.0/modules/content/',
'wfw' : 'http://wellformedweb.org/CommentAPI/',
'dc' : 'http://purl.org/dc/elements/1.1/',
}
"""
REGISTER NAMESPACE WHEN WRITING ONLY
for prefix, uri in namespaces.iteritems():
ET.register_namespace(prefix, uri)
"""
# ------------------------------------------------------------------------------------------------
# Utility functions
def make_dir(dir):
dir = os.getcwd() + dir
if not os.path.exists(dir): os.makedirs(dir)
def write_utf8_file(fp, ustr):
f = codecs.open(os.getcwd()+fp, 'w', 'utf-8');
f.write(ustr)
f.close()
def logprint(ustr=''):
# Unicode-safe logger
print ustr
lfp.write(ustr+'\n')
def shexec(cmd):
try:
res = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
except:
res = 'ERROR: Shell command error, running ' + cmd
logprint(res)
return res
def parse_shellvars(file_name):
TIC = "'"
QUOTE = '"'
return_dict = dict()
with open(file_name) as reader:
for line in reader.readlines():
line = re.sub(r"export\s+", "", line.strip())
if "=" in line:
key, value = line.split("=", 1)
# Values that are wrapped in tics: remove the tics but otherwise leave as is
if value.startswith(TIC):
# Remove first tic and everything after the last tic
last_tic_position = value.rindex(TIC)
value = value[1:last_tic_position]
return_dict[key] = value
continue
# Values that are wrapped in quotes: remove the quotes and optional trailing comment
elif value.startswith(QUOTE): # Values that are wrapped quotes
value = re.sub(r'^"(.+?)".+', '\g<1>', value)
# Values that are followed by whitespace or comments: remove the whitespace and/or comments
else:
value = re.sub(r'(#|\s+).*', '', value)
for variable in re.findall(r"\$\{?\w+\}?", value):
# Find embedded shell variables
dict_key = variable.strip("${}")
# Replace them with their values
value = value.replace(variable, return_dict.get(dict_key, ""))
# Add this key to the dictionary
return_dict[key] = value
return return_dict
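def example_parse_shellvars():
	# Illustrative sketch only: for a config file containing lines such as
	#   export GIT_ContentLocal='/srv/content'
	#   export CTS_ImportLogDir="/var/log/cts/" # import logs
	# parse_shellvars returns {'GIT_ContentLocal': '/srv/content',
	# 'CTS_ImportLogDir': '/var/log/cts/'}. The file name below is made up.
	return parse_shellvars('example-instance.cfg')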
# --------------------------------------------------------------------------------
# RUN
def run():
logprint()
logprint('------------------------------------------------------')
logprint('cts-import.py : running at ' + logtime)
logprint('------------------------------------------------------')
logprint()
logprint('Let\'s join & import...')
logprint()
logprint(pprint.pformat(config))
logprint()
if len(sys.argv) > 1: revision = sys.argv[1]
# Pull latest version from central Git repo
os.chdir(config['GIT_ContentLocal'])
shexec('pwd')
shexec('git pull')
# parse_html_xml_and_join()
logprint('Copying into import area @')
shexec(' '.join(['cp -pr', config['GIT_ContentLocal'], config['GIT_ImportTarget']]))
# res = trigger_import()
logprint()
logprint('STATUS: SUCCESS')
logprint('DONE')
logprint()
# --------------------------------------------------------------------------------
if __name__ == '__main__':
# Parse config file
config = parse_shellvars('bizclub-instance.cfg')
# Create logfile as global
today = datetime.datetime.today()
logtime = today.strftime('%Y-%m-%d-%H-%M-%S')
logfile = config['CTS_ImportLogDir'] + 'cts-import-' + logtime + '.log'
lfp = codecs.open(logfile, 'w', 'utf-8')
# Run
run();
# Close logfile
lfp.close()
| mit |
yd0str/infernal-twin | build/pillow/PIL/PaletteFile.py | 72 | 1113 | #
# Python Imaging Library
# $Id$
#
# stuff to read simple, teragon-style palette files
#
# History:
# 97-08-23 fl Created
#
# Copyright (c) Secret Labs AB 1997.
# Copyright (c) Fredrik Lundh 1997.
#
# See the README file for information on usage and redistribution.
#
from PIL._binary import o8
##
# File handler for Teragon-style palette files.
class PaletteFile(object):
rawmode = "RGB"
def __init__(self, fp):
self.palette = [(i, i, i) for i in range(256)]
while True:
s = fp.readline()
if not s:
break
if s[0:1] == b"#":
continue
if len(s) > 100:
raise SyntaxError("bad palette file")
v = [int(x) for x in s.split()]
try:
[i, r, g, b] = v
except ValueError:
[i, r] = v
g = b = r
if 0 <= i <= 255:
self.palette[i] = o8(r) + o8(g) + o8(b)
self.palette = b"".join(self.palette)
def getpalette(self):
return self.palette, self.rawmode
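if __name__ == "__main__":
    # Minimal illustrative sketch: load a Teragon-style palette file and apply
    # it to a new paletted image ("palette.txt" is a made-up file name).
    from PIL import Image
    with open("palette.txt", "rb") as fp:
        palette, rawmode = PaletteFile(fp).getpalette()
    im = Image.new("P", (16, 16))
    im.putpalette(palette, rawmode)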
| gpl-3.0 |
daphne-yu/aubio | python/tests/test_source.py | 8 | 2264 | #! /usr/bin/env python
from numpy.testing import TestCase, assert_equal, assert_almost_equal
from aubio import fvec, source
from numpy import array
from utils import list_all_sounds
list_of_sounds = list_all_sounds('sounds')
path = None
class aubio_source_test_case(TestCase):
def setUp(self):
if not len(list_of_sounds): self.skipTest('add some sound files in \'python/tests/sounds\'')
def read_from_sink(self, f):
total_frames = 0
while True:
vec, read = f()
total_frames += read
if read < f.hop_size: break
print "read", "%.2fs" % (total_frames / float(f.samplerate) ),
print "(", total_frames, "frames", "in",
print total_frames / f.hop_size, "blocks", "at", "%dHz" % f.samplerate, ")",
print "from", f.uri
def test_samplerate_hopsize(self):
for p in list_of_sounds:
for samplerate, hop_size in zip([0, 44100, 8000, 32000], [ 512, 512, 64, 256]):
f = source(p, samplerate, hop_size)
assert f.samplerate != 0
self.read_from_sink(f)
def test_samplerate_none(self):
for p in list_of_sounds:
f = source(p)
assert f.samplerate != 0
self.read_from_sink(f)
def test_samplerate_0(self):
for p in list_of_sounds:
f = source(p, 0)
assert f.samplerate != 0
self.read_from_sink(f)
def test_wrong_samplerate(self):
for p in list_of_sounds:
try:
f = source(p, -1)
except Exception, e:
print e
else:
self.fail('does not fail with wrong samplerate')
def test_wrong_hop_size(self):
for p in list_of_sounds:
try:
f = source(p, 0, -1)
except Exception, e:
print e
else:
self.fail('does not fail with wrong hop_size %d' % f.hop_size)
def test_zero_hop_size(self):
for p in list_of_sounds:
f = source(p, 0, 0)
assert f.samplerate != 0
assert f.hop_size != 0
self.read_from_sink(f)
if __name__ == '__main__':
from unittest import main
main()
| gpl-3.0 |
yjmade/odoo | addons/decimal_precision/tests/test_qweb_float.py | 103 | 2000 | # -*- coding: utf-8 -*-
from openerp.tests import common
class TestFloatExport(common.TransactionCase):
def setUp(self):
super(TestFloatExport, self).setUp()
self.Model = self.registry('decimal.precision.test')
def get_converter(self, name):
converter = self.registry('ir.qweb.field.float')
column = self.Model._all_columns[name].column
return lambda value, options=None: converter.value_to_html(
self.cr, self.uid, value, column, options=options, context=None)
def test_basic_float(self):
converter = self.get_converter('float')
self.assertEqual(
converter(42.0),
"42.0")
self.assertEqual(
converter(42.12345),
"42.12345")
converter = self.get_converter('float_2')
self.assertEqual(
converter(42.0),
"42.00")
self.assertEqual(
converter(42.12345),
"42.12")
converter = self.get_converter('float_4')
self.assertEqual(
converter(42.0),
'42.0000')
self.assertEqual(
converter(42.12345),
'42.1234')
def test_precision_domain(self):
DP = self.registry('decimal.precision')
DP.create(self.cr, self.uid, {
'name': 'A',
'digits': 2,
})
DP.create(self.cr, self.uid, {
'name': 'B',
'digits': 6,
})
converter = self.get_converter('float')
self.assertEqual(
converter(42.0, {'decimal_precision': 'A'}),
'42.00')
self.assertEqual(
converter(42.0, {'decimal_precision': 'B'}),
'42.000000')
converter = self.get_converter('float_4')
self.assertEqual(
converter(42.12345, {'decimal_precision': 'A'}),
'42.12')
self.assertEqual(
converter(42.12345, {'decimal_precision': 'B'}),
'42.123450')
| agpl-3.0 |
richo/groundstation | groundstation/stream_client.py | 1 | 1052 | from sockets.stream_socket import StreamSocket
from transfer.request import Request
from transfer.notification import Notification
import settings
from groundstation.utils import path2id
import groundstation.logger
log = groundstation.logger.getLogger(__name__)
class StreamClient(StreamSocket):
def __init__(self, addr):
super(StreamClient, self).__init__()
# TODO Pretty sure this should be a struct sockaddr
self.peer = addr
self.socket.connect((addr, settings.PORT))
self.socket.setblocking(False)
def begin_handshake(self, station):
request = Request("LISTALLOBJECTS", station=station, stream=self)
station.register_request(request)
self.enqueue(request)
def notify_new_object(self, station, path):
# TODO FSWatcher should probably be responsible for catching these to
# keep signal:noise sane
obj = path2id(path)
notification = Notification("NEWOBJECT", station=station, stream=self, payload=obj)
self.enqueue(notification)
| mit |
argivaitv/argivaitv | plugin.video.salts/scrapers/movie25_scraper.py | 1 | 3790 | """
SALTS XBMC Addon
Copyright (C) 2014 tknorris
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import scraper
import urllib
import urlparse
import re
from salts_lib import kodi
import base64
from salts_lib.constants import VIDEO_TYPES
from salts_lib.constants import FORCE_NO_MATCH
from salts_lib.constants import QUALITIES
QUALITY_MAP = {'DVD': QUALITIES.HIGH, 'CAM': QUALITIES.LOW}
BASE_URL = 'http://movie25.ag'
class Movie25_Scraper(scraper.Scraper):
base_url = BASE_URL
def __init__(self, timeout=scraper.DEFAULT_TIMEOUT):
self.timeout = timeout
self.base_url = kodi.get_setting('%s-base_url' % (self.get_name()))
@classmethod
def provides(cls):
return frozenset([VIDEO_TYPES.MOVIE])
@classmethod
def get_name(cls):
return 'movie25'
def resolve_link(self, link):
url = urlparse.urljoin(self.base_url, link)
html = self._http_get(url, cache_limit=0)
match = re.search('href=\'([^\']*)\'"\s+value="Click Here to Play"', html, re.DOTALL | re.I)
if match:
return match.group(1)
else:
match = re.search('<IFRAME SRC="(?:/?tz\.php\?url=external\.php\?url=)?([^"]+)', html, re.DOTALL | re.I)
if match:
try:
return base64.b64decode(match.group(1))
except TypeError:
return match.group(1)
else:
return link
def format_source_label(self, item):
return '[%s] %s' % (item['quality'], item['host'])
def get_sources(self, video):
source_url = self.get_url(video)
hosters = []
if source_url and source_url != FORCE_NO_MATCH:
url = urlparse.urljoin(self.base_url, source_url)
html = self._http_get(url, cache_limit=.5)
quality = None
match = re.search('Links\s+-\s+Quality\s*([^<]*)</h1>', html, re.DOTALL | re.I)
if match:
quality = QUALITY_MAP.get(match.group(1).strip().upper())
for match in re.finditer('id="link_name">\s*([^<]+).*?href="([^"]+)', html, re.DOTALL):
host, url = match.groups()
hoster = {'multi-part': False, 'host': host, 'class': self, 'url': url, 'quality': self._get_quality(video, host, quality), 'rating': None, 'views': None, 'direct': False}
hosters.append(hoster)
return hosters
def get_url(self, video):
return super(Movie25_Scraper, self)._default_get_url(video)
def search(self, video_type, title, year):
search_url = urlparse.urljoin(self.base_url, '/search.php?key=')
search_url += urllib.quote_plus('%s %s' % (title, year))
search_url += '&submit='
html = self._http_get(search_url, cache_limit=.25)
pattern = 'class="movie_about">.*?href="([^"]+).*?>\s+(.*?)\s*\(?(\d{4})?\)?\s+</a></h1>'
results = []
for match in re.finditer(pattern, html, re.DOTALL):
url, title, year = match.groups('')
result = {'url': self._pathify_url(url), 'title': title, 'year': year}
results.append(result)
return results
| gpl-2.0 |
formiano/enigma2-4.4 | lib/python/Components/Renderer/valioPosition.py | 13 | 1298 | # -*- coding: utf-8 -*-
#
# Maximum Temperature Renderer for Dreambox/Enigma-2
# Version: 1.0
# Coded by Vali (c)2010-2011
#
#######################################################################
from Components.VariableText import VariableText
from enigma import eLabel
from Renderer import Renderer
class valioPosition(Renderer, VariableText):
def __init__(self):
Renderer.__init__(self)
VariableText.__init__(self)
GUI_WIDGET = eLabel
def changed(self, what):
if not self.suspended:
orb_pos = " "
service = self.source.service
feinfo = (service and service.frontendInfo())
if (feinfo is not None):
frontendData = (feinfo and feinfo.getAll(True))
if (frontendData is not None):
if (frontendData.get("tuner_type") == "DVB-S"):
orbital_pos = int(frontendData["orbital_position"])
if orbital_pos > 1800:
							orb_pos = str((float(3600 - orbital_pos))/10.0) + "°W"
elif orbital_pos > 0:
							orb_pos = str((float(orbital_pos))/10.0) + "°E"
elif (frontendData.get("tuner_type") == "DVB-T"):
orb_pos = "DVB-T"
elif (frontendData.get("tuner_type") == "DVB-C"):
orb_pos = "DVB-C"
self.text = orb_pos
def onShow(self):
self.suspended = False
self.changed(None)
def onHide(self):
self.suspended = True
| gpl-2.0 |
derekjchow/models | research/skip_thoughts/skip_thoughts/skip_thoughts_model_test.py | 19 | 6755 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow_models.skip_thoughts.skip_thoughts_model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from skip_thoughts import configuration
from skip_thoughts import skip_thoughts_model
class SkipThoughtsModel(skip_thoughts_model.SkipThoughtsModel):
"""Subclass of SkipThoughtsModel without the disk I/O."""
def build_inputs(self):
if self.mode == "encode":
# Encode mode doesn't read from disk, so defer to parent.
return super(SkipThoughtsModel, self).build_inputs()
else:
# Replace disk I/O with random Tensors.
self.encode_ids = tf.random_uniform(
[self.config.batch_size, 15],
minval=0,
maxval=self.config.vocab_size,
dtype=tf.int64)
self.decode_pre_ids = tf.random_uniform(
[self.config.batch_size, 15],
minval=0,
maxval=self.config.vocab_size,
dtype=tf.int64)
self.decode_post_ids = tf.random_uniform(
[self.config.batch_size, 15],
minval=0,
maxval=self.config.vocab_size,
dtype=tf.int64)
self.encode_mask = tf.ones_like(self.encode_ids)
self.decode_pre_mask = tf.ones_like(self.decode_pre_ids)
self.decode_post_mask = tf.ones_like(self.decode_post_ids)
class SkipThoughtsModelTest(tf.test.TestCase):
def setUp(self):
super(SkipThoughtsModelTest, self).setUp()
self._model_config = configuration.model_config()
def _countModelParameters(self):
"""Counts the number of parameters in the model at top level scope."""
counter = {}
for v in tf.global_variables():
name = v.op.name.split("/")[0]
num_params = v.get_shape().num_elements()
if not num_params:
self.fail("Could not infer num_elements from Variable %s" % v.op.name)
counter[name] = counter.get(name, 0) + num_params
return counter
def _checkModelParameters(self):
"""Verifies the number of parameters in the model."""
param_counts = self._countModelParameters()
expected_param_counts = {
# vocab_size * embedding_size
"word_embedding": 12400000,
# GRU Cells
"encoder": 21772800,
"decoder_pre": 21772800,
"decoder_post": 21772800,
# (encoder_dim + 1) * vocab_size
"logits": 48020000,
"global_step": 1,
}
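    # Arithmetic note (added; inferred from the shapes used elsewhere in this
    # test and stated as an assumption about the default config): with
    # vocab_size=20000 and word_embedding_dim=620, word_embedding holds
    # 20000 * 620 = 12,400,000 weights, and logits holds
    # (encoder_dim 2400 + 1) * 20000 = 48,020,000.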
self.assertDictEqual(expected_param_counts, param_counts)
def _checkOutputs(self, expected_shapes, feed_dict=None):
"""Verifies that the model produces expected outputs.
Args:
expected_shapes: A dict mapping Tensor or Tensor name to expected output
shape.
feed_dict: Values of Tensors to feed into Session.run().
"""
fetches = expected_shapes.keys()
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
outputs = sess.run(fetches, feed_dict)
for index, output in enumerate(outputs):
tensor = fetches[index]
expected = expected_shapes[tensor]
actual = output.shape
if expected != actual:
self.fail("Tensor %s has shape %s (expected %s)." % (tensor, actual,
expected))
def testBuildForTraining(self):
model = SkipThoughtsModel(self._model_config, mode="train")
model.build()
self._checkModelParameters()
expected_shapes = {
# [batch_size, length]
model.encode_ids: (128, 15),
model.decode_pre_ids: (128, 15),
model.decode_post_ids: (128, 15),
model.encode_mask: (128, 15),
model.decode_pre_mask: (128, 15),
model.decode_post_mask: (128, 15),
# [batch_size, length, word_embedding_dim]
model.encode_emb: (128, 15, 620),
model.decode_pre_emb: (128, 15, 620),
model.decode_post_emb: (128, 15, 620),
# [batch_size, encoder_dim]
model.thought_vectors: (128, 2400),
# [batch_size * length]
model.target_cross_entropy_losses[0]: (1920,),
model.target_cross_entropy_losses[1]: (1920,),
# [batch_size * length]
model.target_cross_entropy_loss_weights[0]: (1920,),
model.target_cross_entropy_loss_weights[1]: (1920,),
# Scalar
model.total_loss: (),
}
self._checkOutputs(expected_shapes)
def testBuildForEval(self):
model = SkipThoughtsModel(self._model_config, mode="eval")
model.build()
self._checkModelParameters()
expected_shapes = {
# [batch_size, length]
model.encode_ids: (128, 15),
model.decode_pre_ids: (128, 15),
model.decode_post_ids: (128, 15),
model.encode_mask: (128, 15),
model.decode_pre_mask: (128, 15),
model.decode_post_mask: (128, 15),
# [batch_size, length, word_embedding_dim]
model.encode_emb: (128, 15, 620),
model.decode_pre_emb: (128, 15, 620),
model.decode_post_emb: (128, 15, 620),
# [batch_size, encoder_dim]
model.thought_vectors: (128, 2400),
# [batch_size * length]
model.target_cross_entropy_losses[0]: (1920,),
model.target_cross_entropy_losses[1]: (1920,),
# [batch_size * length]
model.target_cross_entropy_loss_weights[0]: (1920,),
model.target_cross_entropy_loss_weights[1]: (1920,),
# Scalar
model.total_loss: (),
}
self._checkOutputs(expected_shapes)
def testBuildForEncode(self):
model = SkipThoughtsModel(self._model_config, mode="encode")
model.build()
# Test feeding a batch of word embeddings to get skip thought vectors.
encode_emb = np.random.rand(64, 15, 620)
encode_mask = np.ones((64, 15), dtype=np.int64)
feed_dict = {model.encode_emb: encode_emb, model.encode_mask: encode_mask}
expected_shapes = {
# [batch_size, encoder_dim]
model.thought_vectors: (64, 2400),
}
self._checkOutputs(expected_shapes, feed_dict)
if __name__ == "__main__":
tf.test.main()
| apache-2.0 |
edry/edx-platform | common/lib/xmodule/xmodule/annotator_token.py | 211 | 1542 | """
This file contains a function used to retrieve the token for the annotation backend
without having to create a view, but just returning a string instead.
It can be called from other files by using the following:
from xmodule.annotator_token import retrieve_token
"""
import datetime
from firebase_token_generator import create_token
def retrieve_token(userid, secret):
'''
Return a token for the backend of annotations.
It uses the course id to retrieve a variable that contains the secret
token found in inheritance.py. It also contains information of when
the token was issued. This will be stored with the user along with
the id for identification purposes in the backend.
'''
# the following five lines of code allows you to include the default timezone in the iso format
# for more information: http://stackoverflow.com/questions/3401428/how-to-get-an-isoformat-datetime-string-including-the-default-timezone
dtnow = datetime.datetime.now()
dtutcnow = datetime.datetime.utcnow()
delta = dtnow - dtutcnow
newhour, newmin = divmod((delta.days * 24 * 60 * 60 + delta.seconds + 30) // 60, 60)
newtime = "%s%+02d:%02d" % (dtnow.isoformat(), newhour, newmin)
# uses the issued time (UTC plus timezone), the consumer key and the user's email to maintain a
# federated system in the annotation backend server
custom_data = {"issuedAt": newtime, "consumerKey": secret, "userId": userid, "ttl": 86400}
newtoken = create_token(secret, custom_data)
return newtoken
| agpl-3.0 |
ericMayer/tekton-master | backend/venv/lib/python2.7/site-packages/unidecode/x067.py | 252 | 4635 | data = (
'Zui ', # 0x00
'Can ', # 0x01
'Xu ', # 0x02
'Hui ', # 0x03
'Yin ', # 0x04
'Qie ', # 0x05
'Fen ', # 0x06
'Pi ', # 0x07
'Yue ', # 0x08
'You ', # 0x09
'Ruan ', # 0x0a
'Peng ', # 0x0b
'Ban ', # 0x0c
'Fu ', # 0x0d
'Ling ', # 0x0e
'Fei ', # 0x0f
'Qu ', # 0x10
'[?] ', # 0x11
'Nu ', # 0x12
'Tiao ', # 0x13
'Shuo ', # 0x14
'Zhen ', # 0x15
'Lang ', # 0x16
'Lang ', # 0x17
'Juan ', # 0x18
'Ming ', # 0x19
'Huang ', # 0x1a
'Wang ', # 0x1b
'Tun ', # 0x1c
'Zhao ', # 0x1d
'Ji ', # 0x1e
'Qi ', # 0x1f
'Ying ', # 0x20
'Zong ', # 0x21
'Wang ', # 0x22
'Tong ', # 0x23
'Lang ', # 0x24
'[?] ', # 0x25
'Meng ', # 0x26
'Long ', # 0x27
'Mu ', # 0x28
'Deng ', # 0x29
'Wei ', # 0x2a
'Mo ', # 0x2b
'Ben ', # 0x2c
'Zha ', # 0x2d
'Zhu ', # 0x2e
'Zhu ', # 0x2f
'[?] ', # 0x30
'Zhu ', # 0x31
'Ren ', # 0x32
'Ba ', # 0x33
'Po ', # 0x34
'Duo ', # 0x35
'Duo ', # 0x36
'Dao ', # 0x37
'Li ', # 0x38
'Qiu ', # 0x39
'Ji ', # 0x3a
'Jiu ', # 0x3b
'Bi ', # 0x3c
'Xiu ', # 0x3d
'Ting ', # 0x3e
'Ci ', # 0x3f
'Sha ', # 0x40
'Eburi ', # 0x41
'Za ', # 0x42
'Quan ', # 0x43
'Qian ', # 0x44
'Yu ', # 0x45
'Gan ', # 0x46
'Wu ', # 0x47
'Cha ', # 0x48
'Shan ', # 0x49
'Xun ', # 0x4a
'Fan ', # 0x4b
'Wu ', # 0x4c
'Zi ', # 0x4d
'Li ', # 0x4e
'Xing ', # 0x4f
'Cai ', # 0x50
'Cun ', # 0x51
'Ren ', # 0x52
'Shao ', # 0x53
'Tuo ', # 0x54
'Di ', # 0x55
'Zhang ', # 0x56
'Mang ', # 0x57
'Chi ', # 0x58
'Yi ', # 0x59
'Gu ', # 0x5a
'Gong ', # 0x5b
'Du ', # 0x5c
'Yi ', # 0x5d
'Qi ', # 0x5e
'Shu ', # 0x5f
'Gang ', # 0x60
'Tiao ', # 0x61
'Moku ', # 0x62
'Soma ', # 0x63
'Tochi ', # 0x64
'Lai ', # 0x65
'Sugi ', # 0x66
'Mang ', # 0x67
'Yang ', # 0x68
'Ma ', # 0x69
'Miao ', # 0x6a
'Si ', # 0x6b
'Yuan ', # 0x6c
'Hang ', # 0x6d
'Fei ', # 0x6e
'Bei ', # 0x6f
'Jie ', # 0x70
'Dong ', # 0x71
'Gao ', # 0x72
'Yao ', # 0x73
'Xian ', # 0x74
'Chu ', # 0x75
'Qun ', # 0x76
'Pa ', # 0x77
'Shu ', # 0x78
'Hua ', # 0x79
'Xin ', # 0x7a
'Chou ', # 0x7b
'Zhu ', # 0x7c
'Chou ', # 0x7d
'Song ', # 0x7e
'Ban ', # 0x7f
'Song ', # 0x80
'Ji ', # 0x81
'Yue ', # 0x82
'Jin ', # 0x83
'Gou ', # 0x84
'Ji ', # 0x85
'Mao ', # 0x86
'Pi ', # 0x87
'Bi ', # 0x88
'Wang ', # 0x89
'Ang ', # 0x8a
'Fang ', # 0x8b
'Fen ', # 0x8c
'Yi ', # 0x8d
'Fu ', # 0x8e
'Nan ', # 0x8f
'Xi ', # 0x90
'Hu ', # 0x91
'Ya ', # 0x92
'Dou ', # 0x93
'Xun ', # 0x94
'Zhen ', # 0x95
'Yao ', # 0x96
'Lin ', # 0x97
'Rui ', # 0x98
'E ', # 0x99
'Mei ', # 0x9a
'Zhao ', # 0x9b
'Guo ', # 0x9c
'Zhi ', # 0x9d
'Cong ', # 0x9e
'Yun ', # 0x9f
'Waku ', # 0xa0
'Dou ', # 0xa1
'Shu ', # 0xa2
'Zao ', # 0xa3
'[?] ', # 0xa4
'Li ', # 0xa5
'Haze ', # 0xa6
'Jian ', # 0xa7
'Cheng ', # 0xa8
'Matsu ', # 0xa9
'Qiang ', # 0xaa
'Feng ', # 0xab
'Nan ', # 0xac
'Xiao ', # 0xad
'Xian ', # 0xae
'Ku ', # 0xaf
'Ping ', # 0xb0
'Yi ', # 0xb1
'Xi ', # 0xb2
'Zhi ', # 0xb3
'Guai ', # 0xb4
'Xiao ', # 0xb5
'Jia ', # 0xb6
'Jia ', # 0xb7
'Gou ', # 0xb8
'Fu ', # 0xb9
'Mo ', # 0xba
'Yi ', # 0xbb
'Ye ', # 0xbc
'Ye ', # 0xbd
'Shi ', # 0xbe
'Nie ', # 0xbf
'Bi ', # 0xc0
'Duo ', # 0xc1
'Yi ', # 0xc2
'Ling ', # 0xc3
'Bing ', # 0xc4
'Ni ', # 0xc5
'La ', # 0xc6
'He ', # 0xc7
'Pan ', # 0xc8
'Fan ', # 0xc9
'Zhong ', # 0xca
'Dai ', # 0xcb
'Ci ', # 0xcc
'Yang ', # 0xcd
'Fu ', # 0xce
'Bo ', # 0xcf
'Mou ', # 0xd0
'Gan ', # 0xd1
'Qi ', # 0xd2
'Ran ', # 0xd3
'Rou ', # 0xd4
'Mao ', # 0xd5
'Zhao ', # 0xd6
'Song ', # 0xd7
'Zhe ', # 0xd8
'Xia ', # 0xd9
'You ', # 0xda
'Shen ', # 0xdb
'Ju ', # 0xdc
'Tuo ', # 0xdd
'Zuo ', # 0xde
'Nan ', # 0xdf
'Ning ', # 0xe0
'Yong ', # 0xe1
'Di ', # 0xe2
'Zhi ', # 0xe3
'Zha ', # 0xe4
'Cha ', # 0xe5
'Dan ', # 0xe6
'Gu ', # 0xe7
'Pu ', # 0xe8
'Jiu ', # 0xe9
'Ao ', # 0xea
'Fu ', # 0xeb
'Jian ', # 0xec
'Bo ', # 0xed
'Duo ', # 0xee
'Ke ', # 0xef
'Nai ', # 0xf0
'Zhu ', # 0xf1
'Bi ', # 0xf2
'Liu ', # 0xf3
'Chai ', # 0xf4
'Zha ', # 0xf5
'Si ', # 0xf6
'Zhu ', # 0xf7
'Pei ', # 0xf8
'Shi ', # 0xf9
'Guai ', # 0xfa
'Cha ', # 0xfb
'Yao ', # 0xfc
'Jue ', # 0xfd
'Jiu ', # 0xfe
'Shi ', # 0xff
)
| mit |
mscherer/ansible-modules-core | cloud/openstack/os_port.py | 70 | 12457 | #!/usr/bin/python
# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
try:
import shade
HAS_SHADE = True
except ImportError:
HAS_SHADE = False
DOCUMENTATION = '''
---
module: os_port
short_description: Add/Update/Delete ports from an OpenStack cloud.
extends_documentation_fragment: openstack
author: "Davide Agnello (@dagnello)"
version_added: "2.0"
description:
- Add, Update or Remove ports from an OpenStack cloud. A I(state) of
'present' will ensure the port is created or updated if required.
options:
network:
description:
- Network ID or name this port belongs to.
required: true
name:
description:
- Name that has to be given to the port.
required: false
default: None
fixed_ips:
description:
- Desired IP and/or subnet for this port. Subnet is referenced by
subnet_id and IP is referenced by ip_address.
required: false
default: None
admin_state_up:
description:
- Sets admin state.
required: false
default: None
mac_address:
description:
- MAC address of this port.
required: false
default: None
security_groups:
description:
- Security group(s) ID(s) or name(s) associated with the port (comma
separated string or YAML list)
required: false
default: None
no_security_groups:
description:
- Do not associate a security group with this port.
required: false
default: False
allowed_address_pairs:
description:
- "Allowed address pairs list. Allowed address pairs are supported with
dictionary structure.
e.g. allowed_address_pairs:
- ip_address: 10.1.0.12
mac_address: ab:cd:ef:12:34:56
- ip_address: ..."
required: false
default: None
extra_dhcp_opts:
description:
- "Extra dhcp options to be assigned to this port. Extra options are
supported with dictionary structure.
e.g. extra_dhcp_opts:
- opt_name: opt name1
opt_value: value1
- opt_name: ..."
required: false
default: None
device_owner:
description:
- The ID of the entity that uses this port.
required: false
default: None
device_id:
description:
- Device ID of device using this port.
required: false
default: None
state:
description:
- Should the resource be present or absent.
choices: [present, absent]
default: present
'''
EXAMPLES = '''
# Create a port
- os_port:
state: present
auth:
auth_url: https://region-b.geo-1.identity.hpcloudsvc.com:35357/v2.0/
username: admin
password: admin
project_name: admin
name: port1
network: foo
# Create a port with a static IP
- os_port:
state: present
auth:
auth_url: https://region-b.geo-1.identity.hpcloudsvc.com:35357/v2.0/
username: admin
password: admin
project_name: admin
name: port1
network: foo
fixed_ips:
- ip_address: 10.1.0.21
# Create a port with No security groups
- os_port:
state: present
auth:
auth_url: https://region-b.geo-1.identity.hpcloudsvc.com:35357/v2.0/
username: admin
password: admin
project_name: admin
name: port1
network: foo
no_security_groups: True
# Update the existing 'port1' port with multiple security groups (version 1)
- os_port:
state: present
auth:
auth_url: https://region-b.geo-1.identity.hpcloudsvc.com:35357/v2.0/d
username: admin
password: admin
project_name: admin
name: port1
security_groups: 1496e8c7-4918-482a-9172-f4f00fc4a3a5,057d4bdf-6d4d-472...
# Update the existing 'port1' port with multiple security groups (version 2)
- os_port:
state: present
auth:
auth_url: https://region-b.geo-1.identity.hpcloudsvc.com:35357/v2.0/d
username: admin
password: admin
project_name: admin
name: port1
security_groups:
- 1496e8c7-4918-482a-9172-f4f00fc4a3a5
- 057d4bdf-6d4d-472...
'''
RETURN = '''
id:
description: Unique UUID.
returned: success
type: string
name:
description: Name given to the port.
returned: success
type: string
network_id:
description: Network ID this port belongs in.
returned: success
type: string
security_groups:
description: Security group(s) associated with this port.
returned: success
type: list of strings
status:
description: Port's status.
returned: success
type: string
fixed_ips:
description: Fixed ip(s) associated with this port.
returned: success
type: list of dicts
tenant_id:
description: Tenant id associated with this port.
returned: success
type: string
allowed_address_pairs:
description: Allowed address pairs with this port.
returned: success
type: list of dicts
admin_state_up:
description: Admin state up flag for this port.
returned: success
type: bool
'''
def _needs_update(module, port, cloud):
"""Check for differences in the updatable values.
NOTE: We don't currently allow name updates.
"""
compare_simple = ['admin_state_up',
'mac_address',
'device_owner',
'device_id']
compare_dict = ['allowed_address_pairs',
'extra_dhcp_opts']
compare_list = ['security_groups']
for key in compare_simple:
if module.params[key] is not None and module.params[key] != port[key]:
return True
for key in compare_dict:
if module.params[key] is not None and cmp(module.params[key],
port[key]) != 0:
return True
for key in compare_list:
if module.params[key] is not None and (set(module.params[key]) !=
set(port[key])):
return True
# NOTE: if port was created or updated with 'no_security_groups=True',
# subsequent updates without 'no_security_groups' flag or
# 'no_security_groups=False' and no specified 'security_groups', will not
# result in an update to the port where the default security group is
# applied.
if module.params['no_security_groups'] and port['security_groups'] != []:
return True
if module.params['fixed_ips'] is not None:
for item in module.params['fixed_ips']:
if 'ip_address' in item:
# if ip_address in request does not match any in existing port,
# update is required.
if not any(match['ip_address'] == item['ip_address']
for match in port['fixed_ips']):
return True
if 'subnet_id' in item:
return True
for item in port['fixed_ips']:
# if ip_address in existing port does not match any in request,
# update is required.
if not any(match.get('ip_address') == item['ip_address']
for match in module.params['fixed_ips']):
return True
return False
def _system_state_change(module, port, cloud):
state = module.params['state']
if state == 'present':
if not port:
return True
return _needs_update(module, port, cloud)
if state == 'absent' and port:
return True
return False
def _compose_port_args(module, cloud):
port_kwargs = {}
optional_parameters = ['name',
'fixed_ips',
'admin_state_up',
'mac_address',
'security_groups',
'allowed_address_pairs',
'extra_dhcp_opts',
'device_owner',
'device_id']
for optional_param in optional_parameters:
if module.params[optional_param] is not None:
port_kwargs[optional_param] = module.params[optional_param]
if module.params['no_security_groups']:
port_kwargs['security_groups'] = []
return port_kwargs
def get_security_group_id(module, cloud, security_group_name_or_id):
security_group = cloud.get_security_group(security_group_name_or_id)
if not security_group:
module.fail_json(msg="Security group: %s, was not found"
% security_group_name_or_id)
return security_group['id']
def main():
argument_spec = openstack_full_argument_spec(
network=dict(required=False),
name=dict(required=False),
fixed_ips=dict(type='list', default=None),
admin_state_up=dict(type='bool', default=None),
mac_address=dict(default=None),
security_groups=dict(default=None, type='list'),
no_security_groups=dict(default=False, type='bool'),
allowed_address_pairs=dict(type='list', default=None),
extra_dhcp_opts=dict(type='list', default=None),
device_owner=dict(default=None),
device_id=dict(default=None),
state=dict(default='present', choices=['absent', 'present']),
)
module_kwargs = openstack_module_kwargs(
mutually_exclusive=[
['no_security_groups', 'security_groups'],
]
)
module = AnsibleModule(argument_spec,
supports_check_mode=True,
**module_kwargs)
if not HAS_SHADE:
module.fail_json(msg='shade is required for this module')
name = module.params['name']
state = module.params['state']
try:
cloud = shade.openstack_cloud(**module.params)
if module.params['security_groups']:
            # translate security_groups to UUIDs if names were provided
module.params['security_groups'] = [
get_security_group_id(module, cloud, v)
for v in module.params['security_groups']
]
port = None
network_id = None
if name:
port = cloud.get_port(name)
if module.check_mode:
module.exit_json(changed=_system_state_change(module, port, cloud))
changed = False
if state == 'present':
if not port:
network = module.params['network']
if not network:
module.fail_json(
msg="Parameter 'network' is required in Port Create"
)
port_kwargs = _compose_port_args(module, cloud)
network_object = cloud.get_network(network)
if network_object:
network_id = network_object['id']
else:
module.fail_json(
msg="Specified network was not found."
)
port = cloud.create_port(network_id, **port_kwargs)
changed = True
else:
if _needs_update(module, port, cloud):
port_kwargs = _compose_port_args(module, cloud)
port = cloud.update_port(port['id'], **port_kwargs)
changed = True
module.exit_json(changed=changed, id=port['id'], port=port)
if state == 'absent':
if port:
cloud.delete_port(port['id'])
changed = True
module.exit_json(changed=changed)
except shade.OpenStackCloudException as e:
module.fail_json(msg=str(e))
# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
if __name__ == '__main__':
main()
| gpl-3.0 |
adviti/melange | app/gdata/dublincore/data.py | 126 | 2106 | #!/usr/bin/python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains the data classes of the Dublin Core Metadata Initiative (DCMI) Extension"""
__author__ = '[email protected] (Jeff Scudder)'
import atom.core
DC_TEMPLATE = '{http://purl.org/dc/terms/}%s'
class Creator(atom.core.XmlElement):
"""Entity primarily responsible for making the resource."""
_qname = DC_TEMPLATE % 'creator'
class Date(atom.core.XmlElement):
"""Point or period of time associated with an event in the lifecycle of the resource."""
_qname = DC_TEMPLATE % 'date'
class Description(atom.core.XmlElement):
"""Account of the resource."""
_qname = DC_TEMPLATE % 'description'
class Format(atom.core.XmlElement):
"""File format, physical medium, or dimensions of the resource."""
_qname = DC_TEMPLATE % 'format'
class Identifier(atom.core.XmlElement):
"""An unambiguous reference to the resource within a given context."""
_qname = DC_TEMPLATE % 'identifier'
class Language(atom.core.XmlElement):
"""Language of the resource."""
_qname = DC_TEMPLATE % 'language'
class Publisher(atom.core.XmlElement):
"""Entity responsible for making the resource available."""
_qname = DC_TEMPLATE % 'publisher'
class Rights(atom.core.XmlElement):
"""Information about rights held in and over the resource."""
_qname = DC_TEMPLATE % 'rights'
class Subject(atom.core.XmlElement):
"""Topic of the resource."""
_qname = DC_TEMPLATE % 'subject'
class Title(atom.core.XmlElement):
"""Name given to the resource."""
_qname = DC_TEMPLATE % 'title'
| apache-2.0 |
Zopieux/py-gfm | gfm/standalone_fenced_code.py | 1 | 2080 | # Copyright (c) 2012, the Dart project authors. Please see the AUTHORS file
# for details. All rights reserved. Use of this source code is governed by a
# BSD-style license that can be found in the LICENSE file.
import markdown
from markdown.extensions.fenced_code import FencedCodeExtension, FencedBlockPreprocessor
class StandaloneFencedCodeExtension(FencedCodeExtension):
def __init__(self, **kwargs):
self.config = {
"linenums": [False, "Use lines numbers. True=yes, False=no, None=auto"],
"guess_lang": [False, "Automatic language detection - Default: True"],
"css_class": [
"highlight",
"Set class name for wrapper <div> - " "Default: codehilite",
],
"pygments_style": [
"default",
"Pygments HTML Formatter Style " "(Colorscheme) - Default: default",
],
"noclasses": [
False,
"Use inline styles instead of CSS classes - " "Default false",
],
"use_pygments": [
True,
"Use Pygments to Highlight code blocks. "
"Disable if using a JavaScript library. "
"Default: True",
],
}
# Markdown 3.3 introduced a breaking change.
if markdown.__version_info__ >= (3, 3):
super().setConfigs(kwargs)
else:
super().__init__(**kwargs)
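    # Usage sketch (an illustration, not part of the original file): the
    # extension is passed to markdown like any other extension, e.g.
    #   import markdown
    #   html = markdown.markdown(text, extensions=[StandaloneFencedCodeExtension()])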
def extendMarkdown(self, md):
""" Add FencedBlockPreprocessor to the Markdown instance. """
md.registerExtension(self)
# Markdown 3.3 introduced a breaking change.
if markdown.__version_info__ >= (3, 3):
processor = FencedBlockPreprocessor(md, self.config)
processor.codehilite_conf = self.getConfigs()
else:
processor = FencedBlockPreprocessor(md)
processor.checked_for_codehilite = True
processor.codehilite_conf = self.config
md.preprocessors.register(processor, "fenced_code_block", 25)
| bsd-3-clause |
centic9/subversion-ppa | tools/dev/graph-dav-servers.py | 5 | 5465 | #!/usr/bin/env python
#
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
#
#
# graph-svn-dav.py by Brian W. Fitzpatrick <[email protected]>
#
# This was originally a quick hack to make a pretty picture of svn DAV servers.
#
# I've dropped it in Subversion's repository at the request of Karl Fogel.
#
# Be warned that this script has many dependencies that don't ship with Python.
import sys
import os
import fileinput
import datetime
import time
import datetime
from matplotlib import dates
import matplotlib
matplotlib.use('Agg')
from matplotlib import pylab
import Image
OUTPUT_FILE = '../../www/images/svn-dav-securityspace-survey.png'
OUTPUT_IMAGE_WIDTH = 800
STATS = [
('1/1/2003', 70),
('2/1/2003', 158),
('3/1/2003', 222),
('4/1/2003', 250),
('5/1/2003', 308),
('6/1/2003', 369),
('7/1/2003', 448),
('8/1/2003', 522),
('9/1/2003', 665),
('10/1/2003', 782),
('11/1/2003', 969),
('12/1/2003', 1009),
('1/1/2004', 1162),
('2/1/2004', 1307),
('3/1/2004', 1424),
('4/1/2004', 1792),
('5/1/2004', 2113),
('6/1/2004', 2502),
('7/1/2004', 2941),
('8/1/2004', 3863),
('9/1/2004', 4174),
('10/1/2004', 4187),
('11/1/2004', 4783),
('12/1/2004', 4995),
('1/1/2005', 5565),
('2/1/2005', 6505),
('3/1/2005', 7897),
('4/1/2005', 8751),
('5/1/2005', 9793),
('6/1/2005', 11534),
('7/1/2005', 12808),
('8/1/2005', 13545),
('9/1/2005', 15233),
('10/1/2005', 17588),
('11/1/2005', 18893),
('12/1/2005', 20278),
('1/1/2006', 21084),
('2/1/2006', 23861),
('3/1/2006', 26540),
('4/1/2006', 29396),
('5/1/2006', 33001),
('6/1/2006', 35082),
('7/1/2006', 38939),
('8/1/2006', 40672),
('9/1/2006', 46525),
('10/1/2006', 54247),
('11/1/2006', 63145),
('12/1/2006', 68988),
('1/1/2007', 77027),
('2/1/2007', 84813),
('3/1/2007', 95679),
('4/1/2007', 103852),
('5/1/2007', 117267),
('6/1/2007', 133665),
('7/1/2007', 137575),
('8/1/2007', 155426),
('9/1/2007', 159055),
('10/1/2007', 169939),
('11/1/2007', 180831),
('12/1/2007', 187093),
('1/1/2008', 199432),
('2/1/2008', 221547),
('3/1/2008', 240794),
('4/1/2008', 255520),
('5/1/2008', 269478),
('6/1/2008', 286614),
('7/1/2008', 294579),
('8/1/2008', 307923),
('9/1/2008', 254757),
('10/1/2008', 268081),
('11/1/2008', 299071),
('12/1/2008', 330884),
('1/1/2009', 369719),
('2/1/2009', 378434),
('3/1/2009', 390502),
('4/1/2009', 408658),
('5/1/2009', 407044),
('6/1/2009', 406520),
('7/1/2009', 334276),
]
def get_date(raw_date):
month, day, year = map(int, raw_date.split('/'))
return datetime.datetime(year, month, day)
def get_ordinal_date(date):
# This is the only way I can get matplotlib to do the dates right.
return int(dates.date2num(get_date(date)))
def load_stats():
dates = [get_ordinal_date(date) for date, value in STATS]
counts = [x[1] for x in STATS]
return dates, counts
def draw_graph(dates, counts):
###########################################################
# Drawing takes place here.
pylab.figure(1)
ax = pylab.subplot(111)
pylab.plot_date(dates, counts,
color='r', linestyle='-', marker='o', markersize=3)
ax.xaxis.set_major_formatter( pylab.DateFormatter('%Y') )
ax.xaxis.set_major_locator( pylab.YearLocator() )
ax.xaxis.set_minor_locator( pylab.MonthLocator() )
ax.set_xlim( (dates[0] - 92, dates[len(dates) - 1] + 92) )
ax.yaxis.set_major_formatter( pylab.FormatStrFormatter('%d') )
pylab.ylabel('Total # of Public DAV Servers')
lastdate = datetime.datetime.fromordinal(dates[len(dates) - 1]).strftime("%B %Y")
pylab.xlabel("Data as of " + lastdate)
pylab.title('Security Space Survey of\nPublic Subversion DAV Servers')
# End drawing
###########################################################
png = open(OUTPUT_FILE, 'w')
pylab.savefig(png)
png.close()
os.rename(OUTPUT_FILE, OUTPUT_FILE + ".tmp.png")
try:
im = Image.open(OUTPUT_FILE + ".tmp.png", 'r')
(width, height) = im.size
print("Original size: %d x %d pixels" % (width, height))
scale = float(OUTPUT_IMAGE_WIDTH) / float(width)
width = OUTPUT_IMAGE_WIDTH
height = int(float(height) * scale)
print("Final size: %d x %d pixels" % (width, height))
im = im.resize((width, height), Image.ANTIALIAS)
im.save(OUTPUT_FILE, im.format)
os.unlink(OUTPUT_FILE + ".tmp.png")
except Exception, e:
sys.stderr.write("Error attempting to resize the graphic: %s\n" % (str(e)))
os.rename(OUTPUT_FILE + ".tmp.png", OUTPUT_FILE)
raise
pylab.close()
if __name__ == '__main__':
dates, counts = load_stats()
draw_graph(dates, counts)
print("Don't forget to update ../../www/svn-dav-securityspace-survey.html!")
| apache-2.0 |
JulyKikuAkita/PythonPrac | cs15211/FindEventualSafeStates.py | 1 | 7463 | __source__ = 'https://leetcode.com/problems/find-eventual-safe-states/'
# Time: O(N + E)
# Space: O(N)
#
# Description: Leetcode # 802. Find Eventual Safe States
#
# In a directed graph, we start at some node and every turn,
# walk along a directed edge of the graph.
# If we reach a node that is terminal (that is, it has no outgoing directed edges), we stop.
#
# Now, say our starting node is eventually safe if and only if we must eventually walk to a terminal node.
# More specifically, there exists a natural number K so that for any choice of where to walk,
# we must have stopped at a terminal node in less than K steps.
#
# Which nodes are eventually safe? Return them as an array in sorted order.
#
# The directed graph has N nodes with labels 0, 1, ..., N-1, where N is the length of graph.
# The graph is given in the following form: graph[i] is a list of labels j
# such that (i, j) is a directed edge of the graph.
#
# Example:
# Input: graph = [[1,2],[2,3],[5],[0],[5],[],[]]
# Output: [2,4,5,6]
# Here is a diagram of the above graph.
#
# Illustration of graph
#
# Note:
#
# graph will have length at most 10000.
# The number of edges in the graph will not exceed 32000.
# Each graph[i] will be a sorted list of different integers,
# chosen within the range [0, graph.length - 1].
#
import collections
import unittest
# 512ms 18.54%
class Solution(object):
def eventualSafeNodes(self, graph):
"""
:type graph: List[List[int]]
:rtype: List[int]
"""
N = len(graph)
safe = [False] * N
graph = map(set, graph)
rgraph = [set() for _ in xrange(N)]
q = collections.deque()
for i, js in enumerate(graph):
if not js:
q.append(i)
for j in js:
rgraph[j].add(i)
while q:
j = q.popleft()
safe[j] = True
for i in rgraph[j]:
graph[i].remove(j)
if len(graph[i]) == 0:
q.append(i)
return [i for i, v in enumerate(safe) if v]
# 304ms 35.35%
class Solution2(object):
def eventualSafeNodes(self, graph):
"""
:type graph: List[List[int]]
:rtype: List[int]
"""
WHITE, GRAY, BLACK = 0, 1, 2
color = collections.defaultdict(int)
def dfs(node):
if color[node] != WHITE:
return color[node] == BLACK
color[node] = GRAY
for nei in graph[node]:
if color[nei] == BLACK:
continue
if color[nei] == GRAY or not dfs(nei):
return False
color[node] = BLACK
return True
return filter(dfs, range(len(graph)))
class TestMethods(unittest.TestCase):
def test_Local(self):
self.assertEqual(1, 1)
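    # Added illustration (not in the original file): exercises the sample graph
    # from the problem statement above with both solution classes defined here.
    def test_example_graph(self):
        graph = [[1, 2], [2, 3], [5], [0], [5], [], []]
        # Nodes 0, 1 and 3 sit on the cycle 0 -> 1 -> 3 -> 0, so only
        # 2, 4, 5 and 6 are eventually safe.
        self.assertEqual([2, 4, 5, 6], Solution().eventualSafeNodes(graph))
        self.assertEqual([2, 4, 5, 6], Solution2().eventualSafeNodes(graph))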
if __name__ == '__main__':
unittest.main()
Java = '''
# Thought: https://leetcode.com/problems/find-eventual-safe-states/solution/
Approach #1: Reverse Edges [Accepted]
Complexity Analysis
Time Complexity: O(N + E), where N is the number of nodes in the given graph, and E is the total number of edges.
Space Complexity: O(N) in additional space complexity.
# 114ms 22.56%
class Solution {
public List<Integer> eventualSafeNodes(int[][] graph) {
int N = graph.length;
boolean[] safe = new boolean[N];
List<Set<Integer>> tmp = new ArrayList();
List<Set<Integer>> rgraph = new ArrayList();
for (int i = 0; i < N; ++i) {
tmp.add(new HashSet());
rgraph.add(new HashSet());
}
Queue<Integer> queue = new LinkedList();
for (int i = 0; i < N; i++) {
if (graph[i].length == 0) queue.offer(i);
for (int j : graph[i]) {
tmp.get(i).add(j);
rgraph.get(j).add(i);
}
}
while (!queue.isEmpty()) {
int j = queue.poll();
safe[j] = true;
for (int i : rgraph.get(j)) {
tmp.get(i).remove(j);
if (tmp.get(i).isEmpty()) queue.offer(i);
}
}
List<Integer> ans = new ArrayList();
for (int i = 0; i < N; i++) {
if (safe[i]) ans.add(i);
}
return ans;
}
}
Approach #2: Depth-First Search [Accepted]
Complexity Analysis
Time Complexity: O(N + E), where N is the number of nodes in the given graph, and E is the total number of edges.
Space Complexity: O(N) in additional space complexity.
# 11ms 97.36%
class Solution {
public List<Integer> eventualSafeNodes(int[][] graph) {
int N = graph.length;
int[] color = new int[N];
List<Integer> ans = new ArrayList();
for (int i = 0; i < N; i++) {
if (dfs(i, color, graph)) ans.add(i);
}
return ans;
}
// colors: WHITE 0, GRAY 1, BLACK 2;
private boolean dfs(int node, int[] color, int[][] graph) {
if (color[node] > 0) return color[node] == 2;
color[node] = 1;
for (int nei: graph[node]) {
if (color[node] == 2) continue;
if (color[nei] == 1 || !dfs(nei, color, graph)) return false;
}
color[node] = 2;
return true;
}
}
# https://leetcode.com/problems/find-eventual-safe-states/discuss/120633/Java-Solution-(DFS-andand-Topological-Sort)
# topological sort
# 62ms 36.36%
class Solution {
public List<Integer> eventualSafeNodes(int[][] graph) {
int n = graph.length;
int[] degree = new int [n];
Set<Integer>[] map = new HashSet[n];
for (int i = 0; i < n; i++) map[i] = new HashSet();
for (int i = 0; i < n; i++) {
for (int node : graph[i]) {
map[node].add(i);
degree[i]++;
}
}
Queue<Integer> queue = new LinkedList();
Set<Integer> set = new HashSet();
for (int i = 0; i < n; i++) {
if (degree[i] == 0) {
set.add(i);
queue.add(i);
}
}
while (!queue.isEmpty()) {
int node = queue.poll();
set.add(node);
for (int nei : map[node]) {
degree[nei]--;
if (degree[nei] == 0) {
queue.add(nei);
}
}
}
List<Integer> ans = new ArrayList(set);
Collections.sort(ans);
return ans;
}
}
# https://leetcode.com/problems/find-eventual-safe-states/discuss/119871/Straightforward-Java-solution-easy-to-understand!
# 14ms 60.33%
class Solution {
// value of color represents three states:
static int NOT_V = 0; // 0:have not been visited
static int SAFE = 1; // 1:safe
static int LOOP = 2; // 2:unsafe
public List<Integer> eventualSafeNodes(int[][] graph) {
List<Integer> res = new ArrayList();
int[] color = new int[graph.length];
for (int i = 0; i < graph.length; i++) {
if (dfs(graph, color, i)) res.add(i);
}
return res;
}
private boolean dfs(int[][] graph, int[] color, int start) {
if (color[start] == LOOP) return false;
if (color[start] == SAFE) return true;
color[start] = LOOP;
for (int nei : graph[start]) {
if (!dfs(graph, color, nei)) return false;
}
color[start] = SAFE;
return true;
}
}
'''
| apache-2.0 |
openstack/congress | congress/utils.py | 1 | 9459 | # Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Utilities and helper functions."""
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import contextlib
import json
import os
import shutil
import tempfile
import yaml
from oslo_config import cfg
from oslo_log import log as logging
import six
LOG = logging.getLogger(__name__)
utils_opts = [
cfg.StrOpt('tempdir',
help='Explicitly specify the temporary working directory'),
]
CONF = cfg.CONF
CONF.register_opts(utils_opts)
# Note(thread-safety): blocking function
@contextlib.contextmanager
def tempdir(**kwargs):
argdict = kwargs.copy()
if 'dir' not in argdict:
argdict['dir'] = CONF.tempdir
tmpdir = tempfile.mkdtemp(**argdict)
try:
yield tmpdir
finally:
try:
shutil.rmtree(tmpdir)
except OSError as e:
LOG.error(('Could not remove tmpdir: %s'), e)
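# Usage sketch (illustrative assumption, not in the original module): the context
# manager yields a scratch directory and removes it again on exit, e.g.
#   with tempdir(prefix='congress-') as path:
#       ...  # write temporary files under `path`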
def value_to_congress(value):
if isinstance(value, six.string_types):
# TODO(ayip): This throws away high unicode data because congress does
# not have full support for unicode yet. We'll need to fix this to
# handle unicode coming from datasources.
try:
six.text_type(value).encode('ascii')
except UnicodeEncodeError:
LOG.warning('Ignoring non-ascii characters')
# Py3: decode back into str for compat (bytes != str)
return six.text_type(value).encode('ascii', 'ignore').decode('ascii')
# Check for bool before int, because True and False are also ints.
elif isinstance(value, bool):
return str(value)
elif (isinstance(value, six.integer_types) or
isinstance(value, float)):
return value
return str(value)
def tuple_to_congress(value_tuple):
return tuple(value_to_congress(v) for v in value_tuple)
# Note(thread-safety): blocking function
def create_datasource_policy(bus, datasource, engine):
# Get the schema for the datasource using
# Note(thread-safety): blocking call
schema = bus.rpc(datasource, 'get_datasource_schema',
{'source_id': datasource})
# Create policy and sets the schema once datasource is created.
args = {'name': datasource, 'schema': schema}
# Note(thread-safety): blocking call
bus.rpc(engine, 'initialize_datasource', args)
def get_root_path():
return os.path.dirname(os.path.dirname(__file__))
class Location (object):
"""A location in the program source code."""
__slots__ = ['line', 'col']
def __init__(self, line=None, col=None, obj=None):
try:
self.line = obj.location.line
self.col = obj.location.col
except AttributeError:
pass
self.col = col
self.line = line
def __str__(self):
s = ""
if self.line is not None:
s += " line: {}".format(self.line)
if self.col is not None:
s += " col: {}".format(self.col)
return s
def __repr__(self):
return "Location(line={}, col={})".format(
repr(self.line), repr(self.col))
def __hash__(self):
return hash(('Location', hash(self.line), hash(self.col)))
def pretty_json(data):
print(json.dumps(data, sort_keys=True,
indent=4, separators=(',', ': ')))
def pretty_rule(rule_str):
# remove line breaks
rule_str = ''.join(
[line.strip() for line in rule_str.strip().splitlines()])
head_and_body = rule_str.split(':-')
# drop empty body
head_and_body = [item.strip()
for item in head_and_body if len(item.strip()) > 0]
head = head_and_body[0]
if len(head_and_body) == 1:
return head
else:
body = head_and_body[1]
# split the literal by spliting on ')'
body_list = body.split(')')
body_list = body_list[:-1] # drop part behind the final ')'
new_body_list = []
for literal in body_list:
# remove commas between literals
if literal[0] == ',':
literal = literal[1:]
# add back the ')', also add an indent
new_body_list.append(' ' + literal.strip() + ')')
pretty_rule_str = head + " :-\n" + ",\n".join(new_body_list)
return pretty_rule_str
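# Worked example (an illustration, not from the original source): for the input
# "p(x) :- q(x), r(x, y)" the function returns the head on its own line followed
# by one indented body literal per line:
#   p(x) :-
#     q(x),
#     r(x, y)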
class YamlConfigs (object):
def __init__(self, dir_path, key_attrib, reusables_path=None):
self.dir_path = dir_path
self.key_attrib = key_attrib
self.reusables_path = reusables_path
# dictionary of loaded structures
# indexed by the value of each struct[key_attrib]
self.loaded_structures = {}
# dictionary of reusable yaml-style structures
# indexed by unique name
self.reusables = {}
yaml.SafeLoader.add_constructor(
'!ref', self._resolve_reuse_reference_constructor)
def _resolve_reuse_reference_constructor(self, loader, node):
import six
if not isinstance(node.value, six.string_types):
raise yaml.YAMLError(
'Cannot resolve reference {} because the value is not '
'a string.'.format(node))
if node.value in self.reusables:
return self.reusables[node.value]
else:
raise yaml.YAMLError(
'Cannot resolve reference {} because no reusable '
'data has been defined with the name "{}". Please double '
'check the reference name or the reusables file "{}".'.format(
node, node.value, self.reusables_path))
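    # Illustration (assumed usage, not from the original source): if the
    # reusables file defines
    #   common_servers: ["db1", "db2"]
    # then a policy document can write
    #   servers: !ref common_servers
    # and this constructor substitutes the stored structure for the tag.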
def load_from_files(self):
'''load YAML config files from directory
return total number of files on which error encountered.
Separately callable apart from __init__ to support reloading changed
files.
'''
if self.reusables_path is not None:
self.reusables = {}
try:
with open(self.reusables_path, "r") as stream:
try:
self.reusables = yaml.safe_load(stream)
except Exception:
LOG.warning(
'Unable to YAML-load reusables file at path %s. '
'Proceeding with empty reusables.',
self.reusables_path)
except IOError:
LOG.warning('Unable to find or open reusables file at path %s.'
' Proceeding with empty reusables.',
self.reusables_path)
if not isinstance(self.reusables, dict):
LOG.warning('The loaded reusables file does not conform to the'
' expected format (must be a hash at the top '
'level). Proceeding with empty reusables. '
'Provided structure: %s', self.reusables)
def _load_yaml_config_file(full_path):
try:
success_yaml_count = 0
error_yaml_count = 0
doc_num_in_file = 0
file_error = False
with open(full_path, "r") as stream:
policies = yaml.safe_load_all(stream)
for policy in policies:
doc_num_in_file += 1
# FIXME: validate YAML config
if policy[self.key_attrib] in self.loaded_structures:
error_yaml_count += 1
LOG.warning('Duplicate name')
else:
self.loaded_structures[
policy[self.key_attrib]] = policy
success_yaml_count += 1
except Exception:
LOG.exception(
'Failed to load YAML config file %s', full_path)
file_error = True
return success_yaml_count, file_error or (error_yaml_count > 0)
file_count = 0
file_error_count = 0
policy_count = 0
for (dirpath, dirnames, filenames) in os.walk(
self.dir_path):
for filename in filenames:
name, extension = os.path.splitext(filename)
if extension in ['.yaml', '.yml']:
count, has_error = _load_yaml_config_file(
os.path.join(dirpath, filename))
if count > 0:
file_count += 1
policy_count += count
if has_error:
file_error_count += 1
return file_error_count
| apache-2.0 |
lixiangning888/whole_project | modules/signatures/antiemu_wine_func.py | 3 | 1273 | # -*- coding: utf-8 -*-
# Copyright (C) 2015 Accuvant, Inc. ([email protected])
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from lib.cuckoo.common.abstracts import Signature
class WineDetectFunc(Signature):
name = "antiemu_wine_func"
description = "通过功能名检测是否存在Wine模拟器"
severity = 3
categories = ["anti-emulation"]
authors = ["Accuvant"]
minimum = "1.0"
evented = True
filter_apinames = set(["LdrGetProcedureAddress"])
def on_call(self, call, process):
funcname = self.get_argument(call, "FunctionName")
if not call["status"] and funcname == "wine_get_unix_file_name":
return True
| lgpl-3.0 |
chhao91/QGIS | python/plugins/processing/gui/MessageBarProgress.py | 5 | 2541 | # -*- coding: utf-8 -*-
"""
***************************************************************************
MessageBarProgress.py
---------------------
Date : April 2013
Copyright : (C) 2013 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'April 2013'
__copyright__ = '(C) 2013, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from PyQt4.QtCore import Qt, QCoreApplication
from PyQt4.QtGui import QProgressBar
from qgis.utils import iface
from qgis.gui import QgsMessageBar
class MessageBarProgress:
def __init__(self, algname=None):
self.progressMessageBar = \
iface.messageBar().createMessage(self.tr('Executing algorithm <i>{0}</i>'.format(algname if algname else '')))
self.progress = QProgressBar()
self.progress.setMaximum(100)
self.progress.setAlignment(Qt.AlignLeft | Qt.AlignVCenter)
self.progressMessageBar.layout().addWidget(self.progress)
iface.messageBar().pushWidget(self.progressMessageBar,
iface.messageBar().INFO)
def error(self, msg):
iface.messageBar().clearWidgets()
iface.messageBar().pushMessage(self.tr('Error'),
msg, level=QgsMessageBar.CRITICAL, duration=3)
def setText(self, text):
pass
def setPercentage(self, i):
self.progress.setValue(i)
def setInfo(self, _):
pass
def setCommand(self, _):
pass
def setDebugInfo(self, _):
pass
def setConsoleInfo(self, _):
pass
def close(self):
iface.messageBar().clearWidgets()
def tr(self, string, context=''):
if context == '':
context = 'MessageBarProgress'
return QCoreApplication.translate(context, string)
| gpl-2.0 |
vishesh/pycket | pycket/prims/parameter.py | 1 | 2433 |
from pycket import values
from pycket import values_parameter
from pycket.argument_parser import ArgParser, EndOfInput
from pycket.arity import Arity
from pycket.base import W_Object
from pycket.error import SchemeException
from pycket.prims.expose import expose, expose_val, default, procedure
from rpython.rlib import jit
@expose("make-parameter",
[values.W_Object, default(values.W_Object, values.w_false)])
def make_parameter(init, guard):
return values_parameter.W_Parameter(init, guard)
@expose("make-derived-parameter",
[values_parameter.W_BaseParameter, procedure, procedure])
def make_derived_parameter(param, guard, wrap):
return values_parameter.W_DerivedParameter(param, guard, wrap)
@expose("extend-parameterization", arity=Arity.geq(1))
@jit.unroll_safe
def scheme_extend_parameterization(args):
if len(args) == 0:
raise SchemeException("extend-parameterization: expected 1 or more arguments")
config = args[0]
argc = len(args)
if argc < 2 or not isinstance(config, values_parameter.W_Parameterization) or argc % 2 != 1:
return config
parser = ArgParser("extend-parameterization", args, start_at=1)
while parser.has_more():
param = parser.parameter()
key = parser.object()
config = config.extend([param], [key])
return config
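# Note (added illustration): the arguments are expected as
# (parameterization, param1, value1, param2, value2, ...), so the loop above
# extends the parameterization one (parameter, value) pair at a time.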
def call_with_parameterization(f, args, paramz, env, cont):
cont.update_cm(values.parameterization_key, paramz)
return f.call(args, env, cont)
@expose("call-with-parameterization",
[values.W_Object, values_parameter.W_Parameterization], simple=False)
def call_w_paramz(f, paramz, env, cont):
return call_with_parameterization(f, [], paramz, env, cont)
def call_with_extended_paramz(f, args, keys, vals, env, cont):
from pycket.values import parameterization_key
# XXX seems untested?
paramz = cont.get_mark_first(parameterization_key)
assert isinstance(paramz, values_parameter.W_Parameterization) # XXX is this always right?
paramz_new = paramz.extend(keys, vals)
return call_with_parameterization(f, args, paramz_new, env, cont)
expose_val("parameterization-key", values.parameterization_key)
expose_val("print-mpair-curly-braces", values_parameter.W_Parameter(values.w_false))
expose_val("print-pair-curly-braces", values_parameter.W_Parameter(values.w_false))
| mit |
VirusTotal/msticpy | tests/test_ip_utils.py | 1 | 3873 | # -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""IP Utils test class."""
import unittest
import json
import os
import pandas as pd
from msticpy.sectools.ip_utils import get_whois_info, get_whois_df, get_ip_type
_test_data_folders = [
d for d, _, _ in os.walk(os.getcwd()) if d.endswith("/tests/testdata")
]
if len(_test_data_folders) == 1:
_TEST_DATA = _test_data_folders[0]
else:
_TEST_DATA = "./tests/testdata"
class TestIpUtils(unittest.TestCase):
"""Unit test class."""
IPV4 = {
"Private": ("10.0.0.1", ["Private", "Reserved"]),
"Multicast": ("224.0.0.1", None),
"Unspecified": ("0.0.0.0", None),
"Reserved": ("198.51.100.1", ["Private", "Reserved"]),
"Loopback": ("127.0.0.1", None),
"Public": ("153.2.3.4", None),
"Link Local": ("169.254.0.1", None),
}
IPV6 = {
"Private": ("FC00::C001:1DFF:FEE0:0", None),
"Multicast": ("FF00::", None),
"Unspecified": ("::", None),
"Reserved": ("2001:db8::", ["Private", "Reserved"]),
"Loopback": ("::1", None),
"Public": ("2340:0023:AABA:0A01:0055:5054:9ABC:ABB0", None),
"Link Local": ("FE80::C001:1DFF:FEE0:0", None),
}
def setUp(self):
input_file = os.path.join(_TEST_DATA, "az_net_flows.csv")
self.input_df = pd.read_csv(input_file).sample(10)
def test_get_ip_type(self):
for ip_type, (addr, alts) in self.IPV4.items():
print(addr, ip_type)
if alts:
self.assertIn(get_ip_type(addr), alts)
else:
self.assertEqual(get_ip_type(addr), ip_type)
for ip_type, (addr, alts) in self.IPV6.items():
print(addr, ip_type)
if alts:
self.assertIn(get_ip_type(addr), alts)
else:
self.assertEqual(get_ip_type(addr), ip_type)
def test_get_whois(self):
ms_ip = "13.107.4.50"
ms_asn = "MICROSOFT-CORP-MSN-AS-BLOCK, US"
asn, whois = get_whois_info(ms_ip)
self.assertEqual(asn, ms_asn)
asn, whois = get_whois_info(self.IPV4["Private"][0])
invalid_type = "No ASN Information for IP type: Private"
self.assertEqual(asn, invalid_type)
def test_get_whois_df(self):
results = get_whois_df(data=self.input_df, ip_column="AllExtIPs")
self.assertEqual(len(results), len(self.input_df))
self.assertIn("AsnDescription", results.columns)
results2 = get_whois_df(
data=self.input_df, ip_column="AllExtIPs", asn_col="asn", whois_col="whois"
)
self.assertEqual(len(results2), len(self.input_df))
self.assertIn("asn", results2.columns)
self.assertIn("whois", results2.columns)
self.assertEqual(len(results2[~results2["asn"].isna()]), len(self.input_df))
self.assertEqual(len(results2[~results2["whois"].isna()]), len(self.input_df))
def test_whois_pdext(self):
results = self.input_df.mp_whois.lookup(ip_column="AllExtIPs")
self.assertEqual(len(results), len(self.input_df))
self.assertIn("AsnDescription", results.columns)
results2 = self.input_df.mp_whois.lookup(
ip_column="AllExtIPs", asn_col="asn", whois_col="whois"
)
self.assertEqual(len(results2), len(self.input_df))
self.assertIn("asn", results2.columns)
self.assertIn("whois", results2.columns)
self.assertEqual(len(results2[~results2["asn"].isna()]), len(self.input_df))
self.assertEqual(len(results2[~results2["whois"].isna()]), len(self.input_df))
| mit |
KnowNo/reviewboard | reviewboard/reviews/views.py | 3 | 64687 | from __future__ import unicode_literals
import logging
import time
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.contrib.sites.models import Site
from django.core.exceptions import MultipleObjectsReturned, ObjectDoesNotExist
from django.db.models import Q
from django.http import (Http404,
HttpResponse,
HttpResponseNotFound,
HttpResponseNotModified,
HttpResponseRedirect)
from django.shortcuts import (get_object_or_404, get_list_or_404,
render_to_response)
from django.template.context import RequestContext
from django.template.loader import render_to_string
from django.utils import six, timezone
from django.utils.decorators import method_decorator
from django.utils.html import escape
from django.utils.http import http_date
from django.utils.safestring import mark_safe
from django.utils.timezone import utc
from django.utils.translation import ugettext_lazy as _
from djblets.siteconfig.models import SiteConfiguration
from djblets.util.dates import get_latest_timestamp
from djblets.util.decorators import augment_method_from
from djblets.util.http import (encode_etag, set_last_modified,
set_etag, etag_if_none_match)
from reviewboard.accounts.decorators import (check_login_required,
valid_prefs_required)
from reviewboard.accounts.models import ReviewRequestVisit, Profile
from reviewboard.attachments.models import (FileAttachment,
FileAttachmentHistory)
from reviewboard.changedescs.models import ChangeDescription
from reviewboard.diffviewer.diffutils import (convert_to_unicode,
get_file_chunks_in_range,
get_last_header_before_line,
get_last_line_number_in_diff,
get_original_file,
get_patched_file)
from reviewboard.diffviewer.models import DiffSet
from reviewboard.diffviewer.views import (DiffFragmentView, DiffViewerView,
exception_traceback_string)
from reviewboard.hostingsvcs.bugtracker import BugTracker
from reviewboard.reviews.ui.screenshot import LegacyScreenshotReviewUI
from reviewboard.reviews.context import (comment_counts,
diffsets_with_comments,
has_comments_in_diffsets_excluding,
interdiffs_with_comments,
make_review_request_context)
from reviewboard.reviews.fields import get_review_request_fieldsets
from reviewboard.reviews.markdown_utils import is_rich_text_default_for_user
from reviewboard.reviews.models import (BaseComment, Comment,
FileAttachmentComment,
ReviewRequest, Review,
Screenshot, ScreenshotComment)
from reviewboard.reviews.ui.base import FileAttachmentReviewUI
from reviewboard.scmtools.models import Repository
from reviewboard.site.decorators import check_local_site_access
from reviewboard.site.urlresolvers import local_site_reverse
from reviewboard.webapi.encoder import status_to_string
#
# Helper functions
#
def _render_permission_denied(
request,
template_name='reviews/review_request_permission_denied.html'):
"""Renders a Permission Denied error for this review request."""
response = render_to_response(template_name, RequestContext(request))
response.status_code = 403
return response
def _find_review_request_object(review_request_id, local_site):
"""Finds a review request given an ID and an optional LocalSite name.
If a local site is passed in on the URL, we want to look up the review
request using the local_id instead of the pk. This allows each LocalSite
configured to have its own review request ID namespace starting from 1.
"""
q = ReviewRequest.objects.all()
if local_site:
q = q.filter(local_site=local_site,
local_id=review_request_id)
else:
q = q.filter(pk=review_request_id)
try:
q = q.select_related('submitter', 'repository')
return q.get()
except ReviewRequest.DoesNotExist:
raise Http404
def _find_review_request(request, review_request_id, local_site):
"""Finds a review request matching an ID, checking user access permissions.
If the review request is accessible by the user, we return
(ReviewRequest, None). Otherwise, we return (None, response).
"""
review_request = _find_review_request_object(review_request_id, local_site)
if review_request.is_accessible_by(request.user):
return review_request, None
else:
return None, _render_permission_denied(request)
def _build_id_map(objects):
"""Builds an ID map out of a list of objects.
The resulting map makes it easy to quickly look up an object from an ID.
"""
id_map = {}
for obj in objects:
id_map[obj.pk] = obj
return id_map
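# A quick illustration of the helper above (hypothetical objects whose pk
# attributes are 1 and 2):
#
#     >>> _build_id_map([obj_a, obj_b])
#     {1: obj_a, 2: obj_b}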
def _query_for_diff(review_request, user, revision, draft):
"""
Queries for a diff based on several parameters.
    If the requested diff does not exist, this raises an Http404 exception.
"""
# Normalize the revision, since it might come in as a string.
if revision:
revision = int(revision)
# This will try to grab the diff associated with a draft if the review
# request has an associated draft and is either the revision being
# requested or no revision is being requested.
if (draft and draft.diffset_id and
(revision is None or draft.diffset.revision == revision)):
return draft.diffset
query = Q(history=review_request.diffset_history_id)
# Grab a revision if requested.
if revision is not None:
query = query & Q(revision=revision)
try:
return DiffSet.objects.filter(query).latest()
except DiffSet.DoesNotExist:
raise Http404
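# A minimal sketch of how this helper is used by the views further down in
# this file (raw_diff, _download_diff_file, and the diff viewer classes):
#
#     draft = review_request.get_draft(request.user)
#     diffset = _query_for_diff(review_request, request.user, revision, draft)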
def build_diff_comment_fragments(
comments, context,
comment_template_name='reviews/diff_comment_fragment.html',
error_template_name='diffviewer/diff_fragment_error.html',
lines_of_context=None,
show_controls=False):
comment_entries = []
had_error = False
siteconfig = SiteConfiguration.objects.get_current()
if lines_of_context is None:
lines_of_context = [0, 0]
for comment in comments:
try:
max_line = get_last_line_number_in_diff(context, comment.filediff,
comment.interfilediff)
first_line = max(1, comment.first_line - lines_of_context[0])
last_line = min(comment.last_line + lines_of_context[1], max_line)
num_lines = last_line - first_line + 1
chunks = list(get_file_chunks_in_range(context,
comment.filediff,
comment.interfilediff,
first_line,
num_lines))
content = render_to_string(comment_template_name, {
'comment': comment,
'header': get_last_header_before_line(context,
comment.filediff,
comment.interfilediff,
first_line),
'chunks': chunks,
'domain': Site.objects.get_current().domain,
'domain_method': siteconfig.get('site_domain_method'),
'lines_of_context': lines_of_context,
'expandable_above': show_controls and first_line != 1,
'expandable_below': show_controls and last_line != max_line,
'collapsible': lines_of_context != [0, 0],
'lines_above': first_line - 1,
'lines_below': max_line - last_line,
'first_line': first_line,
})
except Exception as e:
content = exception_traceback_string(
None, e, error_template_name, {
'comment': comment,
'file': {
'depot_filename': comment.filediff.source_file,
'index': None,
'filediff': comment.filediff,
},
'domain': Site.objects.get_current().domain,
'domain_method': siteconfig.get("site_domain_method"),
})
# It's bad that we failed, and we'll return a 500, but we'll
# still return content for anything we have. This will prevent any
# caching.
had_error = True
chunks = []
comment_entries.append({
'comment': comment,
'html': content,
'chunks': chunks,
})
return had_error, comment_entries
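# comment_diff_fragments() and the e-mail preview views below feed their
# comments through this function; a minimal call looks roughly like:
#
#     had_error, entries = build_diff_comment_fragments(
#         comments, context, lines_of_context=[0, 0])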
#
# View functions
#
@check_login_required
@valid_prefs_required
def root(request, local_site_name=None):
"""Handles the root URL of Review Board or a Local Site.
If the user is authenticated, this will redirect to their Dashboard.
Otherwise, they'll be redirected to the All Review Requests page.
Either page may then redirect for login or show a Permission Denied,
depending on the settings.
"""
if request.user.is_authenticated():
url_name = 'dashboard'
else:
url_name = 'all-review-requests'
return HttpResponseRedirect(
local_site_reverse(url_name, local_site_name=local_site_name))
@login_required
@check_local_site_access
def new_review_request(request,
local_site=None,
template_name='reviews/new_review_request.html'):
"""Displays the New Review Request UI.
This handles the creation of a review request based on either an existing
changeset or the provided information.
"""
valid_repos = []
repos = Repository.objects.accessible(request.user, local_site=local_site)
if local_site:
local_site_name = local_site.name
else:
local_site_name = ''
for repo in repos.order_by('name'):
try:
scmtool = repo.get_scmtool()
valid_repos.append({
'id': repo.id,
'name': repo.name,
'scmtool_name': scmtool.name,
'supports_post_commit': repo.supports_post_commit,
'local_site_name': local_site_name,
'files_only': False,
'requires_change_number': scmtool.supports_pending_changesets,
'requires_basedir': not scmtool.get_diffs_use_absolute_paths(),
})
except Exception:
logging.exception('Error loading SCMTool for repository "%s" '
'(ID %d)',
repo.name, repo.id)
valid_repos.insert(0, {
'id': '',
'name': _('(None - File attachments only)'),
'scmtool_name': '',
'supports_post_commit': False,
'files_only': True,
'local_site_name': local_site_name,
})
return render_to_response(template_name, RequestContext(request, {
'repos': valid_repos,
}))
def _get_latest_file_attachments(file_attachments):
file_attachment_histories = FileAttachmentHistory.objects.filter(
file_attachments__in=file_attachments)
latest = dict([
(data['id'], data['latest_revision'])
for data in file_attachment_histories.values('id', 'latest_revision')
])
return [
f
for f in file_attachments
if (not f.is_from_diff and
f.attachment_revision == latest[f.attachment_history_id])
]
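# For example (hypothetical data): if an attachment history holds revisions 1
# and 2 of the same file, only the revision-2 FileAttachment survives the
# filter above, and anything with is_from_diff=True is dropped entirely.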
@check_login_required
@check_local_site_access
def review_detail(request,
review_request_id,
local_site=None,
template_name="reviews/review_detail.html"):
"""
Main view for review requests. This covers the review request information
and all the reviews on it.
"""
# If there's a local_site passed in the URL, we want to look up the review
# request based on the local_id instead of the pk. This allows each
# local_site configured to have its own review request ID namespace
# starting from 1.
review_request, response = _find_review_request(
request, review_request_id, local_site)
if not review_request:
return response
# The review request detail page needs a lot of data from the database,
# and going through standard model relations will result in far too many
# queries. So we'll be optimizing quite a bit by prefetching and
# re-associating data.
#
# We will start by getting the list of reviews. We'll filter this out into
# some other lists, build some ID maps, and later do further processing.
entries = []
public_reviews = []
body_top_replies = {}
body_bottom_replies = {}
replies = {}
reply_timestamps = {}
reviews_entry_map = {}
reviews_id_map = {}
review_timestamp = 0
visited = None
# Start by going through all reviews that point to this review request.
# This includes draft reviews. We'll be separating these into a list of
# public reviews and a mapping of replies.
#
# We'll also compute the latest review timestamp early, for the ETag
# generation below.
#
# The second pass will come after the ETag calculation.
all_reviews = list(review_request.reviews.select_related('user'))
for review in all_reviews:
review._body_top_replies = []
review._body_bottom_replies = []
if review.public:
# This is a review we'll display on the page. Keep track of it
# for later display and filtering.
public_reviews.append(review)
parent_id = review.base_reply_to_id
if parent_id is not None:
# This is a reply to a review. We'll store the reply data
# into a map, which associates a review ID with its list of
# replies, and also figures out the timestamps.
#
# Later, we'll use this to associate reviews and replies for
# rendering.
if parent_id not in replies:
replies[parent_id] = [review]
reply_timestamps[parent_id] = review.timestamp
else:
replies[parent_id].append(review)
reply_timestamps[parent_id] = max(
reply_timestamps[parent_id],
review.timestamp)
elif (request.user.is_authenticated() and
review.user_id == request.user.pk and
(review_timestamp == 0 or review.timestamp > review_timestamp)):
# This is the latest draft so far from the current user, so
# we'll use this timestamp in the ETag.
review_timestamp = review.timestamp
if review.public or (request.user.is_authenticated() and
review.user_id == request.user.pk):
reviews_id_map[review.pk] = review
# If this review is replying to another review's body_top or
# body_bottom fields, store that data.
for reply_id, reply_list in (
(review.body_top_reply_to_id, body_top_replies),
(review.body_bottom_reply_to_id, body_bottom_replies)):
if reply_id is not None:
if reply_id not in reply_list:
reply_list[reply_id] = [review]
else:
reply_list[reply_id].append(review)
pending_review = review_request.get_pending_review(request.user)
review_ids = list(reviews_id_map.keys())
last_visited = 0
starred = False
if request.user.is_authenticated():
try:
visited, visited_is_new = \
ReviewRequestVisit.objects.get_or_create(
user=request.user, review_request=review_request)
last_visited = visited.timestamp.replace(tzinfo=utc)
except ReviewRequestVisit.DoesNotExist:
# Somehow, this visit was seen as created but then not
# accessible. We need to log this and then continue on.
logging.error('Unable to get or create ReviewRequestVisit '
'for user "%s" on review request at %s',
request.user.username,
review_request.get_absolute_url())
# If the review request is public and pending review and if the user
# is logged in, mark that they've visited this review request.
if (review_request.public and
review_request.status == review_request.PENDING_REVIEW):
visited.timestamp = timezone.now()
visited.save()
try:
profile = request.user.get_profile()
starred_review_requests = \
profile.starred_review_requests.filter(pk=review_request.pk)
starred = (starred_review_requests.count() > 0)
except Profile.DoesNotExist:
pass
draft = review_request.get_draft(request.user)
review_request_details = draft or review_request
# Map diffset IDs to their object.
diffsets = review_request.get_diffsets()
diffsets_by_id = {}
for diffset in diffsets:
diffsets_by_id[diffset.pk] = diffset
# Find out if we can bail early. Generate an ETag for this.
last_activity_time, updated_object = \
review_request.get_last_activity(diffsets, public_reviews)
if draft:
draft_timestamp = draft.last_updated
else:
draft_timestamp = ""
if visited:
visibility = visited.visibility
else:
visibility = None
blocks = review_request.get_blocks()
etag = encode_etag(
'%s:%s:%s:%s:%s:%s:%s:%s:%s:%s'
% (request.user, last_activity_time, draft_timestamp,
review_timestamp, review_request.last_review_activity_timestamp,
is_rich_text_default_for_user(request.user),
[r.pk for r in blocks],
starred, visibility, settings.AJAX_SERIAL))
if etag_if_none_match(request, etag):
return HttpResponseNotModified()
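    # Past this point the client either sent no If-None-Match header or sent
    # a stale ETag; a request carrying the current ETag (illustrative only)
    # would have been answered with the 304 above:
    #
    #     GET /r/<id>/ HTTP/1.1
    #     If-None-Match: "<etag>"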
# Get the list of public ChangeDescriptions.
#
# We want to get the latest ChangeDescription along with this. This is
# best done here and not in a separate SQL query.
changedescs = list(review_request.changedescs.filter(public=True))
if changedescs:
# We sort from newest to oldest, so the latest one is the first.
latest_timestamp = changedescs[0].timestamp
else:
latest_timestamp = None
# Now that we have the list of public reviews and all that metadata,
    # begin processing them and adding entries for display in the page.
#
# We do this here and not above because we don't want to build *too* much
# before the ETag check.
for review in public_reviews:
if not review.is_reply():
state = ''
# Mark as collapsed if the review is older than the latest
# change.
if latest_timestamp and review.timestamp < latest_timestamp:
state = 'collapsed'
latest_reply = reply_timestamps.get(review.pk, None)
# Mark as expanded if there is a reply newer than last_visited
if latest_reply and last_visited and last_visited < latest_reply:
state = ''
entry = {
'review': review,
'comments': {
'diff_comments': [],
'screenshot_comments': [],
'file_attachment_comments': []
},
'timestamp': review.timestamp,
'class': state,
'collapsed': state == 'collapsed',
'issue_open_count': 0,
'has_issues': False,
}
reviews_entry_map[review.pk] = entry
entries.append(entry)
# Link up all the review body replies.
for key, reply_list in (('_body_top_replies', body_top_replies),
('_body_bottom_replies', body_bottom_replies)):
for reply_id, replies in six.iteritems(reply_list):
setattr(reviews_id_map[reply_id], key, replies)
# Get all the file attachments and screenshots and build a couple maps,
# so we can easily associate those objects in comments.
#
# Note that we're fetching inactive file attachments and screenshots.
    # This is because any file attachments/screenshots created after the initial
# creation of the review request that were later removed will still need
# to be rendered as an added file in a change box.
file_attachments = []
inactive_file_attachments = []
screenshots = []
inactive_screenshots = []
for attachment in review_request_details.get_file_attachments():
attachment._comments = []
file_attachments.append(attachment)
for attachment in review_request_details.get_inactive_file_attachments():
attachment._comments = []
inactive_file_attachments.append(attachment)
for screenshot in review_request_details.get_screenshots():
screenshot._comments = []
screenshots.append(screenshot)
for screenshot in review_request_details.get_inactive_screenshots():
screenshot._comments = []
inactive_screenshots.append(screenshot)
file_attachment_id_map = _build_id_map(file_attachments)
file_attachment_id_map.update(_build_id_map(inactive_file_attachments))
screenshot_id_map = _build_id_map(screenshots)
screenshot_id_map.update(_build_id_map(inactive_screenshots))
issues = {
'total': 0,
'open': 0,
'resolved': 0,
'dropped': 0
}
# Get all the comments and attach them to the reviews.
for model, key, ordering in (
(Comment, 'diff_comments',
('comment__filediff', 'comment__first_line', 'comment__timestamp')),
(ScreenshotComment, 'screenshot_comments', None),
(FileAttachmentComment, 'file_attachment_comments', None)):
# Due to how we initially made the schema, we have a ManyToManyField
        # in between comments and reviews, instead of comments having a
# ForeignKey to the review. This makes it difficult to easily go
# from a comment to a review ID.
#
# The solution to this is to not query the comment objects, but rather
# the through table. This will let us grab the review and comment in
# one go, using select_related.
related_field = model.review.related.field
comment_field_name = related_field.m2m_reverse_field_name()
through = related_field.rel.through
q = through.objects.filter(review__in=review_ids).select_related()
if ordering:
q = q.order_by(*ordering)
objs = list(q)
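        # Each through-table row pairs a review ID with one comment;
        # select_related() above fetches the comment in the same query, and
        # the review object itself is re-attached from reviews_id_map below,
        # so the two passes that follow issue no extra queries.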
# Two passes. One to build a mapping, and one to actually process
# comments.
comment_map = {}
for obj in objs:
comment = getattr(obj, comment_field_name)
comment_map[comment.pk] = comment
comment._replies = []
for obj in objs:
comment = getattr(obj, comment_field_name)
# Short-circuit some object fetches for the comment by setting
# some internal state on them.
assert obj.review_id in reviews_id_map
parent_review = reviews_id_map[obj.review_id]
comment._review = parent_review
comment._review_request = review_request
# If the comment has an associated object that we've already
# queried, attach it to prevent a future lookup.
if isinstance(comment, ScreenshotComment):
if comment.screenshot_id in screenshot_id_map:
screenshot = screenshot_id_map[comment.screenshot_id]
comment.screenshot = screenshot
screenshot._comments.append(comment)
elif isinstance(comment, FileAttachmentComment):
if comment.file_attachment_id in file_attachment_id_map:
file_attachment = \
file_attachment_id_map[comment.file_attachment_id]
comment.file_attachment = file_attachment
file_attachment._comments.append(comment)
diff_against_id = comment.diff_against_file_attachment_id
if diff_against_id in file_attachment_id_map:
file_attachment = file_attachment_id_map[diff_against_id]
comment.diff_against_file_attachment = file_attachment
uncollapse = False
if parent_review.is_reply():
# This is a reply to a comment. Add it to the list of replies.
assert obj.review_id not in reviews_entry_map
assert parent_review.base_reply_to_id in reviews_entry_map
# If there's an entry that isn't a reply, then it's
# orphaned. Ignore it.
if comment.is_reply():
replied_comment = comment_map[comment.reply_to_id]
replied_comment._replies.append(comment)
if not parent_review.public:
uncollapse = True
elif parent_review.public:
# This is a comment on a public review we're going to show.
# Add it to the list.
assert obj.review_id in reviews_entry_map
entry = reviews_entry_map[obj.review_id]
entry['comments'][key].append(comment)
if comment.issue_opened:
status_key = \
comment.issue_status_to_string(comment.issue_status)
issues[status_key] += 1
issues['total'] += 1
entry['has_issues'] = True
if comment.issue_status == BaseComment.OPEN:
entry['issue_open_count'] += 1
if review_request.submitter == request.user:
uncollapse = True
# If the box was collapsed, uncollapse it.
if uncollapse and entry['collapsed']:
entry['class'] = ''
entry['collapsed'] = False
# Sort all the reviews and ChangeDescriptions into a single list, for
# display.
for changedesc in changedescs:
# Process the list of fields, in order by fieldset. These will be
# put into groups composed of inline vs. full-width field values,
# for render into the box.
fields_changed_groups = []
cur_field_changed_group = None
fieldsets = get_review_request_fieldsets(
include_main=True,
include_change_entries_only=True)
for fieldset in fieldsets:
for field_cls in fieldset.field_classes:
field_id = field_cls.field_id
if field_id not in changedesc.fields_changed:
continue
inline = field_cls.change_entry_renders_inline
if (not cur_field_changed_group or
cur_field_changed_group['inline'] != inline):
# Begin a new group of fields.
cur_field_changed_group = {
'inline': inline,
'fields': [],
}
fields_changed_groups.append(cur_field_changed_group)
if hasattr(field_cls, 'locals_vars'):
field = field_cls(review_request, request=request,
locals_vars=locals())
else:
field = field_cls(review_request, request=request)
cur_field_changed_group['fields'] += \
field.get_change_entry_sections_html(
changedesc.fields_changed[field_id])
# See if the review request has had a status change.
status_change = changedesc.fields_changed.get('status')
if status_change:
assert 'new' in status_change
new_status = status_to_string(status_change['new'][0])
else:
new_status = None
# Mark as collapsed if the change is older than a newer change
if latest_timestamp and changedesc.timestamp < latest_timestamp:
state = 'collapsed'
collapsed = True
else:
state = ''
collapsed = False
entries.append({
'new_status': new_status,
'fields_changed_groups': fields_changed_groups,
'changedesc': changedesc,
'timestamp': changedesc.timestamp,
'class': state,
'collapsed': collapsed,
})
entries.sort(key=lambda item: item['timestamp'])
close_description, close_description_rich_text = \
review_request.get_close_description()
latest_file_attachments = _get_latest_file_attachments(file_attachments)
siteconfig = SiteConfiguration.objects.get_current()
context_data = make_review_request_context(request, review_request, {
'blocks': blocks,
'draft': draft,
'review_request_details': review_request_details,
'review_request_visit': visited,
'send_email': siteconfig.get('mail_send_review_mail'),
'entries': entries,
'last_activity_time': last_activity_time,
'review': pending_review,
'request': request,
'close_description': close_description,
'close_description_rich_text': close_description_rich_text,
'issues': issues,
'has_diffs': (draft and draft.diffset_id) or len(diffsets) > 0,
'file_attachments': latest_file_attachments,
'all_file_attachments': file_attachments,
'screenshots': screenshots,
})
response = render_to_response(template_name,
RequestContext(request, context_data))
set_etag(response, etag)
return response
class ReviewsDiffViewerView(DiffViewerView):
"""Renders the diff viewer for a review request.
This wraps the base DiffViewerView to display a diff for the given
review request and the given diff revision or range.
The view expects the following parameters to be provided:
* review_request_id
- The ID of the ReviewRequest containing the diff to render.
The following may also be provided:
* revision
- The DiffSet revision to render.
* interdiff_revision
- The second DiffSet revision in an interdiff revision range.
* local_site
- The LocalSite the ReviewRequest must be on, if any.
See DiffViewerView's documentation for the accepted query parameters.
"""
@method_decorator(check_login_required)
@method_decorator(check_local_site_access)
@augment_method_from(DiffViewerView)
def dispatch(self, *args, **kwargs):
pass
def get(self, request, review_request_id, revision=None,
interdiff_revision=None, local_site=None):
"""Handles GET requests for this view.
This will look up the review request and DiffSets, given the
provided information, and pass them to the parent class for rendering.
"""
review_request, response = \
_find_review_request(request, review_request_id, local_site)
if not review_request:
return response
self.review_request = review_request
self.draft = review_request.get_draft(request.user)
self.diffset = _query_for_diff(review_request, request.user,
revision, self.draft)
self.interdiffset = None
if interdiff_revision and interdiff_revision != revision:
# An interdiff revision was specified. Try to find a matching
# diffset.
self.interdiffset = _query_for_diff(review_request, request.user,
interdiff_revision, self.draft)
return super(ReviewsDiffViewerView, self).get(
request, self.diffset, self.interdiffset)
def get_context_data(self, *args, **kwargs):
"""Calculates additional context data for rendering.
This provides some additional data used for rendering the diff
viewer. This data is more specific to the reviewing functionality,
as opposed to the data calculated by DiffViewerView.get_context_data,
which is more focused on the actual diff.
"""
# Try to find an existing pending review of this diff from the
# current user.
pending_review = \
self.review_request.get_pending_review(self.request.user)
has_draft_diff = self.draft and self.draft.diffset
is_draft_diff = has_draft_diff and self.draft.diffset == self.diffset
is_draft_interdiff = (has_draft_diff and self.interdiffset and
self.draft.diffset == self.interdiffset)
# Get the list of diffsets. We only want to calculate this once.
diffsets = self.review_request.get_diffsets()
num_diffs = len(diffsets)
if num_diffs > 0:
latest_diffset = diffsets[-1]
else:
latest_diffset = None
if self.draft and self.draft.diffset:
num_diffs += 1
last_activity_time, updated_object = \
self.review_request.get_last_activity(diffsets)
file_attachments = list(self.review_request.get_file_attachments())
screenshots = list(self.review_request.get_screenshots())
latest_file_attachments = \
_get_latest_file_attachments(file_attachments)
# Compute the lists of comments based on filediffs and interfilediffs.
# We do this using the 'through' table so that we can select_related
# the reviews and comments.
comments = {}
q = Comment.review.related.field.rel.through.objects.filter(
review__review_request=self.review_request)
q = q.select_related()
for obj in q:
comment = obj.comment
comment._review = obj.review
key = (comment.filediff_id, comment.interfilediff_id)
comments.setdefault(key, []).append(comment)
close_description, close_description_rich_text = \
self.review_request.get_close_description()
context = super(ReviewsDiffViewerView, self).get_context_data(
*args, **kwargs)
siteconfig = SiteConfiguration.objects.get_current()
context.update({
'close_description': close_description,
'close_description_rich_text': close_description_rich_text,
'diffsets': diffsets,
'latest_diffset': latest_diffset,
'review': pending_review,
'review_request_details': self.draft or self.review_request,
'draft': self.draft,
'last_activity_time': last_activity_time,
'file_attachments': latest_file_attachments,
'all_file_attachments': file_attachments,
'screenshots': screenshots,
'comments': comments,
'send_email': siteconfig.get('mail_send_review_mail'),
})
context.update(
make_review_request_context(self.request, self.review_request))
diffset_pair = context['diffset_pair']
context['diff_context'].update({
'num_diffs': num_diffs,
'comments_hint': {
'has_other_comments': has_comments_in_diffsets_excluding(
pending_review, diffset_pair),
'diffsets_with_comments': [
{
'revision': diffset_info['diffset'].revision,
'is_current': diffset_info['is_current'],
}
for diffset_info in diffsets_with_comments(
pending_review, diffset_pair)
],
'interdiffs_with_comments': [
{
'old_revision': pair['diffset'].revision,
'new_revision': pair['interdiff'].revision,
'is_current': pair['is_current'],
}
for pair in interdiffs_with_comments(
pending_review, diffset_pair)
],
},
})
context['diff_context']['revision'].update({
'latest_revision': (latest_diffset.revision
if latest_diffset else None),
'is_draft_diff': is_draft_diff,
'is_draft_interdiff': is_draft_interdiff,
})
files = []
for f in context['files']:
filediff = f['filediff']
interfilediff = f['interfilediff']
data = {
'newfile': f['newfile'],
'binary': f['binary'],
'deleted': f['deleted'],
'id': f['filediff'].pk,
'depot_filename': f['depot_filename'],
'dest_filename': f['dest_filename'],
'dest_revision': f['dest_revision'],
'revision': f['revision'],
'filediff': {
'id': filediff.id,
'revision': filediff.diffset.revision,
},
'index': f['index'],
'comment_counts': comment_counts(self.request.user, comments,
filediff, interfilediff),
}
if interfilediff:
data['interfilediff'] = {
'id': interfilediff.id,
'revision': interfilediff.diffset.revision,
}
if f['force_interdiff']:
data['force_interdiff'] = True
data['interdiff_revision'] = f['force_interdiff_revision']
files.append(data)
context['diff_context']['files'] = files
return context
@check_login_required
@check_local_site_access
def raw_diff(request, review_request_id, revision=None, local_site=None):
"""
Displays a raw diff of all the filediffs in a diffset for the
given review request.
"""
review_request, response = \
_find_review_request(request, review_request_id, local_site)
if not review_request:
return response
draft = review_request.get_draft(request.user)
diffset = _query_for_diff(review_request, request.user, revision, draft)
tool = review_request.repository.get_scmtool()
data = tool.get_parser('').raw_diff(diffset)
resp = HttpResponse(data, content_type='text/x-patch')
if diffset.name == 'diff':
filename = "rb%d.patch" % review_request.display_id
else:
filename = six.text_type(diffset.name).encode('ascii', 'ignore')
# Content-Disposition headers containing commas break on Chrome 16 and
# newer. To avoid this, replace any commas in the filename with an
# underscore. Was bug 3704.
filename = filename.replace(',', '_')
resp['Content-Disposition'] = 'attachment; filename=%s' % filename
set_last_modified(resp, diffset.timestamp)
return resp
@check_login_required
@check_local_site_access
def comment_diff_fragments(
request,
review_request_id,
comment_ids,
template_name='reviews/load_diff_comment_fragments.js',
comment_template_name='reviews/diff_comment_fragment.html',
error_template_name='diffviewer/diff_fragment_error.html',
local_site=None):
"""
Returns the fragment representing the parts of a diff referenced by the
specified list of comment IDs. This is used to allow batch lazy-loading
of these diff fragments based on filediffs, since they may not be cached
and take time to generate.
"""
comments = get_list_or_404(Comment, pk__in=comment_ids.split(","))
latest_timestamp = get_latest_timestamp(comment.timestamp
for comment in comments)
etag = encode_etag(
'%s:%s:%s'
% (comment_ids, latest_timestamp, settings.TEMPLATE_SERIAL))
if etag_if_none_match(request, etag):
response = HttpResponseNotModified()
else:
# While we don't actually need the review request, we still want to do
# this lookup in order to get the permissions checking.
review_request, response = \
_find_review_request(request, review_request_id, local_site)
if not review_request:
return response
lines_of_context = request.GET.get('lines_of_context', '0,0')
container_prefix = request.GET.get('container_prefix')
try:
lines_of_context = [int(i) for i in lines_of_context.split(',')]
# Ensure that we have 2 values for lines_of_context. If only one is
# given, assume it is both the before and after context. If more than
# two are given, only consider the first two. If somehow we get no
# lines of context value, we will default to [0, 0].
if len(lines_of_context) == 1:
lines_of_context.append(lines_of_context[0])
elif len(lines_of_context) > 2:
lines_of_context = lines_of_context[0:2]
elif len(lines_of_context) == 0:
raise ValueError
except ValueError:
lines_of_context = [0, 0]
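        # Illustrative results of the normalization above:
        #     '5'     -> [5, 5]
        #     '1,2'   -> [1, 2]
        #     '1,2,3' -> [1, 2]
        #     invalid -> [0, 0]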
context = RequestContext(request, {
'comment_entries': [],
'container_prefix': container_prefix,
'queue_name': request.GET.get('queue'),
'show_controls': request.GET.get('show_controls', False),
})
had_error, context['comment_entries'] = (
build_diff_comment_fragments(
comments,
context,
comment_template_name,
error_template_name,
lines_of_context=lines_of_context,
show_controls='draft' not in container_prefix))
page_content = render_to_string(template_name, context)
response = HttpResponse(
page_content,
content_type='application/javascript')
if had_error:
return response
set_etag(response, etag)
response['Expires'] = http_date(time.time() + 60 * 60 * 24 * 365) # 1 year
return response
class ReviewsDiffFragmentView(DiffFragmentView):
"""Renders a fragment from a file in the diff viewer.
Displays just a fragment of a diff or interdiff owned by the given
review request. The fragment is identified by the chunk index in the
diff.
The view expects the following parameters to be provided:
* review_request_id
- The ID of the ReviewRequest containing the diff to render.
* revision
- The DiffSet revision to render.
* filediff_id
- The ID of the FileDiff within the DiffSet.
The following may also be provided:
* interdiff_revision
- The second DiffSet revision in an interdiff revision range.
* chunkindex
- The index (0-based) of the chunk to render. If left out, the
entire file will be rendered.
* local_site
- The LocalSite the ReviewRequest must be on, if any.
See DiffFragmentView's documentation for the accepted query parameters.
"""
@method_decorator(check_login_required)
@method_decorator(check_local_site_access)
@augment_method_from(DiffFragmentView)
def dispatch(self, *args, **kwargs):
pass
def process_diffset_info(self, review_request_id, revision,
interdiff_revision=None, local_site=None,
*args, **kwargs):
"""Process and return information on the desired diff.
The diff IDs and other data passed to the view can be processed and
converted into DiffSets. A dictionary with the DiffSet and FileDiff
information will be returned.
If the review request cannot be accessed by the user, an HttpResponse
will be returned instead.
"""
self.review_request, response = \
_find_review_request(self.request, review_request_id, local_site)
if not self.review_request:
return response
user = self.request.user
draft = self.review_request.get_draft(user)
if interdiff_revision is not None:
interdiffset = _query_for_diff(self.review_request, user,
interdiff_revision, draft)
else:
interdiffset = None
diffset = _query_for_diff(self.review_request, user, revision, draft)
return super(ReviewsDiffFragmentView, self).process_diffset_info(
diffset_or_id=diffset,
interdiffset_or_id=interdiffset,
**kwargs)
def create_renderer(self, diff_file, *args, **kwargs):
"""Creates the DiffRenderer for this fragment.
        This will augment the renderer for binary files by looking up any
        associated file attachments and their review UIs, and it disables
        caching when a review UI is rendered.
"""
renderer = super(ReviewsDiffFragmentView, self).create_renderer(
diff_file=diff_file, *args, **kwargs)
if diff_file['binary']:
# Determine the file attachments to display in the diff viewer,
# if any.
filediff = diff_file['filediff']
interfilediff = diff_file['interfilediff']
orig_attachment = None
modified_attachment = None
if diff_file['force_interdiff']:
orig_attachment = self._get_diff_file_attachment(filediff)
modified_attachment = \
self._get_diff_file_attachment(interfilediff)
else:
modified_attachment = self._get_diff_file_attachment(filediff)
if not diff_file['is_new_file']:
orig_attachment = \
self._get_diff_file_attachment(filediff, False)
diff_review_ui = None
diff_review_ui_html = None
orig_review_ui = None
orig_review_ui_html = None
modified_review_ui = None
modified_review_ui_html = None
if orig_attachment:
orig_review_ui = orig_attachment.review_ui
if modified_attachment:
modified_review_ui = modified_attachment.review_ui
# See if we're able to generate a diff review UI for these files.
if (orig_review_ui and modified_review_ui and
orig_review_ui.__class__ is modified_review_ui.__class__ and
modified_review_ui.supports_diffing):
# Both files are able to be diffed by this review UI.
# We'll display a special diff review UI instead of two
# side-by-side review UIs.
diff_review_ui = modified_review_ui
diff_review_ui.set_diff_against(orig_attachment)
diff_review_ui_html = \
self._render_review_ui(diff_review_ui, False)
else:
# We won't be showing a diff of these files. Instead, just
# grab the review UIs and render them.
orig_review_ui_html = \
self._render_review_ui(orig_review_ui)
modified_review_ui_html = \
self._render_review_ui(modified_review_ui)
if (diff_review_ui_html or orig_review_ui_html or
modified_review_ui_html):
# Don't cache the view, because the Review UI may care about
# state that we can't anticipate. At the least, it may have
# comments or other data that change between renders, and we
# don't want that to go stale.
renderer.allow_caching = False
renderer.extra_context.update({
'orig_diff_file_attachment': orig_attachment,
'modified_diff_file_attachment': modified_attachment,
'orig_attachment_review_ui_html': orig_review_ui_html,
'modified_attachment_review_ui_html': modified_review_ui_html,
'diff_attachment_review_ui_html': diff_review_ui_html,
})
renderer.extra_context.update(
self._get_download_links(renderer, diff_file))
return renderer
def get_context_data(self, **kwargs):
return {
'review_request': self.review_request,
}
def _get_download_links(self, renderer, diff_file):
if diff_file['binary']:
orig_attachment = \
renderer.extra_context['orig_diff_file_attachment']
modified_attachment = \
renderer.extra_context['modified_diff_file_attachment']
if orig_attachment:
download_orig_url = orig_attachment.get_absolute_url()
else:
download_orig_url = None
if modified_attachment:
download_modified_url = modified_attachment.get_absolute_url()
else:
download_modified_url = None
else:
filediff = diff_file['filediff']
interfilediff = diff_file['interfilediff']
diffset = filediff.diffset
if interfilediff:
orig_url_name = 'download-modified-file'
modified_revision = interfilediff.diffset.revision
modified_filediff_id = interfilediff.pk
else:
orig_url_name = 'download-orig-file'
modified_revision = diffset.revision
modified_filediff_id = filediff.pk
download_orig_url = local_site_reverse(
orig_url_name,
request=self.request,
kwargs={
'review_request_id': self.review_request.display_id,
'revision': diffset.revision,
'filediff_id': filediff.pk,
})
download_modified_url = local_site_reverse(
'download-modified-file',
request=self.request,
kwargs={
'review_request_id': self.review_request.display_id,
'revision': modified_revision,
'filediff_id': modified_filediff_id,
})
return {
'download_orig_url': download_orig_url,
'download_modified_url': download_modified_url,
}
def _render_review_ui(self, review_ui, inline_only=True):
"""Renders the review UI for a file attachment."""
if review_ui and (not inline_only or review_ui.allow_inline):
return mark_safe(review_ui.render_to_string(self.request))
return None
def _get_diff_file_attachment(self, filediff, use_modified=True):
"""Fetch the FileAttachment associated with a FileDiff.
        This will query for the FileAttachment based on the provided filediff
        and return the matching attachment, if any.
If 'use_modified' is True, the FileAttachment returned will be from the
modified version of the new file. Otherwise, it's the original file
that's being modified.
If no matching FileAttachment is found or if there is more than one
FileAttachment associated with one FileDiff, None is returned. An error
is logged in the latter case.
"""
if not filediff:
return None
try:
return FileAttachment.objects.get_for_filediff(filediff,
use_modified)
except ObjectDoesNotExist:
return None
except MultipleObjectsReturned:
# Only one FileAttachment should be associated with a FileDiff
            logging.error('More than one FileAttachment associated with '
'FileDiff %s',
filediff.pk,
exc_info=1)
return None
@check_login_required
@check_local_site_access
def preview_review_request_email(
request,
review_request_id,
format,
text_template_name='notifications/review_request_email.txt',
html_template_name='notifications/review_request_email.html',
changedesc_id=None,
local_site=None):
"""
Previews the e-mail message that would be sent for an initial
review request or an update.
This is mainly used for debugging.
"""
review_request, response = \
_find_review_request(request, review_request_id, local_site)
if not review_request:
return response
extra_context = {}
if changedesc_id:
changedesc = get_object_or_404(ChangeDescription, pk=changedesc_id)
extra_context['change_text'] = changedesc.text
extra_context['changes'] = changedesc.fields_changed
siteconfig = SiteConfiguration.objects.get_current()
if format == 'text':
template_name = text_template_name
mimetype = 'text/plain'
elif format == 'html':
template_name = html_template_name
mimetype = 'text/html'
else:
raise Http404
return HttpResponse(render_to_string(
template_name,
RequestContext(request, dict({
'review_request': review_request,
'user': request.user,
'domain': Site.objects.get_current().domain,
'domain_method': siteconfig.get("site_domain_method"),
}, **extra_context)),
), content_type=mimetype)
@check_login_required
@check_local_site_access
def preview_review_email(request, review_request_id, review_id, format,
text_template_name='notifications/review_email.txt',
html_template_name='notifications/review_email.html',
extra_context={},
local_site=None):
"""
Previews the e-mail message that would be sent for a review of a
review request.
This is mainly used for debugging.
"""
review_request, response = \
_find_review_request(request, review_request_id, local_site)
if not review_request:
return response
review = get_object_or_404(Review, pk=review_id,
review_request=review_request)
siteconfig = SiteConfiguration.objects.get_current()
review.ordered_comments = \
review.comments.order_by('filediff', 'first_line')
if format == 'text':
template_name = text_template_name
mimetype = 'text/plain'
elif format == 'html':
template_name = html_template_name
mimetype = 'text/html'
else:
raise Http404
context = {
'review_request': review_request,
'review': review,
'user': request.user,
'domain': Site.objects.get_current().domain,
'domain_method': siteconfig.get("site_domain_method"),
}
context.update(extra_context)
has_error, context['comment_entries'] = \
build_diff_comment_fragments(
review.ordered_comments, context,
"notifications/email_diff_comment_fragment.html")
return HttpResponse(
render_to_string(template_name, RequestContext(request, context)),
content_type=mimetype)
@check_login_required
@check_local_site_access
def preview_reply_email(request, review_request_id, review_id, reply_id,
format,
text_template_name='notifications/reply_email.txt',
html_template_name='notifications/reply_email.html',
local_site=None):
"""
Previews the e-mail message that would be sent for a reply to a
review of a review request.
This is mainly used for debugging.
"""
review_request, response = \
_find_review_request(request, review_request_id, local_site)
if not review_request:
return response
review = get_object_or_404(Review, pk=review_id,
review_request=review_request)
reply = get_object_or_404(Review, pk=reply_id, base_reply_to=review)
siteconfig = SiteConfiguration.objects.get_current()
reply.ordered_comments = \
reply.comments.order_by('filediff', 'first_line')
if format == 'text':
template_name = text_template_name
mimetype = 'text/plain'
elif format == 'html':
template_name = html_template_name
mimetype = 'text/html'
else:
raise Http404
context = {
'review_request': review_request,
'review': review,
'reply': reply,
'user': request.user,
'domain': Site.objects.get_current().domain,
'domain_method': siteconfig.get("site_domain_method"),
}
has_error, context['comment_entries'] = \
build_diff_comment_fragments(
reply.ordered_comments, context,
"notifications/email_diff_comment_fragment.html")
return HttpResponse(
render_to_string(template_name, RequestContext(request, context)),
content_type=mimetype)
@check_login_required
@check_local_site_access
def review_file_attachment(request, review_request_id, file_attachment_id,
file_attachment_diff_id=None, local_site=None):
"""Displays a file attachment with a review UI."""
review_request, response = \
_find_review_request(request, review_request_id, local_site)
if not review_request:
return response
file_attachment = get_object_or_404(FileAttachment, pk=file_attachment_id)
review_ui = file_attachment.review_ui
if not review_ui:
review_ui = FileAttachmentReviewUI(review_request, file_attachment)
if file_attachment_diff_id:
file_attachment_revision = get_object_or_404(
FileAttachment.objects.filter(
attachment_history=file_attachment.attachment_history),
pk=file_attachment_diff_id)
review_ui.set_diff_against(file_attachment_revision)
try:
is_enabled_for = review_ui.is_enabled_for(
user=request.user,
review_request=review_request,
file_attachment=file_attachment)
except Exception as e:
logging.error('Error when calling is_enabled_for for '
'FileAttachmentReviewUI %r: %s',
review_ui, e, exc_info=1)
is_enabled_for = False
if review_ui and is_enabled_for:
return review_ui.render_to_response(request)
else:
raise Http404
@check_login_required
@check_local_site_access
def view_screenshot(request, review_request_id, screenshot_id,
local_site=None):
"""
Displays a screenshot, along with any comments that were made on it.
"""
review_request, response = \
_find_review_request(request, review_request_id, local_site)
if not review_request:
return response
screenshot = get_object_or_404(Screenshot, pk=screenshot_id)
review_ui = LegacyScreenshotReviewUI(review_request, screenshot)
return review_ui.render_to_response(request)
@check_login_required
@check_local_site_access
def user_infobox(request, username,
template_name='accounts/user_infobox.html',
local_site=None):
"""Displays a user info popup.
This is meant to be embedded in other pages, rather than being
a standalone page.
"""
user = get_object_or_404(User, username=username)
show_profile = user.is_profile_visible(request.user)
etag = encode_etag(':'.join([
user.first_name,
user.last_name,
user.email,
six.text_type(user.last_login),
six.text_type(settings.TEMPLATE_SERIAL),
six.text_type(show_profile)
]))
if etag_if_none_match(request, etag):
return HttpResponseNotModified()
response = render_to_response(template_name, RequestContext(request, {
'show_profile': show_profile,
'requested_user': user,
}))
set_etag(response, etag)
return response
def bug_url(request, review_request_id, bug_id, local_site=None):
"""Redirects user to bug tracker issue page."""
review_request, response = \
_find_review_request(request, review_request_id, local_site)
if not review_request:
return response
return HttpResponseRedirect(review_request.repository.bug_tracker % bug_id)
def bug_infobox(request, review_request_id, bug_id,
template_name='reviews/bug_infobox.html',
local_site=None):
"""Displays a bug info popup.
This is meant to be embedded in other pages, rather than being
a standalone page.
"""
review_request, response = \
_find_review_request(request, review_request_id, local_site)
if not review_request:
return response
repository = review_request.repository
bug_tracker = repository.bug_tracker_service
if not bug_tracker:
return HttpResponseNotFound(_('Unable to find bug tracker service'))
if not isinstance(bug_tracker, BugTracker):
return HttpResponseNotFound(
_('Bug tracker %s does not support metadata') % bug_tracker.name)
bug_info = bug_tracker.get_bug_info(repository, bug_id)
bug_description = bug_info['description']
bug_summary = bug_info['summary']
bug_status = bug_info['status']
if not bug_summary and not bug_description:
return HttpResponseNotFound(
_('No bug metadata found for bug %(bug_id)s on bug tracker '
'%(bug_tracker)s') % {
'bug_id': bug_id,
'bug_tracker': bug_tracker.name,
})
# Don't do anything for single newlines, but treat two newlines as a
# paragraph break.
escaped_description = escape(bug_description).replace('\n\n', '<br/><br/>')
return render_to_response(template_name, RequestContext(request, {
'bug_id': bug_id,
'bug_description': mark_safe(escaped_description),
'bug_status': bug_status,
'bug_summary': bug_summary
}))
def _download_diff_file(modified, request, review_request_id, revision,
filediff_id, local_site=None):
"""Downloads an original or modified file from a diff.
This will fetch the file from a FileDiff, optionally patching it,
and return the result as an HttpResponse.
"""
review_request, response = \
_find_review_request(request, review_request_id, local_site)
if not review_request:
return response
draft = review_request.get_draft(request.user)
diffset = _query_for_diff(review_request, request.user, revision, draft)
filediff = get_object_or_404(diffset.files, pk=filediff_id)
encoding_list = diffset.repository.get_encoding_list()
data = get_original_file(filediff, request, encoding_list)
if modified:
data = get_patched_file(data, filediff, request)
data = convert_to_unicode(data, encoding_list)[1]
return HttpResponse(data, content_type='text/plain; charset=utf-8')
@check_login_required
@check_local_site_access
def download_orig_file(*args, **kwargs):
"""Downloads an original file from a diff."""
return _download_diff_file(False, *args, **kwargs)
@check_login_required
@check_local_site_access
def download_modified_file(*args, **kwargs):
"""Downloads a modified file from a diff."""
return _download_diff_file(True, *args, **kwargs)
| mit |
boomsbloom/dtm-fmri | DTM/for_gensim/lib/python2.7/site-packages/pandas/io/tests/test_pickle.py | 7 | 10831 | # pylint: disable=E1101,E1103,W0232
""" manage legacy pickle tests """
import nose
import os
from distutils.version import LooseVersion
import pandas as pd
from pandas import Index
from pandas.compat import u, is_platform_little_endian
import pandas
import pandas.util.testing as tm
from pandas.tseries.offsets import Day, MonthEnd
class TestPickle():
"""
How to add pickle tests:
1. Install pandas version intended to output the pickle.
2. Execute "generate_legacy_storage_files.py" to create the pickle.
$ python generate_legacy_storage_files.py <output_dir> pickle
3. Move the created pickle to "data/legacy_pickle/<version>" directory.
    NOTE: TestPickle can't be a subclass of tm.TestCase to use test generator.
http://stackoverflow.com/questions/6689537/
nose-test-generators-inside-class
"""
_multiprocess_can_split_ = True
def setUp(self):
from pandas.io.tests.generate_legacy_storage_files import (
create_pickle_data)
self.data = create_pickle_data()
self.path = u('__%s__.pickle' % tm.rands(10))
def compare_element(self, result, expected, typ, version=None):
if isinstance(expected, Index):
tm.assert_index_equal(expected, result)
return
if typ.startswith('sp_'):
comparator = getattr(tm, "assert_%s_equal" % typ)
comparator(result, expected, exact_indices=False)
elif typ == 'timestamp':
if expected is pd.NaT:
assert result is pd.NaT
else:
tm.assert_equal(result, expected)
tm.assert_equal(result.freq, expected.freq)
else:
comparator = getattr(tm, "assert_%s_equal" %
typ, tm.assert_almost_equal)
comparator(result, expected)
def compare(self, vf, version):
# py3 compat when reading py2 pickle
try:
data = pandas.read_pickle(vf)
except (ValueError) as e:
if 'unsupported pickle protocol:' in str(e):
# trying to read a py3 pickle in py2
return
else:
raise
for typ, dv in data.items():
for dt, result in dv.items():
try:
expected = self.data[typ][dt]
except (KeyError):
if version in ('0.10.1', '0.11.0') and dt == 'reg':
break
else:
raise
# use a specific comparator
# if available
comparator = "compare_{typ}_{dt}".format(typ=typ, dt=dt)
comparator = getattr(self, comparator, self.compare_element)
comparator(result, expected, typ, version)
return data
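    # The getattr() dispatch above means a method named, for example,
    # compare_sp_series_ts() automatically overrides compare_element() for
    # entries stored under data['sp_series']['ts'] in the legacy pickles.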
def compare_sp_series_ts(self, res, exp, typ, version):
# SparseTimeSeries integrated into SparseSeries in 0.12.0
# and deprecated in 0.17.0
if version and LooseVersion(version) <= "0.12.0":
tm.assert_sp_series_equal(res, exp, check_series_type=False)
else:
tm.assert_sp_series_equal(res, exp)
def compare_series_ts(self, result, expected, typ, version):
# GH 7748
tm.assert_series_equal(result, expected)
tm.assert_equal(result.index.freq, expected.index.freq)
tm.assert_equal(result.index.freq.normalize, False)
tm.assert_series_equal(result > 0, expected > 0)
# GH 9291
freq = result.index.freq
tm.assert_equal(freq + Day(1), Day(2))
res = freq + pandas.Timedelta(hours=1)
tm.assert_equal(isinstance(res, pandas.Timedelta), True)
tm.assert_equal(res, pandas.Timedelta(days=1, hours=1))
res = freq + pandas.Timedelta(nanoseconds=1)
tm.assert_equal(isinstance(res, pandas.Timedelta), True)
tm.assert_equal(res, pandas.Timedelta(days=1, nanoseconds=1))
def compare_series_dt_tz(self, result, expected, typ, version):
# 8260
# dtype is object < 0.17.0
if LooseVersion(version) < '0.17.0':
expected = expected.astype(object)
tm.assert_series_equal(result, expected)
else:
tm.assert_series_equal(result, expected)
def compare_series_cat(self, result, expected, typ, version):
# Categorical dtype is added in 0.15.0
# ordered is changed in 0.16.0
if LooseVersion(version) < '0.15.0':
tm.assert_series_equal(result, expected, check_dtype=False,
check_categorical=False)
elif LooseVersion(version) < '0.16.0':
tm.assert_series_equal(result, expected, check_categorical=False)
else:
tm.assert_series_equal(result, expected)
def compare_frame_dt_mixed_tzs(self, result, expected, typ, version):
# 8260
# dtype is object < 0.17.0
if LooseVersion(version) < '0.17.0':
expected = expected.astype(object)
tm.assert_frame_equal(result, expected)
else:
tm.assert_frame_equal(result, expected)
def compare_frame_cat_onecol(self, result, expected, typ, version):
# Categorical dtype is added in 0.15.0
# ordered is changed in 0.16.0
if LooseVersion(version) < '0.15.0':
tm.assert_frame_equal(result, expected, check_dtype=False,
check_categorical=False)
elif LooseVersion(version) < '0.16.0':
tm.assert_frame_equal(result, expected, check_categorical=False)
else:
tm.assert_frame_equal(result, expected)
def compare_frame_cat_and_float(self, result, expected, typ, version):
self.compare_frame_cat_onecol(result, expected, typ, version)
def compare_index_period(self, result, expected, typ, version):
tm.assert_index_equal(result, expected)
tm.assertIsInstance(result.freq, MonthEnd)
tm.assert_equal(result.freq, MonthEnd())
tm.assert_equal(result.freqstr, 'M')
tm.assert_index_equal(result.shift(2), expected.shift(2))
def compare_sp_frame_float(self, result, expected, typ, version):
if LooseVersion(version) <= '0.18.1':
tm.assert_sp_frame_equal(result, expected, exact_indices=False,
check_dtype=False)
else:
tm.assert_sp_frame_equal(result, expected)
def read_pickles(self, version):
if not is_platform_little_endian():
raise nose.SkipTest("known failure on non-little endian")
pth = tm.get_data_path('legacy_pickle/{0}'.format(str(version)))
n = 0
for f in os.listdir(pth):
vf = os.path.join(pth, f)
data = self.compare(vf, version)
if data is None:
continue
n += 1
assert n > 0, 'Pickle files are not tested'
def test_pickles(self):
pickle_path = tm.get_data_path('legacy_pickle')
n = 0
for v in os.listdir(pickle_path):
pth = os.path.join(pickle_path, v)
if os.path.isdir(pth):
yield self.read_pickles, v
n += 1
assert n > 0, 'Pickle files are not tested'
def test_round_trip_current(self):
try:
import cPickle as c_pickle
def c_pickler(obj, path):
with open(path, 'wb') as fh:
c_pickle.dump(obj, fh, protocol=-1)
def c_unpickler(path):
with open(path, 'rb') as fh:
fh.seek(0)
return c_pickle.load(fh)
except:
c_pickler = None
c_unpickler = None
import pickle as python_pickle
def python_pickler(obj, path):
with open(path, 'wb') as fh:
python_pickle.dump(obj, fh, protocol=-1)
def python_unpickler(path):
with open(path, 'rb') as fh:
fh.seek(0)
return python_pickle.load(fh)
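        # Each fixture is written with pandas' own to_pickle plus, where
        # available, the stdlib cPickle/pickle dumpers, and read back with
        # each unpickler, so a regression in any of them surfaces in the
        # comparisons below.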
for typ, dv in self.data.items():
for dt, expected in dv.items():
for writer in [pd.to_pickle, c_pickler, python_pickler]:
if writer is None:
continue
with tm.ensure_clean(self.path) as path:
# test writing with each pickler
writer(expected, path)
# test reading with each unpickler
result = pd.read_pickle(path)
self.compare_element(result, expected, typ)
if c_unpickler is not None:
result = c_unpickler(path)
self.compare_element(result, expected, typ)
result = python_unpickler(path)
self.compare_element(result, expected, typ)
def test_pickle_v0_14_1(self):
# we have the name warning
# 10482
with tm.assert_produces_warning(UserWarning):
cat = pd.Categorical(values=['a', 'b', 'c'],
categories=['a', 'b', 'c', 'd'],
name='foobar', ordered=False)
pickle_path = os.path.join(tm.get_data_path(),
'categorical_0_14_1.pickle')
# This code was executed once on v0.14.1 to generate the pickle:
#
# cat = Categorical(labels=np.arange(3), levels=['a', 'b', 'c', 'd'],
# name='foobar')
# with open(pickle_path, 'wb') as f: pickle.dump(cat, f)
#
tm.assert_categorical_equal(cat, pd.read_pickle(pickle_path))
def test_pickle_v0_15_2(self):
# ordered -> _ordered
# GH 9347
# we have the name warning
# 10482
with tm.assert_produces_warning(UserWarning):
cat = pd.Categorical(values=['a', 'b', 'c'],
categories=['a', 'b', 'c', 'd'],
name='foobar', ordered=False)
pickle_path = os.path.join(tm.get_data_path(),
'categorical_0_15_2.pickle')
# This code was executed once on v0.15.2 to generate the pickle:
#
# cat = Categorical(labels=np.arange(3), levels=['a', 'b', 'c', 'd'],
# name='foobar')
# with open(pickle_path, 'wb') as f: pickle.dump(cat, f)
#
tm.assert_categorical_equal(cat, pd.read_pickle(pickle_path))
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
# '--with-coverage', '--cover-package=pandas.core'],
exit=False)
| mit |
sbalde/edxplatform | lms/djangoapps/oauth2_handler/tests.py | 57 | 9001 | # pylint: disable=missing-docstring
from django.core.cache import cache
from django.test.utils import override_settings
from lang_pref import LANGUAGE_KEY
from xmodule.modulestore.tests.factories import (check_mongo_calls, CourseFactory)
from student.models import anonymous_id_for_user
from student.models import UserProfile
from student.roles import (CourseInstructorRole, CourseStaffRole, GlobalStaff,
OrgInstructorRole, OrgStaffRole)
from student.tests.factories import UserFactory, UserProfileFactory
from openedx.core.djangoapps.user_api.preferences.api import set_user_preference
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
# Will also run default tests for IDTokens and UserInfo
from oauth2_provider.tests import IDTokenTestCase, UserInfoTestCase
class BaseTestMixin(ModuleStoreTestCase):
profile = None
def setUp(self):
super(BaseTestMixin, self).setUp()
self.course_key = CourseFactory.create(emit_signals=True).id
self.course_id = unicode(self.course_key)
self.user_factory = UserFactory
self.set_user(self.make_user())
def set_user(self, user):
super(BaseTestMixin, self).set_user(user)
self.profile = UserProfileFactory(user=self.user)
class IDTokenTest(BaseTestMixin, IDTokenTestCase):
def setUp(self):
super(IDTokenTest, self).setUp()
# CourseAccessHandler uses the application cache.
cache.clear()
def test_sub_claim(self):
scopes, claims = self.get_id_token_values('openid')
self.assertIn('openid', scopes)
sub = claims['sub']
expected_sub = anonymous_id_for_user(self.user, None)
self.assertEqual(sub, expected_sub)
def test_user_name_claim(self):
_scopes, claims = self.get_id_token_values('openid profile')
claim_name = claims['name']
user_profile = UserProfile.objects.get(user=self.user)
user_name = user_profile.name
self.assertEqual(claim_name, user_name)
@override_settings(LANGUAGE_CODE='en')
def test_user_without_locale_claim(self):
scopes, claims = self.get_id_token_values('openid profile')
self.assertIn('profile', scopes)
self.assertEqual(claims['locale'], 'en')
def test_user_with_locale_claim(self):
language = 'en'
set_user_preference(self.user, LANGUAGE_KEY, language)
scopes, claims = self.get_id_token_values('openid profile')
self.assertIn('profile', scopes)
locale = claims['locale']
self.assertEqual(language, locale)
def test_no_special_course_access(self):
with check_mongo_calls(0):
scopes, claims = self.get_id_token_values('openid course_instructor course_staff')
self.assertNotIn('course_staff', scopes)
self.assertNotIn('staff_courses', claims)
self.assertNotIn('course_instructor', scopes)
self.assertNotIn('instructor_courses', claims)
def test_course_staff_courses(self):
CourseStaffRole(self.course_key).add_users(self.user)
with check_mongo_calls(0):
scopes, claims = self.get_id_token_values('openid course_staff')
self.assertIn('course_staff', scopes)
self.assertNotIn('staff_courses', claims) # should not return courses in id_token
def test_course_instructor_courses(self):
with check_mongo_calls(0):
CourseInstructorRole(self.course_key).add_users(self.user)
scopes, claims = self.get_id_token_values('openid course_instructor')
self.assertIn('course_instructor', scopes)
self.assertNotIn('instructor_courses', claims) # should not return courses in id_token
def test_course_staff_courses_with_claims(self):
CourseStaffRole(self.course_key).add_users(self.user)
course_id = unicode(self.course_key)
nonexistent_course_id = 'some/other/course'
claims = {
'staff_courses': {
'values': [course_id, nonexistent_course_id],
'essential': True,
}
}
with check_mongo_calls(0):
scopes, claims = self.get_id_token_values(scope='openid course_staff', claims=claims)
self.assertIn('course_staff', scopes)
self.assertIn('staff_courses', claims)
self.assertEqual(len(claims['staff_courses']), 1)
self.assertIn(course_id, claims['staff_courses'])
self.assertNotIn(nonexistent_course_id, claims['staff_courses'])
def test_permissions_scope(self):
scopes, claims = self.get_id_token_values('openid profile permissions')
self.assertIn('permissions', scopes)
self.assertFalse(claims['administrator'])
self.user.is_staff = True
self.user.save()
_scopes, claims = self.get_id_token_values('openid profile permissions')
self.assertTrue(claims['administrator'])
class UserInfoTest(BaseTestMixin, UserInfoTestCase):
def setUp(self):
super(UserInfoTest, self).setUp()
# create another course in the DB that only global staff have access to
CourseFactory.create(emit_signals=True)
def token_for_scope(self, scope):
full_scope = 'openid %s' % scope
self.set_access_token_scope(full_scope)
token = self.access_token.token # pylint: disable=no-member
return full_scope, token
def get_with_scope(self, scope):
scope, token = self.token_for_scope(scope)
result, claims = self.get_userinfo(token, scope)
self.assertEqual(result.status_code, 200)
return claims
def get_with_claim_value(self, scope, claim, values):
_full_scope, token = self.token_for_scope(scope)
result, claims = self.get_userinfo(
token,
claims={claim: {'values': values}}
)
self.assertEqual(result.status_code, 200)
return claims
def _assert_role_using_scope(self, scope, claim, assert_one_course=True):
with check_mongo_calls(0):
claims = self.get_with_scope(scope)
self.assertEqual(len(claims), 2)
courses = claims[claim]
self.assertIn(self.course_id, courses)
if assert_one_course:
self.assertEqual(len(courses), 1)
def test_request_global_staff_courses_using_scope(self):
GlobalStaff().add_users(self.user)
self._assert_role_using_scope('course_staff', 'staff_courses', assert_one_course=False)
def test_request_org_staff_courses_using_scope(self):
OrgStaffRole(self.course_key.org).add_users(self.user)
self._assert_role_using_scope('course_staff', 'staff_courses')
def test_request_org_instructor_courses_using_scope(self):
OrgInstructorRole(self.course_key.org).add_users(self.user)
self._assert_role_using_scope('course_instructor', 'instructor_courses')
def test_request_staff_courses_using_scope(self):
CourseStaffRole(self.course_key).add_users(self.user)
self._assert_role_using_scope('course_staff', 'staff_courses')
def test_request_instructor_courses_using_scope(self):
CourseInstructorRole(self.course_key).add_users(self.user)
self._assert_role_using_scope('course_instructor', 'instructor_courses')
def _assert_role_using_claim(self, scope, claim):
values = [self.course_id, 'some_invalid_course']
with check_mongo_calls(0):
claims = self.get_with_claim_value(scope, claim, values)
self.assertEqual(len(claims), 2)
courses = claims[claim]
self.assertIn(self.course_id, courses)
self.assertEqual(len(courses), 1)
def test_request_global_staff_courses_with_claims(self):
GlobalStaff().add_users(self.user)
self._assert_role_using_claim('course_staff', 'staff_courses')
def test_request_org_staff_courses_with_claims(self):
OrgStaffRole(self.course_key.org).add_users(self.user)
self._assert_role_using_claim('course_staff', 'staff_courses')
def test_request_org_instructor_courses_with_claims(self):
OrgInstructorRole(self.course_key.org).add_users(self.user)
self._assert_role_using_claim('course_instructor', 'instructor_courses')
def test_request_staff_courses_with_claims(self):
CourseStaffRole(self.course_key).add_users(self.user)
self._assert_role_using_claim('course_staff', 'staff_courses')
def test_request_instructor_courses_with_claims(self):
CourseInstructorRole(self.course_key).add_users(self.user)
self._assert_role_using_claim('course_instructor', 'instructor_courses')
def test_permissions_scope(self):
claims = self.get_with_scope('permissions')
self.assertIn('administrator', claims)
self.assertFalse(claims['administrator'])
self.user.is_staff = True
self.user.save()
claims = self.get_with_scope('permissions')
self.assertTrue(claims['administrator'])
| agpl-3.0 |
eugene1g/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/tool/multicommandtool_unittest.py | 121 | 7646 | # Copyright (c) 2009 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys
import unittest2 as unittest
from optparse import make_option
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.tool.multicommandtool import MultiCommandTool, Command, TryAgain
class TrivialCommand(Command):
name = "trivial"
show_in_main_help = True
help_text = "help text"
def __init__(self, **kwargs):
Command.__init__(self, **kwargs)
def execute(self, options, args, tool):
pass
class UncommonCommand(TrivialCommand):
name = "uncommon"
show_in_main_help = False
class LikesToRetry(Command):
name = "likes-to-retry"
show_in_main_help = True
help_text = "help text"
def __init__(self, **kwargs):
Command.__init__(self, **kwargs)
self.execute_count = 0
def execute(self, options, args, tool):
self.execute_count += 1
if self.execute_count < 2:
raise TryAgain()
class CommandTest(unittest.TestCase):
def test_name_with_arguments(self):
TrivialCommand.argument_names = "ARG1 ARG2"
command_with_args = TrivialCommand()
self.assertEqual(command_with_args.name_with_arguments(), "trivial ARG1 ARG2")
TrivialCommand.argument_names = None
command_with_args = TrivialCommand(options=[make_option("--my_option")])
self.assertEqual(command_with_args.name_with_arguments(), "trivial [options]")
def test_parse_required_arguments(self):
self.assertEqual(Command._parse_required_arguments("ARG1 ARG2"), ["ARG1", "ARG2"])
self.assertEqual(Command._parse_required_arguments("[ARG1] [ARG2]"), [])
self.assertEqual(Command._parse_required_arguments("[ARG1] ARG2"), ["ARG2"])
# Note: We might make our arg parsing smarter in the future and allow this type of arguments string.
self.assertRaises(Exception, Command._parse_required_arguments, "[ARG1 ARG2]")
def test_required_arguments(self):
TrivialCommand.argument_names = "ARG1 ARG2 [ARG3]"
two_required_arguments = TrivialCommand()
expected_logs = "2 arguments required, 1 argument provided. Provided: 'foo' Required: ARG1 ARG2\nSee 'trivial-tool help trivial' for usage.\n"
exit_code = OutputCapture().assert_outputs(self, two_required_arguments.check_arguments_and_execute, [None, ["foo"], TrivialTool()], expected_logs=expected_logs)
self.assertEqual(exit_code, 1)
TrivialCommand.argument_names = None
class TrivialTool(MultiCommandTool):
def __init__(self, commands=None):
MultiCommandTool.__init__(self, name="trivial-tool", commands=commands)
def path(self):
return __file__
def should_execute_command(self, command):
return (True, None)
class MultiCommandToolTest(unittest.TestCase):
def _assert_split(self, args, expected_split):
self.assertEqual(MultiCommandTool._split_command_name_from_args(args), expected_split)
def test_split_args(self):
# MultiCommandToolTest._split_command_name_from_args returns: (command, args)
full_args = ["--global-option", "command", "--option", "arg"]
full_args_expected = ("command", ["--global-option", "--option", "arg"])
self._assert_split(full_args, full_args_expected)
full_args = []
full_args_expected = (None, [])
self._assert_split(full_args, full_args_expected)
full_args = ["command", "arg"]
full_args_expected = ("command", ["arg"])
self._assert_split(full_args, full_args_expected)
def test_command_by_name(self):
# This also tests Command auto-discovery.
tool = TrivialTool()
self.assertEqual(tool.command_by_name("trivial").name, "trivial")
self.assertEqual(tool.command_by_name("bar"), None)
def _assert_tool_main_outputs(self, tool, main_args, expected_stdout, expected_stderr = "", expected_exit_code=0):
exit_code = OutputCapture().assert_outputs(self, tool.main, [main_args], expected_stdout=expected_stdout, expected_stderr=expected_stderr)
self.assertEqual(exit_code, expected_exit_code)
def test_retry(self):
likes_to_retry = LikesToRetry()
tool = TrivialTool(commands=[likes_to_retry])
tool.main(["tool", "likes-to-retry"])
self.assertEqual(likes_to_retry.execute_count, 2)
def test_global_help(self):
tool = TrivialTool(commands=[TrivialCommand(), UncommonCommand()])
expected_common_commands_help = """Usage: trivial-tool [options] COMMAND [ARGS]
Options:
-h, --help show this help message and exit
Common trivial-tool commands:
trivial help text
See 'trivial-tool help --all-commands' to list all commands.
See 'trivial-tool help COMMAND' for more information on a specific command.
"""
self._assert_tool_main_outputs(tool, ["tool"], expected_common_commands_help)
self._assert_tool_main_outputs(tool, ["tool", "help"], expected_common_commands_help)
expected_all_commands_help = """Usage: trivial-tool [options] COMMAND [ARGS]
Options:
-h, --help show this help message and exit
All trivial-tool commands:
help Display information about this program or its subcommands
trivial help text
uncommon help text
See 'trivial-tool help --all-commands' to list all commands.
See 'trivial-tool help COMMAND' for more information on a specific command.
"""
self._assert_tool_main_outputs(tool, ["tool", "help", "--all-commands"], expected_all_commands_help)
# Test that arguments can be passed before commands as well
self._assert_tool_main_outputs(tool, ["tool", "--all-commands", "help"], expected_all_commands_help)
def test_command_help(self):
TrivialCommand.long_help = "LONG HELP"
command_with_options = TrivialCommand(options=[make_option("--my_option")])
tool = TrivialTool(commands=[command_with_options])
expected_subcommand_help = "trivial [options] help text\n\nLONG HELP\n\nOptions:\n --my_option=MY_OPTION\n\n"
self._assert_tool_main_outputs(tool, ["tool", "help", "trivial"], expected_subcommand_help)
| bsd-3-clause |
lgarren/spack | var/spack/repos/builtin/packages/pathfinder/package.py | 3 | 2190 | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Pathfinder(MakefilePackage):
"""Proxy Application. Signature search."""
homepage = "https://mantevo.org/packages/"
url = "http://mantevo.org/downloads/releaseTarballs/miniapps/PathFinder/PathFinder_1.0.0.tgz"
tags = ['proxy-app']
version('1.0.0', '374269e8d42c305eda3e392444e22dde')
build_targets = ['--directory=PathFinder_ref', 'CC=cc']
def edit(self, spec, prefix):
makefile = FileFilter('PathFinder_ref/Makefile')
makefile.filter('-fopenmp', self.compiler.openmp_flag)
def install(self, spec, prefix):
# Manual installation
mkdirp(prefix.bin)
mkdirp(prefix.doc)
install('PathFinder_ref/PathFinder.x', prefix.bin)
install('PathFinder_ref/MicroTestData.adj_list', prefix.bin)
install('README', prefix.doc)
install_tree('generatedData/', prefix.doc.generatedData)
install_tree('scaleData/', prefix.doc.scaleData)
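    # Illustrative usage (not part of the original recipe): with this package
    # file on a Spack instance's repo path, the proxy app would typically be
    # built and installed with the CLI, e.g. `spack install pathfinder`.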
| lgpl-2.1 |
blueboxgroup/cinder | cinder/volume/drivers/san/hp/hp_lefthand_iscsi.py | 3 | 5725 | # (c) Copyright 2014 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""
Volume driver for HP LeftHand Storage array.
This driver requires 11.5 or greater firmware on the LeftHand array, using
the 1.0 or greater version of the hplefthandclient.
You will need to install the python hplefthandclient.
sudo pip install hplefthandclient
Set the following in the cinder.conf file to enable the
LeftHand Channel Driver along with the required flags:
volume_driver=cinder.volume.drivers.san.hp.hp_lefthand_iscsi.
HPLeftHandISCSIDriver
It also requires the setting of hplefthand_api_url, hplefthand_username,
hplefthand_password for credentials to talk to the REST service on the
LeftHand array.
"""
from cinder import exception
from cinder.i18n import _LE, _LI
from cinder.openstack.common import log as logging
from cinder.volume.driver import VolumeDriver
from cinder.volume.drivers.san.hp import hp_lefthand_cliq_proxy as cliq_proxy
from cinder.volume.drivers.san.hp import hp_lefthand_rest_proxy as rest_proxy
LOG = logging.getLogger(__name__)
MIN_CLIENT_VERSION = '1.0.3'
class HPLeftHandISCSIDriver(VolumeDriver):
"""Executes commands relating to HP/LeftHand SAN ISCSI volumes.
Version history:
1.0.0 - Initial driver
1.0.1 - Added support for retype
1.0.2 - Added support for volume migrate
1.0.3 - Fix for no handler for logger during tests
1.0.4 - Removing locks bug #1395953
"""
VERSION = "1.0.4"
def __init__(self, *args, **kwargs):
super(HPLeftHandISCSIDriver, self).__init__(*args, **kwargs)
self.proxy = None
self.args = args
self.kwargs = kwargs
def _create_proxy(self, *args, **kwargs):
try:
proxy = rest_proxy.HPLeftHandRESTProxy(*args, **kwargs)
except exception.NotFound:
proxy = cliq_proxy.HPLeftHandCLIQProxy(*args, **kwargs)
return proxy
def check_for_setup_error(self):
self.proxy.check_for_setup_error()
def do_setup(self, context):
self.proxy = self._create_proxy(*self.args, **self.kwargs)
LOG.info(_LI("HPLeftHand driver %(driver_ver)s, "
"proxy %(proxy_ver)s") % {
"driver_ver": self.VERSION,
"proxy_ver": self.proxy.get_version_string()})
if isinstance(self.proxy, cliq_proxy.HPLeftHandCLIQProxy):
self.proxy.do_setup(context)
else:
# Check minimum client version for REST proxy
client_version = rest_proxy.hplefthandclient.version
if (client_version < MIN_CLIENT_VERSION):
ex_msg = (_LE("Invalid hplefthandclient version found ("
"%(found)s). Version %(minimum)s or greater "
"required.")
% {'found': client_version,
'minimum': MIN_CLIENT_VERSION})
LOG.error(ex_msg)
raise exception.InvalidInput(reason=ex_msg)
def create_volume(self, volume):
"""Creates a volume."""
return self.proxy.create_volume(volume)
def extend_volume(self, volume, new_size):
"""Extend the size of an existing volume."""
self.proxy.extend_volume(volume, new_size)
def create_volume_from_snapshot(self, volume, snapshot):
"""Creates a volume from a snapshot."""
return self.proxy.create_volume_from_snapshot(volume, snapshot)
def create_snapshot(self, snapshot):
"""Creates a snapshot."""
self.proxy.create_snapshot(snapshot)
def delete_volume(self, volume):
"""Deletes a volume."""
self.proxy.delete_volume(volume)
def delete_snapshot(self, snapshot):
"""Deletes a snapshot."""
self.proxy.delete_snapshot(snapshot)
def initialize_connection(self, volume, connector):
"""Assigns the volume to a server."""
return self.proxy.initialize_connection(volume, connector)
def terminate_connection(self, volume, connector, **kwargs):
"""Unassign the volume from the host."""
self.proxy.terminate_connection(volume, connector)
def get_volume_stats(self, refresh):
data = self.proxy.get_volume_stats(refresh)
data['driver_version'] = self.VERSION
return data
def create_cloned_volume(self, volume, src_vref):
return self.proxy.create_cloned_volume(volume, src_vref)
def create_export(self, context, volume):
return self.proxy.create_export(context, volume)
def ensure_export(self, context, volume):
return self.proxy.ensure_export(context, volume)
def remove_export(self, context, volume):
return self.proxy.remove_export(context, volume)
def retype(self, context, volume, new_type, diff, host):
"""Convert the volume to be of the new type."""
return self.proxy.retype(context, volume, new_type, diff, host)
def migrate_volume(self, ctxt, volume, host):
"""Migrate directly if source and dest are managed by same storage."""
return self.proxy.migrate_volume(ctxt, volume, host)
| apache-2.0 |
mahak/spark | examples/src/main/python/mllib/naive_bayes_example.py | 27 | 2246 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
NaiveBayes Example.
Usage:
`spark-submit --master local[4] examples/src/main/python/mllib/naive_bayes_example.py`
"""
import shutil
from pyspark import SparkContext
# $example on$
from pyspark.mllib.classification import NaiveBayes, NaiveBayesModel
from pyspark.mllib.util import MLUtils
# $example off$
if __name__ == "__main__":
sc = SparkContext(appName="PythonNaiveBayesExample")
# $example on$
# Load and parse the data file.
data = MLUtils.loadLibSVMFile(sc, "data/mllib/sample_libsvm_data.txt")
# Split data approximately into training (60%) and test (40%)
training, test = data.randomSplit([0.6, 0.4])
# Train a naive Bayes model.
model = NaiveBayes.train(training, 1.0)
# Make prediction and test accuracy.
predictionAndLabel = test.map(lambda p: (model.predict(p.features), p.label))
accuracy = 1.0 * predictionAndLabel.filter(lambda pl: pl[0] == pl[1]).count() / test.count()
print('model accuracy {}'.format(accuracy))
# Save and load model
output_dir = 'target/tmp/myNaiveBayesModel'
shutil.rmtree(output_dir, ignore_errors=True)
model.save(sc, output_dir)
sameModel = NaiveBayesModel.load(sc, output_dir)
predictionAndLabel = test.map(lambda p: (sameModel.predict(p.features), p.label))
accuracy = 1.0 * predictionAndLabel.filter(lambda pl: pl[0] == pl[1]).count() / test.count()
print('sameModel accuracy {}'.format(accuracy))
# $example off$
| apache-2.0 |
xHeliotrope/injustice_dropper | env/lib/python3.4/site-packages/setuptools/command/rotate.py | 461 | 2038 | from distutils.util import convert_path
from distutils import log
from distutils.errors import DistutilsOptionError
import os
from setuptools import Command
from setuptools.compat import basestring
class rotate(Command):
"""Delete older distributions"""
description = "delete older distributions, keeping N newest files"
user_options = [
('match=', 'm', "patterns to match (required)"),
('dist-dir=', 'd', "directory where the distributions are"),
('keep=', 'k', "number of matching distributions to keep"),
]
boolean_options = []
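    # Illustrative invocation (not part of the original source), assuming a
    # project that already has .egg/.zip distributions in its dist directory:
    #   python setup.py rotate --match=.egg,.zip --keep=2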
def initialize_options(self):
self.match = None
self.dist_dir = None
self.keep = None
def finalize_options(self):
if self.match is None:
raise DistutilsOptionError(
"Must specify one or more (comma-separated) match patterns "
"(e.g. '.zip' or '.egg')"
)
if self.keep is None:
raise DistutilsOptionError("Must specify number of files to keep")
try:
self.keep = int(self.keep)
except ValueError:
raise DistutilsOptionError("--keep must be an integer")
if isinstance(self.match, basestring):
self.match = [
convert_path(p.strip()) for p in self.match.split(',')
]
self.set_undefined_options('bdist', ('dist_dir', 'dist_dir'))
def run(self):
self.run_command("egg_info")
from glob import glob
for pattern in self.match:
pattern = self.distribution.get_name() + '*' + pattern
files = glob(os.path.join(self.dist_dir, pattern))
files = [(os.path.getmtime(f), f) for f in files]
files.sort()
files.reverse()
log.info("%d file(s) matching %s", len(files), pattern)
files = files[self.keep:]
for (t, f) in files:
log.info("Deleting %s", f)
if not self.dry_run:
os.unlink(f)
| mit |
GitHublong/hue | desktop/core/ext-py/boto-2.38.0/boto/pyami/installers/ubuntu/__init__.py | 205 | 1112 | # Copyright (c) 2006,2007,2008 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
| apache-2.0 |
crosick/zhishu | ENV/lib/python2.7/site-packages/pip/wheel.py | 187 | 30186 | """
Support for installing and building the "wheel" binary package format.
"""
from __future__ import absolute_import
import compileall
import csv
import errno
import functools
import hashlib
import logging
import os
import os.path
import re
import shutil
import stat
import sys
import tempfile
import warnings
from base64 import urlsafe_b64encode
from email.parser import Parser
from pip._vendor.six import StringIO
import pip
from pip.download import path_to_url, unpack_url
from pip.exceptions import InvalidWheelFilename, UnsupportedWheel
from pip.locations import distutils_scheme, PIP_DELETE_MARKER_FILENAME
from pip import pep425tags
from pip.utils import (
call_subprocess, ensure_dir, make_path_relative, captured_stdout,
rmtree)
from pip.utils.logging import indent_log
from pip._vendor.distlib.scripts import ScriptMaker
from pip._vendor import pkg_resources
from pip._vendor.six.moves import configparser
wheel_ext = '.whl'
VERSION_COMPATIBLE = (1, 0)
logger = logging.getLogger(__name__)
class WheelCache(object):
"""A cache of wheels for future installs."""
def __init__(self, cache_dir, format_control):
"""Create a wheel cache.
:param cache_dir: The root of the cache.
:param format_control: A pip.index.FormatControl object to limit
binaries being read from the cache.
"""
self._cache_dir = os.path.expanduser(cache_dir) if cache_dir else None
self._format_control = format_control
def cached_wheel(self, link, package_name):
return cached_wheel(
self._cache_dir, link, self._format_control, package_name)
def _cache_for_link(cache_dir, link):
"""
Return a directory to store cached wheels in for link.
Because there are M wheels for any one sdist, we provide a directory
to cache them in, and then consult that directory when looking up
cache hits.
We only insert things into the cache if they have plausible version
numbers, so that we don't contaminate the cache with things that were not
unique. E.g. ./package might have dozens of installs done for it and build
a version of 0.0...and if we built and cached a wheel, we'd end up using
the same wheel even if the source has been edited.
:param cache_dir: The cache_dir being used by pip.
:param link: The link of the sdist for which this will cache wheels.
"""
# We want to generate an url to use as our cache key, we don't want to just
# re-use the URL because it might have other items in the fragment and we
# don't care about those.
key_parts = [link.url_without_fragment]
if link.hash_name is not None and link.hash is not None:
key_parts.append("=".join([link.hash_name, link.hash]))
key_url = "#".join(key_parts)
# Encode our key url with sha224, we'll use this because it has similar
# security properties to sha256, but with a shorter total output (and thus
# less secure). However the differences don't make a lot of difference for
# our use case here.
hashed = hashlib.sha224(key_url.encode()).hexdigest()
# We want to nest the directories some to prevent having a ton of top level
# directories where we might run out of sub directories on some FS.
parts = [hashed[:2], hashed[2:4], hashed[4:6], hashed[6:]]
# Inside of the base location for cached wheels, expand our parts and join
# them all together.
return os.path.join(cache_dir, "wheels", *parts)
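# Illustrative walk-through of the layout above (hypothetical values): for a
# link like https://example.com/pkg-1.0.tar.gz#sha256=abcd, key_url re-joins
# the fragment-free URL with its "sha256=abcd" hash part, and wheels built
# from that sdist land in <cache_dir>/wheels/<h[:2]>/<h[2:4]>/<h[4:6]>/<h[6:]>
# where h is the sha224 hex digest of key_url.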
def cached_wheel(cache_dir, link, format_control, package_name):
if not cache_dir:
return link
if not link:
return link
if link.is_wheel:
return link
if not link.is_artifact:
return link
if not package_name:
return link
canonical_name = pkg_resources.safe_name(package_name).lower()
formats = pip.index.fmt_ctl_formats(format_control, canonical_name)
if "binary" not in formats:
return link
root = _cache_for_link(cache_dir, link)
try:
wheel_names = os.listdir(root)
except OSError as e:
if e.errno in (errno.ENOENT, errno.ENOTDIR):
return link
raise
candidates = []
for wheel_name in wheel_names:
try:
wheel = Wheel(wheel_name)
except InvalidWheelFilename:
continue
if not wheel.supported():
# Built for a different python/arch/etc
continue
candidates.append((wheel.support_index_min(), wheel_name))
if not candidates:
return link
candidates.sort()
path = os.path.join(root, candidates[0][1])
return pip.index.Link(path_to_url(path), trusted=True)
def rehash(path, algo='sha256', blocksize=1 << 20):
"""Return (hash, length) for path using hashlib.new(algo)"""
h = hashlib.new(algo)
length = 0
with open(path, 'rb') as f:
block = f.read(blocksize)
while block:
length += len(block)
h.update(block)
block = f.read(blocksize)
digest = 'sha256=' + urlsafe_b64encode(
h.digest()
).decode('latin1').rstrip('=')
return (digest, length)
def open_for_csv(name, mode):
if sys.version_info[0] < 3:
nl = {}
bin = 'b'
else:
nl = {'newline': ''}
bin = ''
return open(name, mode + bin, **nl)
def fix_script(path):
"""Replace #!python with #!/path/to/python
Return True if file was changed."""
# XXX RECORD hashes will need to be updated
if os.path.isfile(path):
with open(path, 'rb') as script:
firstline = script.readline()
if not firstline.startswith(b'#!python'):
return False
exename = sys.executable.encode(sys.getfilesystemencoding())
firstline = b'#!' + exename + os.linesep.encode("ascii")
rest = script.read()
with open(path, 'wb') as script:
script.write(firstline)
script.write(rest)
return True
dist_info_re = re.compile(r"""^(?P<namever>(?P<name>.+?)(-(?P<ver>\d.+?))?)
\.dist-info$""", re.VERBOSE)
def root_is_purelib(name, wheeldir):
"""
Return True if the extracted wheel in wheeldir should go into purelib.
"""
name_folded = name.replace("-", "_")
for item in os.listdir(wheeldir):
match = dist_info_re.match(item)
if match and match.group('name') == name_folded:
with open(os.path.join(wheeldir, item, 'WHEEL')) as wheel:
for line in wheel:
line = line.lower().rstrip()
if line == "root-is-purelib: true":
return True
return False
def get_entrypoints(filename):
if not os.path.exists(filename):
return {}, {}
# This is done because you can pass a string to entry_points wrappers which
# means that they may or may not be valid INI files. The attempt here is to
# strip leading and trailing whitespace in order to make them valid INI
# files.
with open(filename) as fp:
data = StringIO()
for line in fp:
data.write(line.strip())
data.write("\n")
data.seek(0)
cp = configparser.RawConfigParser()
cp.readfp(data)
console = {}
gui = {}
if cp.has_section('console_scripts'):
console = dict(cp.items('console_scripts'))
if cp.has_section('gui_scripts'):
gui = dict(cp.items('gui_scripts'))
return console, gui
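# Illustrative entry_points.txt content that the parser above understands
# (names are hypothetical):
#
#   [console_scripts]
#   mytool = mypkg.cli:main
#
#   [gui_scripts]
#   mytool-gui = mypkg.gui:main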
def move_wheel_files(name, req, wheeldir, user=False, home=None, root=None,
pycompile=True, scheme=None, isolated=False):
"""Install a wheel"""
if not scheme:
scheme = distutils_scheme(
name, user=user, home=home, root=root, isolated=isolated
)
if root_is_purelib(name, wheeldir):
lib_dir = scheme['purelib']
else:
lib_dir = scheme['platlib']
info_dir = []
data_dirs = []
source = wheeldir.rstrip(os.path.sep) + os.path.sep
# Record details of the files moved
# installed = files copied from the wheel to the destination
# changed = files changed while installing (scripts #! line typically)
# generated = files newly generated during the install (script wrappers)
installed = {}
changed = set()
generated = []
# Compile all of the pyc files that we're going to be installing
if pycompile:
with captured_stdout() as stdout:
with warnings.catch_warnings():
warnings.filterwarnings('ignore')
compileall.compile_dir(source, force=True, quiet=True)
logger.debug(stdout.getvalue())
def normpath(src, p):
return make_path_relative(src, p).replace(os.path.sep, '/')
def record_installed(srcfile, destfile, modified=False):
"""Map archive RECORD paths to installation RECORD paths."""
oldpath = normpath(srcfile, wheeldir)
newpath = normpath(destfile, lib_dir)
installed[oldpath] = newpath
if modified:
changed.add(destfile)
def clobber(source, dest, is_base, fixer=None, filter=None):
ensure_dir(dest) # common for the 'include' path
for dir, subdirs, files in os.walk(source):
basedir = dir[len(source):].lstrip(os.path.sep)
destdir = os.path.join(dest, basedir)
if is_base and basedir.split(os.path.sep, 1)[0].endswith('.data'):
continue
for s in subdirs:
destsubdir = os.path.join(dest, basedir, s)
if is_base and basedir == '' and destsubdir.endswith('.data'):
data_dirs.append(s)
continue
elif (is_base and
s.endswith('.dist-info') and
# is self.req.project_name case preserving?
s.lower().startswith(
req.project_name.replace('-', '_').lower())):
assert not info_dir, 'Multiple .dist-info directories'
info_dir.append(destsubdir)
for f in files:
# Skip unwanted files
if filter and filter(f):
continue
srcfile = os.path.join(dir, f)
destfile = os.path.join(dest, basedir, f)
# directory creation is lazy and after the file filtering above
# to ensure we don't install empty dirs; empty dirs can't be
# uninstalled.
ensure_dir(destdir)
# We use copyfile (not move, copy, or copy2) to be extra sure
# that we are not moving directories over (copyfile fails for
# directories) as well as to ensure that we are not copying
# over any metadata because we want more control over what
# metadata we actually copy over.
shutil.copyfile(srcfile, destfile)
# Copy over the metadata for the file, currently this only
# includes the atime and mtime.
st = os.stat(srcfile)
if hasattr(os, "utime"):
os.utime(destfile, (st.st_atime, st.st_mtime))
# If our file is executable, then make our destination file
# executable.
if os.access(srcfile, os.X_OK):
st = os.stat(srcfile)
permissions = (
st.st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
)
os.chmod(destfile, permissions)
changed = False
if fixer:
changed = fixer(destfile)
record_installed(srcfile, destfile, changed)
clobber(source, lib_dir, True)
assert info_dir, "%s .dist-info directory not found" % req
# Get the defined entry points
ep_file = os.path.join(info_dir[0], 'entry_points.txt')
console, gui = get_entrypoints(ep_file)
def is_entrypoint_wrapper(name):
# EP, EP.exe and EP-script.py are scripts generated for
# entry point EP by setuptools
if name.lower().endswith('.exe'):
matchname = name[:-4]
elif name.lower().endswith('-script.py'):
matchname = name[:-10]
elif name.lower().endswith(".pya"):
matchname = name[:-4]
else:
matchname = name
# Ignore setuptools-generated scripts
return (matchname in console or matchname in gui)
for datadir in data_dirs:
fixer = None
filter = None
for subdir in os.listdir(os.path.join(wheeldir, datadir)):
fixer = None
if subdir == 'scripts':
fixer = fix_script
filter = is_entrypoint_wrapper
source = os.path.join(wheeldir, datadir, subdir)
dest = scheme[subdir]
clobber(source, dest, False, fixer=fixer, filter=filter)
maker = ScriptMaker(None, scheme['scripts'])
# Ensure old scripts are overwritten.
# See https://github.com/pypa/pip/issues/1800
maker.clobber = True
# Ensure we don't generate any variants for scripts because this is almost
# never what somebody wants.
# See https://bitbucket.org/pypa/distlib/issue/35/
maker.variants = set(('', ))
# This is required because otherwise distlib creates scripts that are not
# executable.
# See https://bitbucket.org/pypa/distlib/issue/32/
maker.set_mode = True
# Simplify the script and fix the fact that the default script swallows
# every single stack trace.
# See https://bitbucket.org/pypa/distlib/issue/34/
# See https://bitbucket.org/pypa/distlib/issue/33/
def _get_script_text(entry):
return maker.script_template % {
"module": entry.prefix,
"import_name": entry.suffix.split(".")[0],
"func": entry.suffix,
}
maker._get_script_text = _get_script_text
maker.script_template = """# -*- coding: utf-8 -*-
import re
import sys
from %(module)s import %(import_name)s
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(%(func)s())
"""
# Special case pip and setuptools to generate versioned wrappers
#
# The issue is that some projects (specifically, pip and setuptools) use
# code in setup.py to create "versioned" entry points - pip2.7 on Python
# 2.7, pip3.3 on Python 3.3, etc. But these entry points are baked into
# the wheel metadata at build time, and so if the wheel is installed with
# a *different* version of Python the entry points will be wrong. The
# correct fix for this is to enhance the metadata to be able to describe
# such versioned entry points, but that won't happen till Metadata 2.0 is
# available.
# In the meantime, projects using versioned entry points will either have
# incorrect versioned entry points, or they will not be able to distribute
# "universal" wheels (i.e., they will need a wheel per Python version).
#
# Because setuptools and pip are bundled with _ensurepip and virtualenv,
# we need to use universal wheels. So, as a stopgap until Metadata 2.0, we
# override the versioned entry points in the wheel and generate the
    # correct ones. This code is purely a short-term measure until Metadata 2.0
# is available.
#
# To add the level of hack in this section of code, in order to support
# ensurepip this code will look for an ``ENSUREPIP_OPTIONS`` environment
# variable which will control which version scripts get installed.
#
# ENSUREPIP_OPTIONS=altinstall
# - Only pipX.Y and easy_install-X.Y will be generated and installed
# ENSUREPIP_OPTIONS=install
# - pipX.Y, pipX, easy_install-X.Y will be generated and installed. Note
# that this option is technically if ENSUREPIP_OPTIONS is set and is
# not altinstall
# DEFAULT
# - The default behavior is to install pip, pipX, pipX.Y, easy_install
# and easy_install-X.Y.
pip_script = console.pop('pip', None)
if pip_script:
if "ENSUREPIP_OPTIONS" not in os.environ:
spec = 'pip = ' + pip_script
generated.extend(maker.make(spec))
if os.environ.get("ENSUREPIP_OPTIONS", "") != "altinstall":
spec = 'pip%s = %s' % (sys.version[:1], pip_script)
generated.extend(maker.make(spec))
spec = 'pip%s = %s' % (sys.version[:3], pip_script)
generated.extend(maker.make(spec))
# Delete any other versioned pip entry points
pip_ep = [k for k in console if re.match(r'pip(\d(\.\d)?)?$', k)]
for k in pip_ep:
del console[k]
easy_install_script = console.pop('easy_install', None)
if easy_install_script:
if "ENSUREPIP_OPTIONS" not in os.environ:
spec = 'easy_install = ' + easy_install_script
generated.extend(maker.make(spec))
spec = 'easy_install-%s = %s' % (sys.version[:3], easy_install_script)
generated.extend(maker.make(spec))
# Delete any other versioned easy_install entry points
easy_install_ep = [
k for k in console if re.match(r'easy_install(-\d\.\d)?$', k)
]
for k in easy_install_ep:
del console[k]
# Generate the console and GUI entry points specified in the wheel
if len(console) > 0:
generated.extend(
maker.make_multiple(['%s = %s' % kv for kv in console.items()])
)
if len(gui) > 0:
generated.extend(
maker.make_multiple(
['%s = %s' % kv for kv in gui.items()],
{'gui': True}
)
)
record = os.path.join(info_dir[0], 'RECORD')
temp_record = os.path.join(info_dir[0], 'RECORD.pip')
with open_for_csv(record, 'r') as record_in:
with open_for_csv(temp_record, 'w+') as record_out:
reader = csv.reader(record_in)
writer = csv.writer(record_out)
for row in reader:
row[0] = installed.pop(row[0], row[0])
if row[0] in changed:
row[1], row[2] = rehash(row[0])
writer.writerow(row)
for f in generated:
h, l = rehash(f)
writer.writerow((f, h, l))
for f in installed:
writer.writerow((installed[f], '', ''))
shutil.move(temp_record, record)
def _unique(fn):
@functools.wraps(fn)
def unique(*args, **kw):
seen = set()
for item in fn(*args, **kw):
if item not in seen:
seen.add(item)
yield item
return unique
# TODO: this goes somewhere besides the wheel module
@_unique
def uninstallation_paths(dist):
"""
Yield all the uninstallation paths for dist based on RECORD-without-.pyc
Yield paths to all the files in RECORD. For each .py file in RECORD, add
the .pyc in the same directory.
UninstallPathSet.add() takes care of the __pycache__ .pyc.
"""
from pip.utils import FakeFile # circular import
r = csv.reader(FakeFile(dist.get_metadata_lines('RECORD')))
for row in r:
path = os.path.join(dist.location, row[0])
yield path
if path.endswith('.py'):
dn, fn = os.path.split(path)
base = fn[:-3]
path = os.path.join(dn, base + '.pyc')
yield path
def wheel_version(source_dir):
"""
Return the Wheel-Version of an extracted wheel, if possible.
Otherwise, return False if we couldn't parse / extract it.
"""
try:
dist = [d for d in pkg_resources.find_on_path(None, source_dir)][0]
wheel_data = dist.get_metadata('WHEEL')
wheel_data = Parser().parsestr(wheel_data)
version = wheel_data['Wheel-Version'].strip()
version = tuple(map(int, version.split('.')))
return version
except:
return False
def check_compatibility(version, name):
"""
Raises errors or warns if called with an incompatible Wheel-Version.
Pip should refuse to install a Wheel-Version that's a major series
ahead of what it's compatible with (e.g 2.0 > 1.1); and warn when
installing a version only minor version ahead (e.g 1.2 > 1.1).
version: a 2-tuple representing a Wheel-Version (Major, Minor)
name: name of wheel or package to raise exception about
:raises UnsupportedWheel: when an incompatible Wheel-Version is given
"""
if not version:
raise UnsupportedWheel(
"%s is in an unsupported or invalid wheel" % name
)
if version[0] > VERSION_COMPATIBLE[0]:
raise UnsupportedWheel(
"%s's Wheel-Version (%s) is not compatible with this version "
"of pip" % (name, '.'.join(map(str, version)))
)
elif version > VERSION_COMPATIBLE:
logger.warning(
'Installing from a newer Wheel-Version (%s)',
'.'.join(map(str, version)),
)
class Wheel(object):
"""A wheel file"""
# TODO: maybe move the install code into this class
wheel_file_re = re.compile(
r"""^(?P<namever>(?P<name>.+?)-(?P<ver>\d.*?))
((-(?P<build>\d.*?))?-(?P<pyver>.+?)-(?P<abi>.+?)-(?P<plat>.+?)
\.whl|\.dist-info)$""",
re.VERBOSE
)
def __init__(self, filename):
"""
:raises InvalidWheelFilename: when the filename is invalid for a wheel
"""
wheel_info = self.wheel_file_re.match(filename)
if not wheel_info:
raise InvalidWheelFilename(
"%s is not a valid wheel filename." % filename
)
self.filename = filename
self.name = wheel_info.group('name').replace('_', '-')
# we'll assume "_" means "-" due to wheel naming scheme
# (https://github.com/pypa/pip/issues/1150)
self.version = wheel_info.group('ver').replace('_', '-')
self.pyversions = wheel_info.group('pyver').split('.')
self.abis = wheel_info.group('abi').split('.')
self.plats = wheel_info.group('plat').split('.')
# All the tag combinations from this file
self.file_tags = set(
(x, y, z) for x in self.pyversions
for y in self.abis for z in self.plats
)
def support_index_min(self, tags=None):
"""
Return the lowest index that one of the wheel's file_tag combinations
achieves in the supported_tags list e.g. if there are 8 supported tags,
and one of the file tags is first in the list, then return 0. Returns
        None if the wheel is not supported.
"""
if tags is None: # for mock
tags = pep425tags.supported_tags
indexes = [tags.index(c) for c in self.file_tags if c in tags]
return min(indexes) if indexes else None
def supported(self, tags=None):
"""Is this wheel supported on this system?"""
if tags is None: # for mock
tags = pep425tags.supported_tags
return bool(set(tags).intersection(self.file_tags))
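    # Illustrative only (not part of the original module): given the filename
    # pattern above, Wheel('pip-6.0-py2.py3-none-any.whl') parses to
    # name='pip', version='6.0', pyversions=['py2', 'py3'], abis=['none'],
    # plats=['any'], so file_tags holds ('py2', 'none', 'any') and
    # ('py3', 'none', 'any').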
class WheelBuilder(object):
"""Build wheels from a RequirementSet."""
def __init__(self, requirement_set, finder, build_options=None,
global_options=None):
self.requirement_set = requirement_set
self.finder = finder
self._cache_root = requirement_set._wheel_cache._cache_dir
self._wheel_dir = requirement_set.wheel_download_dir
self.build_options = build_options or []
self.global_options = global_options or []
def _build_one(self, req, output_dir):
"""Build one wheel.
:return: The filename of the built wheel, or None if the build failed.
"""
tempd = tempfile.mkdtemp('pip-wheel-')
try:
if self.__build_one(req, tempd):
try:
wheel_name = os.listdir(tempd)[0]
wheel_path = os.path.join(output_dir, wheel_name)
shutil.move(os.path.join(tempd, wheel_name), wheel_path)
logger.info('Stored in directory: %s', output_dir)
return wheel_path
except:
return None
return None
finally:
rmtree(tempd)
def __build_one(self, req, tempd):
base_args = [
sys.executable, '-c',
"import setuptools;__file__=%r;"
"exec(compile(open(__file__).read().replace('\\r\\n', '\\n'), "
"__file__, 'exec'))" % req.setup_py
] + list(self.global_options)
logger.info('Running setup.py bdist_wheel for %s', req.name)
logger.debug('Destination directory: %s', tempd)
wheel_args = base_args + ['bdist_wheel', '-d', tempd] \
+ self.build_options
try:
call_subprocess(wheel_args, cwd=req.source_dir, show_stdout=False)
return True
except:
logger.error('Failed building wheel for %s', req.name)
return False
def build(self, autobuilding=False):
"""Build wheels.
        :param autobuilding: If True, replace the sdist we built from with the
            newly built wheel, in preparation for installation.
:return: True if all the wheels built correctly.
"""
assert self._wheel_dir or (autobuilding and self._cache_root)
# unpack sdists and constructs req set
self.requirement_set.prepare_files(self.finder)
reqset = self.requirement_set.requirements.values()
buildset = []
for req in reqset:
if req.constraint:
continue
if req.is_wheel:
if not autobuilding:
logger.info(
'Skipping %s, due to already being wheel.', req.name)
elif req.editable:
if not autobuilding:
logger.info(
'Skipping bdist_wheel for %s, due to being editable',
req.name)
elif autobuilding and req.link and not req.link.is_artifact:
pass
elif autobuilding and not req.source_dir:
pass
else:
if autobuilding:
link = req.link
base, ext = link.splitext()
if pip.index.egg_info_matches(base, None, link) is None:
# Doesn't look like a package - don't autobuild a wheel
# because we'll have no way to lookup the result sanely
continue
if "binary" not in pip.index.fmt_ctl_formats(
self.finder.format_control,
pkg_resources.safe_name(req.name).lower()):
logger.info(
"Skipping bdist_wheel for %s, due to binaries "
"being disabled for it.", req.name)
continue
buildset.append(req)
if not buildset:
return True
# Build the wheels.
logger.info(
'Building wheels for collected packages: %s',
', '.join([req.name for req in buildset]),
)
with indent_log():
build_success, build_failure = [], []
for req in buildset:
if autobuilding:
output_dir = _cache_for_link(self._cache_root, req.link)
try:
ensure_dir(output_dir)
except OSError as e:
logger.warn("Building wheel for %s failed: %s",
req.name, e)
build_failure.append(req)
continue
else:
output_dir = self._wheel_dir
wheel_file = self._build_one(req, output_dir)
if wheel_file:
build_success.append(req)
if autobuilding:
# XXX: This is mildly duplicative with prepare_files,
# but not close enough to pull out to a single common
# method.
# The code below assumes temporary source dirs -
# prevent it doing bad things.
if req.source_dir and not os.path.exists(os.path.join(
req.source_dir, PIP_DELETE_MARKER_FILENAME)):
raise AssertionError(
"bad source dir - missing marker")
# Delete the source we built the wheel from
req.remove_temporary_source()
# set the build directory again - name is known from
# the work prepare_files did.
req.source_dir = req.build_location(
self.requirement_set.build_dir)
# Update the link for this.
req.link = pip.index.Link(
path_to_url(wheel_file), trusted=True)
assert req.link.is_wheel
# extract the wheel into the dir
unpack_url(
req.link, req.source_dir, None, False,
session=self.requirement_set.session)
else:
build_failure.append(req)
# notify success/failure
if build_success:
logger.info(
'Successfully built %s',
' '.join([req.name for req in build_success]),
)
if build_failure:
logger.info(
'Failed to build %s',
' '.join([req.name for req in build_failure]),
)
# Return True if all builds were successful
return len(build_failure) == 0
| mit |
bowlofstew/Herd | herd/BitTornado/launchmanycore.py | 5 | 12499 | #!/usr/bin/env python
# Written by John Hoffman
# see LICENSE.txt for license information
from BitTornado import PSYCO
if PSYCO.psyco:
try:
import psyco
assert psyco.__version__ >= 0x010100f0
psyco.full()
except:
pass
from download_bt1 import BT1Download
from RawServer import RawServer, UPnP_ERROR
from RateLimiter import RateLimiter
from ServerPortHandler import MultiHandler
from parsedir import parsedir
from natpunch import UPnP_test
from random import seed
from socket import error as socketerror
from threading import Event
from sys import argv, exit
import sys, os
from clock import clock
from __init__ import createPeerID, mapbase64, version
from cStringIO import StringIO
from traceback import print_exc
try:
True
except:
True = 1
False = 0
def fmttime(n):
try:
n = int(n) # n may be None or too large
assert n < 5184000 # 60 days
except:
return 'downloading'
m, s = divmod(n, 60)
h, m = divmod(m, 60)
return '%d:%02d:%02d' % (h, m, s)
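# Illustrative values (not part of the original source): fmttime(3661)
# returns '1:01:01', while fmttime(None) takes the except branch and returns
# 'downloading'.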
class SingleDownload:
def __init__(self, controller, hash, response, config, myid):
self.controller = controller
self.hash = hash
self.response = response
self.config = config
self.doneflag = Event()
self.waiting = True
self.checking = False
self.working = False
self.seed = False
self.closed = False
self.status_msg = ''
self.status_err = ['']
self.status_errtime = 0
self.status_done = 0.0
self.rawserver = controller.handler.newRawServer(hash, self.doneflag)
d = BT1Download(self.display, self.finished, self.error,
controller.exchandler, self.doneflag, config, response,
hash, myid, self.rawserver, controller.listen_port)
self.d = d
def start(self):
if not self.d.saveAs(self.saveAs):
self._shutdown()
return
self._hashcheckfunc = self.d.initFiles()
if not self._hashcheckfunc:
self._shutdown()
return
self.controller.hashchecksched(self.hash)
def saveAs(self, name, length, saveas, isdir):
return self.controller.saveAs(self.hash, name, saveas, isdir)
def hashcheck_start(self, donefunc):
if self.is_dead():
self._shutdown()
return
self.waiting = False
self.checking = True
self._hashcheckfunc(donefunc)
def hashcheck_callback(self):
self.checking = False
if self.is_dead():
self._shutdown()
return
if not self.d.startEngine(ratelimiter = self.controller.ratelimiter):
self._shutdown()
return
self.d.startRerequester()
self.statsfunc = self.d.startStats()
self.rawserver.start_listening(self.d.getPortHandler())
self.working = True
def is_dead(self):
return self.doneflag.isSet()
def _shutdown(self):
self.shutdown(False)
def shutdown(self, quiet=True):
if self.closed:
return
self.doneflag.set()
self.rawserver.shutdown()
if self.checking or self.working:
self.d.shutdown()
self.waiting = False
self.checking = False
self.working = False
self.closed = True
self.controller.was_stopped(self.hash)
if not quiet:
self.controller.died(self.hash)
def display(self, activity = None, fractionDone = None):
# really only used by StorageWrapper now
if activity:
self.status_msg = activity
if fractionDone is not None:
self.status_done = float(fractionDone)
def finished(self):
self.seed = True
def error(self, msg):
if self.doneflag.isSet():
self._shutdown()
self.status_err.append(msg)
self.status_errtime = clock()
class LaunchMany:
def __init__(self, config, Output):
try:
self.config = config
self.Output = Output
self.torrent_dir = config['torrent_dir']
self.torrent_cache = {}
self.file_cache = {}
self.blocked_files = {}
self.scan_period = config['parse_dir_interval']
self.stats_period = config['display_interval']
self.torrent_list = []
self.downloads = {}
self.counter = 0
self.doneflag = Event()
self.hashcheck_queue = []
self.hashcheck_current = None
self.rawserver = RawServer(self.doneflag, config['timeout_check_interval'],
config['timeout'], ipv6_enable = config['ipv6_enabled'],
failfunc = self.failed, errorfunc = self.exchandler)
upnp_type = UPnP_test(config['upnp_nat_access'])
while True:
try:
self.listen_port = self.rawserver.find_and_bind(
config['minport'], config['maxport'], config['bind'],
ipv6_socket_style = config['ipv6_binds_v4'],
upnp = upnp_type, randomizer = config['random_port'])
break
except socketerror, e:
if upnp_type and e == UPnP_ERROR:
self.Output.message('WARNING: COULD NOT FORWARD VIA UPnP')
upnp_type = 0
continue
self.failed("Couldn't listen - " + str(e))
return
self.ratelimiter = RateLimiter(self.rawserver.add_task,
config['upload_unit_size'])
self.ratelimiter.set_upload_rate(config['max_upload_rate'])
self.handler = MultiHandler(self.rawserver, self.doneflag)
seed(createPeerID())
self.rawserver.add_task(self.scan, 0)
self.rawserver.add_task(self.stats, 0)
self.handler.listen_forever()
self.Output.message('shutting down')
self.hashcheck_queue = []
for hash in self.torrent_list:
self.Output.message('dropped "'+self.torrent_cache[hash]['path']+'"')
self.downloads[hash].shutdown()
self.rawserver.shutdown()
except:
data = StringIO()
print_exc(file = data)
Output.exception(data.getvalue())
def scan(self):
self.rawserver.add_task(self.scan, self.scan_period)
r = parsedir(self.torrent_dir, self.torrent_cache,
self.file_cache, self.blocked_files,
return_metainfo = True, errfunc = self.Output.message)
( self.torrent_cache, self.file_cache, self.blocked_files,
added, removed ) = r
for hash, data in removed.items():
self.Output.message('dropped "'+data['path']+'"')
self.remove(hash)
for hash, data in added.items():
self.Output.message('added "'+data['path']+'"')
self.add(hash, data)
def stats(self):
self.rawserver.add_task(self.stats, self.stats_period)
data = []
for hash in self.torrent_list:
cache = self.torrent_cache[hash]
if self.config['display_path']:
name = cache['path']
else:
name = cache['name']
size = cache['length']
d = self.downloads[hash]
progress = '0.0%'
peers = 0
seeds = 0
seedsmsg = "S"
dist = 0.0
uprate = 0.0
dnrate = 0.0
upamt = 0
dnamt = 0
t = 0
if d.is_dead():
status = 'stopped'
elif d.waiting:
status = 'waiting for hash check'
elif d.checking:
status = d.status_msg
progress = '%.1f%%' % (d.status_done*100)
else:
stats = d.statsfunc()
s = stats['stats']
if d.seed:
status = 'seeding'
progress = '100.0%'
seeds = s.numOldSeeds
seedsmsg = "s"
dist = s.numCopies
else:
if s.numSeeds + s.numPeers:
t = stats['time']
if t == 0: # unlikely
t = 0.01
status = fmttime(t)
else:
t = -1
status = 'connecting to peers'
progress = '%.1f%%' % (int(stats['frac']*1000)/10.0)
seeds = s.numSeeds
dist = s.numCopies2
dnrate = stats['down']
peers = s.numPeers
uprate = stats['up']
upamt = s.upTotal
dnamt = s.downTotal
if d.is_dead() or d.status_errtime+300 > clock():
msg = d.status_err[-1]
else:
msg = ''
data.append(( name, status, progress, peers, seeds, seedsmsg, dist,
uprate, dnrate, upamt, dnamt, size, t, msg ))
stop = self.Output.display(data)
if stop:
self.doneflag.set()
def remove(self, hash):
self.torrent_list.remove(hash)
self.downloads[hash].shutdown()
del self.downloads[hash]
def add(self, hash, data):
c = self.counter
self.counter += 1
x = ''
for i in xrange(3):
x = mapbase64[c & 0x3F]+x
c >>= 6
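# Sketch of the intent of the loop above (inferred from the code, not stated in
# the original): the running counter is rendered as a 3-character base64 suffix
# so every torrent managed by this process gets a distinct peer ID. For example,
# counter 0 -> three copies of mapbase64[0], counter 1 -> two copies of
# mapbase64[0] followed by mapbase64[1]; the actual characters depend on the
# ordering of mapbase64, which is defined elsewhere.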
peer_id = createPeerID(x)
d = SingleDownload(self, hash, data['metainfo'], self.config, peer_id)
self.torrent_list.append(hash)
self.downloads[hash] = d
d.start()
def saveAs(self, hash, name, saveas, isdir):
x = self.torrent_cache[hash]
style = self.config['saveas_style']
if style == 1 or style == 3:
if saveas:
saveas = os.path.join(saveas,x['file'][:-1-len(x['type'])])
else:
saveas = x['path'][:-1-len(x['type'])]
if style == 3:
if not os.path.isdir(saveas):
try:
os.mkdir(saveas)
except:
raise OSError("couldn't create directory for "+x['path']
+" ("+saveas+")")
if not isdir:
saveas = os.path.join(saveas, name)
else:
if saveas:
saveas = os.path.join(saveas, name)
else:
saveas = os.path.join(os.path.split(x['path'])[0], name)
if isdir and not os.path.isdir(saveas):
try:
os.mkdir(saveas)
except:
raise OSError("couldn't create directory for "+x['path']
+" ("+saveas+")")
return saveas
def hashchecksched(self, hash = None):
if hash:
self.hashcheck_queue.append(hash)
if not self.hashcheck_current:
self._hashcheck_start()
def _hashcheck_start(self):
self.hashcheck_current = self.hashcheck_queue.pop(0)
self.downloads[self.hashcheck_current].hashcheck_start(self.hashcheck_callback)
def hashcheck_callback(self):
self.downloads[self.hashcheck_current].hashcheck_callback()
if self.hashcheck_queue:
self._hashcheck_start()
else:
self.hashcheck_current = None
def died(self, hash):
if self.torrent_cache.has_key(hash):
self.Output.message('DIED: "'+self.torrent_cache[hash]['path']+'"')
def was_stopped(self, hash):
try:
self.hashcheck_queue.remove(hash)
except:
pass
if self.hashcheck_current == hash:
self.hashcheck_current = None
if self.hashcheck_queue:
self._hashcheck_start()
def failed(self, s):
self.Output.message('FAILURE: '+s)
def exchandler(self, s):
self.Output.exception(s)
| mit |
benfinkelcbt/CPD200 | CPD200-Lab13-Python/pyasn1/type/tag.py | 162 | 4499 | # ASN.1 types tags
from operator import getitem
from pyasn1 import error
tagClassUniversal = 0x00
tagClassApplication = 0x40
tagClassContext = 0x80
tagClassPrivate = 0xC0
tagFormatSimple = 0x00
tagFormatConstructed = 0x20
tagCategoryImplicit = 0x01
tagCategoryExplicit = 0x02
tagCategoryUntagged = 0x04
class Tag:
def __init__(self, tagClass, tagFormat, tagId):
if tagId < 0:
raise error.PyAsn1Error(
'Negative tag ID (%s) not allowed' % (tagId,)
)
self.__tag = (tagClass, tagFormat, tagId)
self.uniq = (tagClass, tagId)
self.__hashedUniqTag = hash(self.uniq)
def __str__(self):
return '[%s:%s:%s]' % self.__tag
def __repr__(self):
return '%s(tagClass=%s, tagFormat=%s, tagId=%s)' % (
(self.__class__.__name__,) + self.__tag
)
# This is really a hotspot -- expose public "uniq" attribute to save on
# function calls
def __eq__(self, other): return self.uniq == other.uniq
def __ne__(self, other): return self.uniq != other.uniq
def __lt__(self, other): return self.uniq < other.uniq
def __le__(self, other): return self.uniq <= other.uniq
def __gt__(self, other): return self.uniq > other.uniq
def __ge__(self, other): return self.uniq >= other.uniq
def __hash__(self): return self.__hashedUniqTag
def __getitem__(self, idx): return self.__tag[idx]
def __and__(self, otherTag):
(tagClass, tagFormat, tagId) = otherTag
return self.__class__(
self.__tag[0]&tagClass, self.__tag[1]&tagFormat, self.__tag[2]&tagId
)
def __or__(self, otherTag):
(tagClass, tagFormat, tagId) = otherTag
return self.__class__(
self.__tag[0]|tagClass,
self.__tag[1]|tagFormat,
self.__tag[2]|tagId
)
def asTuple(self): return self.__tag # __getitem__() is slow
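# Illustrative sketch (hypothetical values, not part of the original module):
# two tags that differ only in format compare equal, because comparisons use
# the public ``uniq`` attribute, which is (tagClass, tagId).
#
#     t1 = Tag(tagClassContext, tagFormatSimple, 5)
#     t2 = Tag(tagClassContext, tagFormatConstructed, 5)
#     assert t1 == t2                              # same class and id
#     assert t1.asTuple()[1] != t2.asTuple()[1]    # formats still differ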
class TagSet:
def __init__(self, baseTag=(), *superTags):
self.__baseTag = baseTag
self.__superTags = superTags
self.__hashedSuperTags = hash(superTags)
_uniq = ()
for t in superTags:
_uniq = _uniq + t.uniq
self.uniq = _uniq
self.__lenOfSuperTags = len(superTags)
def __str__(self):
return self.__superTags and '+'.join([str(x) for x in self.__superTags]) or '[untagged]'
def __repr__(self):
return '%s(%s)' % (
self.__class__.__name__,
'(), ' + ', '.join([repr(x) for x in self.__superTags])
)
def __add__(self, superTag):
return self.__class__(
self.__baseTag, *self.__superTags + (superTag,)
)
def __radd__(self, superTag):
return self.__class__(
self.__baseTag, *(superTag,) + self.__superTags
)
def tagExplicitly(self, superTag):
tagClass, tagFormat, tagId = superTag
if tagClass == tagClassUniversal:
raise error.PyAsn1Error(
'Can\'t tag with UNIVERSAL-class tag'
)
if tagFormat != tagFormatConstructed:
superTag = Tag(tagClass, tagFormatConstructed, tagId)
return self + superTag
def tagImplicitly(self, superTag):
tagClass, tagFormat, tagId = superTag
if self.__superTags:
superTag = Tag(tagClass, self.__superTags[-1][1], tagId)
return self[:-1] + superTag
def getBaseTag(self): return self.__baseTag
def __getitem__(self, idx):
if isinstance(idx, slice):
return self.__class__(
self.__baseTag, *getitem(self.__superTags, idx)
)
return self.__superTags[idx]
def __eq__(self, other): return self.uniq == other.uniq
def __ne__(self, other): return self.uniq != other.uniq
def __lt__(self, other): return self.uniq < other.uniq
def __le__(self, other): return self.uniq <= other.uniq
def __gt__(self, other): return self.uniq > other.uniq
def __ge__(self, other): return self.uniq >= other.uniq
def __hash__(self): return self.__hashedSuperTags
def __len__(self): return self.__lenOfSuperTags
def isSuperTagSetOf(self, tagSet):
if len(tagSet) < self.__lenOfSuperTags:
return
idx = self.__lenOfSuperTags - 1
while idx >= 0:
if self.__superTags[idx] != tagSet[idx]:
return
idx = idx - 1
return 1
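# Illustrative sketch (hypothetical tags, assuming usage analogous to how ASN.1
# types build their tag sets): explicit tagging appends a new constructed tag,
# while implicit tagging replaces the outermost tag but keeps its format.
#
#     base = TagSet(Tag(tagClassUniversal, tagFormatSimple, 2),
#                   Tag(tagClassUniversal, tagFormatSimple, 2))
#     explicit = base.tagExplicitly(Tag(tagClassContext, tagFormatSimple, 0))
#     implicit = base.tagImplicitly(Tag(tagClassContext, tagFormatSimple, 0))
#     assert len(explicit) == len(base) + 1
#     assert len(implicit) == len(base)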
def initTagSet(tag): return TagSet(tag, tag)
| gpl-3.0 |
diging/jars | cookies/operations.py | 1 | 16043 | from django.contrib.contenttypes.models import ContentType
from django.db.models import Q, QuerySet
from django.conf import settings
from cookies.models import *
from concepts.models import Concept
from cookies import authorization
import jsonpickle, datetime, copy, requests
from itertools import groupby, combinations
from collections import Counter
import networkx as nx
import os
from cookies.exceptions import *
logger = settings.LOGGER
def add_creation_metadata(resource, user):
"""
Convenience function for creating a provenance relation when a
:class:`.User` adds a :class:`.Resource`\.
Parameters
----------
resource : :class:`.Resource`
user : :class:`.User`
"""
__provenance__, _ = Field.objects.get_or_create(uri=settings.PROVENANCE)
_now = str(datetime.datetime.now())
_creation_message = u'Added by %s on %s' % (user.username, _now)
Relation.objects.create(**{
'source': resource,
'predicate': __provenance__,
'target': Value.objects.create(**{
'_value': jsonpickle.encode(_creation_message),
'container': resource.container,
}),
'container': resource.container,
})
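# Hedged usage sketch (the field names passed to ``create`` are illustrative,
# not guaranteed by this module): record provenance right after a user adds a
# resource.
#
#     resource = Resource.objects.create(name='my-resource', created_by=user,
#                                        container=container)
#     add_creation_metadata(resource, user)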
def _transfer_all_relations(from_instance, to_instance, content_type):
"""
Transfers relations from one model instance to another.
Parameters
----------
from_instance : object
An instance of any model, usually a :class:`.Resource` or
:class:`.ConceptEntity`\.
to_instance :
content_type : :class:`.ContentType`
:class:`.ContentType` for the model of the instance that will inherit
relations.
"""
from_instance.relations_from.update(source_type=content_type,
source_instance_id=to_instance.id)
from_instance.relations_to.update(target_type=content_type,
target_instance_id=to_instance.id)
def prune_relations(resource, user=None):
"""
Search for and aggressively remove duplicate relations for a
:class:`.Resource`\.
Use at your own peril.
Parameters
----------
resource : :class:`.Resource`
user : :class:`.User`
If provided, data manipulation will be limited to by the authorizations
attached to a specific user. Default is ``None`` (superuser auths).
"""
value_type = ContentType.objects.get_for_model(Value)
def _search_and_destroy(relations):
def _delete_dupes(objs): # objs is an iterator of values() dicts.
for obj in objs[1:]: # Leave the first object.
Relation.objects.get(pk=obj[-1]).delete()
# We're looking for relations with the same predicate, whose
# complementary object is of the same type and is either identical or
# (if a Value) has the same value/content.
for pred, pr_relations in groupby(relations, lambda o: o[0]):
for ctype, ct_relations in groupby(pr_relations, lambda o: o[1]):
# We need to use this iterator twice, so we consume it now, and
# keep it around as a list.
ct_r = list(ct_relations)
for iid, id_relations in groupby(ct_relations, lambda o: o[2]):
_delete_dupes(list(id_relations)) # Target is the same.
if ctype != value_type.id: # Only applies to Value instances.
continue
values = Value.objects.filter(pk__in=zip(*ct_r)[2]) \
.order_by('id').values('id', '_value')
key = lambda *o: o[0][1]['_value']
for _, vl_r in groupby(sorted(zip(ct_r, values), key=key), key):
_delete_dupes(zip(*list(vl_r))[0])
fields = ['predicate_id', 'target_type', 'target_instance_id', 'id']
relations_from = resource.relations_from.all()
if user and type(resource) is Resource:
relations_from = authorization.apply_filter(ResourceAuthorization.EDIT, user, relations_from)
_search_and_destroy(relations_from.order_by(*fields).values_list(*fields))
fields = ['predicate_id', 'source_type', 'source_instance_id', 'id']
relations_to = resource.relations_to.all()
if user and type(resource) is Resource:
relations_to = authorization.apply_filter(ResourceAuthorization.EDIT, user, relations_to)
_search_and_destroy(relations_to.order_by(*fields).values_list(*fields))
def merge_conceptentities(entities, master_id=None, delete=True, user=None):
"""
Merge :class:`.ConceptEntity` instances in the QuerySet ``entities``.
As of 0.4, no :class:`.ConceptEntity` instances are deleted. Instead, they
are added to an :class:`.Identity` instance. ``master`` will become the
:prop:`.Identity.representative`\.
Parameters
----------
entities : QuerySet
master_id : int
(optional) The primary key of the :class:`.ConceptEntity` to use as the
"master" instance into which the remaining instances will be merged.
Returns
-------
master : :class:`.ConceptEntity`
Raises
------
RuntimeError
If less than two :class:`.ConceptEntity` instances are present in
``entities``, or if more than one unique :class:`.Concept` is
implicated.
"""
conceptentity_type = ContentType.objects.get_for_model(ConceptEntity)
if isinstance(entities, QuerySet):
_len = lambda qs: qs.count()
_uri = lambda qs: qs.values_list('concept__uri', flat=True)
_get_master = lambda qs, pk: qs.get(pk=pk)
_get_rep = lambda qs: qs.filter(represents__isnull=False).first()
_first = lambda qs: qs.first()
elif isinstance(entities, list):
_len = lambda qs: len(qs)
_uri = lambda qs: [concept.uri for obj in qs for concept in obj.concept.all()]#[getattr(o.concept, 'uri', None) for o in qs]
_get_master = lambda qs, pk: [e for e in entities if e.id == pk].pop()
_get_rep = lambda qs: [e for e in entities if e.represents.count() > 0].pop()
_first = lambda qs: qs[0]
if _len(entities) < 2:
raise RuntimeError("Need more than one ConceptEntity instance to merge")
# _concepts = list(set([v for v in _uri(entities) if v]))
# if len(_concepts) > 1:
# raise RuntimeError("Cannot merge two ConceptEntity instances with"
# " conflicting external concepts")
# _uri = _concepts[0] if _concepts else None
master = None
if master_id: # If a master is specified, use it...
try:
master = _get_master(entities, master_id)
except:
pass
if not master:
# Prefer entities that are already representative.
try:
master = _get_rep(entities)
except:
pass
if not master:
try: # ...otherwise, try to use the first instance.
master = _first(entities)
except AssertionError: # If a slice has already been taken.
master = entities[0]
concepts = filter(lambda pk: pk is not None, entities.values_list('concept__id', flat=True))
if concepts:
master.concept.add(*Concept.objects.filter(pk__in=concepts))
master.save()
identity = Identity.objects.create(
created_by = user,
representative = master,
)
identity.entities.add(*entities)
map(lambda e: e.identities.update(representative=master), entities)
return master
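# Hedged usage sketch (queryset filter and user are hypothetical): merging
# duplicate entities keeps every instance but groups them under one Identity
# whose representative is returned.
#
#     duplicates = ConceptEntity.objects.filter(name='Max Planck')
#     representative = merge_conceptentities(duplicates, user=request.user)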
def merge_resources(resources, master_id=None, delete=True, user=None):
"""
Merge selected resources into a single resource.
Parameters
-------------
resources : ``QuerySet``
The :class:`.Resource` instances that will be merged.
master_id : int
(optional) The primary key of the :class:`.Resource` to use as the
"master" instance into which the remaining instances will be merged.
Returns
-------
master : :class:`.Resource`
Raises
------
RuntimeError
If less than two :class:`.Resource` instances are present in
``resources``, or if :class:`.Resource` instances are not the
same with respect to content.
"""
resource_type = ContentType.objects.get_for_model(Resource)
if resources.count() < 2:
raise RuntimeError("Need more than one Resource instance to merge")
with_content = resources.filter(content_resource=True)
if with_content.count() != 0 and with_content.count() != resources.count():
raise RuntimeError("Cannot merge content and non-content resources")
if user is None:
user, _ = User.objects.get_or_create(username='AnonymousUser')
if master_id:
master = resources.get(pk=master_id)
else:
master = resources.first()
to_merge = resources.filter(~Q(pk=master.id))
for resource in to_merge:
_transfer_all_relations(resource, master, resource_type)
resource.content.all().update(for_resource=master)
for rel in ['resource_set', 'conceptentity_set', 'relation_set', 'content_relations', 'value_set']:
getattr(resource.container, rel).update(container_id=master.container.id)
# for collection in resource.part_of.all():
# master.part_of.add(collection)
prune_relations(master, user)
master.save()
if delete:
to_merge.delete()
return master
def add_resources_to_collection(resources, collection):
"""
Adds selected resources to a collection.
At least one resource must be provided, along with a single target collection.
Returns the collection after making changes.
Parameters
-------------
resources : ``QuerySet``
The :class:`.Resource` instances that will be added to ``collection``.
collection : :class:`.Collection`
The :class:`.Collection` instance to which ``resources`` will be added.
Returns
---------
collection : :class:`.Collection`
Updated :class:`.Collection` instance.
Raises
------
RuntimeError
If fewer than one :class:`.Resource` instance is in the queryset
or if ``collection`` is not a :class:`.Collection` instance
"""
if resources.count() < 1 :
raise RuntimeError("Need at least one resource to add to collection.")
if not isinstance(collection, Collection):
raise RuntimeError("Invalid collection to add resources to.")
collection.resources.add(*map(lambda r: r.container, resources))
collection.save()
return collection
def isolate_conceptentity(instance):
"""
Clone ``instance`` (and its relations) such that there is a separate
:class:`.ConceptEntity` instance for each related :class:`.Resource`\.
Prior to 0.3, merging involved actually combining records (and deleting all
but one). As of 0.4, merging does not result in deletion or combination,
but rather the creation of a :class:`.Identity`\.
Parameters
----------
instance : :class:`.ConceptEntity`
"""
if instance.relations_to.count() <= 1:
return
entities = []
for relation in instance.relations_to.all():
clone = copy.copy(instance)
clone.pk = None
clone.save()
relation.target = clone
relation.save()
for alt_relation in instance.relations_from.all():
alt_relation_target = alt_relation.target
cloned_relation_target = copy.copy(alt_relation_target)
cloned_relation_target.pk = None
cloned_relation_target.save()
cloned_relation = copy.copy(alt_relation)
cloned_relation.pk = None
cloned_relation.save()
cloned_relation.source = clone
cloned_relation.target = cloned_relation_target
cloned_relation.save()
entities.append(clone)
merge_conceptentities(entities, user=instance.created_by)
def generate_collection_coauthor_graph(collection,
author_predicate_uri="http://purl.org/net/biblio#authors"):
"""
Create a graph describing co-occurrences of :class:`.ConceptEntity`
instances linked to individual :class:`.Resource` instances via an
authorship :class:`.Relation` instance.
Parameters
----------
collection : :class:`.Collection`
author_predicate_uri : str
Defaults to the Biblio #authors predicate. This is the predicate that
will be used to identify author :class:`.Relation` instances.
Returns
-------
:class:`networkx.Graph`
Nodes will be :class:`.ConceptEntity` PK ids (int), edges will indicate
co-authorship; each edge should have a ``weight`` attribute indicating
the number of :class:`.Resource` instances on which the pair of CEs are
co-located.
"""
# Check that the collection parameter is an instance of :class:`.Collection`;
# if it is not, a RuntimeError is raised.
if not isinstance(collection, Collection):
raise RuntimeError("Invalid collection to export co-author data from")
resource_type_id = ContentType.objects.get_for_model(Resource).id
# This will hold node attributes for all ConceptEntity instances across the
# entire collection.
node_labels = {}
node_uris = {}
# Since a particular pair of ConceptEntity instances may co-occur on more
# than one Resource in this Collection, we compile the number of
# co-occurrences prior to building the networkx Graph object.
edges = Counter()
# The co-occurrence graph will be comprised of ConceptEntity instances
# (identified by their PK ids. An edge between two nodes indicates that
# the two constituent CEs occur together on the same Resource (with an
# author Relation). A ``weight`` attribute on each edge will record the
# number of Resource instances on which each respective pair of CEs
# co-occur.
for resource_id in collection.resourcecontainer_set.values_list('primary__id', flat=True):
# We only need a few columns from the ConceptEntity table, from rows
# referenced by the corresponding Relations.
author_relations = Relation.objects\
.filter(source_type_id=resource_type_id,
source_instance_id=resource_id,
predicate__uri=author_predicate_uri)\
.prefetch_related('target')
# If there are fewer than two author relations, the resource cannot
# contribute any co-authorship edges, so skip it.
if author_relations.count() <= 1:
continue
ids, labels, uris = zip(*list(set(((r.target.id, r.target.name, r.target.uri) for r in author_relations))))
# It doesn't matter if we overwrite node attribute values, since they
# won't vary.
node_labels.update(dict(zip(ids, labels)))
node_uris.update(dict(zip(ids, uris)))
# The keys here are ConceptEntity PK ids, which will be the primary
# identifiers used in the graph.
for edge in combinations(ids, 2):
edges[edge] += 1
# Instantiate the Graph from the edge data generated above.
graph = nx.Graph()
for (u, v), weight in edges.iteritems():
graph.add_edge(u, v, weight=weight)
# This is more efficient than setting the node attribute as we go along.
# If there is only one author, there is no need to set node attributes as
# there is no co-authorship for that Collection.
if len(node_labels.keys()) > 1:
nx.set_node_attributes(graph, 'label', node_labels)
nx.set_node_attributes(graph, 'uri', node_uris)
return graph
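# Illustrative follow-up (assumes a Collection instance named ``collection``):
# the edge weights can be aggregated to rank authors by co-authorship strength.
#
#     graph = generate_collection_coauthor_graph(collection)
#     strength = {n: sum(d['weight'] for _, _, d in graph.edges(n, data=True))
#                 for n in graph.nodes()}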
def ping_remote_resource(path):
"""
Check whether a remote resource is accessible.
"""
try:
response = requests.head(path)
except requests.exceptions.ConnectTimeout:
return False, {}
return response.status_code == requests.codes.ok, response.headers
| gpl-3.0 |
stefano-meschiari/SMESCHIA | .emacs.d/elpa/elpy-20140810.7/elpy/tests/test_pydocutils.py | 6 | 3370 | import os
import unittest
import shutil
import sys
import tempfile
import mock
import elpy.pydocutils
class TestGetPydocCompletions(unittest.TestCase):
def test_should_return_top_level_modules(self):
modules = elpy.pydocutils.get_pydoc_completions("")
self.assertIn('sys', modules)
self.assertIn('json', modules)
self.assertIn('elpy', modules)
def test_should_return_submodules(self):
modules = elpy.pydocutils.get_pydoc_completions("elpy")
self.assertIn("elpy.rpc", modules)
self.assertIn("elpy.server", modules)
modules = elpy.pydocutils.get_pydoc_completions("os")
self.assertIn("os.path", modules)
def test_should_find_objects_in_module(self):
self.assertIn("elpy.tests.test_pydocutils.TestGetPydocCompletions",
elpy.pydocutils.get_pydoc_completions
("elpy.tests.test_pydocutils"))
def test_should_find_attributes_of_objects(self):
attribs = elpy.pydocutils.get_pydoc_completions(
"elpy.tests.test_pydocutils.TestGetPydocCompletions")
self.assertIn("elpy.tests.test_pydocutils.TestGetPydocCompletions."
"test_should_find_attributes_of_objects",
attribs)
def test_should_return_none_for_inexisting_module(self):
self.assertEqual([],
elpy.pydocutils.get_pydoc_completions
("does_not_exist"))
def test_should_work_for_unicode_strings(self):
self.assertIsNotNone(elpy.pydocutils.get_pydoc_completions
(u"sys"))
def test_should_find_partial_completions(self):
self.assertIn("multiprocessing",
elpy.pydocutils.get_pydoc_completions
("multiprocess"))
self.assertIn("multiprocessing.util",
elpy.pydocutils.get_pydoc_completions
("multiprocessing.ut"))
def test_should_ignore_trailing_dot(self):
self.assertIn("elpy.pydocutils",
elpy.pydocutils.get_pydoc_completions
("elpy."))
class TestGetModules(unittest.TestCase):
def test_should_return_top_level_modules(self):
modules = elpy.pydocutils.get_modules()
self.assertIn('sys', modules)
self.assertIn('json', modules)
self.assertIn('elpy', modules)
def test_should_return_submodules(self):
modules = elpy.pydocutils.get_modules("elpy")
self.assertIn("rpc", modules)
self.assertIn("server", modules)
@mock.patch.object(elpy.pydocutils, 'safeimport')
def test_should_catch_import_errors(self, safeimport):
def raise_function(message):
raise elpy.pydocutils.ErrorDuringImport(message,
(None, None, None))
safeimport.side_effect = raise_function
self.assertEqual([], elpy.pydocutils.get_modules("foo.bar"))
def test_should_not_fail_for_permission_denied(self):
tmpdir = tempfile.mkdtemp(prefix="test-elpy-get-modules-")
sys.path.append(tmpdir)
os.chmod(tmpdir, 0o000)
try:
elpy.pydocutils.get_modules()
finally:
os.chmod(tmpdir, 0o755)
shutil.rmtree(tmpdir)
sys.path.remove(tmpdir)
| mit |
ToontownUprising/src | toontown/toonbase/ContentPacksManager.py | 3 | 4454 | from direct.directnotify.DirectNotifyGlobal import directNotify
import fnmatch
import os
from panda3d.core import Multifile, Filename, VirtualFileSystem
import yaml
APPLICABLE_FILE_PATTERNS = ('*.mf', 'ambience.yaml')
CONTENT_EXT_WHITELIST = ('.jpg', '.jpeg', '.rgb', '.png', '.ogg', '.ttf')
class ContentPackError(Exception):
pass
class ContentPacksManager:
notify = directNotify.newCategory('ContentPacksManager')
notify.setInfo(True)
def __init__(self, filepath='contentpacks/', sortFilename='sort.yaml'):
self.filepath = filepath
self.sortFilename = os.path.join(self.filepath, sortFilename)
if __debug__:
self.mountPoint = '../resources'
else:
self.mountPoint = '/'
self.vfs = VirtualFileSystem.getGlobalPtr()
self.sort = []
self.ambience = {}
def isApplicable(self, filename):
"""
Returns whether or not the specified file is applicable.
"""
# Does this file exist?
if not os.path.exists(os.path.join(self.filepath, filename)):
return False
# Does this file match one of the applicable file patterns?
basename = os.path.basename(filename)
for pattern in APPLICABLE_FILE_PATTERNS:
if fnmatch.fnmatch(basename, pattern):
return True
return False
def applyMultifile(self, filename):
"""
Apply the specified multifile.
"""
mf = Multifile()
mf.openReadWrite(Filename(os.path.join(self.filepath, filename)))
# Discard content with non-whitelisted extensions:
for subfileName in mf.getSubfileNames():
ext = os.path.splitext(subfileName)[1]
if ext not in CONTENT_EXT_WHITELIST:
mf.removeSubfile(subfileName)
self.vfs.mount(mf, self.mountPoint, 0)
def applyAmbience(self, filename):
"""
Apply the specified ambience configuration file.
"""
with open(os.path.join(self.filepath, filename), 'r') as f:
self.ambience.update(yaml.load(f) or {})
def apply(self, filename):
"""
Apply the specified content pack file.
"""
self.notify.info('Applying %s...' % filename)
basename = os.path.basename(filename)
if basename.endswith('.mf'):
self.applyMultifile(filename)
elif basename == 'ambience.yaml':
self.applyAmbience(filename)
def applyAll(self):
"""
Using the sort configuration, recursively apply all applicable content
pack files under the configured content packs directory.
"""
# First, read the sort configuration:
self.readSortConfig()
# Next, apply the sorted files:
for filename in self.sort[:]:
if self.isApplicable(filename):
self.apply(filename)
else:
self.notify.warning('Invalidating %s...' % filename)
self.sort.remove(filename)
# Apply the non-sorted files:
for root, _, filenames in os.walk(self.filepath):
root = root[len(self.filepath):]
for filename in filenames:
filename = os.path.join(root, filename).replace('\\', '/')
# Ensure this file isn't sorted:
if filename in self.sort:
continue
# Ensure this file is applicable:
if not self.isApplicable(filename):
continue
# Apply this file, and add it to the sort configuration:
self.apply(filename)
self.sort.append(filename)
# Finally, write the new sort configuration:
self.writeSortConfig()
def readSortConfig(self):
"""
Read the sort configuration.
"""
if not os.path.exists(self.sortFilename):
return
with open(self.sortFilename, 'r') as f:
self.sort = yaml.load(f) or []
def writeSortConfig(self):
"""
Write the sort configuration to disk.
"""
with open(self.sortFilename, 'w') as f:
for filename in self.sort:
f.write('- %s\n' % filename)
def getAmbience(self, group):
"""
Returns the ambience configurations for the specified group.
"""
return self.ambience.get(group, {})
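# Hedged usage sketch (directory and group names are illustrative; the manager
# is normally created once at startup):
#
#     cpm = ContentPacksManager(filepath='contentpacks/')
#     cpm.applyAll()                               # mount packs, merge ambience.yaml
#     battle_ambience = cpm.getAmbience('battle')  # {} if the group is absent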
| mit |
nesdis/djongo | tests/django_tests/tests/v22/tests/db_functions/text/test_trim.py | 71 | 1357 | from django.db.models import CharField
from django.db.models.functions import LTrim, RTrim, Trim
from django.test import TestCase
from django.test.utils import register_lookup
from ..models import Author
class TrimTests(TestCase):
def test_trim(self):
Author.objects.create(name=' John ', alias='j')
Author.objects.create(name='Rhonda', alias='r')
authors = Author.objects.annotate(
ltrim=LTrim('name'),
rtrim=RTrim('name'),
trim=Trim('name'),
)
self.assertQuerysetEqual(
authors.order_by('alias'), [
('John ', ' John', 'John'),
('Rhonda', 'Rhonda', 'Rhonda'),
],
lambda a: (a.ltrim, a.rtrim, a.trim)
)
def test_trim_transform(self):
Author.objects.create(name=' John ')
Author.objects.create(name='Rhonda')
tests = (
(LTrim, 'John '),
(RTrim, ' John'),
(Trim, 'John'),
)
for transform, trimmed_name in tests:
with self.subTest(transform=transform):
with register_lookup(CharField, transform):
authors = Author.objects.filter(**{'name__%s' % transform.lookup_name: trimmed_name})
self.assertQuerysetEqual(authors, [' John '], lambda a: a.name)
| agpl-3.0 |
listamilton/supermilton.repository | script.module.youtube.dl/lib/youtube_dl/postprocessor/ffmpeg.py | 13 | 22506 | from __future__ import unicode_literals
import io
import os
import subprocess
import time
from .common import AudioConversionError, PostProcessor
from ..compat import (
compat_subprocess_get_DEVNULL,
)
from ..utils import (
encodeArgument,
encodeFilename,
get_exe_version,
is_outdated_version,
PostProcessingError,
prepend_extension,
shell_quote,
subtitles_filename,
dfxp2srt,
ISO639Utils,
)
EXT_TO_OUT_FORMATS = {
"aac": "adts",
"m4a": "ipod",
"mka": "matroska",
"mkv": "matroska",
"mpg": "mpeg",
"ogv": "ogg",
"ts": "mpegts",
"wma": "asf",
"wmv": "asf",
}
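# Illustrative note (lookup value is hypothetical): this table maps output file
# extensions to the container format names expected by ffmpeg's '-f' option,
# e.g. EXT_TO_OUT_FORMATS.get('mkv', 'mkv') -> 'matroska'.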
class FFmpegPostProcessorError(PostProcessingError):
pass
class FFmpegPostProcessor(PostProcessor):
def __init__(self, downloader=None):
PostProcessor.__init__(self, downloader)
self._determine_executables()
def check_version(self):
if not self.available:
raise FFmpegPostProcessorError('ffmpeg or avconv not found. Please install one.')
required_version = '10-0' if self.basename == 'avconv' else '1.0'
if is_outdated_version(
self._versions[self.basename], required_version):
warning = 'Your copy of %s is outdated, update %s to version %s or newer if you encounter any errors.' % (
self.basename, self.basename, required_version)
if self._downloader:
self._downloader.report_warning(warning)
@staticmethod
def get_versions(downloader=None):
return FFmpegPostProcessor(downloader)._versions
def _determine_executables(self):
programs = ['avprobe', 'avconv', 'ffmpeg', 'ffprobe']
prefer_ffmpeg = False
self.basename = None
self.probe_basename = None
self._paths = None
self._versions = None
if self._downloader:
prefer_ffmpeg = self._downloader.params.get('prefer_ffmpeg', False)
location = self._downloader.params.get('ffmpeg_location')
if location is not None:
if not os.path.exists(location):
self._downloader.report_warning(
'ffmpeg-location %s does not exist! '
'Continuing without avconv/ffmpeg.' % (location))
self._versions = {}
return
elif not os.path.isdir(location):
basename = os.path.splitext(os.path.basename(location))[0]
if basename not in programs:
self._downloader.report_warning(
'Cannot identify executable %s, its basename should be one of %s. '
'Continuing without avconv/ffmpeg.' %
(location, ', '.join(programs)))
self._versions = {}
return None
location = os.path.dirname(os.path.abspath(location))
if basename in ('ffmpeg', 'ffprobe'):
prefer_ffmpeg = True
self._paths = dict(
(p, os.path.join(location, p)) for p in programs)
self._versions = dict(
(p, get_exe_version(self._paths[p], args=['-version']))
for p in programs)
if self._versions is None:
self._versions = dict(
(p, get_exe_version(p, args=['-version'])) for p in programs)
self._paths = dict((p, p) for p in programs)
if prefer_ffmpeg:
prefs = ('ffmpeg', 'avconv')
else:
prefs = ('avconv', 'ffmpeg')
for p in prefs:
if self._versions[p]:
self.basename = p
break
if prefer_ffmpeg:
prefs = ('ffprobe', 'avprobe')
else:
prefs = ('avprobe', 'ffprobe')
for p in prefs:
if self._versions[p]:
self.probe_basename = p
break
@property
def available(self):
return self.basename is not None
@property
def executable(self):
return self._paths[self.basename]
@property
def probe_available(self):
return self.probe_basename is not None
@property
def probe_executable(self):
return self._paths[self.probe_basename]
def run_ffmpeg_multiple_files(self, input_paths, out_path, opts):
self.check_version()
oldest_mtime = min(
os.stat(encodeFilename(path)).st_mtime for path in input_paths)
opts += self._configuration_args()
files_cmd = []
for path in input_paths:
files_cmd.extend([
encodeArgument('-i'),
encodeFilename(self._ffmpeg_filename_argument(path), True)
])
cmd = ([encodeFilename(self.executable, True), encodeArgument('-y')] +
files_cmd +
[encodeArgument(o) for o in opts] +
[encodeFilename(self._ffmpeg_filename_argument(out_path), True)])
if self._downloader.params.get('verbose', False):
self._downloader.to_screen('[debug] ffmpeg command line: %s' % shell_quote(cmd))
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
stdout, stderr = p.communicate()
if p.returncode != 0:
stderr = stderr.decode('utf-8', 'replace')
msg = stderr.strip().split('\n')[-1]
raise FFmpegPostProcessorError(msg)
self.try_utime(out_path, oldest_mtime, oldest_mtime)
def run_ffmpeg(self, path, out_path, opts):
self.run_ffmpeg_multiple_files([path], out_path, opts)
def _ffmpeg_filename_argument(self, fn):
# Always use 'file:' because the filename may contain ':' (ffmpeg
# interprets that as a protocol) or can start with '-' (-- is broken in
# ffmpeg, see https://ffmpeg.org/trac/ffmpeg/ticket/2127 for details)
# Also leave '-' intact in order not to break streaming to stdout.
return 'file:' + fn if fn != '-' else fn
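# For example (hypothetical filenames): _ffmpeg_filename_argument('-odd.mp4')
# returns 'file:-odd.mp4', while '-' (stdout streaming) is passed through as-is.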
class FFmpegExtractAudioPP(FFmpegPostProcessor):
def __init__(self, downloader=None, preferredcodec=None, preferredquality=None, nopostoverwrites=False):
FFmpegPostProcessor.__init__(self, downloader)
if preferredcodec is None:
preferredcodec = 'best'
self._preferredcodec = preferredcodec
self._preferredquality = preferredquality
self._nopostoverwrites = nopostoverwrites
def get_audio_codec(self, path):
if not self.probe_available:
raise PostProcessingError('ffprobe or avprobe not found. Please install one.')
try:
cmd = [
encodeFilename(self.probe_executable, True),
encodeArgument('-show_streams'),
encodeFilename(self._ffmpeg_filename_argument(path), True)]
if self._downloader.params.get('verbose', False):
self._downloader.to_screen('[debug] %s command line: %s' % (self.basename, shell_quote(cmd)))
handle = subprocess.Popen(cmd, stderr=compat_subprocess_get_DEVNULL(), stdout=subprocess.PIPE, stdin=subprocess.PIPE)
output = handle.communicate()[0]
if handle.wait() != 0:
return None
except (IOError, OSError):
return None
audio_codec = None
for line in output.decode('ascii', 'ignore').split('\n'):
if line.startswith('codec_name='):
audio_codec = line.split('=')[1].strip()
elif line.strip() == 'codec_type=audio' and audio_codec is not None:
return audio_codec
return None
def run_ffmpeg(self, path, out_path, codec, more_opts):
if codec is None:
acodec_opts = []
else:
acodec_opts = ['-acodec', codec]
opts = ['-vn'] + acodec_opts + more_opts
try:
FFmpegPostProcessor.run_ffmpeg(self, path, out_path, opts)
except FFmpegPostProcessorError as err:
raise AudioConversionError(err.msg)
def run(self, information):
path = information['filepath']
filecodec = self.get_audio_codec(path)
if filecodec is None:
raise PostProcessingError('WARNING: unable to obtain file audio codec with ffprobe')
more_opts = []
if self._preferredcodec == 'best' or self._preferredcodec == filecodec or (self._preferredcodec == 'm4a' and filecodec == 'aac'):
if filecodec == 'aac' and self._preferredcodec in ['m4a', 'best']:
# Lossless, but in another container
acodec = 'copy'
extension = 'm4a'
more_opts = ['-bsf:a', 'aac_adtstoasc']
elif filecodec in ['aac', 'mp3', 'vorbis', 'opus']:
# Lossless if possible
acodec = 'copy'
extension = filecodec
if filecodec == 'aac':
more_opts = ['-f', 'adts']
if filecodec == 'vorbis':
extension = 'ogg'
else:
# MP3 otherwise.
acodec = 'libmp3lame'
extension = 'mp3'
more_opts = []
if self._preferredquality is not None:
if int(self._preferredquality) < 10:
more_opts += ['-q:a', self._preferredquality]
else:
more_opts += ['-b:a', self._preferredquality + 'k']
else:
# We convert the audio (lossy)
acodec = {'mp3': 'libmp3lame', 'aac': 'aac', 'm4a': 'aac', 'opus': 'opus', 'vorbis': 'libvorbis', 'wav': None}[self._preferredcodec]
extension = self._preferredcodec
more_opts = []
if self._preferredquality is not None:
# The opus codec doesn't support the -aq option
if int(self._preferredquality) < 10 and extension != 'opus':
more_opts += ['-q:a', self._preferredquality]
else:
more_opts += ['-b:a', self._preferredquality + 'k']
if self._preferredcodec == 'aac':
more_opts += ['-f', 'adts']
if self._preferredcodec == 'm4a':
more_opts += ['-bsf:a', 'aac_adtstoasc']
if self._preferredcodec == 'vorbis':
extension = 'ogg'
if self._preferredcodec == 'wav':
extension = 'wav'
more_opts += ['-f', 'wav']
prefix, sep, ext = path.rpartition('.') # not os.path.splitext, since the latter does not work on unicode in all setups
new_path = prefix + sep + extension
# If we download foo.mp3 and convert it to... foo.mp3, then don't delete foo.mp3, silly.
if (new_path == path or
(self._nopostoverwrites and os.path.exists(encodeFilename(new_path)))):
self._downloader.to_screen('[ffmpeg] Post-process file %s exists, skipping' % new_path)
return [], information
try:
self._downloader.to_screen('[ffmpeg] Destination: ' + new_path)
self.run_ffmpeg(path, new_path, acodec, more_opts)
except AudioConversionError as e:
raise PostProcessingError(
'audio conversion failed: ' + e.msg)
except Exception:
raise PostProcessingError('error running ' + self.basename)
# Try to update the date time for extracted audio file.
if information.get('filetime') is not None:
self.try_utime(
new_path, time.time(), information['filetime'],
errnote='Cannot update utime of audio file')
information['filepath'] = new_path
information['ext'] = extension
return [path], information
class FFmpegVideoConvertorPP(FFmpegPostProcessor):
def __init__(self, downloader=None, preferedformat=None):
super(FFmpegVideoConvertorPP, self).__init__(downloader)
self._preferedformat = preferedformat
def run(self, information):
path = information['filepath']
if information['ext'] == self._preferedformat:
self._downloader.to_screen('[ffmpeg] Not converting video file %s - already is in target format %s' % (path, self._preferedformat))
return [], information
options = []
if self._preferedformat == 'avi':
options.extend(['-c:v', 'libxvid', '-vtag', 'XVID'])
prefix, sep, ext = path.rpartition('.')
outpath = prefix + sep + self._preferedformat
self._downloader.to_screen('[' + 'ffmpeg' + '] Converting video from %s to %s, Destination: ' % (information['ext'], self._preferedformat) + outpath)
self.run_ffmpeg(path, outpath, options)
information['filepath'] = outpath
information['format'] = self._preferedformat
information['ext'] = self._preferedformat
return [path], information
class FFmpegEmbedSubtitlePP(FFmpegPostProcessor):
def run(self, information):
if information['ext'] not in ('mp4', 'webm', 'mkv'):
self._downloader.to_screen('[ffmpeg] Subtitles can only be embedded in mp4, webm or mkv files')
return [], information
subtitles = information.get('requested_subtitles')
if not subtitles:
self._downloader.to_screen('[ffmpeg] There aren\'t any subtitles to embed')
return [], information
filename = information['filepath']
ext = information['ext']
sub_langs = []
sub_filenames = []
webm_vtt_warn = False
for lang, sub_info in subtitles.items():
sub_ext = sub_info['ext']
if ext != 'webm' or ext == 'webm' and sub_ext == 'vtt':
sub_langs.append(lang)
sub_filenames.append(subtitles_filename(filename, lang, sub_ext))
else:
if not webm_vtt_warn and ext == 'webm' and sub_ext != 'vtt':
webm_vtt_warn = True
self._downloader.to_screen('[ffmpeg] Only WebVTT subtitles can be embedded in webm files')
if not sub_langs:
return [], information
input_files = [filename] + sub_filenames
opts = [
'-map', '0',
'-c', 'copy',
# Don't copy the existing subtitles, we may be running the
# postprocessor a second time
'-map', '-0:s',
]
if information['ext'] == 'mp4':
opts += ['-c:s', 'mov_text']
for (i, lang) in enumerate(sub_langs):
opts.extend(['-map', '%d:0' % (i + 1)])
lang_code = ISO639Utils.short2long(lang)
if lang_code is not None:
opts.extend(['-metadata:s:s:%d' % i, 'language=%s' % lang_code])
temp_filename = prepend_extension(filename, 'temp')
self._downloader.to_screen('[ffmpeg] Embedding subtitles in \'%s\'' % filename)
self.run_ffmpeg_multiple_files(input_files, temp_filename, opts)
os.remove(encodeFilename(filename))
os.rename(encodeFilename(temp_filename), encodeFilename(filename))
return sub_filenames, information
class FFmpegMetadataPP(FFmpegPostProcessor):
def run(self, info):
metadata = {}
def add(meta_list, info_list=None):
if not info_list:
info_list = meta_list
if not isinstance(meta_list, (list, tuple)):
meta_list = (meta_list,)
if not isinstance(info_list, (list, tuple)):
info_list = (info_list,)
for info_f in info_list:
if info.get(info_f) is not None:
for meta_f in meta_list:
metadata[meta_f] = info[info_f]
break
add('title', ('track', 'title'))
add('date', 'upload_date')
add(('description', 'comment'), 'description')
add('purl', 'webpage_url')
add('track', 'track_number')
add('artist', ('artist', 'creator', 'uploader', 'uploader_id'))
add('genre')
add('album')
add('album_artist')
add('disc', 'disc_number')
if not metadata:
self._downloader.to_screen('[ffmpeg] There isn\'t any metadata to add')
return [], info
filename = info['filepath']
temp_filename = prepend_extension(filename, 'temp')
if info['ext'] == 'm4a':
options = ['-vn', '-acodec', 'copy']
else:
options = ['-c', 'copy']
for (name, value) in metadata.items():
options.extend(['-metadata', '%s=%s' % (name, value)])
self._downloader.to_screen('[ffmpeg] Adding metadata to \'%s\'' % filename)
self.run_ffmpeg(filename, temp_filename, options)
os.remove(encodeFilename(filename))
os.rename(encodeFilename(temp_filename), encodeFilename(filename))
return [], info
class FFmpegMergerPP(FFmpegPostProcessor):
def run(self, info):
filename = info['filepath']
temp_filename = prepend_extension(filename, 'temp')
args = ['-c', 'copy', '-map', '0:v:0', '-map', '1:a:0']
self._downloader.to_screen('[ffmpeg] Merging formats into "%s"' % filename)
self.run_ffmpeg_multiple_files(info['__files_to_merge'], temp_filename, args)
os.rename(encodeFilename(temp_filename), encodeFilename(filename))
return info['__files_to_merge'], info
def can_merge(self):
# TODO: figure out merge-capable ffmpeg version
if self.basename != 'avconv':
return True
required_version = '10-0'
if is_outdated_version(
self._versions[self.basename], required_version):
warning = ('Your copy of %s is outdated and unable to properly mux separate video and audio files, '
'youtube-dl will download single file media. '
'Update %s to version %s or newer to fix this.') % (
self.basename, self.basename, required_version)
if self._downloader:
self._downloader.report_warning(warning)
return False
return True
class FFmpegFixupStretchedPP(FFmpegPostProcessor):
def run(self, info):
stretched_ratio = info.get('stretched_ratio')
if stretched_ratio is None or stretched_ratio == 1:
return [], info
filename = info['filepath']
temp_filename = prepend_extension(filename, 'temp')
options = ['-c', 'copy', '-aspect', '%f' % stretched_ratio]
self._downloader.to_screen('[ffmpeg] Fixing aspect ratio in "%s"' % filename)
self.run_ffmpeg(filename, temp_filename, options)
os.remove(encodeFilename(filename))
os.rename(encodeFilename(temp_filename), encodeFilename(filename))
return [], info
class FFmpegFixupM4aPP(FFmpegPostProcessor):
def run(self, info):
if info.get('container') != 'm4a_dash':
return [], info
filename = info['filepath']
temp_filename = prepend_extension(filename, 'temp')
options = ['-c', 'copy', '-f', 'mp4']
self._downloader.to_screen('[ffmpeg] Correcting container in "%s"' % filename)
self.run_ffmpeg(filename, temp_filename, options)
os.remove(encodeFilename(filename))
os.rename(encodeFilename(temp_filename), encodeFilename(filename))
return [], info
class FFmpegFixupM3u8PP(FFmpegPostProcessor):
def run(self, info):
filename = info['filepath']
temp_filename = prepend_extension(filename, 'temp')
options = ['-c', 'copy', '-f', 'mp4', '-bsf:a', 'aac_adtstoasc']
self._downloader.to_screen('[ffmpeg] Fixing malformed aac bitstream in "%s"' % filename)
self.run_ffmpeg(filename, temp_filename, options)
os.remove(encodeFilename(filename))
os.rename(encodeFilename(temp_filename), encodeFilename(filename))
return [], info
class FFmpegSubtitlesConvertorPP(FFmpegPostProcessor):
def __init__(self, downloader=None, format=None):
super(FFmpegSubtitlesConvertorPP, self).__init__(downloader)
self.format = format
def run(self, info):
subs = info.get('requested_subtitles')
filename = info['filepath']
new_ext = self.format
new_format = new_ext
if new_format == 'vtt':
new_format = 'webvtt'
if subs is None:
self._downloader.to_screen('[ffmpeg] There aren\'t any subtitles to convert')
return [], info
self._downloader.to_screen('[ffmpeg] Converting subtitles')
sub_filenames = []
for lang, sub in subs.items():
ext = sub['ext']
if ext == new_ext:
self._downloader.to_screen(
'[ffmpeg] Subtitle file for %s is already in the requested '
'format' % new_ext)
continue
old_file = subtitles_filename(filename, lang, ext)
sub_filenames.append(old_file)
new_file = subtitles_filename(filename, lang, new_ext)
if ext == 'dfxp' or ext == 'ttml' or ext == 'tt':
self._downloader.report_warning(
'You have requested to convert dfxp (TTML) subtitles into another format, '
'which results in style information loss')
dfxp_file = old_file
srt_file = subtitles_filename(filename, lang, 'srt')
with io.open(dfxp_file, 'rt', encoding='utf-8') as f:
srt_data = dfxp2srt(f.read())
with io.open(srt_file, 'wt', encoding='utf-8') as f:
f.write(srt_data)
old_file = srt_file
subs[lang] = {
'ext': 'srt',
'data': srt_data
}
if new_ext == 'srt':
continue
else:
sub_filenames.append(srt_file)
self.run_ffmpeg(old_file, new_file, ['-f', new_format])
with io.open(new_file, 'rt', encoding='utf-8') as f:
subs[lang] = {
'ext': new_ext,
'data': f.read(),
}
return sub_filenames, info
| gpl-2.0 |
rosudrag/Freemium-winner | VirtualEnvironment/Lib/site-packages/pip-7.1.0-py3.4.egg/pip/_vendor/requests/packages/urllib3/connection.py | 483 | 9011 | import datetime
import sys
import socket
from socket import timeout as SocketTimeout
import warnings
from .packages import six
try: # Python 3
from http.client import HTTPConnection as _HTTPConnection, HTTPException
except ImportError:
from httplib import HTTPConnection as _HTTPConnection, HTTPException
class DummyConnection(object):
"Used to detect a failed ConnectionCls import."
pass
try: # Compiled with SSL?
HTTPSConnection = DummyConnection
import ssl
BaseSSLError = ssl.SSLError
except (ImportError, AttributeError): # Platform-specific: No SSL.
ssl = None
class BaseSSLError(BaseException):
pass
try: # Python 3:
# Not a no-op, we're adding this to the namespace so it can be imported.
ConnectionError = ConnectionError
except NameError: # Python 2:
class ConnectionError(Exception):
pass
from .exceptions import (
ConnectTimeoutError,
SystemTimeWarning,
SecurityWarning,
)
from .packages.ssl_match_hostname import match_hostname
from .util.ssl_ import (
resolve_cert_reqs,
resolve_ssl_version,
ssl_wrap_socket,
assert_fingerprint,
)
from .util import connection
port_by_scheme = {
'http': 80,
'https': 443,
}
RECENT_DATE = datetime.date(2014, 1, 1)
class HTTPConnection(_HTTPConnection, object):
"""
Based on httplib.HTTPConnection but provides an extra constructor
backwards-compatibility layer between older and newer Pythons.
Additional keyword parameters are used to configure attributes of the connection.
Accepted parameters include:
- ``strict``: See the documentation on :class:`urllib3.connectionpool.HTTPConnectionPool`
- ``source_address``: Set the source address for the current connection.
.. note:: This is ignored for Python 2.6. It is only applied for 2.7 and 3.x
- ``socket_options``: Set specific options on the underlying socket. If not specified, then
defaults are loaded from ``HTTPConnection.default_socket_options`` which includes disabling
Nagle's algorithm (sets TCP_NODELAY to 1) unless the connection is behind a proxy.
For example, if you wish to enable TCP Keep Alive in addition to the defaults,
you might pass::
HTTPConnection.default_socket_options + [
(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1),
]
Or you may want to disable the defaults by passing an empty list (e.g., ``[]``).
"""
default_port = port_by_scheme['http']
#: Disable Nagle's algorithm by default.
#: ``[(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)]``
default_socket_options = [(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)]
#: Whether this connection verifies the host's certificate.
is_verified = False
def __init__(self, *args, **kw):
if six.PY3: # Python 3
kw.pop('strict', None)
# Pre-set source_address in case we have an older Python like 2.6.
self.source_address = kw.get('source_address')
if sys.version_info < (2, 7): # Python 2.6
# _HTTPConnection on Python 2.6 will balk at this keyword arg, but
# not newer versions. We can still use it when creating a
# connection though, so we pop it *after* we have saved it as
# self.source_address.
kw.pop('source_address', None)
#: The socket options provided by the user. If no options are
#: provided, we use the default options.
self.socket_options = kw.pop('socket_options', self.default_socket_options)
# Superclass also sets self.source_address in Python 2.7+.
_HTTPConnection.__init__(self, *args, **kw)
def _new_conn(self):
""" Establish a socket connection and set nodelay settings on it.
:return: New socket connection.
"""
extra_kw = {}
if self.source_address:
extra_kw['source_address'] = self.source_address
if self.socket_options:
extra_kw['socket_options'] = self.socket_options
try:
conn = connection.create_connection(
(self.host, self.port), self.timeout, **extra_kw)
except SocketTimeout:
raise ConnectTimeoutError(
self, "Connection to %s timed out. (connect timeout=%s)" %
(self.host, self.timeout))
return conn
def _prepare_conn(self, conn):
self.sock = conn
# the _tunnel_host attribute was added in python 2.6.3 (via
# http://hg.python.org/cpython/rev/0f57b30a152f) so pythons 2.6(0-2) do
# not have them.
if getattr(self, '_tunnel_host', None):
# TODO: Fix tunnel so it doesn't depend on self.sock state.
self._tunnel()
# Mark this connection as not reusable
self.auto_open = 0
def connect(self):
conn = self._new_conn()
self._prepare_conn(conn)
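# Hedged usage sketch (host and options are illustrative): the keyword layer
# added in __init__ lets callers pass socket options straight through, e.g.
#
#     conn = HTTPConnection('example.org', 80, socket_options=
#                           HTTPConnection.default_socket_options +
#                           [(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)])
#     conn.request('GET', '/')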
class HTTPSConnection(HTTPConnection):
default_port = port_by_scheme['https']
def __init__(self, host, port=None, key_file=None, cert_file=None,
strict=None, timeout=socket._GLOBAL_DEFAULT_TIMEOUT, **kw):
HTTPConnection.__init__(self, host, port, strict=strict,
timeout=timeout, **kw)
self.key_file = key_file
self.cert_file = cert_file
# Required property for Google AppEngine 1.9.0 which otherwise causes
# HTTPS requests to go out as HTTP. (See Issue #356)
self._protocol = 'https'
def connect(self):
conn = self._new_conn()
self._prepare_conn(conn)
self.sock = ssl.wrap_socket(conn, self.key_file, self.cert_file)
class VerifiedHTTPSConnection(HTTPSConnection):
"""
Based on httplib.HTTPSConnection but wraps the socket with
SSL certification.
"""
cert_reqs = None
ca_certs = None
ssl_version = None
assert_fingerprint = None
def set_cert(self, key_file=None, cert_file=None,
cert_reqs=None, ca_certs=None,
assert_hostname=None, assert_fingerprint=None):
self.key_file = key_file
self.cert_file = cert_file
self.cert_reqs = cert_reqs
self.ca_certs = ca_certs
self.assert_hostname = assert_hostname
self.assert_fingerprint = assert_fingerprint
def connect(self):
# Add certificate verification
conn = self._new_conn()
resolved_cert_reqs = resolve_cert_reqs(self.cert_reqs)
resolved_ssl_version = resolve_ssl_version(self.ssl_version)
hostname = self.host
if getattr(self, '_tunnel_host', None):
# _tunnel_host was added in Python 2.6.3
# (See: http://hg.python.org/cpython/rev/0f57b30a152f)
self.sock = conn
# Calls self._set_hostport(), so self.host is
# self._tunnel_host below.
self._tunnel()
# Mark this connection as not reusable
self.auto_open = 0
# Override the host with the one we're requesting data from.
hostname = self._tunnel_host
is_time_off = datetime.date.today() < RECENT_DATE
if is_time_off:
warnings.warn((
'System time is way off (before {0}). This will probably '
'lead to SSL verification errors').format(RECENT_DATE),
SystemTimeWarning
)
# Wrap socket using verification with the root certs in
# trusted_root_certs
self.sock = ssl_wrap_socket(conn, self.key_file, self.cert_file,
cert_reqs=resolved_cert_reqs,
ca_certs=self.ca_certs,
server_hostname=hostname,
ssl_version=resolved_ssl_version)
if self.assert_fingerprint:
assert_fingerprint(self.sock.getpeercert(binary_form=True),
self.assert_fingerprint)
elif resolved_cert_reqs != ssl.CERT_NONE \
and self.assert_hostname is not False:
cert = self.sock.getpeercert()
if not cert.get('subjectAltName', ()):
warnings.warn((
'Certificate has no `subjectAltName`, falling back to check for a `commonName` for now. '
'This feature is being removed by major browsers and deprecated by RFC 2818. '
'(See https://github.com/shazow/urllib3/issues/497 for details.)'),
SecurityWarning
)
match_hostname(cert, self.assert_hostname or hostname)
self.is_verified = (resolved_cert_reqs == ssl.CERT_REQUIRED
or self.assert_fingerprint is not None)
if ssl:
# Make a copy for testing.
UnverifiedHTTPSConnection = HTTPSConnection
HTTPSConnection = VerifiedHTTPSConnection
else:
HTTPSConnection = DummyConnection
| mit |
bgarnaat/codewars_katas | src/python/6kyu/string_average/test_string_average.py | 1 | 1185 | """
TEST CASES:
Test.describe("Basic tests")
Test.assert_equals(average_string("zero nine five two"), "four")
Test.assert_equals(average_string("four six two three"), "three")
Test.assert_equals(average_string("one two three four five"), "three")
Test.assert_equals(average_string("five four"), "four")
Test.assert_equals(average_string("zero zero zero zero zero"), "zero")
Test.assert_equals(average_string("one one eight one"), "two")
Test.assert_equals(average_string("one"), "one")
Test.assert_equals(average_string(""), "n/a")
Test.assert_equals(average_string("ten"), "n/a")
Test.assert_equals(average_string("pippi"), "n/a")
"""
import pytest
TEST_CASES = [
("zero nine five two", "four"),
("four six two three", "three"),
("one two three four five", "three"),
("five four", "four"),
("zero zero zero zero zero", "zero"),
("one one eight one", "two"),
("one", "one"),
("", "n/a"),
("ten", "n/a"),
("pippi", "n/a"),
]
@pytest.mark.parametrize('test_input, test_output', TEST_CASES)
def test_string_average(test_input, test_output):
from string_average import average_string
assert average_string(test_input) == test_output
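# Hedged solution sketch (the real string_average module is not shown here;
# this is one way to satisfy the tests above): map number words to digits,
# floor the mean, and map back, returning "n/a" for anything unparseable.
#
#     WORDS = ['zero', 'one', 'two', 'three', 'four',
#              'five', 'six', 'seven', 'eight', 'nine']
#
#     def average_string(s):
#         try:
#             nums = [WORDS.index(w) for w in s.split()]
#             return WORDS[sum(nums) // len(nums)]
#         except (ValueError, ZeroDivisionError):
#             return 'n/a'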
| mit |
DigitalSlideArchive/HistomicsTK | histomicstk/features/compute_intensity_features.py | 1 | 5874 | """Compute intensity features in labeled image."""
import numpy as np
import pandas as pd
import scipy.stats
from skimage.measure import regionprops
def compute_intensity_features(
im_label, im_intensity, num_hist_bins=10,
rprops=None, feature_list=None):
"""Calculate intensity features from an intensity image.
Parameters
----------
im_label : array_like
A labeled mask image wherein intensity of a pixel is the ID of the
object it belongs to. Non-zero values are considered to be foreground
objects.
im_intensity : array_like
Intensity image.
num_hist_bins: int, optional
Number of bins used to compute the intensity histogram of an object.
The histogram is used to compute the energy and entropy features. Default is 10.
rprops : output of skimage.measure.regionprops, optional
rprops = skimage.measure.regionprops(im_label). If rprops is not
passed, it will be computed internally, which increases the
computation time.
feature_list : list, default is None
list of intensity features to return.
If none, all intensity features are returned.
Returns
-------
fdata: pandas.DataFrame
A pandas dataframe containing the intensity features listed below for
each object/label.
Notes
-----
List of intensity features computed by this function:
Intensity.Min : float
Minimum intensity of object pixels.
Intensity.Max : float
Maximum intensity of object pixels.
Intensity.Mean : float
Mean intensity of object pixels
Intensity.Median : float
Median intensity of object pixels
Intensity.MeanMedianDiff : float
Difference between mean and median intensities of object pixels.
Intensity.Std : float
Standard deviation of the intensities of object pixels
Intensity.IQR: float
Inter-quartile range of the intensities of object pixels
Intensity.MAD: float
Median absolute deviation of the intensities of object pixels
Intensity.Skewness : float
Skewness of the intensities of object pixels. Value is 0 when all
intensity values are equal.
Intensity.Kurtosis : float
Kurtosis of the intensities of object pixels. Value is -3 when all
values are equal.
Intensity.HistEnergy : float
Energy of the intensity histogram of object pixels
Intensity.HistEntropy : float
Entropy of the intensity histogram of object pixels.
References
----------
.. [#] Daniel Zwillinger and Stephen Kokoska. "CRC standard probability
and statistics tables and formulae," Crc Press, 1999.
"""
default_feature_list = [
'Intensity.Min',
'Intensity.Max',
'Intensity.Mean',
'Intensity.Median',
'Intensity.MeanMedianDiff',
'Intensity.Std',
'Intensity.IQR',
'Intensity.MAD',
'Intensity.Skewness',
'Intensity.Kurtosis',
'Intensity.HistEnergy',
'Intensity.HistEntropy',
]
# List of feature names
if feature_list is None:
feature_list = default_feature_list
else:
assert all(j in default_feature_list for j in feature_list), \
"Some feature names are not recognized."
# compute object properties if not provided
if rprops is None:
rprops = regionprops(im_label)
# create pandas data frame containing the features for each object
numFeatures = len(feature_list)
numLabels = len(rprops)
fdata = pd.DataFrame(np.zeros((numLabels, numFeatures)),
columns=feature_list)
# conditionally execute calculations if x in the features list
def _conditional_execution(feature, func, *args, **kwargs):
if feature in feature_list:
fdata.at[i, feature] = func(*args, **kwargs)
def _return_input(x):
return x
for i in range(numLabels):
# get intensities of object pixels
pixelIntensities = np.sort(
im_intensity[rprops[i].coords[:, 0], rprops[i].coords[:, 1]]
)
# simple descriptors
meanIntensity = np.mean(pixelIntensities)
medianIntensity = np.median(pixelIntensities)
_conditional_execution('Intensity.Min', np.min, pixelIntensities)
_conditional_execution('Intensity.Max', np.max, pixelIntensities)
_conditional_execution('Intensity.Mean', _return_input, meanIntensity)
_conditional_execution(
'Intensity.Median', _return_input, medianIntensity)
_conditional_execution(
'Intensity.MeanMedianDiff', _return_input,
meanIntensity - medianIntensity)
_conditional_execution('Intensity.Std', np.std, pixelIntensities)
_conditional_execution(
'Intensity.Skewness', scipy.stats.skew, pixelIntensities)
_conditional_execution(
'Intensity.Kurtosis', scipy.stats.kurtosis, pixelIntensities)
# inter-quartile range
_conditional_execution(
'Intensity.IQR', scipy.stats.iqr, pixelIntensities)
# median absolute deviation
_conditional_execution(
'Intensity.MAD', np.median,
np.abs(pixelIntensities - medianIntensity))
# histogram-based features
if any(j in feature_list for j in [
'Intensity.HistEntropy', 'Intensity.HistEnergy']):
# compute intensity histogram
hist, bins = np.histogram(pixelIntensities, bins=num_hist_bins)
prob = hist/np.sum(hist, dtype=np.float32)
# entropy and energy
_conditional_execution(
'Intensity.HistEntropy', scipy.stats.entropy, prob)
_conditional_execution('Intensity.HistEnergy', np.sum, prob**2)
return fdata
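# --- Hedged usage sketch (added for illustration; not part of the original
# module). The tiny synthetic mask, intensity image and feature subset below
# are arbitrary choices, not fixtures shipped with HistomicsTK.
if __name__ == '__main__':
    im_label = np.zeros((6, 6), dtype=int)
    im_label[1:3, 1:3] = 1   # object 1
    im_label[3:5, 3:5] = 2   # object 2
    im_intensity = np.arange(36, dtype=float).reshape(6, 6)
    fdata = compute_intensity_features(
        im_label, im_intensity,
        feature_list=['Intensity.Min', 'Intensity.Max', 'Intensity.Mean'])
    print(fdata)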
| apache-2.0 |
Tigerwhit4/taiga-back | taiga/projects/custom_attributes/serializers.py | 18 | 5454 | # Copyright (C) 2015 Andrey Antukh <[email protected]>
# Copyright (C) 2015 Jesús Espino <[email protected]>
# Copyright (C) 2015 David Barragán <[email protected]>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.apps import apps
from django.utils.translation import ugettext_lazy as _
from taiga.base.fields import JsonField
from taiga.base.api.serializers import ValidationError
from taiga.base.api.serializers import ModelSerializer
from . import models
######################################################
# Custom Attribute Serializer
#######################################################
class BaseCustomAttributeSerializer(ModelSerializer):
class Meta:
read_only_fields = ('id',)
exclude = ('created_date', 'modified_date')
def _validate_integrity_between_project_and_name(self, attrs, source):
"""
Check the name is not duplicated in the project. Check when:
- create a new one
- update the name
- update the project (move to another project)
"""
data_id = attrs.get("id", None)
data_name = attrs.get("name", None)
data_project = attrs.get("project", None)
if self.object:
data_id = data_id or self.object.id
data_name = data_name or self.object.name
data_project = data_project or self.object.project
model = self.Meta.model
qs = (model.objects.filter(project=data_project, name=data_name)
.exclude(id=data_id))
if qs.exists():
            raise ValidationError(_("One with the same name already exists."))
return attrs
def validate_name(self, attrs, source):
return self._validate_integrity_between_project_and_name(attrs, source)
def validate_project(self, attrs, source):
return self._validate_integrity_between_project_and_name(attrs, source)
class UserStoryCustomAttributeSerializer(BaseCustomAttributeSerializer):
class Meta(BaseCustomAttributeSerializer.Meta):
model = models.UserStoryCustomAttribute
class TaskCustomAttributeSerializer(BaseCustomAttributeSerializer):
class Meta(BaseCustomAttributeSerializer.Meta):
model = models.TaskCustomAttribute
class IssueCustomAttributeSerializer(BaseCustomAttributeSerializer):
class Meta(BaseCustomAttributeSerializer.Meta):
model = models.IssueCustomAttribute
######################################################
# Custom Attributes Values Serializer
#######################################################
class BaseCustomAttributesValuesSerializer(ModelSerializer):
attributes_values = JsonField(source="attributes_values", label="attributes values")
_custom_attribute_model = None
_container_field = None
class Meta:
exclude = ("id",)
def validate_attributes_values(self, attrs, source):
# values must be a dict
data_values = attrs.get("attributes_values", None)
if self.object:
data_values = (data_values or self.object.attributes_values)
if type(data_values) is not dict:
raise ValidationError(_("Invalid content. It must be {\"key\": \"value\",...}"))
# Values keys must be in the container object project
data_container = attrs.get(self._container_field, None)
if data_container:
project_id = data_container.project_id
elif self.object:
project_id = getattr(self.object, self._container_field).project_id
else:
project_id = None
values_ids = list(data_values.keys())
qs = self._custom_attribute_model.objects.filter(project=project_id,
id__in=values_ids)
if qs.count() != len(values_ids):
            raise ValidationError(_("It contains invalid custom fields."))
return attrs
class UserStoryCustomAttributesValuesSerializer(BaseCustomAttributesValuesSerializer):
_custom_attribute_model = models.UserStoryCustomAttribute
_container_model = "userstories.UserStory"
_container_field = "user_story"
class Meta(BaseCustomAttributesValuesSerializer.Meta):
model = models.UserStoryCustomAttributesValues
class TaskCustomAttributesValuesSerializer(BaseCustomAttributesValuesSerializer, ModelSerializer):
_custom_attribute_model = models.TaskCustomAttribute
_container_field = "task"
class Meta(BaseCustomAttributesValuesSerializer.Meta):
model = models.TaskCustomAttributesValues
class IssueCustomAttributesValuesSerializer(BaseCustomAttributesValuesSerializer, ModelSerializer):
_custom_attribute_model = models.IssueCustomAttribute
_container_field = "issue"
class Meta(BaseCustomAttributesValuesSerializer.Meta):
model = models.IssueCustomAttributesValues
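# --- Hedged illustration (added; not part of the original module) ------------
# The attributes_values JSON field is expected to be a flat dict keyed by the
# id of a custom attribute defined in the same project, e.g.
#     {"1": "high", "2": 42}
# (the ids and values here are invented). validate_attributes_values rejects
# payloads whose keys reference custom attributes that do not exist or that
# belong to another project.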
| agpl-3.0 |
cirruscluster/cirruscluster | cirruscluster/ext/ansible/inventory/host.py | 1 | 2025 | # (c) 2012, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import cirruscluster.ext.ansible.constants as C
class Host(object):
''' a single ansible host '''
__slots__ = [ 'name', 'vars', 'groups' ]
def __init__(self, name=None, port=None):
self.name = name
self.vars = {}
self.groups = []
if port and port != C.DEFAULT_REMOTE_PORT:
self.set_variable('ansible_ssh_port', int(port))
if self.name is None:
raise Exception("host name is required")
def add_group(self, group):
self.groups.append(group)
def set_variable(self, key, value):
self.vars[key]=value
def get_groups(self):
groups = {}
for g in self.groups:
groups[g.name] = g
ancestors = g.get_ancestors()
for a in ancestors:
groups[a.name] = a
return groups.values()
def get_variables(self):
results = {}
groups = self.get_groups()
for group in sorted(groups, key=lambda g: g.depth):
results.update(group.get_variables())
results.update(self.vars)
results['inventory_hostname'] = self.name
results['inventory_hostname_short'] = self.name.split('.')[0]
results['group_names'] = sorted([ g.name for g in groups if g.name != 'all'])
return results
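# --- Hedged usage sketch (added; not part of the original module). The host
# name, port and variable below are invented; group handling is omitted because
# it needs Group objects from the inventory package.
if __name__ == '__main__':
    h = Host(name='web01.example.com', port=2222)
    h.set_variable('ansible_user', 'deploy')
    print h.get_variables()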
| mit |
balle/chaosmap | lib/cymruwhois.py | 1 | 7940 | #!/usr/bin/env python
import socket
import errno
try :
import memcache
HAVE_MEMCACHE = True
except ImportError:
HAVE_MEMCACHE = False
def iterwindow(l, slice=50):
"""Generate sublists from an iterator
>>> list(iterwindow(iter(range(10)),11))
[[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]]
>>> list(iterwindow(iter(range(10)),9))
[[0, 1, 2, 3, 4, 5, 6, 7, 8], [9]]
>>> list(iterwindow(iter(range(10)),5))
[[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]
>>> list(iterwindow(iter(range(10)),3))
[[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]]
>>> list(iterwindow(iter(range(10)),1))
[[0], [1], [2], [3], [4], [5], [6], [7], [8], [9]]
"""
assert(slice > 0)
a=[]
for x in l:
if len(a) >= slice :
yield a
a=[]
a.append(x)
if a:
yield a
class record:
def __init__(self, asn, ip, prefix, cc, owner):
def fix(x):
x = x.strip()
if x == "NA":
return None
return str(x.decode('ascii','ignore'))
self.asn = fix(asn)
self.ip = fix(ip)
self.prefix = fix(prefix)
self.cc = fix(cc)
self.owner = fix(owner)
def __str__(self):
return "%-10s %-16s %-16s %s '%s'" % (self.asn, self.ip, self.prefix, self.cc, self.owner)
def __repr__(self):
return "<%s instance: %s|%s|%s|%s|%s>" % (self.__class__, self.asn, self.ip, self.prefix, self.cc, self.owner)
class Client:
"""Python interface to whois.cymru.com
**Usage**
>>> import socket
>>> ip = socket.gethostbyname("www.google.com")
>>> from cymruwhois import Client
>>> c=Client()
>>> r=c.lookup(ip)
>>> print r.asn
15169
>>> print r.owner
GOOGLE - Google Inc.
>>>
>>> ip_ms = socket.gethostbyname("www.microsoft.com")
>>> for r in c.lookupmany([ip, ip_ms]):
... print r.owner
GOOGLE - Google Inc.
MICROSOFT-CORP---MSN-AS-BLOCK - Microsoft Corp
"""
KEY_FMT = "cymruwhois:ip:%s"
def __init__(self, host="whois.cymru.com", port=43, memcache_host='localhost:11211'):
self.host=host
self.port=port
self._connected=False
self.c = None
if HAVE_MEMCACHE and memcache_host:
self.c = memcache.Client([memcache_host])
def _connect(self):
self.socket=socket.socket(socket.AF_INET,socket.SOCK_STREAM)
self.socket.settimeout(5.0)
self.socket.connect((self.host,self.port))
self.socket.settimeout(1.0)
self.file = self.socket.makefile()
def _sendline(self, line):
self.file.write(line + "\r\n")
self.file.flush()
def _readline(self):
return self.file.readline()
def _disconnect(self):
self.file.close()
self.socket.close()
def read_and_discard(self):
self.socket.setblocking(0)
try :
try :
self.file.read(1024)
except socket.error, e:
if e.args[0]!=errno.EAGAIN:
raise
finally:
self.socket.setblocking(1)
def _begin(self):
"""Explicitly connect and send BEGIN to start the lookup process"""
self._connect()
self._sendline("BEGIN")
self._readline() #discard the message "Bulk mode; one IP per line. [2005-08-02 18:54:55 GMT]"
self._sendline("PREFIX")
self._sendline("COUNTRYCODE")
self._sendline("NOTRUNC")
self._connected=True
def disconnect(self):
"""Explicitly send END to stop the lookup process and disconnect"""
if not self._connected: return
self._sendline("END")
self._disconnect()
self._connected=False
def get_cached(self, ips):
if not self.c:
return {}
keys = [self.KEY_FMT % ip for ip in ips]
vals = self.c.get_multi(keys)
#convert cymruwhois:ip:1.2.3.4 into just 1.2.3.4
return dict((k.split(":")[-1], v) for k,v in vals.items())
def cache(self, r):
if not self.c:
return
self.c.set(self.KEY_FMT % r.ip, r, 60*60*6)
def lookup(self, ip):
"""Look up a single address. This function should not be called in
        a loop; call lookupmany instead."""
return list(self.lookupmany([ip]))[0]
def lookupmany(self, ips):
"""Look up many ip addresses"""
ips = [str(ip).strip() for ip in ips]
for batch in iterwindow(ips, 100):
cached = self.get_cached(batch)
not_cached = [ip for ip in batch if not cached.get(ip)]
#print "cached:%d not_cached:%d" % (len(cached), len(not_cached))
if not_cached:
for rec in self._lookupmany_raw(not_cached):
cached[rec.ip] = rec
for ip in batch:
if ip in cached:
yield cached[ip]
def lookupmany_dict(self, ips):
"""Look up many ip addresses, returning a dictionary of ip -> record"""
ips = set(ips)
return dict((r.ip, r) for r in self.lookupmany(ips))
def _lookupmany_raw(self, ips):
"""Do a look up for some ips"""
if not self._connected:
self._begin()
ips = set(ips)
for ip in ips:
self._sendline(ip)
need = len(ips)
last = None
while need:
result=self._readline()
if 'Error: no ASN or IP match on line' in result:
need -=1
continue
parts=result.split("|")
r=record(*parts)
#check for multiple records being returned for a single IP
#in this case, just skip any extra records
if last and r.ip == last.ip:
continue
self.cache(r)
yield r
last = r
need -=1
#skip any trailing records that might have been caused by multiple records for the last ip
self.read_and_discard()
#backwards compatibility
lookerupper = Client
def lookup_stdin():
from optparse import OptionParser
import fileinput
parser = OptionParser(usage = "usage: %prog [options] [files]")
parser.add_option("-d", "--delim", dest="delim", action="store", default=None,
help="delimiter to use instead of justified")
parser.add_option("-f", "--fields", dest="fields", action="append",
help="comma separated fields to include (asn,ip,prefix,cc,owner)")
if HAVE_MEMCACHE:
parser.add_option("-c", "--cache", dest="cache", action="store", default="localhost:11211",
help="memcache server (default localhost)")
parser.add_option("-n", "--no-cache", dest="cache", action="store_false",
help="don't use memcached")
else:
memcache_host = None
(options, args) = parser.parse_args()
#fix the fields: convert ['a,b','c'] into ['a','b','c'] if needed
fields = []
if options.fields:
for f in options.fields:
fields.extend(f.split(","))
else:
fields = 'asn ip prefix cc owner'.split()
#generate the format string
fieldwidths = {
'asn': 8,
'ip': 15,
'prefix': 18,
'cc': 2,
'owner': 0,
}
if options.delim:
format = options.delim.join("%%(%s)s" % f for f in fields)
else:
format = ' '.join("%%(%s)-%ds" % (f, fieldwidths[f]) for f in fields)
#setup the memcache option
if HAVE_MEMCACHE:
memcache_host = options.cache
if memcache_host and ':' not in memcache_host:
memcache_host += ":11211"
c=Client(memcache_host=memcache_host)
ips = []
for line in fileinput.input(args):
ip=line.strip()
ips.append(ip)
for r in c.lookupmany(ips):
print format % r.__dict__
if __name__ == "__main__":
lookup_stdin()
| gpl-3.0 |
appneta/boto | boto/rds/dbsecuritygroup.py | 185 | 6651 | # Copyright (c) 2009 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Represents an DBSecurityGroup
"""
from boto.ec2.securitygroup import SecurityGroup
class DBSecurityGroup(object):
"""
Represents an RDS database security group
Properties reference available from the AWS documentation at
http://docs.amazonwebservices.com/AmazonRDS/latest/APIReference/API_DeleteDBSecurityGroup.html
:ivar Status: The current status of the security group. Possible values are
[ active, ? ]. Reference documentation lacks specifics of possibilities
:ivar connection: :py:class:`boto.rds.RDSConnection` associated with the current object
:ivar description: The description of the security group
:ivar ec2_groups: List of :py:class:`EC2 Security Group
<boto.ec2.securitygroup.SecurityGroup>` objects that this security
group PERMITS
:ivar ip_ranges: List of :py:class:`boto.rds.dbsecuritygroup.IPRange`
objects (containing CIDR addresses) that this security group PERMITS
:ivar name: Name of the security group
:ivar owner_id: ID of the owner of the security group. Can be 'None'
"""
def __init__(self, connection=None, owner_id=None,
name=None, description=None):
self.connection = connection
self.owner_id = owner_id
self.name = name
self.description = description
self.ec2_groups = []
self.ip_ranges = []
def __repr__(self):
return 'DBSecurityGroup:%s' % self.name
def startElement(self, name, attrs, connection):
if name == 'IPRange':
cidr = IPRange(self)
self.ip_ranges.append(cidr)
return cidr
elif name == 'EC2SecurityGroup':
ec2_grp = EC2SecurityGroup(self)
self.ec2_groups.append(ec2_grp)
return ec2_grp
else:
return None
def endElement(self, name, value, connection):
if name == 'OwnerId':
self.owner_id = value
elif name == 'DBSecurityGroupName':
self.name = value
elif name == 'DBSecurityGroupDescription':
self.description = value
elif name == 'IPRanges':
pass
else:
setattr(self, name, value)
def delete(self):
return self.connection.delete_dbsecurity_group(self.name)
def authorize(self, cidr_ip=None, ec2_group=None):
"""
Add a new rule to this DBSecurity group.
You need to pass in either a CIDR block to authorize or
        an EC2 SecurityGroup.
:type cidr_ip: string
:param cidr_ip: A valid CIDR IP range to authorize
:type ec2_group: :class:`boto.ec2.securitygroup.SecurityGroup`
:param ec2_group: An EC2 security group to authorize
:rtype: bool
:return: True if successful.
"""
if isinstance(ec2_group, SecurityGroup):
group_name = ec2_group.name
group_owner_id = ec2_group.owner_id
else:
group_name = None
group_owner_id = None
return self.connection.authorize_dbsecurity_group(self.name,
cidr_ip,
group_name,
group_owner_id)
def revoke(self, cidr_ip=None, ec2_group=None):
"""
Revoke access to a CIDR range or EC2 SecurityGroup.
You need to pass in either a CIDR block or
an EC2 SecurityGroup from which to revoke access.
:type cidr_ip: string
:param cidr_ip: A valid CIDR IP range to revoke
:type ec2_group: :class:`boto.ec2.securitygroup.SecurityGroup`
:param ec2_group: An EC2 security group to revoke
:rtype: bool
:return: True if successful.
"""
if isinstance(ec2_group, SecurityGroup):
group_name = ec2_group.name
group_owner_id = ec2_group.owner_id
return self.connection.revoke_dbsecurity_group(
self.name,
ec2_security_group_name=group_name,
ec2_security_group_owner_id=group_owner_id)
# Revoking by CIDR IP range
return self.connection.revoke_dbsecurity_group(
self.name, cidr_ip=cidr_ip)
class IPRange(object):
"""
Describes a CIDR address range for use in a DBSecurityGroup
:ivar cidr_ip: IP Address range
"""
def __init__(self, parent=None):
self.parent = parent
self.cidr_ip = None
self.status = None
def __repr__(self):
return 'IPRange:%s' % self.cidr_ip
def startElement(self, name, attrs, connection):
pass
def endElement(self, name, value, connection):
if name == 'CIDRIP':
self.cidr_ip = value
elif name == 'Status':
self.status = value
else:
setattr(self, name, value)
class EC2SecurityGroup(object):
"""
Describes an EC2 security group for use in a DBSecurityGroup
"""
def __init__(self, parent=None):
self.parent = parent
self.name = None
self.owner_id = None
def __repr__(self):
return 'EC2SecurityGroup:%s' % self.name
def startElement(self, name, attrs, connection):
pass
def endElement(self, name, value, connection):
if name == 'EC2SecurityGroupName':
self.name = value
elif name == 'EC2SecurityGroupOwnerId':
self.owner_id = value
else:
setattr(self, name, value)
| mit |
santisiri/popego | envs/ALPHA-POPEGO/lib/python2.5/site-packages/jq-0.1-py2.5.egg/jq/queue/consumerend.py | 1 | 1699 | from twisted.internet.protocol import Protocol, ClientFactory
from twisted.internet import reactor, error
from twisted.python import log
from jq.common import VariablePacketProtocol
import pickle
import functools
class ConsumerClientProtocol(VariablePacketProtocol):
def connectionMade(self):
data = pickle.dumps((self.factory.job.type, self.factory.job.data))
self.sendPacket(data)
def packetRecieved(self, packetData):
error = pickle.loads(packetData)
self.factory.jobDone(error)
self.transport.loseConnection()
class ConsumerClientFactory(ClientFactory):
protocol = ConsumerClientProtocol
def __init__(self, job, callback):
self.job = job
self.callback = callback
def jobDone(self, error):
self.callback(error)
def clientConnectionLost(self, connector, reason):
log.msg('Lost connection. Reason: %s' % reason)
def clientConnectionFailed(self, connector, reason):
log.msg('Connection failed. Reason: %s' % reason)
class JobConsumer(object):
def performJob(job, onFinishClbk):
"""Performs the given Job, and call the onFinishCallback"""
raise NotImplementedError, "Dummy Implementation"
class TwistedJobConsumer(JobConsumer):
def __init__(self, host, port):
self.host = host
self.port = port
def performJob(self, job, onFinishClbk):
callback = functools.partial(onFinishClbk, self, job)
clientFactory = ConsumerClientFactory(job, callback)
reactor.connectTCP(self.host, self.port, clientFactory)
def __repr__(self):
return "<TwistedJobConsumer(host=%s, port=%s)>" % (self.host, self.port)
| bsd-3-clause |
ykim362/mxnet | example/image-classification/fine-tune.py | 38 | 3215 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import argparse
import logging
logging.basicConfig(level=logging.DEBUG)
from common import find_mxnet
from common import data, fit, modelzoo
import mxnet as mx
def get_fine_tune_model(symbol, arg_params, num_classes, layer_name):
"""
symbol: the pre-trained network symbol
arg_params: the argument parameters of the pre-trained model
num_classes: the number of classes for the fine-tune datasets
layer_name: the layer name before the last fully-connected layer
"""
all_layers = symbol.get_internals()
net = all_layers[layer_name+'_output']
net = mx.symbol.FullyConnected(data=net, num_hidden=num_classes, name='fc')
net = mx.symbol.SoftmaxOutput(data=net, name='softmax')
new_args = dict({k:arg_params[k] for k in arg_params if 'fc' not in k})
return (net, new_args)
if __name__ == "__main__":
# parse args
parser = argparse.ArgumentParser(description="fine-tune a dataset",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
train = fit.add_fit_args(parser)
data.add_data_args(parser)
aug = data.add_data_aug_args(parser)
parser.add_argument('--pretrained-model', type=str,
help='the pre-trained model')
parser.add_argument('--layer-before-fullc', type=str, default='flatten0',
help='the name of the layer before the last fullc layer')
    # use fewer augmentations for fine-tuning
data.set_data_aug_level(parser, 1)
    # use a small learning rate and less regularization
parser.set_defaults(image_shape='3,224,224', num_epochs=30,
lr=.01, lr_step_epochs='20', wd=0, mom=0)
args = parser.parse_args()
# load pretrained model
dir_path = os.path.dirname(os.path.realpath(__file__))
(prefix, epoch) = modelzoo.download_model(
args.pretrained_model, os.path.join(dir_path, 'model'))
if prefix is None:
(prefix, epoch) = (args.pretrained_model, args.load_epoch)
sym, arg_params, aux_params = mx.model.load_checkpoint(prefix, epoch)
# remove the last fullc layer
(new_sym, new_args) = get_fine_tune_model(
sym, arg_params, args.num_classes, args.layer_before_fullc)
# train
fit.fit(args = args,
network = new_sym,
data_loader = data.get_rec_iter,
arg_params = new_args,
aux_params = aux_params)
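# --- Hedged usage note (added; not part of the original script). An example
# invocation; the record files, class count and pretrained-model name below are
# invented, and the flag names are assumed to come from the shared fit/data
# argument parsers.
#
#   python fine-tune.py --pretrained-model imagenet11k-resnet-152 \
#       --data-train my_train.rec --data-val my_val.rec \
#       --num-classes 10 --num-examples 5000 --gpus 0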
| apache-2.0 |
molebot/brython | www/src/Lib/encodings/iso8859_10.py | 37 | 13896 | """ Python Character Mapping Codec iso8859_10 generated from 'MAPPINGS/ISO8859/8859-10.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='iso8859-10',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Table
decoding_table = (
'\x00' # 0x00 -> NULL
'\x01' # 0x01 -> START OF HEADING
'\x02' # 0x02 -> START OF TEXT
'\x03' # 0x03 -> END OF TEXT
'\x04' # 0x04 -> END OF TRANSMISSION
'\x05' # 0x05 -> ENQUIRY
'\x06' # 0x06 -> ACKNOWLEDGE
'\x07' # 0x07 -> BELL
'\x08' # 0x08 -> BACKSPACE
'\t' # 0x09 -> HORIZONTAL TABULATION
'\n' # 0x0A -> LINE FEED
'\x0b' # 0x0B -> VERTICAL TABULATION
'\x0c' # 0x0C -> FORM FEED
'\r' # 0x0D -> CARRIAGE RETURN
'\x0e' # 0x0E -> SHIFT OUT
'\x0f' # 0x0F -> SHIFT IN
'\x10' # 0x10 -> DATA LINK ESCAPE
'\x11' # 0x11 -> DEVICE CONTROL ONE
'\x12' # 0x12 -> DEVICE CONTROL TWO
'\x13' # 0x13 -> DEVICE CONTROL THREE
'\x14' # 0x14 -> DEVICE CONTROL FOUR
'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
'\x16' # 0x16 -> SYNCHRONOUS IDLE
'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
'\x18' # 0x18 -> CANCEL
'\x19' # 0x19 -> END OF MEDIUM
'\x1a' # 0x1A -> SUBSTITUTE
'\x1b' # 0x1B -> ESCAPE
'\x1c' # 0x1C -> FILE SEPARATOR
'\x1d' # 0x1D -> GROUP SEPARATOR
'\x1e' # 0x1E -> RECORD SEPARATOR
'\x1f' # 0x1F -> UNIT SEPARATOR
' ' # 0x20 -> SPACE
'!' # 0x21 -> EXCLAMATION MARK
'"' # 0x22 -> QUOTATION MARK
'#' # 0x23 -> NUMBER SIGN
'$' # 0x24 -> DOLLAR SIGN
'%' # 0x25 -> PERCENT SIGN
'&' # 0x26 -> AMPERSAND
"'" # 0x27 -> APOSTROPHE
'(' # 0x28 -> LEFT PARENTHESIS
')' # 0x29 -> RIGHT PARENTHESIS
'*' # 0x2A -> ASTERISK
'+' # 0x2B -> PLUS SIGN
',' # 0x2C -> COMMA
'-' # 0x2D -> HYPHEN-MINUS
'.' # 0x2E -> FULL STOP
'/' # 0x2F -> SOLIDUS
'0' # 0x30 -> DIGIT ZERO
'1' # 0x31 -> DIGIT ONE
'2' # 0x32 -> DIGIT TWO
'3' # 0x33 -> DIGIT THREE
'4' # 0x34 -> DIGIT FOUR
'5' # 0x35 -> DIGIT FIVE
'6' # 0x36 -> DIGIT SIX
'7' # 0x37 -> DIGIT SEVEN
'8' # 0x38 -> DIGIT EIGHT
'9' # 0x39 -> DIGIT NINE
':' # 0x3A -> COLON
';' # 0x3B -> SEMICOLON
'<' # 0x3C -> LESS-THAN SIGN
'=' # 0x3D -> EQUALS SIGN
'>' # 0x3E -> GREATER-THAN SIGN
'?' # 0x3F -> QUESTION MARK
'@' # 0x40 -> COMMERCIAL AT
'A' # 0x41 -> LATIN CAPITAL LETTER A
'B' # 0x42 -> LATIN CAPITAL LETTER B
'C' # 0x43 -> LATIN CAPITAL LETTER C
'D' # 0x44 -> LATIN CAPITAL LETTER D
'E' # 0x45 -> LATIN CAPITAL LETTER E
'F' # 0x46 -> LATIN CAPITAL LETTER F
'G' # 0x47 -> LATIN CAPITAL LETTER G
'H' # 0x48 -> LATIN CAPITAL LETTER H
'I' # 0x49 -> LATIN CAPITAL LETTER I
'J' # 0x4A -> LATIN CAPITAL LETTER J
'K' # 0x4B -> LATIN CAPITAL LETTER K
'L' # 0x4C -> LATIN CAPITAL LETTER L
'M' # 0x4D -> LATIN CAPITAL LETTER M
'N' # 0x4E -> LATIN CAPITAL LETTER N
'O' # 0x4F -> LATIN CAPITAL LETTER O
'P' # 0x50 -> LATIN CAPITAL LETTER P
'Q' # 0x51 -> LATIN CAPITAL LETTER Q
'R' # 0x52 -> LATIN CAPITAL LETTER R
'S' # 0x53 -> LATIN CAPITAL LETTER S
'T' # 0x54 -> LATIN CAPITAL LETTER T
'U' # 0x55 -> LATIN CAPITAL LETTER U
'V' # 0x56 -> LATIN CAPITAL LETTER V
'W' # 0x57 -> LATIN CAPITAL LETTER W
'X' # 0x58 -> LATIN CAPITAL LETTER X
'Y' # 0x59 -> LATIN CAPITAL LETTER Y
'Z' # 0x5A -> LATIN CAPITAL LETTER Z
'[' # 0x5B -> LEFT SQUARE BRACKET
'\\' # 0x5C -> REVERSE SOLIDUS
']' # 0x5D -> RIGHT SQUARE BRACKET
'^' # 0x5E -> CIRCUMFLEX ACCENT
'_' # 0x5F -> LOW LINE
'`' # 0x60 -> GRAVE ACCENT
'a' # 0x61 -> LATIN SMALL LETTER A
'b' # 0x62 -> LATIN SMALL LETTER B
'c' # 0x63 -> LATIN SMALL LETTER C
'd' # 0x64 -> LATIN SMALL LETTER D
'e' # 0x65 -> LATIN SMALL LETTER E
'f' # 0x66 -> LATIN SMALL LETTER F
'g' # 0x67 -> LATIN SMALL LETTER G
'h' # 0x68 -> LATIN SMALL LETTER H
'i' # 0x69 -> LATIN SMALL LETTER I
'j' # 0x6A -> LATIN SMALL LETTER J
'k' # 0x6B -> LATIN SMALL LETTER K
'l' # 0x6C -> LATIN SMALL LETTER L
'm' # 0x6D -> LATIN SMALL LETTER M
'n' # 0x6E -> LATIN SMALL LETTER N
'o' # 0x6F -> LATIN SMALL LETTER O
'p' # 0x70 -> LATIN SMALL LETTER P
'q' # 0x71 -> LATIN SMALL LETTER Q
'r' # 0x72 -> LATIN SMALL LETTER R
's' # 0x73 -> LATIN SMALL LETTER S
't' # 0x74 -> LATIN SMALL LETTER T
'u' # 0x75 -> LATIN SMALL LETTER U
'v' # 0x76 -> LATIN SMALL LETTER V
'w' # 0x77 -> LATIN SMALL LETTER W
'x' # 0x78 -> LATIN SMALL LETTER X
'y' # 0x79 -> LATIN SMALL LETTER Y
'z' # 0x7A -> LATIN SMALL LETTER Z
'{' # 0x7B -> LEFT CURLY BRACKET
'|' # 0x7C -> VERTICAL LINE
'}' # 0x7D -> RIGHT CURLY BRACKET
'~' # 0x7E -> TILDE
'\x7f' # 0x7F -> DELETE
'\x80' # 0x80 -> <control>
'\x81' # 0x81 -> <control>
'\x82' # 0x82 -> <control>
'\x83' # 0x83 -> <control>
'\x84' # 0x84 -> <control>
'\x85' # 0x85 -> <control>
'\x86' # 0x86 -> <control>
'\x87' # 0x87 -> <control>
'\x88' # 0x88 -> <control>
'\x89' # 0x89 -> <control>
'\x8a' # 0x8A -> <control>
'\x8b' # 0x8B -> <control>
'\x8c' # 0x8C -> <control>
'\x8d' # 0x8D -> <control>
'\x8e' # 0x8E -> <control>
'\x8f' # 0x8F -> <control>
'\x90' # 0x90 -> <control>
'\x91' # 0x91 -> <control>
'\x92' # 0x92 -> <control>
'\x93' # 0x93 -> <control>
'\x94' # 0x94 -> <control>
'\x95' # 0x95 -> <control>
'\x96' # 0x96 -> <control>
'\x97' # 0x97 -> <control>
'\x98' # 0x98 -> <control>
'\x99' # 0x99 -> <control>
'\x9a' # 0x9A -> <control>
'\x9b' # 0x9B -> <control>
'\x9c' # 0x9C -> <control>
'\x9d' # 0x9D -> <control>
'\x9e' # 0x9E -> <control>
'\x9f' # 0x9F -> <control>
'\xa0' # 0xA0 -> NO-BREAK SPACE
'\u0104' # 0xA1 -> LATIN CAPITAL LETTER A WITH OGONEK
'\u0112' # 0xA2 -> LATIN CAPITAL LETTER E WITH MACRON
'\u0122' # 0xA3 -> LATIN CAPITAL LETTER G WITH CEDILLA
'\u012a' # 0xA4 -> LATIN CAPITAL LETTER I WITH MACRON
'\u0128' # 0xA5 -> LATIN CAPITAL LETTER I WITH TILDE
'\u0136' # 0xA6 -> LATIN CAPITAL LETTER K WITH CEDILLA
'\xa7' # 0xA7 -> SECTION SIGN
'\u013b' # 0xA8 -> LATIN CAPITAL LETTER L WITH CEDILLA
'\u0110' # 0xA9 -> LATIN CAPITAL LETTER D WITH STROKE
'\u0160' # 0xAA -> LATIN CAPITAL LETTER S WITH CARON
'\u0166' # 0xAB -> LATIN CAPITAL LETTER T WITH STROKE
'\u017d' # 0xAC -> LATIN CAPITAL LETTER Z WITH CARON
'\xad' # 0xAD -> SOFT HYPHEN
'\u016a' # 0xAE -> LATIN CAPITAL LETTER U WITH MACRON
'\u014a' # 0xAF -> LATIN CAPITAL LETTER ENG
'\xb0' # 0xB0 -> DEGREE SIGN
'\u0105' # 0xB1 -> LATIN SMALL LETTER A WITH OGONEK
'\u0113' # 0xB2 -> LATIN SMALL LETTER E WITH MACRON
'\u0123' # 0xB3 -> LATIN SMALL LETTER G WITH CEDILLA
'\u012b' # 0xB4 -> LATIN SMALL LETTER I WITH MACRON
'\u0129' # 0xB5 -> LATIN SMALL LETTER I WITH TILDE
'\u0137' # 0xB6 -> LATIN SMALL LETTER K WITH CEDILLA
'\xb7' # 0xB7 -> MIDDLE DOT
'\u013c' # 0xB8 -> LATIN SMALL LETTER L WITH CEDILLA
'\u0111' # 0xB9 -> LATIN SMALL LETTER D WITH STROKE
'\u0161' # 0xBA -> LATIN SMALL LETTER S WITH CARON
'\u0167' # 0xBB -> LATIN SMALL LETTER T WITH STROKE
'\u017e' # 0xBC -> LATIN SMALL LETTER Z WITH CARON
'\u2015' # 0xBD -> HORIZONTAL BAR
'\u016b' # 0xBE -> LATIN SMALL LETTER U WITH MACRON
'\u014b' # 0xBF -> LATIN SMALL LETTER ENG
'\u0100' # 0xC0 -> LATIN CAPITAL LETTER A WITH MACRON
'\xc1' # 0xC1 -> LATIN CAPITAL LETTER A WITH ACUTE
'\xc2' # 0xC2 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
'\xc3' # 0xC3 -> LATIN CAPITAL LETTER A WITH TILDE
'\xc4' # 0xC4 -> LATIN CAPITAL LETTER A WITH DIAERESIS
'\xc5' # 0xC5 -> LATIN CAPITAL LETTER A WITH RING ABOVE
'\xc6' # 0xC6 -> LATIN CAPITAL LETTER AE
'\u012e' # 0xC7 -> LATIN CAPITAL LETTER I WITH OGONEK
'\u010c' # 0xC8 -> LATIN CAPITAL LETTER C WITH CARON
'\xc9' # 0xC9 -> LATIN CAPITAL LETTER E WITH ACUTE
'\u0118' # 0xCA -> LATIN CAPITAL LETTER E WITH OGONEK
'\xcb' # 0xCB -> LATIN CAPITAL LETTER E WITH DIAERESIS
'\u0116' # 0xCC -> LATIN CAPITAL LETTER E WITH DOT ABOVE
'\xcd' # 0xCD -> LATIN CAPITAL LETTER I WITH ACUTE
'\xce' # 0xCE -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
'\xcf' # 0xCF -> LATIN CAPITAL LETTER I WITH DIAERESIS
'\xd0' # 0xD0 -> LATIN CAPITAL LETTER ETH (Icelandic)
'\u0145' # 0xD1 -> LATIN CAPITAL LETTER N WITH CEDILLA
'\u014c' # 0xD2 -> LATIN CAPITAL LETTER O WITH MACRON
'\xd3' # 0xD3 -> LATIN CAPITAL LETTER O WITH ACUTE
'\xd4' # 0xD4 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
'\xd5' # 0xD5 -> LATIN CAPITAL LETTER O WITH TILDE
'\xd6' # 0xD6 -> LATIN CAPITAL LETTER O WITH DIAERESIS
'\u0168' # 0xD7 -> LATIN CAPITAL LETTER U WITH TILDE
'\xd8' # 0xD8 -> LATIN CAPITAL LETTER O WITH STROKE
'\u0172' # 0xD9 -> LATIN CAPITAL LETTER U WITH OGONEK
'\xda' # 0xDA -> LATIN CAPITAL LETTER U WITH ACUTE
'\xdb' # 0xDB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
'\xdc' # 0xDC -> LATIN CAPITAL LETTER U WITH DIAERESIS
'\xdd' # 0xDD -> LATIN CAPITAL LETTER Y WITH ACUTE
'\xde' # 0xDE -> LATIN CAPITAL LETTER THORN (Icelandic)
'\xdf' # 0xDF -> LATIN SMALL LETTER SHARP S (German)
'\u0101' # 0xE0 -> LATIN SMALL LETTER A WITH MACRON
'\xe1' # 0xE1 -> LATIN SMALL LETTER A WITH ACUTE
'\xe2' # 0xE2 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
'\xe3' # 0xE3 -> LATIN SMALL LETTER A WITH TILDE
'\xe4' # 0xE4 -> LATIN SMALL LETTER A WITH DIAERESIS
'\xe5' # 0xE5 -> LATIN SMALL LETTER A WITH RING ABOVE
'\xe6' # 0xE6 -> LATIN SMALL LETTER AE
'\u012f' # 0xE7 -> LATIN SMALL LETTER I WITH OGONEK
'\u010d' # 0xE8 -> LATIN SMALL LETTER C WITH CARON
'\xe9' # 0xE9 -> LATIN SMALL LETTER E WITH ACUTE
'\u0119' # 0xEA -> LATIN SMALL LETTER E WITH OGONEK
'\xeb' # 0xEB -> LATIN SMALL LETTER E WITH DIAERESIS
'\u0117' # 0xEC -> LATIN SMALL LETTER E WITH DOT ABOVE
'\xed' # 0xED -> LATIN SMALL LETTER I WITH ACUTE
'\xee' # 0xEE -> LATIN SMALL LETTER I WITH CIRCUMFLEX
'\xef' # 0xEF -> LATIN SMALL LETTER I WITH DIAERESIS
'\xf0' # 0xF0 -> LATIN SMALL LETTER ETH (Icelandic)
'\u0146' # 0xF1 -> LATIN SMALL LETTER N WITH CEDILLA
'\u014d' # 0xF2 -> LATIN SMALL LETTER O WITH MACRON
'\xf3' # 0xF3 -> LATIN SMALL LETTER O WITH ACUTE
'\xf4' # 0xF4 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
'\xf5' # 0xF5 -> LATIN SMALL LETTER O WITH TILDE
'\xf6' # 0xF6 -> LATIN SMALL LETTER O WITH DIAERESIS
'\u0169' # 0xF7 -> LATIN SMALL LETTER U WITH TILDE
'\xf8' # 0xF8 -> LATIN SMALL LETTER O WITH STROKE
'\u0173' # 0xF9 -> LATIN SMALL LETTER U WITH OGONEK
'\xfa' # 0xFA -> LATIN SMALL LETTER U WITH ACUTE
'\xfb' # 0xFB -> LATIN SMALL LETTER U WITH CIRCUMFLEX
'\xfc' # 0xFC -> LATIN SMALL LETTER U WITH DIAERESIS
'\xfd' # 0xFD -> LATIN SMALL LETTER Y WITH ACUTE
'\xfe' # 0xFE -> LATIN SMALL LETTER THORN (Icelandic)
'\u0138' # 0xFF -> LATIN SMALL LETTER KRA
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
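# --- Hedged usage sketch (added; not part of the original module). The codec
# is normally reached through codecs.lookup('iso8859-10'); the direct calls
# below just exercise this module's tables on an arbitrary sample string.
if __name__ == '__main__':
    sample = '\u0104\u017e'   # A WITH OGONEK, z WITH CARON
    encoded, _ = Codec().encode(sample)
    assert encoded == b'\xa1\xbc'
    decoded, _ = Codec().decode(encoded)
    assert decoded == sample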
| bsd-3-clause |
debugger87/spark | examples/src/main/python/mllib/elementwise_product_example.py | 106 | 1756 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
from pyspark import SparkContext
# $example on$
from pyspark.mllib.feature import ElementwiseProduct
from pyspark.mllib.linalg import Vectors
# $example off$
if __name__ == "__main__":
sc = SparkContext(appName="ElementwiseProductExample") # SparkContext
# $example on$
data = sc.textFile("data/mllib/kmeans_data.txt")
parsedData = data.map(lambda x: [float(t) for t in x.split(" ")])
# Create weight vector.
transformingVector = Vectors.dense([0.0, 1.0, 2.0])
transformer = ElementwiseProduct(transformingVector)
# Batch transform
transformedData = transformer.transform(parsedData)
# Single-row transform
transformedData2 = transformer.transform(parsedData.first())
# $example off$
print("transformedData:")
for each in transformedData.collect():
print(each)
print("transformedData2:")
for each in transformedData2:
print(each)
sc.stop()
| apache-2.0 |
origingod/hug | tests/module_fake.py | 10 | 1042 | """Fake HUG API module usable for testing importation of modules"""
import hug
@hug.directive(apply_globally=False)
def my_directive(default=None, **kwargs):
'''for testing'''
return default
@hug.default_input_format('application/made-up')
def made_up_formatter(data):
'''for testing'''
return data
@hug.default_output_format()
def output_formatter(data):
'''for testing'''
return hug.output_format.json(data)
@hug.get()
def made_up_api(hug_my_directive=True):
'''for testing'''
return hug_my_directive
@hug.directive(apply_globally=True)
def my_directive_global(default=None, **kwargs):
'''for testing'''
return default
@hug.default_input_format('application/made-up', apply_globally=True)
def made_up_formatter_global(data):
'''for testing'''
return data
@hug.default_output_format(apply_globally=True)
def output_formatter_global(data):
'''for testing'''
return hug.output_format.json(data)
@hug.request_middleware()
def handle_request(request, response):
return
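# --- Hedged usage sketch (added; not part of the original module). The test
# suite imports this module to exercise hug's API loading, but the endpoint is
# also callable through hug's test client; the expected value follows from
# my_directive's default, assuming hug.test.get behaves as in hug's own tests.
#
#   import hug, module_fake
#   assert hug.test.get(module_fake, '/made_up_api').data is True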
| mit |
xmaruto/mcord | xos/tosca/resources/hpchealthcheck.py | 3 | 1219 | import importlib
import os
import pdb
import sys
import tempfile
sys.path.append("/opt/tosca")
from translator.toscalib.tosca_template import ToscaTemplate
import pdb
from services.hpc.models import HpcHealthCheck, HpcService
from xosresource import XOSResource
class XOSHpcHealthCheck(XOSResource):
provides = "tosca.nodes.HpcHealthCheck"
xos_model = HpcHealthCheck
name_field = None
copyin_props = ("kind", "resource_name", "result_contains")
def get_xos_args(self, throw_exception=True):
args = super(XOSHpcHealthCheck, self).get_xos_args()
service_name = self.get_requirement("tosca.relationships.MemberOfService", throw_exception=throw_exception)
if service_name:
args["hpcService"] = self.get_xos_object(HpcService, throw_exception=throw_exception, name=service_name)
return args
def get_existing_objs(self):
args = self.get_xos_args(throw_exception=True)
return list( HpcHealthCheck.objects.filter(hpcService=args["hpcService"], kind=args["kind"], resource_name=args["resource_name"]) )
def postprocess(self, obj):
pass
def can_delete(self, obj):
        return super(XOSHpcHealthCheck, self).can_delete(obj)
| apache-2.0 |
datalogics-robb/scons | src/engine/SCons/cpp.py | 2 | 18411 | #
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
__doc__ = """
SCons C Pre-Processor module
"""
import SCons.compat
import os
import re
import string
#
# First "subsystem" of regular expressions that we set up:
#
# Stuff to turn the C preprocessor directives in a file's contents into
# a list of tuples that we can process easily.
#
# A table of regular expressions that fetch the arguments from the rest of
# a C preprocessor line. Different directives have different arguments
# that we want to fetch, using the regular expressions to which the lists
# of preprocessor directives map.
cpp_lines_dict = {
# Fetch the rest of a #if/#elif/#ifdef/#ifndef/#import/#include/
# #include_next line as one argument.
('if', 'elif', 'ifdef', 'ifndef', 'import', 'include', 'include_next',)
: '\s+(.+)',
# We don't care what comes after a #else or #endif line.
('else', 'endif',) : '',
# Fetch three arguments from a #define line:
# 1) The #defined keyword.
# 2) The optional parentheses and arguments (if it's a function-like
# macro, '' if it's not).
# 3) The expansion value.
('define',) : '\s+([_A-Za-z][_A-Za-z0-9_]+)(\([^)]*\))?\s*(.*)',
# Fetch the #undefed keyword from a #undef line.
('undef',) : '\s+([_A-Za-z][A-Za-z0-9_]+)',
}
# Create a table that maps each individual C preprocessor directive to
# the corresponding compiled regular expression that fetches the arguments
# we care about.
Table = {}
for op_list, expr in cpp_lines_dict.items():
e = re.compile(expr)
for op in op_list:
Table[op] = e
del e
del op
del op_list
# Create a list of the expressions we'll use to match all of the
# preprocessor directives. These are the same as the directives
# themselves *except* that we must use a negative lookahead assertion
# when matching "if" so it doesn't match the "if" in "ifdef."
override = {
'if' : 'if(?!def)',
}
l = map(lambda x, o=override: o.get(x, x), Table.keys())
# Turn the list of expressions into one big honkin' regular expression
# that will match all the preprocessor lines at once. This will return
# a list of tuples, one for each preprocessor line. The preprocessor
# directive will be the first element in each tuple, and the rest of
# the line will be the second element.
e = '^\s*#\s*(' + string.join(l, '|') + ')(.*)$'
# And last but not least, compile the expression.
CPP_Expression = re.compile(e, re.M)
#
# Second "subsystem" of regular expressions that we set up:
#
# Stuff to translate a C preprocessor expression (as found on a #if or
# #elif line) into an equivalent Python expression that we can eval().
#
# A dictionary that maps the C representation of Boolean operators
# to their Python equivalents.
CPP_to_Python_Ops_Dict = {
'!' : ' not ',
'!=' : ' != ',
'&&' : ' and ',
'||' : ' or ',
'?' : ' and ',
':' : ' or ',
'\r' : '',
}
CPP_to_Python_Ops_Sub = lambda m, d=CPP_to_Python_Ops_Dict: d[m.group(0)]
# We have to sort the keys by length so that longer expressions
# come *before* shorter expressions--in particular, "!=" must
# come before "!" in the alternation. Without this, the Python
# re module, as late as version 2.2.2, empirically matches the
# "!" in "!=" first, instead of finding the longest match.
# What's up with that?
l = CPP_to_Python_Ops_Dict.keys()
l.sort(lambda a, b: cmp(len(b), len(a)))
# Turn the list of keys into one regular expression that will allow us
# to substitute all of the operators at once.
expr = string.join(map(re.escape, l), '|')
# ...and compile the expression.
CPP_to_Python_Ops_Expression = re.compile(expr)
# A separate list of expressions to be evaluated and substituted
# sequentially, not all at once.
CPP_to_Python_Eval_List = [
['defined\s+(\w+)', '__dict__.has_key("\\1")'],
['defined\s*\((\w+)\)', '__dict__.has_key("\\1")'],
['/\*.*\*/', ''],
['/\*.*', ''],
['//.*', ''],
['(0x[0-9A-Fa-f]*)[UL]+', '\\1L'],
]
# Replace the string representations of the regular expressions in the
# list with compiled versions.
for l in CPP_to_Python_Eval_List:
l[0] = re.compile(l[0])
# Wrap up all of the above into a handy function.
def CPP_to_Python(s):
"""
Converts a C pre-processor expression into an equivalent
Python expression that can be evaluated.
"""
s = CPP_to_Python_Ops_Expression.sub(CPP_to_Python_Ops_Sub, s)
for expr, repl in CPP_to_Python_Eval_List:
s = expr.sub(repl, s)
return s
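# Added illustration (not part of the original module): a typical guard
# expression becomes an eval()-able Python string, e.g.
#   CPP_to_Python('defined(FOO) && BAR >= 2')
# yields, modulo whitespace,
#   '__dict__.has_key("FOO") and BAR >= 2'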
del expr
del l
del override
class FunctionEvaluator:
"""
Handles delayed evaluation of a #define function call.
"""
def __init__(self, name, args, expansion):
"""
Squirrels away the arguments and expansion value of a #define
macro function for later evaluation when we must actually expand
a value that uses it.
"""
self.name = name
self.args = function_arg_separator.split(args)
self.expansion = string.split(expansion, '##')
def __call__(self, *values):
"""
Evaluates the expansion of a #define macro function called
with the specified values.
"""
if len(self.args) != len(values):
raise ValueError, "Incorrect number of arguments to `%s'" % self.name
# Create a dictionary that maps the macro arguments to the
# corresponding values in this "call." We'll use this when we
# eval() the expansion so that arguments will get expanded to
# the right values.
locals = {}
for k, v in zip(self.args, values):
locals[k] = v
parts = []
for s in self.expansion:
if not s in self.args:
s = repr(s)
parts.append(s)
statement = string.join(parts, ' + ')
return eval(statement, globals(), locals)
# Find line continuations.
line_continuations = re.compile('\\\\\r?\n')
# Search for a "function call" macro on an expansion. Returns the
# two-tuple of the "function" name itself, and a string containing the
# arguments within the call parentheses.
function_name = re.compile('(\S+)\(([^)]*)\)')
# Split a string containing comma-separated function call arguments into
# the separate arguments.
function_arg_separator = re.compile(',\s*')
class PreProcessor:
"""
The main workhorse class for handling C pre-processing.
"""
def __init__(self, current='.', cpppath=[], dict={}, all=0):
global Table
self.searchpath = {
'"' : [current] + cpppath,
'<' : cpppath + [current],
}
# Initialize our C preprocessor namespace for tracking the
# values of #defined keywords. We use this namespace to look
# for keywords on #ifdef/#ifndef lines, and to eval() the
# expressions on #if/#elif lines (after massaging them from C to
# Python).
self.cpp_namespace = dict.copy()
self.cpp_namespace['__dict__'] = self.cpp_namespace
if all:
self.do_include = self.all_include
# For efficiency, a dispatch table maps each C preprocessor
# directive (#if, #define, etc.) to the method that should be
        # called when we see it. We accommodate state changes (#if,
# #ifdef, #ifndef) by pushing the current dispatch table on a
# stack and changing what method gets called for each relevant
# directive we might see next at this level (#else, #elif).
# #endif will simply pop the stack.
d = {}
for op in Table.keys():
d[op] = getattr(self, 'do_' + op)
self.default_table = d
# Controlling methods.
def tupleize(self, contents):
"""
Turns the contents of a file into a list of easily-processed
tuples describing the CPP lines in the file.
The first element of each tuple is the line's preprocessor
directive (#if, #include, #define, etc., minus the initial '#').
The remaining elements are specific to the type of directive, as
pulled apart by the regular expression.
"""
global CPP_Expression, Table
contents = line_continuations.sub('', contents)
cpp_tuples = CPP_Expression.findall(contents)
return map(lambda m, t=Table:
(m[0],) + t[m[0]].match(m[1]).groups(),
cpp_tuples)
def __call__(self, contents):
"""
Pre-processes a file contents.
        This is the main entry point, which processes the contents and
        returns the accumulated list of include files found along the way.
"""
self.stack = []
self.dispatch_table = self.default_table.copy()
self.tuples = self.tupleize(contents)
self.result = []
while self.tuples:
t = self.tuples.pop(0)
# Uncomment to see the list of tuples being processed (e.g.,
# to validate the CPP lines are being translated correctly).
#print t
self.dispatch_table[t[0]](t)
return self.result
# Dispatch table stack manipulation methods.
def save(self):
"""
Pushes the current dispatch table on the stack and re-initializes
the current dispatch table to the default.
"""
self.stack.append(self.dispatch_table)
self.dispatch_table = self.default_table.copy()
def restore(self):
"""
Pops the previous dispatch table off the stack and makes it the
current one.
"""
try: self.dispatch_table = self.stack.pop()
except IndexError: pass
# Utility methods.
def do_nothing(self, t):
"""
Null method for when we explicitly want the action for a
specific preprocessor directive to do nothing.
"""
pass
def eval_expression(self, t):
"""
Evaluates a C preprocessor expression.
This is done by converting it to a Python equivalent and
eval()ing it in the C preprocessor namespace we use to
track #define values.
"""
t = CPP_to_Python(string.join(t[1:]))
try: return eval(t, self.cpp_namespace)
except (NameError, TypeError): return 0
def find_include_file(self, t):
"""
Finds the #include file for a given preprocessor tuple.
"""
fname = t[2]
for d in self.searchpath[t[1]]:
f = os.path.join(d, fname)
if os.path.isfile(f):
return f
return None
# Start and stop processing include lines.
def start_handling_includes(self, t=None):
"""
Causes the PreProcessor object to start processing #import,
#include and #include_next lines.
This method will be called when a #if, #ifdef, #ifndef or #elif
evaluates True, or when we reach the #else in a #if, #ifdef,
#ifndef or #elif block where a condition already evaluated
False.
"""
d = self.dispatch_table
d['import'] = self.do_import
d['include'] = self.do_include
d['include_next'] = self.do_include
def stop_handling_includes(self, t=None):
"""
Causes the PreProcessor object to stop processing #import,
#include and #include_next lines.
This method will be called when a #if, #ifdef, #ifndef or #elif
evaluates False, or when we reach the #else in a #if, #ifdef,
#ifndef or #elif block where a condition already evaluated True.
"""
d = self.dispatch_table
d['import'] = self.do_nothing
d['include'] = self.do_nothing
d['include_next'] = self.do_nothing
# Default methods for handling all of the preprocessor directives.
# (Note that what actually gets called for a given directive at any
# point in time is really controlled by the dispatch_table.)
def _do_if_else_condition(self, condition):
"""
Common logic for evaluating the conditions on #if, #ifdef and
#ifndef lines.
"""
self.save()
d = self.dispatch_table
if condition:
self.start_handling_includes()
d['elif'] = self.stop_handling_includes
d['else'] = self.stop_handling_includes
else:
self.stop_handling_includes()
d['elif'] = self.do_elif
d['else'] = self.start_handling_includes
def do_ifdef(self, t):
"""
Default handling of a #ifdef line.
"""
self._do_if_else_condition(self.cpp_namespace.has_key(t[1]))
def do_ifndef(self, t):
"""
Default handling of a #ifndef line.
"""
self._do_if_else_condition(not self.cpp_namespace.has_key(t[1]))
def do_if(self, t):
"""
Default handling of a #if line.
"""
self._do_if_else_condition(self.eval_expression(t))
def do_elif(self, t):
"""
Default handling of a #elif line.
"""
d = self.dispatch_table
if self.eval_expression(t):
self.start_handling_includes()
d['elif'] = self.stop_handling_includes
d['else'] = self.stop_handling_includes
def do_else(self, t):
"""
Default handling of a #else line.
"""
pass
def do_endif(self, t):
"""
Default handling of a #endif line.
"""
self.restore()
def do_define(self, t):
"""
Default handling of a #define line.
"""
_, name, args, expansion = t
try:
expansion = int(expansion)
except (TypeError, ValueError):
pass
if args:
evaluator = FunctionEvaluator(name, args[1:-1], expansion)
self.cpp_namespace[name] = evaluator
else:
self.cpp_namespace[name] = expansion
def do_undef(self, t):
"""
Default handling of a #undef line.
"""
try: del self.cpp_namespace[t[1]]
except KeyError: pass
def do_import(self, t):
"""
Default handling of a #import line.
"""
# XXX finish this -- maybe borrow/share logic from do_include()...?
pass
def do_include(self, t):
"""
Default handling of a #include line.
"""
t = self.resolve_include(t)
include_file = self.find_include_file(t)
if include_file:
#print "include_file =", include_file
self.result.append(include_file)
contents = open(include_file).read()
new_tuples = self.tupleize(contents)
self.tuples[:] = new_tuples + self.tuples
# Date: Tue, 22 Nov 2005 20:26:09 -0500
# From: Stefan Seefeld <[email protected]>
#
# By the way, #include_next is not the same as #include. The difference
# being that #include_next starts its search in the path following the
# path that let to the including file. In other words, if your system
# include paths are ['/foo', '/bar'], and you are looking at a header
# '/foo/baz.h', it might issue an '#include_next <baz.h>' which would
# correctly resolve to '/bar/baz.h' (if that exists), but *not* see
# '/foo/baz.h' again. See http://www.delorie.com/gnu/docs/gcc/cpp_11.html
# for more reasoning.
#
# I have no idea in what context 'import' might be used.
# XXX is #include_next really the same as #include ?
do_include_next = do_include
# Utility methods for handling resolution of include files.
def resolve_include(self, t):
"""Resolve a tuple-ized #include line.
This handles recursive expansion of values without "" or <>
surrounding the name until an initial " or < is found, to handle
#include FILE
where FILE is a #define somewhere else.
"""
s = t[1]
while not s[0] in '<"':
#print "s =", s
try:
s = self.cpp_namespace[s]
except KeyError:
m = function_name.search(s)
s = self.cpp_namespace[m.group(1)]
if callable(s):
args = function_arg_separator.split(m.group(2))
s = apply(s, args)
if not s:
return None
return (t[0], s[0], s[1:-1])
def all_include(self, t):
"""
"""
self.result.append(self.resolve_include(t))
class DumbPreProcessor(PreProcessor):
"""A preprocessor that ignores all #if/#elif/#else/#endif directives
and just reports back *all* of the #include files (like the classic
SCons scanner did).
This is functionally equivalent to using a regular expression to
find all of the #include lines, only slower. It exists mainly as
an example of how the main PreProcessor class can be sub-classed
to tailor its behavior.
"""
def __init__(self, *args, **kw):
apply(PreProcessor.__init__, (self,)+args, kw)
d = self.default_table
for func in ['if', 'elif', 'else', 'endif', 'ifdef', 'ifndef']:
            d[func] = self.do_nothing
del __revision__
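# --- Hedged usage sketch (added; not part of the original module). The header
# names below are invented and nothing is read from disk: with all=1 the
# preprocessor reports the resolved #include tuples instead of opening files.
if __name__ == '__main__':
    src = '\n'.join([
        '#define FOO "foo.h"',
        '#include FOO',
        '#include <bar.h>',
        '#ifdef NOT_DEFINED',
        '#include "never.h"',
        '#endif',
    ])
    pp = PreProcessor(all=1)
    print pp(src)   # [('include', '"', 'foo.h'), ('include', '<', 'bar.h')]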
| mit |
petercable/mi-instrument | mi/core/test/test_persistent_store.py | 9 | 11196 | #!/usr/bin/env python
"""
@package mi.core.test.test_persistent_store
@file <git-workspace>/ooi/edex/com.raytheon.uf.ooi.plugin.instrumentagent/utility/edex_static/base/ooi/instruments/mi-instrument/mi/core/test/test_persistent_store.py
@author Johnathon Rusk
@brief Unit tests for PersistentStoreDict module
"""
# Note: Execute via, "nosetests -a UNIT -v mi/core/test/test_persistent_store.py"
__author__ = 'Johnathon Rusk'
__license__ = 'Apache 2.0'
from nose.plugins.attrib import attr
from mi.core.unit_test import MiUnitTest
import sys
from mi.core.persistent_store import PersistentStoreDict
@attr('UNIT', group='mi')
class TestPersistentStoreDict(MiUnitTest):
def setUp(self):
self.UNICODE_KEY = "UNICODE_KEY" # Test 'str' type key
self.UNICODE_VALUES = [u"this is a unicode string", u"this is another unicode string"]
self.INT_KEY = u"INT_KEY"
self.INT_VALUES = [1234, 5678]
self.LONG_KEY = "LONG_KEY" # Test 'str' type key
self.LONG_VALUES = [sys.maxint + 1, sys.maxint + 2]
self.FLOAT_KEY = u"FLOAT_KEY"
self.FLOAT_VALUES = [56.78, 12.34]
self.BOOL_KEY = "BOOL_KEY" # Test 'str' type key
self.BOOL_VALUES = [True, False]
self.DICT_KEY = u"DICT_KEY"
self.DICT_VALUES = [{u"KEY_1":1, u"KEY_2":2, u"KEY_3":3}, {u"KEY_4":4, u"KEY_5":5, u"KEY_6":6}]
self.LIST_KEY = "LIST_KEY" # Test 'str' type key
self.LIST_VALUES = [[1, 2, 3, 4, 5], [6, 7, 8, 9, 0]]
self.persistentStoreDict = PersistentStoreDict("unit_test", "GI01SUMO-00001")
def tearDown(self):
self.persistentStoreDict.clear() # NOTE: This technically assumes the delete functionality works.
def helper_get(self, key, expectedValue, expectedValueType):
self.assertIn(type(key), [str, unicode])
value = self.persistentStoreDict[key]
self.assertIs(type(value), expectedValueType)
self.assertEqual(value, expectedValue)
def helper_set(self, key, value, valueType, shouldAddKey):
self.assertIn(type(key), [str, unicode])
self.assertIs(type(value), valueType)
self.assertIs(type(shouldAddKey), bool)
initialKeyCount = len(self.persistentStoreDict.keys())
self.persistentStoreDict[key] = value
self.assertEqual(len(self.persistentStoreDict.keys()), (initialKeyCount + 1) if shouldAddKey else initialKeyCount)
def helper_del(self, key):
self.assertIn(type(key), [str, unicode])
initialKeyCount = len(self.persistentStoreDict.keys())
del self.persistentStoreDict[key]
self.assertEqual(len(self.persistentStoreDict.keys()), initialKeyCount - 1)
def test_createRecords_success_unicode(self):
self.helper_set(self.UNICODE_KEY, self.UNICODE_VALUES[0], unicode, True)
def test_createRecords_success_int(self):
self.helper_set(self.INT_KEY, self.INT_VALUES[0], int, True)
def test_createRecords_success_long(self):
self.helper_set(self.LONG_KEY, self.LONG_VALUES[0], long, True)
def test_createRecords_success_float(self):
self.helper_set(self.FLOAT_KEY, self.FLOAT_VALUES[0], float, True)
def test_createRecords_success_bool(self):
self.helper_set(self.BOOL_KEY, self.BOOL_VALUES[0], bool, True)
def test_createRecords_success_dict(self):
self.helper_set(self.DICT_KEY, self.DICT_VALUES[0], dict, True)
def test_createRecords_success_list(self):
self.helper_set(self.LIST_KEY, self.LIST_VALUES[0], list, True)
def test_createRecords_fail_badKeyType(self):
key = 0
value = u"this will fail"
self.assertNotIn(type(key), [str, unicode])
self.assertIn(type(value), [unicode, int, long, float, bool, dict, list])
with self.assertRaises(TypeError) as contextManager:
self.persistentStoreDict[key] = value
self.assertEqual(contextManager.exception.args[0], "Key must be of type 'str' or 'unicode'.")
def test_createRecords_fail_badItemType(self):
key = u"this will fail"
value = 2+3j
self.assertIn(type(key), [str, unicode])
self.assertNotIn(type(value), [unicode, int, long, float, bool, dict, list])
with self.assertRaises(TypeError) as contextManager:
self.persistentStoreDict[key] = value
self.assertEqual(contextManager.exception.args[0], "Value must be of type: 'unicode', 'int', 'long', 'float', 'bool', 'dict', or 'list'")
def test_createRecords_fail_badItemType_nested(self):
key = u"this will fail"
value = {u"KEY_1":[1, 2, 3], u"KEY_2":[1+2j, 3+4j, 5+6j]}
self.assertIn(type(key), [str, unicode])
self.assertIn(type(value), [unicode, int, long, float, bool, dict, list])
self.assertNotIn(type(value[u'KEY_2'][0]), [unicode, int, long, float, bool, dict, list])
with self.assertRaises(TypeError) as contextManager:
self.persistentStoreDict[key] = value
self.assertEqual(contextManager.exception.args[0], "Value must be of type: 'unicode', 'int', 'long', 'float', 'bool', 'dict', or 'list'")
def test_getRecords_success_unicode(self):
self.helper_set(self.UNICODE_KEY, self.UNICODE_VALUES[0], unicode, True)
self.helper_get(self.UNICODE_KEY, self.UNICODE_VALUES[0], unicode)
def test_getRecords_success_int(self):
self.helper_set(self.INT_KEY, self.INT_VALUES[0], int, True)
self.helper_get(self.INT_KEY, self.INT_VALUES[0], int)
def test_getRecords_success_long(self):
self.helper_set(self.LONG_KEY, self.LONG_VALUES[0], long, True)
self.helper_get(self.LONG_KEY, self.LONG_VALUES[0], long)
def test_getRecords_success_float(self):
self.helper_set(self.FLOAT_KEY, self.FLOAT_VALUES[0], float, True)
self.helper_get(self.FLOAT_KEY, self.FLOAT_VALUES[0], float)
def test_getRecords_success_bool(self):
self.helper_set(self.BOOL_KEY, self.BOOL_VALUES[0], bool, True)
self.helper_get(self.BOOL_KEY, self.BOOL_VALUES[0], bool)
def test_getRecords_success_dict(self):
self.helper_set(self.DICT_KEY, self.DICT_VALUES[0], dict, True)
self.helper_get(self.DICT_KEY, self.DICT_VALUES[0], dict)
def test_getRecords_success_list(self):
self.helper_set(self.LIST_KEY, self.LIST_VALUES[0], list, True)
self.helper_get(self.LIST_KEY, self.LIST_VALUES[0], list)
def test_getRecords_fail_badKeyType(self):
key = 0
self.assertNotIn(type(key), [str, unicode])
with self.assertRaises(TypeError) as contextManager:
value = self.persistentStoreDict[key]
self.assertEqual(contextManager.exception.args[0], "Key must be of type 'str' or 'unicode'.")
def test_getRecords_fail_keyNotFound(self):
key = u"this will fail"
self.assertIn(type(key), [str, unicode])
with self.assertRaises(KeyError) as contextManager:
value = self.persistentStoreDict[key]
self.assertEqual(contextManager.exception.args[0], "No item found with key: '{0}'".format(key))
def test_updateRecords_success_unicode(self):
self.helper_set(self.UNICODE_KEY, self.UNICODE_VALUES[0], unicode, True)
self.helper_get(self.UNICODE_KEY, self.UNICODE_VALUES[0], unicode)
self.helper_set(self.UNICODE_KEY, self.UNICODE_VALUES[1], unicode, False)
self.helper_get(self.UNICODE_KEY, self.UNICODE_VALUES[1], unicode)
def test_updateRecords_success_int(self):
self.helper_set(self.INT_KEY, self.INT_VALUES[0], int, True)
self.helper_get(self.INT_KEY, self.INT_VALUES[0], int)
self.helper_set(self.INT_KEY, self.INT_VALUES[1], int, False)
self.helper_get(self.INT_KEY, self.INT_VALUES[1], int)
def test_updateRecords_success_long(self):
self.helper_set(self.LONG_KEY, self.LONG_VALUES[0], long, True)
self.helper_get(self.LONG_KEY, self.LONG_VALUES[0], long)
self.helper_set(self.LONG_KEY, self.LONG_VALUES[1], long, False)
self.helper_get(self.LONG_KEY, self.LONG_VALUES[1], long)
def test_updateRecords_success_float(self):
self.helper_set(self.FLOAT_KEY, self.FLOAT_VALUES[0], float, True)
self.helper_get(self.FLOAT_KEY, self.FLOAT_VALUES[0], float)
self.helper_set(self.FLOAT_KEY, self.FLOAT_VALUES[1], float, False)
self.helper_get(self.FLOAT_KEY, self.FLOAT_VALUES[1], float)
def test_updateRecords_success_bool(self):
self.helper_set(self.BOOL_KEY, self.BOOL_VALUES[0], bool, True)
self.helper_get(self.BOOL_KEY, self.BOOL_VALUES[0], bool)
self.helper_set(self.BOOL_KEY, self.BOOL_VALUES[1], bool, False)
self.helper_get(self.BOOL_KEY, self.BOOL_VALUES[1], bool)
def test_updateRecords_success_dict(self):
self.helper_set(self.DICT_KEY, self.DICT_VALUES[0], dict, True)
self.helper_get(self.DICT_KEY, self.DICT_VALUES[0], dict)
self.helper_set(self.DICT_KEY, self.DICT_VALUES[1], dict, False)
self.helper_get(self.DICT_KEY, self.DICT_VALUES[1], dict)
def test_updateRecords_success_list(self):
self.helper_set(self.LIST_KEY, self.LIST_VALUES[0], list, True)
self.helper_get(self.LIST_KEY, self.LIST_VALUES[0], list)
self.helper_set(self.LIST_KEY, self.LIST_VALUES[1], list, False)
self.helper_get(self.LIST_KEY, self.LIST_VALUES[1], list)
def test_removeRecords_success_unicode(self):
self.helper_set(self.UNICODE_KEY, self.UNICODE_VALUES[0], unicode, True)
self.helper_del(self.UNICODE_KEY)
def test_removeRecords_success_int(self):
self.helper_set(self.INT_KEY, self.INT_VALUES[0], int, True)
self.helper_del(self.INT_KEY)
def test_removeRecords_success_long(self):
self.helper_set(self.LONG_KEY, self.LONG_VALUES[0], long, True)
self.helper_del(self.LONG_KEY)
def test_removeRecords_success_float(self):
self.helper_set(self.FLOAT_KEY, self.FLOAT_VALUES[0], float, True)
self.helper_del(self.FLOAT_KEY)
def test_removeRecords_success_bool(self):
self.helper_set(self.BOOL_KEY, self.BOOL_VALUES[0], bool, True)
self.helper_del(self.BOOL_KEY)
def test_removeRecords_success_dict(self):
self.helper_set(self.DICT_KEY, self.DICT_VALUES[0], dict, True)
self.helper_del(self.DICT_KEY)
def test_removeRecords_success_list(self):
self.helper_set(self.LIST_KEY, self.LIST_VALUES[0], list, True)
self.helper_del(self.LIST_KEY)
def test_removeRecords_fail_badKeyType(self):
key = 0
self.assertNotIn(type(key), [str, unicode])
with self.assertRaises(TypeError) as contextManager:
del self.persistentStoreDict[key]
self.assertEqual(contextManager.exception.args[0], "Key must be of type 'str' or 'unicode'.")
def test_removeRecords_fail_keyNotFound(self):
key = u"this will fail"
self.assertIn(type(key), [str, unicode])
with self.assertRaises(KeyError) as contextManager:
del self.persistentStoreDict[key]
self.assertEqual(contextManager.exception.args[0], "No item found with key: '{0}'".format(key))
| bsd-2-clause |
edxnercel/edx-platform | common/lib/chem/chem/chemtools.py | 250 | 10721 | """This module originally includes functions for grading Vsepr problems.
Also, maybe this module is the place for other chemistry-related grading functions. TODO: discuss it.
"""
import json
import unittest
import itertools
def vsepr_parse_user_answer(user_input):
"""
user_input is json generated by vsepr.js from dictionary.
There are must be only two keys in original user_input dictionary: "geometry" and "atoms".
Format: u'{"geometry": "AX3E0","atoms":{"c0": "B","p0": "F","p1": "B","p2": "F"}}'
Order of elements inside "atoms" subdict does not matters.
Return dict from parsed json.
"Atoms" subdict stores positions of atoms in molecule.
General types of positions:
c0 - central atom
p0..pN - peripheral atoms
a0..aN - axial atoms
e0..eN - equatorial atoms
    Each position is a dictionary key, i.e. user_input["atoms"]["c0"] is the central atom and user_input["atoms"]["a0"] is one of the axial atoms.
    Special positions only for AX6 (Octahedral) geometry:
    e10, e11 - a pair of atoms opposite each other across the central atom,
    e20, e21 - a pair of atoms opposite each other across the central atom,
    the e1 and e2 pairs lying crosswise in the equatorial plane.
In user_input["atoms"] may be only 3 set of keys:
(c0,p0..pN),
(c0, a0..aN, e0..eN),
(c0, a0, a1, e10,e11,e20,e21) - if geometry is AX6.
"""
return json.loads(user_input)
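# Illustrative usage sketch (not part of the original module); the input string
# below simply mirrors the format documented in the docstring above.
#
#     raw = u'{"geometry": "AX4E0","atoms":{"c0": "N","p0": "H","p1": "(ep)","p2": "H","p3": "H"}}'
#     parsed = vsepr_parse_user_answer(raw)
#     parsed['geometry']       # -> u'AX4E0'
#     parsed['atoms']['c0']    # -> u'N'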
def vsepr_build_correct_answer(geometry, atoms):
"""
geometry is string.
atoms is dict of atoms with proper positions.
Example:
correct_answer = vsepr_build_correct_answer(geometry="AX4E0", atoms={"c0": "N", "p0": "H", "p1": "(ep)", "p2": "H", "p3": "H"})
returns a dictionary composed from input values:
{'geometry': geometry, 'atoms': atoms}
"""
return {'geometry': geometry, 'atoms': atoms}
def vsepr_grade(user_input, correct_answer, convert_to_peripheral=False):
"""
This function does comparison between user_input and correct_answer.
Comparison is successful if all steps are successful:
1) geometries are equal
2) central atoms (index in dictionary 'c0') are equal
3):
      In the next steps the corresponding subsets of atom positions are compared: equatorial (e0..eN), axial (a0..aN) or peripheral (p0..pN)
If convert_to_peripheral is True, then axial and equatorial positions are converted to peripheral.
This means that user_input from:
"atoms":{"c0": "Br","a0": "test","a1": "(ep)","e10": "H","e11": "(ep)","e20": "H","e21": "(ep)"}}' after parsing to json
is converted to:
{"c0": "Br", "p0": "(ep)", "p1": "test", "p2": "H", "p3": "H", "p4": "(ep)", "p6": "(ep)"}
i.e. aX and eX -> pX
      So if converted, the p subsets are compared;
      if not, the a and e subsets are compared.
If all subsets are equal, grade succeeds.
There is also one special case for AX6 geometry.
      In this case user_input["atoms"] contains special 3-symbol keys: e10, e11, e20, and e21.
Correct answer for this geometry can be of 3 types:
1) c0 and peripheral
2) c0 and axial and equatorial
3) c0 and axial and equatorial-subset-1 (e1X) and equatorial-subset-2 (e2X)
If correct answer is type 1 or 2, then user_input is converted from type 3 to type 2 (or to type 1 if convert_to_peripheral is True)
      If correct_answer is type 3, then we do a special-case comparison. We have 3 sets of atom positions both in user_input and correct_answer: axial, eq-1 and eq-2.
      The answer is correct if these sets are equal for one of the permutations. For example, if:
user_axial = correct_eq-1
user_eq-1 = correct-axial
user_eq-2 = correct-eq-2
"""
if user_input['geometry'] != correct_answer['geometry']:
return False
if user_input['atoms']['c0'] != correct_answer['atoms']['c0']:
return False
if convert_to_peripheral:
# convert user_input from (a,e,e1,e2) to (p)
# correct_answer must be set in (p) using this flag
c0 = user_input['atoms'].pop('c0')
user_input['atoms'] = {'p' + str(i): v for i, v in enumerate(user_input['atoms'].values())}
user_input['atoms']['c0'] = c0
# special case for AX6
if 'e10' in correct_answer['atoms']: # need check e1x, e2x symmetry for AX6..
a_user = {}
a_correct = {}
for ea_position in ['a', 'e1', 'e2']: # collecting positions:
a_user[ea_position] = [v for k, v in user_input['atoms'].items() if k.startswith(ea_position)]
a_correct[ea_position] = [v for k, v in correct_answer['atoms'].items() if k.startswith(ea_position)]
correct = [sorted(a_correct['a'])] + [sorted(a_correct['e1'])] + [sorted(a_correct['e2'])]
for permutation in itertools.permutations(['a', 'e1', 'e2']):
if correct == [sorted(a_user[permutation[0]])] + [sorted(a_user[permutation[1]])] + [sorted(a_user[permutation[2]])]:
return True
return False
else: # no need to check e1x,e2x symmetry - convert them to ex
if 'e10' in user_input['atoms']: # e1x exists, it is AX6.. case
e_index = 0
for k, v in user_input['atoms'].items():
if len(k) == 3: # e1x
del user_input['atoms'][k]
user_input['atoms']['e' + str(e_index)] = v
e_index += 1
# common case
for ea_position in ['p', 'a', 'e']:
# collecting atoms:
a_user = [v for k, v in user_input['atoms'].items() if k.startswith(ea_position)]
a_correct = [v for k, v in correct_answer['atoms'].items() if k.startswith(ea_position)]
# print a_user, a_correct
if len(a_user) != len(a_correct):
return False
if sorted(a_user) != sorted(a_correct):
return False
return True
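# Illustrative sketch (not part of the original module) of the
# convert_to_peripheral relabelling described in the docstring above: an
# input such as
#     {"c0": "Br", "a0": "H", "a1": "(ep)", "e0": "F", "e1": "F"}
# keeps "c0" and renames every other position to p0..pN, e.g.
#     {"c0": "Br", "p0": "H", "p1": "(ep)", "p2": "F", "p3": "F"}
# (the pN numbering depends on dict iteration order, so it is not guaranteed
# to follow the order shown here).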
class Test_Grade(unittest.TestCase):
''' test grade function '''
def test_incorrect_geometry(self):
correct_answer = vsepr_build_correct_answer(geometry="AX4E0", atoms={"c0": "N", "p0": "H", "p1": "(ep)", "p2": "H", "p3": "H"})
user_answer = vsepr_parse_user_answer(u'{"geometry": "AX3E0","atoms":{"c0": "B","p0": "F","p1": "B","p2": "F"}}')
self.assertFalse(vsepr_grade(user_answer, correct_answer))
def test_correct_answer_p(self):
correct_answer = vsepr_build_correct_answer(geometry="AX4E0", atoms={"c0": "N", "p0": "H", "p1": "(ep)", "p2": "H", "p3": "H"})
user_answer = vsepr_parse_user_answer(u'{"geometry": "AX4E0","atoms":{"c0": "N","p0": "H","p1": "(ep)","p2": "H", "p3": "H"}}')
self.assertTrue(vsepr_grade(user_answer, correct_answer))
def test_correct_answer_ae(self):
correct_answer = vsepr_build_correct_answer(geometry="AX6E0", atoms={"c0": "Br", "a0": "test", "a1": "(ep)", "e0": "H", "e1": "H", "e2": "(ep)", "e3": "(ep)"})
user_answer = vsepr_parse_user_answer(u'{"geometry": "AX6E0","atoms":{"c0": "Br","a0": "test","a1": "(ep)","e10": "H","e11": "H","e20": "(ep)","e21": "(ep)"}}')
self.assertTrue(vsepr_grade(user_answer, correct_answer))
def test_correct_answer_ae_convert_to_p_but_input_not_in_p(self):
correct_answer = vsepr_build_correct_answer(geometry="AX6E0", atoms={"c0": "Br", "a0": "(ep)", "a1": "test", "e0": "H", "e1": "H", "e2": "(ep)", "e3": "(ep)"})
user_answer = vsepr_parse_user_answer(u'{"geometry": "AX6E0","atoms":{"c0": "Br","a0": "test","a1": "(ep)","e10": "H","e11": "(ep)","e20": "H","e21": "(ep)"}}')
self.assertFalse(vsepr_grade(user_answer, correct_answer, convert_to_peripheral=True))
def test_correct_answer_ae_convert_to_p(self):
correct_answer = vsepr_build_correct_answer(geometry="AX6E0", atoms={"c0": "Br", "p0": "(ep)", "p1": "test", "p2": "H", "p3": "H", "p4": "(ep)", "p6": "(ep)"})
user_answer = vsepr_parse_user_answer(u'{"geometry": "AX6E0","atoms":{"c0": "Br","a0": "test","a1": "(ep)","e10": "H","e11": "(ep)","e20": "H","e21": "(ep)"}}')
self.assertTrue(vsepr_grade(user_answer, correct_answer, convert_to_peripheral=True))
def test_correct_answer_e1e2_in_a(self):
correct_answer = vsepr_build_correct_answer(geometry="AX6E0", atoms={"c0": "Br", "a0": "(ep)", "a1": "(ep)", "e10": "H", "e11": "H", "e20": "H", "e21": "H"})
user_answer = vsepr_parse_user_answer(u'{"geometry": "AX6E0","atoms":{"c0": "Br","a0": "(ep)","a1": "(ep)","e10": "H","e11": "H","e20": "H","e21": "H"}}')
self.assertTrue(vsepr_grade(user_answer, correct_answer))
def test_correct_answer_e1e2_in_e1(self):
correct_answer = vsepr_build_correct_answer(geometry="AX6E0", atoms={"c0": "Br", "a0": "(ep)", "a1": "(ep)", "e10": "H", "e11": "H", "e20": "H", "e21": "H"})
user_answer = vsepr_parse_user_answer(u'{"geometry": "AX6E0","atoms":{"c0": "Br","a0": "H","a1": "H","e10": "(ep)","e11": "(ep)","e20": "H","e21": "H"}}')
self.assertTrue(vsepr_grade(user_answer, correct_answer))
def test_correct_answer_e1e2_in_e2(self):
correct_answer = vsepr_build_correct_answer(geometry="AX6E0", atoms={"c0": "Br", "a0": "(ep)", "a1": "(ep)", "e10": "H", "e11": "H", "e20": "H", "e21": "H"})
user_answer = vsepr_parse_user_answer(u'{"geometry": "AX6E0","atoms":{"c0": "Br","a0": "H","a1": "H","e10": "H","e11": "H","e20": "(ep)","e21": "(ep)"}}')
self.assertTrue(vsepr_grade(user_answer, correct_answer))
def test_incorrect_answer_e1e2(self):
correct_answer = vsepr_build_correct_answer(geometry="AX6E0", atoms={"c0": "Br", "a0": "(ep)", "a1": "(ep)", "e10": "H", "e11": "H", "e20": "H", "e21": "H"})
user_answer = vsepr_parse_user_answer(u'{"geometry": "AX6E0","atoms":{"c0": "Br","a0": "H","a1": "H","e10": "(ep)","e11": "H","e20": "H","e21": "(ep)"}}')
self.assertFalse(vsepr_grade(user_answer, correct_answer))
def test_incorrect_c0(self):
correct_answer = vsepr_build_correct_answer(geometry="AX6E0", atoms={"c0": "Br", "a0": "(ep)", "a1": "test", "e0": "H", "e1": "H", "e2": "H", "e3": "(ep)"})
user_answer = vsepr_parse_user_answer(u'{"geometry": "AX6E0","atoms":{"c0": "H","a0": "test","a1": "(ep)","e0": "H","e1": "H","e2": "(ep)","e3": "H"}}')
self.assertFalse(vsepr_grade(user_answer, correct_answer))
def suite():
testcases = [Test_Grade]
suites = []
for testcase in testcases:
suites.append(unittest.TestLoader().loadTestsFromTestCase(testcase))
return unittest.TestSuite(suites)
if __name__ == "__main__":
unittest.TextTestRunner(verbosity=2).run(suite())
| agpl-3.0 |