repo_name (string, 6 to 100 chars) | path (string, 4 to 294 chars) | copies (string, 1 to 5 chars) | size (string, 4 to 6 chars) | content (string, 606 to 896k chars) | license (string, 15 classes) | var_hash (int64, -9,223,186,179,200,150,000 to 9,223,291,175B) | doc_hash (int64, -9,223,304,365,658,930,000 to 9,223,309,051B) | line_mean (float64, 3.5 to 99.8) | line_max (int64, 13 to 999) | alpha_frac (float64, 0.25 to 0.97) | autogenerated (bool, 1 class) |
---|---|---|---|---|---|---|---|---|---|---|---|
Varriount/Colliberation
|
libs/construct/lib/container.py
|
5
|
6148
|
"""
Various containers.
"""
def recursion_lock(retval, lock_name = "__recursion_lock__"):
def decorator(func):
def wrapper(self, *args, **kw):
if getattr(self, lock_name, False):
return retval
setattr(self, lock_name, True)
try:
return func(self, *args, **kw)
finally:
setattr(self, lock_name, False)
wrapper.__name__ = func.__name__
return wrapper
return decorator
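# A minimal usage sketch (Node is hypothetical, not part of this module):
# recursion_lock guards a method against runaway recursion through
# self-referencing objects by returning `retval` for re-entrant calls, e.g.
#
#   class Node(object):
#       @recursion_lock("<cycle>")
#       def describe(self):
#           return "Node(%s)" % self.child.describe()
#
# A Node whose `child` is the node itself renders as "Node(<cycle>)" instead
# of overflowing the stack.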
class Container(dict):
"""
A generic container of attributes.
Containers are the common way to express parsed data.
"""
__slots__ = ["__keys_order__"]
def __init__(self, **kw):
object.__setattr__(self, "__keys_order__", [])
for k, v in kw.items():
self[k] = v
def __getattr__(self, name):
try:
return self[name]
except KeyError:
raise AttributeError(name)
def __setitem__(self, key, val):
if key not in self:
self.__keys_order__.append(key)
dict.__setitem__(self, key, val)
def __delitem__(self, key):
dict.__delitem__(self, key)
self.__keys_order__.remove(key)
__delattr__ = __delitem__
__setattr__ = __setitem__
def clear(self):
dict.clear(self)
del self.__keys_order__[:]
def pop(self, key, *default):
val = dict.pop(self, key, *default)
self.__keys_order__.remove(key)
return val
def popitem(self):
k, v = dict.popitem(self)
self.__keys_order__.remove(k)
return k, v
def update(self, seq, **kw):
if hasattr(seq, "keys"):
for k in seq.keys():
self[k] = seq[k]
else:
for k, v in seq:
self[k] = v
dict.update(self, kw)
def copy(self):
inst = self.__class__()
inst.update(self.iteritems())
return inst
__update__ = update
__copy__ = copy
def __iter__(self):
return iter(self.__keys_order__)
iterkeys = __iter__
def itervalues(self):
return (self[k] for k in self.__keys_order__)
def iteritems(self):
return ((k, self[k]) for k in self.__keys_order__)
def keys(self):
return self.__keys_order__
def values(self):
return list(self.itervalues())
def items(self):
return list(self.iteritems())
def __repr__(self):
return "%s(%s)" % (self.__class__.__name__, dict.__repr__(self))
@recursion_lock("<...>")
def __pretty_str__(self, nesting = 1, indentation = " "):
attrs = []
ind = indentation * nesting
for k, v in self.iteritems():
if not k.startswith("_"):
text = [ind, k, " = "]
if hasattr(v, "__pretty_str__"):
text.append(v.__pretty_str__(nesting + 1, indentation))
else:
text.append(repr(v))
attrs.append("".join(text))
if not attrs:
return "%s()" % (self.__class__.__name__,)
attrs.insert(0, self.__class__.__name__ + ":")
return "\n".join(attrs)
__str__ = __pretty_str__
class FlagsContainer(Container):
"""
A container providing pretty-printing for flags.
Only set flags are displayed.
"""
@recursion_lock("<...>")
def __pretty_str__(self, nesting = 1, indentation = " "):
attrs = []
ind = indentation * nesting
for k in self.keys():
v = self[k]
if not k.startswith("_") and v:
attrs.append(ind + k)
if not attrs:
return "%s()" % (self.__class__.__name__,)
attrs.insert(0, self.__class__.__name__+ ":")
return "\n".join(attrs)
class ListContainer(list):
"""
A container for lists.
"""
__slots__ = ["__recursion_lock__"]
def __str__(self):
return self.__pretty_str__()
@recursion_lock("[...]")
def __pretty_str__(self, nesting = 1, indentation = " "):
if not self:
return "[]"
ind = indentation * nesting
lines = ["["]
for elem in self:
lines.append("\n")
lines.append(ind)
if hasattr(elem, "__pretty_str__"):
lines.append(elem.__pretty_str__(nesting + 1, indentation))
else:
lines.append(repr(elem))
lines.append("\n")
lines.append(indentation * (nesting - 1))
lines.append("]")
return "".join(lines)
class LazyContainer(object):
__slots__ = ["subcon", "stream", "pos", "context", "_value"]
def __init__(self, subcon, stream, pos, context):
self.subcon = subcon
self.stream = stream
self.pos = pos
self.context = context
self._value = NotImplemented
def __eq__(self, other):
try:
return self._value == other._value
except AttributeError:
return False
def __ne__(self, other):
return not (self == other)
def __str__(self):
return self.__pretty_str__()
def __pretty_str__(self, nesting = 1, indentation = " "):
if self._value is NotImplemented:
text = "<unread>"
elif hasattr(self._value, "__pretty_str__"):
text = self._value.__pretty_str__(nesting, indentation)
else:
text = str(self._value)
return "%s: %s" % (self.__class__.__name__, text)
def read(self):
self.stream.seek(self.pos)
return self.subcon._parse(self.stream, self.context)
def dispose(self):
self.subcon = None
self.stream = None
self.context = None
self.pos = None
def _get_value(self):
if self._value is NotImplemented:
self._value = self.read()
return self._value
value = property(_get_value)
has_value = property(lambda self: self._value is not NotImplemented)
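# A minimal sketch of the intended lazy-read flow (subcon, stream, pos and
# context are whatever the enclosing parser supplies; names are illustrative):
#
#   lazy = LazyContainer(subcon, stream, pos, context)
#   lazy.has_value   # False until the value is first accessed
#   lazy.value       # seeks to `pos`, parses once via subcon._parse, caches
#   lazy.has_value   # True; subsequent accesses reuse the cached _value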
if __name__ == "__main__":
c = Container(x=5)
c.y = 8
c.z = 9
c.w = 10
c.foo = 5
print (c)
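# A short illustrative extension of the demo (assumed behaviour follows from
# the methods above): keys keep insertion order and nested containers are
# pretty-printed with indentation.
print (c.keys())                      # ['x', 'y', 'z', 'w', 'foo']
nested = Container(outer=Container(inner=1))
print (nested)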
|
mit
| -176,546,604,266,093,000 | -3,125,382,908,221,788,700 | 26.446429 | 75 | 0.500488 | false |
alexryndin/ambari
|
ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/ECS/package/scripts/ecs_client.py
|
3
|
3758
|
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import sys
import os
from resource_management import *
class ECSClient(Script):
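"""
Ambari client component for ECS: install() pulls packages and then calls
configure(), which writes core-site.xml, hdfs-site.xml and hadoop-env.sh;
status() raises ClientComponentHasNoStatus because a client has no running
process to report on.
"""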
def install(self, env):
self.install_packages(env)
self.configure(env)
def configure(self, env):
self.setup_config(env)
self.setup_hadoop_env(env)
def createdirs(self, env):
self.create_dirs(env)
def status(self, env):
raise ClientComponentHasNoStatus()
def setup_config(self, env):
import params
env.set_params(params)
stackversion = params.stack_version_unformatted
XmlConfig("core-site.xml",
conf_dir=params.hadoop_conf_dir,
configurations=params.config['configurations']['core-site'],
configuration_attributes=params.config['configuration_attributes']['core-site'],
owner=params.hdfs_user,
group=params.user_group,
only_if=format("ls {hadoop_conf_dir}"))
XmlConfig("hdfs-site.xml",
conf_dir=params.hadoop_conf_dir,
configurations=params.config['configurations']['hdfs-site'],
configuration_attributes=params.config['configuration_attributes']['hdfs-site'],
owner=params.hdfs_user,
group=params.user_group,
only_if=format("ls {hadoop_conf_dir}"))
File(format("{ambari_libs_dir}/fast-hdfs-resource.jar"),
mode=0644,
content=StaticFile("/var/lib/ambari-agent/cache/stacks/HDP/2.0.6/hooks/before-START/files/fast-hdfs-resource.jar")
)
def setup_hadoop_env(self, env):
import params
env.set_params(params)
stackversion = params.stack_version_unformatted
if params.security_enabled:
tc_owner = "root"
else:
tc_owner = params.hdfs_user
# create /etc/hadoop
Directory(params.hadoop_dir, mode=0755)
# write out hadoop-env.sh, but only if the directory exists
if os.path.exists(params.hadoop_conf_dir):
File(os.path.join(params.hadoop_conf_dir, 'hadoop-env.sh'), owner=tc_owner,
group=params.user_group,
content=InlineTemplate(params.hadoop_env_sh_template))
# Create tmp dir for java.io.tmpdir
# Handle a situation when /tmp is set to noexec
Directory(params.hadoop_java_io_tmpdir,
owner=params.hdfs_user,
group=params.user_group,
mode=0777
)
def create_dirs(self,env):
import params
env.set_params(params)
params.HdfsResource(params.hdfs_tmp_dir,
type="directory",
action="create_on_execute",
owner=params.hdfs_user,
mode=0777
)
params.HdfsResource(params.smoke_hdfs_user_dir,
type="directory",
action="create_on_execute",
owner=params.smoke_user,
mode=params.smoke_hdfs_user_mode
)
params.HdfsResource(None,
action="execute"
)
if __name__ == "__main__":
ECSClient().execute()
|
apache-2.0
| -1,005,585,608,896,541,700 | 3,647,404,360,136,628,000 | 32.553571 | 125 | 0.645556 | false |
Revanth47/addons-server
|
src/olympia/api/tests/test_commands.py
|
5
|
4643
|
import os.path
from StringIO import StringIO
from django.core.management import call_command
from django.conf import settings
from olympia.amo.tests import TestCase, user_factory
from olympia.api.models import APIKey
class TestRevokeAPIKeys(TestCase):
def setUp(self):
self.csv_path = os.path.join(
settings.ROOT, 'src', 'olympia', 'api', 'tests', 'assets',
'test-revoke-api-keys.csv')
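# The fixture is assumed to contain three entries ('user:12345:666',
# 'user:67890:333' and a literal 'garbage' line), matching the command
# output asserted in the tests below.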
def test_api_key_does_not_exist(self):
user = user_factory()
# The test csv does not contain an entry for this user.
apikey = APIKey.new_jwt_credentials(user=user)
old_secret = apikey.secret
stdout = StringIO()
call_command('revoke_api_keys', self.csv_path, stdout=stdout)
stdout.seek(0)
output = stdout.readlines()
assert output[0] == (
'Ignoring APIKey user:12345:666, it does not exist.\n')
assert output[1] == (
'Ignoring APIKey user:67890:333, it does not exist.\n')
# APIKey is still active, secret hasn't changed, there are no
# additional APIKeys.
apikey.reload()
assert apikey.secret == old_secret
assert apikey.is_active
assert APIKey.objects.filter(user=user).count() == 1
def test_api_key_already_inactive(self):
user = user_factory(id=67890)
# The test csv contains an entry with this user and the "right" secret.
right_secret = (
'ab2228544a061cb2af21af97f637cc58e1f8340196f1ddc3de329b5974694b26')
apikey = APIKey.objects.create(
key='user:{}:{}'.format(user.pk, '333'), secret=right_secret,
user=user, is_active=False) # inactive APIKey.
stdout = StringIO()
call_command('revoke_api_keys', self.csv_path, stdout=stdout)
stdout.seek(0)
output = stdout.readlines()
assert output[0] == (
'Ignoring APIKey user:12345:666, it does not exist.\n')
assert output[1] == (
'Ignoring APIKey user:67890:333, it does not exist.\n')
# APIKey is still active, secret hasn't changed, there are no
# additional APIKeys.
apikey.reload()
assert apikey.secret == right_secret
assert not apikey.is_active
assert APIKey.objects.filter(user=user).count() == 1
def test_api_key_has_wrong_secret(self):
user = user_factory(id=12345)
# The test csv contains an entry with this user and the "wrong" secret.
right_secret = (
'ab2228544a061cb2af21af97f637cc58e1f8340196f1ddc3de329b5974694b26')
apikey = APIKey.objects.create(
key='user:{}:{}'.format(user.pk, '666'), secret=right_secret,
user=user, is_active=True)
stdout = StringIO()
call_command('revoke_api_keys', self.csv_path, stdout=stdout)
stdout.seek(0)
output = stdout.readlines()
assert output[0] == (
'Ignoring APIKey user:12345:666, secret differs.\n')
assert output[1] == (
'Ignoring APIKey user:67890:333, it does not exist.\n')
# APIKey is still active, secret hasn't changed, there are no
# additional APIKeys.
apikey.reload()
assert apikey.secret == right_secret
assert apikey.is_active
assert APIKey.objects.filter(user=user).count() == 1
def test_api_key_should_be_revoked(self):
user = user_factory(id=67890)
# The test csv contains an entry with this user and the "right" secret.
right_secret = (
'ab2228544a061cb2af21af97f637cc58e1f8340196f1ddc3de329b5974694b26')
apikey = APIKey.objects.create(
key='user:{}:{}'.format(user.pk, '333'), secret=right_secret,
user=user, is_active=True)
stdout = StringIO()
call_command('revoke_api_keys', self.csv_path, stdout=stdout)
stdout.seek(0)
output = stdout.readlines()
assert output[0] == (
'Ignoring APIKey user:12345:666, it does not exist.\n')
assert output[1] == (
'Revoked APIKey user:67890:333.\n')
assert output[2] == (
'Ignoring APIKey garbage, it does not exist.\n')
assert output[3] == (
'Done. Revoked 1 keys out of 3 entries.\n')
# APIKey is still active, secret hasn't changed, there are no
# additional APIKeys.
apikey.reload()
assert apikey.secret == right_secret
assert not apikey.is_active
assert APIKey.objects.filter(user=user).count() == 2
assert APIKey.objects.filter(user=user, is_active=True).count() == 1
|
bsd-3-clause
| 726,565,165,966,723,100 | -1,179,552,577,979,888,400 | 40.088496 | 79 | 0.614258 | false |
edx/ecommerce
|
ecommerce/extensions/analytics/migrations/0001_initial.py
|
1
|
3904
|
# -*- coding: utf-8 -*-
from decimal import Decimal
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='ProductRecord',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('num_views', models.PositiveIntegerField(default=0, verbose_name='Views')),
('num_basket_additions', models.PositiveIntegerField(default=0, verbose_name='Basket Additions')),
('num_purchases', models.PositiveIntegerField(default=0, db_index=True, verbose_name='Purchases')),
('score', models.FloatField(default=0.0, verbose_name='Score')),
],
options={
'ordering': ['-num_purchases'],
'verbose_name_plural': 'Product records',
'verbose_name': 'Product record',
'abstract': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='UserProductView',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date_created', models.DateTimeField(auto_now_add=True, verbose_name='Date Created')),
],
options={
'verbose_name_plural': 'User product views',
'verbose_name': 'User product view',
'abstract': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='UserRecord',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('num_product_views', models.PositiveIntegerField(default=0, verbose_name='Product Views')),
('num_basket_additions', models.PositiveIntegerField(default=0, verbose_name='Basket Additions')),
('num_orders', models.PositiveIntegerField(default=0, db_index=True, verbose_name='Orders')),
('num_order_lines', models.PositiveIntegerField(default=0, db_index=True, verbose_name='Order Lines')),
('num_order_items', models.PositiveIntegerField(default=0, db_index=True, verbose_name='Order Items')),
('total_spent', models.DecimalField(default=Decimal('0.00'), max_digits=12, decimal_places=2, verbose_name='Total Spent')),
('date_last_order', models.DateTimeField(blank=True, verbose_name='Last Order Date', null=True)),
('user', models.OneToOneField(verbose_name='User', to=settings.AUTH_USER_MODEL, on_delete=models.CASCADE)),
],
options={
'verbose_name_plural': 'User records',
'verbose_name': 'User record',
'abstract': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='UserSearch',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('query', models.CharField(max_length=255, db_index=True, verbose_name='Search term')),
('date_created', models.DateTimeField(auto_now_add=True, verbose_name='Date Created')),
('user', models.ForeignKey(verbose_name='User', to=settings.AUTH_USER_MODEL, on_delete=models.CASCADE)),
],
options={
'verbose_name_plural': 'User search queries',
'verbose_name': 'User search query',
'abstract': False,
},
bases=(models.Model,),
),
]
|
agpl-3.0
| -3,058,166,857,024,057,300 | -8,325,462,776,730,453,000 | 46.609756 | 139 | 0.566342 | false |
konstin/mucmiete
|
settings/base.py
|
2
|
3552
|
"""
Django settings for miete project.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
import logging
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
from .preconfig import *
try:
from .plz_mapping import *
except:
logging.getLogger(__name__).error('You need to create "settings/plz_mapping.py" by running `./manage.py create_plz_mapping`.')
# Application definition
if HAVE_ADMIN:
ADMIN_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django_admin_bootstrapped',
'django.contrib.admin',
'django.contrib.admindocs',
)
ADMIN_MIDDLEWARE = (
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
)
ADMIN_CONTEXTPROC = ['django.contrib.auth.context_processors.auth']
else:
ADMIN_APPS = ()
ADMIN_MIDDLEWARE = ()
ADMIN_CONTEXTPROC = []
INSTALLED_APPS = (
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django_extensions',
'captcha',
) + ADMIN_APPS + (
'miete',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
) + ADMIN_MIDDLEWARE + (
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'settings.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
] + ADMIN_CONTEXTPROC + [
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'settings.wsgi.application'
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'de-de'
LANGUAGES = (
('en', 'English'),
('de', 'Deutsch'),
)
TIME_ZONE = 'Europe/Berlin'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, "static"),
# '/var/www/static/',
)
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
# addon packages
# No Captcha reCaptcha
NOCAPTCHA = True
BOOTSTRAP3 = {
# The URL to the jQuery JavaScript file
'jquery_url': '/static_jquery/js/jquery.min.js',
# The Bootstrap base URL
'base_url': '/static/bootstrap/',
}
|
agpl-3.0
| 7,167,911,494,991,319,000 | 8,548,706,165,507,994,000 | 23 | 130 | 0.625563 | false |
anastasiaguenova/radioactive-decay-simulator
|
electron/node_modules/accessibility-developer-tools/lib/closure-library/closure/bin/build/jscompiler_test.py
|
27
|
3823
|
#!/usr/bin/env python
#
# Copyright 2013 The Closure Library Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit test for depstree."""
__author__ = '[email protected] (Nathan Naze)'
import os
import unittest
import jscompiler
class JsCompilerTestCase(unittest.TestCase):
"""Unit tests for jscompiler module."""
def testGetFlagFile(self):
flags_file = jscompiler._GetFlagFile(
['path/to/src1.js', 'path/to/src2.js'], ['--test_compiler_flag'])
def file_get_contents(filename):
with open(filename) as f:
content = f.read()
f.close()
return content
flags_file_content = file_get_contents(flags_file.name)
os.remove(flags_file.name)
self.assertEqual(
'--js path/to/src1.js --js path/to/src2.js --test_compiler_flag',
flags_file_content)
def testGetJsCompilerArgs(self):
original_check = jscompiler._JavaSupports32BitMode
jscompiler._JavaSupports32BitMode = lambda: False
args = jscompiler._GetJsCompilerArgs('path/to/jscompiler.jar', (1, 7),
['--test_jvm_flag'])
self.assertEqual(
['java', '-client', '--test_jvm_flag', '-jar',
'path/to/jscompiler.jar'], args)
def CheckJava15RaisesError():
jscompiler._GetJsCompilerArgs('path/to/jscompiler.jar', (1, 5),
['--test_jvm_flag'])
self.assertRaises(jscompiler.JsCompilerError, CheckJava15RaisesError)
jscompiler._JavaSupports32BitMode = original_check
def testGetJsCompilerArgs32BitJava(self):
original_check = jscompiler._JavaSupports32BitMode
# Should include the -d32 flag only if 32-bit Java is supported by the
# system.
jscompiler._JavaSupports32BitMode = lambda: True
args = jscompiler._GetJsCompilerArgs('path/to/jscompiler.jar', (1, 7),
['--test_jvm_flag'])
self.assertEqual(
['java', '-d32', '-client', '--test_jvm_flag', '-jar',
'path/to/jscompiler.jar'], args)
# Should exclude the -d32 flag if 32-bit Java is not supported by the
# system.
jscompiler._JavaSupports32BitMode = lambda: False
args = jscompiler._GetJsCompilerArgs('path/to/jscompiler.jar', (1, 7),
['--test_jvm_flag'])
self.assertEqual(
['java', '-client', '--test_jvm_flag', '-jar',
'path/to/jscompiler.jar'], args)
jscompiler._JavaSupports32BitMode = original_check
def testGetJavaVersion(self):
def assertVersion(expected, version_string):
self.assertEquals(expected, jscompiler._ParseJavaVersion(version_string))
assertVersion((1, 7), _TEST_JAVA_VERSION_STRING)
assertVersion((1, 6), _TEST_JAVA_NESTED_VERSION_STRING)
assertVersion((1, 4), 'java version "1.4.0_03-ea"')
_TEST_JAVA_VERSION_STRING = """\
openjdk version "1.7.0-google-v5"
OpenJDK Runtime Environment (build 1.7.0-google-v5-64327-39803485)
OpenJDK Server VM (build 22.0-b10, mixed mode)
"""
_TEST_JAVA_NESTED_VERSION_STRING = """\
Picked up JAVA_TOOL_OPTIONS: -Dfile.encoding=UTF-8
java version "1.6.0_35"
Java(TM) SE Runtime Environment (build 1.6.0_35-b10-428-11M3811)
Java HotSpot(TM) Client VM (build 20.10-b01-428, mixed mode)
"""
if __name__ == '__main__':
unittest.main()
|
mit
| 7,908,672,289,350,297,000 | -4,638,708,030,717,575,000 | 32.535088 | 79 | 0.660999 | false |
UstadMobile/eXePUB
|
exe/engine/wikipediaidevice.py
|
3
|
15520
|
# -*- coding: utf-8 -*-
# ===========================================================================
# eXe
# Copyright 2004-2006, University of Auckland
# Copyright 2006-2008 eXe Project, http://eXeLearning.org/
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
# ===========================================================================
"""
A Wikipedia Idevice is one built from a Wikipedia article.
"""
import re
from BeautifulSoup import BeautifulSoup, Comment
from exe.engine.idevice import Idevice
from exe.engine.field import TextAreaField
from exe.engine.translate import lateTranslate
from exe.engine.path import Path, TempDirPath
from exe.engine.resource import Resource
import urllib
class UrlOpener(urllib.FancyURLopener):
"""
Set a distinctive User-Agent, so Wikipedia.org knows we're not spammers
"""
version = "eXe/[email protected]"
urllib._urlopener = UrlOpener()
import logging
log = logging.getLogger(__name__)
# ===========================================================================
class WikipediaIdevice(Idevice):
"""
A Wikipedia Idevice is one built from a Wikipedia article.
"""
persistenceVersion = 9
def __init__(self, defaultSite):
Idevice.__init__(self, x_(u"Wiki Article"),
x_(u"University of Auckland"),
x_(u"""<p>The Wikipedia iDevice allows you to locate
existing content from within Wikipedia and download this content into your eXe
resource. The Wikipedia Article iDevice takes a snapshot copy of the article
content. Changes in Wikipedia will not automatically update individual snapshot
copies in eXe; a fresh copy of the article will need to be taken. Likewise,
changes made in eXe will not be updated in Wikipedia. </p> <p>Wikipedia content
is covered by the GNU free documentation license.</p>"""),
u"", u"")
self.emphasis = Idevice.NoEmphasis
self.articleName = u""
self.article = TextAreaField(x_(u"Article"))
self.article.idevice = self
self.images = {}
self.site = defaultSite
self.icon = u"inter"
self._langInstruc = x_(u"""Select the appropriate language version
of Wikipedia to search and enter search term.""")
self._searchInstruc = x_("""Enter a phrase or term you wish to search
within Wikipedia.""")
self.ownUrl = ""
self.systemResources += ['exe_wikipedia.css']
# Properties
langInstruc = lateTranslate('langInstruc')
searchInstruc = lateTranslate('searchInstruc')
def loadArticle(self, name):
"""
Load the article from Wikipedia
"""
self.articleName = name
url = ""
name = urllib.quote(name.replace(" ", "_").encode('utf-8'))
try:
url = (self.site or self.ownUrl)
if not url.endswith('/') and name <> '': url += '/'
if '://' not in url: url = 'http://' + url
url += name
net = urllib.urlopen(url)
page = net.read()
net.close()
except IOError, error:
log.warning(unicode(error))
self.article.content = _(u"Unable to download from %s <br/>Please check the spelling and connection and try again.") % url
self.article.content_w_resourcePaths = self.article.content
self.article.content_wo_resourcePaths = self.article.content
return
page = unicode(page, "utf8")
# FIXME avoid problems with numeric entities in attributes
page = page.replace(u'&nbsp;', u' ')
# avoidParserProblems is set to False because BeautifulSoup's
# cleanup was causing a "concatenating Null+Str" error,
# and Wikipedia's HTML doesn't need cleaning up.
# BeautifulSoup is faster this way too.
soup = BeautifulSoup(page, False)
content = soup.first('div', {'id': "content"})
#Fix bug #1359: the ITE style does not respect the page width when exporting
#to web pages if the Wikipedia iDevice is used
content['id'] = "wikipedia-content"
# remove the wiktionary, wikimedia commons, and categories boxes
# and the protected icon and the needs citations box
if content:
infoboxes = content.findAll('div',
{'class' : 'infobox sisterproject'})
[infobox.extract() for infobox in infoboxes]
catboxes = content.findAll('div', {'id' : 'catlinks'})
[catbox.extract() for catbox in catboxes]
amboxes = content.findAll('table',
{'class' : re.compile(r'.*\bambox\b.*')})
[ambox.extract() for ambox in amboxes]
protecteds = content.findAll('div', {'id' : 'protected-icon'})
[protected.extract() for protected in protecteds]
# Extract HTML comments
comments = content.findAll(text=lambda text:isinstance(text, Comment))
[comment.extract() for comment in comments]
else:
content = soup.first('body')
if not content:
log.error("no content")
self.article.content = _(u"Unable to download from %s <br/>Please check the spelling and connection and try again.") % url
# set the other elements as well
self.article.content_w_resourcePaths = self.article.content
self.article.content_wo_resourcePaths = self.article.content
return
# clear out any old images
while self.userResources:
self.userResources[0].delete()
self.images = {}
# Download the images
bits = url.split('/')
netloc = '%s//%s' % (bits[0], bits[2])
path = '/'.join(bits[3:-1])
tmpDir = TempDirPath()
for imageTag in content.fetch('img'):
imageSrc = unicode(imageTag['src'])
imageName = imageSrc.split('/')[-1]
imageName = imageName.replace('&gt;', '>')
imageName = imageName.replace('&lt;', '<')
imageName = imageName.replace('&quot;', '"')
imageName = imageName.replace(' ', '')
imageName = imageName.replace('%2C', ',')
imageName = imageName.replace('%22', '"')
imageName = imageName.replace('%28', '(')
imageName = imageName.replace('%29', ')')
imageName = imageName.replace('%C3%A5', 'å')
#JR: decode the image name
imageName = urllib.unquote(imageName)
# Search if we've already got this image
if imageName not in self.images:
if not imageSrc.startswith("http://"):
if imageSrc.startswith("/"):
# imageSrc = netloc + imageSrc
imageSrc = bits[0] + imageSrc
else:
imageSrc = '%s/%s/%s' % (netloc, path, imageSrc)
try:
# download with its original name... in ASCII:
## er... just because some repositories do not understand non-ASCII file names:
imageName = imageName.encode('ascii', 'ignore')
urllib.urlretrieve(imageSrc, tmpDir/imageName)
new_resource = Resource(self, tmpDir/imageName)
except:
print 'Unable to download file'
if new_resource._storageName != imageName:
# looks like it was changed due to a possible conflict,
# so reset the imageName accordingly for the content:
imageName = new_resource._storageName
self.images[imageName] = True
imageTag['src'] = (u"resources/" + imageName)
self.article.content = self.reformatArticle(netloc, unicode(content))
# now that these are supporting images, any direct manipulation
# of the content field must also store this updated information
# into the other corresponding fields of TextAreaField:
# (perhaps eventually a property should be made for TextAreaField
# such that these extra set's are not necessary, but for now, here:)
self.article.content_w_resourcePaths = self.article.content
self.article.content_wo_resourcePaths = self.article.content
def reformatArticle(self, netloc, content):
"""
Changes links, etc
"""
content = re.sub(r'href="/', r'href="%s/' % netloc, content)
content = re.sub(r'<(span|div)\s+(id|class)="(editsection|jump-to-nav)".*?</\1>', '', content)
#TODO Find a way to remove scripts without removing newlines
content = content.replace("\n", " ")
content = re.sub(r'<script.*?</script>', '', content)
return content
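# Illustration with a hypothetical input (not taken from a real article):
# reformatArticle("http://en.wikipedia.org", '<a href="/wiki/Python">')
# rewrites the relative link to 'href="http://en.wikipedia.org/wiki/Python"',
# removes "editsection"/"jump-to-nav" spans and divs, and strips <script>
# blocks after flattening newlines.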
def getResourcesField(self, this_resource):
"""
implement the specific resource finding mechanism for this iDevice:
"""
# be warned that before upgrading, this iDevice field could not exist:
if hasattr(self, 'article') and hasattr(self.article, 'images'):
for this_image in self.article.images:
if hasattr(this_image, '_imageResource') \
and this_resource == this_image._imageResource:
return self.article
# NOTE that WikipediaIdevices list their images
# in the idevice's .userResources, not in its .article.images...
# a slightly different (and earlier) approach to embedding images:
for this_image in self.userResources:
if this_resource == this_image:
return self.article
return None
def getRichTextFields(self):
"""
Like getResourcesField(), a general helper to allow nodes to search
through all of their fields without having to know the specifics of each
iDevice type.
"""
fields_list = []
if hasattr(self, 'article'):
fields_list.append(self.article)
return fields_list
def burstHTML(self, i):
"""
takes a BeautifulSoup fragment (i) and bursts its contents to
import this idevice from a CommonCartridge export
"""
# Wiki Article Idevice:
# option title for Wikipedia, with mode emphasis:
title = i.find(name='h2', attrs={'class' : 'iDeviceTitle' })
if title is not None:
self.title = title.renderContents().decode('utf-8')
self.emphasis=Idevice.SomeEmphasis
wiki = i.find(name='div', attrs={'id' : re.compile('^ta') })
self.article.content_wo_resourcePaths = \
wiki.renderContents().decode('utf-8')
# and add the LOCAL resource paths back in:
self.article.content_w_resourcePaths = \
self.article.MassageResourceDirsIntoContent( \
self.article.content_wo_resourcePaths)
self.article.content = self.article.content_w_resourcePaths
site = i.find(name='div', attrs={'class' : 'wiki_site' })
if site is not None:
self.site = site.attrMap['value'].decode('utf-8')
name = i.find(name='div', attrs={'class' : 'article_name' })
if name is not None:
# WARNING: the following crashes on accented characters, eg:
# 'ascii' codec can't encode character u'\xe8' in
# position 11: ordinal not in range(128)
self.articleName = name.attrMap['value'].decode('utf-8')
own_url = i.find(name='div', attrs={'class' : 'own_url' })
if own_url is not None:
self.own_url = own_url.attrMap['value'].decode('utf-8')
def __getstate__(self):
"""
Re-write the img URLs just in case the class name has changed
"""
log.debug("in __getstate__ " + repr(self.parentNode))
# need to check parentNode because __getstate__ is also called by
# deepcopy as well as Jelly.
if self.parentNode:
self.article.content = re.sub(r'/[^/]*?/',
u"/" + self.parentNode.package.name +
u"/",
self.article.content)
#self.article.content = re.sub(r'/[^/]*?/resources/',
# u"/" + self.parentNode.package.name +
# u"/resources/",
# self.article.content)
return Idevice.__getstate__(self)
def delete(self):
"""
Clear out any old images when this iDevice is deleted
"""
self.images = {}
Idevice.delete(self)
def upgradeToVersion1(self):
"""
Called to upgrade from 0.6 release
"""
self.site = _('http://en.wikipedia.org/')
def upgradeToVersion2(self):
"""
Upgrades v0.6 to v0.7.
"""
self.lastIdevice = False
def upgradeToVersion3(self):
"""
Upgrades exe to v0.10
"""
self._upgradeIdeviceToVersion1()
self._site = self.__dict__['site']
def upgradeToVersion4(self):
"""
Upgrades exe to v0.11... what was I thinking?
"""
self.site = self.__dict__['_site']
def upgradeToVersion5(self):
"""
Upgrades exe to v0.11... forgot to change the icon
"""
self.icon = u"inter"
def upgradeToVersion6(self):
"""
Upgrades to v0.12
"""
self._upgradeIdeviceToVersion2()
self.systemResources += ["fdl.html"]
if self.images and self.parentNode:
for image in self.images:
imageResource = Resource(self, Path(image))
def upgradeToVersion7(self):
"""
Upgrades to v0.12
"""
self._langInstruc = x_(u"""Select the appropriate language version
of Wikipedia to search and enter search term.""")
self._searchInstruc = x_("""Enter a phrase or term you wish to search
within Wikipedia.""")
def upgradeToVersion8(self):
"""
Upgrades to v0.19
"""
self.ownUrl = ""
def upgradeToVersion9(self):
if 'fdl.html' in self.systemResources:
self.systemResources.remove('fdl.html')
if 'exe_wikipedia.css' not in self.systemResources:
self.systemResources.append('exe_wikipedia.css')
# ===========================================================================
|
gpl-2.0
| 4,863,903,533,539,740,000 | 4,967,708,528,849,112,000 | 40.378667 | 134 | 0.564155 | false |
edxzw/edx-platform
|
cms/djangoapps/contentstore/views/tests/utils.py
|
198
|
2922
|
"""
Utilities for view tests.
"""
import json
from contentstore.tests.utils import CourseTestCase
from contentstore.views.helpers import xblock_studio_url
from xmodule.modulestore.tests.factories import ItemFactory
class StudioPageTestCase(CourseTestCase):
"""
Base class for all tests of Studio pages.
"""
def setUp(self):
super(StudioPageTestCase, self).setUp()
self.chapter = ItemFactory.create(parent_location=self.course.location,
category='chapter', display_name="Week 1")
self.sequential = ItemFactory.create(parent_location=self.chapter.location,
category='sequential', display_name="Lesson 1")
def get_page_html(self, xblock):
"""
Returns the HTML for the page representing the xblock.
"""
url = xblock_studio_url(xblock)
self.assertIsNotNone(url)
resp = self.client.get_html(url)
self.assertEqual(resp.status_code, 200)
return resp.content
def get_preview_html(self, xblock, view_name):
"""
Returns the HTML for the xblock when shown within a unit or container page.
"""
preview_url = '/xblock/{usage_key}/{view_name}'.format(usage_key=xblock.location, view_name=view_name)
resp = self.client.get_json(preview_url)
self.assertEqual(resp.status_code, 200)
resp_content = json.loads(resp.content)
return resp_content['html']
def validate_preview_html(self, xblock, view_name, can_add=True):
"""
Verify that the specified xblock's preview has the expected HTML elements.
"""
html = self.get_preview_html(xblock, view_name)
self.validate_html_for_add_buttons(html, can_add)
# Verify drag handles always appear.
drag_handle_html = '<span data-tooltip="Drag to reorder" class="drag-handle action"></span>'
self.assertIn(drag_handle_html, html)
# Verify that there are no action buttons for public blocks
expected_button_html = [
'<a href="#" class="edit-button action-button">',
'<a href="#" data-tooltip="Delete" class="delete-button action-button">',
'<a href="#" data-tooltip="Duplicate" class="duplicate-button action-button">'
]
for button_html in expected_button_html:
self.assertIn(button_html, html)
def validate_html_for_add_buttons(self, html, can_add=True):
"""
Validate that the specified HTML has the appropriate add actions for the current publish state.
"""
# Verify that there are no add buttons for public blocks
add_button_html = '<div class="add-xblock-component new-component-item adding"></div>'
if can_add:
self.assertIn(add_button_html, html)
else:
self.assertNotIn(add_button_html, html)
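# A minimal sketch of how a concrete test might use this base class (the
# subclass name and test are illustrative, not part of this module):
#
#   class MyUnitPageTest(StudioPageTestCase):
#       def test_unit_page_renders(self):
#           html = self.get_page_html(self.sequential)
#           self.validate_html_for_add_buttons(html)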
|
agpl-3.0
| -3,519,158,620,562,295,300 | 1,072,655,161,441,274,900 | 39.027397 | 110 | 0.631417 | false |
broferek/ansible
|
lib/ansible/plugins/become/enable.py
|
43
|
1422
|
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
become: enable
short_description: Switch to elevated permissions on a network device
description:
- This become plugin allows elevated permissions on a remote network device.
author: ansible (@core)
version_added: "2.8"
options:
become_pass:
description: password
ini:
- section: enable_become_plugin
key: password
vars:
- name: ansible_become_password
- name: ansible_become_pass
- name: ansible_enable_pass
env:
- name: ANSIBLE_BECOME_PASS
- name: ANSIBLE_ENABLE_PASS
notes:
- enable is really implemented in the network connection handler and as such can only be used with network connections.
- This plugin ignores the 'become_exe' and 'become_user' settings as it uses an API and not an executable.
"""
from ansible.plugins.become import BecomeBase
class BecomeModule(BecomeBase):
name = 'enable'
def build_become_command(self, cmd, shell):
# enable is implemented inside the network connection plugins
return cmd
|
gpl-3.0
| 3,222,787,924,220,600,300 | -7,410,237,676,428,669,000 | 33.682927 | 127 | 0.637131 | false |
robbinfan/thirdparty
|
protobuf-2.6.1/python/google/protobuf/pyext/descriptor_cpp2_test.py
|
73
|
2506
|
#! /usr/bin/python
#
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# https://developers.google.com/protocol-buffers/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests for google.protobuf.pyext behavior."""
__author__ = '[email protected] (Anuraag Agrawal)'
import os
os.environ['PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION'] = 'cpp'
os.environ['PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION_VERSION'] = '2'
# We must set the implementation version above before the google3 imports.
# pylint: disable=g-import-not-at-top
from google.apputils import basetest
from google.protobuf.internal import api_implementation
# Run all tests from the original module by putting them in our namespace.
# pylint: disable=wildcard-import
from google.protobuf.internal.descriptor_test import *
class ConfirmCppApi2Test(basetest.TestCase):
def testImplementationSetting(self):
self.assertEqual('cpp', api_implementation.Type())
self.assertEqual(2, api_implementation.Version())
if __name__ == '__main__':
basetest.main()
|
bsd-2-clause
| 5,661,712,039,732,384,000 | 3,329,820,255,674,169,300 | 42.206897 | 74 | 0.77494 | false |
epssy/hue
|
apps/useradmin/src/useradmin/test_ldap_deprecated.py
|
4
|
32107
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ldap
from nose.plugins.attrib import attr
from nose.tools import assert_true, assert_equal, assert_false
import desktop.conf
from desktop.lib.test_utils import grant_access
from desktop.lib.django_test_util import make_logged_in_client
from django.conf import settings
from django.contrib.auth.models import User, Group
from django.core.urlresolvers import reverse
from useradmin.models import LdapGroup, UserProfile, get_profile
from hadoop import pseudo_hdfs4
from views import sync_ldap_users, sync_ldap_groups, import_ldap_users, import_ldap_groups, \
add_ldap_users, add_ldap_groups, sync_ldap_users_groups
import ldap_access
from tests import LdapTestConnection, reset_all_groups, reset_all_users
def test_useradmin_ldap_user_group_membership_sync():
settings.MIDDLEWARE_CLASSES.append('useradmin.middleware.LdapSynchronizationMiddleware')
reset_all_users()
reset_all_groups()
# Set up LDAP tests to use a LdapTestConnection instead of an actual LDAP connection
ldap_access.CACHED_LDAP_CONN = LdapTestConnection()
# Make sure LDAP groups exist or they won't sync
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'TestUsers', import_members=False, import_members_recursive=False, sync_users=False, import_by_dn=False)
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'Test Administrators', import_members=False, import_members_recursive=False, sync_users=False, import_by_dn=False)
try:
# Import curly who is part of TestUsers and Test Administrators
import_ldap_users(ldap_access.CACHED_LDAP_CONN, 'curly', sync_groups=False, import_by_dn=False)
# Set a password so that we can login
user = User.objects.get(username='curly')
user.set_password('test')
user.save()
# Should have 0 groups
assert_equal(0, user.groups.all().count())
# Make an authenticated request as curly so that we can see call middleware.
c = make_logged_in_client('curly', 'test', is_superuser=False)
grant_access("curly", "test", "useradmin")
response = c.get('/useradmin/users')
# Refresh user groups
user = User.objects.get(username='curly')
# Should have 3 groups now. 2 from LDAP and 1 from 'grant_access' call.
assert_equal(3, user.groups.all().count(), user.groups.all())
# Now remove a group and try again.
old_group = ldap_access.CACHED_LDAP_CONN._instance.users['curly']['groups'].pop()
# Make an authenticated request as curly so that we can see call middleware.
response = c.get('/useradmin/users')
# Refresh user groups
user = User.objects.get(username='curly')
# Should have 2 groups now. 1 from LDAP and 1 from 'grant_access' call.
assert_equal(3, user.groups.all().count(), user.groups.all())
finally:
settings.MIDDLEWARE_CLASSES.remove('useradmin.middleware.LdapSynchronizationMiddleware')
def test_useradmin_ldap_suboordinate_group_integration():
reset_all_users()
reset_all_groups()
reset = []
# Set up LDAP tests to use a LdapTestConnection instead of an actual LDAP connection
ldap_access.CACHED_LDAP_CONN = LdapTestConnection()
# Test old subgroups
reset.append(desktop.conf.LDAP.SUBGROUPS.set_for_testing("suboordinate"))
try:
# Import groups only
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'TestUsers', import_members=False, import_members_recursive=False, sync_users=False, import_by_dn=False)
test_users = Group.objects.get(name='TestUsers')
assert_true(LdapGroup.objects.filter(group=test_users).exists())
assert_equal(test_users.user_set.all().count(), 0)
# Import all members of TestUsers
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'TestUsers', import_members=True, import_members_recursive=False, sync_users=True, import_by_dn=False)
test_users = Group.objects.get(name='TestUsers')
assert_true(LdapGroup.objects.filter(group=test_users).exists())
assert_equal(test_users.user_set.all().count(), 3)
# Should import a group, but will only sync already-imported members
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'Test Administrators', import_members=False, import_members_recursive=False, sync_users=True, import_by_dn=False)
assert_equal(User.objects.all().count(), 3)
assert_equal(Group.objects.all().count(), 2)
test_admins = Group.objects.get(name='Test Administrators')
assert_equal(test_admins.user_set.all().count(), 2)
larry = User.objects.get(username='lårry')
assert_equal(test_admins.user_set.all()[0].username, larry.username)
# Only sync already imported
ldap_access.CACHED_LDAP_CONN.remove_user_group_for_test('uid=moe,ou=People,dc=example,dc=com', 'TestUsers')
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'TestUsers', import_members=False, import_members_recursive=False, sync_users=True, import_by_dn=False)
assert_equal(test_users.user_set.all().count(), 2)
assert_equal(User.objects.get(username='moe').groups.all().count(), 0)
# Import missing user
ldap_access.CACHED_LDAP_CONN.add_user_group_for_test('uid=moe,ou=People,dc=example,dc=com', 'TestUsers')
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'TestUsers', import_members=True, import_members_recursive=False, sync_users=True, import_by_dn=False)
assert_equal(test_users.user_set.all().count(), 3)
assert_equal(User.objects.get(username='moe').groups.all().count(), 1)
# Import all members of TestUsers and members of subgroups
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'TestUsers', import_members=True, import_members_recursive=True, sync_users=True, import_by_dn=False)
test_users = Group.objects.get(name='TestUsers')
assert_true(LdapGroup.objects.filter(group=test_users).exists())
assert_equal(test_users.user_set.all().count(), 4)
# Make sure Hue groups with naming collisions don't get marked as LDAP groups
hue_user = User.objects.create(username='otherguy', first_name='Different', last_name='Guy')
hue_group = Group.objects.create(name='OtherGroup')
hue_group.user_set.add(hue_user)
hue_group.save()
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'OtherGroup', import_members=False, import_members_recursive=False, sync_users=True, import_by_dn=False)
assert_false(LdapGroup.objects.filter(group=hue_group).exists())
assert_true(hue_group.user_set.filter(username=hue_user.username).exists())
finally:
for finish in reset:
finish()
def test_useradmin_ldap_nested_group_integration():
reset_all_users()
reset_all_groups()
reset = []
# Set up LDAP tests to use a LdapTestConnection instead of an actual LDAP connection
ldap_access.CACHED_LDAP_CONN = LdapTestConnection()
# Test old subgroups
reset.append(desktop.conf.LDAP.SUBGROUPS.set_for_testing("nested"))
try:
# Import groups only
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'TestUsers', import_members=False, import_members_recursive=False, sync_users=False, import_by_dn=False)
test_users = Group.objects.get(name='TestUsers')
assert_true(LdapGroup.objects.filter(group=test_users).exists())
assert_equal(test_users.user_set.all().count(), 0)
# Import all members of TestUsers
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'TestUsers', import_members=True, import_members_recursive=False, sync_users=True, import_by_dn=False)
test_users = Group.objects.get(name='TestUsers')
assert_true(LdapGroup.objects.filter(group=test_users).exists())
assert_equal(test_users.user_set.all().count(), 3)
# Should import a group, but will only sync already-imported members
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'Test Administrators', import_members=False, import_members_recursive=False, sync_users=True, import_by_dn=False)
assert_equal(User.objects.all().count(), 3)
assert_equal(Group.objects.all().count(), 2)
test_admins = Group.objects.get(name='Test Administrators')
assert_equal(test_admins.user_set.all().count(), 2)
larry = User.objects.get(username='lårry')
assert_equal(test_admins.user_set.all()[0].username, larry.username)
# Only sync already imported
assert_equal(test_users.user_set.all().count(), 3)
ldap_access.CACHED_LDAP_CONN.remove_user_group_for_test('uid=moe,ou=People,dc=example,dc=com', 'TestUsers')
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'TestUsers', import_members=False, import_members_recursive=False, sync_users=True, import_by_dn=False)
assert_equal(test_users.user_set.all().count(), 2)
assert_equal(User.objects.get(username='moe').groups.all().count(), 0)
# Import missing user
ldap_access.CACHED_LDAP_CONN.add_user_group_for_test('uid=moe,ou=People,dc=example,dc=com', 'TestUsers')
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'TestUsers', import_members=True, import_members_recursive=False, sync_users=True, import_by_dn=False)
assert_equal(test_users.user_set.all().count(), 3)
assert_equal(User.objects.get(username='moe').groups.all().count(), 1)
# Import all members of TestUsers and not members of suboordinate groups (even though specified)
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'TestUsers', import_members=True, import_members_recursive=True, sync_users=True, import_by_dn=False)
test_users = Group.objects.get(name='TestUsers')
assert_true(LdapGroup.objects.filter(group=test_users).exists())
assert_equal(test_users.user_set.all().count(), 3)
# Nested group import
# First without recursive import, then with.
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'NestedGroups', import_members=True, import_members_recursive=False, sync_users=True, import_by_dn=False)
nested_groups = Group.objects.get(name='NestedGroups')
nested_group = Group.objects.get(name='NestedGroup')
assert_true(LdapGroup.objects.filter(group=nested_groups).exists())
assert_true(LdapGroup.objects.filter(group=nested_group).exists())
assert_equal(nested_groups.user_set.all().count(), 0, nested_groups.user_set.all())
assert_equal(nested_group.user_set.all().count(), 0, nested_group.user_set.all())
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'NestedGroups', import_members=True, import_members_recursive=True, sync_users=True, import_by_dn=False)
nested_groups = Group.objects.get(name='NestedGroups')
nested_group = Group.objects.get(name='NestedGroup')
assert_true(LdapGroup.objects.filter(group=nested_groups).exists())
assert_true(LdapGroup.objects.filter(group=nested_group).exists())
assert_equal(nested_groups.user_set.all().count(), 0, nested_groups.user_set.all())
assert_equal(nested_group.user_set.all().count(), 1, nested_group.user_set.all())
# Make sure Hue groups with naming collisions don't get marked as LDAP groups
hue_user = User.objects.create(username='otherguy', first_name='Different', last_name='Guy')
hue_group = Group.objects.create(name='OtherGroup')
hue_group.user_set.add(hue_user)
hue_group.save()
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'OtherGroup', import_members=False, import_members_recursive=False, sync_users=True, import_by_dn=False)
assert_false(LdapGroup.objects.filter(group=hue_group).exists())
assert_true(hue_group.user_set.filter(username=hue_user.username).exists())
finally:
for finish in reset:
finish()
def test_useradmin_ldap_suboordinate_posix_group_integration():
reset_all_users()
reset_all_groups()
reset = []
# Set up LDAP tests to use a LdapTestConnection instead of an actual LDAP connection
ldap_access.CACHED_LDAP_CONN = LdapTestConnection()
# Test old subgroups
reset.append(desktop.conf.LDAP.SUBGROUPS.set_for_testing("suboordinate"))
try:
# Import groups only
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'PosixGroup', import_members=False, import_members_recursive=False, sync_users=False, import_by_dn=False)
test_users = Group.objects.get(name='PosixGroup')
assert_true(LdapGroup.objects.filter(group=test_users).exists())
assert_equal(test_users.user_set.all().count(), 0)
# Import all members of TestUsers
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'PosixGroup', import_members=True, import_members_recursive=False, sync_users=True, import_by_dn=False)
test_users = Group.objects.get(name='PosixGroup')
assert_true(LdapGroup.objects.filter(group=test_users).exists())
assert_equal(test_users.user_set.all().count(), 2)
# Should import a group, but will only sync already-imported members
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'Test Administrators', import_members=False, import_members_recursive=False, sync_users=True, import_by_dn=False)
assert_equal(User.objects.all().count(), 2, User.objects.all())
assert_equal(Group.objects.all().count(), 2, Group.objects.all())
test_admins = Group.objects.get(name='Test Administrators')
assert_equal(test_admins.user_set.all().count(), 1)
larry = User.objects.get(username='lårry')
assert_equal(test_admins.user_set.all()[0].username, larry.username)
# Only sync already imported
ldap_access.CACHED_LDAP_CONN.remove_posix_user_group_for_test('posix_person', 'PosixGroup')
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'PosixGroup', import_members=False, import_members_recursive=False, sync_users=True, import_by_dn=False)
assert_equal(test_users.user_set.all().count(), 1)
assert_equal(User.objects.get(username='posix_person').groups.all().count(), 0)
# Import missing user
ldap_access.CACHED_LDAP_CONN.add_posix_user_group_for_test('posix_person', 'PosixGroup')
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'PosixGroup', import_members=True, import_members_recursive=False, sync_users=True, import_by_dn=False)
assert_equal(test_users.user_set.all().count(), 2)
assert_equal(User.objects.get(username='posix_person').groups.all().count(), 1)
# Import all members of PosixGroup and members of subgroups
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'PosixGroup', import_members=True, import_members_recursive=True, sync_users=True, import_by_dn=False)
test_users = Group.objects.get(name='PosixGroup')
assert_true(LdapGroup.objects.filter(group=test_users).exists())
assert_equal(test_users.user_set.all().count(), 3)
# Make sure Hue groups with naming collisions don't get marked as LDAP groups
hue_user = User.objects.create(username='otherguy', first_name='Different', last_name='Guy')
hue_group = Group.objects.create(name='OtherGroup')
hue_group.user_set.add(hue_user)
hue_group.save()
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'OtherGroup', import_members=False, import_members_recursive=False, sync_users=True, import_by_dn=False)
assert_false(LdapGroup.objects.filter(group=hue_group).exists())
assert_true(hue_group.user_set.filter(username=hue_user.username).exists())
finally:
for finish in reset:
finish()
def test_useradmin_ldap_nested_posix_group_integration():
reset_all_users()
reset_all_groups()
reset = []
# Set up LDAP tests to use a LdapTestConnection instead of an actual LDAP connection
ldap_access.CACHED_LDAP_CONN = LdapTestConnection()
# Test nested groups
reset.append(desktop.conf.LDAP.SUBGROUPS.set_for_testing("nested"))
try:
# Import groups only
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'PosixGroup', import_members=False, import_members_recursive=False, sync_users=False, import_by_dn=False)
test_users = Group.objects.get(name='PosixGroup')
assert_true(LdapGroup.objects.filter(group=test_users).exists())
assert_equal(test_users.user_set.all().count(), 0)
    # Import all members of PosixGroup
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'PosixGroup', import_members=True, import_members_recursive=False, sync_users=True, import_by_dn=False)
test_users = Group.objects.get(name='PosixGroup')
assert_true(LdapGroup.objects.filter(group=test_users).exists())
assert_equal(test_users.user_set.all().count(), 2)
# Should import a group, but will only sync already-imported members
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'Test Administrators', import_members=False, import_members_recursive=False, sync_users=True, import_by_dn=False)
assert_equal(User.objects.all().count(), 2, User.objects.all())
assert_equal(Group.objects.all().count(), 2, Group.objects.all())
test_admins = Group.objects.get(name='Test Administrators')
assert_equal(test_admins.user_set.all().count(), 1)
larry = User.objects.get(username='lårry')
assert_equal(test_admins.user_set.all()[0].username, larry.username)
# Only sync already imported
ldap_access.CACHED_LDAP_CONN.remove_posix_user_group_for_test('posix_person', 'PosixGroup')
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'PosixGroup', import_members=False, import_members_recursive=False, sync_users=True, import_by_dn=False)
assert_equal(test_users.user_set.all().count(), 1)
assert_equal(User.objects.get(username='posix_person').groups.all().count(), 0)
# Import missing user
ldap_access.CACHED_LDAP_CONN.add_posix_user_group_for_test('posix_person', 'PosixGroup')
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'PosixGroup', import_members=True, import_members_recursive=False, sync_users=True, import_by_dn=False)
assert_equal(test_users.user_set.all().count(), 2)
assert_equal(User.objects.get(username='posix_person').groups.all().count(), 1)
# Import all members of PosixGroup and members of subgroups (there should be no subgroups)
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'PosixGroup', import_members=True, import_members_recursive=True, sync_users=True, import_by_dn=False)
test_users = Group.objects.get(name='PosixGroup')
assert_true(LdapGroup.objects.filter(group=test_users).exists())
assert_equal(test_users.user_set.all().count(), 2)
# Import all members of NestedPosixGroups and members of subgroups
reset_all_users()
reset_all_groups()
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'NestedPosixGroups', import_members=True, import_members_recursive=True, sync_users=True, import_by_dn=False)
test_users = Group.objects.get(name='NestedPosixGroups')
assert_true(LdapGroup.objects.filter(group=test_users).exists())
assert_equal(test_users.user_set.all().count(), 0)
test_users = Group.objects.get(name='PosixGroup')
assert_true(LdapGroup.objects.filter(group=test_users).exists())
assert_equal(test_users.user_set.all().count(), 2)
# Make sure Hue groups with naming collisions don't get marked as LDAP groups
hue_user = User.objects.create(username='otherguy', first_name='Different', last_name='Guy')
hue_group = Group.objects.create(name='OtherGroup')
hue_group.user_set.add(hue_user)
hue_group.save()
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'OtherGroup', import_members=False, import_members_recursive=False, sync_users=True, import_by_dn=False)
assert_false(LdapGroup.objects.filter(group=hue_group).exists())
assert_true(hue_group.user_set.filter(username=hue_user.username).exists())
finally:
for finish in reset:
finish()
def test_useradmin_ldap_user_integration():
done = []
try:
reset_all_users()
reset_all_groups()
# Set up LDAP tests to use a LdapTestConnection instead of an actual LDAP connection
ldap_access.CACHED_LDAP_CONN = LdapTestConnection()
# Try importing a user
import_ldap_users(ldap_access.CACHED_LDAP_CONN, 'lårry', sync_groups=False, import_by_dn=False)
larry = User.objects.get(username='lårry')
assert_true(larry.first_name == 'Larry')
assert_true(larry.last_name == 'Stooge')
assert_true(larry.email == '[email protected]')
assert_true(get_profile(larry).creation_method == str(UserProfile.CreationMethod.EXTERNAL))
# Should be a noop
sync_ldap_users(ldap_access.CACHED_LDAP_CONN)
sync_ldap_groups(ldap_access.CACHED_LDAP_CONN)
assert_equal(User.objects.all().count(), 1)
assert_equal(Group.objects.all().count(), 0)
# Make sure that if a Hue user already exists with a naming collision, we
# won't overwrite any of that user's information.
hue_user = User.objects.create(username='otherguy', first_name='Different', last_name='Guy')
import_ldap_users(ldap_access.CACHED_LDAP_CONN, 'otherguy', sync_groups=False, import_by_dn=False)
hue_user = User.objects.get(username='otherguy')
assert_equal(get_profile(hue_user).creation_method, str(UserProfile.CreationMethod.HUE))
assert_equal(hue_user.first_name, 'Different')
# Make sure LDAP groups exist or they won't sync
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'TestUsers', import_members=False, import_members_recursive=False, sync_users=False, import_by_dn=False)
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'Test Administrators', import_members=False, import_members_recursive=False, sync_users=False, import_by_dn=False)
# Try importing a user and sync groups
import_ldap_users(ldap_access.CACHED_LDAP_CONN, 'curly', sync_groups=True, import_by_dn=False)
curly = User.objects.get(username='curly')
assert_equal(curly.first_name, 'Curly')
assert_equal(curly.last_name, 'Stooge')
assert_equal(curly.email, '[email protected]')
assert_equal(get_profile(curly).creation_method, str(UserProfile.CreationMethod.EXTERNAL))
assert_equal(2, curly.groups.all().count(), curly.groups.all())
reset_all_users()
reset_all_groups()
# Test import case sensitivity
done.append(desktop.conf.LDAP.IGNORE_USERNAME_CASE.set_for_testing(True))
import_ldap_users(ldap_access.CACHED_LDAP_CONN, 'Lårry', sync_groups=False, import_by_dn=False)
assert_false(User.objects.filter(username='Lårry').exists())
assert_true(User.objects.filter(username='lårry').exists())
# Test lower case
User.objects.filter(username__iexact='Rock').delete()
import_ldap_users(ldap_access.CACHED_LDAP_CONN, 'Rock', sync_groups=False, import_by_dn=False)
assert_false(User.objects.filter(username='Rock').exists())
assert_true(User.objects.filter(username='rock').exists())
done.append(desktop.conf.LDAP.FORCE_USERNAME_LOWERCASE.set_for_testing(True))
import_ldap_users(ldap_access.CACHED_LDAP_CONN, 'Rock', sync_groups=False, import_by_dn=False)
assert_false(User.objects.filter(username='Rock').exists())
assert_true(User.objects.filter(username='rock').exists())
User.objects.filter(username='Rock').delete()
import_ldap_users(ldap_access.CACHED_LDAP_CONN, 'Rock', sync_groups=False, import_by_dn=False)
assert_false(User.objects.filter(username='Rock').exists())
assert_true(User.objects.filter(username='rock').exists())
finally:
for finish in done:
finish()
def test_add_ldap_users():
done = []
try:
URL = reverse(add_ldap_users)
reset_all_users()
reset_all_groups()
# Set up LDAP tests to use a LdapTestConnection instead of an actual LDAP connection
ldap_access.CACHED_LDAP_CONN = LdapTestConnection()
c = make_logged_in_client('test', is_superuser=True)
assert_true(c.get(URL))
response = c.post(URL, dict(username_pattern='moe', password1='test', password2='test'))
assert_true('Location' in response, response)
assert_true('/useradmin/users' in response['Location'], response)
response = c.post(URL, dict(username_pattern='bad_name', password1='test', password2='test'))
assert_true('Could not' in response.context['form'].errors['username_pattern'][0], response)
# Test wild card
response = c.post(URL, dict(username_pattern='*rr*', password1='test', password2='test'))
assert_true('/useradmin/users' in response['Location'], response)
# Test ignore case
done.append(desktop.conf.LDAP.IGNORE_USERNAME_CASE.set_for_testing(True))
User.objects.filter(username='moe').delete()
assert_false(User.objects.filter(username='Moe').exists())
assert_false(User.objects.filter(username='moe').exists())
response = c.post(URL, dict(username_pattern='Moe', password1='test', password2='test'))
assert_true('Location' in response, response)
assert_true('/useradmin/users' in response['Location'], response)
assert_false(User.objects.filter(username='Moe').exists())
assert_true(User.objects.filter(username='moe').exists())
# Test lower case
done.append(desktop.conf.LDAP.FORCE_USERNAME_LOWERCASE.set_for_testing(True))
User.objects.filter(username__iexact='Rock').delete()
assert_false(User.objects.filter(username='Rock').exists())
assert_false(User.objects.filter(username='rock').exists())
response = c.post(URL, dict(username_pattern='rock', password1='test', password2='test'))
assert_true('Location' in response, response)
assert_true('/useradmin/users' in response['Location'], response)
assert_false(User.objects.filter(username='Rock').exists())
assert_true(User.objects.filter(username='rock').exists())
# Test regular with spaces (should fail)
response = c.post(URL, dict(username_pattern='user with space', password1='test', password2='test'))
assert_true("Username must not contain whitespaces and ':'" in response.context['form'].errors['username_pattern'][0], response)
# Test dn with spaces in username and dn (should fail)
response = c.post(URL, dict(username_pattern='uid=user with space,ou=People,dc=example,dc=com', password1='test', password2='test', dn=True))
assert_true("There was a problem with some of the LDAP information" in response.content, response)
assert_true("Username must not contain whitespaces" in response.content, response)
# Test dn with spaces in dn, but not username (should succeed)
response = c.post(URL, dict(username_pattern='uid=user without space,ou=People,dc=example,dc=com', password1='test', password2='test', dn=True))
assert_true(User.objects.filter(username='spaceless').exists())
finally:
for finish in done:
finish()
def test_add_ldap_groups():
URL = reverse(add_ldap_groups)
reset_all_users()
reset_all_groups()
# Set up LDAP tests to use a LdapTestConnection instead of an actual LDAP connection
ldap_access.CACHED_LDAP_CONN = LdapTestConnection()
c = make_logged_in_client(username='test', is_superuser=True)
assert_true(c.get(URL))
response = c.post(URL, dict(groupname_pattern='TestUsers'))
assert_true('Location' in response, response)
assert_true('/useradmin/groups' in response['Location'])
# Test with space
response = c.post(URL, dict(groupname_pattern='Test Administrators'))
assert_true('Location' in response, response)
assert_true('/useradmin/groups' in response['Location'], response)
response = c.post(URL, dict(groupname_pattern='toolongnametoolongnametoolongnametoolongnametoolongnametoolongname'
'toolongnametoolongnametoolongnametoolongnametoolongnametoolongname'
'toolongnametoolongnametoolongnametoolongnametoolongnametoolongname'
'toolongnametoolongnametoolongnametoolongnametoolongnametoolongname'))
assert_true('Ensure this value has at most 256 characters' in response.context['form'].errors['groupname_pattern'][0], response)
# Test wild card
response = c.post(URL, dict(groupname_pattern='*r*'))
assert_true('/useradmin/groups' in response['Location'], response)
def test_sync_ldap_users_groups():
URL = reverse(sync_ldap_users_groups)
reset_all_users()
reset_all_groups()
# Set up LDAP tests to use a LdapTestConnection instead of an actual LDAP connection
ldap_access.CACHED_LDAP_CONN = LdapTestConnection()
c = make_logged_in_client('test', is_superuser=True)
assert_true(c.get(URL))
assert_true(c.post(URL))
def test_ldap_exception_handling():
reset_all_users()
reset_all_groups()
# Set up LDAP tests to use a LdapTestConnection instead of an actual LDAP connection
class LdapTestConnectionError(LdapTestConnection):
def find_users(self, user, find_by_dn=False):
raise ldap.LDAPError('No such object')
ldap_access.CACHED_LDAP_CONN = LdapTestConnectionError()
c = make_logged_in_client('test', is_superuser=True)
response = c.post(reverse(add_ldap_users), dict(username_pattern='moe', password1='test', password2='test'), follow=True)
assert_true('There was an error when communicating with LDAP' in response.content, response)
@attr('requires_hadoop')
def test_ensure_home_directory_add_ldap_users():
try:
URL = reverse(add_ldap_users)
reset_all_users()
reset_all_groups()
# Set up LDAP tests to use a LdapTestConnection instead of an actual LDAP connection
ldap_access.CACHED_LDAP_CONN = LdapTestConnection()
cluster = pseudo_hdfs4.shared_cluster()
c = make_logged_in_client(cluster.superuser, is_superuser=True)
cluster.fs.setuser(cluster.superuser)
assert_true(c.get(URL))
response = c.post(URL, dict(username_pattern='moe', password1='test', password2='test'))
assert_true('/useradmin/users' in response['Location'])
assert_false(cluster.fs.exists('/user/moe'))
# Try same thing with home directory creation.
response = c.post(URL, dict(username_pattern='curly', password1='test', password2='test', ensure_home_directory=True))
assert_true('/useradmin/users' in response['Location'])
assert_true(cluster.fs.exists('/user/curly'))
response = c.post(URL, dict(username_pattern='bad_name', password1='test', password2='test'))
assert_true('Could not' in response.context['form'].errors['username_pattern'][0])
assert_false(cluster.fs.exists('/user/bad_name'))
# See if moe, who did not ask for his home directory, has a home directory.
assert_false(cluster.fs.exists('/user/moe'))
# Try wild card now
response = c.post(URL, dict(username_pattern='*rr*', password1='test', password2='test', ensure_home_directory=True))
assert_true('/useradmin/users' in response['Location'])
assert_true(cluster.fs.exists('/user/curly'))
assert_true(cluster.fs.exists(u'/user/lårry'))
assert_false(cluster.fs.exists('/user/otherguy'))
finally:
# Clean up
if cluster.fs.exists('/user/curly'):
cluster.fs.rmtree('/user/curly')
if cluster.fs.exists(u'/user/lårry'):
cluster.fs.rmtree(u'/user/lårry')
if cluster.fs.exists('/user/otherguy'):
cluster.fs.rmtree('/user/otherguy')
@attr('requires_hadoop')
def test_ensure_home_directory_sync_ldap_users_groups():
URL = reverse(sync_ldap_users_groups)
reset_all_users()
reset_all_groups()
# Set up LDAP tests to use a LdapTestConnection instead of an actual LDAP connection
ldap_access.CACHED_LDAP_CONN = LdapTestConnection()
cluster = pseudo_hdfs4.shared_cluster()
c = make_logged_in_client(cluster.superuser, is_superuser=True)
cluster.fs.setuser(cluster.superuser)
c.post(reverse(add_ldap_users), dict(username_pattern='curly', password1='test', password2='test'))
assert_false(cluster.fs.exists('/user/curly'))
assert_true(c.post(URL, dict(ensure_home_directory=True)))
assert_true(cluster.fs.exists('/user/curly'))
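# Editorial sketch (not part of the upstream test module): every test above
# repeats the same three setup steps; a hypothetical helper like this one
# could factor them out. The helper name is an assumption for illustration.
def _reset_ldap_test_env():
  reset_all_users()
  reset_all_groups()
  # Swap the real LDAP connection for the in-memory test double
  ldap_access.CACHED_LDAP_CONN = LdapTestConnection()
  return ldap_access.CACHED_LDAP_CONN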
|
apache-2.0
| 7,790,095,731,738,867,000 | 932,239,197,733,898,900 | 48.605873 | 167 | 0.724069 | false |
sanguinariojoe/FreeCAD
|
src/Mod/Part/BOPTools/JoinFeatures.py
|
13
|
15669
|
#/***************************************************************************
# * Copyright (c) 2016 Victor Titov (DeepSOIC) <[email protected]> *
# * *
# * This file is part of the FreeCAD CAx development system. *
# * *
# * This library is free software; you can redistribute it and/or *
# * modify it under the terms of the GNU Library General Public *
# * License as published by the Free Software Foundation; either *
# * version 2 of the License, or (at your option) any later version. *
# * *
# * This library is distributed in the hope that it will be useful, *
# * but WITHOUT ANY WARRANTY; without even the implied warranty of *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
# * GNU Library General Public License for more details. *
# * *
# * You should have received a copy of the GNU Library General Public *
# * License along with this library; see the file COPYING.LIB. If not, *
# * write to the Free Software Foundation, Inc., 59 Temple Place, *
# * Suite 330, Boston, MA 02111-1307, USA *
# * *
# ***************************************************************************/
__title__ = "BOPTools.JoinFeatures module"
__author__ = "DeepSOIC"
__url__ = "http://www.freecadweb.org"
__doc__ = "Implementation of document objects (features) for connect, embed and cutout operations."
from . import JoinAPI
import FreeCAD
import Part
if FreeCAD.GuiUp:
import FreeCADGui
from PySide import QtCore, QtGui
# -------------------------- common stuff -------------------------------------
# -------------------------- translation-related code -------------------------
# Thanks, yorik! (see forum thread "A new Part tool is being born... JoinFeatures!"
# http://forum.freecadweb.org/viewtopic.php?f=22&t=11112&start=30#p90239 )
try:
_fromUtf8 = QtCore.QString.fromUtf8
except Exception:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
else:
def _translate(context, text, disambig):
return text
# --------------------------/translation-related code -------------------------
def getParamRefine():
return FreeCAD.ParamGet("User parameter:BaseApp/Preferences/Mod/Part/Boolean").GetBool("RefineModel")
def cmdCreateJoinFeature(name, mode):
"""cmdCreateJoinFeature(name, mode): generalized implementation of GUI commands."""
sel = FreeCADGui.Selection.getSelectionEx()
FreeCAD.ActiveDocument.openTransaction("Create "+mode)
FreeCADGui.addModule("BOPTools.JoinFeatures")
FreeCADGui.doCommand("j = BOPTools.JoinFeatures.make{mode}(name='{name}')".format(mode=mode, name=name))
if mode == "Embed" or mode == "Cutout":
FreeCADGui.doCommand("j.Base = App.ActiveDocument."+sel[0].Object.Name)
FreeCADGui.doCommand("j.Tool = App.ActiveDocument."+sel[1].Object.Name)
elif mode == "Connect":
FreeCADGui.doCommand("j.Objects = {sel}".format(
sel= "[" + ", ".join(["App.ActiveDocument."+so.Object.Name for so in sel]) + "]"
))
else:
raise ValueError("cmdCreateJoinFeature: Unexpected mode {mode}".format(mode=repr(mode)))
try:
FreeCADGui.doCommand("j.Proxy.execute(j)")
FreeCADGui.doCommand("j.purgeTouched()")
except Exception as err:
mb = QtGui.QMessageBox()
mb.setIcon(mb.Icon.Warning)
mb.setText(_translate("Part_JoinFeatures",
"Computing the result failed with an error:\n\n"
"{err}\n\n"
"Click 'Continue' to create the feature anyway, or 'Abort' to cancel.", None)
.format(err=str(err)))
mb.setWindowTitle(_translate("Part_JoinFeatures","Bad selection", None))
btnAbort = mb.addButton(QtGui.QMessageBox.StandardButton.Abort)
btnOK = mb.addButton(_translate("Part_JoinFeatures","Continue",None),
QtGui.QMessageBox.ButtonRole.ActionRole)
mb.setDefaultButton(btnOK)
mb.exec_()
if mb.clickedButton() is btnAbort:
FreeCAD.ActiveDocument.abortTransaction()
return
FreeCADGui.doCommand("for obj in j.ViewObject.Proxy.claimChildren():\n"
" obj.ViewObject.hide()")
FreeCAD.ActiveDocument.commitTransaction()
def getIconPath(icon_dot_svg):
return icon_dot_svg
# -------------------------- /common stuff ------------------------------------
# -------------------------- Connect ------------------------------------------
def makeConnect(name):
    '''makeConnect(name): makes a Connect object.'''
obj = FreeCAD.ActiveDocument.addObject("Part::FeaturePython",name)
FeatureConnect(obj)
if FreeCAD.GuiUp:
ViewProviderConnect(obj.ViewObject)
return obj
class FeatureConnect:
"""The PartJoinFeature object."""
def __init__(self,obj):
obj.addProperty("App::PropertyLinkList","Objects","Connect","Object to be connectded.")
obj.addProperty("App::PropertyBool","Refine","Connect",
"True = refine resulting shape. False = output as is.")
obj.Refine = getParamRefine()
obj.addProperty("App::PropertyLength","Tolerance","Connect",
"Tolerance when intersecting (fuzzy value). "
"In addition to tolerances of the shapes.")
obj.Proxy = self
self.Type = "FeatureConnect"
def execute(self,selfobj):
rst = JoinAPI.connect([obj.Shape for obj in selfobj.Objects], selfobj.Tolerance)
if selfobj.Refine:
rst = rst.removeSplitter()
selfobj.Shape = rst
class ViewProviderConnect:
"""A View Provider for the Part Connect feature."""
def __init__(self,vobj):
vobj.Proxy = self
def getIcon(self):
return getIconPath("Part_JoinConnect.svg")
def attach(self, vobj):
self.ViewObject = vobj
self.Object = vobj.Object
def __getstate__(self):
return None
def __setstate__(self,state):
return None
def claimChildren(self):
return self.Object.Objects
def onDelete(self, feature, subelements):
try:
for obj in self.claimChildren():
obj.ViewObject.show()
except Exception as err:
FreeCAD.Console.PrintError("Error in onDelete: " + str(err))
return True
def canDragObjects(self):
return True
def canDropObjects(self):
return True
def canDragObject(self, dragged_object):
return True
def canDropObject(self, incoming_object):
return hasattr(incoming_object, 'Shape')
def dragObject(self, selfvp, dragged_object):
objs = self.Object.Objects
objs.remove(dragged_object)
self.Object.Objects = objs
def dropObject(self, selfvp, incoming_object):
self.Object.Objects = self.Object.Objects + [incoming_object]
class CommandConnect:
"""Command to create Connect feature."""
def GetResources(self):
return {'Pixmap': getIconPath("Part_JoinConnect.svg"),
'MenuText': QtCore.QT_TRANSLATE_NOOP("Part_JoinConnect","Connect objects"),
'Accel': "",
'ToolTip': QtCore.QT_TRANSLATE_NOOP("Part_JoinConnect",
"Fuses objects, taking care to preserve voids.")}
def Activated(self):
if len(FreeCADGui.Selection.getSelectionEx()) >= 1:
cmdCreateJoinFeature(name="Connect", mode="Connect")
else:
mb = QtGui.QMessageBox()
mb.setIcon(mb.Icon.Warning)
mb.setText(_translate("Part_JoinFeatures",
"Select at least two objects, or one or more compounds", None))
mb.setWindowTitle(_translate("Part_JoinFeatures","Bad selection", None))
mb.exec_()
def IsActive(self):
if FreeCAD.ActiveDocument:
return True
else:
return False
# -------------------------- /Connect -----------------------------------------
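# Editorial sketch (not part of the upstream module): scripted use of
# makeConnect from the FreeCAD Python console. The object names "Box" and
# "Sphere" are assumptions for illustration; any two overlapping solids in
# the active document would do.
def _exampleConnectTwoSolids():
    doc = FreeCAD.ActiveDocument
    j = makeConnect(name="Connect")
    j.Objects = [doc.Box, doc.Sphere]
    j.Proxy.execute(j)
    j.purgeTouched()
    return j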
# -------------------------- Embed --------------------------------------------
def makeEmbed(name):
'''makeEmbed(name): makes an Embed object.'''
obj = FreeCAD.ActiveDocument.addObject("Part::FeaturePython",name)
FeatureEmbed(obj)
if FreeCAD.GuiUp:
ViewProviderEmbed(obj.ViewObject)
return obj
class FeatureEmbed:
"""The Part Embed object."""
def __init__(self,obj):
obj.addProperty("App::PropertyLink","Base","Embed","Object to embed into.")
obj.addProperty("App::PropertyLink","Tool","Embed","Object to be embedded.")
obj.addProperty("App::PropertyBool","Refine","Embed",
"True = refine resulting shape. False = output as is.")
obj.Refine = getParamRefine()
obj.addProperty("App::PropertyLength","Tolerance","Embed",
"Tolerance when intersecting (fuzzy value). "
"In addition to tolerances of the shapes.")
obj.Proxy = self
self.Type = "FeatureEmbed"
def execute(self,selfobj):
rst = JoinAPI.embed_legacy(selfobj.Base.Shape, selfobj.Tool.Shape, selfobj.Tolerance)
if selfobj.Refine:
rst = rst.removeSplitter()
selfobj.Shape = rst
class ViewProviderEmbed:
"""A View Provider for the Part Embed feature."""
def __init__(self,vobj):
vobj.Proxy = self
def getIcon(self):
return getIconPath("Part_JoinEmbed.svg")
def attach(self, vobj):
self.ViewObject = vobj
self.Object = vobj.Object
def __getstate__(self):
return None
def __setstate__(self,state):
return None
def claimChildren(self):
return [self.Object.Base, self.Object.Tool]
def onDelete(self, feature, subelements):
try:
self.Object.Base.ViewObject.show()
self.Object.Tool.ViewObject.show()
except Exception as err:
FreeCAD.Console.PrintError("Error in onDelete: " + str(err))
return True
class CommandEmbed:
"""Command to create Part Embed feature."""
def GetResources(self):
return {'Pixmap': getIconPath("Part_JoinEmbed.svg"),
'MenuText': QtCore.QT_TRANSLATE_NOOP("Part_JoinEmbed","Embed object"),
'Accel': "",
'ToolTip': QtCore.QT_TRANSLATE_NOOP("Part_JoinEmbed",
"Fuses one object into another, taking care to preserve voids.")}
def Activated(self):
if len(FreeCADGui.Selection.getSelectionEx()) == 2:
cmdCreateJoinFeature(name = "Embed", mode = "Embed")
else:
mb = QtGui.QMessageBox()
mb.setIcon(mb.Icon.Warning)
mb.setText(_translate("Part_JoinFeatures",
"Select base object, then the object to embed, "
"and then invoke this tool.", None))
mb.setWindowTitle(_translate("Part_JoinFeatures","Bad selection", None))
mb.exec_()
def IsActive(self):
if FreeCAD.ActiveDocument:
return True
else:
return False
# -------------------------- /Embed -------------------------------------------
# -------------------------- Cutout -------------------------------------------
def makeCutout(name):
    '''makeCutout(name): makes a Cutout object.'''
obj = FreeCAD.ActiveDocument.addObject("Part::FeaturePython",name)
FeatureCutout(obj)
if FreeCAD.GuiUp:
ViewProviderCutout(obj.ViewObject)
return obj
class FeatureCutout:
"""The Part Cutout object."""
def __init__(self,obj):
obj.addProperty("App::PropertyLink","Base","Cutout","Object to be cut.")
obj.addProperty("App::PropertyLink","Tool","Cutout","Object to make cutout for.")
obj.addProperty("App::PropertyBool","Refine","Cutout",
"True = refine resulting shape. False = output as is.")
obj.Refine = getParamRefine()
obj.addProperty("App::PropertyLength","Tolerance","Cutout",
"Tolerance when intersecting (fuzzy value). In addition to tolerances of the shapes.")
obj.Proxy = self
self.Type = "FeatureCutout"
def execute(self,selfobj):
rst = JoinAPI.cutout_legacy(selfobj.Base.Shape, selfobj.Tool.Shape, selfobj.Tolerance)
if selfobj.Refine:
rst = rst.removeSplitter()
selfobj.Shape = rst
class ViewProviderCutout:
"""A View Provider for the Part Cutout feature."""
def __init__(self,vobj):
vobj.Proxy = self
def getIcon(self):
return getIconPath("Part_JoinCutout.svg")
def attach(self, vobj):
self.ViewObject = vobj
self.Object = vobj.Object
def __getstate__(self):
return None
def __setstate__(self,state):
return None
def claimChildren(self):
return [self.Object.Base, self.Object.Tool]
def onDelete(self, feature, subelements):
try:
self.Object.Base.ViewObject.show()
self.Object.Tool.ViewObject.show()
except Exception as err:
FreeCAD.Console.PrintError("Error in onDelete: " + str(err))
return True
class CommandCutout:
"""Command to create PartJoinFeature in Cutout mode."""
def GetResources(self):
return {'Pixmap': getIconPath("Part_JoinCutout.svg"),
'MenuText': QtCore.QT_TRANSLATE_NOOP("Part_JoinCutout","Cutout for object"),
'Accel': "",
'ToolTip': QtCore.QT_TRANSLATE_NOOP("Part_JoinCutout",
"Makes a cutout in one object to fit another object.")}
def Activated(self):
if len(FreeCADGui.Selection.getSelectionEx()) == 2:
cmdCreateJoinFeature(name="Cutout", mode="Cutout")
else:
mb = QtGui.QMessageBox()
mb.setIcon(mb.Icon.Warning)
mb.setText(_translate("Part_JoinFeatures",
"Select the object to make a cutout in, "
"then the object that should fit into the cutout, "
"and then invoke this tool.", None))
mb.setWindowTitle(_translate("Part_JoinFeatures","Bad selection", None))
mb.exec_()
def IsActive(self):
if FreeCAD.ActiveDocument:
return True
else:
return False
# -------------------------- /Cutout ------------------------------------------
def addCommands():
FreeCADGui.addCommand('Part_JoinCutout', CommandCutout())
FreeCADGui.addCommand('Part_JoinEmbed', CommandEmbed())
FreeCADGui.addCommand('Part_JoinConnect', CommandConnect())
|
lgpl-2.1
| 5,387,259,186,306,032,000 | 6,253,751,467,233,381,000 | 36.57554 | 117 | 0.561044 | false |
xuweiliang/Codelibrary
|
openstack_dashboard/test/integration_tests/pages/loginpage.py
|
18
|
3628
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from selenium.webdriver.common import by
from selenium.webdriver.common import keys
from openstack_dashboard.test.integration_tests.pages.admin.system import \
overviewpage as system_overviewpage
from openstack_dashboard.test.integration_tests.pages import pageobject
from openstack_dashboard.test.integration_tests.pages.project.compute import \
overviewpage as compute_overviewpage
class LoginPage(pageobject.PageObject):
_login_username_field_locator = (by.By.ID, 'id_username')
_login_password_field_locator = (by.By.ID, 'id_password')
_login_submit_button_locator = (by.By.CSS_SELECTOR,
'div.panel-footer button.btn')
_login_logout_reason_locator = (by.By.ID, 'logout_reason')
def __init__(self, driver, conf):
super(LoginPage, self).__init__(driver, conf)
self._page_title = "Login"
def is_login_page(self):
return (self.is_the_current_page() and
self._is_element_visible(*self._login_submit_button_locator))
@property
def username(self):
return self._get_element(*self._login_username_field_locator)
@property
def password(self):
return self._get_element(*self._login_password_field_locator)
@property
def login_button(self):
return self._get_element(*self._login_submit_button_locator)
def _click_on_login_button(self):
self.login_button.click()
def _press_enter_on_login_button(self):
self.login_button.send_keys(keys.Keys.RETURN)
def is_logout_reason_displayed(self):
return self._get_element(*self._login_logout_reason_locator)
def login(self, user=None, password=None):
return self.login_with_mouse_click(user, password)
def login_with_mouse_click(self, user, password):
return self._do_login(user, password, self._click_on_login_button)
def login_with_enter_key(self, user, password):
return self._do_login(user, password,
self._press_enter_on_login_button)
def _do_login(self, user, password, login_method):
if user == self.conf.identity.admin_username:
if password is None:
password = self.conf.identity.admin_password
return self.login_as_admin(password, login_method)
else:
if password is None:
password = self.conf.identity.password
if user is None:
user = self.conf.identity.username
return self.login_as_user(user, password, login_method)
def login_as_admin(self, password, login_method):
self.username.send_keys(self.conf.identity.admin_username)
self.password.send_keys(password)
login_method()
return system_overviewpage.OverviewPage(self.driver, self.conf)
def login_as_user(self, user, password, login_method):
self.username.send_keys(user)
self.password.send_keys(password)
login_method()
return compute_overviewpage.OverviewPage(self.driver, self.conf)
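# Editorial sketch (not part of the upstream page object): how an integration
# test might drive LoginPage. The `driver` and `conf` arguments are assumed to
# come from the test harness fixtures; they are not defined in this module.
def _example_login_flow(driver, conf):
    login_pg = LoginPage(driver, conf)
    assert login_pg.is_login_page()
    # Logging in as a regular user returns the project compute overview page.
    return login_pg.login(conf.identity.username, conf.identity.password)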
|
apache-2.0
| -9,152,293,988,039,607,000 | 4,378,344,570,150,050,300 | 38.868132 | 78 | 0.671444 | false |
zlsun/XX-Net
|
code/default/gae_proxy/server/lib/google/net/proto/ProtocolBuffer.py
|
10
|
27352
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import array
import httplib
import re
import struct
try:
import google.net.proto.proto1 as proto1
except ImportError:
class ProtocolBufferDecodeError(Exception): pass
class ProtocolBufferEncodeError(Exception): pass
class ProtocolBufferReturnError(Exception): pass
else:
ProtocolBufferDecodeError = proto1.ProtocolBufferDecodeError
ProtocolBufferEncodeError = proto1.ProtocolBufferEncodeError
ProtocolBufferReturnError = proto1.ProtocolBufferReturnError
__all__ = ['ProtocolMessage', 'Encoder', 'Decoder',
'ExtendableProtocolMessage',
'ProtocolBufferDecodeError',
'ProtocolBufferEncodeError',
'ProtocolBufferReturnError']
URL_RE = re.compile('^(https?)://([^/]+)(/.*)$')
class ProtocolMessage:
def __init__(self, contents=None):
raise NotImplementedError
def Clear(self):
raise NotImplementedError
def IsInitialized(self, debug_strs=None):
raise NotImplementedError
def Encode(self):
try:
return self._CEncode()
except (NotImplementedError, AttributeError):
e = Encoder()
self.Output(e)
return e.buffer().tostring()
def SerializeToString(self):
return self.Encode()
def SerializePartialToString(self):
try:
return self._CEncodePartial()
except (NotImplementedError, AttributeError):
e = Encoder()
self.OutputPartial(e)
return e.buffer().tostring()
def _CEncode(self):
raise NotImplementedError
def _CEncodePartial(self):
raise NotImplementedError
def ParseFromString(self, s):
self.Clear()
self.MergeFromString(s)
def ParsePartialFromString(self, s):
self.Clear()
self.MergePartialFromString(s)
def MergeFromString(self, s):
self.MergePartialFromString(s)
dbg = []
if not self.IsInitialized(dbg):
raise ProtocolBufferDecodeError, '\n\t'.join(dbg)
def MergePartialFromString(self, s):
try:
self._CMergeFromString(s)
except (NotImplementedError, AttributeError):
a = array.array('B')
a.fromstring(s)
d = Decoder(a, 0, len(a))
self.TryMerge(d)
def _CMergeFromString(self, s):
raise NotImplementedError
def __getstate__(self):
return self.Encode()
def __setstate__(self, contents_):
self.__init__(contents=contents_)
def sendCommand(self, server, url, response, follow_redirects=1,
secure=0, keyfile=None, certfile=None):
data = self.Encode()
if secure:
if keyfile and certfile:
conn = httplib.HTTPSConnection(server, key_file=keyfile,
cert_file=certfile)
else:
conn = httplib.HTTPSConnection(server)
else:
conn = httplib.HTTPConnection(server)
conn.putrequest("POST", url)
conn.putheader("Content-Length", "%d" %len(data))
conn.endheaders()
conn.send(data)
resp = conn.getresponse()
if follow_redirects > 0 and resp.status == 302:
m = URL_RE.match(resp.getheader('Location'))
if m:
protocol, server, url = m.groups()
return self.sendCommand(server, url, response,
follow_redirects=follow_redirects - 1,
secure=(protocol == 'https'),
keyfile=keyfile,
certfile=certfile)
if resp.status != 200:
raise ProtocolBufferReturnError(resp.status)
if response is not None:
response.ParseFromString(resp.read())
return response
def sendSecureCommand(self, server, keyfile, certfile, url, response,
follow_redirects=1):
return self.sendCommand(server, url, response,
follow_redirects=follow_redirects,
secure=1, keyfile=keyfile, certfile=certfile)
def __str__(self, prefix="", printElemNumber=0):
raise NotImplementedError
def ToASCII(self):
return self._CToASCII(ProtocolMessage._SYMBOLIC_FULL_ASCII)
def ToCompactASCII(self):
return self._CToASCII(ProtocolMessage._NUMERIC_ASCII)
def ToShortASCII(self):
return self._CToASCII(ProtocolMessage._SYMBOLIC_SHORT_ASCII)
_NUMERIC_ASCII = 0
_SYMBOLIC_SHORT_ASCII = 1
_SYMBOLIC_FULL_ASCII = 2
def _CToASCII(self, output_format):
raise NotImplementedError
def ParseASCII(self, ascii_string):
raise NotImplementedError
def ParseASCIIIgnoreUnknown(self, ascii_string):
raise NotImplementedError
def Equals(self, other):
raise NotImplementedError
def __eq__(self, other):
if other.__class__ is self.__class__:
return self.Equals(other)
return NotImplemented
def __ne__(self, other):
if other.__class__ is self.__class__:
return not self.Equals(other)
return NotImplemented
def Output(self, e):
dbg = []
if not self.IsInitialized(dbg):
raise ProtocolBufferEncodeError, '\n\t'.join(dbg)
self.OutputUnchecked(e)
return
def OutputUnchecked(self, e):
raise NotImplementedError
def OutputPartial(self, e):
raise NotImplementedError
def Parse(self, d):
self.Clear()
self.Merge(d)
return
def Merge(self, d):
self.TryMerge(d)
dbg = []
if not self.IsInitialized(dbg):
raise ProtocolBufferDecodeError, '\n\t'.join(dbg)
return
def TryMerge(self, d):
raise NotImplementedError
def CopyFrom(self, pb):
if (pb == self): return
self.Clear()
self.MergeFrom(pb)
def MergeFrom(self, pb):
raise NotImplementedError
def lengthVarInt32(self, n):
return self.lengthVarInt64(n)
def lengthVarInt64(self, n):
if n < 0:
return 10
result = 0
while 1:
result += 1
n >>= 7
if n == 0:
break
return result
def lengthString(self, n):
return self.lengthVarInt32(n) + n
def DebugFormat(self, value):
return "%s" % value
def DebugFormatInt32(self, value):
if (value <= -2000000000 or value >= 2000000000):
return self.DebugFormatFixed32(value)
return "%d" % value
def DebugFormatInt64(self, value):
if (value <= -20000000000000 or value >= 20000000000000):
return self.DebugFormatFixed64(value)
return "%d" % value
def DebugFormatString(self, value):
def escape(c):
o = ord(c)
if o == 10: return r"\n"
if o == 39: return r"\'"
if o == 34: return r'\"'
if o == 92: return r"\\"
if o >= 127 or o < 32: return "\\%03o" % o
return c
return '"' + "".join([escape(c) for c in value]) + '"'
def DebugFormatFloat(self, value):
return "%ff" % value
def DebugFormatFixed32(self, value):
if (value < 0): value += (1L<<32)
return "0x%x" % value
def DebugFormatFixed64(self, value):
if (value < 0): value += (1L<<64)
return "0x%x" % value
def DebugFormatBool(self, value):
if value:
return "true"
else:
return "false"
TYPE_DOUBLE = 1
TYPE_FLOAT = 2
TYPE_INT64 = 3
TYPE_UINT64 = 4
TYPE_INT32 = 5
TYPE_FIXED64 = 6
TYPE_FIXED32 = 7
TYPE_BOOL = 8
TYPE_STRING = 9
TYPE_GROUP = 10
TYPE_FOREIGN = 11
_TYPE_TO_DEBUG_STRING = {
TYPE_INT32: ProtocolMessage.DebugFormatInt32,
TYPE_INT64: ProtocolMessage.DebugFormatInt64,
TYPE_UINT64: ProtocolMessage.DebugFormatInt64,
TYPE_FLOAT: ProtocolMessage.DebugFormatFloat,
TYPE_STRING: ProtocolMessage.DebugFormatString,
TYPE_FIXED32: ProtocolMessage.DebugFormatFixed32,
TYPE_FIXED64: ProtocolMessage.DebugFormatFixed64,
TYPE_BOOL: ProtocolMessage.DebugFormatBool }
class Encoder:
NUMERIC = 0
DOUBLE = 1
STRING = 2
STARTGROUP = 3
ENDGROUP = 4
FLOAT = 5
MAX_TYPE = 6
def __init__(self):
self.buf = array.array('B')
return
def buffer(self):
return self.buf
def put8(self, v):
if v < 0 or v >= (1<<8): raise ProtocolBufferEncodeError, "u8 too big"
self.buf.append(v & 255)
return
def put16(self, v):
if v < 0 or v >= (1<<16): raise ProtocolBufferEncodeError, "u16 too big"
self.buf.append((v >> 0) & 255)
self.buf.append((v >> 8) & 255)
return
def put32(self, v):
if v < 0 or v >= (1L<<32): raise ProtocolBufferEncodeError, "u32 too big"
self.buf.append((v >> 0) & 255)
self.buf.append((v >> 8) & 255)
self.buf.append((v >> 16) & 255)
self.buf.append((v >> 24) & 255)
return
def put64(self, v):
if v < 0 or v >= (1L<<64): raise ProtocolBufferEncodeError, "u64 too big"
self.buf.append((v >> 0) & 255)
self.buf.append((v >> 8) & 255)
self.buf.append((v >> 16) & 255)
self.buf.append((v >> 24) & 255)
self.buf.append((v >> 32) & 255)
self.buf.append((v >> 40) & 255)
self.buf.append((v >> 48) & 255)
self.buf.append((v >> 56) & 255)
return
def putVarInt32(self, v):
buf_append = self.buf.append
if v & 127 == v:
buf_append(v)
return
if v >= 0x80000000 or v < -0x80000000:
raise ProtocolBufferEncodeError, "int32 too big"
if v < 0:
v += 0x10000000000000000
while True:
bits = v & 127
v >>= 7
if v:
bits |= 128
buf_append(bits)
if not v:
break
return
def putVarInt64(self, v):
buf_append = self.buf.append
if v >= 0x8000000000000000 or v < -0x8000000000000000:
raise ProtocolBufferEncodeError, "int64 too big"
if v < 0:
v += 0x10000000000000000
while True:
bits = v & 127
v >>= 7
if v:
bits |= 128
buf_append(bits)
if not v:
break
return
def putVarUint64(self, v):
buf_append = self.buf.append
if v < 0 or v >= 0x10000000000000000:
raise ProtocolBufferEncodeError, "uint64 too big"
while True:
bits = v & 127
v >>= 7
if v:
bits |= 128
buf_append(bits)
if not v:
break
return
def putFloat(self, v):
a = array.array('B')
a.fromstring(struct.pack("<f", v))
self.buf.extend(a)
return
def putDouble(self, v):
a = array.array('B')
a.fromstring(struct.pack("<d", v))
self.buf.extend(a)
return
def putBoolean(self, v):
if v:
self.buf.append(1)
else:
self.buf.append(0)
return
def putPrefixedString(self, v):
v = str(v)
self.putVarInt32(len(v))
self.buf.fromstring(v)
return
def putRawString(self, v):
self.buf.fromstring(v)
_TYPE_TO_METHOD = {
TYPE_DOUBLE: putDouble,
TYPE_FLOAT: putFloat,
TYPE_FIXED64: put64,
TYPE_FIXED32: put32,
TYPE_INT32: putVarInt32,
TYPE_INT64: putVarInt64,
TYPE_UINT64: putVarUint64,
TYPE_BOOL: putBoolean,
TYPE_STRING: putPrefixedString }
_TYPE_TO_BYTE_SIZE = {
TYPE_DOUBLE: 8,
TYPE_FLOAT: 4,
TYPE_FIXED64: 8,
TYPE_FIXED32: 4,
TYPE_BOOL: 1 }
class Decoder:
def __init__(self, buf, idx, limit):
self.buf = buf
self.idx = idx
self.limit = limit
return
def avail(self):
return self.limit - self.idx
def buffer(self):
return self.buf
def pos(self):
return self.idx
def skip(self, n):
if self.idx + n > self.limit: raise ProtocolBufferDecodeError, "truncated"
self.idx += n
return
def skipData(self, tag):
t = tag & 7
if t == Encoder.NUMERIC:
self.getVarInt64()
elif t == Encoder.DOUBLE:
self.skip(8)
elif t == Encoder.STRING:
n = self.getVarInt32()
self.skip(n)
elif t == Encoder.STARTGROUP:
while 1:
t = self.getVarInt32()
if (t & 7) == Encoder.ENDGROUP:
break
else:
self.skipData(t)
if (t - Encoder.ENDGROUP) != (tag - Encoder.STARTGROUP):
raise ProtocolBufferDecodeError, "corrupted"
elif t == Encoder.ENDGROUP:
raise ProtocolBufferDecodeError, "corrupted"
elif t == Encoder.FLOAT:
self.skip(4)
else:
raise ProtocolBufferDecodeError, "corrupted"
def get8(self):
if self.idx >= self.limit: raise ProtocolBufferDecodeError, "truncated"
c = self.buf[self.idx]
self.idx += 1
return c
def get16(self):
if self.idx + 2 > self.limit: raise ProtocolBufferDecodeError, "truncated"
c = self.buf[self.idx]
d = self.buf[self.idx + 1]
self.idx += 2
return (d << 8) | c
def get32(self):
if self.idx + 4 > self.limit: raise ProtocolBufferDecodeError, "truncated"
c = self.buf[self.idx]
d = self.buf[self.idx + 1]
e = self.buf[self.idx + 2]
f = long(self.buf[self.idx + 3])
self.idx += 4
return (f << 24) | (e << 16) | (d << 8) | c
def get64(self):
if self.idx + 8 > self.limit: raise ProtocolBufferDecodeError, "truncated"
c = self.buf[self.idx]
d = self.buf[self.idx + 1]
e = self.buf[self.idx + 2]
f = long(self.buf[self.idx + 3])
g = long(self.buf[self.idx + 4])
h = long(self.buf[self.idx + 5])
i = long(self.buf[self.idx + 6])
j = long(self.buf[self.idx + 7])
self.idx += 8
return ((j << 56) | (i << 48) | (h << 40) | (g << 32) | (f << 24)
| (e << 16) | (d << 8) | c)
def getVarInt32(self):
b = self.get8()
if not (b & 128):
return b
result = long(0)
shift = 0
while 1:
result |= (long(b & 127) << shift)
shift += 7
if not (b & 128):
if result >= 0x10000000000000000L:
raise ProtocolBufferDecodeError, "corrupted"
break
if shift >= 64: raise ProtocolBufferDecodeError, "corrupted"
b = self.get8()
if result >= 0x8000000000000000L:
result -= 0x10000000000000000L
if result >= 0x80000000L or result < -0x80000000L:
raise ProtocolBufferDecodeError, "corrupted"
return result
def getVarInt64(self):
result = self.getVarUint64()
if result >= (1L << 63):
result -= (1L << 64)
return result
def getVarUint64(self):
result = long(0)
shift = 0
while 1:
if shift >= 64: raise ProtocolBufferDecodeError, "corrupted"
b = self.get8()
result |= (long(b & 127) << shift)
shift += 7
if not (b & 128):
if result >= (1L << 64): raise ProtocolBufferDecodeError, "corrupted"
return result
return result
def getFloat(self):
if self.idx + 4 > self.limit: raise ProtocolBufferDecodeError, "truncated"
a = self.buf[self.idx:self.idx+4]
self.idx += 4
return struct.unpack("<f", a)[0]
def getDouble(self):
if self.idx + 8 > self.limit: raise ProtocolBufferDecodeError, "truncated"
a = self.buf[self.idx:self.idx+8]
self.idx += 8
return struct.unpack("<d", a)[0]
def getBoolean(self):
b = self.get8()
if b != 0 and b != 1: raise ProtocolBufferDecodeError, "corrupted"
return b
def getPrefixedString(self):
length = self.getVarInt32()
if self.idx + length > self.limit:
raise ProtocolBufferDecodeError, "truncated"
r = self.buf[self.idx : self.idx + length]
self.idx += length
return r.tostring()
def getRawString(self):
r = self.buf[self.idx:self.limit]
self.idx = self.limit
return r.tostring()
_TYPE_TO_METHOD = {
TYPE_DOUBLE: getDouble,
TYPE_FLOAT: getFloat,
TYPE_FIXED64: get64,
TYPE_FIXED32: get32,
TYPE_INT32: getVarInt32,
TYPE_INT64: getVarInt64,
TYPE_UINT64: getVarUint64,
TYPE_BOOL: getBoolean,
TYPE_STRING: getPrefixedString }
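# Editorial sketch (not part of the upstream module): a minimal Encoder/Decoder
# round trip using only the classes defined above; the values are arbitrary
# illustration data.
def _ExampleEncoderDecoderRoundTrip():
  e = Encoder()
  e.putVarInt32(300)
  e.putPrefixedString("hello")
  buf = e.buffer()
  d = Decoder(buf, 0, len(buf))
  assert d.getVarInt32() == 300
  assert d.getPrefixedString() == "hello"
  return buf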
class ExtensionIdentifier(object):
__slots__ = ('full_name', 'number', 'field_type', 'wire_tag', 'is_repeated',
'default', 'containing_cls', 'composite_cls', 'message_name')
def __init__(self, full_name, number, field_type, wire_tag, is_repeated,
default):
self.full_name = full_name
self.number = number
self.field_type = field_type
self.wire_tag = wire_tag
self.is_repeated = is_repeated
self.default = default
class ExtendableProtocolMessage(ProtocolMessage):
def HasExtension(self, extension):
self._VerifyExtensionIdentifier(extension)
return extension in self._extension_fields
def ClearExtension(self, extension):
self._VerifyExtensionIdentifier(extension)
if extension in self._extension_fields:
del self._extension_fields[extension]
def GetExtension(self, extension, index=None):
self._VerifyExtensionIdentifier(extension)
if extension in self._extension_fields:
result = self._extension_fields[extension]
else:
if extension.is_repeated:
result = []
elif extension.composite_cls:
result = extension.composite_cls()
else:
result = extension.default
if extension.is_repeated:
result = result[index]
return result
def SetExtension(self, extension, *args):
self._VerifyExtensionIdentifier(extension)
if extension.composite_cls:
raise TypeError(
'Cannot assign to extension "%s" because it is a composite type.' %
extension.full_name)
if extension.is_repeated:
if (len(args) != 2):
raise TypeError(
'SetExtension(extension, index, value) for repeated extension '
'takes exactly 3 arguments: (%d given)' % len(args))
index = args[0]
value = args[1]
self._extension_fields[extension][index] = value
else:
if (len(args) != 1):
raise TypeError(
'SetExtension(extension, value) for singular extension '
            'takes exactly 2 arguments: (%d given)' % len(args))
value = args[0]
self._extension_fields[extension] = value
def MutableExtension(self, extension, index=None):
self._VerifyExtensionIdentifier(extension)
if extension.composite_cls is None:
raise TypeError(
'MutableExtension() cannot be applied to "%s", because it is not a '
'composite type.' % extension.full_name)
if extension.is_repeated:
if index is None:
raise TypeError(
'MutableExtension(extension, index) for repeated extension '
'takes exactly 2 arguments: (1 given)')
return self.GetExtension(extension, index)
if extension in self._extension_fields:
return self._extension_fields[extension]
else:
result = extension.composite_cls()
self._extension_fields[extension] = result
return result
def ExtensionList(self, extension):
self._VerifyExtensionIdentifier(extension)
if not extension.is_repeated:
raise TypeError(
'ExtensionList() cannot be applied to "%s", because it is not a '
'repeated extension.' % extension.full_name)
if extension in self._extension_fields:
return self._extension_fields[extension]
result = []
self._extension_fields[extension] = result
return result
def ExtensionSize(self, extension):
self._VerifyExtensionIdentifier(extension)
if not extension.is_repeated:
raise TypeError(
'ExtensionSize() cannot be applied to "%s", because it is not a '
'repeated extension.' % extension.full_name)
if extension in self._extension_fields:
return len(self._extension_fields[extension])
return 0
def AddExtension(self, extension, value=None):
self._VerifyExtensionIdentifier(extension)
if not extension.is_repeated:
raise TypeError(
'AddExtension() cannot be applied to "%s", because it is not a '
'repeated extension.' % extension.full_name)
if extension in self._extension_fields:
field = self._extension_fields[extension]
else:
field = []
self._extension_fields[extension] = field
if extension.composite_cls:
if value is not None:
raise TypeError(
'value must not be set in AddExtension() for "%s", because it is '
'a message type extension. Set values on the returned message '
'instead.' % extension.full_name)
msg = extension.composite_cls()
field.append(msg)
return msg
field.append(value)
def _VerifyExtensionIdentifier(self, extension):
if extension.containing_cls != self.__class__:
raise TypeError("Containing type of %s is %s, but not %s."
% (extension.full_name,
extension.containing_cls.__name__,
self.__class__.__name__))
def _MergeExtensionFields(self, x):
for ext, val in x._extension_fields.items():
if ext.is_repeated:
for i in xrange(len(val)):
if ext.composite_cls is None:
self.AddExtension(ext, val[i])
else:
self.AddExtension(ext).MergeFrom(val[i])
else:
if ext.composite_cls is None:
self.SetExtension(ext, val)
else:
self.MutableExtension(ext).MergeFrom(val)
def _ListExtensions(self):
result = [ext for ext in self._extension_fields.keys()
if (not ext.is_repeated) or self.ExtensionSize(ext) > 0]
result.sort(key = lambda item: item.number)
return result
def _ExtensionEquals(self, x):
extensions = self._ListExtensions()
if extensions != x._ListExtensions():
return False
for ext in extensions:
if ext.is_repeated:
if self.ExtensionSize(ext) != x.ExtensionSize(ext): return False
for e1, e2 in zip(self.ExtensionList(ext),
x.ExtensionList(ext)):
if e1 != e2: return False
else:
if self.GetExtension(ext) != x.GetExtension(ext): return False
return True
def _OutputExtensionFields(self, out, partial, extensions, start_index,
end_field_number):
def OutputSingleField(ext, value):
out.putVarInt32(ext.wire_tag)
if ext.field_type == TYPE_GROUP:
if partial:
value.OutputPartial(out)
else:
value.OutputUnchecked(out)
out.putVarInt32(ext.wire_tag + 1)
elif ext.field_type == TYPE_FOREIGN:
if partial:
out.putVarInt32(value.ByteSizePartial())
value.OutputPartial(out)
else:
out.putVarInt32(value.ByteSize())
value.OutputUnchecked(out)
else:
Encoder._TYPE_TO_METHOD[ext.field_type](out, value)
size = len(extensions)
for ext_index in xrange(start_index, size):
ext = extensions[ext_index]
if ext.number >= end_field_number:
return ext_index
if ext.is_repeated:
for i in xrange(len(self._extension_fields[ext])):
OutputSingleField(ext, self._extension_fields[ext][i])
else:
OutputSingleField(ext, self._extension_fields[ext])
return size
def _ParseOneExtensionField(self, wire_tag, d):
number = wire_tag >> 3
if number in self._extensions_by_field_number:
ext = self._extensions_by_field_number[number]
if wire_tag != ext.wire_tag:
return
if ext.field_type == TYPE_FOREIGN:
length = d.getVarInt32()
tmp = Decoder(d.buffer(), d.pos(), d.pos() + length)
if ext.is_repeated:
self.AddExtension(ext).TryMerge(tmp)
else:
self.MutableExtension(ext).TryMerge(tmp)
d.skip(length)
elif ext.field_type == TYPE_GROUP:
if ext.is_repeated:
self.AddExtension(ext).TryMerge(d)
else:
self.MutableExtension(ext).TryMerge(d)
else:
value = Decoder._TYPE_TO_METHOD[ext.field_type](d)
if ext.is_repeated:
self.AddExtension(ext, value)
else:
self.SetExtension(ext, value)
else:
d.skipData(wire_tag)
def _ExtensionByteSize(self, partial):
size = 0
for extension, value in self._extension_fields.items():
ftype = extension.field_type
tag_size = self.lengthVarInt64(extension.wire_tag)
if ftype == TYPE_GROUP:
tag_size *= 2
if extension.is_repeated:
size += tag_size * len(value)
for single_value in value:
size += self._FieldByteSize(ftype, single_value, partial)
else:
size += tag_size + self._FieldByteSize(ftype, value, partial)
return size
def _FieldByteSize(self, ftype, value, partial):
size = 0
if ftype == TYPE_STRING:
size = self.lengthString(len(value))
elif ftype == TYPE_FOREIGN or ftype == TYPE_GROUP:
if partial:
size = self.lengthString(value.ByteSizePartial())
else:
size = self.lengthString(value.ByteSize())
elif ftype == TYPE_INT64 or ftype == TYPE_UINT64 or ftype == TYPE_INT32:
size = self.lengthVarInt64(value)
else:
if ftype in Encoder._TYPE_TO_BYTE_SIZE:
size = Encoder._TYPE_TO_BYTE_SIZE[ftype]
else:
raise AssertionError(
'Extension type %d is not recognized.' % ftype)
return size
def _ExtensionDebugString(self, prefix, printElemNumber):
res = ''
extensions = self._ListExtensions()
for extension in extensions:
value = self._extension_fields[extension]
if extension.is_repeated:
cnt = 0
for e in value:
elm=""
if printElemNumber: elm = "(%d)" % cnt
if extension.composite_cls is not None:
res += prefix + "[%s%s] {\n" % (extension.full_name, elm)
res += e.__str__(prefix + " ", printElemNumber)
res += prefix + "}\n"
else:
if extension.composite_cls is not None:
res += prefix + "[%s] {\n" % extension.full_name
res += value.__str__(
prefix + " ", printElemNumber)
res += prefix + "}\n"
else:
if extension.field_type in _TYPE_TO_DEBUG_STRING:
text_value = _TYPE_TO_DEBUG_STRING[
extension.field_type](self, value)
else:
text_value = self.DebugFormat(value)
res += prefix + "[%s]: %s\n" % (extension.full_name, text_value)
return res
@staticmethod
def _RegisterExtension(cls, extension, composite_cls=None):
extension.containing_cls = cls
extension.composite_cls = composite_cls
if composite_cls is not None:
extension.message_name = composite_cls._PROTO_DESCRIPTOR_NAME
actual_handle = cls._extensions_by_field_number.setdefault(
extension.number, extension)
if actual_handle is not extension:
raise AssertionError(
'Extensions "%s" and "%s" both try to extend message type "%s" with '
'field number %d.' %
(extension.full_name, actual_handle.full_name,
cls.__name__, extension.number))
|
bsd-2-clause
| 2,965,896,103,886,853,000 | -7,418,319,344,609,615,000 | 21.530478 | 79 | 0.610924 | false |
nbborlongan/geonode
|
geonode/sitemap.py
|
35
|
1218
|
#########################################################################
#
# Copyright (C) 2012 OpenPlans
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
from django.contrib.sitemaps import Sitemap
from geonode.maps.models import Layer, Map
class LayerSitemap(Sitemap):
changefreq = "never"
priority = 0.5
def items(self):
return Layer.objects.all()
def lastmod(self, obj):
return obj.date
class MapSitemap(Sitemap):
changefreq = "never"
priority = 0.5
def items(self):
return Map.objects.all()
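# Editorial sketch (not part of the upstream module): the mapping a project
# urls.py would typically pass to django.contrib.sitemaps.views.sitemap; the
# dictionary name and its keys are illustrative assumptions.
GEONODE_SITEMAPS = {
    'layers': LayerSitemap,
    'maps': MapSitemap,
}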
|
gpl-3.0
| -3,978,652,716,716,825,000 | 6,969,488,853,460,647,000 | 29.45 | 73 | 0.633826 | false |
tensorflow/lucid
|
tests/recipes/activation_atlas.py
|
1
|
1157
|
import pytest
from lucid.modelzoo.aligned_activations import NUMBER_OF_AVAILABLE_SAMPLES
from lucid.modelzoo.vision_models import AlexNet, InceptionV1
from lucid.recipes.activation_atlas import activation_atlas, aligned_activation_atlas
from lucid.misc.io import save
# Run test with just 1/10th of available samples
subset = NUMBER_OF_AVAILABLE_SAMPLES // 10
@pytest.mark.skip(reason="takes too long to complete on CI")
def test_activation_atlas():
model = AlexNet()
layer = model.layers[1]
atlas = activation_atlas(model, layer, number_activations=subset)
save(atlas, "tests/recipes/results/activation_atlas/atlas.jpg")
@pytest.mark.skip(reason="takes too long to complete on CI")
def test_aligned_activation_atlas():
model1 = AlexNet()
layer1 = model1.layers[1]
model2 = InceptionV1()
layer2 = model2.layers[8] # mixed4d
atlasses = aligned_activation_atlas(
model1, layer1, model2, layer2, number_activations=subset
)
path = "tests/recipes/results/activation_atlas/aligned_atlas-{}-of-{}.jpg".format(index, len(atlasses))
for index, atlas in enumerate(atlasses):
save(atlas, path)
|
apache-2.0
| -782,462,283,522,328,800 | -6,657,343,651,498,151,000 | 34.060606 | 107 | 0.733794 | false |
JeanMarieMineau/ISN-s-Cube
|
Bouton.py
|
1
|
5185
|
'''
Created on 5 mai 2017
Copyright 2017 Jean-Marie Mineau, Maxime Keller
This file is part of "ISN's Cube".
"ISN's Cube" is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
"ISN's Cube" is distributed in the hope that it will be useful and
recreative, but WITHOUT ANY WARRANTY; without even the implied
warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with "ISN's Cube". If not, see <http://www.gnu.org/licenses/>.
@author: <[email protected]>
Classes for the buttons: one class manages the whole set of
buttons, and another represents a single button.
'''
import pygame
class Boutons:
"""Class gérant la création de boutons et le fait de clique dessus,
ainsi que l'affichage."""
def __init__(self):
"""Cette class à pour attribut une liste de boutons."""
self.boutons = []
def nouveauBouton(self, pos, image=None, couleur=(255,0,255),
size=(60,60), callback=lambda *args: None, argsCallback=[]):
"""Crée un nouveau bouton."""
bouton = Bouton(pos, self, image=image, couleur=couleur,
size=size, callback=callback, argsCallback=argsCallback)
self.boutons.append(bouton)
return bouton
def update(self, events):
"""Gère le click sur le bouton."""
for event in events:
if event.type == pygame.MOUSEBUTTONDOWN:
pos = event.pos
self.callbackClic(pos)
def callbackClic(self, pos):
"""Methode appellé lors d'un clic."""
for bouton in self.boutons:
if bouton.rect.collidepoint(*pos):
bouton.callback(*bouton.argsCallback)
return
def display(self, screen):
"""Affiche les Boutons."""
for bouton in self.boutons:
bouton.display(screen)
class Bouton:
"""Un Bouton."""
def __init__(self, pos, parent, image=None, couleur=(255,0,255),
size=(60,60), callback=lambda *args: None, argsCallback=[]):
"""Crée un bouton, si une image est donné, il la charge, sinon,
c'est un rectangle de taille size et de couleur couleur qui est affiché."""
self.parent = parent
self.pos = pos
if image is not None:
self.surface = pygame.image.load(image).convert_alpha()
self.rect = self.surface.get_rect()
else:
self.surface = pygame.Surface(size)
self.surface.fill(couleur)
self.rect = self.surface.get_rect()
self.rect = self.rect.move(self.pos)
self.callback = callback
self.argsCallback = argsCallback
def suppr(self):
"""Suprime le Bouton."""
self.parent.boutons.remove(self)
def display(self, screen):
"""Affiche le Bouton."""
screen.blit(self.surface, self.rect)
def callbackTest(a, b, c, d, e, f, g):
"""Test de callback."""
print(a)
print(b)
print(c)
print(d)
print(e)
print(f)
print(g)
def callback2(*args):
print("toto")
if __name__ == "__main__":
boutons = Boutons()
screen = pygame.display.set_mode((1000, 400))
clock = pygame.time.Clock()
pos = (50,50)
bouton = boutons.nouveauBouton(pos, callback=callbackTest, argsCallback=["A",
"B",
"C",
"D",
"E",
"F",
"G"])
pos2 = (50, 200)
bouton2 = boutons.nouveauBouton(pos2, callback=callback2, argsCallback=["H",
"I",
"J",
"K",
"L",
"M",
"N"])
while True:
screen.fill((150, 150, 150))
events = pygame.event.get()
for event in events:
if event.type == pygame.QUIT:
exit()
quit()
boutons.update(events)
boutons.display(screen)
pygame.display.update()
clock.tick(30)
|
gpl-3.0
| 2,782,760,401,261,237,000 | -4,992,198,099,747,877,000 | 33.278146 | 83 | 0.479614 | false |
omprakasha/odoo
|
addons/l10n_do/__openerp__.py
|
309
|
2992
|
# -*- coding: utf-8 -*-
# #############################################################################
#
# First author: Jose Ernesto Mendez <[email protected]> (Open Business Solutions SRL.)
# Copyright (c) 2012 -TODAY Open Business Solutions, SRL. (http://obsdr.com). All rights reserved.
#
# This is a fork to upgrade to odoo 8.0
# by Marcos Organizador de Negocios - Eneldo Serrata - www.marcos.org.do
#
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsability of assessing all potential
# consequences resulting from its eventual inadequacies and bugs.
# End users who are looking for a ready-to-use solution with commercial
# garantees and support are strongly adviced to contract a Free Software
# Service Company like Marcos Organizador de Negocios.
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
##############################################################################
{
'name': 'Dominican Republic - Accounting',
'version': '1.0',
'category': 'Localization/Account Charts',
'description': """
This is the base module to manage the accounting chart for Dominican Republic.
==============================================================================
* Chart of Accounts.
* The Tax Code Chart for Domincan Republic
* The main taxes used in Domincan Republic
* Fiscal position for local """,
'author': 'Eneldo Serrata - Marcos Organizador de Negocios, SRL.',
'website': 'http://marcos.do',
'depends': ['account', 'base_iban'],
'data': [
# basic accounting data
'data/ir_sequence_type.xml',
'data/ir_sequence.xml',
'data/account_journal.xml',
'data/account.account.type.csv',
'data/account.account.template.csv',
'data/account.tax.code.template.csv',
'data/account_chart_template.xml',
'data/account.tax.template.csv',
'data/l10n_do_base_data.xml',
# Adds fiscal position
'data/account.fiscal.position.template.csv',
'data/account.fiscal.position.tax.template.csv',
# configuration wizard, views, reports...
'data/l10n_do_wizard.xml'
],
'test': [],
'demo': [],
'installable': True,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
| 9,079,398,747,873,551,000 | 8,339,213,592,134,441,000 | 40.555556 | 98 | 0.646056 | false |
clay23/lab4
|
lib/werkzeug/contrib/fixers.py
|
464
|
9949
|
# -*- coding: utf-8 -*-
"""
werkzeug.contrib.fixers
~~~~~~~~~~~~~~~~~~~~~~~
.. versionadded:: 0.5
This module includes various helpers that fix bugs in web servers. They may
be necessary for some versions of a buggy web server but not others. We try
to stay updated with the status of the bugs as good as possible but you have
to make sure whether they fix the problem you encounter.
If you notice bugs in webservers not fixed in this module consider
contributing a patch.
:copyright: Copyright 2009 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
try:
from urllib import unquote
except ImportError:
from urllib.parse import unquote
from werkzeug.http import parse_options_header, parse_cache_control_header, \
parse_set_header
from werkzeug.useragents import UserAgent
from werkzeug.datastructures import Headers, ResponseCacheControl
class CGIRootFix(object):
"""Wrap the application in this middleware if you are using FastCGI or CGI
and you have problems with your app root being set to the cgi script's path
instead of the path users are going to visit
.. versionchanged:: 0.9
Added `app_root` parameter and renamed from `LighttpdCGIRootFix`.
:param app: the WSGI application
:param app_root: Defaulting to ``'/'``, you can set this to something else
if your app is mounted somewhere else.
"""
def __init__(self, app, app_root='/'):
self.app = app
self.app_root = app_root
def __call__(self, environ, start_response):
# only set PATH_INFO for older versions of Lighty or if no
# server software is provided. That's because the test was
# added in newer Werkzeug versions and we don't want to break
# people's code if they are using this fixer in a test that
# does not set the SERVER_SOFTWARE key.
if 'SERVER_SOFTWARE' not in environ or \
environ['SERVER_SOFTWARE'] < 'lighttpd/1.4.28':
environ['PATH_INFO'] = environ.get('SCRIPT_NAME', '') + \
environ.get('PATH_INFO', '')
environ['SCRIPT_NAME'] = self.app_root.strip('/')
return self.app(environ, start_response)
# backwards compatibility
LighttpdCGIRootFix = CGIRootFix
class PathInfoFromRequestUriFix(object):
"""On windows environment variables are limited to the system charset
which makes it impossible to store the `PATH_INFO` variable in the
environment without loss of information on some systems.
This is for example a problem for CGI scripts on a Windows Apache.
This fixer works by recreating the `PATH_INFO` from `REQUEST_URI`,
`REQUEST_URL`, or `UNENCODED_URL` (whatever is available). Thus the
fix can only be applied if the webserver supports either of these
variables.
:param app: the WSGI application
"""
def __init__(self, app):
self.app = app
def __call__(self, environ, start_response):
for key in 'REQUEST_URL', 'REQUEST_URI', 'UNENCODED_URL':
if key not in environ:
continue
request_uri = unquote(environ[key])
script_name = unquote(environ.get('SCRIPT_NAME', ''))
if request_uri.startswith(script_name):
environ['PATH_INFO'] = request_uri[len(script_name):] \
.split('?', 1)[0]
break
return self.app(environ, start_response)
class ProxyFix(object):
"""This middleware can be applied to add HTTP proxy support to an
application that was not designed with HTTP proxies in mind. It
sets `REMOTE_ADDR`, `HTTP_HOST` from `X-Forwarded` headers.
If you have more than one proxy server in front of your app, set
`num_proxies` accordingly.
Do not use this middleware in non-proxy setups for security reasons.
The original values of `REMOTE_ADDR` and `HTTP_HOST` are stored in
the WSGI environment as `werkzeug.proxy_fix.orig_remote_addr` and
`werkzeug.proxy_fix.orig_http_host`.
:param app: the WSGI application
:param num_proxies: the number of proxy servers in front of the app.
"""
def __init__(self, app, num_proxies=1):
self.app = app
self.num_proxies = num_proxies
def get_remote_addr(self, forwarded_for):
"""Selects the new remote addr from the given list of ips in
X-Forwarded-For. By default it picks the one that the `num_proxies`
proxy server provides. Before 0.9 it would always pick the first.
.. versionadded:: 0.8
"""
if len(forwarded_for) >= self.num_proxies:
return forwarded_for[-1 * self.num_proxies]
def __call__(self, environ, start_response):
getter = environ.get
forwarded_proto = getter('HTTP_X_FORWARDED_PROTO', '')
forwarded_for = getter('HTTP_X_FORWARDED_FOR', '').split(',')
forwarded_host = getter('HTTP_X_FORWARDED_HOST', '')
environ.update({
'werkzeug.proxy_fix.orig_wsgi_url_scheme': getter('wsgi.url_scheme'),
'werkzeug.proxy_fix.orig_remote_addr': getter('REMOTE_ADDR'),
'werkzeug.proxy_fix.orig_http_host': getter('HTTP_HOST')
})
forwarded_for = [x for x in [x.strip() for x in forwarded_for] if x]
remote_addr = self.get_remote_addr(forwarded_for)
if remote_addr is not None:
environ['REMOTE_ADDR'] = remote_addr
if forwarded_host:
environ['HTTP_HOST'] = forwarded_host
if forwarded_proto:
environ['wsgi.url_scheme'] = forwarded_proto
return self.app(environ, start_response)
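# Illustrative sketch, not part of the original werkzeug source: a minimal way
# ProxyFix is applied to a WSGI callable sitting behind a single reverse proxy.
# The `demo_app` callable below is hypothetical.
def _proxy_fix_example():
    def demo_app(environ, start_response):
        # After the fix, REMOTE_ADDR reflects the X-Forwarded-For client.
        start_response('200 OK', [('Content-Type', 'text/plain')])
        return [environ.get('REMOTE_ADDR', 'unknown').encode('latin-1')]
    return ProxyFix(demo_app, num_proxies=1)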
class HeaderRewriterFix(object):
"""This middleware can remove response headers and add others. This
is for example useful to remove the `Date` header from responses if you
are using a server that adds that header, no matter if it's present or
not or to add `X-Powered-By` headers::
app = HeaderRewriterFix(app, remove_headers=['Date'],
add_headers=[('X-Powered-By', 'WSGI')])
:param app: the WSGI application
:param remove_headers: a sequence of header keys that should be
removed.
:param add_headers: a sequence of ``(key, value)`` tuples that should
be added.
"""
def __init__(self, app, remove_headers=None, add_headers=None):
self.app = app
self.remove_headers = set(x.lower() for x in (remove_headers or ()))
self.add_headers = list(add_headers or ())
def __call__(self, environ, start_response):
def rewriting_start_response(status, headers, exc_info=None):
new_headers = []
for key, value in headers:
if key.lower() not in self.remove_headers:
new_headers.append((key, value))
new_headers += self.add_headers
return start_response(status, new_headers, exc_info)
return self.app(environ, rewriting_start_response)
class InternetExplorerFix(object):
"""This middleware fixes a couple of bugs with Microsoft Internet
Explorer. Currently the following fixes are applied:
- removing of `Vary` headers for unsupported mimetypes which
causes troubles with caching. Can be disabled by passing
``fix_vary=False`` to the constructor.
see: http://support.microsoft.com/kb/824847/en-us
- removes offending headers to work around caching bugs in
Internet Explorer if `Content-Disposition` is set. Can be
disabled by passing ``fix_attach=False`` to the constructor.
If it does not detect affected Internet Explorer versions it won't touch
the request / response.
"""
# This code was inspired by Django fixers for the same bugs. The
# fix_vary and fix_attach fixers were originally implemented in Django
# by Michael Axiak and is available as part of the Django project:
# http://code.djangoproject.com/ticket/4148
def __init__(self, app, fix_vary=True, fix_attach=True):
self.app = app
self.fix_vary = fix_vary
self.fix_attach = fix_attach
def fix_headers(self, environ, headers, status=None):
if self.fix_vary:
header = headers.get('content-type', '')
mimetype, options = parse_options_header(header)
if mimetype not in ('text/html', 'text/plain', 'text/sgml'):
headers.pop('vary', None)
if self.fix_attach and 'content-disposition' in headers:
pragma = parse_set_header(headers.get('pragma', ''))
pragma.discard('no-cache')
header = pragma.to_header()
if not header:
headers.pop('pragma', '')
else:
headers['Pragma'] = header
header = headers.get('cache-control', '')
if header:
cc = parse_cache_control_header(header,
cls=ResponseCacheControl)
cc.no_cache = None
cc.no_store = False
header = cc.to_header()
if not header:
headers.pop('cache-control', '')
else:
headers['Cache-Control'] = header
def run_fixed(self, environ, start_response):
def fixing_start_response(status, headers, exc_info=None):
headers = Headers(headers)
self.fix_headers(environ, headers, status)
return start_response(status, headers.to_wsgi_list(), exc_info)
return self.app(environ, fixing_start_response)
def __call__(self, environ, start_response):
ua = UserAgent(environ)
if ua.browser != 'msie':
return self.app(environ, start_response)
return self.run_fixed(environ, start_response)
|
apache-2.0
| -7,555,413,263,510,827,000 | 3,146,415,719,407,892,500 | 39.77459 | 82 | 0.625792 | false |
akashsinghal/Speech-Memorization-App
|
speech/Swift/Speech-gRPC-Streaming/env/lib/python3.6/site-packages/setuptools/command/egg_info.py
|
50
|
25016
|
"""setuptools.command.egg_info
Create a distribution's .egg-info directory and contents"""
from distutils.filelist import FileList as _FileList
from distutils.errors import DistutilsInternalError
from distutils.util import convert_path
from distutils import log
import distutils.errors
import distutils.filelist
import os
import re
import sys
import io
import warnings
import time
import collections
from setuptools.extern import six
from setuptools.extern.six.moves import map
from setuptools import Command
from setuptools.command.sdist import sdist
from setuptools.command.sdist import walk_revctrl
from setuptools.command.setopt import edit_config
from setuptools.command import bdist_egg
from pkg_resources import (
parse_requirements, safe_name, parse_version,
safe_version, yield_lines, EntryPoint, iter_entry_points, to_filename)
import setuptools.unicode_utils as unicode_utils
from setuptools.glob import glob
from pkg_resources.extern import packaging
def translate_pattern(glob):
"""
Translate a file path glob like '*.txt' in to a regular expression.
This differs from fnmatch.translate which allows wildcards to match
directory separators. It also knows about '**/' which matches any number of
directories.
"""
pat = ''
# This will split on '/' within [character classes]. This is deliberate.
chunks = glob.split(os.path.sep)
sep = re.escape(os.sep)
valid_char = '[^%s]' % (sep,)
for c, chunk in enumerate(chunks):
last_chunk = c == len(chunks) - 1
# Chunks that are a literal ** are globstars. They match anything.
if chunk == '**':
if last_chunk:
# Match anything if this is the last component
pat += '.*'
else:
# Match '(name/)*'
pat += '(?:%s+%s)*' % (valid_char, sep)
continue # Break here as the whole path component has been handled
# Find any special characters in the remainder
i = 0
chunk_len = len(chunk)
while i < chunk_len:
char = chunk[i]
if char == '*':
# Match any number of name characters
pat += valid_char + '*'
elif char == '?':
# Match a name character
pat += valid_char
elif char == '[':
# Character class
inner_i = i + 1
# Skip initial !/] chars
if inner_i < chunk_len and chunk[inner_i] == '!':
inner_i = inner_i + 1
if inner_i < chunk_len and chunk[inner_i] == ']':
inner_i = inner_i + 1
# Loop till the closing ] is found
while inner_i < chunk_len and chunk[inner_i] != ']':
inner_i = inner_i + 1
if inner_i >= chunk_len:
# Got to the end of the string without finding a closing ]
# Do not treat this as a matching group, but as a literal [
pat += re.escape(char)
else:
# Grab the insides of the [brackets]
inner = chunk[i + 1:inner_i]
char_class = ''
# Class negation
if inner[0] == '!':
char_class = '^'
inner = inner[1:]
char_class += re.escape(inner)
pat += '[%s]' % (char_class,)
# Skip to the end ]
i = inner_i
else:
pat += re.escape(char)
i += 1
# Join each chunk with the dir separator
if not last_chunk:
pat += sep
pat += r'\Z'
return re.compile(pat, flags=re.MULTILINE|re.DOTALL)
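# Illustrative sketch, not part of the original setuptools source: expected
# behaviour of translate_pattern(), assuming a POSIX '/' path separator.
def _translate_pattern_examples():
    # '*' matches within a single path component only...
    assert translate_pattern('*.txt').match('notes.txt')
    assert not translate_pattern('*.txt').match('docs/notes.txt')
    # ...while '**' spans any number of intermediate directories.
    assert translate_pattern('docs/**/*.txt').match('docs/a/b/notes.txt')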
class egg_info(Command):
description = "create a distribution's .egg-info directory"
user_options = [
('egg-base=', 'e', "directory containing .egg-info directories"
" (default: top of the source tree)"),
('tag-date', 'd', "Add date stamp (e.g. 20050528) to version number"),
('tag-build=', 'b', "Specify explicit tag to add to version number"),
('no-date', 'D', "Don't include date stamp [default]"),
]
boolean_options = ['tag-date']
negative_opt = {
'no-date': 'tag-date',
}
def initialize_options(self):
self.egg_name = None
self.egg_version = None
self.egg_base = None
self.egg_info = None
self.tag_build = None
self.tag_date = 0
self.broken_egg_info = False
self.vtags = None
####################################
# allow the 'tag_svn_revision' to be detected and
# set, supporting sdists built on older Setuptools.
@property
def tag_svn_revision(self):
pass
@tag_svn_revision.setter
def tag_svn_revision(self, value):
pass
####################################
def save_version_info(self, filename):
"""
Materialize the value of date into the
build tag. Install build keys in a deterministic order
to avoid arbitrary reordering on subsequent builds.
"""
# python 2.6 compatibility
odict = getattr(collections, 'OrderedDict', dict)
egg_info = odict()
# follow the order these keys would have been added
# when PYTHONHASHSEED=0
egg_info['tag_build'] = self.tags()
egg_info['tag_date'] = 0
edit_config(filename, dict(egg_info=egg_info))
def finalize_options(self):
self.egg_name = safe_name(self.distribution.get_name())
self.vtags = self.tags()
self.egg_version = self.tagged_version()
parsed_version = parse_version(self.egg_version)
try:
is_version = isinstance(parsed_version, packaging.version.Version)
spec = (
"%s==%s" if is_version else "%s===%s"
)
list(
parse_requirements(spec % (self.egg_name, self.egg_version))
)
except ValueError:
raise distutils.errors.DistutilsOptionError(
"Invalid distribution name or version syntax: %s-%s" %
(self.egg_name, self.egg_version)
)
if self.egg_base is None:
dirs = self.distribution.package_dir
self.egg_base = (dirs or {}).get('', os.curdir)
self.ensure_dirname('egg_base')
self.egg_info = to_filename(self.egg_name) + '.egg-info'
if self.egg_base != os.curdir:
self.egg_info = os.path.join(self.egg_base, self.egg_info)
if '-' in self.egg_name:
self.check_broken_egg_info()
# Set package version for the benefit of dumber commands
# (e.g. sdist, bdist_wininst, etc.)
#
self.distribution.metadata.version = self.egg_version
# If we bootstrapped around the lack of a PKG-INFO, as might be the
# case in a fresh checkout, make sure that any special tags get added
# to the version info
#
pd = self.distribution._patched_dist
if pd is not None and pd.key == self.egg_name.lower():
pd._version = self.egg_version
pd._parsed_version = parse_version(self.egg_version)
self.distribution._patched_dist = None
def write_or_delete_file(self, what, filename, data, force=False):
"""Write `data` to `filename` or delete if empty
If `data` is non-empty, this routine is the same as ``write_file()``.
If `data` is empty but not ``None``, this is the same as calling
        ``delete_file(filename)``. If `data` is ``None``, then this is a no-op
unless `filename` exists, in which case a warning is issued about the
orphaned file (if `force` is false), or deleted (if `force` is true).
"""
if data:
self.write_file(what, filename, data)
elif os.path.exists(filename):
if data is None and not force:
log.warn(
"%s not set in setup(), but %s exists", what, filename
)
return
else:
self.delete_file(filename)
def write_file(self, what, filename, data):
"""Write `data` to `filename` (if not a dry run) after announcing it
`what` is used in a log message to identify what is being written
to the file.
"""
log.info("writing %s to %s", what, filename)
if six.PY3:
data = data.encode("utf-8")
if not self.dry_run:
f = open(filename, 'wb')
f.write(data)
f.close()
def delete_file(self, filename):
"""Delete `filename` (if not a dry run) after announcing it"""
log.info("deleting %s", filename)
if not self.dry_run:
os.unlink(filename)
def tagged_version(self):
version = self.distribution.get_version()
# egg_info may be called more than once for a distribution,
# in which case the version string already contains all tags.
if self.vtags and version.endswith(self.vtags):
return safe_version(version)
return safe_version(version + self.vtags)
def run(self):
self.mkpath(self.egg_info)
installer = self.distribution.fetch_build_egg
for ep in iter_entry_points('egg_info.writers'):
ep.require(installer=installer)
writer = ep.resolve()
writer(self, ep.name, os.path.join(self.egg_info, ep.name))
# Get rid of native_libs.txt if it was put there by older bdist_egg
nl = os.path.join(self.egg_info, "native_libs.txt")
if os.path.exists(nl):
self.delete_file(nl)
self.find_sources()
def tags(self):
version = ''
if self.tag_build:
version += self.tag_build
if self.tag_date:
version += time.strftime("-%Y%m%d")
return version
def find_sources(self):
"""Generate SOURCES.txt manifest file"""
manifest_filename = os.path.join(self.egg_info, "SOURCES.txt")
mm = manifest_maker(self.distribution)
mm.manifest = manifest_filename
mm.run()
self.filelist = mm.filelist
def check_broken_egg_info(self):
bei = self.egg_name + '.egg-info'
if self.egg_base != os.curdir:
bei = os.path.join(self.egg_base, bei)
if os.path.exists(bei):
log.warn(
"-" * 78 + '\n'
"Note: Your current .egg-info directory has a '-' in its name;"
'\nthis will not work correctly with "setup.py develop".\n\n'
'Please rename %s to %s to correct this problem.\n' + '-' * 78,
bei, self.egg_info
)
self.broken_egg_info = self.egg_info
self.egg_info = bei # make it work for now
class FileList(_FileList):
# Implementations of the various MANIFEST.in commands
def process_template_line(self, line):
# Parse the line: split it up, make sure the right number of words
# is there, and return the relevant words. 'action' is always
# defined: it's the first word of the line. Which of the other
# three are defined depends on the action; it'll be either
# patterns, (dir and patterns), or (dir_pattern).
(action, patterns, dir, dir_pattern) = self._parse_template_line(line)
# OK, now we know that the action is valid and we have the
# right number of words on the line for that action -- so we
# can proceed with minimal error-checking.
if action == 'include':
self.debug_print("include " + ' '.join(patterns))
for pattern in patterns:
if not self.include(pattern):
log.warn("warning: no files found matching '%s'", pattern)
elif action == 'exclude':
self.debug_print("exclude " + ' '.join(patterns))
for pattern in patterns:
if not self.exclude(pattern):
log.warn(("warning: no previously-included files "
"found matching '%s'"), pattern)
elif action == 'global-include':
self.debug_print("global-include " + ' '.join(patterns))
for pattern in patterns:
if not self.global_include(pattern):
log.warn(("warning: no files found matching '%s' "
"anywhere in distribution"), pattern)
elif action == 'global-exclude':
self.debug_print("global-exclude " + ' '.join(patterns))
for pattern in patterns:
if not self.global_exclude(pattern):
log.warn(("warning: no previously-included files matching "
"'%s' found anywhere in distribution"),
pattern)
elif action == 'recursive-include':
self.debug_print("recursive-include %s %s" %
(dir, ' '.join(patterns)))
for pattern in patterns:
if not self.recursive_include(dir, pattern):
log.warn(("warning: no files found matching '%s' "
"under directory '%s'"),
pattern, dir)
elif action == 'recursive-exclude':
self.debug_print("recursive-exclude %s %s" %
(dir, ' '.join(patterns)))
for pattern in patterns:
if not self.recursive_exclude(dir, pattern):
log.warn(("warning: no previously-included files matching "
"'%s' found under directory '%s'"),
pattern, dir)
elif action == 'graft':
self.debug_print("graft " + dir_pattern)
if not self.graft(dir_pattern):
log.warn("warning: no directories found matching '%s'",
dir_pattern)
elif action == 'prune':
self.debug_print("prune " + dir_pattern)
if not self.prune(dir_pattern):
log.warn(("no previously-included directories found "
"matching '%s'"), dir_pattern)
else:
raise DistutilsInternalError(
"this cannot happen: invalid action '%s'" % action)
def _remove_files(self, predicate):
"""
Remove all files from the file list that match the predicate.
Return True if any matching files were removed
"""
found = False
for i in range(len(self.files) - 1, -1, -1):
if predicate(self.files[i]):
self.debug_print(" removing " + self.files[i])
del self.files[i]
found = True
return found
def include(self, pattern):
"""Include files that match 'pattern'."""
found = [f for f in glob(pattern) if not os.path.isdir(f)]
self.extend(found)
return bool(found)
def exclude(self, pattern):
"""Exclude files that match 'pattern'."""
match = translate_pattern(pattern)
return self._remove_files(match.match)
def recursive_include(self, dir, pattern):
"""
Include all files anywhere in 'dir/' that match the pattern.
"""
full_pattern = os.path.join(dir, '**', pattern)
found = [f for f in glob(full_pattern, recursive=True)
if not os.path.isdir(f)]
self.extend(found)
return bool(found)
def recursive_exclude(self, dir, pattern):
"""
Exclude any file anywhere in 'dir/' that match the pattern.
"""
match = translate_pattern(os.path.join(dir, '**', pattern))
return self._remove_files(match.match)
def graft(self, dir):
"""Include all files from 'dir/'."""
found = [
item
for match_dir in glob(dir)
for item in distutils.filelist.findall(match_dir)
]
self.extend(found)
return bool(found)
def prune(self, dir):
"""Filter out files from 'dir/'."""
match = translate_pattern(os.path.join(dir, '**'))
return self._remove_files(match.match)
def global_include(self, pattern):
"""
Include all files anywhere in the current directory that match the
pattern. This is very inefficient on large file trees.
"""
if self.allfiles is None:
self.findall()
match = translate_pattern(os.path.join('**', pattern))
found = [f for f in self.allfiles if match.match(f)]
self.extend(found)
return bool(found)
def global_exclude(self, pattern):
"""
Exclude all files anywhere that match the pattern.
"""
match = translate_pattern(os.path.join('**', pattern))
return self._remove_files(match.match)
def append(self, item):
if item.endswith('\r'): # Fix older sdists built on Windows
item = item[:-1]
path = convert_path(item)
if self._safe_path(path):
self.files.append(path)
def extend(self, paths):
self.files.extend(filter(self._safe_path, paths))
def _repair(self):
"""
Replace self.files with only safe paths
Because some owners of FileList manipulate the underlying
``files`` attribute directly, this method must be called to
repair those paths.
"""
self.files = list(filter(self._safe_path, self.files))
def _safe_path(self, path):
enc_warn = "'%s' not %s encodable -- skipping"
# To avoid accidental trans-codings errors, first to unicode
u_path = unicode_utils.filesys_decode(path)
if u_path is None:
log.warn("'%s' in unexpected encoding -- skipping" % path)
return False
# Must ensure utf-8 encodability
utf8_path = unicode_utils.try_encode(u_path, "utf-8")
if utf8_path is None:
log.warn(enc_warn, path, 'utf-8')
return False
try:
            # accept if either way checks out
if os.path.exists(u_path) or os.path.exists(utf8_path):
return True
# this will catch any encode errors decoding u_path
except UnicodeEncodeError:
log.warn(enc_warn, path, sys.getfilesystemencoding())
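# Illustrative sketch, not part of the original setuptools source: how a
# MANIFEST.in style line is applied to a FileList, assuming a POSIX '/' path
# separator. The file paths below are hypothetical.
def _filelist_prune_example():
    fl = FileList()
    fl.files = ['README.rst', 'docs/index.rst', 'build/lib/mod.py']
    # 'prune build' drops everything under build/ from the manifest.
    fl.process_template_line('prune build')
    return fl.files  # expected: ['README.rst', 'docs/index.rst']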
class manifest_maker(sdist):
template = "MANIFEST.in"
def initialize_options(self):
self.use_defaults = 1
self.prune = 1
self.manifest_only = 1
self.force_manifest = 1
def finalize_options(self):
pass
def run(self):
self.filelist = FileList()
if not os.path.exists(self.manifest):
self.write_manifest() # it must exist so it'll get in the list
self.add_defaults()
if os.path.exists(self.template):
self.read_template()
self.prune_file_list()
self.filelist.sort()
self.filelist.remove_duplicates()
self.write_manifest()
def _manifest_normalize(self, path):
path = unicode_utils.filesys_decode(path)
return path.replace(os.sep, '/')
def write_manifest(self):
"""
Write the file list in 'self.filelist' to the manifest file
named by 'self.manifest'.
"""
self.filelist._repair()
        # _repair() ensures the paths are encodable, but not that they are unicode
files = [self._manifest_normalize(f) for f in self.filelist.files]
msg = "writing manifest file '%s'" % self.manifest
self.execute(write_file, (self.manifest, files), msg)
def warn(self, msg):
if not self._should_suppress_warning(msg):
sdist.warn(self, msg)
@staticmethod
def _should_suppress_warning(msg):
"""
suppress missing-file warnings from sdist
"""
return re.match(r"standard file .*not found", msg)
def add_defaults(self):
sdist.add_defaults(self)
self.filelist.append(self.template)
self.filelist.append(self.manifest)
rcfiles = list(walk_revctrl())
if rcfiles:
self.filelist.extend(rcfiles)
elif os.path.exists(self.manifest):
self.read_manifest()
ei_cmd = self.get_finalized_command('egg_info')
self.filelist.graft(ei_cmd.egg_info)
def prune_file_list(self):
build = self.get_finalized_command('build')
base_dir = self.distribution.get_fullname()
self.filelist.prune(build.build_base)
self.filelist.prune(base_dir)
sep = re.escape(os.sep)
self.filelist.exclude_pattern(r'(^|' + sep + r')(RCS|CVS|\.svn)' + sep,
is_regex=1)
def write_file(filename, contents):
"""Create a file with the specified name and write 'contents' (a
sequence of strings without line terminators) to it.
"""
contents = "\n".join(contents)
# assuming the contents has been vetted for utf-8 encoding
contents = contents.encode("utf-8")
with open(filename, "wb") as f: # always write POSIX-style manifest
f.write(contents)
def write_pkg_info(cmd, basename, filename):
log.info("writing %s", filename)
if not cmd.dry_run:
metadata = cmd.distribution.metadata
metadata.version, oldver = cmd.egg_version, metadata.version
metadata.name, oldname = cmd.egg_name, metadata.name
metadata.long_description_content_type = getattr(
cmd.distribution,
'long_description_content_type'
)
try:
# write unescaped data to PKG-INFO, so older pkg_resources
# can still parse it
metadata.write_pkg_info(cmd.egg_info)
finally:
metadata.name, metadata.version = oldname, oldver
safe = getattr(cmd.distribution, 'zip_safe', None)
bdist_egg.write_safety_flag(cmd.egg_info, safe)
def warn_depends_obsolete(cmd, basename, filename):
if os.path.exists(filename):
log.warn(
"WARNING: 'depends.txt' is not used by setuptools 0.6!\n"
"Use the install_requires/extras_require setup() args instead."
)
def _write_requirements(stream, reqs):
lines = yield_lines(reqs or ())
append_cr = lambda line: line + '\n'
lines = map(append_cr, lines)
stream.writelines(lines)
def write_requirements(cmd, basename, filename):
dist = cmd.distribution
data = six.StringIO()
_write_requirements(data, dist.install_requires)
extras_require = dist.extras_require or {}
for extra in sorted(extras_require):
data.write('\n[{extra}]\n'.format(**vars()))
_write_requirements(data, extras_require[extra])
cmd.write_or_delete_file("requirements", filename, data.getvalue())
def write_setup_requirements(cmd, basename, filename):
    data = six.StringIO()
_write_requirements(data, cmd.distribution.setup_requires)
cmd.write_or_delete_file("setup-requirements", filename, data.getvalue())
def write_toplevel_names(cmd, basename, filename):
pkgs = dict.fromkeys(
[
k.split('.', 1)[0]
for k in cmd.distribution.iter_distribution_names()
]
)
cmd.write_file("top-level names", filename, '\n'.join(sorted(pkgs)) + '\n')
def overwrite_arg(cmd, basename, filename):
write_arg(cmd, basename, filename, True)
def write_arg(cmd, basename, filename, force=False):
argname = os.path.splitext(basename)[0]
value = getattr(cmd.distribution, argname, None)
if value is not None:
value = '\n'.join(value) + '\n'
cmd.write_or_delete_file(argname, filename, value, force)
def write_entries(cmd, basename, filename):
ep = cmd.distribution.entry_points
if isinstance(ep, six.string_types) or ep is None:
data = ep
elif ep is not None:
data = []
for section, contents in sorted(ep.items()):
if not isinstance(contents, six.string_types):
contents = EntryPoint.parse_group(section, contents)
contents = '\n'.join(sorted(map(str, contents.values())))
data.append('[%s]\n%s\n\n' % (section, contents))
data = ''.join(data)
cmd.write_or_delete_file('entry points', filename, data, True)
def get_pkg_info_revision():
"""
Get a -r### off of PKG-INFO Version in case this is an sdist of
a subversion revision.
"""
warnings.warn("get_pkg_info_revision is deprecated.", DeprecationWarning)
if os.path.exists('PKG-INFO'):
with io.open('PKG-INFO') as f:
for line in f:
match = re.match(r"Version:.*-r(\d+)\s*$", line)
if match:
return int(match.group(1))
return 0
|
apache-2.0
| -3,993,512,812,707,618,000 | -5,681,251,659,197,776,000 | 34.686163 | 79 | 0.569675 | false |
kcyu1993/ML_course_kyu
|
projects/project1/scripts/model.py
|
1
|
19450
|
from __future__ import absolute_import
from abc import ABCMeta, abstractmethod
import copy
from data_utils import build_k_indices
from learning_model import *
from regularizer import *
from helpers import save_numpy_array
import numpy as np
class Model(object):
"""
Author: Kaicheng Yu
Machine learning model engine
Implement the optimizers
sgd
normal equations
cross-validation of given parameters
Abstract method:
__call__ produce the raw prediction, use the latest weight obtained by training
predict produce prediction values, could take weight as input
get_gradient define gradient here, including the gradient for regularizer
normalequ define normal equations
Support:
L1, L2 normalization
    Due to the distribution of work, only LogisticRegression is fully tested for
    fitting data and cross-validation.
    The LinearRegression model should also work, but it is not fully tested.
    This class is not meant to be specific to this learning project; it aims to be
    reusable and scalable to other problems and models.
"""
def __init__(self, train_data, validation=None, initial_weight=None,
loss_function_name='mse', cal_weight='gradient',
regularizer=None, regularizer_p=None):
"""
Initializer of all learning models.
:param train_data: training data.
:param validation_data:
"""
self.train_x = train_data[1]
self.train_y = train_data[0]
self.set_valid(validation)
''' Define the progress of history here '''
self.losses = []
self.iterations = 0
self.weights = []
self.misclass_rate = []
''' Define loss, weight calculation, regularizer '''
self.loss_function = get_loss_function(loss_function_name)
self.loss_function_name = loss_function_name
self.calculate_weight = cal_weight
self.regularizer = Regularizer.get_regularizer(regularizer, regularizer_p)
self.regularizer_p = regularizer_p
# Asserting degree
if len(self.train_x.shape) > 1:
degree = self.train_x.shape[1]
else:
degree = 1
# Initialize the weight for linear model.
if initial_weight is not None:
self.weights.append(initial_weight)
else:
self.weights.append(np.random.rand(degree))
def set_valid(self, validation):
# Set validation here.
self.validation = False
self.valid_x = None
self.valid_y = None
self.valid_losses = None
self.valid_misclass_rate = None
if validation is not None:
(valid_y, valid_x) = validation
self.valid_x = valid_x
self.valid_y = valid_y
self.validation = True
self.valid_losses = []
self.valid_misclass_rate = []
@abstractmethod
def __call__(self, **kwargs):
"""Define the fit function and get prediction"""
raise NotImplementedError
@abstractmethod
def get_gradient(self, y, x, weight):
raise NotImplementedError
@abstractmethod
def predict(self, x, weight):
raise NotImplementedError
@abstractmethod
def normalequ(self, **kwargs):
''' define normal equation method to calculate optimal weights'''
raise NotImplementedError
def compute_weight(self, y, x, test_x=None, test_y=None, **kwargs):
""" Return weight under given parameter """
model = copy.copy(self)
model.__setattr__('train_y', y)
model.__setattr__('train_x', x)
if test_x is not None and test_y is not None:
model.set_valid((test_y, test_x))
_kwargs = []
for name, value in kwargs.items():
            # Recognize the "regularizer_p" parameter and update the regularizer
            if name == "regularizer_p":
model.__setattr__(name, value)
model.regularizer.set_parameter(value)
else:
_kwargs.append((name, value))
_kwargs = dict(_kwargs)
        if model.calculate_weight == 'gradient':
return model.sgd(**_kwargs)
# elif model.calculate_weight is 'newton':
# return model.newton(**_kwargs)
        elif model.calculate_weight == 'normalequ':
return model.normalequ(**_kwargs)
def get_history(self):
"""
Get the training history of current model
:return: list as [iterations, [losses], [weights], [mis_class]]
"""
if self.validation:
return self.iterations, (self.losses, self.valid_losses), \
(self.weights), (self.misclass_rate, self.valid_misclass_rate)
return self.iterations, self.losses, self.weights, self.misclass_rate
def train(self, optimizer='sgd', loss_function='mse', **kwargs):
"""
Train function to perform one time training
Will based optimizer to select.
TODO: Would add 'newton' in the future
This
:param optimizer: only support 'sgd'
:param loss_function: loss_function name {mse, mae, logistic}
:param kwargs: passed into sgd
:return: best weight
"""
self.loss_function = get_loss_function(loss_function)
self.loss_function_name = loss_function
        if optimizer == 'sgd':
self.sgd(**kwargs)
return self.weights[-1]
"""===================================="""
""" Beginning of the optimize Routines """
"""===================================="""
def sgd(self, lr=0.01, decay=0.5, max_iters=1000,
batch_size=128, early_stop=150, decay_intval=50, decay_lim=9):
"""
Define the SGD algorithm here
Implementing weight decay, early stop.
:param lr: learning rate
:param decay: weight decay after fix iterations
:param max_iters: maximum iterations
:param batch_size: batch_size
:param early_stop: early_stop after no improvement
:return: final weight vector
"""
np.set_printoptions(precision=4)
w = self.weights[0]
loss = self.compute_loss(self.train_y, self.train_x, w)
best_loss = loss
best_counter = 0
decay_counter = 0
# print("initial loss is {} ".format(loss))
for epoch in range(max_iters):
for batch_y, batch_x in batch_iter(self.train_y, self.train_x, batch_size):
grad = self.get_gradient(batch_y, batch_x, w)
w = w - lr * grad
loss = self.compute_loss(self.train_y, self.train_x, w)
mis_class = self.compute_metrics(self.train_y, self.train_x, w)
self.weights.append(w)
self.losses.append(loss)
self.misclass_rate.append(mis_class)
if self.validation is True:
valid_loss = self.compute_loss(self.valid_y, self.valid_x, w)
valid_mis_class = self.compute_metrics(self.valid_y, self.valid_x, w)
self.valid_losses.append(valid_loss)
self.valid_misclass_rate.append(valid_mis_class)
# Display every 25 epoch
if (epoch + 1) % 25 == 0:
print('Epoch {e} in {m}'.format(e=epoch + 1, m=max_iters), end="\t")
if self.validation is True:
# print('\tTrain Loss {0:0.4f}, \tTrain mis-class {0:0.4f}, '
# '\tvalid loss {0:0.4f}, \tvalid mis-class {0:0.4f}'.
# format(loss, mis_class, valid_loss, valid_mis_class))
print('\tTrain Loss {}, \tTrain mis-class {}, '
'\tvalid loss {}, \tvalid mis-class {}'.
format(loss, mis_class, valid_loss, valid_mis_class))
else:
print('\tTrain Loss {}, \tTrain mis-class {}'.
format(loss, mis_class))
# judge the performance
if best_loss - loss > 0.000001:
best_loss = loss
best_counter = 0
else:
best_counter += 1
if best_counter > early_stop:
print("Learning early stop since loss not improving for {} epoch.".format(best_counter))
break
if best_counter % decay_intval == 0:
print("weight decay by {}".format(decay))
lr *= decay
decay_counter += 1
if decay_counter > decay_lim:
print("decay {} times, stop".format(decay_lim))
break
return self.weights[-1]
def newton(self, lr=0.01, max_iters=100):
# TODO: implement newton method later
raise NotImplementedError
def cross_validation(self, cv, lambdas, lambda_name, seed=1, skip=False, plot=False, **kwargs):
"""
Cross validation method to acquire the best prediction parameters.
It will use the train_x y as data and do K-fold cross validation.
:param cv: cross validation times
:param lambdas: array of lambdas to be validated
:param lambda_name: the lambda name tag
:param seed: random seed
:param skip: skip the cross validation, only valid 1 time
        :param plot: plot the cross-validation curve; set to False if the machine
            does not support matplotlib.pyplot.
:param kwargs: other parameters could pass into compute_weight
:return: best weights, best_lambda, (training error, valid error)
"""
np.set_printoptions(precision=4)
k_indices = build_k_indices(self.train_y, cv, seed)
# define lists to store the loss of training data and test data
err_tr = []
err_te = []
weights = []
print("K-fold ({}) cross validation to examine [{}]".
format(cv, lambdas))
for lamb in lambdas:
print("For lambda: {}".format(lamb))
_mse_tr = []
_mse_te = []
_weight = []
for k in range(cv):
print('Cross valid iteration {}'.format(k))
weight, loss_tr, loss_te = self._loop_cross_validation(self.train_y, self.train_x,
k_indices, k,
lamb, lambda_name, **kwargs)
_mse_tr += [loss_tr]
_mse_te += [loss_te]
_weight.append(weight)
if skip:
break
avg_tr = np.average(_mse_tr)
avg_te = np.average(_mse_te)
err_tr += [avg_tr]
err_te += [avg_te]
weights.append(_weight)
print("\t train error {}, \t valid error {}".
format(avg_tr, avg_te))
# Select the best parameter during the cross validations.
print('K-fold cross validation result: \n {} \n {}'.
format(err_tr, err_te))
# Select the best based on least err_te
min_err_te = np.argmin(err_te)
print('Best err_te result {}, lambda {}'.
format(err_te[min_err_te], lambdas[min_err_te]))
if plot:
from plots import cross_validation_visualization
cross_validation_visualization(lambdas, err_tr, err_te, title=lambda_name,
error_name=self.loss_function_name)
else:
save_numpy_array(lambdas, err_tr, err_te, names=['lambda', 'err_tr', 'err_te'], title=self.regularizer.name)
return weights[min_err_te], lambdas[min_err_te], (err_tr, err_te)
def _loop_cross_validation(self, y, x, k_indices, k, lamb, lambda_name, **kwargs):
"""
Single loop of cross validation
:param y: train labels
:param x: train data
:param k_indices: indices array
:param k: number of cross validations
:param lamb: lambda to use
:param lambda_name: lambda_name to pass into compute weight
:return: weight, mis_tr, mis_te
"""
train_ind = np.concatenate((k_indices[:k], k_indices[k + 1:]), axis=0)
train_ind = np.reshape(train_ind, (train_ind.size,))
test_ind = k_indices[k]
# Note: different from np.ndarray, tuple is name[index,]
# ndarray is name[index,:]
train_x = x[train_ind,]
train_y = y[train_ind,]
test_x = x[test_ind,]
test_y = y[test_ind,]
# Insert one more kwargs item
kwargs[lambda_name] = lamb
weight = self.compute_weight(train_y, train_x, test_x, test_y, **kwargs)
# Compute the metrics and return
loss_tr = self.compute_metrics(train_y, train_x, weight)
loss_te = self.compute_metrics(test_y, test_x, weight)
return weight, loss_tr, loss_te
def compute_metrics(self, target, data, weight):
"""
Compute the following metrics
Misclassification rate
"""
pred = self.predict(data, weight)
assert len(pred) == len(target)
# Calculate the mis-classification rate:
N = len(pred)
pred = np.reshape(pred, (N,))
target = np.reshape(target, (N,))
nb_misclass = np.count_nonzero(target - pred)
return nb_misclass / N
def compute_loss(self, y, x, weight):
return self.loss_function(y, x, weight)
class LogisticRegression(Model):
""" Logistic regression """
def __init__(self, train, validation=None, initial_weight=None,
loss_function_name='logistic',
calculate_weight='gradient',
regularizer=None, regularizer_p=None):
"""
Constructor of Logistic Regression model
:param train: tuple (y, x)
:param validation: tuple (y, x)
:param initial_weight: weight vector, dim align x
:param loss_function: f(x, y, weight)
:param regularizer: "Ridge" || "Lasso"
:param regularizer_p: parameter
"""
# Initialize the super class with given data.
# Transform the y into {0,1}
y, tx = train
y[np.where(y < 0)] = 0
train = (y, tx)
if validation:
val_y, val_tx = validation
val_y[np.where(val_y < 0)] = 0
validation = (val_y, val_tx)
super(LogisticRegression, self).__init__(train, validation,
initial_weight=initial_weight,
loss_function_name=loss_function_name,
cal_weight=calculate_weight,
regularizer=regularizer,
regularizer_p=regularizer_p)
# Set predicted label
self.pred_label = [-1, 1]
def __call__(self, x, weight=None):
"""
Define the fit function and get prediction,
generate probability of occurrence
"""
if weight is None:
weight = self.weights[-1]
return sigmoid(np.dot(x, weight))
def get_gradient(self, y, x, weight):
""" calculate gradient given data and weight """
y = np.reshape(y, (len(y),))
return np.dot(x.T, sigmoid(np.dot(x, weight)) - y) \
+ self.regularizer.get_gradient(weight)
def get_hessian(self, y, x, weight):
# TODO: implement hessian for newton method
raise NotImplementedError
def predict(self, x, weight=None, cutting=0.5):
""" Prediction of event {0,1} """
if weight is None: weight = self.weights[-1]
pred = sigmoid(np.dot(x, weight))
pred[np.where(pred <= cutting)] = 0
pred[np.where(pred > cutting)] = 1
return pred
def predict_label(self, x, weight=None, cutting=0.5, predict_label=None):
""" Prediction result with labels """
if predict_label is None:
predict_label = self.pred_label
if weight is None: weight = self.weights[-1]
pred = self.predict(x, weight, cutting)
pred[np.where(pred == 0)] = predict_label[0]
pred[np.where(pred == 1)] = predict_label[1]
return pred
def train(self, loss_function='logistic',
lr=0.1, decay=0.5, max_iters=3000, batch_size=128, **kwargs):
""" Make the default loss logistic, set default parameters """
return super(LogisticRegression, self).train('sgd', loss_function,
lr=lr,
decay=decay, max_iters=max_iters,
batch_size=batch_size, **kwargs)
def normalequ(self, **kwargs):
""" Should never call """
raise NotImplementedError
class LinearRegression(Model):
""" Linear regression model
    This is not fully tested, especially the cross-validation; please refer
    to implemenations.py for the linear model.
"""
def __init__(self, train, validation=None, initial_weight=None,
regularizer=None, regularizer_p=None,
loss_function_name='mse', calculate_weight='normalequ'):
# Initialize the super class with given data.
super(LinearRegression, self).__init__(train, validation,
initial_weight=initial_weight,
loss_function_name=loss_function_name,
cal_weight=calculate_weight,
regularizer=regularizer,
regularizer_p=regularizer_p)
def __call__(self, x):
""" calulate prediction based on latest result """
return np.dot(x, self.weights[-1])
def get_gradient(self, batch_y, batch_x, weight):
""" return gradient of linear model, including the regularizer """
N = batch_y.shape[0]
        grad = np.zeros(len(weight))
for index in range(N):
_y = batch_y[index]
_x = batch_x[index]
grad = grad + gradient_least_square(_y, _x, weight, self.loss_function_name)
grad /= N
grad += self.regularizer.get_gradient(weight)
return grad
def predict(self, x, weight):
""" Prediction function, predicting final result """
pred = np.dot(x, weight)
pred[np.where(pred <= 0)] = -1
pred[np.where(pred > 0)] = 1
return pred
def normalequ(self):
""" Normal equation to get parameters """
tx = self.train_x
y = self.train_y
if self.regularizer is None:
return np.linalg.solve(np.dot(tx.T, tx), np.dot(tx.T, y))
        elif self.regularizer.name == 'Ridge':
G = np.eye(tx.shape[1])
G[0, 0] = 0
hes = np.dot(tx.T, tx) + self.regularizer_p * G
return np.linalg.solve(hes, np.dot(tx.T, y))
else:
raise NotImplementedError
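# Illustrative sketch, not part of the original project code: a minimal way to
# drive the logistic model, assuming `y` and `tx` are numpy arrays with labels
# in {-1, 1} and that the default (absent) regularizer behaves as a no-op.
# Hyper-parameters below are illustrative only.
def _logistic_regression_example(y, tx):
    model = LogisticRegression((y, tx))
    weight = model.train(lr=0.1, max_iters=500, batch_size=128)
    return model.predict_label(tx, weight)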
|
mit
| 7,143,484,833,543,779,000 | -673,181,947,188,073,700 | 39.352697 | 120 | 0.54437 | false |
msimet/Stile
|
devel/make_simple_catalog.py
|
1
|
1417
|
import galsim
import numpy
use_noise = True
extent_degrees = 1. # Create galaxies within a box of this side length
n_galaxies_per_sq_arcmin = 20
z_min = 0.1
z_max = 2.0
z_powerlaw_slope = 2.0
z_lens = 0.2
def make_safe_shear(g):
if g[0]>1:
g[0] = 1
if g[1]>1:
g[1] = 1
mag = numpy.sqrt(g[0]**2+g[1]**2)
if mag>0.99999:
g /= (mag+0.00001)
return g
def main():
z_offs = z_min**(z_powerlaw_slope+1)
n_total_galaxies = int(extent_degrees**2*3600*n_galaxies_per_sq_arcmin)
halo = galsim.NFWHalo(mass=1.E14, conc=4., redshift=z_lens)
for i in range(n_total_galaxies):
ra,dec = extent_degrees*numpy.random.rand(2)-0.5*extent_degrees
z = ((z_powerlaw_slope+1)*numpy.random.random()+z_offs)**(1./(z_powerlaw_slope+1))
if use_noise:
g_int = make_safe_shear(numpy.random.normal(scale=0.35,size=2))
g_int = galsim.Shear(g1=g_int[0], g2=g_int[1])
else:
g_int = galsim.Shear(g1=0,g2=0)
if z>z_lens:
g_induced = halo.getShear(galsim.PositionD(3600*ra,3600*dec),z)
# g_induced = (min(g_induced[0],1),min(g_induced[0],1))
g_induced = galsim.Shear(g1=g_induced[0],g2=g_induced[1])
g_total = g_induced+g_int
else:
g_total = g_int
print i, ra, dec, z, g_total.getG1(), g_total.getG2()
if __name__=='__main__':
main()
|
bsd-3-clause
| 7,177,032,448,549,311,000 | -7,852,747,105,026,449,000 | 30.488889 | 90 | 0.568102 | false |
kdart/pycopia
|
core/pycopia/inet/rfc2822.py
|
1
|
15042
|
#!/usr/bin/python2.7
# -*- coding: utf-8 -*-
# vim:ts=4:sw=4:softtabstop=4:smarttab:expandtab
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Objects for constructing, parsing, and editing rfc 2822 compliant messages
(plus extensions).
"""
from __future__ import print_function
import re
from pycopia.inet.ABNF import *
from pycopia.fsm import FSM, ANY
from pycopia.aid import IF, Enums
SPECIALS = '()<>@,:;."[]'
LWS = WSP
EXTRA = "!#$%&'*+-/=?^_`{|}~"
ATEXT = ALPHA+DIGIT+EXTRA
HEADBREAK = CRLF+CRLF
FOLDED = re.compile(r'%s([%s]+)' % (CRLF, WSP))
class RFC2822Error(Exception):
pass
class RFC2822SyntaxError(RFC2822Error):
pass
class _RFC2822FSM(FSM):
def reset(self):
self._reset()
self.arg = ''
self.cl_name = None
self.cl_params = {}
self.cl_value = None
def unfold(s):
"""Unfold a folded string, keeping line breaks and other white space."""
return FOLDED.sub(r"\1", s)
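# Illustrative sketch, not part of the original pycopia source: a folded header
# value is joined back onto one logical line; the CRLF of the fold is dropped
# and the folding white space is kept. The sample value is hypothetical.
def _unfold_example():
    return unfold("Subject: a very\r\n long subject")  # -> "Subject: a very long subject"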
def headerlines(bigstring):
"""Yield unfolded lines from a chunk of text."""
bigstring = unfold(bigstring)
for line in bigstring.split(CRLF):
yield line
def get_headers(fo):
s = []
b = 4
while 1:
data = fo.read(80)
if not data:
break
i = data.find(HEADBREAK)
if i == -1:
# catch HEADBREAK split across chunks
if data.startswith("\r"):
if s[-1].endswith("\r\n"):
b = 2
break
elif data.startswith("\n"):
if s[-1].endswith("\r\n\r"):
b = 1
break
else:
s.append(data)
continue
else:
s.append(data[:i+4])
break
rv = []
for line in headerlines("".join(s)):
if line:
rv.append(getHeader(line))
return rv, data[i+b:]
def getHeader(line):
[name, val] = line.split(":", 1)
return Header(name.strip(), val.lstrip())
def get_headers_dict(fo):
headers, left = get_headers(fo)
rv = Headers()
for h in headers:
rv[h.name] = h.value
return rv, left
class Header(object):
"""base class for header objects."""
def __init__(self, name, value):
self.name = name
self.value = value
def __str__(self):
return "%s: %s" % (self.name, self.value)
def __repr__(self):
return "%s(%r, %r)" % (self.__class__.__name__, self.name, self.value)
def __hash__(self):
return hash(self.name)
def __eq__(self, other):
return self.name.upper() == other.name.upper()
def __ne__(self, other):
return self.name.upper() != other.name.upper()
def __lt__(self, other):
return self.name.upper() < other.name.upper()
def __gt__(self, other):
return self.name.upper() > other.name.upper()
def __le__(self, other):
return self.name.upper() <= other.name.upper()
def __ge__(self, other):
return self.name.upper() >= other.name.upper()
# concrete header fields. These encapsulate any special rules for methods for
# its kind. The names of these are significant... the actual heading value is
# taken from it with some translation applied.
class Return_Path(Header):
def __str__(self):
return "%s: %s" % (self.NAME, IF(self.data, self.value, "<>"))
class Date(Header):
def __init__(self, timevalue=None):
        Header.__init__(self, "Date", timevalue)
def __str__(self):
return "%s: %s" % (self.name, formatdate(self.value) )
class From(Header):
pass
class Sender(Header):
pass
class Reply_To(Header):
pass
class To(Header):
pass
class Cc(Header):
pass
class Bcc(Header):
pass
class Message_ID(Header):
pass
class In_Reply_To(Header):
pass
class References(Header):
pass
class Subject(Header):
pass
class Comments(Header):
pass
class Keywords(Header):
pass
class Resent_Date(Header):
pass
class Resent_From(Header):
pass
class Resent_Sender(Header):
pass
class Resent_To(Header):
pass
class Resent_Cc(Header):
pass
class Resent_Bcc(Header):
pass
class Resent_Message_ID(Header):
pass
class Return_Path(Header):
pass
class Recieved(Header):
pass
##### message parts #####
class Headers(dict):
"""A Collection of headers. No duplicates allowed here."""
def __setitem__(self, name, ho):
dict.__setitem__(self, name.lower(), ho)
def __delitem__(self, name):
dict.__delitem__(self, name.lower())
def __getitem__(self, name):
try:
return dict.__getitem__(self, name.lower())
except KeyError:
return None
def get(self, key, default=None):
return dict.get(self, key.lower(), default)
def emit(self, fo):
for h in self.values():
fo.write(str(h))
class Body(object):
def __init__(self, text=""):
self.text = str(text)
def __str__(self):
return self.text
def emit(self, fo):
fo.write(self.text)
class Message(object):
"""Represents an email message."""
def __init__(self, header, body=None):
self.header = header
self.body = body or Body()
def __str__(self):
return str(self.header)+"\n\n"+str(self.body)
def emit(self, fo):
self.header.emit(fo)
fo.write("\n\n")
self.body.emit(fo)
class QuotedString(object):
"""QuotedString(data)
    Represents a quoted string. Automatically encodes the value.
"""
def __init__(self, val):
self.data = val
def __str__(self):
return quote(str(self.data))
def __repr__(self):
return "%s(%r)" % (self.__class__, self.data)
def parse(self, data):
self.data = unquote(data)
class Comment(object):
"""A header comment. """
def __init__(self, item):
self.data = item
def __str__(self):
return "( %s )" % (self.item)
class Address(object):
def __init__(self, address, name=None):
self.address = address
self.name = name
def __str__(self):
if self.name:
return '"%s" <%s>' % (self.name, self.address)
else:
return str(self.address)
def __len__(self):
return len(str(self))
def __repr__(self):
return "%s(%r, %r)" % (self.__class__.__name__, self.address, self.name)
def __eq__(self, other):
try:
return self.address == other.address and self.name == other.name
except AttributeError:
return str(self) == str(other) # other might just be a string
def __ne__(self, other):
try:
return self.address != other.address or self.name != other.name
except AttributeError:
return str(self) != str(other) # other might just be a string
class AddressList(list):
def append(self, address, name=None):
super(AddressList, self).append(Address(address, name))
add = append
def insert(self, i, address, name=None):
super(AddressList, self).insert(i, Address(address, name))
def __str__(self):
return ", ".join(map(str, self))
def formatdate(timeval=None):
"""Returns time format preferred for Internet standards.
Sun, 06 Nov 1994 08:49:37 GMT ; RFC 822, updated by RFC 1123
According to RFC 1123, day and month names must always be in
English. If not for that, this code could use strftime(). It
can't because strftime() honors the locale and could generate
non-English names.
"""
from pycopia import timelib
if timeval is None:
timeval = timelib.time()
timeval = timelib.gmtime(timeval)
return "%s, %02d %s %04d %02d:%02d:%02d GMT" % (
["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"][timeval[6]],
timeval[2],
["Jan", "Feb", "Mar", "Apr", "May", "Jun",
"Jul", "Aug", "Sep", "Oct", "Nov", "Dec"][timeval[1]-1],
timeval[0], timeval[3], timeval[4], timeval[5])
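# Illustrative example, assuming pycopia.timelib.gmtime behaves like the
# standard time.gmtime:
#   formatdate(0)  # -> "Thu, 01 Jan 1970 00:00:00 GMT"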
class RFC2822Parser(object):
def __init__(self):
self._contenthandler = None
self._errorhandler = None
self._initfsm()
def setContentHandler(self, handler):
assert isinstance(handler, _HandlerBase), "must be handler object."
self._contenthandler = handler
def getContentHandler(self):
return self._contenthandler
def setErrorHandler(self, handler):
self._errorhandler = handler
def getErrorHandler(self):
return self._errorhandler
def parse(self, url):
import urllib2
fo = urllib2.urlopen(url)
try:
self.parseFile(fo)
finally:
fo.close()
# parser unfolds folded strings
def parseFile(self, fo):
self._contenthandler.startDocument()
lastline = ''
savedlines = []
while 1:
line = fo.readline()
if not line:
if lastline:
line = "".join(savedlines)+lastline
self._process_line(line)
break
if not lastline:
lastline = line
continue
if line[0] in WSP:
savedlines.append(lastline.rstrip())
lastline = line[1:]
continue
if savedlines:
newline = "".join(savedlines)+lastline
savedlines = []
self._process_line(newline)
lastline = line
else:
self._process_line(lastline)
lastline = line
self._contenthandler.endDocument()
def _process_line(self, line):
self._fsm.process_string(line)
self._fsm.reset()
# XXX
def _initfsm(self):
# state names:
[NAME, SLASH, QUOTE, SLASHQUOTE, PARAM, PARAMNAME, VALUE,
ENDLINE, PARAMVAL, PARAMQUOTE, VSLASH,
] = Enums(
"NAME", "SLASH", "QUOTE", "SLASHQUOTE", "PARAM", "PARAMNAME", "VALUE",
"ENDLINE", "PARAMVAL", "PARAMQUOTE", "VSLASH",
)
f = _RFC2822FSM(NAME)
f.add_default_transition(self._error, NAME)
#
f.add_transition_list(IANA_TOKEN, NAME, self._addtext, NAME)
f.add_transition(":", NAME, self._endname, VALUE)
f.add_transition(";", NAME, self._endname, PARAMNAME)
f.add_transition_list(VALUE_CHAR, VALUE, self._addtext, VALUE)
f.add_transition(CR, VALUE, None, ENDLINE)
f.add_transition(LF, ENDLINE, self._doline, NAME)
# parameters
f.add_transition_list(IANA_TOKEN, PARAMNAME, self._addtext, PARAMNAME)
f.add_transition("=", PARAMNAME, self._endparamname, PARAMVAL)
f.add_transition_list(SAFE_CHAR, PARAMVAL, self._addtext, PARAMVAL)
f.add_transition(",", PARAMVAL, self._nextparam, PARAMVAL)
f.add_transition(";", PARAMVAL, self._nextparam, PARAMNAME)
f.add_transition(DQUOTE, PARAMVAL, self._startquote, PARAMQUOTE)
f.add_transition_list(QSAFE_CHAR, PARAMQUOTE, self._addtext, PARAMQUOTE)
f.add_transition(DQUOTE, PARAMQUOTE, self._endquote, PARAMVAL)
f.add_transition(":", PARAMVAL, self._nextparam, VALUE)
# slashes
f.add_transition("\\", VALUE, None, VSLASH)
f.add_transition(ANY, VSLASH, self._slashescape, VALUE)
# f.add_transition("\\", QUOTE, None, SLASHQUOTE)
# f.add_transition(ANY, SLASHQUOTE, self._slashescape, QUOTE)
# # double quotes
# f.add_transition(DQUOTE, xxx, None, QUOTE)
# f.add_transition(DQUOTE, QUOTE, self._doublequote, xxx)
# f.add_transition(ANY, QUOTE, self._addtext, QUOTE)
f.reset()
self._fsm = f
def _error(self, c, fsm):
if self._errorhandler:
self._errorhandler(c, fsm)
else:
fsm.reset()
raise RFC2822SyntaxError('Syntax error: %s\n%r' % (c, fsm.stack))
def _addtext(self, c, fsm):
fsm.arg += c
_SPECIAL = {"r":"\r", "n":"\n", "t":"\t", "N":"\n"}
def _slashescape(self, c, fsm):
fsm.arg += self._SPECIAL.get(c, c)
# def _doublequote(self, c, fsm):
# self.arg_list.append(fsm.arg)
# fsm.arg = ''
def _startquote(self, c, fsm):
fsm.arg = ''
def _endquote(self, c, fsm):
paramval = fsm.arg
fsm.arg = ''
fsm.cl_params[fsm.cl_paramname].append(paramval)
def _endname(self, c, fsm):
fsm.cl_name = ABNF.Literal(fsm.arg)
fsm.arg = ''
def _endparamname(self, c, fsm):
name = ABNF.Literal(fsm.arg)
fsm.cl_params[name] = []
fsm.cl_paramname = name
fsm.arg = ''
def _nextparam(self, c, fsm):
paramval = fsm.arg
fsm.arg = ''
fsm.cl_params[fsm.cl_paramname].append(paramval)
def _doline(self, c, fsm):
value = fsm.arg
fsm.arg = ''
self._contenthandler.handleLine(fsm.cl_name, fsm.cl_params, value)
class _HandlerBase(object):
def handleLine(self, name, paramdict, value):
pass
def startDocument(self):
pass
def endDocument(self):
pass
class TestHandler(_HandlerBase):
def handleLine(self, name, paramdict, value):
print ("%r %r %r" % (name, paramdict, value))
def startDocument(self):
print ("*** startDocument")
def endDocument(self):
print ("*** endDocument")
class BufferedFile(object):
def __init__(self):
# The last partial line pushed into this object.
self._partial = ''
# The list of full, pushed lines, in reverse order
self._lines = []
# A flag indicating whether the file has been closed or not.
self._closed = False
def close(self):
# Don't forget any trailing partial line.
self._lines.append(self._partial)
self._partial = ''
self._closed = True
def readline(self):
return '' # XXX
pass
def unreadline(self, line):
self._lines.append(line)
def push(self, data):
pass
def pushlines(self, lines):
# Reverse and insert at the front of the lines.
self._lines[:0] = lines[::-1]
def is_closed(self):
return self._closed
def __iter__(self):
return self
def next(self):
line = self.readline()
if line == '':
raise StopIteration
return line
|
apache-2.0
| 3,820,985,500,701,952,000 | 6,291,130,600,905,368,000 | 25.069324 | 80 | 0.569672 | false |
alehander42/hatlog
|
hatlog/prolog.py
|
1
|
1202
|
def generate_prolog(x, name, file):
header = '''\
:- initialization main.
:- use_module(pythonTypeSystem).
:- use_module(prettyTypes).
'''
fun = generate_fun(x, name)
m = '''main :-
open('%s.txt', write, Stream),
(
f(%s, Z0, Z1),
unvar(Z0, Z1, Z2, Z3, Z4), %% replace free vars with names
pretty_args(Z2, Y),
pretty_type(Z3, Z),
pretty_generic(Z4, X),
format(Stream, '~a::', [X]),
write(Stream, Y),
write(Stream, ' -> '),
write(Stream, Z),
write(Stream, '\\n'),
true
),
close(Stream),
halt.
main :-
halt(1).
''' %(file, name)
program = '%s\n%s\n%s' % (header, fun, m)
with open('%s.pl' % file, 'w') as f:
f.write(program)
return '%s.pl' % file
def generate_fun(x, name):
head = 'f(%s, [%s], %s) :-' % (name, ', '.join(x[-1][1]), x[-1][-1])
# print(x[:-1])
block = ',\n '.join(['%s(%s)' % (a, ', '.join(map(generate_arg, args + [b]))) for a, args, b in x[:-1]])
return '%s\n %s.\n' % (head, block)
def generate_arg(a):
if isinstance(a, str):
return a
else:
return '[%s]' % ', '.join(map(generate_arg, a))
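# Illustrative (hypothetical input): generate_arg('X') returns 'X', while
# generate_arg(['A', ['B', 'C']]) returns '[A, [B, C]]', so nested Python lists
# map to nested Prolog lists in the generated clause body.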
|
apache-2.0
| -5,255,237,794,072,995,000 | 6,438,985,999,677,999,000 | 23.530612 | 111 | 0.476705 | false |
tmerrick1/spack
|
var/spack/repos/builtin/packages/r-whisker/package.py
|
5
|
1636
|
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RWhisker(RPackage):
"""logicless templating, reuse templates in many programming languages
including R"""
homepage = "http://github.com/edwindj/whisker"
url = "https://cran.r-project.org/src/contrib/whisker_0.3-2.tar.gz"
list_url = "https://cran.r-project.org/src/contrib/Archive/whisker"
version('0.3-2', 'c4b9bf9a22e69ce003fe68663ab5e8e6')
|
lgpl-2.1
| -1,906,576,031,509,337,900 | 2,477,164,330,650,419,000 | 44.444444 | 78 | 0.679095 | false |
makson96/free-engineer
|
tools/steam.py
|
2
|
8550
|
#!/usr/bin/env python3
#-*- coding: utf-8 -*-
##This software is available to you under the terms of the GPL-3, see "/usr/share/common-licenses/GPL-3".
##Copyright:
##- Tomasz Makarewicz ([email protected])
import os, tarfile, urllib.request, time, shutil
from subprocess import Popen, PIPE
recultis_dir = os.getenv("HOME") + "/.recultis/"
steam_dir = recultis_dir + "shops/steam/"
def start(login, password, recultis_dir, s_appid, game_dir):
print("Starting SteamCMD procedure")
shop_install_dir = recultis_dir + "shops/steam/"
if os.path.isdir(shop_install_dir) == False:
os.makedirs(shop_install_dir)
#start of legacy code for Recultis 1.2
if os.path.isfile(recultis_dir+"steam.sh") == True:
shutil.move(recultis_dir+"steam.sh", shop_install_dir)
if os.path.isfile(recultis_dir+"steamcmd.sh") == True:
shutil.move(recultis_dir+"steamcmd.sh", shop_install_dir)
if os.path.isfile(recultis_dir+"steamcmd_linux.tar.gz") == True:
shutil.move(recultis_dir+"steamcmd_linux.tar.gz", shop_install_dir)
if os.path.isfile(recultis_dir+"steam_log.txt") == True:
shutil.move(recultis_dir+"steam_log.txt", shop_install_dir)
if os.path.isdir(recultis_dir+"linux32") == True:
shutil.move(recultis_dir+"linux32", shop_install_dir)
if os.path.isdir(recultis_dir+"linux64") == True:
shutil.move(recultis_dir+"linux64", shop_install_dir)
if os.path.isdir(recultis_dir+"package") == True:
shutil.move(recultis_dir+"package", shop_install_dir)
if os.path.isdir(recultis_dir+"public") == True:
shutil.move(recultis_dir+"public", shop_install_dir)
#end of legacy code for Recultis 1.2
os.chdir(shop_install_dir)
if login == "" or password == "":
steam_log_file = open("steam_log.txt", "w")
steam_log_file.write("Steamcmd Error. Login or password not provided.\n")
steam_log_file.close()
print("Steamcmd Error. Login or password not provided. try again with correct one.")
steam_error = 0
else:
steamcmd_install(shop_install_dir)
steam_error = 2
retry_nr = 0
while steam_error == 2:
steam_error = run(login, password, shop_install_dir, s_appid, game_dir)
if steam_error == 2:
print("Steamcmd error. Retry.")
retry_nr = retry_nr + 1
if retry_nr == 5:
print("Steamcmd error. Reinstall steamcmd.")
steamcmd_reinstall(shop_install_dir)
elif retry_nr == 8:
steam_error = 0
if steam_error == 0:
steam_log_file = open("steam_log.txt", "a")
steam_log_file.write("\nSteamcmd Error. Terminate.")
steam_log_file.close()
print("Steamcmd Error. Terminate.")
return steam_error
def steamcmd_install(shop_install_dir):
print("Installing SteamCMD")
if os.path.isfile(shop_install_dir+"steamcmd.sh") == False:
urllib.request.urlretrieve("http://media.steampowered.com/client/steamcmd_linux.tar.gz", shop_install_dir + "steamcmd_linux.tar.gz")
tar = tarfile.open(shop_install_dir + "steamcmd_linux.tar.gz")
tar.extractall()
tar.close()
def get_last_log_line():
wrong_lines = ["CWorkThreadPool"]
last_line_nr = -1
try:
steam_log_file = open("steam_log.txt", "r")
steam_log_lines = steam_log_file.readlines()
if len(steam_log_lines) > 0:
steam_last_line = steam_log_lines[last_line_nr]
for w_line in wrong_lines:
while w_line in steam_last_line:
last_line_nr -= 1
steam_last_line = steam_log_lines[last_line_nr]
else:
steam_last_line = ""
steam_log_file.close()
except FileNotFoundError:
steam_last_line = ""
return steam_last_line
def steam_guard():
while os.path.isfile(recultis_dir + "guard_key.txt") == False:
time.sleep(2)
print('Steam Guard Key detected. Verifying...')
steam_guard_file = open(recultis_dir + "guard_key.txt", "r")
steam_guard_code = steam_guard_file.readline()
steam_guard_file.close()
os.remove(recultis_dir + "guard_key.txt")
print(str(steam_guard_code).upper())
return str(steam_guard_code.upper())
def run(login, password, shop_install_dir, s_appid, game_dir):
if os.path.isfile(shop_install_dir+"steam_log.txt") == True:
os.remove(shop_install_dir+"steam_log.txt")
print("Running following steamcmd command:")
print("./steamcmd.sh +@sSteamCmdForcePlatformType windows +login '" + login + "' '******' +force_install_dir " + game_dir + " +app_update " + s_appid + " validate +quit")
print("Check " + shop_install_dir + "steam_log.txt for more details.")
steam_download = Popen("script -q -c \"./steamcmd.sh +@sSteamCmdForcePlatformType windows +login '" + login + "' '" + password + "' +force_install_dir " + game_dir + " +app_update " + s_appid + " validate +quit\" /dev/null", shell=True, stdout=open("steam_log.txt", "wb"), stdin=PIPE)
while steam_download.poll() is None:
time.sleep(2)
steam_last_line = get_last_log_line()
#Terminate the process if bad login or password
if "FAILED with result code" in steam_last_line:
steam_download.terminate()
return 0
#Terminate the process if not owning the game
elif "Failed to install app" in steam_last_line:
steam_download.terminate()
return 0
#Retry 5 times if steamcmd has memory access error
elif '$DEBUGGER "$STEAMEXE" "$@"' in steam_last_line:
return 2
#If computer is not registered on Steam, handle Steam Guard
elif 'Steam Guard' in steam_last_line:
steam_guard_code = steam_guard()
steam_download.stdin.write(bytes(steam_guard_code + '\n', 'ascii'))
steam_download.stdin.flush()
#if there is only 1 line after steamcmd finished working, it means it crashed.
if sum(1 for line in open('steam_log.txt')) == 1:
rc = 0
else:
rc = 1
return rc
def steamcmd_reinstall(shop_install_dir):
print("Reinstalling SteamCMD")
print("Removing SteamCMD")
if os.path.isfile(shop_install_dir+"steam.sh") == True:
os.remove(shop_install_dir+"steam.sh")
if os.path.isfile(shop_install_dir+"steamcmd.sh") == True:
os.remove(shop_install_dir+"steamcmd.sh")
if os.path.isfile(shop_install_dir+"steamcmd_linux.tar.gz") == True:
os.remove(shop_install_dir+"steamcmd_linux.tar.gz")
if os.path.isdir(shop_install_dir+"linux32") == True:
shutil.rmtree(shop_install_dir+"linux32")
if os.path.isdir(shop_install_dir+"linux64") == True:
shutil.rmtree(shop_install_dir+"linux64")
if os.path.isdir(shop_install_dir+"package") == True:
shutil.rmtree(shop_install_dir+"package")
if os.path.isdir(shop_install_dir+"public") == True:
shutil.rmtree(shop_install_dir+"public")
steamcmd_install(shop_install_dir)
def status():
if os.path.isdir(steam_dir) == True:
os.chdir(steam_dir)
else:
status = "Preparing SteamCMD"
percent = 0
return status, percent
status = "Downloading and installing game data"
percent = 0
steam_last_line = get_last_log_line()
if steam_last_line == "":
steam_last_line = "downloading, progress: 0,0 ("
#This code handles steamcmd status if everything is ok
if ("downloading, progress: " in steam_last_line) or ("validating, progress: " in steam_last_line):
steam_value = steam_last_line.split("progress: ")[1]
steam_value = steam_value.split(" (")[0]
steam_value = steam_value.split(",")[0]
steam_value = steam_value.split(".")[0]
steam_value = int(steam_value)
status = "Downloading and installing game data"
percent = steam_value
elif "Success!" in steam_last_line:
status = "Download of game data completed"
percent = 100
#This code handles steamcmd status if a warning is present.
elif "Steam Guard" in steam_last_line:
status = "Warning: Waiting for Steam Guard authentication."
percent = 0
#This code handles steamcmd status if the steam tool marked the steam_log.txt file with an error.
if "Steamcmd Error." in steam_last_line:
try:
steam_log_file = open("steam_log.txt", "r")
steam_log_lines = steam_log_file.readlines()
steam_error_line = steam_log_lines[-3]
steam_log_file.close()
except:
steam_error_line = "Steamcmd Error. Terminate."
if "FAILED with result code 5" in steam_error_line:
status = "Error: Steam - bad login or password. Please correct and start again."
percent = 0
elif "Login or password not provided." in steam_error_line:
status = "Error: Steam - Login or password not provided. Try again with correct one."
percent = 0
elif "Failed to install app" in steam_error_line:
status = "Error: Steam - you are not game owner. Please correct and start again."
percent = 0
elif "FAILED with result code 65" in steam_error_line:
status = "Error: Could not perform Steam Guard authentication. Please try again."
percent = 0
else:
status = "Error: Steamcmd internal error. Please contact Recultis project for support."
percent = 0
return status, percent
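# Illustrative note (an assumption about steamcmd's exact log wording): a last
# log line such as "Update state (0x61) downloading, progress: 45,2 (123 / 456)"
# would make status() return ("Downloading and installing game data", 45).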
|
gpl-3.0
| 5,963,007,465,105,274,000 | -5,268,532,411,370,275,000 | 40.105769 | 285 | 0.69883 | false |
spl0k/supysonic
|
supysonic/cli.py
|
1
|
15855
|
# This file is part of Supysonic.
# Supysonic is a Python implementation of the Subsonic server API.
#
# Copyright (C) 2013-2021 Alban 'spl0k' Féron
#
# Distributed under terms of the GNU AGPLv3 license.
import argparse
import cmd
import getpass
import shlex
import sys
import time
from pony.orm import db_session, select
from pony.orm import ObjectNotFound
from .config import IniConfig
from .daemon.client import DaemonClient
from .daemon.exceptions import DaemonUnavailableError
from .db import Folder, User, init_database, release_database
from .managers.folder import FolderManager
from .managers.user import UserManager
from .scanner import Scanner
class TimedProgressDisplay:
def __init__(self, stdout, interval=5):
self.__stdout = stdout
self.__interval = interval
self.__last_display = 0
self.__last_len = 0
def __call__(self, name, scanned):
if time.time() - self.__last_display > self.__interval:
progress = "Scanning '{}': {} files scanned".format(name, scanned)
self.__stdout.write("\b" * self.__last_len)
self.__stdout.write(progress)
self.__stdout.flush()
self.__last_len = len(progress)
self.__last_display = time.time()
class CLIParser(argparse.ArgumentParser):
def error(self, message):
self.print_usage(sys.stderr)
raise RuntimeError(message)
class SupysonicCLI(cmd.Cmd):
prompt = "supysonic> "
def _make_do(self, command):
def method(obj, line):
try:
args = getattr(obj, command + "_parser").parse_args(shlex.split(line))
except RuntimeError as e:
self.write_error_line(str(e))
return
if hasattr(obj.__class__, command + "_subparsers"):
try:
func = getattr(obj, "{}_{}".format(command, args.action))
except AttributeError:
return obj.default(line)
return func(
**{key: vars(args)[key] for key in vars(args) if key != "action"}
)
else:
try:
func = getattr(obj, command)
except AttributeError:
return obj.default(line)
return func(**vars(args))
return method
def __init__(self, config, stderr=None, *args, **kwargs):
cmd.Cmd.__init__(self, *args, **kwargs)
if stderr is not None:
self.stderr = stderr
else:
self.stderr = sys.stderr
self.__config = config
self.__daemon = DaemonClient(config.DAEMON["socket"])
# Generate do_* and help_* methods
for parser_name in filter(
lambda attr: attr.endswith("_parser") and "_" not in attr[:-7],
dir(self.__class__),
):
command = parser_name[:-7]
if not hasattr(self.__class__, "do_" + command):
setattr(self.__class__, "do_" + command, self._make_do(command))
if hasattr(self.__class__, "do_" + command) and not hasattr(
self.__class__, "help_" + command
):
setattr(
self.__class__,
"help_" + command,
getattr(self.__class__, parser_name).print_help,
)
if hasattr(self.__class__, command + "_subparsers"):
for action, subparser in getattr(
self.__class__, command + "_subparsers"
).choices.items():
setattr(
self, "help_{} {}".format(command, action), subparser.print_help
)
def write_line(self, line=""):
self.stdout.write(line + "\n")
def write_error_line(self, line=""):
self.stderr.write(line + "\n")
def do_EOF(self, line):
return True
do_exit = do_EOF
def default(self, line):
self.write_line("Unknown command %s" % line.split()[0])
self.do_help(None)
def postloop(self):
self.write_line()
def completedefault(self, text, line, begidx, endidx):
command = line.split()[0]
parsers = getattr(self.__class__, command + "_subparsers", None)
if not parsers:
return []
num_words = len(line[len(command) : begidx].split())
if num_words == 0:
return [a for a in parsers.choices if a.startswith(text)]
return []
folder_parser = CLIParser(prog="folder", add_help=False)
folder_subparsers = folder_parser.add_subparsers(dest="action")
folder_subparsers.add_parser("list", help="Lists folders", add_help=False)
folder_add_parser = folder_subparsers.add_parser(
"add", help="Adds a folder", add_help=False
)
folder_add_parser.add_argument("name", help="Name of the folder to add")
folder_add_parser.add_argument(
"path", help="Path to the directory pointed by the folder"
)
folder_del_parser = folder_subparsers.add_parser(
"delete", help="Deletes a folder", add_help=False
)
folder_del_parser.add_argument("name", help="Name of the folder to delete")
folder_scan_parser = folder_subparsers.add_parser(
"scan", help="Run a scan on specified folders", add_help=False
)
folder_scan_parser.add_argument(
"folders",
metavar="folder",
nargs="*",
help="Folder(s) to be scanned. If ommitted, all folders are scanned",
)
folder_scan_parser.add_argument(
"-f",
"--force",
action="store_true",
help="Force scan of already know files even if they haven't changed",
)
folder_scan_target_group = folder_scan_parser.add_mutually_exclusive_group()
folder_scan_target_group.add_argument(
"--background",
action="store_true",
help="Scan the folder(s) in the background. Requires the daemon to be running.",
)
folder_scan_target_group.add_argument(
"--foreground",
action="store_true",
help="Scan the folder(s) in the foreground, blocking the processus while the scan is running.",
)
@db_session
def folder_list(self):
self.write_line("Name\t\tPath\n----\t\t----")
self.write_line(
"\n".join(
"{: <16}{}".format(f.name, f.path)
for f in Folder.select(lambda f: f.root)
)
)
@db_session
def folder_add(self, name, path):
try:
FolderManager.add(name, path)
self.write_line("Folder '{}' added".format(name))
except ValueError as e:
self.write_error_line(str(e))
@db_session
def folder_delete(self, name):
try:
FolderManager.delete_by_name(name)
self.write_line("Deleted folder '{}'".format(name))
except ObjectNotFound as e:
self.write_error_line(str(e))
def folder_scan(self, folders, force, background, foreground):
auto = not background and not foreground
if auto:
try:
self.__folder_scan_background(folders, force)
except DaemonUnavailableError:
self.write_error_line(
"Couldn't connect to the daemon, scanning in foreground"
)
self.__folder_scan_foreground(folders, force)
elif background:
try:
self.__folder_scan_background(folders, force)
except DaemonUnavailableError:
self.write_error_line(
"Couldn't connect to the daemon, please use the '--foreground' option"
)
elif foreground:
self.__folder_scan_foreground(folders, force)
def __folder_scan_background(self, folders, force):
self.__daemon.scan(folders, force)
def __folder_scan_foreground(self, folders, force):
try:
progress = self.__daemon.get_scanning_progress()
if progress is not None:
self.write_error_line(
"The daemon is currently scanning, can't start a scan now"
)
return
except DaemonUnavailableError:
pass
extensions = self.__config.BASE["scanner_extensions"]
if extensions:
extensions = extensions.split(" ")
scanner = Scanner(
force=force,
extensions=extensions,
follow_symlinks=self.__config.BASE["follow_symlinks"],
progress=TimedProgressDisplay(self.stdout),
on_folder_start=self.__unwatch_folder,
on_folder_end=self.__watch_folder,
)
if folders:
fstrs = folders
with db_session:
folders = select(f.name for f in Folder if f.root and f.name in fstrs)[
:
]
notfound = set(fstrs) - set(folders)
if notfound:
self.write_line("No such folder(s): " + " ".join(notfound))
for folder in folders:
scanner.queue_folder(folder)
else:
with db_session:
for folder in select(f.name for f in Folder if f.root):
scanner.queue_folder(folder)
scanner.run()
stats = scanner.stats()
self.write_line("\nScanning done")
self.write_line(
"Added: {0.artists} artists, {0.albums} albums, {0.tracks} tracks".format(
stats.added
)
)
self.write_line(
"Deleted: {0.artists} artists, {0.albums} albums, {0.tracks} tracks".format(
stats.deleted
)
)
if stats.errors:
self.write_line("Errors in:")
for err in stats.errors:
self.write_line("- " + err)
def __unwatch_folder(self, folder):
try:
self.__daemon.remove_watched_folder(folder.path)
except DaemonUnavailableError:
pass
def __watch_folder(self, folder):
try:
self.__daemon.add_watched_folder(folder.path)
except DaemonUnavailableError:
pass
user_parser = CLIParser(prog="user", add_help=False)
user_subparsers = user_parser.add_subparsers(dest="action")
user_subparsers.add_parser("list", help="List users", add_help=False)
user_add_parser = user_subparsers.add_parser(
"add", help="Adds a user", add_help=False
)
user_add_parser.add_argument("name", help="Name/login of the user to add")
user_add_parser.add_argument(
"-p", "--password", help="Specifies the user's password"
)
user_add_parser.add_argument(
"-e", "--email", default="", help="Sets the user's email address"
)
user_del_parser = user_subparsers.add_parser(
"delete", help="Deletes a user", add_help=False
)
user_del_parser.add_argument("name", help="Name/login of the user to delete")
user_roles_parser = user_subparsers.add_parser(
"setroles", help="Enable/disable rights for a user", add_help=False
)
user_roles_parser.add_argument(
"name", help="Name/login of the user to grant/revoke admin rights"
)
user_roles_admin_group = user_roles_parser.add_mutually_exclusive_group()
user_roles_admin_group.add_argument(
"-A", "--admin", action="store_true", help="Grant admin rights"
)
user_roles_admin_group.add_argument(
"-a", "--noadmin", action="store_true", help="Revoke admin rights"
)
user_roles_jukebox_group = user_roles_parser.add_mutually_exclusive_group()
user_roles_jukebox_group.add_argument(
"-J", "--jukebox", action="store_true", help="Grant jukebox rights"
)
user_roles_jukebox_group.add_argument(
"-j", "--nojukebox", action="store_true", help="Revoke jukebox rights"
)
user_pass_parser = user_subparsers.add_parser(
"changepass", help="Changes a user's password", add_help=False
)
user_pass_parser.add_argument(
"name", help="Name/login of the user to which change the password"
)
user_pass_parser.add_argument("password", nargs="?", help="New password")
user_rename_parser = user_subparsers.add_parser(
"rename", help="Rename a user", add_help=False
)
user_rename_parser.add_argument("name", help="Name of the user to rename")
user_rename_parser.add_argument("newname", help="New name for the user")
@db_session
def user_list(self):
self.write_line("Name\t\tAdmin\tJukebox\tEmail")
self.write_line("----\t\t-----\t-------\t-----")
self.write_line(
"\n".join(
"{: <16}{}\t{}\t{}".format(
u.name, "*" if u.admin else "", "*" if u.jukebox else "", u.mail
)
for u in User.select()
)
)
def _ask_password(self): # pragma: nocover
password = getpass.getpass()
confirm = getpass.getpass("Confirm password: ")
if password != confirm:
raise ValueError("Passwords don't match")
return password
@db_session
def user_add(self, name, password, email):
try:
if not password:
password = self._ask_password() # pragma: nocover
UserManager.add(name, password, mail=email)
except ValueError as e:
self.write_error_line(str(e))
@db_session
def user_delete(self, name):
try:
UserManager.delete_by_name(name)
self.write_line("Deleted user '{}'".format(name))
except ObjectNotFound as e:
self.write_error_line(str(e))
@db_session
def user_setroles(self, name, admin, noadmin, jukebox, nojukebox):
user = User.get(name=name)
if user is None:
self.write_error_line("No such user")
else:
if admin:
user.admin = True
self.write_line("Granted '{}' admin rights".format(name))
elif noadmin:
user.admin = False
self.write_line("Revoked '{}' admin rights".format(name))
if jukebox:
user.jukebox = True
self.write_line("Granted '{}' jukebox rights".format(name))
elif nojukebox:
user.jukebox = False
self.write_line("Revoked '{}' jukebox rights".format(name))
@db_session
def user_changepass(self, name, password):
try:
if not password:
password = self._ask_password() # pragma: nocover
UserManager.change_password2(name, password)
self.write_line("Successfully changed '{}' password".format(name))
except ObjectNotFound as e:
self.write_error_line(str(e))
@db_session
def user_rename(self, name, newname):
if not name or not newname:
self.write_error_line("Missing user current name or new name")
return
if name == newname:
return
user = User.get(name=name)
if user is None:
self.write_error_line("No such user")
return
if User.get(name=newname) is not None:
self.write_error_line("This name is already taken")
return
user.name = newname
self.write_line("User '{}' renamed to '{}'".format(name, newname))
def main():
config = IniConfig.from_common_locations()
init_database(config.BASE["database_uri"])
cli = SupysonicCLI(config)
if len(sys.argv) > 1:
cli.onecmd(" ".join(shlex.quote(arg) for arg in sys.argv[1:]))
else:
cli.cmdloop()
release_database()
if __name__ == "__main__":
main()
|
agpl-3.0
| 2,132,275,354,015,632,400 | 4,096,843,548,026,743,300 | 33.615721 | 103 | 0.563959 | false |
willingc/oh-mainline
|
vendor/packages/twisted/twisted/test/test_digestauth.py
|
29
|
23460
|
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.cred._digest} and the associated bits in
L{twisted.cred.credentials}.
"""
from zope.interface.verify import verifyObject
from twisted.trial.unittest import TestCase
from twisted.python.hashlib import md5, sha1
from twisted.internet.address import IPv4Address
from twisted.cred.error import LoginFailed
from twisted.cred.credentials import calcHA1, calcHA2, IUsernameDigestHash
from twisted.cred.credentials import calcResponse, DigestCredentialFactory
def b64encode(s):
return s.encode('base64').strip()
class FakeDigestCredentialFactory(DigestCredentialFactory):
"""
A Fake Digest Credential Factory that generates a predictable
nonce and opaque
"""
def __init__(self, *args, **kwargs):
super(FakeDigestCredentialFactory, self).__init__(*args, **kwargs)
self.privateKey = "0"
def _generateNonce(self):
"""
Generate a static nonce
"""
return '178288758716122392881254770685'
def _getTime(self):
"""
Return a stable time
"""
return 0
class DigestAuthTests(TestCase):
"""
L{TestCase} subclass which defines a number of tests for
L{DigestCredentialFactory} and the digest calculation helpers.
"""
def setUp(self):
"""
Create a DigestCredentialFactory for testing
"""
self.username = "foobar"
self.password = "bazquux"
self.realm = "test realm"
self.algorithm = "md5"
self.cnonce = "29fc54aa1641c6fa0e151419361c8f23"
self.qop = "auth"
self.uri = "/write/"
self.clientAddress = IPv4Address('TCP', '10.2.3.4', 43125)
self.method = 'GET'
self.credentialFactory = DigestCredentialFactory(
self.algorithm, self.realm)
def test_MD5HashA1(self, _algorithm='md5', _hash=md5):
"""
L{calcHA1} accepts the C{'md5'} algorithm and returns an MD5 hash of
its parameters, excluding the nonce and cnonce.
"""
nonce = 'abc123xyz'
hashA1 = calcHA1(_algorithm, self.username, self.realm, self.password,
nonce, self.cnonce)
a1 = '%s:%s:%s' % (self.username, self.realm, self.password)
expected = _hash(a1).hexdigest()
self.assertEqual(hashA1, expected)
def test_MD5SessionHashA1(self):
"""
L{calcHA1} accepts the C{'md5-sess'} algorithm and returns an MD5 hash
of its parameters, including the nonce and cnonce.
"""
nonce = 'xyz321abc'
hashA1 = calcHA1('md5-sess', self.username, self.realm, self.password,
nonce, self.cnonce)
a1 = '%s:%s:%s' % (self.username, self.realm, self.password)
ha1 = md5(a1).digest()
a1 = '%s:%s:%s' % (ha1, nonce, self.cnonce)
expected = md5(a1).hexdigest()
self.assertEqual(hashA1, expected)
def test_SHAHashA1(self):
"""
L{calcHA1} accepts the C{'sha'} algorithm and returns a SHA hash of its
parameters, excluding the nonce and cnonce.
"""
self.test_MD5HashA1('sha', sha1)
def test_MD5HashA2Auth(self, _algorithm='md5', _hash=md5):
"""
L{calcHA2} accepts the C{'md5'} algorithm and returns an MD5 hash of
its arguments, excluding the entity hash for QOP other than
C{'auth-int'}.
"""
method = 'GET'
hashA2 = calcHA2(_algorithm, method, self.uri, 'auth', None)
a2 = '%s:%s' % (method, self.uri)
expected = _hash(a2).hexdigest()
self.assertEqual(hashA2, expected)
def test_MD5HashA2AuthInt(self, _algorithm='md5', _hash=md5):
"""
L{calcHA2} accepts the C{'md5'} algorithm and returns an MD5 hash of
its arguments, including the entity hash for QOP of C{'auth-int'}.
"""
method = 'GET'
hentity = 'foobarbaz'
hashA2 = calcHA2(_algorithm, method, self.uri, 'auth-int', hentity)
a2 = '%s:%s:%s' % (method, self.uri, hentity)
expected = _hash(a2).hexdigest()
self.assertEqual(hashA2, expected)
def test_MD5SessHashA2Auth(self):
"""
L{calcHA2} accepts the C{'md5-sess'} algorithm and QOP of C{'auth'} and
returns the same value as it does for the C{'md5'} algorithm.
"""
self.test_MD5HashA2Auth('md5-sess')
def test_MD5SessHashA2AuthInt(self):
"""
L{calcHA2} accepts the C{'md5-sess'} algorithm and QOP of C{'auth-int'}
and returns the same value as it does for the C{'md5'} algorithm.
"""
self.test_MD5HashA2AuthInt('md5-sess')
def test_SHAHashA2Auth(self):
"""
L{calcHA2} accepts the C{'sha'} algorithm and returns a SHA hash of
its arguments, excluding the entity hash for QOP other than
C{'auth-int'}.
"""
self.test_MD5HashA2Auth('sha', sha1)
def test_SHAHashA2AuthInt(self):
"""
L{calcHA2} accepts the C{'sha'} algorithm and returns a SHA hash of
its arguments, including the entity hash for QOP of C{'auth-int'}.
"""
self.test_MD5HashA2AuthInt('sha', sha1)
def test_MD5HashResponse(self, _algorithm='md5', _hash=md5):
"""
L{calcResponse} accepts the C{'md5'} algorithm and returns an MD5 hash
of its parameters, excluding the nonce count, client nonce, and QoP
value if the nonce count and client nonce are C{None}
"""
hashA1 = 'abc123'
hashA2 = '789xyz'
nonce = 'lmnopq'
response = '%s:%s:%s' % (hashA1, nonce, hashA2)
expected = _hash(response).hexdigest()
digest = calcResponse(hashA1, hashA2, _algorithm, nonce, None, None,
None)
self.assertEqual(expected, digest)
def test_MD5SessionHashResponse(self):
"""
L{calcResponse} accepts the C{'md5-sess'} algorithm and returns an MD5
hash of its parameters, excluding the nonce count, client nonce, and
QoP value if the nonce count and client nonce are C{None}
"""
self.test_MD5HashResponse('md5-sess')
def test_SHAHashResponse(self):
"""
L{calcResponse} accepts the C{'sha'} algorithm and returns a SHA hash
of its parameters, excluding the nonce count, client nonce, and QoP
value if the nonce count and client nonce are C{None}
"""
self.test_MD5HashResponse('sha', sha1)
def test_MD5HashResponseExtra(self, _algorithm='md5', _hash=md5):
"""
L{calcResponse} accepts the C{'md5'} algorithm and returns an MD5 hash
of its parameters, including the nonce count, client nonce, and QoP
value if they are specified.
"""
hashA1 = 'abc123'
hashA2 = '789xyz'
nonce = 'lmnopq'
nonceCount = '00000004'
clientNonce = 'abcxyz123'
qop = 'auth'
response = '%s:%s:%s:%s:%s:%s' % (
hashA1, nonce, nonceCount, clientNonce, qop, hashA2)
expected = _hash(response).hexdigest()
digest = calcResponse(
hashA1, hashA2, _algorithm, nonce, nonceCount, clientNonce, qop)
self.assertEqual(expected, digest)
def test_MD5SessionHashResponseExtra(self):
"""
L{calcResponse} accepts the C{'md5-sess'} algorithm and returns an MD5
hash of its parameters, including the nonce count, client nonce, and
QoP value if they are specified.
"""
self.test_MD5HashResponseExtra('md5-sess')
def test_SHAHashResponseExtra(self):
"""
L{calcResponse} accepts the C{'sha'} algorithm and returns a SHA hash
of its parameters, including the nonce count, client nonce, and QoP
value if they are specified.
"""
self.test_MD5HashResponseExtra('sha', sha1)
def formatResponse(self, quotes=True, **kw):
"""
Format all given keyword arguments and their values suitably for use as
the value of an HTTP header.
@type quotes: C{bool}
@param quotes: A flag indicating whether to quote the values of each
field in the response.
@param **kw: Keywords and C{str} values which will be treated as field
name/value pairs to include in the result.
@rtype: C{str}
@return: The given fields formatted for use as an HTTP header value.
"""
if 'username' not in kw:
kw['username'] = self.username
if 'realm' not in kw:
kw['realm'] = self.realm
if 'algorithm' not in kw:
kw['algorithm'] = self.algorithm
if 'qop' not in kw:
kw['qop'] = self.qop
if 'cnonce' not in kw:
kw['cnonce'] = self.cnonce
if 'uri' not in kw:
kw['uri'] = self.uri
if quotes:
quote = '"'
else:
quote = ''
return ', '.join([
'%s=%s%s%s' % (k, quote, v, quote)
for (k, v)
in kw.iteritems()
if v is not None])
def getDigestResponse(self, challenge, ncount):
"""
Calculate the response for the given challenge
"""
nonce = challenge.get('nonce')
algo = challenge.get('algorithm').lower()
qop = challenge.get('qop')
ha1 = calcHA1(
algo, self.username, self.realm, self.password, nonce, self.cnonce)
ha2 = calcHA2(algo, "GET", self.uri, qop, None)
expected = calcResponse(ha1, ha2, algo, nonce, ncount, self.cnonce, qop)
return expected
def test_response(self, quotes=True):
"""
L{DigestCredentialFactory.decode} accepts a digest challenge response
and parses it into an L{IUsernameHashedPassword} provider.
"""
challenge = self.credentialFactory.getChallenge(self.clientAddress.host)
nc = "00000001"
clientResponse = self.formatResponse(
quotes=quotes,
nonce=challenge['nonce'],
response=self.getDigestResponse(challenge, nc),
nc=nc,
opaque=challenge['opaque'])
creds = self.credentialFactory.decode(
clientResponse, self.method, self.clientAddress.host)
self.assertTrue(creds.checkPassword(self.password))
self.assertFalse(creds.checkPassword(self.password + 'wrong'))
def test_responseWithoutQuotes(self):
"""
L{DigestCredentialFactory.decode} accepts a digest challenge response
which does not quote the values of its fields and parses it into an
L{IUsernameHashedPassword} provider in the same way it would a
response which included quoted field values.
"""
self.test_response(False)
def test_caseInsensitiveAlgorithm(self):
"""
The case of the algorithm value in the response is ignored when
checking the credentials.
"""
self.algorithm = 'MD5'
self.test_response()
def test_md5DefaultAlgorithm(self):
"""
The algorithm defaults to MD5 if it is not supplied in the response.
"""
self.algorithm = None
self.test_response()
def test_responseWithoutClientIP(self):
"""
L{DigestCredentialFactory.decode} accepts a digest challenge response
even if the client address it is passed is C{None}.
"""
challenge = self.credentialFactory.getChallenge(None)
nc = "00000001"
clientResponse = self.formatResponse(
nonce=challenge['nonce'],
response=self.getDigestResponse(challenge, nc),
nc=nc,
opaque=challenge['opaque'])
creds = self.credentialFactory.decode(clientResponse, self.method, None)
self.assertTrue(creds.checkPassword(self.password))
self.assertFalse(creds.checkPassword(self.password + 'wrong'))
def test_multiResponse(self):
"""
L{DigestCredentialFactory.decode} handles multiple responses to a
single challenge.
"""
challenge = self.credentialFactory.getChallenge(self.clientAddress.host)
nc = "00000001"
clientResponse = self.formatResponse(
nonce=challenge['nonce'],
response=self.getDigestResponse(challenge, nc),
nc=nc,
opaque=challenge['opaque'])
creds = self.credentialFactory.decode(clientResponse, self.method,
self.clientAddress.host)
self.assertTrue(creds.checkPassword(self.password))
self.assertFalse(creds.checkPassword(self.password + 'wrong'))
nc = "00000002"
clientResponse = self.formatResponse(
nonce=challenge['nonce'],
response=self.getDigestResponse(challenge, nc),
nc=nc,
opaque=challenge['opaque'])
creds = self.credentialFactory.decode(clientResponse, self.method,
self.clientAddress.host)
self.assertTrue(creds.checkPassword(self.password))
self.assertFalse(creds.checkPassword(self.password + 'wrong'))
def test_failsWithDifferentMethod(self):
"""
L{DigestCredentialFactory.decode} returns an L{IUsernameHashedPassword}
provider which rejects a correct password for the given user if the
challenge response request is made using a different HTTP method than
was used to request the initial challenge.
"""
challenge = self.credentialFactory.getChallenge(self.clientAddress.host)
nc = "00000001"
clientResponse = self.formatResponse(
nonce=challenge['nonce'],
response=self.getDigestResponse(challenge, nc),
nc=nc,
opaque=challenge['opaque'])
creds = self.credentialFactory.decode(clientResponse, 'POST',
self.clientAddress.host)
self.assertFalse(creds.checkPassword(self.password))
self.assertFalse(creds.checkPassword(self.password + 'wrong'))
def test_noUsername(self):
"""
L{DigestCredentialFactory.decode} raises L{LoginFailed} if the response
has no username field or if the username field is empty.
"""
# Check for no username
e = self.assertRaises(
LoginFailed,
self.credentialFactory.decode,
self.formatResponse(username=None),
self.method, self.clientAddress.host)
self.assertEqual(str(e), "Invalid response, no username given.")
# Check for an empty username
e = self.assertRaises(
LoginFailed,
self.credentialFactory.decode,
self.formatResponse(username=""),
self.method, self.clientAddress.host)
self.assertEqual(str(e), "Invalid response, no username given.")
def test_noNonce(self):
"""
L{DigestCredentialFactory.decode} raises L{LoginFailed} if the response
has no nonce.
"""
e = self.assertRaises(
LoginFailed,
self.credentialFactory.decode,
self.formatResponse(opaque="abc123"),
self.method, self.clientAddress.host)
self.assertEqual(str(e), "Invalid response, no nonce given.")
def test_noOpaque(self):
"""
L{DigestCredentialFactory.decode} raises L{LoginFailed} if the response
has no opaque.
"""
e = self.assertRaises(
LoginFailed,
self.credentialFactory.decode,
self.formatResponse(),
self.method, self.clientAddress.host)
self.assertEqual(str(e), "Invalid response, no opaque given.")
def test_checkHash(self):
"""
L{DigestCredentialFactory.decode} returns an L{IUsernameDigestHash}
provider which can verify a hash of the form 'username:realm:password'.
"""
challenge = self.credentialFactory.getChallenge(self.clientAddress.host)
nc = "00000001"
clientResponse = self.formatResponse(
nonce=challenge['nonce'],
response=self.getDigestResponse(challenge, nc),
nc=nc,
opaque=challenge['opaque'])
creds = self.credentialFactory.decode(clientResponse, self.method,
self.clientAddress.host)
self.assertTrue(verifyObject(IUsernameDigestHash, creds))
cleartext = '%s:%s:%s' % (self.username, self.realm, self.password)
hash = md5(cleartext)
self.assertTrue(creds.checkHash(hash.hexdigest()))
hash.update('wrong')
self.assertFalse(creds.checkHash(hash.hexdigest()))
def test_invalidOpaque(self):
"""
L{DigestCredentialFactory.decode} raises L{LoginFailed} when the opaque
value does not contain all the required parts.
"""
credentialFactory = FakeDigestCredentialFactory(self.algorithm,
self.realm)
challenge = credentialFactory.getChallenge(self.clientAddress.host)
exc = self.assertRaises(
LoginFailed,
credentialFactory._verifyOpaque,
'badOpaque',
challenge['nonce'],
self.clientAddress.host)
self.assertEqual(str(exc), 'Invalid response, invalid opaque value')
badOpaque = 'foo-' + b64encode('nonce,clientip')
exc = self.assertRaises(
LoginFailed,
credentialFactory._verifyOpaque,
badOpaque,
challenge['nonce'],
self.clientAddress.host)
self.assertEqual(str(exc), 'Invalid response, invalid opaque value')
exc = self.assertRaises(
LoginFailed,
credentialFactory._verifyOpaque,
'',
challenge['nonce'],
self.clientAddress.host)
self.assertEqual(str(exc), 'Invalid response, invalid opaque value')
badOpaque = (
'foo-' + b64encode('%s,%s,foobar' % (
challenge['nonce'],
self.clientAddress.host)))
exc = self.assertRaises(
LoginFailed,
credentialFactory._verifyOpaque,
badOpaque,
challenge['nonce'],
self.clientAddress.host)
self.assertEqual(
str(exc), 'Invalid response, invalid opaque/time values')
def test_incompatibleNonce(self):
"""
L{DigestCredentialFactory.decode} raises L{LoginFailed} when the given
nonce from the response does not match the nonce encoded in the opaque.
"""
credentialFactory = FakeDigestCredentialFactory(self.algorithm, self.realm)
challenge = credentialFactory.getChallenge(self.clientAddress.host)
badNonceOpaque = credentialFactory._generateOpaque(
'1234567890',
self.clientAddress.host)
exc = self.assertRaises(
LoginFailed,
credentialFactory._verifyOpaque,
badNonceOpaque,
challenge['nonce'],
self.clientAddress.host)
self.assertEqual(
str(exc),
'Invalid response, incompatible opaque/nonce values')
exc = self.assertRaises(
LoginFailed,
credentialFactory._verifyOpaque,
badNonceOpaque,
'',
self.clientAddress.host)
self.assertEqual(
str(exc),
'Invalid response, incompatible opaque/nonce values')
def test_incompatibleClientIP(self):
"""
L{DigestCredentialFactory.decode} raises L{LoginFailed} when the
request comes from a client IP other than what is encoded in the
opaque.
"""
credentialFactory = FakeDigestCredentialFactory(self.algorithm, self.realm)
challenge = credentialFactory.getChallenge(self.clientAddress.host)
badAddress = '10.0.0.1'
# Sanity check
self.assertNotEqual(self.clientAddress.host, badAddress)
badNonceOpaque = credentialFactory._generateOpaque(
challenge['nonce'], badAddress)
self.assertRaises(
LoginFailed,
credentialFactory._verifyOpaque,
badNonceOpaque,
challenge['nonce'],
self.clientAddress.host)
def test_oldNonce(self):
"""
L{DigestCredentialFactory.decode} raises L{LoginFailed} when the given
opaque is older than C{DigestCredentialFactory.CHALLENGE_LIFETIME_SECS}
"""
credentialFactory = FakeDigestCredentialFactory(self.algorithm,
self.realm)
challenge = credentialFactory.getChallenge(self.clientAddress.host)
key = '%s,%s,%s' % (challenge['nonce'],
self.clientAddress.host,
'-137876876')
digest = md5(key + credentialFactory.privateKey).hexdigest()
ekey = b64encode(key)
oldNonceOpaque = '%s-%s' % (digest, ekey.strip('\n'))
self.assertRaises(
LoginFailed,
credentialFactory._verifyOpaque,
oldNonceOpaque,
challenge['nonce'],
self.clientAddress.host)
def test_mismatchedOpaqueChecksum(self):
"""
L{DigestCredentialFactory.decode} raises L{LoginFailed} when the opaque
checksum fails verification.
"""
credentialFactory = FakeDigestCredentialFactory(self.algorithm,
self.realm)
challenge = credentialFactory.getChallenge(self.clientAddress.host)
key = '%s,%s,%s' % (challenge['nonce'],
self.clientAddress.host,
'0')
digest = md5(key + 'this is not the right pkey').hexdigest()
badChecksum = '%s-%s' % (digest, b64encode(key))
self.assertRaises(
LoginFailed,
credentialFactory._verifyOpaque,
badChecksum,
challenge['nonce'],
self.clientAddress.host)
def test_incompatibleCalcHA1Options(self):
"""
L{calcHA1} raises L{TypeError} when any of the pszUsername, pszRealm,
or pszPassword arguments are specified with the preHA1 keyword
argument.
"""
arguments = (
("user", "realm", "password", "preHA1"),
(None, "realm", None, "preHA1"),
(None, None, "password", "preHA1"),
)
for pszUsername, pszRealm, pszPassword, preHA1 in arguments:
self.assertRaises(
TypeError,
calcHA1,
"md5",
pszUsername,
pszRealm,
pszPassword,
"nonce",
"cnonce",
preHA1=preHA1)
def test_noNewlineOpaque(self):
"""
L{DigestCredentialFactory._generateOpaque} returns a value without
newlines, regardless of the length of the nonce.
"""
opaque = self.credentialFactory._generateOpaque(
"long nonce " * 10, None)
self.assertNotIn('\n', opaque)
|
agpl-3.0
| -2,125,291,549,392,893,000 | 2,792,995,141,737,368,600 | 33.962742 | 83 | 0.596377 | false |
kashyap32/scrapy
|
tests/test_webclient.py
|
112
|
12875
|
"""
from twisted.internet import defer
Tests borrowed from the twisted.web.client tests.
"""
import os
from six.moves.urllib.parse import urlparse
from twisted.trial import unittest
from twisted.web import server, static, error, util
from twisted.internet import reactor, defer
from twisted.test.proto_helpers import StringTransport
from twisted.python.filepath import FilePath
from twisted.protocols.policies import WrappingFactory
from scrapy.core.downloader import webclient as client
from scrapy.http import Request, Headers
def getPage(url, contextFactory=None, *args, **kwargs):
"""Adapted version of twisted.web.client.getPage"""
def _clientfactory(*args, **kwargs):
timeout = kwargs.pop('timeout', 0)
f = client.ScrapyHTTPClientFactory(Request(*args, **kwargs), timeout=timeout)
f.deferred.addCallback(lambda r: r.body)
return f
from twisted.web.client import _makeGetterFactory
return _makeGetterFactory(url, _clientfactory,
contextFactory=contextFactory, *args, **kwargs).deferred
class ParseUrlTestCase(unittest.TestCase):
"""Test URL parsing facility and defaults values."""
def _parse(self, url):
f = client.ScrapyHTTPClientFactory(Request(url))
return (f.scheme, f.netloc, f.host, f.port, f.path)
def testParse(self):
lip = '127.0.0.1'
tests = (
("http://127.0.0.1?c=v&c2=v2#fragment", ('http', lip, lip, 80, '/?c=v&c2=v2')),
("http://127.0.0.1/?c=v&c2=v2#fragment", ('http', lip, lip, 80, '/?c=v&c2=v2')),
("http://127.0.0.1/foo?c=v&c2=v2#frag", ('http', lip, lip, 80, '/foo?c=v&c2=v2')),
("http://127.0.0.1:100?c=v&c2=v2#fragment", ('http', lip+':100', lip, 100, '/?c=v&c2=v2')),
("http://127.0.0.1:100/?c=v&c2=v2#frag", ('http', lip+':100', lip, 100, '/?c=v&c2=v2')),
("http://127.0.0.1:100/foo?c=v&c2=v2#frag", ('http', lip+':100', lip, 100, '/foo?c=v&c2=v2')),
("http://127.0.0.1", ('http', lip, lip, 80, '/')),
("http://127.0.0.1/", ('http', lip, lip, 80, '/')),
("http://127.0.0.1/foo", ('http', lip, lip, 80, '/foo')),
("http://127.0.0.1?param=value", ('http', lip, lip, 80, '/?param=value')),
("http://127.0.0.1/?param=value", ('http', lip, lip, 80, '/?param=value')),
("http://127.0.0.1:12345/foo", ('http', lip+':12345', lip, 12345, '/foo')),
("http://spam:12345/foo", ('http', 'spam:12345', 'spam', 12345, '/foo')),
("http://spam.test.org/foo", ('http', 'spam.test.org', 'spam.test.org', 80, '/foo')),
("https://127.0.0.1/foo", ('https', lip, lip, 443, '/foo')),
("https://127.0.0.1/?param=value", ('https', lip, lip, 443, '/?param=value')),
("https://127.0.0.1:12345/", ('https', lip+':12345', lip, 12345, '/')),
("http://scrapytest.org/foo ", ('http', 'scrapytest.org', 'scrapytest.org', 80, '/foo')),
("http://egg:7890 ", ('http', 'egg:7890', 'egg', 7890, '/')),
)
for url, test in tests:
self.assertEquals(client._parse(url), test, url)
def test_externalUnicodeInterference(self):
"""
L{client._parse} should return C{str} for the scheme, host, and path
elements of its return tuple, even when passed an URL which has
previously been passed to L{urlparse} as a C{unicode} string.
"""
badInput = u'http://example.com/path'
goodInput = badInput.encode('ascii')
urlparse(badInput)
scheme, netloc, host, port, path = self._parse(goodInput)
self.assertTrue(isinstance(scheme, str))
self.assertTrue(isinstance(netloc, str))
self.assertTrue(isinstance(host, str))
self.assertTrue(isinstance(path, str))
self.assertTrue(isinstance(port, int))
class ScrapyHTTPPageGetterTests(unittest.TestCase):
def test_earlyHeaders(self):
# basic test stolen from twisted HTTPageGetter
factory = client.ScrapyHTTPClientFactory(Request(
url='http://foo/bar',
body="some data",
headers={
'Host': 'example.net',
'User-Agent': 'fooble',
'Cookie': 'blah blah',
'Content-Length': '12981',
'Useful': 'value'}))
self._test(factory,
"GET /bar HTTP/1.0\r\n"
"Content-Length: 9\r\n"
"Useful: value\r\n"
"Connection: close\r\n"
"User-Agent: fooble\r\n"
"Host: example.net\r\n"
"Cookie: blah blah\r\n"
"\r\n"
"some data")
# test minimal sent headers
factory = client.ScrapyHTTPClientFactory(Request('http://foo/bar'))
self._test(factory,
"GET /bar HTTP/1.0\r\n"
"Host: foo\r\n"
"\r\n")
# test a simple POST with body and content-type
factory = client.ScrapyHTTPClientFactory(Request(
method='POST',
url='http://foo/bar',
body='name=value',
headers={'Content-Type': 'application/x-www-form-urlencoded'}))
self._test(factory,
"POST /bar HTTP/1.0\r\n"
"Host: foo\r\n"
"Connection: close\r\n"
"Content-Type: application/x-www-form-urlencoded\r\n"
"Content-Length: 10\r\n"
"\r\n"
"name=value")
# test a POST method with no body provided
factory = client.ScrapyHTTPClientFactory(Request(
method='POST',
url='http://foo/bar'
))
self._test(factory,
"POST /bar HTTP/1.0\r\n"
"Host: foo\r\n"
"Content-Length: 0\r\n"
"\r\n")
# test with single and multivalued headers
factory = client.ScrapyHTTPClientFactory(Request(
url='http://foo/bar',
headers={
'X-Meta-Single': 'single',
'X-Meta-Multivalued': ['value1', 'value2'],
}))
self._test(factory,
"GET /bar HTTP/1.0\r\n"
"Host: foo\r\n"
"X-Meta-Multivalued: value1\r\n"
"X-Meta-Multivalued: value2\r\n"
"X-Meta-Single: single\r\n"
"\r\n")
# same test with single and multivalued headers but using Headers class
factory = client.ScrapyHTTPClientFactory(Request(
url='http://foo/bar',
headers=Headers({
'X-Meta-Single': 'single',
'X-Meta-Multivalued': ['value1', 'value2'],
})))
self._test(factory,
"GET /bar HTTP/1.0\r\n"
"Host: foo\r\n"
"X-Meta-Multivalued: value1\r\n"
"X-Meta-Multivalued: value2\r\n"
"X-Meta-Single: single\r\n"
"\r\n")
def _test(self, factory, testvalue):
transport = StringTransport()
protocol = client.ScrapyHTTPPageGetter()
protocol.factory = factory
protocol.makeConnection(transport)
self.assertEqual(
set(transport.value().splitlines()),
set(testvalue.splitlines()))
return testvalue
def test_non_standard_line_endings(self):
# regression test for: http://dev.scrapy.org/ticket/258
factory = client.ScrapyHTTPClientFactory(Request(
url='http://foo/bar'))
protocol = client.ScrapyHTTPPageGetter()
protocol.factory = factory
protocol.headers = Headers()
protocol.dataReceived("HTTP/1.0 200 OK\n")
protocol.dataReceived("Hello: World\n")
protocol.dataReceived("Foo: Bar\n")
protocol.dataReceived("\n")
self.assertEqual(protocol.headers,
Headers({'Hello': ['World'], 'Foo': ['Bar']}))
from twisted.web.test.test_webclient import ForeverTakingResource, \
ErrorResource, NoLengthResource, HostHeaderResource, \
PayloadResource, BrokenDownloadResource
class WebClientTestCase(unittest.TestCase):
def _listen(self, site):
return reactor.listenTCP(0, site, interface="127.0.0.1")
def setUp(self):
name = self.mktemp()
os.mkdir(name)
FilePath(name).child("file").setContent("0123456789")
r = static.File(name)
r.putChild("redirect", util.Redirect("/file"))
r.putChild("wait", ForeverTakingResource())
r.putChild("error", ErrorResource())
r.putChild("nolength", NoLengthResource())
r.putChild("host", HostHeaderResource())
r.putChild("payload", PayloadResource())
r.putChild("broken", BrokenDownloadResource())
self.site = server.Site(r, timeout=None)
self.wrapper = WrappingFactory(self.site)
self.port = self._listen(self.wrapper)
self.portno = self.port.getHost().port
def tearDown(self):
return self.port.stopListening()
def getURL(self, path):
return "http://127.0.0.1:%d/%s" % (self.portno, path)
def testPayload(self):
s = "0123456789" * 10
return getPage(self.getURL("payload"), body=s).addCallback(self.assertEquals, s)
def testHostHeader(self):
# if we pass Host header explicitly, it should be used, otherwise
# it should extract from url
return defer.gatherResults([
getPage(self.getURL("host")).addCallback(self.assertEquals, "127.0.0.1:%d" % self.portno),
getPage(self.getURL("host"), headers={"Host": "www.example.com"}).addCallback(self.assertEquals, "www.example.com")])
def test_getPage(self):
"""
L{client.getPage} returns a L{Deferred} which is called back with
the body of the response if the default method B{GET} is used.
"""
d = getPage(self.getURL("file"))
d.addCallback(self.assertEquals, "0123456789")
return d
def test_getPageHead(self):
"""
L{client.getPage} returns a L{Deferred} which is called back with
the empty string if the method is C{HEAD} and there is a successful
response code.
"""
def _getPage(method):
return getPage(self.getURL("file"), method=method)
return defer.gatherResults([
_getPage("head").addCallback(self.assertEqual, ""),
_getPage("HEAD").addCallback(self.assertEqual, "")])
def test_timeoutNotTriggering(self):
"""
When a non-zero timeout is passed to L{getPage} and the page is
retrieved before the timeout period elapses, the L{Deferred} is
called back with the contents of the page.
"""
d = getPage(self.getURL("host"), timeout=100)
d.addCallback(self.assertEquals, "127.0.0.1:%d" % self.portno)
return d
def test_timeoutTriggering(self):
"""
When a non-zero timeout is passed to L{getPage} and that many
        seconds elapse before the server responds to the request, the
L{Deferred} is errbacked with a L{error.TimeoutError}.
"""
finished = self.assertFailure(
getPage(self.getURL("wait"), timeout=0.000001),
defer.TimeoutError)
def cleanup(passthrough):
# Clean up the server which is hanging around not doing
# anything.
connected = self.wrapper.protocols.keys()
# There might be nothing here if the server managed to already see
# that the connection was lost.
if connected:
connected[0].transport.loseConnection()
return passthrough
finished.addBoth(cleanup)
return finished
def testNotFound(self):
return getPage(self.getURL('notsuchfile')).addCallback(self._cbNoSuchFile)
def _cbNoSuchFile(self, pageData):
self.assert_('404 - No Such Resource' in pageData)
def testFactoryInfo(self):
url = self.getURL('file')
scheme, netloc, host, port, path = client._parse(url)
factory = client.ScrapyHTTPClientFactory(Request(url))
reactor.connectTCP(host, port, factory)
return factory.deferred.addCallback(self._cbFactoryInfo, factory)
def _cbFactoryInfo(self, ignoredResult, factory):
self.assertEquals(factory.status, '200')
self.assert_(factory.version.startswith('HTTP/'))
self.assertEquals(factory.message, 'OK')
self.assertEquals(factory.response_headers['content-length'], '10')
def testRedirect(self):
return getPage(self.getURL("redirect")).addCallback(self._cbRedirect)
def _cbRedirect(self, pageData):
self.assertEquals(pageData,
'\n<html>\n <head>\n <meta http-equiv="refresh" content="0;URL=/file">\n'
' </head>\n <body bgcolor="#FFFFFF" text="#000000">\n '
'<a href="/file">click here</a>\n </body>\n</html>\n')
|
bsd-3-clause
| -7,281,295,063,826,596,000 | -1,761,567,311,732,763,000 | 38.253049 | 129 | 0.583068 | false |
javierTerry/PyGithub
|
github/Legacy.py
|
72
|
7248
|
# -*- coding: utf-8 -*-
# ########################## Copyrights and license ############################
# #
# Copyright 2012 Steve English <[email protected]> #
# Copyright 2012 Vincent Jacques <[email protected]> #
# Copyright 2012 Zearin <[email protected]> #
# Copyright 2013 AKFish <[email protected]> #
# Copyright 2013 Vincent Jacques <[email protected]> #
# #
# This file is part of PyGithub. http://jacquev6.github.com/PyGithub/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
# ##############################################################################
import urlparse
import github.PaginatedList
class PaginatedList(github.PaginatedList.PaginatedListBase):
def __init__(self, url, args, requester, key, convert, contentClass):
github.PaginatedList.PaginatedListBase.__init__(self)
self.__url = url
self.__args = args
self.__requester = requester
self.__key = key
self.__convert = convert
self.__contentClass = contentClass
self.__nextPage = 0
self.__continue = True
def _couldGrow(self):
return self.__continue
def _fetchNextPage(self):
page = self.__nextPage
self.__nextPage += 1
return self.get_page(page)
def get_page(self, page):
assert isinstance(page, (int, long)), page
args = dict(self.__args)
if page != 0:
args["start_page"] = page + 1
headers, data = self.__requester.requestJsonAndCheck(
"GET",
self.__url,
parameters=args
)
self.__continue = len(data[self.__key]) > 0
return [
self.__contentClass(self.__requester, headers, self.__convert(element), completed=False)
for element in data[self.__key]
]
def convertUser(attributes):
convertedAttributes = {
"login": attributes["login"],
"url": "/users/" + attributes["login"],
}
if "gravatar_id" in attributes: # pragma no branch
convertedAttributes["gravatar_id"] = attributes["gravatar_id"]
if "followers" in attributes: # pragma no branch
convertedAttributes["followers"] = attributes["followers"]
if "repos" in attributes: # pragma no branch
convertedAttributes["public_repos"] = attributes["repos"]
if "name" in attributes: # pragma no branch
convertedAttributes["name"] = attributes["name"]
if "created_at" in attributes: # pragma no branch
convertedAttributes["created_at"] = attributes["created_at"]
if "location" in attributes: # pragma no branch
convertedAttributes["location"] = attributes["location"]
return convertedAttributes
def convertRepo(attributes):
convertedAttributes = {
"owner": {"login": attributes["owner"], "url": "/users/" + attributes["owner"]},
"url": "/repos/" + attributes["owner"] + "/" + attributes["name"],
}
if "pushed_at" in attributes: # pragma no branch
convertedAttributes["pushed_at"] = attributes["pushed_at"]
if "homepage" in attributes: # pragma no branch
convertedAttributes["homepage"] = attributes["homepage"]
if "created_at" in attributes: # pragma no branch
convertedAttributes["created_at"] = attributes["created_at"]
if "watchers" in attributes: # pragma no branch
convertedAttributes["watchers"] = attributes["watchers"]
if "has_downloads" in attributes: # pragma no branch
convertedAttributes["has_downloads"] = attributes["has_downloads"]
if "fork" in attributes: # pragma no branch
convertedAttributes["fork"] = attributes["fork"]
if "has_issues" in attributes: # pragma no branch
convertedAttributes["has_issues"] = attributes["has_issues"]
if "has_wiki" in attributes: # pragma no branch
convertedAttributes["has_wiki"] = attributes["has_wiki"]
if "forks" in attributes: # pragma no branch
convertedAttributes["forks"] = attributes["forks"]
if "size" in attributes: # pragma no branch
convertedAttributes["size"] = attributes["size"]
if "private" in attributes: # pragma no branch
convertedAttributes["private"] = attributes["private"]
if "open_issues" in attributes: # pragma no branch
convertedAttributes["open_issues"] = attributes["open_issues"]
if "description" in attributes: # pragma no branch
convertedAttributes["description"] = attributes["description"]
if "language" in attributes: # pragma no branch
convertedAttributes["language"] = attributes["language"]
if "name" in attributes: # pragma no branch
convertedAttributes["name"] = attributes["name"]
return convertedAttributes
def convertIssue(attributes):
convertedAttributes = {
"number": attributes["number"],
"url": "/repos" + urlparse.urlparse(attributes["html_url"]).path,
"user": {"login": attributes["user"], "url": "/users/" + attributes["user"]},
}
if "labels" in attributes: # pragma no branch
convertedAttributes["labels"] = [{"name": label} for label in attributes["labels"]]
if "title" in attributes: # pragma no branch
convertedAttributes["title"] = attributes["title"]
if "created_at" in attributes: # pragma no branch
convertedAttributes["created_at"] = attributes["created_at"]
if "comments" in attributes: # pragma no branch
convertedAttributes["comments"] = attributes["comments"]
if "body" in attributes: # pragma no branch
convertedAttributes["body"] = attributes["body"]
if "updated_at" in attributes: # pragma no branch
convertedAttributes["updated_at"] = attributes["updated_at"]
if "state" in attributes: # pragma no branch
convertedAttributes["state"] = attributes["state"]
return convertedAttributes
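# --- Illustrative sketch (editor's addition, not part of PyGithub) -----------
# The convert* helpers above rewrite legacy search payloads into the attribute
# names expected by the v3 objects. A tiny self-check with a hand-written
# legacy-style dict (the sample values are assumptions):
if __name__ == "__main__":  # pragma: no cover
    sample = {"login": "octocat", "repos": 8, "name": "The Octocat"}
    converted = convertUser(sample)
    assert converted["url"] == "/users/octocat"
    assert converted["public_repos"] == 8
    assert converted["name"] == "The Octocat"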
|
gpl-3.0
| -4,813,327,838,354,566,000 | 5,445,570,949,751,496,000 | 47.644295 | 100 | 0.575745 | false |
350dotorg/Django
|
django/contrib/redirects/middleware.py
|
447
|
1105
|
from django.contrib.redirects.models import Redirect
from django import http
from django.conf import settings
class RedirectFallbackMiddleware(object):
def process_response(self, request, response):
if response.status_code != 404:
return response # No need to check for a redirect for non-404 responses.
path = request.get_full_path()
try:
r = Redirect.objects.get(site__id__exact=settings.SITE_ID, old_path=path)
except Redirect.DoesNotExist:
r = None
if r is None and settings.APPEND_SLASH:
# Try removing the trailing slash.
try:
r = Redirect.objects.get(site__id__exact=settings.SITE_ID,
old_path=path[:path.rfind('/')]+path[path.rfind('/')+1:])
except Redirect.DoesNotExist:
pass
if r is not None:
if r.new_path == '':
return http.HttpResponseGone()
return http.HttpResponsePermanentRedirect(r.new_path)
# No redirect was found. Return the response.
return response
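# --- Illustrative sketch (editor's addition, not part of Django) -------------
# The fallback only acts on 404 responses, so it is usually listed after
# CommonMiddleware. A minimal settings fragment, assuming the sites and
# redirects contrib apps are installed for the project:
#
#   MIDDLEWARE_CLASSES = (
#       'django.middleware.common.CommonMiddleware',
#       'django.contrib.redirects.middleware.RedirectFallbackMiddleware',
#   )
#   INSTALLED_APPS += ('django.contrib.sites', 'django.contrib.redirects')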
|
bsd-3-clause
| -1,676,129,636,543,134,200 | -565,424,783,123,447,100 | 39.925926 | 85 | 0.608145 | false |
quamilek/django-custard
|
custard/tests/settings.py
|
3
|
1394
|
# Django settings for testproject project.
import os
DIRNAME = os.path.dirname(__file__)
DEBUG = True
TEMPLATE_DEBUG = DEBUG
DEBUG_PROPAGATE_EXCEPTIONS = True
ADMINS = ()
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(DIRNAME, 'db.sqlite3'),
'TEST_NAME': os.path.join(DIRNAME, 'test_db.sqlite3'),
}
}
TIME_ZONE = 'Europe/Rome'
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
USE_I18N = True
USE_L10N = True
MEDIA_ROOT = ''
MEDIA_URL = ''
SECRET_KEY = 'vaO4Y<g#YRWG8;Md8noiLp>.w(w~q_b=|1`?9<x>0KxA%UB!63'
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
ROOT_URLCONF = 'custard.tests.urls'
TEMPLATE_DIRS = ()
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.admin',
'custard',
'custard.tests',
)
TEST_RUNNER = 'django.test.runner.DiscoverRunner'
STATIC_URL = '/static/'
|
mit
| -2,381,574,024,016,503,000 | 4,674,000,678,327,507,000 | 21.852459 | 65 | 0.686514 | false |
Xeralux/tensorflow
|
tensorflow/contrib/metrics/python/metrics/classification.py
|
111
|
2647
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Classification metrics library."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
# TODO(nsilberman): move into metrics/python/ops/
def accuracy(predictions, labels, weights=None, name=None):
"""Computes the percentage of times that predictions matches labels.
Args:
predictions: the predicted values, a `Tensor` whose dtype and shape
matches 'labels'.
labels: the ground truth values, a `Tensor` of any shape and
bool, integer, or string dtype.
weights: None or `Tensor` of float values to reweight the accuracy.
name: A name for the operation (optional).
Returns:
Accuracy `Tensor`.
Raises:
ValueError: if dtypes don't match or
if dtype is not bool, integer, or string.
"""
if not (labels.dtype.is_integer or
labels.dtype in (dtypes.bool, dtypes.string)):
raise ValueError(
'Labels should have bool, integer, or string dtype, not %r' %
labels.dtype)
if not labels.dtype.is_compatible_with(predictions.dtype):
raise ValueError('Dtypes of predictions and labels should match. '
'Given: predictions (%r) and labels (%r)' %
(predictions.dtype, labels.dtype))
with ops.name_scope(name, 'accuracy', values=[predictions, labels]):
is_correct = math_ops.cast(
math_ops.equal(predictions, labels), dtypes.float32)
if weights is not None:
is_correct = math_ops.multiply(is_correct, weights)
num_values = math_ops.multiply(weights, array_ops.ones_like(is_correct))
return math_ops.div(math_ops.reduce_sum(is_correct),
math_ops.reduce_sum(num_values))
return math_ops.reduce_mean(is_correct)
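# --- Illustrative sketch (editor's addition, not part of TensorFlow) ---------
# Minimal graph-mode usage of accuracy() with constant tensors; the session
# boilerplate below is an assumption matching the contrib-era API this file
# targets.
if __name__ == '__main__':  # pragma: no cover
  from tensorflow.python.client import session
  from tensorflow.python.framework import constant_op
  predictions = constant_op.constant([1, 0, 1, 1])
  labels = constant_op.constant([1, 0, 0, 1])
  acc = accuracy(predictions, labels)
  with session.Session() as sess:
    print(sess.run(acc))  # three of four predictions match -> 0.75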
|
apache-2.0
| 651,643,013,983,662,800 | -8,133,499,863,354,866,000 | 40.359375 | 80 | 0.678504 | false |
groschovskiy/personfinder
|
app/pytz/zoneinfo/America/Winnipeg.py
|
9
|
8004
|
'''tzinfo timezone information for America/Winnipeg.'''
from pytz.tzinfo import DstTzInfo
from pytz.tzinfo import memorized_datetime as d
from pytz.tzinfo import memorized_ttinfo as i
class Winnipeg(DstTzInfo):
'''America/Winnipeg timezone definition. See datetime.tzinfo for details'''
zone = 'America/Winnipeg'
_utc_transition_times = [
d(1,1,1,0,0,0),
d(1916,4,23,6,0,0),
d(1916,9,17,5,0,0),
d(1918,4,14,8,0,0),
d(1918,10,31,7,0,0),
d(1937,5,16,8,0,0),
d(1937,9,26,7,0,0),
d(1942,2,9,8,0,0),
d(1945,8,14,23,0,0),
d(1945,9,30,7,0,0),
d(1946,5,12,8,0,0),
d(1946,10,13,7,0,0),
d(1947,4,27,8,0,0),
d(1947,9,28,7,0,0),
d(1948,4,25,8,0,0),
d(1948,9,26,7,0,0),
d(1949,4,24,8,0,0),
d(1949,9,25,7,0,0),
d(1950,5,1,8,0,0),
d(1950,9,30,7,0,0),
d(1951,4,29,8,0,0),
d(1951,9,30,7,0,0),
d(1952,4,27,8,0,0),
d(1952,9,28,7,0,0),
d(1953,4,26,8,0,0),
d(1953,9,27,7,0,0),
d(1954,4,25,8,0,0),
d(1954,9,26,7,0,0),
d(1955,4,24,8,0,0),
d(1955,9,25,7,0,0),
d(1956,4,29,8,0,0),
d(1956,9,30,7,0,0),
d(1957,4,28,8,0,0),
d(1957,9,29,7,0,0),
d(1958,4,27,8,0,0),
d(1958,9,28,7,0,0),
d(1959,4,26,8,0,0),
d(1959,10,25,7,0,0),
d(1960,4,24,8,0,0),
d(1960,9,25,7,0,0),
d(1963,4,28,8,0,0),
d(1963,9,22,7,0,0),
d(1966,4,24,8,0,0),
d(1966,10,30,8,0,0),
d(1967,4,30,8,0,0),
d(1967,10,29,8,0,0),
d(1968,4,28,8,0,0),
d(1968,10,27,8,0,0),
d(1969,4,27,8,0,0),
d(1969,10,26,8,0,0),
d(1970,4,26,8,0,0),
d(1970,10,25,8,0,0),
d(1971,4,25,8,0,0),
d(1971,10,31,8,0,0),
d(1972,4,30,8,0,0),
d(1972,10,29,8,0,0),
d(1973,4,29,8,0,0),
d(1973,10,28,8,0,0),
d(1974,4,28,8,0,0),
d(1974,10,27,8,0,0),
d(1975,4,27,8,0,0),
d(1975,10,26,8,0,0),
d(1976,4,25,8,0,0),
d(1976,10,31,8,0,0),
d(1977,4,24,8,0,0),
d(1977,10,30,8,0,0),
d(1978,4,30,8,0,0),
d(1978,10,29,8,0,0),
d(1979,4,29,8,0,0),
d(1979,10,28,8,0,0),
d(1980,4,27,8,0,0),
d(1980,10,26,8,0,0),
d(1981,4,26,8,0,0),
d(1981,10,25,8,0,0),
d(1982,4,25,8,0,0),
d(1982,10,31,8,0,0),
d(1983,4,24,8,0,0),
d(1983,10,30,8,0,0),
d(1984,4,29,8,0,0),
d(1984,10,28,8,0,0),
d(1985,4,28,8,0,0),
d(1985,10,27,8,0,0),
d(1986,4,27,8,0,0),
d(1986,10,26,8,0,0),
d(1987,4,5,8,0,0),
d(1987,10,25,8,0,0),
d(1988,4,3,8,0,0),
d(1988,10,30,8,0,0),
d(1989,4,2,8,0,0),
d(1989,10,29,8,0,0),
d(1990,4,1,8,0,0),
d(1990,10,28,8,0,0),
d(1991,4,7,8,0,0),
d(1991,10,27,8,0,0),
d(1992,4,5,8,0,0),
d(1992,10,25,8,0,0),
d(1993,4,4,8,0,0),
d(1993,10,31,8,0,0),
d(1994,4,3,8,0,0),
d(1994,10,30,8,0,0),
d(1995,4,2,8,0,0),
d(1995,10,29,8,0,0),
d(1996,4,7,8,0,0),
d(1996,10,27,8,0,0),
d(1997,4,6,8,0,0),
d(1997,10,26,8,0,0),
d(1998,4,5,8,0,0),
d(1998,10,25,8,0,0),
d(1999,4,4,8,0,0),
d(1999,10,31,8,0,0),
d(2000,4,2,8,0,0),
d(2000,10,29,8,0,0),
d(2001,4,1,8,0,0),
d(2001,10,28,8,0,0),
d(2002,4,7,8,0,0),
d(2002,10,27,8,0,0),
d(2003,4,6,8,0,0),
d(2003,10,26,8,0,0),
d(2004,4,4,8,0,0),
d(2004,10,31,8,0,0),
d(2005,4,3,8,0,0),
d(2005,10,30,8,0,0),
d(2006,1,1,6,0,0),
d(2006,4,2,8,0,0),
d(2006,10,29,7,0,0),
d(2007,3,11,8,0,0),
d(2007,11,4,7,0,0),
d(2008,3,9,8,0,0),
d(2008,11,2,7,0,0),
d(2009,3,8,8,0,0),
d(2009,11,1,7,0,0),
d(2010,3,14,8,0,0),
d(2010,11,7,7,0,0),
d(2011,3,13,8,0,0),
d(2011,11,6,7,0,0),
d(2012,3,11,8,0,0),
d(2012,11,4,7,0,0),
d(2013,3,10,8,0,0),
d(2013,11,3,7,0,0),
d(2014,3,9,8,0,0),
d(2014,11,2,7,0,0),
d(2015,3,8,8,0,0),
d(2015,11,1,7,0,0),
d(2016,3,13,8,0,0),
d(2016,11,6,7,0,0),
d(2017,3,12,8,0,0),
d(2017,11,5,7,0,0),
d(2018,3,11,8,0,0),
d(2018,11,4,7,0,0),
d(2019,3,10,8,0,0),
d(2019,11,3,7,0,0),
d(2020,3,8,8,0,0),
d(2020,11,1,7,0,0),
d(2021,3,14,8,0,0),
d(2021,11,7,7,0,0),
d(2022,3,13,8,0,0),
d(2022,11,6,7,0,0),
d(2023,3,12,8,0,0),
d(2023,11,5,7,0,0),
d(2024,3,10,8,0,0),
d(2024,11,3,7,0,0),
d(2025,3,9,8,0,0),
d(2025,11,2,7,0,0),
d(2026,3,8,8,0,0),
d(2026,11,1,7,0,0),
d(2027,3,14,8,0,0),
d(2027,11,7,7,0,0),
d(2028,3,12,8,0,0),
d(2028,11,5,7,0,0),
d(2029,3,11,8,0,0),
d(2029,11,4,7,0,0),
d(2030,3,10,8,0,0),
d(2030,11,3,7,0,0),
d(2031,3,9,8,0,0),
d(2031,11,2,7,0,0),
d(2032,3,14,8,0,0),
d(2032,11,7,7,0,0),
d(2033,3,13,8,0,0),
d(2033,11,6,7,0,0),
d(2034,3,12,8,0,0),
d(2034,11,5,7,0,0),
d(2035,3,11,8,0,0),
d(2035,11,4,7,0,0),
d(2036,3,9,8,0,0),
d(2036,11,2,7,0,0),
d(2037,3,8,8,0,0),
d(2037,11,1,7,0,0),
]
_transition_info = [
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CWT'),
i(-18000,3600,'CPT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
]
Winnipeg = Winnipeg()
|
apache-2.0
| 1,011,624,977,888,427,100 | -3,919,709,658,295,088,000 | 19.418367 | 79 | 0.563843 | false |
laszlokiraly/OffenesParlament
|
offenesparlament/op_scraper/scraper/parlament/spiders/administrations.py
|
2
|
6360
|
# -*- coding: utf-8 -*-
import scrapy
from ansicolor import red
from ansicolor import cyan
from ansicolor import green
from ansicolor import blue
from django.db.models import Q
from urllib import urlencode
from parlament.settings import BASE_HOST
from parlament.spiders.persons import PersonsSpider
from parlament.resources.extractors.law import *
from parlament.resources.extractors.prelaw import *
from parlament.resources.extractors.person import *
from parlament.resources.extractors.opinion import *
from parlament.resources.extractors.administration import *
from op_scraper.models import Person
from op_scraper.models import Function
from op_scraper.models import Mandate
from op_scraper.models import Administration
from op_scraper.models import LegislativePeriod
class AdministrationsSpider(PersonsSpider):
BASE_URL = "{}/{}".format(BASE_HOST, "WWER/BREG/REG/filter.psp")
URLOPTIONS_ADMIN = {
'jsMode': '',
'xdocumentUri': '/WWER/BREG/REG/index.shtml',
'REG': '0',
'anwenden': 'Anwenden',
'FUNK': 'ALLE',
'RESS': 'ALLE',
'SUCH': '',
'listeId': '16',
'FBEZ': 'FW_016',
'pageNumber': '',
}
LLP = []
name = "administrations"
title = "Administrations (Regierungen) Spider"
persons_scraped = []
def __init__(self, **kw):
super(AdministrationsSpider, self).__init__(**kw)
self.start_urls = self.get_urls()
self.cookies_seen = set()
self.idlist = {}
#self.print_debug()
def get_urls(self):
"""
Overwritten from BaseSpider for non-LLP-based retrieval
"""
urls = []
url_options = urlencode(self.URLOPTIONS_ADMIN)
url = "{}?{}".format(self.BASE_URL, url_options)
urls.append(url)
return urls
def parse(self, response):
persons = ADMINISTRATION.LIST.xt(response)
callback_requests = []
self.logger.info(
"Scraping {} persons".format(len(persons)))
# Iterate all persons
for p in persons:
# Extract basic data
parl_id = p['source_link'].split('/')[-2]
p['source_link'] = "{}{}".format(BASE_HOST, p['source_link'])
# Create or update simple person's item
person_data = {
'reversed_name': p['reversed_name']
}
person_item, created_person = Person.objects.update_or_create(
source_link=p['source_link'],
parl_id=parl_id,
defaults=person_data
)
if created_person:
self.logger.debug(u"Created Person {}".format(
green(u'[{}]'.format(p['reversed_name']))))
else:
self.logger.debug(u"Updated Person {}".format(
green(u"[{}]".format(p['reversed_name']))
))
mandate = p['mandate']
administration_item = self.get_administration_item(mandate)
function_item, f_created = Function.objects.get_or_create(
short=mandate['short'],
title=mandate['title'])
if f_created:
self.logger.debug(u"Created function {}".format(
green(u'[{}]'.format(function_item.short))))
# Create and append mandate
try:
mandate_item, m_created = Mandate.objects.update_or_create(
person=person_item,
function=function_item,
administration=administration_item)
# Let's try to find a matching LLP for this administration so we can
# add it to this mandate
try:
llps = LegislativePeriod.objects\
.filter(
start_date__lte=mandate[
'administration']['end_date']
or datetime.date.today())\
.filter(
Q(end_date__isnull=True) | Q(
end_date__gte=mandate[
'administration']['start_date']
))\
.all()
if llps:
# always pick the latest, in case the adminstration
# overlapped
mandate_item.legislative_period = llps[
llps.count() - 1]
mandate_item.save()
except Exception as e:
# # nope, that didn't work, but nevermind #passiveaggressivecomment
# print e.message
# import ipdb
# ipdb.set_trace()
pass
except:
self.logger.warning(
red("Error saving Mandate {} ({})".format(function_item, administration_item)))
import ipdb
ipdb.set_trace()
person_item.save()
# First time we encounter a person, we scan her detail page too
if not parl_id in self.persons_scraped:
# Create Detail Page request
req = scrapy.Request(p['source_link'],
callback=self.parse_person_detail)
req.meta['person'] = {
'reversed_name': p['reversed_name'],
'source_link': p['source_link'],
'parl_id': parl_id
}
callback_requests.append(req)
self.persons_scraped.append(parl_id)
return callback_requests
def get_administration_item(self, mandate):
# Do we have this administration already?
admin_data = {
'start_date': mandate['administration']['start_date'],
'end_date': mandate['administration']['end_date']
}
admin_item, created = Administration.objects.update_or_create(
title=mandate['administration']['title'][0],
defaults=admin_data)
if created:
admin_item.save()
self.logger.debug(u"Created administration {}".format(
green(u'[{}]'.format(admin_item.title))))
return admin_item
|
bsd-2-clause
| -3,591,083,793,539,792,400 | 135,847,996,502,249,780 | 33.945055 | 99 | 0.522327 | false |
PriceElectronics/linux-imx
|
tools/perf/scripts/python/failed-syscalls-by-pid.py
|
11180
|
2058
|
# failed system call counts, by pid
# (c) 2010, Tom Zanussi <[email protected]>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide failed system call totals, broken down by pid.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
usage = "perf script -s syscall-counts-by-pid.py [comm|pid]\n";
for_comm = None
for_pid = None
if len(sys.argv) > 2:
sys.exit(usage)
if len(sys.argv) > 1:
try:
for_pid = int(sys.argv[1])
except:
for_comm = sys.argv[1]
syscalls = autodict()
def trace_begin():
print "Press control+C to stop and show the summary"
def trace_end():
print_error_totals()
def raw_syscalls__sys_exit(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, ret):
if (for_comm and common_comm != for_comm) or \
(for_pid and common_pid != for_pid ):
return
if ret < 0:
try:
syscalls[common_comm][common_pid][id][ret] += 1
except TypeError:
syscalls[common_comm][common_pid][id][ret] = 1
def print_error_totals():
if for_comm is not None:
print "\nsyscall errors for %s:\n\n" % (for_comm),
else:
print "\nsyscall errors:\n\n",
print "%-30s %10s\n" % ("comm [pid]", "count"),
print "%-30s %10s\n" % ("------------------------------", \
"----------"),
comm_keys = syscalls.keys()
for comm in comm_keys:
pid_keys = syscalls[comm].keys()
for pid in pid_keys:
print "\n%s [%d]\n" % (comm, pid),
id_keys = syscalls[comm][pid].keys()
for id in id_keys:
print " syscall: %-16s\n" % syscall_name(id),
ret_keys = syscalls[comm][pid][id].keys()
for ret, val in sorted(syscalls[comm][pid][id].iteritems(), key = lambda(k, v): (v, k), reverse = True):
print " err = %-20s %10d\n" % (strerror(ret), val),
|
gpl-2.0
| -1,327,133,766,004,623,600 | 8,859,095,346,389,711,000 | 27.191781 | 112 | 0.609815 | false |
Heufneutje/PyHeufyBot
|
heufybot/modules/util/urlfollow.py
|
1
|
6495
|
from twisted.plugin import IPlugin
from heufybot.channel import IRCChannel
from heufybot.moduleinterface import BotModule, IBotModule
from heufybot.utils.dummycontextmanager import DummyContextManager
from heufybot.utils.signaltimeout import TimeoutException
from zope.interface import implements
from bs4 import BeautifulSoup
from isodate import parse_duration
from urlparse import urlparse
import re, sys, time
if sys.platform != "win32":
from heufybot.utils.signaltimeout import SignalTimeout as SignalTimeout
else:
SignalTimeout = None
class URLFollow(BotModule):
implements(IPlugin, IBotModule)
name = "URLFollow"
def actions(self):
return [ ("ctcp-message", 20, self.searchActions),
("message-channel", 20, self.searchChannelMessage),
("message-user", 20, self.searchPrivateMessage) ]
def searchPrivateMessage(self, server, user, messageBody):
self._searchURLs(server, user.nick, messageBody)
def searchChannelMessage(self, server, channel, user, body):
self._searchURLs(server, channel.name, body)
def searchActions(self, server, source, user, body):
if not body.upper().startswith("ACTION"):
return
if isinstance(source, IRCChannel):
self._searchURLs(server, source.name, body)
else:
self._searchURLs(server, user.nick, body)
def load(self):
self.imgurClientID = None
self.ytKey = None
if "api-keys" not in self.bot.storage:
self.bot.storage["api-keys"] = {}
if "imgur" in self.bot.storage["api-keys"]:
self.imgurClientID = self.bot.storage["api-keys"]["imgur"]
if "google" in self.bot.storage["api-keys"]:
self.ytKey = self.bot.storage["api-keys"]["google"]
def _searchURLs(self, server, source, body):
if not self.bot.moduleHandler.useModuleOnServer(self.name, server):
return
regex = re.compile(r"(https?://|www\.)[^\s]+", re.IGNORECASE)
for url in filter(regex.match, body.split(" ")):
response = self._handleURL(url)
if response:
self.bot.servers[server].outputHandler.cmdPRIVMSG(source, response)
def _handleURL(self, url):
ytMatch = re.search(r"(youtube\.com/watch.+v=|youtu\.be/)(?P<videoID>[^&#\?]{11})", url)
if ytMatch:
return self._handleYouTube(ytMatch.group("videoID"))
imgurMatch = re.search(r"(i\.)?imgur\.com/(?P<imgurID>[^\.]+)", url)
if imgurMatch:
return self._handleImgur(imgurMatch.group("imgurID"))
if not re.search("\.(jpe?g|gif|png|bmp)$", url):
return self._handleGeneric(url)
return None
def _handleGeneric(self, url):
with SignalTimeout(5) if SignalTimeout is not None else DummyContextManager():
try:
result = self.bot.moduleHandler.runActionUntilValue("fetch-url", url)
if not result or result.status_code != 200:
return None
parsed_uri = urlparse(result.url)
soup = BeautifulSoup(result.content)
title = soup.find("title").text.encode("utf-8", "ignore").replace("\r", "").replace("\n", " ")
if len(title) > 300:
title = title[:297] + "..."
return "[URL] {} (at host: {}).".format(title, parsed_uri.hostname)
except TimeoutException:
return "The operation timed out."
def _handleYouTube(self, videoID):
params = {
"id": videoID,
"fields": "items(id,snippet(title,description),contentDetails(duration))",
"parts": "snippet,contentDetails",
}
if self.ytKey:
params["key"] = self.ytKey
url = "https://www.googleapis.com/youtube/v3/videos"
result = self.bot.moduleHandler.runActionUntilValue("fetch-url", url, params)
if not result:
return None
j = result.json()
if len(j["items"]) < 1:
return None
snippet = j["items"][0]["snippet"]
title = snippet["title"].replace("\r", "").replace("\n", " ").encode("utf-8", "ignore")
description = snippet["description"].replace("\r", "").replace("\n", " ").encode("utf-8", "ignore")
durSeconds = parse_duration(j["items"][0]["contentDetails"]["duration"]).total_seconds()
if len(description) > 149:
description = description[:147] + "..."
if durSeconds < 3600:
duration = time.strftime("%M:%S", time.gmtime(durSeconds))
else:
duration = time.strftime("%H:%M:%S", time.gmtime(durSeconds))
return "[YouTube] {} | {} | {}".format(title, duration, description)
def _handleImgur(self, imgurID):
if not self.imgurClientID:
return
albumLink = False
if imgurID.startswith("gallery/"):
imgurID = imgurID.replace("gallery/", "")
url = "https://api.imgur.com/3/gallery/{}".format(imgurID)
elif imgurID.startswith("a/"):
imgurID = imgurID.replace("a/", "")
url = "https://api.imgur.com/3/album/{}".format(imgurID)
albumLink = True
else:
url = "https://api.imgur.com/3/image/{}".format(imgurID)
headers = { "Authorization": "Client-ID {}".format(self.imgurClientID) }
result = self.bot.moduleHandler.runActionUntilValue("fetch-url", url, None, headers)
if not result:
return
j = result.json()
if j["status"] != 200:
return
j = j["data"]
data = []
if j["title"]:
data.append("{}".format(j["title"].encode("utf-8", "ignore")))
else:
data.append("No title")
if j["nsfw"]:
data.append("NSFW!")
if albumLink:
data.append("Album: {} images".format(j["images_count"]))
elif "is_album" in j and j["is_album"]:
data.append("Album: {} images".format(len(j["images"])))
if "animated" in j and j["animated"]:
data.append("Animated!")
if "width" in j and "height" in j:
data.append("{}x{}".format(j["width"], j["height"]))
if "size" in j:
data.append("Size: {} kB".format(int(j["size"])/1024))
data.append("Views: {}".format(j["views"]))
return "[Imgur] {}".format(" | ".join(data))
urlFollow = URLFollow()
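# --- Illustrative sketch (editor's addition, not part of PyHeufyBot) ---------
# load() above reads optional API keys out of the bot storage; a hand-written
# example of the expected shape (the key names follow the code, the values are
# placeholders):
#
#   bot.storage["api-keys"] = {
#       "imgur": "<imgur client id>",
#       "google": "<YouTube Data API v3 key>",
#   }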
|
mit
| -4,678,642,470,767,374,000 | -7,577,154,258,392,654,000 | 40.903226 | 110 | 0.578907 | false |
bygloam/yapf
|
yapf/yapflib/format_decision_state.py
|
1
|
14929
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implements a format decision state object that manages whitespace decisions.
Each token is processed one at a time, at which point its whitespace formatting
decisions are made. A graph of potential whitespace formattings is created,
where each node in the graph is a format decision state object. The heuristic
tries formatting the token with and without a newline before it to determine
which one has the least penalty. Therefore, the format decision state object for
each decision needs to be its own unique copy.
Once the heuristic determines the best formatting, it makes a non-dry run pass
through the code to commit the whitespace formatting.
FormatDecisionState: main class exported by this module.
"""
import copy
from yapf.yapflib import format_token
from yapf.yapflib import style
class FormatDecisionState(object):
"""The current state when indenting an unwrapped line.
The FormatDecisionState object is meant to be copied instead of referenced.
Attributes:
first_indent: The indent of the first token.
column: The number of used columns in the current line.
next_token: The next token to be formatted.
paren_level: The level of nesting inside (), [], and {}.
start_of_line_level: The paren_level at the start of this line.
lowest_level_on_line: The lowest paren_level on the current line.
newline: Indicates if a newline is added along the edge to this format
decision state node.
previous: The previous format decision state in the decision tree.
stack: A stack (of _ParenState) keeping track of properties applying to
parenthesis levels.
ignore_stack_for_comparison: Ignore the stack of _ParenState for state
comparison.
"""
def __init__(self, line, first_indent):
"""Initializer.
Initializes to the state after placing the first token from 'line' at
'first_indent'.
Arguments:
line: (UnwrappedLine) The unwrapped line we're currently processing.
first_indent: (int) The indent of the first token.
"""
self.next_token = line.first
self.column = first_indent
self.paren_level = 0
self.start_of_line_level = 0
self.lowest_level_on_line = 0
self.ignore_stack_for_comparison = False
self.stack = [_ParenState(first_indent, first_indent)]
self.first_indent = first_indent
self.newline = False
self.previous = None
self._MoveStateToNextToken()
def Clone(self):
new = copy.copy(self)
new.stack = copy.deepcopy(self.stack)
return new
def __eq__(self, other):
# Note: 'first_indent' is implicit in the stack. Also, we ignore 'previous',
# because it shouldn't have a bearing on this comparison. (I.e., it will
# report equal if 'next_token' does.)
return (self.next_token == other.next_token and
self.column == other.column and
self.paren_level == other.paren_level and
self.start_of_line_level == other.start_of_line_level and
self.lowest_level_on_line == other.lowest_level_on_line and
(self.ignore_stack_for_comparison or
other.ignore_stack_for_comparison or self.stack == other.stack))
def __ne__(self, other):
return not self == other
def __hash__(self):
return hash((self.next_token, self.column, self.paren_level,
self.start_of_line_level, self.lowest_level_on_line))
def __repr__(self):
return ('column::%d, next_token::%s, paren_level::%d, stack::[\n\t%s' %
(self.column, repr(self.next_token), self.paren_level,
'\n\t'.join(repr(s) for s in self.stack) + ']'))
def CanSplit(self):
"""Returns True if the line can be split before the next token."""
current = self.next_token
if not current.can_break_before:
return False
return True
def MustSplit(self):
"""Returns True if the line must split before the next token."""
current = self.next_token
previous_token = current.previous_token
if current.must_break_before:
return True
if (self.stack[-1].split_before_closing_bracket and
# FIXME(morbo): Use the 'matching_bracket' instead of this.
# FIXME(morbo): Don't forget about tuples!
current.value in ']}'):
# Split if we need to split before the closing bracket and the next
# token is a closing bracket.
return True
if previous_token:
length = _GetLengthToMatchingParen(previous_token)
if (previous_token.value == '{' and # TODO(morbo): List initializers?
length + self.column > style.Get('COLUMN_LIMIT')):
return True
# TODO(morbo): This should be controlled with a knob.
if (format_token.Subtype.DICTIONARY_KEY in current.subtypes and
not current.is_comment):
# Place each dictionary entry on its own line.
return True
# TODO(morbo): This should be controlled with a knob.
if format_token.Subtype.DICT_SET_GENERATOR in current.subtypes:
return True
if (previous_token.value != '(' and
format_token.Subtype.DEFAULT_OR_NAMED_ASSIGN_ARG_LIST in
current.subtypes):
return style.Get('SPLIT_BEFORE_NAMED_ASSIGNS')
if (previous_token.value in '{[(' and
current.lineno != previous_token.lineno):
self.stack[-1].split_before_closing_bracket = True
return True
return False
def AddTokenToState(self, newline, dry_run, must_split=False):
"""Add a token to the format decision state.
Allow the heuristic to try out adding the token with and without a newline.
Later on, the algorithm will determine which one has the lowest penalty.
Arguments:
newline: (bool) Add the token on a new line if True.
dry_run: (bool) Don't commit whitespace changes to the FormatToken if
True.
must_split: (bool) A newline was required before this token.
Returns:
The penalty of splitting after the current token.
"""
penalty = 0
if newline:
penalty = self._AddTokenOnNewline(dry_run, must_split)
else:
self._AddTokenOnCurrentLine(dry_run)
return self._MoveStateToNextToken() + penalty
def _AddTokenOnCurrentLine(self, dry_run):
"""Puts the token on the current line.
Appends the next token to the state and updates information necessary for
indentation.
Arguments:
      dry_run: (bool) Don't commit whitespace changes to the FormatToken if
        True.
"""
current = self.next_token
previous = current.previous_token
spaces = current.spaces_required_before
if not dry_run:
current.AddWhitespacePrefix(newlines_before=0, spaces=spaces)
if previous.OpensScope():
if not current.is_comment:
# Align closing scopes that are on a newline with the opening scope:
#
# foo = [a,
# b,
# ]
self.stack[-1].closing_scope_indent = previous.column
if style.Get('ALIGN_CLOSING_BRACKET_WITH_VISUAL_INDENT'):
self.stack[-1].closing_scope_indent += 1
self.stack[-1].indent = self.column + spaces
else:
self.stack[-1].closing_scope_indent = (
self.stack[-1].indent - style.Get('CONTINUATION_INDENT_WIDTH')
)
self.column += spaces
def _AddTokenOnNewline(self, dry_run, must_split):
"""Adds a line break and necessary indentation.
Appends the next token to the state and updates information necessary for
indentation.
Arguments:
dry_run: (bool) Don't commit whitespace changes to the FormatToken if
True.
must_split: (bool) A newline was required before this token.
Returns:
The split penalty for splitting after the current state.
"""
current = self.next_token
previous = current.previous_token
self.column = self._GetNewlineColumn()
if not dry_run:
current.AddWhitespacePrefix(newlines_before=1, spaces=self.column)
if not current.is_comment:
self.stack[-1].last_space = self.column
self.start_of_line_level = self.paren_level
self.lowest_level_on_line = self.paren_level
# Any break on this level means that the parent level has been broken and we
# need to avoid bin packing there.
for paren_state in self.stack:
paren_state.split_before_parameter = True
if (previous.value != ',' and not previous.is_binary_op and
not current.is_binary_op and not previous.OpensScope()):
self.stack[-1].split_before_parameter = True
if (previous.OpensScope() or
(previous.is_comment and previous.previous_token is not None and
previous.previous_token.OpensScope())):
self.stack[-1].closing_scope_indent = max(
0, self.stack[-1].indent - style.Get('CONTINUATION_INDENT_WIDTH'))
self.stack[-1].split_before_closing_bracket = True
# Calculate the split penalty.
penalty = current.split_penalty
# Add a penalty for each increasing newline we add.
last = self.stack[-1]
penalty += (
style.Get('SPLIT_PENALTY_FOR_ADDED_LINE_SPLIT') * last.num_line_splits
)
if not must_split and current.value not in {'if', 'for'}:
# Don't penalize for a must split or for splitting before an
# if-expression or list comprehension.
last.num_line_splits += 1
return penalty + 10
def _GetNewlineColumn(self):
"""Return the new column on the newline."""
current = self.next_token
previous = current.previous_token
top_of_stack = self.stack[-1]
if current.spaces_required_before > 2:
return current.spaces_required_before
if current.OpensScope():
return self.first_indent if not self.paren_level else top_of_stack.indent
if current.ClosesScope():
if (previous.OpensScope() or
(previous.is_comment and previous.previous_token is not None and
previous.previous_token.OpensScope())):
return max(
0, self.stack[-1].indent - style.Get('CONTINUATION_INDENT_WIDTH'))
return top_of_stack.closing_scope_indent
if (previous and previous.is_string and current.is_string and
format_token.Subtype.DICTIONARY_VALUE in current.subtypes):
return previous.column
if format_token.Subtype.IF_TEST_EXPR in current.subtypes:
return top_of_stack.indent + style.Get('INDENT_IF_EXPR_CONTINUATION')
return top_of_stack.indent
def _MoveStateToNextToken(self):
"""Calculate format decision state information and move onto the next token.
Before moving onto the next token, we first calculate the format decision
state given the current token and its formatting decisions. Then the format
decision state is set up so that the next token can be added.
Returns:
The penalty for the number of characters over the column limit.
"""
current = self.next_token
if not current.OpensScope() and not current.ClosesScope():
self.lowest_level_on_line = min(self.lowest_level_on_line,
self.paren_level)
# If we encounter an opening bracket, we add a level to our stack to prepare
# for the subsequent tokens.
if current.OpensScope():
last = self.stack[-1]
new_indent = style.Get('CONTINUATION_INDENT_WIDTH') + last.last_space
self.stack.append(_ParenState(new_indent, self.stack[-1].last_space))
      self.stack[-1].split_before_parameter = False
self.paren_level += 1
# If we encounter a closing bracket, we can remove a level from our
# parenthesis stack.
if len(self.stack) > 1 and current.ClosesScope():
self.stack[-2].last_space = self.stack[-1].last_space
self.stack.pop()
self.paren_level -= 1
is_multiline_string = current.is_string and '\n' in current.value
if is_multiline_string:
# This is a multiline string. Only look at the first line.
self.column += len(current.value.split('\n')[0])
else:
self.column += len(current.value)
self.next_token = self.next_token.next_token
# Calculate the penalty for overflowing the column limit.
penalty = 0
if self.column > style.Get('COLUMN_LIMIT') and not current.is_comment:
excess_characters = self.column - style.Get('COLUMN_LIMIT')
penalty = style.Get('SPLIT_PENALTY_EXCESS_CHARACTER') * excess_characters
if is_multiline_string:
# If this is a multiline string, the column is actually the
# end of the last line in the string.
self.column = len(current.value.split('\n')[-1])
return penalty
def _GetLengthToMatchingParen(token):
"""Returns the length from one bracket to the matching bracket.
Arguments:
token: (FormatToken) The opening bracket token.
Returns:
The length to the closing paren or up to the first point where we can split
the line. The length includes the brackets.
"""
if not token.matching_bracket:
return 0
end = token.matching_bracket
while end.next_token and not end.next_token.can_break_before:
end = end.next_token
return end.total_length - token.total_length + 1
class _ParenState(object):
"""Maintains the state of the bracket enclosures.
A stack of _ParenState objects are kept so that we know how to indent relative
to the brackets.
Attributes:
indent: The column position to which a specified parenthesis level needs to
be indented.
last_space: The column position of the last space on each level.
split_before_closing_bracket: Whether a newline needs to be inserted before
the closing bracket. We only want to insert a newline before the closing
bracket if there also was a newline after the beginning left bracket.
split_before_parameter: Split the line after the next comma.
num_line_splits: Number of line splits this _ParenState contains already.
Each subsequent line split gets an increasing penalty.
"""
# TODO(morbo): This doesn't track "bin packing."
def __init__(self, indent, last_space):
self.indent = indent
self.last_space = last_space
self.closing_scope_indent = 0
self.split_before_closing_bracket = False
self.split_before_parameter = False
self.num_line_splits = 0
def __repr__(self):
return '[indent::%d, last_space::%d, closing_scope_indent::%d]' % (
self.indent, self.last_space, self.closing_scope_indent)
|
apache-2.0
| 8,405,098,311,041,560,000 | -6,529,468,240,539,915,000 | 35.68059 | 80 | 0.681224 | false |
openslack/openslack-wechat
|
examples/bae-echo-enterprise/wechatapp.py
|
14
|
1661
|
from __future__ import absolute_import, unicode_literals
from flask import Flask, request, abort
from wechatpy.enterprise.crypto import WeChatCrypto
from wechatpy.exceptions import InvalidSignatureException
from wechatpy.enterprise.exceptions import InvalidCorpIdException
from wechatpy.enterprise import parse_message, create_reply
TOKEN = ''
EncodingAESKey = ''
CorpId = ''
app = Flask(__name__)
@app.route('/wechat', methods=['GET', 'POST'])
def wechat():
signature = request.args.get('msg_signature', '')
timestamp = request.args.get('timestamp', '')
nonce = request.args.get('nonce', '')
crypto = WeChatCrypto(TOKEN, EncodingAESKey, CorpId)
if request.method == 'GET':
echo_str = request.args.get('echostr', '')
try:
echo_str = crypto.check_signature(
signature,
timestamp,
nonce,
echo_str
)
except InvalidSignatureException:
abort(403)
return echo_str
else:
try:
msg = crypto.decrypt_message(
request.data,
signature,
timestamp,
nonce
)
except (InvalidSignatureException, InvalidCorpIdException):
abort(403)
msg = parse_message(msg)
if msg.type == 'text':
reply = create_reply(msg.content, msg).render()
else:
reply = create_reply('Can not handle this for now', msg).render()
res = crypto.encrypt_message(reply, nonce, timestamp)
return res
if __name__ == '__main__':
app.run('127.0.0.1', 5001, debug=True)
|
apache-2.0
| 8,309,180,496,020,104,000 | -7,346,756,653,558,532,000 | 29.2 | 77 | 0.590608 | false |
ncultra/qemu
|
scripts/vmstate-static-checker.py
|
29
|
15449
|
#!/usr/bin/python
#
# Compares vmstate information stored in JSON format, obtained from
# the -dump-vmstate QEMU command.
#
# Copyright 2014 Amit Shah <[email protected]>
# Copyright 2014 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, see <http://www.gnu.org/licenses/>.
import argparse
import json
import sys
# Count the number of errors found
taint = 0
def bump_taint():
global taint
# Ensure we don't wrap around or reset to 0 -- the shell only has
# an 8-bit return value.
if taint < 255:
taint = taint + 1
def check_fields_match(name, s_field, d_field):
if s_field == d_field:
return True
# Some fields changed names between qemu versions. This list
# is used to whitelist such changes in each section / description.
changed_names = {
'apic': ['timer', 'timer_expiry'],
'e1000': ['dev', 'parent_obj'],
'ehci': ['dev', 'pcidev'],
'I440FX': ['dev', 'parent_obj'],
'ich9_ahci': ['card', 'parent_obj'],
'ich9-ahci': ['ahci', 'ich9_ahci'],
'ioh3420': ['PCIDevice', 'PCIEDevice'],
'ioh-3240-express-root-port': ['port.br.dev',
'parent_obj.parent_obj.parent_obj',
'port.br.dev.exp.aer_log',
'parent_obj.parent_obj.parent_obj.exp.aer_log'],
'cirrus_vga': ['hw_cursor_x', 'vga.hw_cursor_x',
'hw_cursor_y', 'vga.hw_cursor_y'],
'lsiscsi': ['dev', 'parent_obj'],
'mch': ['d', 'parent_obj'],
'pci_bridge': ['bridge.dev', 'parent_obj', 'bridge.dev.shpc', 'shpc'],
'pcnet': ['pci_dev', 'parent_obj'],
'PIIX3': ['pci_irq_levels', 'pci_irq_levels_vmstate'],
'piix4_pm': ['dev', 'parent_obj', 'pci0_status',
'acpi_pci_hotplug.acpi_pcihp_pci_status[0x0]',
'pm1a.sts', 'ar.pm1.evt.sts', 'pm1a.en', 'ar.pm1.evt.en',
'pm1_cnt.cnt', 'ar.pm1.cnt.cnt',
'tmr.timer', 'ar.tmr.timer',
'tmr.overflow_time', 'ar.tmr.overflow_time',
'gpe', 'ar.gpe'],
'rtl8139': ['dev', 'parent_obj'],
'qxl': ['num_surfaces', 'ssd.num_surfaces'],
'usb-ccid': ['abProtocolDataStructure', 'abProtocolDataStructure.data'],
'usb-host': ['dev', 'parent_obj'],
'usb-mouse': ['usb-ptr-queue', 'HIDPointerEventQueue'],
'usb-tablet': ['usb-ptr-queue', 'HIDPointerEventQueue'],
'vmware_vga': ['card', 'parent_obj'],
'vmware_vga_internal': ['depth', 'new_depth'],
'xhci': ['pci_dev', 'parent_obj'],
'x3130-upstream': ['PCIDevice', 'PCIEDevice'],
'xio3130-express-downstream-port': ['port.br.dev',
'parent_obj.parent_obj.parent_obj',
'port.br.dev.exp.aer_log',
'parent_obj.parent_obj.parent_obj.exp.aer_log'],
'xio3130-downstream': ['PCIDevice', 'PCIEDevice'],
'xio3130-express-upstream-port': ['br.dev', 'parent_obj.parent_obj',
'br.dev.exp.aer_log',
'parent_obj.parent_obj.exp.aer_log'],
}
if not name in changed_names:
return False
if s_field in changed_names[name] and d_field in changed_names[name]:
return True
return False
def get_changed_sec_name(sec):
# Section names can change -- see commit 292b1634 for an example.
changes = {
"ICH9 LPC": "ICH9-LPC",
}
for item in changes:
if item == sec:
return changes[item]
if changes[item] == sec:
return item
return ""
def exists_in_substruct(fields, item):
# Some QEMU versions moved a few fields inside a substruct. This
# kept the on-wire format the same. This function checks if
# something got shifted inside a substruct. For example, the
# change in commit 1f42d22233b4f3d1a2933ff30e8d6a6d9ee2d08f
if not "Description" in fields:
return False
if not "Fields" in fields["Description"]:
return False
substruct_fields = fields["Description"]["Fields"]
if substruct_fields == []:
return False
return check_fields_match(fields["Description"]["name"],
substruct_fields[0]["field"], item)
def check_fields(src_fields, dest_fields, desc, sec):
# This function checks for all the fields in a section. If some
# fields got embedded into a substruct, this function will also
# attempt to check inside the substruct.
d_iter = iter(dest_fields)
s_iter = iter(src_fields)
# Using these lists as stacks to store previous value of s_iter
# and d_iter, so that when time comes to exit out of a substruct,
# we can go back one level up and continue from where we left off.
s_iter_list = []
d_iter_list = []
advance_src = True
advance_dest = True
unused_count = 0
while True:
if advance_src:
try:
s_item = s_iter.next()
except StopIteration:
if s_iter_list == []:
break
s_iter = s_iter_list.pop()
continue
else:
if unused_count == 0:
# We want to avoid advancing just once -- when entering a
# dest substruct, or when exiting one.
advance_src = True
if advance_dest:
try:
d_item = d_iter.next()
except StopIteration:
if d_iter_list == []:
# We were not in a substruct
print "Section \"" + sec + "\",",
print "Description " + "\"" + desc + "\":",
print "expected field \"" + s_item["field"] + "\",",
print "while dest has no further fields"
bump_taint()
break
d_iter = d_iter_list.pop()
advance_src = False
continue
else:
if unused_count == 0:
advance_dest = True
if unused_count > 0:
if advance_dest == False:
unused_count = unused_count - s_item["size"]
if unused_count == 0:
advance_dest = True
continue
if unused_count < 0:
print "Section \"" + sec + "\",",
print "Description \"" + desc + "\":",
print "unused size mismatch near \"",
print s_item["field"] + "\""
bump_taint()
break
continue
if advance_src == False:
unused_count = unused_count - d_item["size"]
if unused_count == 0:
advance_src = True
continue
if unused_count < 0:
print "Section \"" + sec + "\",",
print "Description \"" + desc + "\":",
print "unused size mismatch near \"",
print d_item["field"] + "\""
bump_taint()
break
continue
if not check_fields_match(desc, s_item["field"], d_item["field"]):
# Some fields were put in substructs, keeping the
# on-wire format the same, but breaking static tools
# like this one.
# First, check if dest has a new substruct.
if exists_in_substruct(d_item, s_item["field"]):
# listiterators don't have a prev() function, so we
# have to store our current location, descend into the
# substruct, and ensure we come out as if nothing
# happened when the substruct is over.
#
# Essentially we're opening the substructs that got
# added which didn't change the wire format.
d_iter_list.append(d_iter)
substruct_fields = d_item["Description"]["Fields"]
d_iter = iter(substruct_fields)
advance_src = False
continue
# Next, check if src has substruct that dest removed
# (can happen in backward migration: 2.0 -> 1.5)
if exists_in_substruct(s_item, d_item["field"]):
s_iter_list.append(s_iter)
substruct_fields = s_item["Description"]["Fields"]
s_iter = iter(substruct_fields)
advance_dest = False
continue
if s_item["field"] == "unused" or d_item["field"] == "unused":
if s_item["size"] == d_item["size"]:
continue
if d_item["field"] == "unused":
advance_dest = False
unused_count = d_item["size"] - s_item["size"]
continue
if s_item["field"] == "unused":
advance_src = False
unused_count = s_item["size"] - d_item["size"]
continue
print "Section \"" + sec + "\",",
print "Description \"" + desc + "\":",
print "expected field \"" + s_item["field"] + "\",",
print "got \"" + d_item["field"] + "\"; skipping rest"
bump_taint()
break
check_version(s_item, d_item, sec, desc)
if not "Description" in s_item:
# Check size of this field only if it's not a VMSTRUCT entry
check_size(s_item, d_item, sec, desc, s_item["field"])
check_description_in_list(s_item, d_item, sec, desc)
def check_subsections(src_sub, dest_sub, desc, sec):
for s_item in src_sub:
found = False
for d_item in dest_sub:
if s_item["name"] != d_item["name"]:
continue
found = True
check_descriptions(s_item, d_item, sec)
if not found:
print "Section \"" + sec + "\", Description \"" + desc + "\":",
print "Subsection \"" + s_item["name"] + "\" not found"
bump_taint()
def check_description_in_list(s_item, d_item, sec, desc):
if not "Description" in s_item:
return
if not "Description" in d_item:
print "Section \"" + sec + "\", Description \"" + desc + "\",",
print "Field \"" + s_item["field"] + "\": missing description"
bump_taint()
return
check_descriptions(s_item["Description"], d_item["Description"], sec)
def check_descriptions(src_desc, dest_desc, sec):
check_version(src_desc, dest_desc, sec, src_desc["name"])
if not check_fields_match(sec, src_desc["name"], dest_desc["name"]):
print "Section \"" + sec + "\":",
print "Description \"" + src_desc["name"] + "\"",
print "missing, got \"" + dest_desc["name"] + "\" instead; skipping"
bump_taint()
return
for f in src_desc:
if not f in dest_desc:
print "Section \"" + sec + "\"",
print "Description \"" + src_desc["name"] + "\":",
print "Entry \"" + f + "\" missing"
bump_taint()
continue
if f == 'Fields':
check_fields(src_desc[f], dest_desc[f], src_desc["name"], sec)
if f == 'Subsections':
check_subsections(src_desc[f], dest_desc[f], src_desc["name"], sec)
def check_version(s, d, sec, desc=None):
if s["version_id"] > d["version_id"]:
print "Section \"" + sec + "\"",
if desc:
print "Description \"" + desc + "\":",
print "version error:", s["version_id"], ">", d["version_id"]
bump_taint()
if not "minimum_version_id" in d:
return
if s["version_id"] < d["minimum_version_id"]:
print "Section \"" + sec + "\"",
if desc:
print "Description \"" + desc + "\":",
print "minimum version error:", s["version_id"], "<",
print d["minimum_version_id"]
bump_taint()
def check_size(s, d, sec, desc=None, field=None):
if s["size"] != d["size"]:
print "Section \"" + sec + "\"",
if desc:
print "Description \"" + desc + "\"",
if field:
print "Field \"" + field + "\"",
print "size mismatch:", s["size"], ",", d["size"]
bump_taint()
def check_machine_type(s, d):
if s["Name"] != d["Name"]:
print "Warning: checking incompatible machine types:",
print "\"" + s["Name"] + "\", \"" + d["Name"] + "\""
return
def main():
help_text = "Parse JSON-formatted vmstate dumps from QEMU in files SRC and DEST. Checks whether migration from SRC to DEST QEMU versions would break based on the VMSTATE information contained within the JSON outputs. The JSON output is created from a QEMU invocation with the -dump-vmstate parameter and a filename argument to it. Other parameters to QEMU do not matter, except the -M (machine type) parameter."
parser = argparse.ArgumentParser(description=help_text)
parser.add_argument('-s', '--src', type=file, required=True,
help='json dump from src qemu')
parser.add_argument('-d', '--dest', type=file, required=True,
help='json dump from dest qemu')
parser.add_argument('--reverse', required=False, default=False,
action='store_true',
help='reverse the direction')
args = parser.parse_args()
src_data = json.load(args.src)
dest_data = json.load(args.dest)
args.src.close()
args.dest.close()
if args.reverse:
temp = src_data
src_data = dest_data
dest_data = temp
for sec in src_data:
dest_sec = sec
if not dest_sec in dest_data:
# Either the section name got changed, or the section
# doesn't exist in dest.
dest_sec = get_changed_sec_name(sec)
if not dest_sec in dest_data:
print "Section \"" + sec + "\" does not exist in dest"
bump_taint()
continue
s = src_data[sec]
d = dest_data[dest_sec]
if sec == "vmschkmachine":
check_machine_type(s, d)
continue
check_version(s, d, sec)
for entry in s:
if not entry in d:
print "Section \"" + sec + "\": Entry \"" + entry + "\"",
print "missing"
bump_taint()
continue
if entry == "Description":
check_descriptions(s[entry], d[entry], sec)
return taint
if __name__ == '__main__':
sys.exit(main())
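# Example invocation (illustrative sketch; file names are placeholders).
# The JSON inputs are produced by running each QEMU version with the
# -dump-vmstate option described in help_text above, then compared here:
#   qemu-system-x86_64 -dump-vmstate src.json      (on the source version)
#   qemu-system-x86_64 -dump-vmstate dest.json     (on the destination version)
#   python <this-checker>.py -s src.json -d dest.json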
|
gpl-2.0
| 1,815,444,969,299,696,000 | -5,369,465,599,639,399,000 | 35.436321 | 418 | 0.518933 | false |
roandelyf/iTerm2
|
tools/ply/ply-3.4/example/BASIC/basinterp.py
|
166
|
17284
|
# This file provides the runtime support for running a basic program
# Assumes the program has been parsed using basparse.py
import sys
import math
import random
class BasicInterpreter:
# Initialize the interpreter. prog is a dictionary
# containing (line,statement) mappings
def __init__(self,prog):
self.prog = prog
self.functions = { # Built-in function table
'SIN' : lambda z: math.sin(self.eval(z)),
'COS' : lambda z: math.cos(self.eval(z)),
'TAN' : lambda z: math.tan(self.eval(z)),
'ATN' : lambda z: math.atan(self.eval(z)),
'EXP' : lambda z: math.exp(self.eval(z)),
'ABS' : lambda z: abs(self.eval(z)),
'LOG' : lambda z: math.log(self.eval(z)),
'SQR' : lambda z: math.sqrt(self.eval(z)),
'INT' : lambda z: int(self.eval(z)),
'RND' : lambda z: random.random()
}
# Collect all data statements
def collect_data(self):
self.data = []
for lineno in self.stat:
if self.prog[lineno][0] == 'DATA':
self.data = self.data + self.prog[lineno][1]
self.dc = 0 # Initialize the data counter
# Check for end statements
def check_end(self):
has_end = 0
for lineno in self.stat:
if self.prog[lineno][0] == 'END' and not has_end:
has_end = lineno
if not has_end:
print("NO END INSTRUCTION")
self.error = 1
return
if has_end != lineno:
print("END IS NOT LAST")
self.error = 1
# Check loops
def check_loops(self):
for pc in range(len(self.stat)):
lineno = self.stat[pc]
if self.prog[lineno][0] == 'FOR':
forinst = self.prog[lineno]
loopvar = forinst[1]
for i in range(pc+1,len(self.stat)):
if self.prog[self.stat[i]][0] == 'NEXT':
nextvar = self.prog[self.stat[i]][1]
if nextvar != loopvar: continue
self.loopend[pc] = i
break
else:
print("FOR WITHOUT NEXT AT LINE %s" % self.stat[pc])
self.error = 1
# Evaluate an expression
def eval(self,expr):
etype = expr[0]
if etype == 'NUM': return expr[1]
elif etype == 'GROUP': return self.eval(expr[1])
elif etype == 'UNARY':
if expr[1] == '-': return -self.eval(expr[2])
elif etype == 'BINOP':
if expr[1] == '+': return self.eval(expr[2])+self.eval(expr[3])
elif expr[1] == '-': return self.eval(expr[2])-self.eval(expr[3])
elif expr[1] == '*': return self.eval(expr[2])*self.eval(expr[3])
elif expr[1] == '/': return float(self.eval(expr[2]))/self.eval(expr[3])
elif expr[1] == '^': return abs(self.eval(expr[2]))**self.eval(expr[3])
elif etype == 'VAR':
var,dim1,dim2 = expr[1]
if not dim1 and not dim2:
if var in self.vars:
return self.vars[var]
else:
print("UNDEFINED VARIABLE %s AT LINE %s" % (var, self.stat[self.pc]))
raise RuntimeError
# May be a list lookup or a function evaluation
if dim1 and not dim2:
if var in self.functions:
# A function
return self.functions[var](dim1)
else:
# A list evaluation
if var in self.lists:
dim1val = self.eval(dim1)
if dim1val < 1 or dim1val > len(self.lists[var]):
print("LIST INDEX OUT OF BOUNDS AT LINE %s" % self.stat[self.pc])
raise RuntimeError
return self.lists[var][dim1val-1]
if dim1 and dim2:
if var in self.tables:
dim1val = self.eval(dim1)
dim2val = self.eval(dim2)
if dim1val < 1 or dim1val > len(self.tables[var]) or dim2val < 1 or dim2val > len(self.tables[var][0]):
print("TABLE INDEX OUT OUT BOUNDS AT LINE %s" % self.stat[self.pc])
raise RuntimeError
return self.tables[var][dim1val-1][dim2val-1]
print("UNDEFINED VARIABLE %s AT LINE %s" % (var, self.stat[self.pc]))
raise RuntimeError
# Evaluate a relational expression
def releval(self,expr):
etype = expr[1]
lhs = self.eval(expr[2])
rhs = self.eval(expr[3])
if etype == '<':
if lhs < rhs: return 1
else: return 0
elif etype == '<=':
if lhs <= rhs: return 1
else: return 0
elif etype == '>':
if lhs > rhs: return 1
else: return 0
elif etype == '>=':
if lhs >= rhs: return 1
else: return 0
elif etype == '=':
if lhs == rhs: return 1
else: return 0
elif etype == '<>':
if lhs != rhs: return 1
else: return 0
# Assignment
def assign(self,target,value):
var, dim1, dim2 = target
if not dim1 and not dim2:
self.vars[var] = self.eval(value)
elif dim1 and not dim2:
# List assignment
dim1val = self.eval(dim1)
if not var in self.lists:
self.lists[var] = [0]*10
if dim1val > len(self.lists[var]):
print ("DIMENSION TOO LARGE AT LINE %s" % self.stat[self.pc])
raise RuntimeError
self.lists[var][dim1val-1] = self.eval(value)
elif dim1 and dim2:
dim1val = self.eval(dim1)
dim2val = self.eval(dim2)
if not var in self.tables:
temp = [0]*10
v = []
for i in range(10): v.append(temp[:])
self.tables[var] = v
# Variable already exists
if dim1val > len(self.tables[var]) or dim2val > len(self.tables[var][0]):
print("DIMENSION TOO LARGE AT LINE %s" % self.stat[self.pc])
raise RuntimeError
self.tables[var][dim1val-1][dim2val-1] = self.eval(value)
# Change the current line number
def goto(self,linenum):
if not linenum in self.prog:
print("UNDEFINED LINE NUMBER %d AT LINE %d" % (linenum, self.stat[self.pc]))
raise RuntimeError
self.pc = self.stat.index(linenum)
# Run it
def run(self):
self.vars = { } # All variables
self.lists = { } # List variables
self.tables = { } # Tables
self.loops = [ ] # Currently active loops
self.loopend= { } # Mapping saying where loops end
self.gosub = None # Gosub return point (if any)
self.error = 0 # Indicates program error
self.stat = list(self.prog) # Ordered list of all line numbers
self.stat.sort()
self.pc = 0 # Current program counter
# Processing prior to running
self.collect_data() # Collect all of the data statements
self.check_end()
self.check_loops()
if self.error: raise RuntimeError
while 1:
line = self.stat[self.pc]
instr = self.prog[line]
op = instr[0]
# END and STOP statements
if op == 'END' or op == 'STOP':
break # We're done
# GOTO statement
elif op == 'GOTO':
newline = instr[1]
self.goto(newline)
continue
# PRINT statement
elif op == 'PRINT':
plist = instr[1]
out = ""
for label,val in plist:
if out:
out += ' '*(15 - (len(out) % 15))
out += label
if val:
if label: out += " "
eval = self.eval(val)
out += str(eval)
sys.stdout.write(out)
end = instr[2]
if not (end == ',' or end == ';'):
sys.stdout.write("\n")
if end == ',': sys.stdout.write(" "*(15-(len(out) % 15)))
if end == ';': sys.stdout.write(" "*(3-(len(out) % 3)))
# LET statement
elif op == 'LET':
target = instr[1]
value = instr[2]
self.assign(target,value)
# READ statement
elif op == 'READ':
for target in instr[1]:
if self.dc < len(self.data):
value = ('NUM',self.data[self.dc])
self.assign(target,value)
self.dc += 1
else:
# No more data. Program ends
return
elif op == 'IF':
relop = instr[1]
newline = instr[2]
if (self.releval(relop)):
self.goto(newline)
continue
elif op == 'FOR':
loopvar = instr[1]
initval = instr[2]
finval = instr[3]
stepval = instr[4]
# Check to see if this is a new loop
if not self.loops or self.loops[-1][0] != self.pc:
# Looks like a new loop. Make the initial assignment
newvalue = initval
self.assign((loopvar,None,None),initval)
if not stepval: stepval = ('NUM',1)
stepval = self.eval(stepval) # Evaluate step here
self.loops.append((self.pc,stepval))
else:
# It's a repeat of the previous loop
# Update the value of the loop variable according to the step
stepval = ('NUM',self.loops[-1][1])
newvalue = ('BINOP','+',('VAR',(loopvar,None,None)),stepval)
if self.loops[-1][1] < 0: relop = '>='
else: relop = '<='
if not self.releval(('RELOP',relop,newvalue,finval)):
# Loop is done. Jump to the NEXT
self.pc = self.loopend[self.pc]
self.loops.pop()
else:
self.assign((loopvar,None,None),newvalue)
elif op == 'NEXT':
if not self.loops:
print("NEXT WITHOUT FOR AT LINE %s" % line)
return
nextvar = instr[1]
self.pc = self.loops[-1][0]
loopinst = self.prog[self.stat[self.pc]]
forvar = loopinst[1]
if nextvar != forvar:
print("NEXT DOESN'T MATCH FOR AT LINE %s" % line)
return
continue
elif op == 'GOSUB':
newline = instr[1]
if self.gosub:
print("ALREADY IN A SUBROUTINE AT LINE %s" % line)
return
self.gosub = self.stat[self.pc]
self.goto(newline)
continue
elif op == 'RETURN':
if not self.gosub:
print("RETURN WITHOUT A GOSUB AT LINE %s" % line)
return
self.goto(self.gosub)
self.gosub = None
elif op == 'FUNC':
fname = instr[1]
pname = instr[2]
expr = instr[3]
def eval_func(pvalue,name=pname,self=self,expr=expr):
self.assign((pname,None,None),pvalue)
return self.eval(expr)
self.functions[fname] = eval_func
elif op == 'DIM':
for vname,x,y in instr[1]:
if y == 0:
# Single dimension variable
self.lists[vname] = [0]*x
else:
# Double dimension variable
temp = [0]*y
v = []
for i in range(x):
v.append(temp[:])
self.tables[vname] = v
self.pc += 1
# Utility functions for program listing
def expr_str(self,expr):
etype = expr[0]
if etype == 'NUM': return str(expr[1])
elif etype == 'GROUP': return "(%s)" % self.expr_str(expr[1])
elif etype == 'UNARY':
if expr[1] == '-': return "-"+str(expr[2])
elif etype == 'BINOP':
return "%s %s %s" % (self.expr_str(expr[2]),expr[1],self.expr_str(expr[3]))
elif etype == 'VAR':
return self.var_str(expr[1])
def relexpr_str(self,expr):
return "%s %s %s" % (self.expr_str(expr[2]),expr[1],self.expr_str(expr[3]))
def var_str(self,var):
varname,dim1,dim2 = var
if not dim1 and not dim2: return varname
if dim1 and not dim2: return "%s(%s)" % (varname, self.expr_str(dim1))
return "%s(%s,%s)" % (varname, self.expr_str(dim1),self.expr_str(dim2))
# Create a program listing
def list(self):
stat = list(self.prog) # Ordered list of all line numbers
stat.sort()
for line in stat:
instr = self.prog[line]
op = instr[0]
if op in ['END','STOP','RETURN']:
print("%s %s" % (line, op))
continue
elif op == 'REM':
print("%s %s" % (line, instr[1]))
elif op == 'PRINT':
_out = "%s %s " % (line, op)
first = 1
for p in instr[1]:
if not first: _out += ", "
if p[0] and p[1]: _out += '"%s"%s' % (p[0],self.expr_str(p[1]))
elif p[1]: _out += self.expr_str(p[1])
else: _out += '"%s"' % (p[0],)
first = 0
if instr[2]: _out += instr[2]
print(_out)
elif op == 'LET':
print("%s LET %s = %s" % (line,self.var_str(instr[1]),self.expr_str(instr[2])))
elif op == 'READ':
_out = "%s READ " % line
first = 1
for r in instr[1]:
if not first: _out += ","
_out += self.var_str(r)
first = 0
print(_out)
elif op == 'IF':
print("%s IF %s THEN %d" % (line,self.relexpr_str(instr[1]),instr[2]))
elif op == 'GOTO' or op == 'GOSUB':
print("%s %s %s" % (line, op, instr[1]))
elif op == 'FOR':
_out = "%s FOR %s = %s TO %s" % (line,instr[1],self.expr_str(instr[2]),self.expr_str(instr[3]))
if instr[4]: _out += " STEP %s" % (self.expr_str(instr[4]))
print(_out)
elif op == 'NEXT':
print("%s NEXT %s" % (line, instr[1]))
elif op == 'FUNC':
print("%s DEF %s(%s) = %s" % (line,instr[1],instr[2],self.expr_str(instr[3])))
elif op == 'DIM':
_out = "%s DIM " % line
first = 1
for vname,x,y in instr[1]:
if not first: _out += ","
first = 0
if y == 0:
_out += "%s(%d)" % (vname,x)
else:
_out += "%s(%d,%d)" % (vname,x,y)
print(_out)
elif op == 'DATA':
_out = "%s DATA " % line
first = 1
for v in instr[1]:
if not first: _out += ","
first = 0
_out += v
print(_out)
# Erase the current program
def new(self):
self.prog = {}
# Insert statements
def add_statements(self,prog):
for line,stat in prog.items():
self.prog[line] = stat
# Delete a statement
def del_line(self,lineno):
try:
del self.prog[lineno]
except KeyError:
pass
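# Minimal usage sketch (illustrative only): this class expects a program
# dictionary of {line_number: parsed_statement} such as the one produced
# by the companion basparse.py module from the same PLY example.
#   import basparse
#   prog = basparse.parse(open("hello.bas").read())
#   BasicInterpreter(prog).run()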
|
gpl-2.0
| 6,948,300,852,361,839,000 | -6,248,953,323,772,049,000 | 38.192744 | 125 | 0.423224 | false |
ptonner/GPy
|
GPy/inference/latent_function_inference/var_gauss.py
|
15
|
2641
|
# Copyright (c) 2015, James Hensman
# Licensed under the BSD 3-clause license (see LICENSE.txt)
import numpy as np
from ...util.linalg import pdinv
from .posterior import Posterior
from . import LatentFunctionInference
log_2_pi = np.log(2*np.pi)
class VarGauss(LatentFunctionInference):
"""
The Variational Gaussian Approximation revisited
@article{Opper:2009,
title = {The Variational Gaussian Approximation Revisited},
author = {Opper, Manfred and Archambeau, C{\'e}dric},
journal = {Neural Comput.},
year = {2009},
pages = {786--792},
}
"""
def __init__(self, alpha, beta):
"""
        :param alpha: GPy.core.Param variational parameter
        :param beta: GPy.core.Param variational parameter
"""
self.alpha, self.beta = alpha, beta
def inference(self, kern, X, likelihood, Y, mean_function=None, Y_metadata=None, Z=None):
if mean_function is not None:
raise NotImplementedError
num_data, output_dim = Y.shape
assert output_dim ==1, "Only one output supported"
K = kern.K(X)
m = K.dot(self.alpha)
KB = K*self.beta[:, None]
BKB = KB*self.beta[None, :]
A = np.eye(num_data) + BKB
Ai, LA, _, Alogdet = pdinv(A)
        Sigma = np.diag(self.beta**-2) - Ai/self.beta[:, None]/self.beta[None, :] # posterior covariance: need full matrix for gradients
var = np.diag(Sigma).reshape(-1,1)
F, dF_dm, dF_dv, dF_dthetaL = likelihood.variational_expectations(Y, m, var, Y_metadata=Y_metadata)
if dF_dthetaL is not None:
dL_dthetaL = dF_dthetaL.sum(1).sum(1)
else:
dL_dthetaL = np.array([])
dF_da = np.dot(K, dF_dm)
SigmaB = Sigma*self.beta
#dF_db_ = -np.diag(Sigma.dot(np.diag(dF_dv.flatten())).dot(SigmaB))*2
dF_db = -2*np.sum(Sigma**2 * (dF_dv * self.beta), 0)
#assert np.allclose(dF_db, dF_db_)
KL = 0.5*(Alogdet + np.trace(Ai) - num_data + np.sum(m*self.alpha))
dKL_da = m
A_A2 = Ai - Ai.dot(Ai)
dKL_db = np.diag(np.dot(KB.T, A_A2))
log_marginal = F.sum() - KL
self.alpha.gradient = dF_da - dKL_da
self.beta.gradient = dF_db - dKL_db
# K-gradients
dKL_dK = 0.5*(self.alpha*self.alpha.T + self.beta[:, None]*self.beta[None, :]*A_A2)
tmp = Ai*self.beta[:, None]/self.beta[None, :]
dF_dK = self.alpha*dF_dm.T + np.dot(tmp*dF_dv, tmp.T)
return Posterior(mean=m, cov=Sigma ,K=K),\
log_marginal,\
{'dL_dK':dF_dK-dKL_dK, 'dL_dthetaL':dL_dthetaL}
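# Clarifying note: alpha and beta are free variational parameters with one
# entry per training point. The approximate posterior mean is K.dot(alpha),
# and beta shapes the posterior covariance through A = I + diag(beta) K diag(beta)
# as computed above. Both are expected to be GPy.core.Param objects so the
# .gradient values assigned in inference() are visible to the model's optimizer.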
|
bsd-3-clause
| 5,981,950,182,193,212,000 | 596,377,831,073,864,000 | 37.275362 | 138 | 0.578947 | false |
ppanczyk/ansible
|
contrib/inventory/softlayer.py
|
29
|
7171
|
#!/usr/bin/env python
"""
SoftLayer external inventory script.
The SoftLayer Python API client is required. Use `pip install softlayer` to install it.
You have a few different options for configuring your username and api_key. You can pass
environment variables (SL_USERNAME and SL_API_KEY). You can also write INI file to
~/.softlayer or /etc/softlayer.conf. For more information see the SL API at:
- https://softlayer-python.readthedocs.org/en/latest/config_file.html
The SoftLayer Python client has a built in command for saving this configuration file
via the command `sl config setup`.
"""
# Copyright (C) 2014 AJ Bourg <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# I found the structure of the ec2.py script very helpful as an example
# as I put this together. Thanks to whoever wrote that script!
#
import SoftLayer
import re
import argparse
import itertools
try:
import json
except:
import simplejson as json
class SoftLayerInventory(object):
common_items = [
'id',
'globalIdentifier',
'hostname',
'domain',
'fullyQualifiedDomainName',
'primaryBackendIpAddress',
'primaryIpAddress',
'datacenter',
'tagReferences.tag.name',
'userData.value',
]
vs_items = [
'lastKnownPowerState.name',
'powerState',
'maxCpu',
'maxMemory',
'activeTransaction.transactionStatus[friendlyName,name]',
'status',
]
hw_items = [
'hardwareStatusId',
'processorPhysicalCoreAmount',
'memoryCapacity',
]
def _empty_inventory(self):
return {"_meta": {"hostvars": {}}}
def __init__(self):
'''Main path'''
self.inventory = self._empty_inventory()
self.parse_options()
if self.args.list:
self.get_all_servers()
print(self.json_format_dict(self.inventory, True))
elif self.args.host:
self.get_virtual_servers()
print(self.json_format_dict(self.inventory["_meta"]["hostvars"][self.args.host], True))
def to_safe(self, word):
'''Converts 'bad' characters in a string to underscores so they can be used as Ansible groups'''
return re.sub("[^A-Za-z0-9\-\.]", "_", word)
def push(self, my_dict, key, element):
'''Push an element onto an array that may not have been defined in the dict'''
if key in my_dict:
my_dict[key].append(element)
else:
my_dict[key] = [element]
def parse_options(self):
'''Parse all the arguments from the CLI'''
parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on SoftLayer')
parser.add_argument('--list', action='store_true', default=False,
help='List instances (default: False)')
parser.add_argument('--host', action='store',
help='Get all the variables about a specific instance')
self.args = parser.parse_args()
def json_format_dict(self, data, pretty=False):
'''Converts a dict to a JSON object and dumps it as a formatted string'''
if pretty:
return json.dumps(data, sort_keys=True, indent=2)
else:
return json.dumps(data)
def process_instance(self, instance, instance_type="virtual"):
'''Populate the inventory dictionary with any instance information'''
# only want active instances
if 'status' in instance and instance['status']['name'] != 'Active':
return
# and powered on instances
if 'powerState' in instance and instance['powerState']['name'] != 'Running':
return
# 5 is active for hardware... see https://forums.softlayer.com/forum/softlayer-developer-network/general-discussion/2955-hardwarestatusid
if 'hardwareStatusId' in instance and instance['hardwareStatusId'] != 5:
return
# if there's no IP address, we can't reach it
if 'primaryIpAddress' not in instance:
return
instance['userData'] = instance['userData'][0]['value'] if instance['userData'] else ''
dest = instance['primaryIpAddress']
self.inventory["_meta"]["hostvars"][dest] = instance
# Inventory: group by memory
if 'maxMemory' in instance:
self.push(self.inventory, self.to_safe('memory_' + str(instance['maxMemory'])), dest)
elif 'memoryCapacity' in instance:
self.push(self.inventory, self.to_safe('memory_' + str(instance['memoryCapacity'])), dest)
# Inventory: group by cpu count
if 'maxCpu' in instance:
self.push(self.inventory, self.to_safe('cpu_' + str(instance['maxCpu'])), dest)
elif 'processorPhysicalCoreAmount' in instance:
self.push(self.inventory, self.to_safe('cpu_' + str(instance['processorPhysicalCoreAmount'])), dest)
# Inventory: group by datacenter
self.push(self.inventory, self.to_safe('datacenter_' + instance['datacenter']['name']), dest)
# Inventory: group by hostname
self.push(self.inventory, self.to_safe(instance['hostname']), dest)
# Inventory: group by FQDN
self.push(self.inventory, self.to_safe(instance['fullyQualifiedDomainName']), dest)
# Inventory: group by domain
self.push(self.inventory, self.to_safe(instance['domain']), dest)
# Inventory: group by type (hardware/virtual)
self.push(self.inventory, instance_type, dest)
# Inventory: group by tag
for tag in instance['tagReferences']:
self.push(self.inventory, tag['tag']['name'], dest)
def get_virtual_servers(self):
'''Get all the CCI instances'''
vs = SoftLayer.VSManager(self.client)
mask = "mask[%s]" % ','.join(itertools.chain(self.common_items, self.vs_items))
instances = vs.list_instances(mask=mask)
for instance in instances:
self.process_instance(instance)
def get_physical_servers(self):
'''Get all the hardware instances'''
hw = SoftLayer.HardwareManager(self.client)
mask = "mask[%s]" % ','.join(itertools.chain(self.common_items, self.hw_items))
instances = hw.list_hardware(mask=mask)
for instance in instances:
self.process_instance(instance, 'hardware')
def get_all_servers(self):
self.client = SoftLayer.Client()
self.get_virtual_servers()
self.get_physical_servers()
SoftLayerInventory()
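# Typical use as a dynamic inventory (illustrative; the script must be
# executable and credentials configured as described in the header):
#   ./softlayer.py --list
#   ansible -i softlayer.py all -m ping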
|
gpl-3.0
| -4,504,826,842,891,985,400 | 7,052,805,767,310,280,000 | 34.676617 | 145 | 0.640915 | false |
abergeron/DeepLearningTutorials
|
code/convolutional_mlp.py
|
4
|
12722
|
"""This tutorial introduces the LeNet5 neural network architecture
using Theano. LeNet5 is a convolutional neural network, good for
classifying images. This tutorial shows how to build the architecture,
and comes with all the hyper-parameters you need to reproduce the
paper's MNIST results.
This implementation simplifies the model in the following ways:
- LeNetConvPool doesn't implement location-specific gain and bias parameters
- LeNetConvPool doesn't implement pooling by average, it implements pooling
by max.
- Digit classification is implemented with a logistic regression rather than
an RBF network
 - LeNet5 did not use fully-connected convolutions at the second layer
References:
- Y. LeCun, L. Bottou, Y. Bengio and P. Haffner:
Gradient-Based Learning Applied to Document
Recognition, Proceedings of the IEEE, 86(11):2278-2324, November 1998.
http://yann.lecun.com/exdb/publis/pdf/lecun-98.pdf
"""
from __future__ import print_function
import os
import sys
import timeit
import numpy
import theano
import theano.tensor as T
from theano.tensor.signal import pool
from theano.tensor.nnet import conv2d
from logistic_sgd import LogisticRegression, load_data
from mlp import HiddenLayer
class LeNetConvPoolLayer(object):
"""Pool Layer of a convolutional network """
def __init__(self, rng, input, filter_shape, image_shape, poolsize=(2, 2)):
"""
Allocate a LeNetConvPoolLayer with shared variable internal parameters.
:type rng: numpy.random.RandomState
:param rng: a random number generator used to initialize weights
:type input: theano.tensor.dtensor4
:param input: symbolic image tensor, of shape image_shape
:type filter_shape: tuple or list of length 4
:param filter_shape: (number of filters, num input feature maps,
filter height, filter width)
:type image_shape: tuple or list of length 4
:param image_shape: (batch size, num input feature maps,
image height, image width)
:type poolsize: tuple or list of length 2
:param poolsize: the downsampling (pooling) factor (#rows, #cols)
"""
assert image_shape[1] == filter_shape[1]
self.input = input
# there are "num input feature maps * filter height * filter width"
# inputs to each hidden unit
fan_in = numpy.prod(filter_shape[1:])
# each unit in the lower layer receives a gradient from:
# "num output feature maps * filter height * filter width" /
# pooling size
fan_out = (filter_shape[0] * numpy.prod(filter_shape[2:]) //
numpy.prod(poolsize))
# initialize weights with random weights
W_bound = numpy.sqrt(6. / (fan_in + fan_out))
self.W = theano.shared(
numpy.asarray(
rng.uniform(low=-W_bound, high=W_bound, size=filter_shape),
dtype=theano.config.floatX
),
borrow=True
)
# the bias is a 1D tensor -- one bias per output feature map
b_values = numpy.zeros((filter_shape[0],), dtype=theano.config.floatX)
self.b = theano.shared(value=b_values, borrow=True)
# convolve input feature maps with filters
conv_out = conv2d(
input=input,
filters=self.W,
filter_shape=filter_shape,
input_shape=image_shape
)
# pool each feature map individually, using maxpooling
pooled_out = pool.pool_2d(
input=conv_out,
ds=poolsize,
ignore_border=True
)
# add the bias term. Since the bias is a vector (1D array), we first
# reshape it to a tensor of shape (1, n_filters, 1, 1). Each bias will
# thus be broadcasted across mini-batches and feature map
# width & height
self.output = T.tanh(pooled_out + self.b.dimshuffle('x', 0, 'x', 'x'))
# store parameters of this layer
self.params = [self.W, self.b]
# keep track of model input
self.input = input
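# Shape walk-through for one LeNetConvPoolLayer (illustrative numbers
# matching the first layer built in evaluate_lenet5 below):
#   input         : (batch_size, 1, 28, 28)
#   after conv    : 5x5 filters  -> (batch_size, nkerns[0], 24, 24)
#   after pooling : 2x2 max pool -> (batch_size, nkerns[0], 12, 12)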
def evaluate_lenet5(learning_rate=0.1, n_epochs=200,
dataset='mnist.pkl.gz',
nkerns=[20, 50], batch_size=500):
""" Demonstrates lenet on MNIST dataset
:type learning_rate: float
:param learning_rate: learning rate used (factor for the stochastic
gradient)
:type n_epochs: int
:param n_epochs: maximal number of epochs to run the optimizer
:type dataset: string
:param dataset: path to the dataset used for training /testing (MNIST here)
:type nkerns: list of ints
:param nkerns: number of kernels on each layer
"""
rng = numpy.random.RandomState(23455)
datasets = load_data(dataset)
train_set_x, train_set_y = datasets[0]
valid_set_x, valid_set_y = datasets[1]
test_set_x, test_set_y = datasets[2]
# compute number of minibatches for training, validation and testing
n_train_batches = train_set_x.get_value(borrow=True).shape[0]
n_valid_batches = valid_set_x.get_value(borrow=True).shape[0]
n_test_batches = test_set_x.get_value(borrow=True).shape[0]
n_train_batches //= batch_size
n_valid_batches //= batch_size
n_test_batches //= batch_size
# allocate symbolic variables for the data
index = T.lscalar() # index to a [mini]batch
# start-snippet-1
x = T.matrix('x') # the data is presented as rasterized images
y = T.ivector('y') # the labels are presented as 1D vector of
# [int] labels
######################
# BUILD ACTUAL MODEL #
######################
print('... building the model')
# Reshape matrix of rasterized images of shape (batch_size, 28 * 28)
# to a 4D tensor, compatible with our LeNetConvPoolLayer
# (28, 28) is the size of MNIST images.
layer0_input = x.reshape((batch_size, 1, 28, 28))
# Construct the first convolutional pooling layer:
# filtering reduces the image size to (28-5+1 , 28-5+1) = (24, 24)
# maxpooling reduces this further to (24/2, 24/2) = (12, 12)
# 4D output tensor is thus of shape (batch_size, nkerns[0], 12, 12)
layer0 = LeNetConvPoolLayer(
rng,
input=layer0_input,
image_shape=(batch_size, 1, 28, 28),
filter_shape=(nkerns[0], 1, 5, 5),
poolsize=(2, 2)
)
# Construct the second convolutional pooling layer
# filtering reduces the image size to (12-5+1, 12-5+1) = (8, 8)
# maxpooling reduces this further to (8/2, 8/2) = (4, 4)
# 4D output tensor is thus of shape (batch_size, nkerns[1], 4, 4)
layer1 = LeNetConvPoolLayer(
rng,
input=layer0.output,
image_shape=(batch_size, nkerns[0], 12, 12),
filter_shape=(nkerns[1], nkerns[0], 5, 5),
poolsize=(2, 2)
)
# the HiddenLayer being fully-connected, it operates on 2D matrices of
# shape (batch_size, num_pixels) (i.e matrix of rasterized images).
# This will generate a matrix of shape (batch_size, nkerns[1] * 4 * 4),
# or (500, 50 * 4 * 4) = (500, 800) with the default values.
layer2_input = layer1.output.flatten(2)
# construct a fully-connected sigmoidal layer
layer2 = HiddenLayer(
rng,
input=layer2_input,
n_in=nkerns[1] * 4 * 4,
n_out=500,
activation=T.tanh
)
# classify the values of the fully-connected sigmoidal layer
layer3 = LogisticRegression(input=layer2.output, n_in=500, n_out=10)
# the cost we minimize during training is the NLL of the model
cost = layer3.negative_log_likelihood(y)
# create a function to compute the mistakes that are made by the model
test_model = theano.function(
[index],
layer3.errors(y),
givens={
x: test_set_x[index * batch_size: (index + 1) * batch_size],
y: test_set_y[index * batch_size: (index + 1) * batch_size]
}
)
validate_model = theano.function(
[index],
layer3.errors(y),
givens={
x: valid_set_x[index * batch_size: (index + 1) * batch_size],
y: valid_set_y[index * batch_size: (index + 1) * batch_size]
}
)
# create a list of all model parameters to be fit by gradient descent
params = layer3.params + layer2.params + layer1.params + layer0.params
# create a list of gradients for all model parameters
grads = T.grad(cost, params)
# train_model is a function that updates the model parameters by
# SGD Since this model has many parameters, it would be tedious to
# manually create an update rule for each model parameter. We thus
# create the updates list by automatically looping over all
# (params[i], grads[i]) pairs.
updates = [
(param_i, param_i - learning_rate * grad_i)
for param_i, grad_i in zip(params, grads)
]
train_model = theano.function(
[index],
cost,
updates=updates,
givens={
x: train_set_x[index * batch_size: (index + 1) * batch_size],
y: train_set_y[index * batch_size: (index + 1) * batch_size]
}
)
# end-snippet-1
###############
# TRAIN MODEL #
###############
print('... training')
# early-stopping parameters
patience = 10000 # look as this many examples regardless
patience_increase = 2 # wait this much longer when a new best is
# found
improvement_threshold = 0.995 # a relative improvement of this much is
# considered significant
validation_frequency = min(n_train_batches, patience // 2)
# go through this many
# minibatche before checking the network
# on the validation set; in this case we
# check every epoch
best_validation_loss = numpy.inf
best_iter = 0
test_score = 0.
start_time = timeit.default_timer()
epoch = 0
done_looping = False
while (epoch < n_epochs) and (not done_looping):
epoch = epoch + 1
for minibatch_index in range(n_train_batches):
iter = (epoch - 1) * n_train_batches + minibatch_index
if iter % 100 == 0:
print('training @ iter = ', iter)
cost_ij = train_model(minibatch_index)
if (iter + 1) % validation_frequency == 0:
# compute zero-one loss on validation set
validation_losses = [validate_model(i) for i
in range(n_valid_batches)]
this_validation_loss = numpy.mean(validation_losses)
print('epoch %i, minibatch %i/%i, validation error %f %%' %
(epoch, minibatch_index + 1, n_train_batches,
this_validation_loss * 100.))
# if we got the best validation score until now
if this_validation_loss < best_validation_loss:
#improve patience if loss improvement is good enough
if this_validation_loss < best_validation_loss * \
improvement_threshold:
patience = max(patience, iter * patience_increase)
# save best validation score and iteration number
best_validation_loss = this_validation_loss
best_iter = iter
# test it on the test set
test_losses = [
test_model(i)
for i in range(n_test_batches)
]
test_score = numpy.mean(test_losses)
print((' epoch %i, minibatch %i/%i, test error of '
'best model %f %%') %
(epoch, minibatch_index + 1, n_train_batches,
test_score * 100.))
if patience <= iter:
done_looping = True
break
end_time = timeit.default_timer()
print('Optimization complete.')
print('Best validation score of %f %% obtained at iteration %i, '
'with test performance %f %%' %
(best_validation_loss * 100., best_iter + 1, test_score * 100.))
print(('The code for file ' +
os.path.split(__file__)[1] +
' ran for %.2fm' % ((end_time - start_time) / 60.)), file=sys.stderr)
if __name__ == '__main__':
evaluate_lenet5()
def experiment(state, channel):
evaluate_lenet5(state.learning_rate, dataset=state.dataset)
|
bsd-3-clause
| -8,219,740,829,340,197,000 | -2,375,987,750,685,055,000 | 35.452722 | 80 | 0.591495 | false |
simbha/mAngE-Gin
|
lib/django/contrib/formtools/wizard/storage/base.py
|
79
|
4920
|
from django.core.files.uploadedfile import UploadedFile
from django.utils.datastructures import MultiValueDict
from django.utils.functional import lazy_property
from django.utils import six
from django.contrib.formtools.wizard.storage.exceptions import NoFileStorageConfigured
class BaseStorage(object):
step_key = 'step'
step_data_key = 'step_data'
step_files_key = 'step_files'
extra_data_key = 'extra_data'
def __init__(self, prefix, request=None, file_storage=None):
self.prefix = 'wizard_%s' % prefix
self.request = request
self.file_storage = file_storage
self._files = {}
self._tmp_files = []
def init_data(self):
self.data = {
self.step_key: None,
self.step_data_key: {},
self.step_files_key: {},
self.extra_data_key: {},
}
def reset(self):
# Store unused temporary file names in order to delete them
# at the end of the response cycle through a callback attached in
# `update_response`.
wizard_files = self.data[self.step_files_key]
for step_files in six.itervalues(wizard_files):
for step_file in six.itervalues(step_files):
self._tmp_files.append(step_file['tmp_name'])
self.init_data()
def _get_current_step(self):
return self.data[self.step_key]
def _set_current_step(self, step):
self.data[self.step_key] = step
current_step = lazy_property(_get_current_step, _set_current_step)
def _get_extra_data(self):
return self.data[self.extra_data_key]
def _set_extra_data(self, extra_data):
self.data[self.extra_data_key] = extra_data
extra_data = lazy_property(_get_extra_data, _set_extra_data)
def get_step_data(self, step):
# When reading the serialized data, upconvert it to a MultiValueDict,
# some serializers (json) don't preserve the type of the object.
values = self.data[self.step_data_key].get(step, None)
if values is not None:
values = MultiValueDict(values)
return values
def set_step_data(self, step, cleaned_data):
# If the value is a MultiValueDict, convert it to a regular dict of the
# underlying contents. Some serializers call the public API on it (as
# opposed to the underlying dict methods), in which case the content
# can be truncated (__getitem__ returns only the first item).
if isinstance(cleaned_data, MultiValueDict):
cleaned_data = dict(cleaned_data.lists())
self.data[self.step_data_key][step] = cleaned_data
@property
def current_step_data(self):
return self.get_step_data(self.current_step)
def get_step_files(self, step):
wizard_files = self.data[self.step_files_key].get(step, {})
if wizard_files and not self.file_storage:
raise NoFileStorageConfigured(
"You need to define 'file_storage' in your "
"wizard view in order to handle file uploads.")
files = {}
for field, field_dict in six.iteritems(wizard_files):
field_dict = field_dict.copy()
tmp_name = field_dict.pop('tmp_name')
if (step, field) not in self._files:
self._files[(step, field)] = UploadedFile(
file=self.file_storage.open(tmp_name), **field_dict)
files[field] = self._files[(step, field)]
return files or None
def set_step_files(self, step, files):
if files and not self.file_storage:
raise NoFileStorageConfigured(
"You need to define 'file_storage' in your "
"wizard view in order to handle file uploads.")
if step not in self.data[self.step_files_key]:
self.data[self.step_files_key][step] = {}
for field, field_file in six.iteritems(files or {}):
tmp_filename = self.file_storage.save(field_file.name, field_file)
file_dict = {
'tmp_name': tmp_filename,
'name': field_file.name,
'content_type': field_file.content_type,
'size': field_file.size,
'charset': field_file.charset
}
self.data[self.step_files_key][step][field] = file_dict
@property
def current_step_files(self):
return self.get_step_files(self.current_step)
def update_response(self, response):
def post_render_callback(response):
for file in self._files.values():
if not file.closed:
file.close()
for tmp_file in self._tmp_files:
self.file_storage.delete(tmp_file)
if hasattr(response, 'render'):
response.add_post_render_callback(post_render_callback)
else:
post_render_callback(response)
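# Illustrative backend sketch (an assumption for clarity, not the shipped
# code): a concrete storage backend only needs to persist self.data between
# requests, for example by exposing it as a property backed by the session:
#   class SessionStorageSketch(BaseStorage):
#       def __init__(self, *args, **kwargs):
#           super(SessionStorageSketch, self).__init__(*args, **kwargs)
#           if self.prefix not in self.request.session:
#               self.init_data()
#       def _get_data(self):
#           self.request.session.modified = True
#           return self.request.session[self.prefix]
#       def _set_data(self, value):
#           self.request.session[self.prefix] = value
#       data = property(_get_data, _set_data)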
|
mit
| 1,009,388,237,647,608,700 | -7,781,959,831,829,291,000 | 36.846154 | 86 | 0.606098 | false |
ma314smith/home-assistant
|
homeassistant/components/notify/command_line.py
|
11
|
1562
|
"""
Support for command line notification services.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/notify.command_line/
"""
import logging
import subprocess
import voluptuous as vol
from homeassistant.const import (CONF_COMMAND, CONF_NAME)
from homeassistant.components.notify import (
BaseNotificationService, PLATFORM_SCHEMA)
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_COMMAND): cv.string,
vol.Optional(CONF_NAME): cv.string,
})
def get_service(hass, config):
"""Get the Command Line notification service."""
command = config[CONF_COMMAND]
return CommandLineNotificationService(command)
class CommandLineNotificationService(BaseNotificationService):
"""Implement the notification service for the Command Line service."""
def __init__(self, command):
"""Initialize the service."""
self.command = command
def send_message(self, message="", **kwargs):
"""Send a message to a command line."""
try:
proc = subprocess.Popen(self.command, universal_newlines=True,
stdin=subprocess.PIPE, shell=True)
proc.communicate(input=message)
if proc.returncode != 0:
_LOGGER.error('Command failed: %s', self.command)
except subprocess.SubprocessError:
_LOGGER.error('Error trying to exec Command: %s', self.command)
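# Illustrative configuration.yaml entry for this platform (a sketch; the
# key names mirror CONF_COMMAND and CONF_NAME validated above):
#   notify:
#     - platform: command_line
#       name: my_command_notifier
#       command: "logger -t ha_notify"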
|
mit
| 6,591,252,021,202,107,000 | -8,501,407,234,914,979,000 | 31.541667 | 75 | 0.690141 | false |
seanmonstar/servo
|
src/components/script/dom/bindings/codegen/parser/tests/test_attr.py
|
106
|
12541
|
import WebIDL
def WebIDLTest(parser, harness):
testData = [("::TestAttr%s::b", "b", "Byte%s", False),
("::TestAttr%s::rb", "rb", "Byte%s", True),
("::TestAttr%s::o", "o", "Octet%s", False),
("::TestAttr%s::ro", "ro", "Octet%s", True),
("::TestAttr%s::s", "s", "Short%s", False),
("::TestAttr%s::rs", "rs", "Short%s", True),
("::TestAttr%s::us", "us", "UnsignedShort%s", False),
("::TestAttr%s::rus", "rus", "UnsignedShort%s", True),
("::TestAttr%s::l", "l", "Long%s", False),
("::TestAttr%s::rl", "rl", "Long%s", True),
("::TestAttr%s::ul", "ul", "UnsignedLong%s", False),
("::TestAttr%s::rul", "rul", "UnsignedLong%s", True),
("::TestAttr%s::ll", "ll", "LongLong%s", False),
("::TestAttr%s::rll", "rll", "LongLong%s", True),
("::TestAttr%s::ull", "ull", "UnsignedLongLong%s", False),
("::TestAttr%s::rull", "rull", "UnsignedLongLong%s", True),
("::TestAttr%s::str", "str", "String%s", False),
("::TestAttr%s::rstr", "rstr", "String%s", True),
("::TestAttr%s::obj", "obj", "Object%s", False),
("::TestAttr%s::robj", "robj", "Object%s", True),
("::TestAttr%s::object", "object", "Object%s", False),
("::TestAttr%s::f", "f", "Float%s", False),
("::TestAttr%s::rf", "rf", "Float%s", True)]
parser.parse("""
interface TestAttr {
attribute byte b;
readonly attribute byte rb;
attribute octet o;
readonly attribute octet ro;
attribute short s;
readonly attribute short rs;
attribute unsigned short us;
readonly attribute unsigned short rus;
attribute long l;
readonly attribute long rl;
attribute unsigned long ul;
readonly attribute unsigned long rul;
attribute long long ll;
readonly attribute long long rll;
attribute unsigned long long ull;
readonly attribute unsigned long long rull;
attribute DOMString str;
readonly attribute DOMString rstr;
attribute object obj;
readonly attribute object robj;
attribute object _object;
attribute float f;
readonly attribute float rf;
};
interface TestAttrNullable {
attribute byte? b;
readonly attribute byte? rb;
attribute octet? o;
readonly attribute octet? ro;
attribute short? s;
readonly attribute short? rs;
attribute unsigned short? us;
readonly attribute unsigned short? rus;
attribute long? l;
readonly attribute long? rl;
attribute unsigned long? ul;
readonly attribute unsigned long? rul;
attribute long long? ll;
readonly attribute long long? rll;
attribute unsigned long long? ull;
readonly attribute unsigned long long? rull;
attribute DOMString? str;
readonly attribute DOMString? rstr;
attribute object? obj;
readonly attribute object? robj;
attribute object? _object;
attribute float? f;
readonly attribute float? rf;
};
interface TestAttrArray {
attribute byte[] b;
readonly attribute byte[] rb;
attribute octet[] o;
readonly attribute octet[] ro;
attribute short[] s;
readonly attribute short[] rs;
attribute unsigned short[] us;
readonly attribute unsigned short[] rus;
attribute long[] l;
readonly attribute long[] rl;
attribute unsigned long[] ul;
readonly attribute unsigned long[] rul;
attribute long long[] ll;
readonly attribute long long[] rll;
attribute unsigned long long[] ull;
readonly attribute unsigned long long[] rull;
attribute DOMString[] str;
readonly attribute DOMString[] rstr;
attribute object[] obj;
readonly attribute object[] robj;
attribute object[] _object;
attribute float[] f;
readonly attribute float[] rf;
};
interface TestAttrNullableArray {
attribute byte[]? b;
readonly attribute byte[]? rb;
attribute octet[]? o;
readonly attribute octet[]? ro;
attribute short[]? s;
readonly attribute short[]? rs;
attribute unsigned short[]? us;
readonly attribute unsigned short[]? rus;
attribute long[]? l;
readonly attribute long[]? rl;
attribute unsigned long[]? ul;
readonly attribute unsigned long[]? rul;
attribute long long[]? ll;
readonly attribute long long[]? rll;
attribute unsigned long long[]? ull;
readonly attribute unsigned long long[]? rull;
attribute DOMString[]? str;
readonly attribute DOMString[]? rstr;
attribute object[]? obj;
readonly attribute object[]? robj;
attribute object[]? _object;
attribute float[]? f;
readonly attribute float[]? rf;
};
interface TestAttrArrayOfNullableTypes {
attribute byte?[] b;
readonly attribute byte?[] rb;
attribute octet?[] o;
readonly attribute octet?[] ro;
attribute short?[] s;
readonly attribute short?[] rs;
attribute unsigned short?[] us;
readonly attribute unsigned short?[] rus;
attribute long?[] l;
readonly attribute long?[] rl;
attribute unsigned long?[] ul;
readonly attribute unsigned long?[] rul;
attribute long long?[] ll;
readonly attribute long long?[] rll;
attribute unsigned long long?[] ull;
readonly attribute unsigned long long?[] rull;
attribute DOMString?[] str;
readonly attribute DOMString?[] rstr;
attribute object?[] obj;
readonly attribute object?[] robj;
attribute object?[] _object;
attribute float?[] f;
readonly attribute float?[] rf;
};
interface TestAttrNullableArrayOfNullableTypes {
attribute byte?[]? b;
readonly attribute byte?[]? rb;
attribute octet?[]? o;
readonly attribute octet?[]? ro;
attribute short?[]? s;
readonly attribute short?[]? rs;
attribute unsigned short?[]? us;
readonly attribute unsigned short?[]? rus;
attribute long?[]? l;
readonly attribute long?[]? rl;
attribute unsigned long?[]? ul;
readonly attribute unsigned long?[]? rul;
attribute long long?[]? ll;
readonly attribute long long?[]? rll;
attribute unsigned long long?[]? ull;
readonly attribute unsigned long long?[]? rull;
attribute DOMString?[]? str;
readonly attribute DOMString?[]? rstr;
attribute object?[]? obj;
readonly attribute object?[]? robj;
attribute object?[]? _object;
attribute float?[]? f;
readonly attribute float?[]? rf;
};
""")
results = parser.finish()
def checkAttr(attr, QName, name, type, readonly):
harness.ok(isinstance(attr, WebIDL.IDLAttribute),
"Should be an IDLAttribute")
harness.ok(attr.isAttr(), "Attr is an Attr")
harness.ok(not attr.isMethod(), "Attr is not an method")
harness.ok(not attr.isConst(), "Attr is not a const")
harness.check(attr.identifier.QName(), QName, "Attr has the right QName")
harness.check(attr.identifier.name, name, "Attr has the right name")
harness.check(str(attr.type), type, "Attr has the right type")
harness.check(attr.readonly, readonly, "Attr's readonly state is correct")
harness.ok(True, "TestAttr interface parsed without error.")
harness.check(len(results), 6, "Should be six productions.")
iface = results[0]
harness.ok(isinstance(iface, WebIDL.IDLInterface),
"Should be an IDLInterface")
harness.check(iface.identifier.QName(), "::TestAttr", "Interface has the right QName")
harness.check(iface.identifier.name, "TestAttr", "Interface has the right name")
harness.check(len(iface.members), len(testData), "Expect %s members" % len(testData))
attrs = iface.members
for i in range(len(attrs)):
data = testData[i]
attr = attrs[i]
(QName, name, type, readonly) = data
checkAttr(attr, QName % "", name, type % "", readonly)
iface = results[1]
harness.ok(isinstance(iface, WebIDL.IDLInterface),
"Should be an IDLInterface")
harness.check(iface.identifier.QName(), "::TestAttrNullable", "Interface has the right QName")
harness.check(iface.identifier.name, "TestAttrNullable", "Interface has the right name")
harness.check(len(iface.members), len(testData), "Expect %s members" % len(testData))
attrs = iface.members
for i in range(len(attrs)):
data = testData[i]
attr = attrs[i]
(QName, name, type, readonly) = data
checkAttr(attr, QName % "Nullable", name, type % "OrNull", readonly)
iface = results[2]
harness.ok(isinstance(iface, WebIDL.IDLInterface),
"Should be an IDLInterface")
harness.check(iface.identifier.QName(), "::TestAttrArray", "Interface has the right QName")
harness.check(iface.identifier.name, "TestAttrArray", "Interface has the right name")
harness.check(len(iface.members), len(testData), "Expect %s members" % len(testData))
attrs = iface.members
for i in range(len(attrs)):
data = testData[i]
attr = attrs[i]
(QName, name, type, readonly) = data
checkAttr(attr, QName % "Array", name, type % "Array", readonly)
iface = results[3]
harness.ok(isinstance(iface, WebIDL.IDLInterface),
"Should be an IDLInterface")
harness.check(iface.identifier.QName(), "::TestAttrNullableArray", "Interface has the right QName")
harness.check(iface.identifier.name, "TestAttrNullableArray", "Interface has the right name")
harness.check(len(iface.members), len(testData), "Expect %s members" % len(testData))
attrs = iface.members
for i in range(len(attrs)):
data = testData[i]
attr = attrs[i]
(QName, name, type, readonly) = data
checkAttr(attr, QName % "NullableArray", name, type % "ArrayOrNull", readonly)
iface = results[4]
harness.ok(isinstance(iface, WebIDL.IDLInterface),
"Should be an IDLInterface")
harness.check(iface.identifier.QName(), "::TestAttrArrayOfNullableTypes", "Interface has the right QName")
harness.check(iface.identifier.name, "TestAttrArrayOfNullableTypes", "Interface has the right name")
harness.check(len(iface.members), len(testData), "Expect %s members" % len(testData))
attrs = iface.members
for i in range(len(attrs)):
data = testData[i]
attr = attrs[i]
(QName, name, type, readonly) = data
checkAttr(attr, QName % "ArrayOfNullableTypes", name, type % "OrNullArray", readonly)
iface = results[5]
harness.ok(isinstance(iface, WebIDL.IDLInterface),
"Should be an IDLInterface")
harness.check(iface.identifier.QName(), "::TestAttrNullableArrayOfNullableTypes", "Interface has the right QName")
harness.check(iface.identifier.name, "TestAttrNullableArrayOfNullableTypes", "Interface has the right name")
harness.check(len(iface.members), len(testData), "Expect %s members" % len(testData))
attrs = iface.members
for i in range(len(attrs)):
data = testData[i]
attr = attrs[i]
(QName, name, type, readonly) = data
checkAttr(attr, QName % "NullableArrayOfNullableTypes", name, type % "OrNullArrayOrNull", readonly)
parser = parser.reset()
threw = False
try:
parser.parse("""
interface A {
[SetterInfallible] readonly attribute boolean foo;
};
""")
results = parser.finish()
except Exception, x:
threw = True
harness.ok(threw, "Should not allow [SetterInfallible] on readonly attributes")
|
mpl-2.0
| -4,938,838,790,391,233,000 | -4,412,883,429,397,179,400 | 40.52649 | 118 | 0.587912 | false |
azumimuo/family-xbmc-addon
|
script.module.t0mm0.common/lib/t0mm0/common/addon.py
|
15
|
26656
|
'''
common XBMC Module
Copyright (C) 2011 t0mm0
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import cgi
import re
import os
try:
import cPickle as pickle
except:
import pickle
import unicodedata
import urllib
import xbmc
import xbmcaddon
import xbmcgui
import xbmcplugin
class Addon:
'''
This class provides a lot of code that is used across many XBMC addons
in the hope that it will simplify some of the common tasks an addon needs
to perform.
Mostly this is achieved by providing a wrapper around commonly used parts
of :mod:`xbmc`, :mod:`xbmcaddon`, :mod:`xbmcgui` and :mod:`xbmcplugin`.
You probably want to have exactly one instance of this class in your addon
which you can call from anywhere in your code.
Example::
import sys
from t0mm0.common.addon import Addon
addon = Addon('my.plugin.id', argv=sys.argv)
'''
def __init__(self, addon_id, argv=None):
'''
Args:
addon_id (str): Your addon's id (eg. 'plugin.video.t0mm0.test').
Kwargs:
argv (list): List of arguments passed to your addon if applicable
(eg. sys.argv).
'''
self.addon = xbmcaddon.Addon(id=addon_id)
if argv:
self.url = argv[0]
self.handle = int(argv[1])
self.queries = self.parse_query(argv[2][1:])
def get_author(self):
'''Returns the addon author as defined in ``addon.xml``.'''
return self.addon.getAddonInfo('author')
def get_changelog(self):
'''Returns the addon changelog.'''
return self.addon.getAddonInfo('changelog')
def get_description(self):
'''Returns the addon description as defined in ``addon.xml``.'''
return self.addon.getAddonInfo('description')
def get_disclaimer(self):
'''Returns the addon disclaimer as defined in ``addon.xml``.'''
return self.addon.getAddonInfo('disclaimer')
def get_fanart(self):
'''Returns the full path to the addon fanart.'''
return self.addon.getAddonInfo('fanart')
def get_icon(self):
'''Returns the full path to the addon icon.'''
return self.addon.getAddonInfo('icon')
def get_id(self):
'''Returns the addon id as defined in ``addon.xml``.'''
return self.addon.getAddonInfo('id')
def get_name(self):
'''Returns the addon name as defined in ``addon.xml``.'''
return self.addon.getAddonInfo('name')
def get_path(self):
'''Returns the full path to the addon directory.'''
return self.addon.getAddonInfo('path')
def get_profile(self):
'''
Returns the full path to the addon profile directory
(useful for storing files needed by the addon such as cookies).
'''
return xbmc.translatePath(self.addon.getAddonInfo('profile'))
def get_stars(self):
'''Returns the number of stars for this addon.'''
return self.addon.getAddonInfo('stars')
def get_summary(self):
'''Returns the addon summary as defined in ``addon.xml``.'''
return self.addon.getAddonInfo('summary')
def get_type(self):
'''
        Returns the addon type as defined in ``addon.xml``
(eg. xbmc.python.pluginsource).
'''
return self.addon.getAddonInfo('type')
def get_version(self):
'''Returns the addon version as defined in ``addon.xml``.'''
return self.addon.getAddonInfo('version')
def get_setting(self, setting):
'''
Returns an addon setting. Settings must be defined in your addon's
``resources/settings.xml`` file.
Args:
setting (str): Name of the setting to be retrieved.
Returns:
str containing the requested setting.
'''
return self.addon.getSetting(setting)
def get_string(self, string_id):
'''
Returns a localized string. Strings must be defined in your addon's
``resources/language/[lang_name]/strings.xml`` file.
Args:
string_id (int): id of the translated string to retrieve.
Returns:
str containing the localized requested string.
'''
return self.addon.getLocalizedString(string_id)
def parse_query(self, query, defaults={'mode': 'main'}):
'''
Parse a query string as used in a URL or passed to your addon by XBMC.
Example:
>>> addon.parse_query('name=test&type=basic')
{'mode': 'main', 'name': 'test', 'type': 'basic'}
Args:
query (str): A query string.
Kwargs:
            defaults (dict): Default key/value pairs that are included in the
            result unless overridden by values parsed from the query string.
        Returns:
            A dict of the parsed query. If a key is repeated in the query
            string its value will be a list containing all of that key's
            values.
'''
queries = cgi.parse_qs(query)
q = defaults
for key, value in queries.items():
if len(value) == 1:
q[key] = value[0]
else:
q[key] = value
return q
def build_plugin_url(self, queries):
'''
Returns a ``plugin://`` URL which can be used to call the addon with
the specified queries.
Example:
>>> addon.build_plugin_url({'name': 'test', 'type': 'basic'})
'plugin://your.plugin.id/?name=test&type=basic'
Args:
            queries (dict): A dictionary of keys/values to be added to the
``plugin://`` URL.
        Returns:
A string containing a fully formed ``plugin://`` URL.
'''
out_dict = {}
for k, v in queries.iteritems():
if isinstance(v, unicode):
v = v.encode('utf8')
elif isinstance(v, str):
# Must be encoded in UTF-8
v.decode('utf8')
out_dict[k] = v
return self.url + '?' + urllib.urlencode(out_dict)
def log(self, msg, level=xbmc.LOGNOTICE):
'''
Writes a string to the XBMC log file. The addon name is inserted into
the beginning of the message automatically to help you find relevent
messages in the log file.
The available log levels are defined in the :mod:`xbmc` module and are
currently as follows::
xbmc.LOGDEBUG = 0
xbmc.LOGERROR = 4
xbmc.LOGFATAL = 6
xbmc.LOGINFO = 1
xbmc.LOGNONE = 7
xbmc.LOGNOTICE = 2
xbmc.LOGSEVERE = 5
xbmc.LOGWARNING = 3
Args:
msg (str or unicode): The message to be written to the log file.
Kwargs:
level (int): The XBMC log level to write at.
'''
#msg = unicodedata.normalize('NFKD', unicode(msg)).encode('ascii',
# 'ignore')
xbmc.log('%s: %s' % (self.get_name(), msg), level)
def log_error(self, msg):
'''
Convenience method to write to the XBMC log file at the
``xbmc.LOGERROR`` error level. Use when something has gone wrong in
your addon code. This will show up in the log prefixed with 'ERROR:'
whether you have debugging switched on or not.
'''
self.log(msg, xbmc.LOGERROR)
def log_debug(self, msg):
'''
Convenience method to write to the XBMC log file at the
``xbmc.LOGDEBUG`` error level. Use this when you want to print out lots
of detailed information that is only usefull for debugging. This will
show up in the log only when debugging is enabled in the XBMC settings,
and will be prefixed with 'DEBUG:'.
'''
self.log(msg, xbmc.LOGDEBUG)
def log_notice(self, msg):
'''
Convenience method to write to the XBMC log file at the
``xbmc.LOGNOTICE`` error level. Use for general log messages. This will
show up in the log prefixed with 'NOTICE:' whether you have debugging
switched on or not.
'''
self.log(msg, xbmc.LOGNOTICE)
def show_ok_dialog(self, msg, title=None, is_error=False):
'''
Display an XBMC dialog with a message and a single 'OK' button. The
message is also written to the XBMC log file at the appropriate log
level.
.. warning::
Don't forget that `msg` must be a list of strings and not just a
string even if you only want to display a single line!
Example::
addon.show_ok_dialog(['My message'], 'My Addon')
Args:
msg (list of strings): The message to be displayed in the dialog.
Only the first 3 list items will be displayed.
Kwargs:
title (str): String to be displayed as the title of the dialog box.
Defaults to the addon name.
is_error (bool): If ``True``, the log message will be written at
the ERROR log level, otherwise NOTICE will be used.
'''
if not title:
title = self.get_name()
log_msg = ' '.join(msg)
while len(msg) < 3:
msg.append('')
if is_error:
self.log_error(log_msg)
else:
self.log_notice(log_msg)
xbmcgui.Dialog().ok(title, msg[0], msg[1], msg[2])
def show_error_dialog(self, msg):
'''
Convenience method to show an XBMC dialog box with a single OK button
and also write the message to the log file at the ERROR log level.
The title of the dialog will be the addon's name with the prefix
'Error: '.
.. warning::
Don't forget that `msg` must be a list of strings and not just a
string even if you only want to display a single line!
Args:
msg (list of strings): The message to be displayed in the dialog.
Only the first 3 list items will be displayed.
'''
self.show_ok_dialog(msg, 'Error: %s' % self.get_name(), True)
def show_small_popup(self, title='', msg='', delay=5000, image=''):
'''
Displays a small popup box in the lower right corner. The default delay
is 5 seconds.
Code inspired by anarchintosh and daledude's Icefilms addon.
Example::
import os
logo = os.path.join(addon.get_path(), 'art','logo.jpg')
addon.show_small_popup('MyAddonName','Is now loaded enjoy', 5000, logo)
Kwargs:
title (str): title to be displayed at the top of the box
msg (str): Main message body
delay (int): delay in milliseconds until it disappears
image (str): Path to the image you want to display
'''
xbmc.executebuiltin('XBMC.Notification("%s","%s",%d,"%s")' %
(title, msg, delay, image))
def show_countdown(self, time_to_wait, title='', text=''):
'''
Show a countdown dialog with a progress bar for XBMC while delaying
execution. Necessary for some file hosters, e.g. megaupload.
The original version of this code came from Anarchintosh.
Args:
time_to_wait (int): number of seconds to pause for.
Kwargs:
title (str): Displayed in the title of the countdown dialog. Default
is blank.
text (str): A line of text to be displayed in the dialog. Default
is blank.
Returns:
``True`` if countdown is allowed to complete, ``False`` if the
user cancelled the countdown.
'''
dialog = xbmcgui.DialogProgress()
ret = dialog.create(title)
self.log_notice('waiting %d secs' % time_to_wait)
secs = 0
increment = 100 / time_to_wait
cancelled = False
while secs <= time_to_wait:
if (dialog.iscanceled()):
cancelled = True
break
if secs != 0:
xbmc.sleep(1000)
secs_left = time_to_wait - secs
if secs_left == 0:
percent = 100
else:
percent = increment * secs
remaining_display = ('Wait %d seconds for the ' +
'video stream to activate...') % secs_left
dialog.update(percent, text, remaining_display)
secs += 1
if cancelled == True:
self.log_notice('countdown cancelled')
return False
else:
self.log_debug('countdown finished waiting')
return True
def show_settings(self):
'''Shows the settings dialog for this addon.'''
self.addon.openSettings()
def resolve_url(self, stream_url):
'''
Tell XBMC that you have resolved a URL (or not!).
This method should be called as follows:
#. The user selects a list item that has previously had ``isPlayable``
set (this is true for items added with :meth:`add_item`,
:meth:`add_video_item` or :meth:`add_music_item`)
#. Your code resolves the item requested by the user to a media URL
#. Your addon calls this method with the resolved URL
Args:
stream_url (str or ``False``): If a string, tell XBMC that the
media URL has been successfully resolved to stream_url. If ``False``
or an empty string tell XBMC the resolving failed and pop up an
error message.
'''
if stream_url:
self.log_debug('resolved to: %s' % stream_url)
xbmcplugin.setResolvedUrl(self.handle, True,
xbmcgui.ListItem(path=stream_url))
else:
self.show_error_dialog(['sorry, failed to resolve URL :('])
xbmcplugin.setResolvedUrl(self.handle, False, xbmcgui.ListItem())
def get_playlist(self, pl_type, new=False):
'''
Return a :class:`xbmc.Playlist` object of the specified type.
The available playlist types are defined in the :mod:`xbmc` module and
are currently as follows::
xbmc.PLAYLIST_MUSIC = 0
xbmc.PLAYLIST_VIDEO = 1
.. seealso::
:meth:`get_music_playlist`, :meth:`get_video_playlist`
Args:
pl_type (int): The type of playlist to get.
new (bool): If ``False`` (default), get the current
:class:`xbmc.Playlist` object of the type specified. If ``True``
then return a new blank :class:`xbmc.Playlist`.
Returns:
A :class:`xbmc.Playlist` object.
'''
pl = xbmc.PlayList(pl_type)
if new:
pl.clear()
return pl
def get_music_playlist(self, new=False):
'''
Convenience method to return a music :class:`xbmc.Playlist` object.
.. seealso::
:meth:`get_playlist`
Kwargs:
new (bool): If ``False`` (default), get the current music
:class:`xbmc.Playlist` object. If ``True`` then return a new blank
music :class:`xbmc.Playlist`.
Returns:
A :class:`xbmc.Playlist` object.
'''
self.get_playlist(xbmc.PLAYLIST_MUSIC, new)
def get_video_playlist(self, new=False):
'''
Convenience method to return a video :class:`xbmc.Playlist` object.
.. seealso::
:meth:`get_playlist`
Kwargs:
new (bool): If ``False`` (default), get the current video
:class:`xbmc.Playlist` object. If ``True`` then return a new blank
video :class:`xbmc.Playlist`.
Returns:
A :class:`xbmc.Playlist` object.
'''
self.get_playlist(xbmc.PLAYLIST_VIDEO, new)
def add_item(self, queries, infolabels, contextmenu_items='', context_replace=False, img='',
fanart='', resolved=False, total_items=0, playlist=False, item_type='video',
is_folder=False):
'''
Adds an item to the list of entries to be displayed in XBMC or to a
playlist.
Use this method when you want users to be able to select this item to
start playback of a media file. ``queries`` is a dict that will be sent
back to the addon when this item is selected::
add_item({'host': 'youtube.com', 'media_id': 'ABC123XYZ'},
{'title': 'A youtube vid'})
will add a link to::
plugin://your.plugin.id/?host=youtube.com&media_id=ABC123XYZ
.. seealso::
:meth:`add_music_item`, :meth:`add_video_item`,
:meth:`add_directory`
Args:
queries (dict): A set of keys/values to be sent to the addon when
the user selects this item.
infolabels (dict): A dictionary of information about this media
(see the `XBMC Wiki InfoLabels entry
<http://wiki.xbmc.org/?title=InfoLabels>`_).
Kwargs:
contextmenu_items (list): A list of contextmenu items
context_replace (bool): If ``True``, replace the default XBMC context
menu items rather than appending to them.
img (str): A URL to an image file to be used as an icon for this
entry.
fanart (str): A URL to a fanart image for this entry.
resolved (str): If not empty, ``queries`` will be ignored and
instead the added item will be the exact contents of ``resolved``.
total_items (int): Total number of items to be added in this list.
If supplied it enables XBMC to show a progress bar as the list of
items is being built.
playlist (playlist object): If ``False`` (default), the item will
be added to the list of entries to be displayed in this directory.
If a playlist object is passed (see :meth:`get_playlist`) then
the item will be added to the playlist instead
item_type (str): The type of item to add (eg. 'music', 'video' or
'pictures')
'''
infolabels = self.unescape_dict(infolabels)
if not resolved:
if not is_folder:
queries['play'] = 'True'
play = self.build_plugin_url(queries)
else:
play = resolved
listitem = xbmcgui.ListItem(infolabels['title'], iconImage=img,
thumbnailImage=img)
listitem.setInfo(item_type, infolabels)
listitem.setProperty('IsPlayable', 'true')
listitem.setProperty('fanart_image', fanart)
if contextmenu_items:
listitem.addContextMenuItems(contextmenu_items, replaceItems=context_replace)
if playlist is not False:
self.log_debug('adding item: %s - %s to playlist' % \
(infolabels['title'], play))
playlist.add(play, listitem)
else:
self.log_debug('adding item: %s - %s' % (infolabels['title'], play))
xbmcplugin.addDirectoryItem(self.handle, play, listitem,
isFolder=is_folder,
totalItems=total_items)
def add_video_item(self, queries, infolabels, contextmenu_items='', context_replace=False,
img='', fanart='', resolved=False, total_items=0, playlist=False):
'''
Convenience method to add a video item to the directory list or a
playlist.
See :meth:`add_item` for full information
'''
self.add_item(queries, infolabels, contextmenu_items, context_replace, img, fanart,
resolved, total_items, playlist, item_type='video')
def add_music_item(self, queries, infolabels, contextmenu_items='', context_replace=False,
img='', fanart='', resolved=False, total_items=0, playlist=False):
'''
Convenience method to add a music item to the directory list or a
playlist.
See :meth:`add_item` for full information
'''
self.add_item(queries, infolabels, contextmenu_items, context_replace, img, fanart,
resolved, total_items, playlist, item_type='music')
def add_directory(self, queries, infolabels, contextmenu_items='', context_replace=False,
img='', fanart='', total_items=0, is_folder=True):
'''
Convenience method to add a directory to the display list or a
playlist.
See :meth:`add_item` for full information
'''
self.add_item(queries, infolabels, contextmenu_items, context_replace, img, fanart,
total_items=total_items, resolved=self.build_plugin_url(queries),
is_folder=is_folder)
def end_of_directory(self):
'''Tell XBMC that we have finished adding items to this directory.'''
xbmcplugin.endOfDirectory(self.handle)
def _decode_callback(self, matches):
'''Callback method used by :meth:`decode`.'''
id = matches.group(1)
try:
return unichr(int(id))
except:
return id
def decode(self, data):
'''
Converts numeric HTML entities such as ``&#44;`` to the correct
characters using a regular expression. It is called by :meth:`unescape`
and so it is not required to call it directly.
This method was found `on the web <http://stackoverflow.com/questions/1208916/decoding-html-entities-with-python/1208931#1208931>`_
Args:
data (str): String to be cleaned.
Returns:
Cleaned string.
'''
return re.sub(r"&#(\d+)(;|(?=\s))", self._decode_callback, data).strip()
def unescape(self, text):
'''
Decodes HTML entities in a string.
You can add more entities to the ``rep`` dictionary.
Args:
text (str): String to be cleaned.
Returns:
Cleaned string.
'''
try:
text = self.decode(text)
rep = {'&lt;': '<',
'&gt;': '>',
'&quot;': '"',
'&rsquo;': '\'',
'&acute;': '\'',
}
for s, r in rep.items():
text = text.replace(s, r)
# this has to be last:
text = text.replace("&", "&")
#we don't want to fiddle with non-string types
except TypeError:
pass
return text
def unescape_dict(self, d):
'''
Calls :meth:`unescape` on all values in a dictionary.
Args:
d (dict): A dictionary containing string values
Returns:
A dictionary with HTML entities removed from the values.
'''
out = {}
for key, value in d.items():
out[key] = self.unescape(value)
return out
def save_data(self, filename, data):
'''
Saves the data structure using pickle. If the addon data path does
not exist it will be automatically created. This save function has
the same restrictions as the pickle module.
Args:
filename (string): name of the file you want to save data to. This
file will be saved in your addon's profile directory.
data (picklable object): The data you want to save.
Returns:
True on success
False on failure
'''
profile_path = self.get_profile()
try:
os.makedirs(profile_path)
except OSError:
# profile directory already exists
pass
save_path = os.path.join(profile_path, filename)
try:
pickle.dump(data, open(save_path, 'wb'))
return True
except pickle.PickleError:
return False
def load_data(self, filename):
'''
Load the data that was saved with save_data() and returns the
data structure.
Args:
filename (string): Name of the file you want to load data from. This
file will be loaded from your addon's profile directory.
Returns:
Data structure on success
False on failure
'''
profile_path = self.get_profile()
load_path = os.path.join(profile_path, filename)
self.log_debug('profile path: %s' % profile_path)
if not os.path.isfile(load_path):
self.log_debug('%s does not exist' % load_path)
return False
try:
data = pickle.load(open(load_path))
except:
return False
return data
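# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original file). It assumes
# ``addon`` is an instance of the helper class defined above; the queries,
# title and stream URL are placeholders.
def _example_route(addon, query_string):
    '''Route a plugin:// call either to a directory listing or to playback.'''
    params = addon.parse_query(query_string)
    if params.get('play') == 'True':
        # second step of the flow described in resolve_url(): hand the
        # resolved media URL back to XBMC
        addon.resolve_url('http://example.com/stream.mp4')
    else:
        # build a one-item listing; selecting it calls the addon back with
        # play=True because add_item() adds that key to the queries
        addon.add_video_item({'host': 'example.com', 'media_id': '42'},
                             {'title': 'Example video'})
        addon.end_of_directory()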
|
gpl-2.0
| 7,111,545,326,688,622,000 | -7,737,145,595,919,432,000 | 32.956688 | 139 | 0.541079 | false |
sikmir/QGIS
|
tests/src/python/test_qgsdelimitedtextprovider.py
|
2
|
30990
|
# -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsDelimitedTextProvider.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Chris Crook'
__date__ = '20/04/2013'
__copyright__ = 'Copyright 2013, The QGIS Project'
# This module provides unit test for the delimited text provider. It uses data files in
# the testdata/delimitedtext directory.
#
# New tests can be created (or existing ones updated) by incorporating a createTest
# call into the test. This will load the file and generate a test that the features
# loaded from it are correct. It assumes that the data is correct at the time the
# test is created. The new test is written to the test output file, and can be edited into
# this module to implement the test.
#
# To recreate all tests, set the REBUILD_DELIMITED_TEXT_TESTS environment variable (see rebuildTests below)
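#
# For reference, a rebuilt test pairs a test_* method in this module with a
# generated function in test_qgsdelimitedtextprovider_wanted.py shaped like
# the output of printWanted() below; the values in this sketch are
# placeholders only:
#
#   def test_999_example():
#       wanted = {}
#       wanted['uri'] = 'file://test.csv?geomType=none&type=csv'
#       wanted['fieldTypes'] = ['integer', 'text']
#       wanted['geometryType'] = 4
#       wanted['data'] = {
#           2: {'id': '1', 'description': 'Basic unquoted record'},
#       }
#       wanted['log'] = []
#       return wanted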
import qgis # NOQA
import os
import re
import tempfile
import inspect
import time
import test_qgsdelimitedtextprovider_wanted as want # NOQA
import collections
rebuildTests = 'REBUILD_DELIMITED_TEXT_TESTS' in os.environ
from qgis.PyQt.QtCore import QCoreApplication, QUrl, QObject
from qgis.core import (
QgsProviderRegistry,
QgsVectorLayer,
QgsFeatureRequest,
QgsRectangle,
QgsApplication,
QgsFeature)
from qgis.testing import start_app, unittest
from utilities import unitTestDataPath, compareWkt
from providertestbase import ProviderTestCase
start_app()
TEST_DATA_DIR = unitTestDataPath()
geomkey = "#geometry"
fidkey = "#fid"
try:
# Qt 5
from qgis.PyQt.QtCore import QUrlQuery
class MyUrl:
def __init__(self, url):
self.url = url
self.query = QUrlQuery()
@classmethod
def fromLocalFile(cls, filename):
return cls(QUrl.fromLocalFile(filename))
def addQueryItem(self, k, v):
self.query.addQueryItem(k, v)
def toString(self):
urlstr = self.url.toString()
querystr = self.query.toString(QUrl.FullyDecoded)
if querystr != '':
urlstr += '?'
urlstr += querystr
return urlstr
except:
MyUrl = QUrl
def normalize_query_items_order(s):
split_url = s.split('?')
urlstr = split_url[0]
if len(split_url) == 2:
items_list = split_url[1].split('&')
items_map = {}
for item in items_list:
split_item = item.split('=')
items_map[split_item[0]] = split_item[1]
first_arg = True
for k in sorted(items_map.keys()):
if first_arg:
urlstr += '?'
first_arg = False
else:
urlstr += '&'
urlstr += k + '=' + items_map[k]
return urlstr
# Thought we could connect to messageReceived signal but doesn't seem to be available
# in python :-( Not sure why?
class MessageLogger(QObject):
def __init__(self, tag=None):
QObject.__init__(self)
self.log = []
self.tag = tag
def __enter__(self):
QgsApplication.messageLog().messageReceived.connect(self.logMessage)
return self
def __exit__(self, type, value, traceback):
QgsApplication.messageLog().messageReceived.disconnect(self.logMessage)
def logMessage(self, msg, tag, level):
if tag == self.tag or not self.tag:
self.log.append(str(msg))
def messages(self):
return self.log
class TestQgsDelimitedTextProviderXY(unittest.TestCase, ProviderTestCase):
@classmethod
def setUpClass(cls):
"""Run before all tests"""
# Create test layer
srcpath = os.path.join(TEST_DATA_DIR, 'provider')
cls.basetestfile = os.path.join(srcpath, 'delimited_xy.csv')
url = MyUrl.fromLocalFile(cls.basetestfile)
url.addQueryItem("crs", "epsg:4326")
url.addQueryItem("type", "csv")
url.addQueryItem("xField", "X")
url.addQueryItem("yField", "Y")
url.addQueryItem("spatialIndex", "no")
url.addQueryItem("subsetIndex", "no")
url.addQueryItem("watchFile", "no")
cls.vl = QgsVectorLayer(url.toString(), 'test', 'delimitedtext')
assert cls.vl.isValid(), "{} is invalid".format(cls.basetestfile)
cls.source = cls.vl.dataProvider()
@classmethod
def tearDownClass(cls):
"""Run after all tests"""
class TestQgsDelimitedTextProviderWKT(unittest.TestCase, ProviderTestCase):
@classmethod
def setUpClass(cls):
"""Run before all tests"""
# Create test layer
srcpath = os.path.join(TEST_DATA_DIR, 'provider')
cls.basetestfile = os.path.join(srcpath, 'delimited_wkt.csv')
url = MyUrl.fromLocalFile(cls.basetestfile)
url.addQueryItem("crs", "epsg:4326")
url.addQueryItem("type", "csv")
url.addQueryItem("wktField", "wkt")
url.addQueryItem("spatialIndex", "no")
url.addQueryItem("subsetIndex", "no")
url.addQueryItem("watchFile", "no")
cls.vl = QgsVectorLayer(url.toString(), 'test', 'delimitedtext')
assert cls.vl.isValid(), "{} is invalid".format(cls.basetestfile)
cls.source = cls.vl.dataProvider()
cls.basetestpolyfile = os.path.join(srcpath, 'delimited_wkt_poly.csv')
url = MyUrl.fromLocalFile(cls.basetestpolyfile)
url.addQueryItem("crs", "epsg:4326")
url.addQueryItem("type", "csv")
url.addQueryItem("wktField", "wkt")
url.addQueryItem("spatialIndex", "no")
url.addQueryItem("subsetIndex", "no")
url.addQueryItem("watchFile", "no")
cls.vl_poly = QgsVectorLayer(url.toString(), 'test_polygon', 'delimitedtext')
assert cls.vl_poly.isValid(), "{} is invalid".format(cls.basetestpolyfile)
cls.poly_provider = cls.vl_poly.dataProvider()
@classmethod
def tearDownClass(cls):
"""Run after all tests"""
class TestQgsDelimitedTextProviderOther(unittest.TestCase):
@classmethod
def setUpClass(cls):
"""Run before all tests"""
# toggle full ctest output to debug flaky CI test
print('CTEST_FULL_OUTPUT')
def layerData(self, layer, request={}, offset=0):
# Retrieve the data for a layer
first = True
data = {}
fields = []
fieldTypes = []
fr = QgsFeatureRequest()
if request:
if 'exact' in request and request['exact']:
fr.setFlags(QgsFeatureRequest.ExactIntersect)
if 'nogeom' in request and request['nogeom']:
fr.setFlags(QgsFeatureRequest.NoGeometry)
if 'fid' in request:
fr.setFilterFid(request['fid'])
elif 'extents' in request:
fr.setFilterRect(QgsRectangle(*request['extents']))
if 'attributes' in request:
fr.setSubsetOfAttributes(request['attributes'])
# IMPORTANT - we do not use `for f in layer.getFeatures(fr):` as we need
# to verify that existing attributes and geometry are correctly cleared
# from the feature when calling nextFeature()
it = layer.getFeatures(fr)
f = QgsFeature()
while it.nextFeature(f):
if first:
first = False
for field in f.fields():
fields.append(str(field.name()))
fieldTypes.append(str(field.typeName()))
fielddata = dict((name, str(f[name])) for name in fields)
g = f.geometry()
if not g.isNull():
fielddata[geomkey] = str(g.asWkt())
else:
fielddata[geomkey] = "None"
fielddata[fidkey] = f.id()
id = fielddata[fields[0]]
description = fielddata[fields[1]]
fielddata['id'] = id
fielddata['description'] = description
data[f.id() + offset] = fielddata
if 'id' not in fields:
fields.insert(0, 'id')
if 'description' not in fields:
fields.insert(1, 'description')
fields.append(fidkey)
fields.append(geomkey)
return fields, fieldTypes, data
def delimitedTextData(self, testname, filename, requests, verbose, **params):
# Retrieve the data for a delimited text url
# Create a layer for the specified file and query parameters
# and return the data for the layer (fields, data)
filepath = os.path.join(unitTestDataPath("delimitedtext"), filename)
url = MyUrl.fromLocalFile(filepath)
if not requests:
requests = [{}]
for k in list(params.keys()):
url.addQueryItem(k, params[k])
urlstr = url.toString()
log = []
with MessageLogger('DelimitedText') as logger:
if verbose:
print(testname)
layer = QgsVectorLayer(urlstr, 'test', 'delimitedtext')
uri = layer.dataProvider().dataSourceUri()
if verbose:
print(uri)
basename = os.path.basename(filepath)
if not basename.startswith('test'):
basename = 'file'
uri = re.sub(r'^file\:\/\/[^\?]*', 'file://' + basename, uri)
fields = []
fieldTypes = []
data = {}
if layer.isValid():
for nr, r in enumerate(requests):
if verbose:
print(("Processing request", nr + 1, repr(r)))
if isinstance(r, collections.Callable):
r(layer)
if verbose:
print("Request function executed")
if isinstance(r, collections.Callable):
continue
rfields, rtypes, rdata = self.layerData(layer, r, nr * 1000)
if len(rfields) > len(fields):
fields = rfields
fieldTypes = rtypes
data.update(rdata)
if not rdata:
log.append("Request " + str(nr) + " did not return any data")
if verbose:
print(("Request returned", len(list(rdata.keys())), "features"))
for msg in logger.messages():
filelogname = 'temp_file' if 'tmp' in filename.lower() else filename
msg = re.sub(r'file\s+.*' + re.escape(filename), 'file ' + filelogname, msg)
msg = msg.replace(filepath, filelogname)
log.append(msg)
return dict(fields=fields, fieldTypes=fieldTypes, data=data, log=log, uri=uri, geometryType=layer.geometryType())
def printWanted(self, testname, result):
# Routine to export the result as a function definition
print()
print(("def {0}():".format(testname)))
data = result['data']
log = result['log']
fields = result['fields']
prefix = ' '
# Dump the data for a layer - used to construct unit tests
print((prefix + "wanted={}"))
print((prefix + "wanted['uri']=" + repr(result['uri'])))
print((prefix + "wanted['fieldTypes']=" + repr(result['fieldTypes'])))
print((prefix + "wanted['geometryType']=" + repr(result['geometryType'])))
print((prefix + "wanted['data']={"))
for k in sorted(data.keys()):
row = data[k]
print((prefix + " {0}: {{".format(repr(k))))
for f in fields:
print((prefix + " " + repr(f) + ": " + repr(row[f]) + ","))
print((prefix + " },"))
print((prefix + " }"))
print((prefix + "wanted['log']=["))
for msg in log:
print((prefix + ' ' + repr(msg) + ','))
print((prefix + ' ]'))
print(' return wanted')
print('', flush=True)
def recordDifference(self, record1, record2):
# Compare a record defined as a dictionary
for k in list(record1.keys()):
if k not in record2:
return "Field {0} is missing".format(k)
r1k = record1[k]
r2k = record2[k]
if k == geomkey:
if not compareWkt(r1k, r2k):
return "Geometry differs: {0:.50} versus {1:.50}".format(r1k, r2k)
else:
if record1[k] != record2[k]:
return "Field {0} differs: {1:.50} versus {2:.50}".format(k, repr(r1k), repr(r2k))
for k in list(record2.keys()):
if k not in record1:
return "Output contains extra field {0}".format(k)
return ''
def runTest(self, file, requests, **params):
testname = inspect.stack()[1][3]
verbose = not rebuildTests
if verbose:
print(("Running test:", testname))
result = self.delimitedTextData(testname, file, requests, verbose, **params)
if rebuildTests:
self.printWanted(testname, result)
assert False, "Test not run - being rebuilt"
try:
wanted = eval('want.{0}()'.format(testname))
except:
self.printWanted(testname, result)
assert False, "Test results not available for {0}".format(testname)
data = result['data']
log = result['log']
failures = []
if normalize_query_items_order(result['uri']) != normalize_query_items_order(wanted['uri']):
msg = "Layer Uri ({0}) doesn't match expected ({1})".format(
normalize_query_items_order(result['uri']), normalize_query_items_order(wanted['uri']))
print((' ' + msg))
failures.append(msg)
if result['fieldTypes'] != wanted['fieldTypes']:
msg = "Layer field types ({0}) doesn't match expected ({1})".format(
result['fieldTypes'], wanted['fieldTypes'])
failures.append(msg)
if result['geometryType'] != wanted['geometryType']:
msg = "Layer geometry type ({0}) doesn't match expected ({1})".format(
result['geometryType'], wanted['geometryType'])
failures.append(msg)
wanted_data = wanted['data']
for id in sorted(wanted_data.keys()):
print('getting wanted data')
wrec = wanted_data[id]
print('getting received data')
trec = data.get(id, {})
print('getting description')
description = wrec['description']
print('getting difference')
difference = self.recordDifference(wrec, trec)
if not difference:
print((' {0}: Passed'.format(description)))
else:
print((' {0}: {1}'.format(description, difference)))
failures.append(description + ': ' + difference)
for id in sorted(data.keys()):
if id not in wanted_data:
msg = "Layer contains unexpected extra data with id: \"{0}\"".format(id)
print((' ' + msg))
failures.append(msg)
common = []
log_wanted = wanted['log']
for l in log:
if l in log_wanted:
common.append(l)
for l in log_wanted:
if l not in common:
msg = 'Missing log message: ' + l
print((' ' + msg))
failures.append(msg)
for l in log:
if l not in common:
msg = 'Extra log message: ' + l
print((' ' + msg))
failures.append(msg)
if len(log) == len(common) and len(log_wanted) == len(common):
print(' Message log correct: Passed')
if failures:
self.printWanted(testname, result)
assert len(failures) == 0, "\n".join(failures)
def test_001_provider_defined(self):
registry = QgsProviderRegistry.instance()
metadata = registry.providerMetadata('delimitedtext')
assert metadata is not None, "Delimited text provider is not installed"
def test_002_load_csv_file(self):
# CSV file parsing
filename = 'test.csv'
params = {'geomType': 'none', 'type': 'csv'}
requests = None
self.runTest(filename, requests, **params)
def test_003_field_naming(self):
# Management of missing/duplicate/invalid field names
filename = 'testfields.csv'
params = {'geomType': 'none', 'type': 'csv'}
requests = None
self.runTest(filename, requests, **params)
def test_004_max_fields(self):
# Limiting maximum number of fields
filename = 'testfields.csv'
params = {'geomType': 'none', 'maxFields': '7', 'type': 'csv'}
requests = None
self.runTest(filename, requests, **params)
def test_005_load_whitespace(self):
# Whitespace file parsing
filename = 'test.space'
params = {'geomType': 'none', 'type': 'whitespace'}
requests = None
self.runTest(filename, requests, **params)
def test_006_quote_escape(self):
# Quote and escape file parsing
filename = 'test.pipe'
params = {'geomType': 'none', 'quote': '"', 'delimiter': '|', 'escape': '\\'}
requests = None
self.runTest(filename, requests, **params)
def test_007_multiple_quote(self):
# Multiple quote and escape characters
filename = 'test.quote'
params = {'geomType': 'none', 'quote': '\'"', 'type': 'csv', 'escape': '"\''}
requests = None
self.runTest(filename, requests, **params)
def test_008_badly_formed_quotes(self):
# Badly formed quoted fields
filename = 'test.badquote'
params = {'geomType': 'none', 'quote': '"', 'type': 'csv', 'escape': '"'}
requests = None
self.runTest(filename, requests, **params)
def test_009_skip_lines(self):
# Skip lines
filename = 'test2.csv'
params = {'geomType': 'none', 'useHeader': 'no', 'type': 'csv', 'skipLines': '2'}
requests = None
self.runTest(filename, requests, **params)
def test_010_read_coordinates(self):
# Skip lines
filename = 'testpt.csv'
params = {'yField': 'geom_y', 'xField': 'geom_x', 'type': 'csv'}
requests = None
self.runTest(filename, requests, **params)
def test_011_read_wkt(self):
# Reading WKT geometry field
filename = 'testwkt.csv'
params = {'delimiter': '|', 'type': 'csv', 'wktField': 'geom_wkt'}
requests = None
self.runTest(filename, requests, **params)
def test_012_read_wkt_point(self):
# Read WKT points
filename = 'testwkt.csv'
params = {'geomType': 'point', 'delimiter': '|', 'type': 'csv', 'wktField': 'geom_wkt'}
requests = None
self.runTest(filename, requests, **params)
def test_013_read_wkt_line(self):
# Read WKT linestrings
filename = 'testwkt.csv'
params = {'geomType': 'line', 'delimiter': '|', 'type': 'csv', 'wktField': 'geom_wkt'}
requests = None
self.runTest(filename, requests, **params)
def test_014_read_wkt_polygon(self):
# Read WKT polygons
filename = 'testwkt.csv'
params = {'geomType': 'polygon', 'delimiter': '|', 'type': 'csv', 'wktField': 'geom_wkt'}
requests = None
self.runTest(filename, requests, **params)
def test_015_read_dms_xy(self):
# Reading degrees/minutes/seconds angles
filename = 'testdms.csv'
params = {'yField': 'lat', 'xField': 'lon', 'type': 'csv', 'xyDms': 'yes'}
requests = None
self.runTest(filename, requests, **params)
def test_016_decimal_point(self):
# Reading degrees/minutes/seconds angles
filename = 'testdp.csv'
params = {'yField': 'geom_y', 'xField': 'geom_x', 'type': 'csv', 'delimiter': ';', 'decimalPoint': ','}
requests = None
self.runTest(filename, requests, **params)
def test_017_regular_expression_1(self):
# Parsing regular expression delimiter
filename = 'testre.txt'
params = {'geomType': 'none', 'trimFields': 'Y', 'delimiter': 'RE(?:GEXP)?', 'type': 'regexp'}
requests = None
self.runTest(filename, requests, **params)
def test_018_regular_expression_2(self):
# Parsing regular expression delimiter with capture groups
filename = 'testre.txt'
params = {'geomType': 'none', 'trimFields': 'Y', 'delimiter': '(RE)(GEXP)?', 'type': 'regexp'}
requests = None
self.runTest(filename, requests, **params)
def test_019_regular_expression_3(self):
# Parsing anchored regular expression
filename = 'testre2.txt'
params = {'geomType': 'none', 'trimFields': 'Y', 'delimiter': '^(.{5})(.{30})(.{5,})', 'type': 'regexp'}
requests = None
self.runTest(filename, requests, **params)
def test_020_regular_expression_4(self):
# Parsing zero length re
filename = 'testre3.txt'
params = {'geomType': 'none', 'delimiter': 'x?', 'type': 'regexp'}
requests = None
self.runTest(filename, requests, **params)
def test_021_regular_expression_5(self):
# Parsing zero length re 2
filename = 'testre3.txt'
params = {'geomType': 'none', 'delimiter': '\\b', 'type': 'regexp'}
requests = None
self.runTest(filename, requests, **params)
def test_022_utf8_encoded_file(self):
# UTF8 encoded file test
filename = 'testutf8.csv'
params = {'geomType': 'none', 'delimiter': '|', 'type': 'csv', 'encoding': 'utf-8'}
requests = None
self.runTest(filename, requests, **params)
def test_023_latin1_encoded_file(self):
# Latin1 encoded file test
filename = 'testlatin1.csv'
params = {'geomType': 'none', 'delimiter': '|', 'type': 'csv', 'encoding': 'latin1'}
requests = None
self.runTest(filename, requests, **params)
def test_024_filter_rect_xy(self):
# Filter extents on XY layer
filename = 'testextpt.txt'
params = {'yField': 'y', 'delimiter': '|', 'type': 'csv', 'xField': 'x'}
requests = [
{'extents': [10, 30, 30, 50]},
{'extents': [10, 30, 30, 50], 'exact': 1},
{'extents': [110, 130, 130, 150]}]
self.runTest(filename, requests, **params)
def test_025_filter_rect_wkt(self):
# Filter extents on WKT layer
filename = 'testextw.txt'
params = {'delimiter': '|', 'type': 'csv', 'wktField': 'wkt'}
requests = [
{'extents': [10, 30, 30, 50]},
{'extents': [10, 30, 30, 50], 'exact': 1},
{'extents': [110, 130, 130, 150]}]
self.runTest(filename, requests, **params)
def test_026_filter_fid(self):
# Filter on feature id
filename = 'test.csv'
params = {'geomType': 'none', 'type': 'csv'}
requests = [
{'fid': 3},
{'fid': 9},
{'fid': 20},
{'fid': 3}]
self.runTest(filename, requests, **params)
def test_027_filter_attributes(self):
# Filter on attributes
filename = 'test.csv'
params = {'geomType': 'none', 'type': 'csv'}
requests = [
{'attributes': [1, 3]},
{'fid': 9},
{'attributes': [1, 3], 'fid': 9},
{'attributes': [3, 1], 'fid': 9},
{'attributes': [1, 3, 7], 'fid': 9},
{'attributes': [], 'fid': 9}]
self.runTest(filename, requests, **params)
def test_028_substring_test(self):
# CSV file parsing
filename = 'test.csv'
params = {'geomType': 'none', 'subset': 'id % 2 = 1', 'type': 'csv'}
requests = None
self.runTest(filename, requests, **params)
def test_029_file_watcher(self):
# Testing file watcher
(filehandle, filename) = tempfile.mkstemp()
if os.name == "nt":
filename = filename.replace("\\", "/")
with os.fdopen(filehandle, "w") as f:
f.write("id,name\n1,rabbit\n2,pooh\n")
def appendfile(layer):
with open(filename, 'a') as f:
f.write('3,tiger\n')
# print "Appended to file - sleeping"
time.sleep(1)
QCoreApplication.instance().processEvents()
def rewritefile(layer):
with open(filename, 'w') as f:
f.write("name,size,id\ntoad,small,5\nmole,medium,6\nbadger,big,7\n")
# print "Rewritten file - sleeping"
time.sleep(1)
QCoreApplication.instance().processEvents()
def deletefile(layer):
try:
os.remove(filename)
except:
open(filename, "w").close()
assert os.path.getsize(filename) == 0, "removal and truncation of {} failed".format(filename)
# print "Deleted file - sleeping"
time.sleep(1)
QCoreApplication.instance().processEvents()
params = {'geomType': 'none', 'type': 'csv', 'watchFile': 'yes'}
requests = [
{'fid': 3},
{},
{'fid': 7},
appendfile,
{'fid': 3},
{'fid': 4},
{},
{'fid': 7},
rewritefile,
{'fid': 2},
{},
{'fid': 7},
deletefile,
{'fid': 2},
{},
rewritefile,
{'fid': 2},
]
self.runTest(filename, requests, **params)
def test_030_filter_rect_xy_spatial_index(self):
# Filter extents on XY layer with spatial index
filename = 'testextpt.txt'
params = {'yField': 'y', 'delimiter': '|', 'type': 'csv', 'xField': 'x', 'spatialIndex': 'Y'}
requests = [
{'extents': [10, 30, 30, 50]},
{'extents': [10, 30, 30, 50], 'exact': 1},
{'extents': [110, 130, 130, 150]},
{},
{'extents': [-1000, -1000, 1000, 1000]}
]
self.runTest(filename, requests, **params)
def test_031_filter_rect_wkt_spatial_index(self):
# Filter extents on WKT layer with spatial index
filename = 'testextw.txt'
params = {'delimiter': '|', 'type': 'csv', 'wktField': 'wkt', 'spatialIndex': 'Y'}
requests = [
{'extents': [10, 30, 30, 50]},
{'extents': [10, 30, 30, 50], 'exact': 1},
{'extents': [110, 130, 130, 150]},
{},
{'extents': [-1000, -1000, 1000, 1000]}
]
self.runTest(filename, requests, **params)
def test_032_filter_rect_wkt_create_spatial_index(self):
# Filter extents on WKT layer building spatial index
filename = 'testextw.txt'
params = {'delimiter': '|', 'type': 'csv', 'wktField': 'wkt'}
requests = [
{'extents': [10, 30, 30, 50]},
{},
lambda layer: layer.dataProvider().createSpatialIndex(),
{'extents': [10, 30, 30, 50]},
{'extents': [10, 30, 30, 50], 'exact': 1},
{'extents': [110, 130, 130, 150]},
{},
{'extents': [-1000, -1000, 1000, 1000]}
]
self.runTest(filename, requests, **params)
def test_033_reset_subset_string(self):
# CSV file parsing
filename = 'test.csv'
params = {'geomType': 'none', 'type': 'csv'}
requests = [
{},
lambda layer: layer.dataProvider().setSubsetString("id % 2 = 1", True),
{},
lambda layer: layer.dataProvider().setSubsetString("id = 6", False),
{},
lambda layer: layer.dataProvider().setSubsetString("id = 3", False),
{},
lambda layer: layer.dataProvider().setSubsetString("id % 2 = 1", True),
{},
lambda layer: layer.dataProvider().setSubsetString("id % 2 = 0", True),
{},
]
self.runTest(filename, requests, **params)
def test_034_csvt_file(self):
# CSVT field types
filename = 'testcsvt.csv'
params = {'geomType': 'none', 'type': 'csv'}
requests = None
self.runTest(filename, requests, **params)
def test_035_csvt_file2(self):
# CSV field types 2
filename = 'testcsvt2.txt'
params = {'geomType': 'none', 'type': 'csv', 'delimiter': '|'}
requests = None
self.runTest(filename, requests, **params)
def test_036_csvt_file_invalid_types(self):
# CSV field types invalid string format
filename = 'testcsvt3.csv'
params = {'geomType': 'none', 'type': 'csv'}
requests = None
self.runTest(filename, requests, **params)
def test_037_csvt_file_invalid_file(self):
# CSV field types invalid file
filename = 'testcsvt4.csv'
params = {'geomType': 'none', 'type': 'csv'}
requests = None
self.runTest(filename, requests, **params)
def test_038_type_inference(self):
# Skip lines
filename = 'testtypes.csv'
params = {'yField': 'lat', 'xField': 'lon', 'type': 'csv'}
requests = None
self.runTest(filename, requests, **params)
def test_039_issue_13749(self):
# First record contains missing geometry
filename = 'test13749.csv'
params = {'yField': 'geom_y', 'xField': 'geom_x', 'type': 'csv'}
requests = None
self.runTest(filename, requests, **params)
def test_040_issue_14666(self):
# x/y containing some null geometries
filename = 'test14666.csv'
params = {'yField': 'y', 'xField': 'x', 'type': 'csv', 'delimiter': '\\t'}
requests = None
self.runTest(filename, requests, **params)
def test_041_no_detect_type(self):
# CSV file parsing
# Skip lines
filename = 'testtypes.csv'
params = {'yField': 'lat', 'xField': 'lon', 'type': 'csv', 'detectTypes': 'no'}
requests = None
self.runTest(filename, requests, **params)
def test_042_no_detect_types_csvt(self):
# CSVT field types
filename = 'testcsvt.csv'
params = {'geomType': 'none', 'type': 'csv', 'detectTypes': 'no'}
requests = None
self.runTest(filename, requests, **params)
def test_043_decodeuri(self):
# URI decoding
filename = '/home/to/path/test.csv'
uri = 'file://{}?geomType=none'.format(filename)
registry = QgsProviderRegistry.instance()
components = registry.decodeUri('delimitedtext', uri)
self.assertEqual(components['path'], filename)
if __name__ == '__main__':
unittest.main()
|
gpl-2.0
| 1,800,512,634,462,732,800 | 533,006,811,357,915,260 | 36.292419 | 125 | 0.551339 | false |
alisaifee/djlimiter
|
djlimiter/middleware.py
|
1
|
5065
|
import importlib
import logging
from django.conf import settings
from django.core.urlresolvers import resolve
from limits.storage import storage_from_string
from limits.strategies import STRATEGIES
from limits.errors import ConfigurationError
from limits.util import parse_many
import six
from .decorators import DECORATED, EXEMPT
from .util import get_ipaddr, LimitWrapper, BlackHoleHandler
from .errors import RateLimitExceeded
class C:
ENABLED = "RATELIMIT_ENABLED"
HEADERS_ENABLED = "RATELIMIT_HEADERS_ENABLED"
STORAGE_URL = "RATELIMIT_STORAGE_URL"
STRATEGY = "RATELIMIT_STRATEGY"
GLOBAL_LIMITS = "RATELIMIT_GLOBAL"
HEADER_LIMIT = "RATELIMIT_HEADER_LIMIT"
HEADER_REMAINING = "RATELIMIT_HEADER_REMAINING"
HEADER_RESET = "RATELIMIT_HEADER_RESET"
DEFAULT_KEY_FUNCTION = "RATELIMIT_KEY_FUNCTION"
CALLBACK = "RATELIMIT_CALLBACK"
class HEADERS:
RESET = 1
REMAINING = 2
LIMIT = 3
class Limiter(object):
"""
"""
def __init__(self):
conf_limits = getattr(settings, C.GLOBAL_LIMITS, "")
callback = getattr(settings, C.CALLBACK, self.__raise_exceeded )
self.enabled = getattr(settings, C.ENABLED, True)
self.headers_enabled = getattr(settings, C.HEADERS_ENABLED, False)
self.strategy = getattr(settings, C.STRATEGY, 'fixed-window')
if self.strategy not in STRATEGIES:
raise ConfigurationError("Invalid rate limiting strategy %s" % self.strategy)
self.storage = storage_from_string(getattr(settings, C.STORAGE_URL, "memory://"))
self.limiter = STRATEGIES[self.strategy](self.storage)
self.key_function = getattr(settings, C.DEFAULT_KEY_FUNCTION, get_ipaddr)
self.global_limits = []
if conf_limits:
self.global_limits = [
LimitWrapper(
list(parse_many(conf_limits)), self.key_function, None, False
)
]
self.header_mapping = {
HEADERS.RESET : getattr(settings,C.HEADER_RESET, "X-RateLimit-Reset"),
HEADERS.REMAINING : getattr(settings,C.HEADER_REMAINING, "X-RateLimit-Remaining"),
HEADERS.LIMIT : getattr(settings,C.HEADER_LIMIT, "X-RateLimit-Limit"),
}
self.logger = logging.getLogger("djlimiter")
self.logger.addHandler(BlackHoleHandler())
if isinstance(callback, six.string_types):
mod, _, name = callback.rpartition(".")
try:
self.callback = getattr(importlib.import_module(mod), name)
except AttributeError:
self.logger.error(
"Unable to load callback function %s. Rate limiting disabled",
callback
)
self.enabled = False
else:
self.callback = callback
def __raise_exceeded(self, limit):
return RateLimitExceeded(limit)
def process_request(self, request):
"""
:param request:
:return:
"""
func = resolve(request.path).func
name = resolve(request.path).view_name if func else ""
limits = self.global_limits
if (
not self.enabled
or func in EXEMPT
or not name
):
return
if func in DECORATED:
limits = DECORATED[func]
limit_for_header = None
failed_limit = None
for lim in limits:
limit_scope = lim.get_scope(request) or name
cur_limits = lim.get_limits(request)
for cur_limit in cur_limits:
if not limit_for_header or cur_limit < limit_for_header[0]:
limit_for_header = (cur_limit, (lim.key_func or self.key_function)(request), limit_scope)
if lim.per_method:
limit_scope += ":%s" % request.method
if not self.limiter.hit(cur_limit, (lim.key_func or self.key_function)(request), limit_scope):
self.logger.info("Rate limit exceeded for %s (%s)", name, cur_limit)
failed_limit = cur_limit
limit_for_header = (cur_limit, (lim.key_func or self.key_function)(request), limit_scope)
break
if failed_limit:
break
request.view_rate_limit = limit_for_header
if failed_limit:
return self.callback(failed_limit)
def process_response(self, request, response):
"""
:param request:
:param response:
:return:
"""
current_limit = getattr(request, "view_rate_limit", None)
if self.headers_enabled and current_limit:
window_stats = self.limiter.get_window_stats(*current_limit)
response[self.header_mapping[HEADERS.LIMIT]] = str(current_limit[0].amount)
response[self.header_mapping[HEADERS.REMAINING]] = window_stats[1]
response[self.header_mapping[HEADERS.RESET]] = window_stats[0]
return response
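# ---------------------------------------------------------------------------
# Illustrative configuration sketch (not part of the original module). The
# middleware reads the settings named in class C above; the middleware path,
# limit string and storage URL below are placeholders (the limit format is
# the one accepted by the ``limits`` package).
#
#   # settings.py
#   MIDDLEWARE_CLASSES += ('djlimiter.middleware.Limiter',)
#   RATELIMIT_GLOBAL = "100/hour"
#   RATELIMIT_STORAGE_URL = "memory://"
#   RATELIMIT_STRATEGY = "fixed-window"
#   RATELIMIT_HEADERS_ENABLED = True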
|
mit
| -5,752,953,222,751,799,000 | -4,002,584,867,077,026,300 | 35.438849 | 110 | 0.604738 | false |
vshtanko/scikit-learn
|
sklearn/utils/sparsefuncs.py
|
220
|
11424
|
# Authors: Manoj Kumar
# Thomas Unterthiner
# License: BSD 3 clause
import scipy.sparse as sp
import numpy as np
from .fixes import sparse_min_max, bincount
from .sparsefuncs_fast import csr_mean_variance_axis0 as _csr_mean_var_axis0
from .sparsefuncs_fast import csc_mean_variance_axis0 as _csc_mean_var_axis0
def _raise_typeerror(X):
"""Raises a TypeError if X is not a CSR or CSC matrix"""
input_type = X.format if sp.issparse(X) else type(X)
err = "Expected a CSR or CSC sparse matrix, got %s." % input_type
raise TypeError(err)
def inplace_csr_column_scale(X, scale):
"""Inplace column scaling of a CSR matrix.
Scale each feature of the data matrix by multiplying with specific scale
provided by the caller assuming a (n_samples, n_features) shape.
Parameters
----------
X : CSR matrix with shape (n_samples, n_features)
Matrix to normalize using the variance of the features.
scale : float array with shape (n_features,)
Array of precomputed feature-wise values to use for scaling.
"""
assert scale.shape[0] == X.shape[1]
X.data *= scale.take(X.indices, mode='clip')
def inplace_csr_row_scale(X, scale):
""" Inplace row scaling of a CSR matrix.
Scale each sample of the data matrix by multiplying with specific scale
provided by the caller assuming a (n_samples, n_features) shape.
Parameters
----------
X : CSR sparse matrix, shape (n_samples, n_features)
Matrix to be scaled.
scale : float array with shape (n_samples,)
Array of precomputed sample-wise values to use for scaling.
"""
assert scale.shape[0] == X.shape[0]
X.data *= np.repeat(scale, np.diff(X.indptr))
def mean_variance_axis(X, axis):
"""Compute mean and variance along axis 0 on a CSR or CSC matrix
Parameters
----------
X: CSR or CSC sparse matrix, shape (n_samples, n_features)
Input data.
axis: int (either 0 or 1)
Axis along which the means and variances are computed.
Returns
-------
means: float array with shape (n_features,)
Feature-wise means
variances: float array with shape (n_features,)
Feature-wise variances
"""
if axis not in (0, 1):
raise ValueError(
"Unknown axis value: %d. Use 0 for rows, or 1 for columns" % axis)
if isinstance(X, sp.csr_matrix):
if axis == 0:
return _csr_mean_var_axis0(X)
else:
return _csc_mean_var_axis0(X.T)
elif isinstance(X, sp.csc_matrix):
if axis == 0:
return _csc_mean_var_axis0(X)
else:
return _csr_mean_var_axis0(X.T)
else:
_raise_typeerror(X)
def inplace_column_scale(X, scale):
"""Inplace column scaling of a CSC/CSR matrix.
Scale each feature of the data matrix by multiplying with specific scale
provided by the caller assuming a (n_samples, n_features) shape.
Parameters
----------
X: CSC or CSR matrix with shape (n_samples, n_features)
Matrix to normalize using the variance of the features.
scale: float array with shape (n_features,)
Array of precomputed feature-wise values to use for scaling.
"""
if isinstance(X, sp.csc_matrix):
inplace_csr_row_scale(X.T, scale)
elif isinstance(X, sp.csr_matrix):
inplace_csr_column_scale(X, scale)
else:
_raise_typeerror(X)
def inplace_row_scale(X, scale):
""" Inplace row scaling of a CSR or CSC matrix.
Scale each row of the data matrix by multiplying with specific scale
provided by the caller assuming a (n_samples, n_features) shape.
Parameters
----------
X : CSR or CSC sparse matrix, shape (n_samples, n_features)
Matrix to be scaled.
scale : float array with shape (n_features,)
Array of precomputed sample-wise values to use for scaling.
"""
if isinstance(X, sp.csc_matrix):
inplace_csr_column_scale(X.T, scale)
elif isinstance(X, sp.csr_matrix):
inplace_csr_row_scale(X, scale)
else:
_raise_typeerror(X)
def inplace_swap_row_csc(X, m, n):
"""
Swaps two rows of a CSC matrix in-place.
Parameters
----------
X: scipy.sparse.csc_matrix, shape=(n_samples, n_features)
Matrix whose two rows are to be swapped.
m: int
Index of the row of X to be swapped.
n: int
Index of the row of X to be swapped.
"""
for t in [m, n]:
if isinstance(t, np.ndarray):
raise TypeError("m and n should be valid integers")
if m < 0:
m += X.shape[0]
if n < 0:
n += X.shape[0]
m_mask = X.indices == m
X.indices[X.indices == n] = m
X.indices[m_mask] = n
def inplace_swap_row_csr(X, m, n):
"""
Swaps two rows of a CSR matrix in-place.
Parameters
----------
X: scipy.sparse.csr_matrix, shape=(n_samples, n_features)
Matrix whose two rows are to be swapped.
m: int
Index of the row of X to be swapped.
n: int
Index of the row of X to be swapped.
"""
for t in [m, n]:
if isinstance(t, np.ndarray):
raise TypeError("m and n should be valid integers")
if m < 0:
m += X.shape[0]
if n < 0:
n += X.shape[0]
# The following swapping makes life easier since m is assumed to be the
# smaller integer below.
if m > n:
m, n = n, m
indptr = X.indptr
m_start = indptr[m]
m_stop = indptr[m + 1]
n_start = indptr[n]
n_stop = indptr[n + 1]
nz_m = m_stop - m_start
nz_n = n_stop - n_start
if nz_m != nz_n:
# Modify indptr first
X.indptr[m + 2:n] += nz_n - nz_m
X.indptr[m + 1] = m_start + nz_n
X.indptr[n] = n_stop - nz_m
X.indices = np.concatenate([X.indices[:m_start],
X.indices[n_start:n_stop],
X.indices[m_stop:n_start],
X.indices[m_start:m_stop],
X.indices[n_stop:]])
X.data = np.concatenate([X.data[:m_start],
X.data[n_start:n_stop],
X.data[m_stop:n_start],
X.data[m_start:m_stop],
X.data[n_stop:]])
def inplace_swap_row(X, m, n):
"""
Swaps two rows of a CSC/CSR matrix in-place.
Parameters
----------
X : CSR or CSC sparse matrix, shape=(n_samples, n_features)
Matrix whose two rows are to be swapped.
m: int
Index of the row of X to be swapped.
n: int
Index of the row of X to be swapped.
"""
if isinstance(X, sp.csc_matrix):
return inplace_swap_row_csc(X, m, n)
elif isinstance(X, sp.csr_matrix):
return inplace_swap_row_csr(X, m, n)
else:
_raise_typeerror(X)
def inplace_swap_column(X, m, n):
"""
Swaps two columns of a CSC/CSR matrix in-place.
Parameters
----------
X : CSR or CSC sparse matrix, shape=(n_samples, n_features)
Matrix whose two columns are to be swapped.
m: int
Index of the column of X to be swapped.
n : int
Index of the column of X to be swapped.
"""
if m < 0:
m += X.shape[1]
if n < 0:
n += X.shape[1]
if isinstance(X, sp.csc_matrix):
return inplace_swap_row_csr(X, m, n)
elif isinstance(X, sp.csr_matrix):
return inplace_swap_row_csc(X, m, n)
else:
_raise_typeerror(X)
def min_max_axis(X, axis):
"""Compute minimum and maximum along an axis on a CSR or CSC matrix
Parameters
----------
X : CSR or CSC sparse matrix, shape (n_samples, n_features)
Input data.
axis: int (either 0 or 1)
Axis along which the minima and maxima are computed.
Returns
-------
mins: float array with shape (n_features,)
Feature-wise minima
maxs: float array with shape (n_features,)
Feature-wise maxima
"""
if isinstance(X, sp.csr_matrix) or isinstance(X, sp.csc_matrix):
return sparse_min_max(X, axis=axis)
else:
_raise_typeerror(X)
def count_nonzero(X, axis=None, sample_weight=None):
"""A variant of X.getnnz() with extension to weighting on axis 0
Useful in efficiently calculating multilabel metrics.
Parameters
----------
X : CSR sparse matrix, shape = (n_samples, n_labels)
Input data.
axis : None, 0 or 1
The axis on which the data is aggregated.
sample_weight : array, shape = (n_samples,), optional
Weight for each row of X.
"""
if axis == -1:
axis = 1
elif axis == -2:
axis = 0
elif X.format != 'csr':
raise TypeError('Expected CSR sparse format, got {0}'.format(X.format))
# We rely here on the fact that np.diff(Y.indptr) for a CSR
# will return the number of nonzero entries in each row.
# A bincount over Y.indices will return the number of nonzeros
# in each column. See ``csr_matrix.getnnz`` in scipy >= 0.14.
if axis is None:
if sample_weight is None:
return X.nnz
else:
return np.dot(np.diff(X.indptr), sample_weight)
elif axis == 1:
out = np.diff(X.indptr)
if sample_weight is None:
return out
return out * sample_weight
elif axis == 0:
if sample_weight is None:
return bincount(X.indices, minlength=X.shape[1])
else:
weights = np.repeat(sample_weight, np.diff(X.indptr))
return bincount(X.indices, minlength=X.shape[1],
weights=weights)
else:
raise ValueError('Unsupported axis: {0}'.format(axis))
def _get_median(data, n_zeros):
"""Compute the median of data with n_zeros additional zeros.
This function is used to support sparse matrices; it modifies data in-place
"""
n_elems = len(data) + n_zeros
if not n_elems:
return np.nan
n_negative = np.count_nonzero(data < 0)
middle, is_odd = divmod(n_elems, 2)
data.sort()
if is_odd:
return _get_elem_at_rank(middle, data, n_negative, n_zeros)
return (_get_elem_at_rank(middle - 1, data, n_negative, n_zeros) +
_get_elem_at_rank(middle, data, n_negative, n_zeros)) / 2.
def _get_elem_at_rank(rank, data, n_negative, n_zeros):
"""Find the value in data augmented with n_zeros for the given rank"""
if rank < n_negative:
return data[rank]
if rank - n_negative < n_zeros:
return 0
return data[rank - n_zeros]
def csc_median_axis_0(X):
"""Find the median across axis 0 of a CSC matrix.
It is equivalent to doing np.median(X, axis=0).
Parameters
----------
X : CSC sparse matrix, shape (n_samples, n_features)
Input data.
Returns
-------
median : ndarray, shape (n_features,)
Median.
"""
if not isinstance(X, sp.csc_matrix):
raise TypeError("Expected matrix of CSC format, got %s" % X.format)
indptr = X.indptr
n_samples, n_features = X.shape
median = np.zeros(n_features)
for f_ind, (start, end) in enumerate(zip(indptr[:-1], indptr[1:])):
# Prevent modifying X in place
data = np.copy(X.data[start: end])
nz = n_samples - data.size
median[f_ind] = _get_median(data, nz)
return median
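# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module): scale each
# column of a small CSR matrix to unit variance using the helpers above.
# The toy matrix is a placeholder.
if __name__ == '__main__':
    X = sp.csr_matrix(np.array([[1., 0., 2.],
                                [0., 3., 0.],
                                [4., 0., 6.]]))
    means, variances = mean_variance_axis(X, axis=0)
    # guard against dividing by zero for constant columns
    scale = 1. / np.sqrt(np.where(variances == 0, 1., variances))
    inplace_column_scale(X, scale)
    print(X.toarray())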
|
bsd-3-clause
| -5,110,316,294,787,414,000 | -3,469,138,051,129,258,000 | 27.277228 | 79 | 0.587798 | false |
a40223217/2015cdb_g6team3
|
static/Brython3.1.1-20150328-091302/Lib/importlib/__init__.py
|
610
|
3472
|
"""A pure Python implementation of import."""
__all__ = ['__import__', 'import_module', 'invalidate_caches']
# Bootstrap help #####################################################
# Until bootstrapping is complete, DO NOT import any modules that attempt
# to import importlib._bootstrap (directly or indirectly). Since this
# partially initialised package would be present in sys.modules, those
# modules would get an uninitialised copy of the source version, instead
# of a fully initialised version (either the frozen one or the one
# initialised below if the frozen one is not available).
import _imp # Just the builtin component, NOT the full Python module
import sys
from . import machinery #fix me brython
try:
import _frozen_importlib as _bootstrap
except ImportError:
from . import _bootstrap
_bootstrap._setup(sys, _imp)
else:
# importlib._bootstrap is the built-in import, ensure we don't create
# a second copy of the module.
_bootstrap.__name__ = 'importlib._bootstrap'
_bootstrap.__package__ = 'importlib'
_bootstrap.__file__ = __file__.replace('__init__.py', '_bootstrap.py')
sys.modules['importlib._bootstrap'] = _bootstrap
# To simplify imports in test code
_w_long = _bootstrap._w_long
_r_long = _bootstrap._r_long
# Fully bootstrapped at this point, import whatever you like, circular
# dependencies and startup overhead minimisation permitting :)
# Public API #########################################################
from ._bootstrap import __import__
def invalidate_caches():
"""Call the invalidate_caches() method on all meta path finders stored in
sys.meta_path (where implemented)."""
for finder in sys.meta_path:
if hasattr(finder, 'invalidate_caches'):
finder.invalidate_caches()
def find_loader(name, path=None):
"""Find the loader for the specified module.
First, sys.modules is checked to see if the module was already imported. If
so, then sys.modules[name].__loader__ is returned. If that happens to be
set to None, then ValueError is raised. If the module is not in
sys.modules, then sys.meta_path is searched for a suitable loader with the
value of 'path' given to the finders. None is returned if no loader could
be found.
Dotted names do not have their parent packages implicitly imported. You will
most likely need to explicitly import all parent packages in the proper
order for a submodule to get the correct loader.
"""
try:
loader = sys.modules[name].__loader__
if loader is None:
raise ValueError('{}.__loader__ is None'.format(name))
else:
return loader
except KeyError:
pass
return _bootstrap._find_module(name, path)
def import_module(name, package=None):
"""Import a module.
The 'package' argument is required when performing a relative import. It
specifies the package to use as the anchor point from which to resolve the
relative import to an absolute import.
"""
level = 0
if name.startswith('.'):
if not package:
raise TypeError("relative imports require the 'package' argument")
for character in name:
if character != '.':
break
level += 1
return _bootstrap._gcd_import(name[level:], package, level)
#need at least one import hook for importlib stuff to work.
import basehook
sys.meta_path.append(basehook.BaseHook())
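# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module); the module
# names are placeholders.
#
#   import importlib
#   json_mod = importlib.import_module('json')             # absolute import
#   decoder = importlib.import_module('.decoder', 'json')  # relative import
#   importlib.invalidate_caches()  # e.g. after writing a new module to disk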
|
gpl-3.0
| -5,447,187,424,159,281,000 | 5,005,459,655,737,775,000 | 34.793814 | 80 | 0.666187 | false |
PROGRAM-IX/pystroke
|
event_engine.py
|
1
|
1542
|
import pygame
from pygame.locals import *
class EventEngine:
"""
Reads the event queue and passes events to other engines
@author: James Heslin (PROGRAM_IX)
"""
def __init__(self, i_e):
"""
Takes an InputEngine and passes all relevant events to it
@type i_e: InputEngine
@param i_e: InputEngine to which input events should be passed
@author: James Heslin (PROGRAM_IX)
"""
self.input = i_e
def update(self):
"""
Pulls all relevant events from the event queue and passes
them to the appropriate engines
@author: James Heslin (PROGRAM_IX)
"""
for e in pygame.event.get():
if e.type == MOUSEMOTION:
self.input.mouse_motion(e)
elif e.type == MOUSEBUTTONDOWN:
self.input.mouse_b_down(e)
elif e.type == MOUSEBUTTONUP:
self.input.mouse_b_up(e)
elif e.type == KEYDOWN:
self.input.key_down(e)
elif e.type == KEYUP:
self.input.key_up(e)
def reset_input(self):
"""
Resets the InputEngine's values
@author: James Heslin (PROGRAM_IX)
"""
print "RESETTING INPUT"
self.input.reset()
def print_input_states(self):
"""
Prints the states of the InputEngine
@author: James Heslin (PROGRAM_IX)
"""
self.input.print_all_states()
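# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original file). It assumes an
# ``InputEngine`` exposing the handler methods called from update() above;
# the main-loop structure is a placeholder.
#
#   input_engine = InputEngine()
#   events = EventEngine(input_engine)
#   while running:
#       events.update()   # drain pygame's event queue into the InputEngine
#       draw_frame()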
|
mit
| 2,315,450,006,468,467,000 | 1,506,350,103,343,687,700 | 26.553571 | 70 | 0.527237 | false |
yamaneko1212/webpay-python
|
webpay/model/charge.py
|
1
|
1090
|
from webpay.model.card import Card
from .model import Model
class Charge(Model):
def __init__(self, client, data):
Model.__init__(self, client, data)
def _instantiate_field(self, key, value):
if key == 'card':
return Card(self._client, value)
else:
return Model._instantiate_field(self, key, value)
def refund(self, amount=None):
"""Refund this charge.
Arguments:
- `amount`: amount to refund.
If `amount` is not given or `None`, refund all.
If `amount` is less than this charge's amount, refund partially.
"""
self._update_attributes(self._client.charges.refund(self.id, amount))
def capture(self, amount=None):
"""Capture this charge.
This charge should be uncaptured (created with capture=false) and not
yet expired.
Arguments:
- `amount`: amount to capture.
If `amount` is not given or `None`, use `this.amount`.
"""
self._update_attributes(self._client.charges.capture(self.id, amount))
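# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module). ``client`` is
# assumed to be a configured WebPay API client whose charges API yields
# Charge instances; the retrieval call, charge id and amount are placeholders.
#
#   charge = client.charges.retrieve('ch_xxxxxxxx')
#   charge.capture()      # capture the full authorized amount
#   charge.refund(400)    # partial refund of 400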
|
mit
| -7,352,203,314,320,268,000 | 7,849,607,977,285,629,000 | 30.142857 | 78 | 0.6 | false |
andyzsf/edx
|
cms/djangoapps/contentstore/views/tests/test_import_export.py
|
25
|
11737
|
"""
Unit tests for course import and export
"""
import copy
import json
import logging
import os
import shutil
import tarfile
import tempfile
from path import path
from uuid import uuid4
from django.test.utils import override_settings
from django.conf import settings
from contentstore.utils import reverse_course_url
from xmodule.modulestore.tests.factories import ItemFactory
from contentstore.tests.utils import CourseTestCase
from student import auth
from student.roles import CourseInstructorRole, CourseStaffRole
TEST_DATA_CONTENTSTORE = copy.deepcopy(settings.CONTENTSTORE)
TEST_DATA_CONTENTSTORE['DOC_STORE_CONFIG']['db'] = 'test_xcontent_%s' % uuid4().hex
log = logging.getLogger(__name__)
@override_settings(CONTENTSTORE=TEST_DATA_CONTENTSTORE)
class ImportTestCase(CourseTestCase):
"""
Unit tests for importing a course
"""
def setUp(self):
super(ImportTestCase, self).setUp()
self.url = reverse_course_url('import_handler', self.course.id)
self.content_dir = path(tempfile.mkdtemp())
def touch(name):
""" Equivalent to shell's 'touch'"""
            with open(name, 'a'):
os.utime(name, None)
# Create tar test files -----------------------------------------------
# OK course:
good_dir = tempfile.mkdtemp(dir=self.content_dir)
# test course being deeper down than top of tar file
embedded_dir = os.path.join(good_dir, "grandparent", "parent")
os.makedirs(os.path.join(embedded_dir, "course"))
with open(os.path.join(embedded_dir, "course.xml"), "w+") as f:
f.write('<course url_name="2013_Spring" org="EDx" course="0.00x"/>')
with open(os.path.join(embedded_dir, "course", "2013_Spring.xml"), "w+") as f:
f.write('<course></course>')
self.good_tar = os.path.join(self.content_dir, "good.tar.gz")
with tarfile.open(self.good_tar, "w:gz") as gtar:
gtar.add(good_dir)
# Bad course (no 'course.xml' file):
bad_dir = tempfile.mkdtemp(dir=self.content_dir)
touch(os.path.join(bad_dir, "bad.xml"))
self.bad_tar = os.path.join(self.content_dir, "bad.tar.gz")
with tarfile.open(self.bad_tar, "w:gz") as btar:
btar.add(bad_dir)
self.unsafe_common_dir = path(tempfile.mkdtemp(dir=self.content_dir))
def tearDown(self):
shutil.rmtree(self.content_dir)
def test_no_coursexml(self):
"""
Check that the response for a tar.gz import without a course.xml is
correct.
"""
with open(self.bad_tar) as btar:
resp = self.client.post(
self.url,
{
"name": self.bad_tar,
"course-data": [btar]
})
self.assertEquals(resp.status_code, 415)
# Check that `import_status` returns the appropriate stage (i.e., the
# stage at which import failed).
resp_status = self.client.get(
reverse_course_url(
'import_status_handler',
self.course.id,
kwargs={'filename': os.path.split(self.bad_tar)[1]}
)
)
self.assertEquals(json.loads(resp_status.content)["ImportStatus"], -2)
def test_with_coursexml(self):
"""
Check that the response for a tar.gz import with a course.xml is
correct.
"""
with open(self.good_tar) as gtar:
args = {"name": self.good_tar, "course-data": [gtar]}
resp = self.client.post(self.url, args)
self.assertEquals(resp.status_code, 200)
def test_import_in_existing_course(self):
"""
        Check that a course is imported successfully into an existing course and that users keep their access roles
"""
# Create a non_staff user and add it to course staff only
__, nonstaff_user = self.create_non_staff_authed_user_client(authenticate=False)
auth.add_users(self.user, CourseStaffRole(self.course.id), nonstaff_user)
course = self.store.get_course(self.course.id)
self.assertIsNotNone(course)
display_name_before_import = course.display_name
# Check that global staff user can import course
with open(self.good_tar) as gtar:
args = {"name": self.good_tar, "course-data": [gtar]}
resp = self.client.post(self.url, args)
self.assertEquals(resp.status_code, 200)
course = self.store.get_course(self.course.id)
self.assertIsNotNone(course)
display_name_after_import = course.display_name
# Check that course display name have changed after import
self.assertNotEqual(display_name_before_import, display_name_after_import)
# Now check that non_staff user has his same role
self.assertFalse(CourseInstructorRole(self.course.id).has_user(nonstaff_user))
self.assertTrue(CourseStaffRole(self.course.id).has_user(nonstaff_user))
# Now course staff user can also successfully import course
self.client.login(username=nonstaff_user.username, password='foo')
with open(self.good_tar) as gtar:
args = {"name": self.good_tar, "course-data": [gtar]}
resp = self.client.post(self.url, args)
self.assertEquals(resp.status_code, 200)
# Now check that non_staff user has his same role
self.assertFalse(CourseInstructorRole(self.course.id).has_user(nonstaff_user))
self.assertTrue(CourseStaffRole(self.course.id).has_user(nonstaff_user))
## Unsafe tar methods #####################################################
# Each of these methods creates a tarfile with a single type of unsafe
# content.
def _fifo_tar(self):
"""
Tar file with FIFO
"""
fifop = self.unsafe_common_dir / "fifo.file"
fifo_tar = self.unsafe_common_dir / "fifo.tar.gz"
os.mkfifo(fifop)
with tarfile.open(fifo_tar, "w:gz") as tar:
tar.add(fifop)
return fifo_tar
def _symlink_tar(self):
"""
Tarfile with symlink to path outside directory.
"""
outsidep = self.unsafe_common_dir / "unsafe_file.txt"
symlinkp = self.unsafe_common_dir / "symlink.txt"
symlink_tar = self.unsafe_common_dir / "symlink.tar.gz"
outsidep.symlink(symlinkp)
with tarfile.open(symlink_tar, "w:gz") as tar:
tar.add(symlinkp)
return symlink_tar
def _outside_tar(self):
"""
Tarfile with file that extracts to outside directory.
Extracting this tarfile in directory <dir> will put its contents
directly in <dir> (rather than <dir/tarname>).
"""
outside_tar = self.unsafe_common_dir / "unsafe_file.tar.gz"
with tarfile.open(outside_tar, "w:gz") as tar:
tar.addfile(tarfile.TarInfo(str(self.content_dir / "a_file")))
return outside_tar
def _outside_tar2(self):
"""
Tarfile with file that extracts to outside directory.
The path here matches the basename (`self.unsafe_common_dir`), but
then "cd's out". E.g. "/usr/../etc" == "/etc", but the naive basename
of the first (but not the second) is "/usr"
Extracting this tarfile in directory <dir> will also put its contents
directly in <dir> (rather than <dir/tarname>).
"""
outside_tar = self.unsafe_common_dir / "unsafe_file.tar.gz"
with tarfile.open(outside_tar, "w:gz") as tar:
tar.addfile(tarfile.TarInfo(str(self.unsafe_common_dir / "../a_file")))
return outside_tar
def test_unsafe_tar(self):
"""
        Check that the safety measures work.
This includes:
        'tarbombs' which include files or symlinks with paths
        outside of, or directly in, the working directory,
        'special files' (character devices, block devices or FIFOs),
        all of which raise exceptions/400s.
"""
def try_tar(tarpath):
""" Attempt to tar an unacceptable file """
with open(tarpath) as tar:
args = {"name": tarpath, "course-data": [tar]}
resp = self.client.post(self.url, args)
self.assertEquals(resp.status_code, 400)
self.assertTrue("SuspiciousFileOperation" in resp.content)
try_tar(self._fifo_tar())
try_tar(self._symlink_tar())
try_tar(self._outside_tar())
try_tar(self._outside_tar2())
# Check that `import_status` returns the appropriate stage (i.e.,
# either 3, indicating all previous steps are completed, or 0,
# indicating no upload in progress)
resp_status = self.client.get(
reverse_course_url(
'import_status_handler',
self.course.id,
kwargs={'filename': os.path.split(self.good_tar)[1]}
)
)
import_status = json.loads(resp_status.content)["ImportStatus"]
self.assertIn(import_status, (0, 3))
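# Not the edX implementation: a standalone sketch of the kind of check the
# tests above exercise, i.e. rejecting tar members that would extract outside
# of the target directory. The helper name is hypothetical and unused by the
# tests.
def _is_tar_member_safe(target_dir, member):
    """Return True if extracting `member` into `target_dir` stays inside it."""
    real_target = os.path.realpath(target_dir)
    dest = os.path.realpath(os.path.join(real_target, member.name))
    inside = dest == real_target or dest.startswith(real_target + os.sep)
    # Symlinks, FIFOs and device files are rejected along with escaping paths.
    return inside and (member.isreg() or member.isdir())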
@override_settings(CONTENTSTORE=TEST_DATA_CONTENTSTORE)
class ExportTestCase(CourseTestCase):
"""
Tests for export_handler.
"""
def setUp(self):
"""
Sets up the test course.
"""
super(ExportTestCase, self).setUp()
self.url = reverse_course_url('export_handler', self.course.id)
def test_export_html(self):
"""
Get the HTML for the page.
"""
resp = self.client.get_html(self.url)
self.assertEquals(resp.status_code, 200)
self.assertContains(resp, "Export My Course Content")
def test_export_json_unsupported(self):
"""
JSON is unsupported.
"""
resp = self.client.get(self.url, HTTP_ACCEPT='application/json')
self.assertEquals(resp.status_code, 406)
def test_export_targz(self):
"""
Get tar.gz file, using HTTP_ACCEPT.
"""
resp = self.client.get(self.url, HTTP_ACCEPT='application/x-tgz')
self._verify_export_succeeded(resp)
def test_export_targz_urlparam(self):
"""
Get tar.gz file, using URL parameter.
"""
resp = self.client.get(self.url + '?_accept=application/x-tgz')
self._verify_export_succeeded(resp)
def _verify_export_succeeded(self, resp):
""" Export success helper method. """
self.assertEquals(resp.status_code, 200)
self.assertTrue(resp.get('Content-Disposition').startswith('attachment'))
def test_export_failure_top_level(self):
"""
Export failure.
"""
fake_xblock = ItemFactory.create(parent_location=self.course.location, category='aawefawef')
self.store.publish(fake_xblock.location, self.user.id)
self._verify_export_failure(u'/container/{}'.format(self.course.location))
def test_export_failure_subsection_level(self):
"""
Slightly different export failure.
"""
vertical = ItemFactory.create(parent_location=self.course.location, category='vertical', display_name='foo')
ItemFactory.create(
parent_location=vertical.location,
category='aawefawef'
)
self._verify_export_failure(u'/container/{}'.format(vertical.location))
def _verify_export_failure(self, expected_text):
""" Export failure helper method. """
resp = self.client.get(self.url, HTTP_ACCEPT='application/x-tgz')
self.assertEquals(resp.status_code, 200)
self.assertIsNone(resp.get('Content-Disposition'))
self.assertContains(resp, 'Unable to create xml for module')
self.assertContains(resp, expected_text)
|
agpl-3.0
| -6,257,961,717,680,265,000 | 1,180,008,607,104,630,000 | 36.025237 | 116 | 0.6114 | false |
broesamle/servo
|
tests/wpt/web-platform-tests/tools/pywebsocket/src/mod_pywebsocket/__init__.py
|
552
|
8263
|
# Copyright 2011, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""WebSocket extension for Apache HTTP Server.
mod_pywebsocket is a WebSocket extension for Apache HTTP Server
intended for testing or experimental purposes. mod_python is required.
Installation
============
0. Prepare an Apache HTTP Server for which mod_python is enabled.
1. Specify the following Apache HTTP Server directives to suit your
configuration.
If mod_pywebsocket is not in the Python path, specify the following.
<websock_lib> is the directory where mod_pywebsocket is installed.
PythonPath "sys.path+['<websock_lib>']"
Always specify the following. <websock_handlers> is the directory where
user-written WebSocket handlers are placed.
PythonOption mod_pywebsocket.handler_root <websock_handlers>
PythonHeaderParserHandler mod_pywebsocket.headerparserhandler
To limit the search for WebSocket handlers to a directory <scan_dir>
under <websock_handlers>, configure as follows:
PythonOption mod_pywebsocket.handler_scan <scan_dir>
<scan_dir> is useful in saving scan time when <websock_handlers>
contains many non-WebSocket handler files.
If you want to allow handlers whose canonical path is not under the root
directory (i.e. symbolic link is in root directory but its target is not),
configure as follows:
PythonOption mod_pywebsocket.allow_handlers_outside_root_dir On
Example snippet of httpd.conf:
(mod_pywebsocket is in /websock_lib, WebSocket handlers are in
/websock_handlers, port is 80 for ws, 443 for wss.)
<IfModule python_module>
PythonPath "sys.path+['/websock_lib']"
PythonOption mod_pywebsocket.handler_root /websock_handlers
PythonHeaderParserHandler mod_pywebsocket.headerparserhandler
</IfModule>
2. Tune Apache parameters for serving WebSocket. Note that at least the
   TimeOut directive from the core features and the RequestReadTimeout
   directive from mod_reqtimeout should be adjusted so that they do not kill
   connections after only a few seconds of idle time.
3. Verify installation. You can use example/console.html to poke the server.
Writing WebSocket handlers
==========================
When a WebSocket request comes in, the resource name
specified in the handshake is considered as if it is a file path under
<websock_handlers> and the handler defined in
<websock_handlers>/<resource_name>_wsh.py is invoked.
For example, if the resource name is /example/chat, the handler defined in
<websock_handlers>/example/chat_wsh.py is invoked.
A WebSocket handler is composed of the following three functions:
web_socket_do_extra_handshake(request)
web_socket_transfer_data(request)
web_socket_passive_closing_handshake(request)
where:
request: mod_python request.
web_socket_do_extra_handshake is called during the handshake after the
headers are successfully parsed and WebSocket properties (ws_location,
ws_origin, and ws_resource) are added to request. A handler
can reject the request by raising an exception.
A request object has the following properties that you can use during the
extra handshake (web_socket_do_extra_handshake):
- ws_resource
- ws_origin
- ws_version
- ws_location (HyBi 00 only)
- ws_extensions (HyBi 06 and later)
- ws_deflate (HyBi 06 and later)
- ws_protocol
- ws_requested_protocols (HyBi 06 and later)
The last two are a bit tricky. See the next subsection.
Subprotocol Negotiation
-----------------------
For HyBi 06 and later, ws_protocol is always set to None when
web_socket_do_extra_handshake is called. If ws_requested_protocols is not
None, you must choose one subprotocol from this list and set it to
ws_protocol.
For HyBi 00, when web_socket_do_extra_handshake is called,
ws_protocol is set to the value given by the client in
Sec-WebSocket-Protocol header or None if
such header was not found in the opening handshake request. Finish extra
handshake with ws_protocol untouched to accept the request subprotocol.
Then, Sec-WebSocket-Protocol header will be sent to
the client in response with the same value as requested. Raise an exception
in web_socket_do_extra_handshake to reject the requested subprotocol.
Data Transfer
-------------
web_socket_transfer_data is called after the handshake completed
successfully. A handler can receive/send messages from/to the client
using request. mod_pywebsocket.msgutil module provides utilities
for data transfer.
You can receive a message by the following statement.
message = request.ws_stream.receive_message()
This call blocks until any complete text frame arrives, and the payload data
of the incoming frame will be stored into message. When you're using IETF
HyBi 00 or later protocol, receive_message() will return None on receiving
client-initiated closing handshake. When any error occurs, receive_message()
will raise some exception.
You can send a message by the following statement.
request.ws_stream.send_message(message)
Closing Connection
------------------
Executing the following statement or just return-ing from
web_socket_transfer_data cause connection close.
request.ws_stream.close_connection()
close_connection will wait
for closing handshake acknowledgement coming from the client. When it
couldn't receive a valid acknowledgement, raises an exception.
web_socket_passive_closing_handshake is called immediately after the server
receives an incoming closing frame from the client peer. You can specify
code and reason by return values. They are sent as a outgoing closing frame
from the server. A request object has the following properties that you can
use in web_socket_passive_closing_handshake.
- ws_close_code
- ws_close_reason
Threading
---------
A WebSocket handler must be thread-safe if the server (Apache or
standalone.py) is configured to use threads.
Configuring WebSocket Extension Processors
------------------------------------------
See extensions.py for supported WebSocket extensions. Note that they are
unstable and their APIs are subject to change substantially.
A request object has these extension processing related attributes.
- ws_requested_extensions:
A list of common.ExtensionParameter instances representing extension
parameters received from the client in the client's opening handshake.
You shouldn't modify it manually.
- ws_extensions:
A list of common.ExtensionParameter instances representing extension
parameters to send back to the client in the server's opening handshake.
You shouldn't touch it directly. Instead, call methods on extension
processors.
- ws_extension_processors:
A list of loaded extension processors. Find the processor for the
extension you want to configure from it, and call its methods.
"""
# vi:sts=4 sw=4 et tw=72
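# A minimal handler sketch based on the API described above. It belongs in a
# separate file under <websock_handlers> (e.g. a hypothetical echo_wsh.py,
# serving the resource /echo) and is shown here, commented out, only for
# illustration.
#
#   def web_socket_do_extra_handshake(request):
#       # Accept the request as-is; raise an exception here to reject it.
#       pass
#
#   def web_socket_transfer_data(request):
#       # Echo text messages until the client starts the closing handshake.
#       while True:
#           message = request.ws_stream.receive_message()
#           if message is None:
#               return
#           request.ws_stream.send_message(message)
#
#   def web_socket_passive_closing_handshake(request):
#       # Optionally return (code, reason) for the outgoing closing frame.
#       return request.ws_close_code, request.ws_close_reason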
|
mpl-2.0
| -8,401,460,587,558,554,000 | -5,692,365,398,325,840,000 | 35.888393 | 77 | 0.768607 | false |
damdam-s/OpenUpgrade
|
addons/project_issue/migrations/8.0.1.0/pre-migration.py
|
14
|
1454
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Odoo, a suite of business apps
# This module Copyright (C) 2014 Therp BV (<http://therp.nl>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.openupgrade import openupgrade
column_renames = {'project_issue': [('priority', None)]}
xmlid_renames = [
('project_issue.mt_issue_closed', 'project_issue.mt_issue_ready'),
('project_issue.mt_issue_started', 'project_issue.mt_issue_assigned'),
('project_issue.mt_project_issue_started',
'project_issue.mt_project_issue_assigned'),
]
@openupgrade.migrate()
def migrate(cr, version):
openupgrade.rename_columns(cr, column_renames)
openupgrade.rename_xmlids(cr, xmlid_renames)
|
agpl-3.0
| 6,125,115,420,349,517,000 | -8,895,070,113,117,324,000 | 38.297297 | 78 | 0.65337 | false |
benwolfe/esp8266-Arduino
|
esp8266com/esp8266/tools/macosx/xtensa-lx106-elf/xtensa-lx106-elf/sysroot/lib/libstdc++.a-gdb.py
|
6
|
2433
|
# -*- python -*-
# Copyright (C) 2009-2013 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys
import gdb
import os
import os.path
pythondir = '/Users/igrokhotkov/e/ESPTools/crosstool-NG/builds/xtensa-lx106-elf/share/gcc-4.8.2/python'
libdir = '/Users/igrokhotkov/e/ESPTools/crosstool-NG/builds/xtensa-lx106-elf/xtensa-lx106-elf/lib'
# This file might be loaded when there is no current objfile. This
# can happen if the user loads it manually. In this case we don't
# update sys.path; instead we just hope the user managed to do that
# beforehand.
if gdb.current_objfile () is not None:
# Update module path. We want to find the relative path from libdir
# to pythondir, and then we want to apply that relative path to the
# directory holding the objfile with which this file is associated.
# This preserves relocatability of the gcc tree.
# Do a simple normalization that removes duplicate separators.
pythondir = os.path.normpath (pythondir)
libdir = os.path.normpath (libdir)
prefix = os.path.commonprefix ([libdir, pythondir])
# In some bizarre configuration we might have found a match in the
# middle of a directory name.
if prefix[-1] != '/':
prefix = os.path.dirname (prefix) + '/'
# Strip off the prefix.
pythondir = pythondir[len (prefix):]
libdir = libdir[len (prefix):]
# Compute the ".."s needed to get from libdir to the prefix.
dotdots = ('..' + os.sep) * len (libdir.split (os.sep))
objfile = gdb.current_objfile ().filename
dir_ = os.path.join (os.path.dirname (objfile), dotdots, pythondir)
if not dir_ in sys.path:
sys.path.insert(0, dir_)
# Load the pretty-printers.
from libstdcxx.v6.printers import register_libstdcxx_printers
register_libstdcxx_printers (gdb.current_objfile ())
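# Worked example of the path arithmetic above, using the values in this file:
# the common prefix is '.../builds/xtensa-lx106-elf/', so pythondir is reduced
# to 'share/gcc-4.8.2/python', libdir to 'xtensa-lx106-elf/lib', dotdots
# becomes '../../', and the pretty-printer package is finally located relative
# to the directory containing the objfile.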
|
lgpl-2.1
| -1,774,483,809,825,891,600 | 2,695,146,778,644,522,500 | 39.55 | 103 | 0.72051 | false |
jenalgit/django
|
tests/urlpatterns_reverse/tests.py
|
154
|
50058
|
# -*- coding: utf-8 -*-
"""
Unit tests for reverse URL lookups.
"""
from __future__ import unicode_literals
import sys
import unittest
from admin_scripts.tests import AdminScriptTestCase
from django.conf import settings
from django.conf.urls import include, url
from django.contrib.auth.models import User
from django.core.exceptions import ImproperlyConfigured, ViewDoesNotExist
from django.core.urlresolvers import (
NoReverseMatch, RegexURLPattern, RegexURLResolver, Resolver404,
ResolverMatch, get_callable, get_resolver, resolve, reverse, reverse_lazy,
)
from django.http import (
HttpRequest, HttpResponsePermanentRedirect, HttpResponseRedirect,
)
from django.shortcuts import redirect
from django.test import (
SimpleTestCase, TestCase, ignore_warnings, override_settings,
)
from django.test.utils import override_script_prefix
from django.utils import six
from django.utils.deprecation import (
RemovedInDjango20Warning, RemovedInDjango110Warning,
)
from . import middleware, urlconf_outer, views
from .views import empty_view
resolve_test_data = (
# These entries are in the format: (path, url_name, app_name, namespace, view_name, func, args, kwargs)
# Simple case
('/normal/42/37/', 'normal-view', '', '', 'normal-view', views.empty_view, tuple(), {'arg1': '42', 'arg2': '37'}),
('/view_class/42/37/', 'view-class', '', '', 'view-class', views.view_class_instance, tuple(), {'arg1': '42', 'arg2': '37'}),
('/included/normal/42/37/', 'inc-normal-view', '', '', 'inc-normal-view', views.empty_view, tuple(), {'arg1': '42', 'arg2': '37'}),
('/included/view_class/42/37/', 'inc-view-class', '', '', 'inc-view-class', views.view_class_instance, tuple(), {'arg1': '42', 'arg2': '37'}),
# Unnamed args are dropped if you have *any* kwargs in a pattern
('/mixed_args/42/37/', 'mixed-args', '', '', 'mixed-args', views.empty_view, tuple(), {'arg2': '37'}),
('/included/mixed_args/42/37/', 'inc-mixed-args', '', '', 'inc-mixed-args', views.empty_view, tuple(), {'arg2': '37'}),
('/included/12/mixed_args/42/37/', 'inc-mixed-args', '', '', 'inc-mixed-args', views.empty_view, tuple(), {'arg2': '37'}),
# Unnamed views should have None as the url_name. Regression data for #21157.
('/unnamed/normal/42/37/', None, '', '', 'urlpatterns_reverse.views.empty_view', views.empty_view, tuple(), {'arg1': '42', 'arg2': '37'}),
('/unnamed/view_class/42/37/', None, '', '', 'urlpatterns_reverse.views.ViewClass', views.view_class_instance, tuple(), {'arg1': '42', 'arg2': '37'}),
# If you have no kwargs, you get an args list.
('/no_kwargs/42/37/', 'no-kwargs', '', '', 'no-kwargs', views.empty_view, ('42', '37'), {}),
('/included/no_kwargs/42/37/', 'inc-no-kwargs', '', '', 'inc-no-kwargs', views.empty_view, ('42', '37'), {}),
('/included/12/no_kwargs/42/37/', 'inc-no-kwargs', '', '', 'inc-no-kwargs', views.empty_view, ('12', '42', '37'), {}),
# Namespaces
('/test1/inner/42/37/', 'urlobject-view', 'testapp', 'test-ns1', 'test-ns1:urlobject-view', views.empty_view, tuple(), {'arg1': '42', 'arg2': '37'}),
('/included/test3/inner/42/37/', 'urlobject-view', 'testapp', 'test-ns3', 'test-ns3:urlobject-view', views.empty_view, tuple(), {'arg1': '42', 'arg2': '37'}),
('/ns-included1/normal/42/37/', 'inc-normal-view', '', 'inc-ns1', 'inc-ns1:inc-normal-view', views.empty_view, tuple(), {'arg1': '42', 'arg2': '37'}),
('/included/test3/inner/42/37/', 'urlobject-view', 'testapp', 'test-ns3', 'test-ns3:urlobject-view', views.empty_view, tuple(), {'arg1': '42', 'arg2': '37'}),
('/default/inner/42/37/', 'urlobject-view', 'testapp', 'testapp', 'testapp:urlobject-view', views.empty_view, tuple(), {'arg1': '42', 'arg2': '37'}),
('/other2/inner/42/37/', 'urlobject-view', 'nodefault', 'other-ns2', 'other-ns2:urlobject-view', views.empty_view, tuple(), {'arg1': '42', 'arg2': '37'}),
('/other1/inner/42/37/', 'urlobject-view', 'nodefault', 'other-ns1', 'other-ns1:urlobject-view', views.empty_view, tuple(), {'arg1': '42', 'arg2': '37'}),
# Nested namespaces
('/ns-included1/test3/inner/42/37/', 'urlobject-view', 'testapp', 'inc-ns1:test-ns3', 'inc-ns1:test-ns3:urlobject-view', views.empty_view, tuple(), {'arg1': '42', 'arg2': '37'}),
('/ns-included1/ns-included4/ns-included2/test3/inner/42/37/', 'urlobject-view', 'testapp', 'inc-ns1:inc-ns4:inc-ns2:test-ns3', 'inc-ns1:inc-ns4:inc-ns2:test-ns3:urlobject-view', views.empty_view, tuple(), {'arg1': '42', 'arg2': '37'}),
('/app-included/test3/inner/42/37/', 'urlobject-view', 'inc-app:testapp', 'inc-app:test-ns3', 'inc-app:test-ns3:urlobject-view', views.empty_view, tuple(), {'arg1': '42', 'arg2': '37'}),
('/app-included/ns-included4/ns-included2/test3/inner/42/37/', 'urlobject-view', 'inc-app:testapp', 'inc-app:inc-ns4:inc-ns2:test-ns3', 'inc-app:inc-ns4:inc-ns2:test-ns3:urlobject-view', views.empty_view, tuple(), {'arg1': '42', 'arg2': '37'}),
# Namespaces capturing variables
('/inc70/', 'inner-nothing', '', 'inc-ns5', 'inc-ns5:inner-nothing', views.empty_view, tuple(), {'outer': '70'}),
('/inc78/extra/foobar/', 'inner-extra', '', 'inc-ns5', 'inc-ns5:inner-extra', views.empty_view, tuple(), {'outer': '78', 'extra': 'foobar'}),
)
test_data = (
('places', '/places/3/', [3], {}),
('places', '/places/3/', ['3'], {}),
('places', NoReverseMatch, ['a'], {}),
('places', NoReverseMatch, [], {}),
('places?', '/place/', [], {}),
('places+', '/places/', [], {}),
('places*', '/place/', [], {}),
('places2?', '/', [], {}),
('places2+', '/places/', [], {}),
('places2*', '/', [], {}),
('places3', '/places/4/', [4], {}),
('places3', '/places/harlem/', ['harlem'], {}),
('places3', NoReverseMatch, ['harlem64'], {}),
('places4', '/places/3/', [], {'id': 3}),
('people', NoReverseMatch, [], {}),
('people', '/people/adrian/', ['adrian'], {}),
('people', '/people/adrian/', [], {'name': 'adrian'}),
('people', NoReverseMatch, ['name with spaces'], {}),
('people', NoReverseMatch, [], {'name': 'name with spaces'}),
('people2', '/people/name/', [], {}),
('people2a', '/people/name/fred/', ['fred'], {}),
('people_backref', '/people/nate-nate/', ['nate'], {}),
('people_backref', '/people/nate-nate/', [], {'name': 'nate'}),
('optional', '/optional/fred/', [], {'name': 'fred'}),
('optional', '/optional/fred/', ['fred'], {}),
('named_optional', '/optional/1/', [1], {}),
('named_optional', '/optional/1/', [], {'arg1': 1}),
('named_optional', '/optional/1/2/', [1, 2], {}),
('named_optional', '/optional/1/2/', [], {'arg1': 1, 'arg2': 2}),
('named_optional_terminated', '/optional/1/2/', [1, 2], {}),
('named_optional_terminated', '/optional/1/2/', [], {'arg1': 1, 'arg2': 2}),
('hardcoded', '/hardcoded/', [], {}),
('hardcoded2', '/hardcoded/doc.pdf', [], {}),
('people3', '/people/il/adrian/', [], {'state': 'il', 'name': 'adrian'}),
('people3', NoReverseMatch, [], {'state': 'il'}),
('people3', NoReverseMatch, [], {'name': 'adrian'}),
('people4', NoReverseMatch, [], {'state': 'il', 'name': 'adrian'}),
('people6', '/people/il/test/adrian/', ['il/test', 'adrian'], {}),
('people6', '/people//adrian/', ['adrian'], {}),
('range', '/character_set/a/', [], {}),
('range2', '/character_set/x/', [], {}),
('price', '/price/$10/', ['10'], {}),
('price2', '/price/$10/', ['10'], {}),
('price3', '/price/$10/', ['10'], {}),
('product', '/product/chocolate+($2.00)/', [], {'price': '2.00', 'product': 'chocolate'}),
('headlines', '/headlines/2007.5.21/', [], dict(year=2007, month=5, day=21)),
('windows', r'/windows_path/C:%5CDocuments%20and%20Settings%5Cspam/', [], dict(drive_name='C', path=r'Documents and Settings\spam')),
('special', r'/special_chars/~@+%5C$*%7C/', [r'~@+\$*|'], {}),
('special', r'/special_chars/some%20resource/', [r'some resource'], {}),
('special', r'/special_chars/10%25%20complete/', [r'10% complete'], {}),
('special', r'/special_chars/some%20resource/', [], {'chars': r'some resource'}),
('special', r'/special_chars/10%25%20complete/', [], {'chars': r'10% complete'}),
('special', NoReverseMatch, [''], {}),
('mixed', '/john/0/', [], {'name': 'john'}),
('repeats', '/repeats/a/', [], {}),
('repeats2', '/repeats/aa/', [], {}),
('repeats3', '/repeats/aa/', [], {}),
('insensitive', '/CaseInsensitive/fred', ['fred'], {}),
('test', '/test/1', [], {}),
('test2', '/test/2', [], {}),
('inner-nothing', '/outer/42/', [], {'outer': '42'}),
('inner-nothing', '/outer/42/', ['42'], {}),
('inner-nothing', NoReverseMatch, ['foo'], {}),
('inner-extra', '/outer/42/extra/inner/', [], {'extra': 'inner', 'outer': '42'}),
('inner-extra', '/outer/42/extra/inner/', ['42', 'inner'], {}),
('inner-extra', NoReverseMatch, ['fred', 'inner'], {}),
('inner-no-kwargs', '/outer-no-kwargs/42/inner-no-kwargs/1/', ['42', '1'], {}),
('disjunction', NoReverseMatch, ['foo'], {}),
('inner-disjunction', NoReverseMatch, ['10', '11'], {}),
('extra-places', '/e-places/10/', ['10'], {}),
('extra-people', '/e-people/fred/', ['fred'], {}),
('extra-people', '/e-people/fred/', [], {'name': 'fred'}),
('part', '/part/one/', [], {'value': 'one'}),
('part', '/prefix/xx/part/one/', [], {'value': 'one', 'prefix': 'xx'}),
('part2', '/part2/one/', [], {'value': 'one'}),
('part2', '/part2/', [], {}),
('part2', '/prefix/xx/part2/one/', [], {'value': 'one', 'prefix': 'xx'}),
('part2', '/prefix/xx/part2/', [], {'prefix': 'xx'}),
# Tests for nested groups. Nested capturing groups will only work if you
# *only* supply the correct outer group.
('nested-noncapture', '/nested/noncapture/opt', [], {'p': 'opt'}),
('nested-capture', '/nested/capture/opt/', ['opt/'], {}),
('nested-capture', NoReverseMatch, [], {'p': 'opt'}),
('nested-mixedcapture', '/nested/capture/mixed/opt', ['opt'], {}),
('nested-mixedcapture', NoReverseMatch, [], {'p': 'opt'}),
('nested-namedcapture', '/nested/capture/named/opt/', [], {'outer': 'opt/'}),
('nested-namedcapture', NoReverseMatch, [], {'outer': 'opt/', 'inner': 'opt'}),
('nested-namedcapture', NoReverseMatch, [], {'inner': 'opt'}),
# Regression for #9038
# These views are resolved by method name. Each method is deployed twice -
# once with an explicit argument, and once using the default value on
# the method. This is potentially ambiguous, as you have to pick the
# correct view for the arguments provided.
('urlpatterns_reverse.views.absolute_kwargs_view', '/absolute_arg_view/', [], {}),
('urlpatterns_reverse.views.absolute_kwargs_view', '/absolute_arg_view/10/', [], {'arg1': 10}),
('non_path_include', '/includes/non_path_include/', [], {}),
# Tests for #13154
('defaults', '/defaults_view1/3/', [], {'arg1': 3, 'arg2': 1}),
('defaults', '/defaults_view2/3/', [], {'arg1': 3, 'arg2': 2}),
('defaults', NoReverseMatch, [], {'arg1': 3, 'arg2': 3}),
('defaults', NoReverseMatch, [], {'arg2': 1}),
# Security tests
('security', '/%2Fexample.com/security/', ['/example.com'], {}),
)
class URLObject(object):
urlpatterns = [
url(r'^inner/$', views.empty_view, name='urlobject-view'),
url(r'^inner/(?P<arg1>[0-9]+)/(?P<arg2>[0-9]+)/$', views.empty_view, name='urlobject-view'),
url(r'^inner/\+\\\$\*/$', views.empty_view, name='urlobject-special-view'),
]
def __init__(self, app_name, namespace=None):
self.app_name = app_name
self.namespace = namespace
@property
def urls(self):
return self.urlpatterns, self.app_name, self.namespace
@property
def app_urls(self):
return self.urlpatterns, self.app_name
@override_settings(ROOT_URLCONF='urlpatterns_reverse.no_urls')
class NoURLPatternsTests(SimpleTestCase):
def test_no_urls_exception(self):
"""
RegexURLResolver should raise an exception when no urlpatterns exist.
"""
resolver = RegexURLResolver(r'^$', settings.ROOT_URLCONF)
self.assertRaisesMessage(
ImproperlyConfigured,
"The included urlconf 'urlpatterns_reverse.no_urls' does not "
"appear to have any patterns in it. If you see valid patterns in "
"the file then the issue is probably caused by a circular import.",
getattr, resolver, 'url_patterns'
)
@override_settings(ROOT_URLCONF='urlpatterns_reverse.urls')
class URLPatternReverse(SimpleTestCase):
@ignore_warnings(category=RemovedInDjango110Warning)
def test_urlpattern_reverse(self):
for name, expected, args, kwargs in test_data:
try:
got = reverse(name, args=args, kwargs=kwargs)
except NoReverseMatch:
self.assertEqual(expected, NoReverseMatch)
else:
self.assertEqual(got, expected)
def test_reverse_none(self):
# Reversing None should raise an error, not return the last un-named view.
self.assertRaises(NoReverseMatch, reverse, None)
@override_script_prefix('/{{invalid}}/')
def test_prefix_braces(self):
self.assertEqual(
'/%7B%7Binvalid%7D%7D/includes/non_path_include/',
reverse('non_path_include')
)
def test_prefix_parenthesis(self):
# Parentheses are allowed and should not cause errors or be escaped
with override_script_prefix('/bogus)/'):
self.assertEqual(
'/bogus)/includes/non_path_include/',
reverse('non_path_include')
)
with override_script_prefix('/(bogus)/'):
self.assertEqual(
'/(bogus)/includes/non_path_include/',
reverse('non_path_include')
)
@override_script_prefix('/bump%20map/')
def test_prefix_format_char(self):
self.assertEqual(
'/bump%2520map/includes/non_path_include/',
reverse('non_path_include')
)
@override_script_prefix('/%7Eme/')
def test_non_urlsafe_prefix_with_args(self):
# Regression for #20022, adjusted for #24013 because ~ is an unreserved
# character. Tests whether % is escaped.
self.assertEqual('/%257Eme/places/1/', reverse('places', args=[1]))
def test_patterns_reported(self):
# Regression for #17076
try:
# this url exists, but requires an argument
reverse("people", args=[])
except NoReverseMatch as e:
pattern_description = r"1 pattern(s) tried: ['people/(?P<name>\\w+)/$']"
self.assertIn(pattern_description, str(e))
else:
# we can't use .assertRaises, since we want to inspect the
# exception
self.fail("Expected a NoReverseMatch, but none occurred.")
@override_script_prefix('/script:name/')
def test_script_name_escaping(self):
self.assertEqual(
reverse('optional', args=['foo:bar']),
'/script:name/optional/foo:bar/'
)
def test_reverse_returns_unicode(self):
name, expected, args, kwargs = test_data[0]
self.assertIsInstance(
reverse(name, args=args, kwargs=kwargs),
six.text_type
)
class ResolverTests(unittest.TestCase):
@ignore_warnings(category=RemovedInDjango20Warning)
def test_resolver_repr(self):
"""
Test repr of RegexURLResolver, especially when urlconf_name is a list
(#17892).
"""
# Pick a resolver from a namespaced urlconf
resolver = get_resolver('urlpatterns_reverse.namespace_urls')
sub_resolver = resolver.namespace_dict['test-ns1'][1]
self.assertIn('<RegexURLPattern list>', repr(sub_resolver))
def test_reverse_lazy_object_coercion_by_resolve(self):
"""
Verifies lazy object returned by reverse_lazy is coerced to
text by resolve(). Previous to #21043, this would raise a TypeError.
"""
urls = 'urlpatterns_reverse.named_urls'
proxy_url = reverse_lazy('named-url1', urlconf=urls)
resolver = get_resolver(urls)
try:
resolver.resolve(proxy_url)
except TypeError:
self.fail('Failed to coerce lazy object to text')
def test_non_regex(self):
"""
Verifies that we raise a Resolver404 if what we are resolving doesn't
meet the basic requirements of a path to match - i.e., at the very
least, it matches the root pattern '^/'. We must never return None
from resolve, or we will get a TypeError further down the line.
Regression for #10834.
"""
self.assertRaises(Resolver404, resolve, '')
self.assertRaises(Resolver404, resolve, 'a')
self.assertRaises(Resolver404, resolve, '\\')
self.assertRaises(Resolver404, resolve, '.')
def test_404_tried_urls_have_names(self):
"""
Verifies that the list of URLs that come back from a Resolver404
exception contains a list in the right format for printing out in
the DEBUG 404 page with both the patterns and URL names, if available.
"""
urls = 'urlpatterns_reverse.named_urls'
# this list matches the expected URL types and names returned when
# you try to resolve a non-existent URL in the first level of included
# URLs in named_urls.py (e.g., '/included/non-existent-url')
url_types_names = [
[{'type': RegexURLPattern, 'name': 'named-url1'}],
[{'type': RegexURLPattern, 'name': 'named-url2'}],
[{'type': RegexURLPattern, 'name': None}],
[{'type': RegexURLResolver}, {'type': RegexURLPattern, 'name': 'named-url3'}],
[{'type': RegexURLResolver}, {'type': RegexURLPattern, 'name': 'named-url4'}],
[{'type': RegexURLResolver}, {'type': RegexURLPattern, 'name': None}],
[{'type': RegexURLResolver}, {'type': RegexURLResolver}],
]
try:
resolve('/included/non-existent-url', urlconf=urls)
self.fail('resolve did not raise a 404')
except Resolver404 as e:
# make sure we at least matched the root ('/') url resolver:
self.assertIn('tried', e.args[0])
tried = e.args[0]['tried']
self.assertEqual(len(e.args[0]['tried']), len(url_types_names), 'Wrong number of tried URLs returned. Expected %s, got %s.' % (len(url_types_names), len(e.args[0]['tried'])))
for tried, expected in zip(e.args[0]['tried'], url_types_names):
for t, e in zip(tried, expected):
                    self.assertIsInstance(t, e['type'], '%s is not an instance of %s' % (t, e['type']))
if 'name' in e:
if not e['name']:
self.assertIsNone(t.name, 'Expected no URL name but found %s.' % t.name)
else:
self.assertEqual(t.name, e['name'], 'Wrong URL name. Expected "%s", got "%s".' % (e['name'], t.name))
@override_settings(ROOT_URLCONF='urlpatterns_reverse.reverse_lazy_urls')
class ReverseLazyTest(TestCase):
def test_redirect_with_lazy_reverse(self):
response = self.client.get('/redirect/')
self.assertRedirects(response, "/redirected_to/", status_code=302)
def test_user_permission_with_lazy_reverse(self):
User.objects.create_user('alfred', '[email protected]', password='testpw')
response = self.client.get('/login_required_view/')
self.assertRedirects(response, "/login/?next=/login_required_view/", status_code=302)
self.client.login(username='alfred', password='testpw')
response = self.client.get('/login_required_view/')
self.assertEqual(response.status_code, 200)
def test_inserting_reverse_lazy_into_string(self):
self.assertEqual(
'Some URL: %s' % reverse_lazy('some-login-page'),
'Some URL: /login/'
)
if six.PY2:
self.assertEqual(
b'Some URL: %s' % reverse_lazy('some-login-page'),
'Some URL: /login/'
)
class ReverseLazySettingsTest(AdminScriptTestCase):
"""
Test that reverse_lazy can be used in settings without causing a circular
import error.
"""
def setUp(self):
self.write_settings('settings.py', extra="""
from django.core.urlresolvers import reverse_lazy
LOGIN_URL = reverse_lazy('login')""")
def tearDown(self):
self.remove_settings('settings.py')
def test_lazy_in_settings(self):
out, err = self.run_manage(['check'])
self.assertNoOutput(err)
@override_settings(ROOT_URLCONF='urlpatterns_reverse.urls')
class ReverseShortcutTests(SimpleTestCase):
def test_redirect_to_object(self):
# We don't really need a model; just something with a get_absolute_url
class FakeObj(object):
def get_absolute_url(self):
return "/hi-there/"
res = redirect(FakeObj())
self.assertIsInstance(res, HttpResponseRedirect)
self.assertEqual(res.url, '/hi-there/')
res = redirect(FakeObj(), permanent=True)
self.assertIsInstance(res, HttpResponsePermanentRedirect)
self.assertEqual(res.url, '/hi-there/')
def test_redirect_to_view_name(self):
res = redirect('hardcoded2')
self.assertEqual(res.url, '/hardcoded/doc.pdf')
res = redirect('places', 1)
self.assertEqual(res.url, '/places/1/')
res = redirect('headlines', year='2008', month='02', day='17')
self.assertEqual(res.url, '/headlines/2008.02.17/')
self.assertRaises(NoReverseMatch, redirect, 'not-a-view')
def test_redirect_to_url(self):
res = redirect('/foo/')
self.assertEqual(res.url, '/foo/')
res = redirect('http://example.com/')
self.assertEqual(res.url, 'http://example.com/')
# Assert that we can redirect using UTF-8 strings
res = redirect('/æøå/abc/')
self.assertEqual(res.url, '/%C3%A6%C3%B8%C3%A5/abc/')
# Assert that no imports are attempted when dealing with a relative path
# (previously, the below would resolve in a UnicodeEncodeError from __import__ )
res = redirect('/æøå.abc/')
self.assertEqual(res.url, '/%C3%A6%C3%B8%C3%A5.abc/')
res = redirect('os.path')
self.assertEqual(res.url, 'os.path')
def test_no_illegal_imports(self):
# modules that are not listed in urlpatterns should not be importable
redirect("urlpatterns_reverse.nonimported_module.view")
self.assertNotIn("urlpatterns_reverse.nonimported_module", sys.modules)
@ignore_warnings(category=RemovedInDjango110Warning)
def test_reverse_by_path_nested(self):
# Views that are added to urlpatterns using include() should be
# reversible by dotted path.
self.assertEqual(reverse('urlpatterns_reverse.views.nested_view'), '/includes/nested_path/')
def test_redirect_view_object(self):
from .views import absolute_kwargs_view
res = redirect(absolute_kwargs_view)
self.assertEqual(res.url, '/absolute_arg_view/')
self.assertRaises(NoReverseMatch, redirect, absolute_kwargs_view, wrong_argument=None)
@override_settings(ROOT_URLCONF='urlpatterns_reverse.namespace_urls')
@ignore_warnings(category=RemovedInDjango20Warning)
class NamespaceTests(SimpleTestCase):
def test_ambiguous_object(self):
"Names deployed via dynamic URL objects that require namespaces can't be resolved"
self.assertRaises(NoReverseMatch, reverse, 'urlobject-view')
self.assertRaises(NoReverseMatch, reverse, 'urlobject-view', args=[37, 42])
self.assertRaises(NoReverseMatch, reverse, 'urlobject-view', kwargs={'arg1': 42, 'arg2': 37})
def test_ambiguous_urlpattern(self):
"Names deployed via dynamic URL objects that require namespaces can't be resolved"
self.assertRaises(NoReverseMatch, reverse, 'inner-nothing')
self.assertRaises(NoReverseMatch, reverse, 'inner-nothing', args=[37, 42])
self.assertRaises(NoReverseMatch, reverse, 'inner-nothing', kwargs={'arg1': 42, 'arg2': 37})
def test_non_existent_namespace(self):
"Non-existent namespaces raise errors"
self.assertRaises(NoReverseMatch, reverse, 'blahblah:urlobject-view')
self.assertRaises(NoReverseMatch, reverse, 'test-ns1:blahblah:urlobject-view')
def test_normal_name(self):
"Normal lookups work as expected"
self.assertEqual('/normal/', reverse('normal-view'))
self.assertEqual('/normal/37/42/', reverse('normal-view', args=[37, 42]))
self.assertEqual('/normal/42/37/', reverse('normal-view', kwargs={'arg1': 42, 'arg2': 37}))
self.assertEqual('/+%5C$*/', reverse('special-view'))
def test_simple_included_name(self):
"Normal lookups work on names included from other patterns"
self.assertEqual('/included/normal/', reverse('inc-normal-view'))
self.assertEqual('/included/normal/37/42/', reverse('inc-normal-view', args=[37, 42]))
self.assertEqual('/included/normal/42/37/', reverse('inc-normal-view', kwargs={'arg1': 42, 'arg2': 37}))
self.assertEqual('/included/+%5C$*/', reverse('inc-special-view'))
def test_namespace_object(self):
"Dynamic URL objects can be found using a namespace"
self.assertEqual('/test1/inner/', reverse('test-ns1:urlobject-view'))
self.assertEqual('/test1/inner/37/42/', reverse('test-ns1:urlobject-view', args=[37, 42]))
self.assertEqual('/test1/inner/42/37/', reverse('test-ns1:urlobject-view', kwargs={'arg1': 42, 'arg2': 37}))
self.assertEqual('/test1/inner/+%5C$*/', reverse('test-ns1:urlobject-special-view'))
def test_app_object(self):
"Dynamic URL objects can return a (pattern, app_name) 2-tuple, and include() can set the namespace"
self.assertEqual('/newapp1/inner/', reverse('new-ns1:urlobject-view'))
self.assertEqual('/newapp1/inner/37/42/', reverse('new-ns1:urlobject-view', args=[37, 42]))
self.assertEqual('/newapp1/inner/42/37/', reverse('new-ns1:urlobject-view', kwargs={'arg1': 42, 'arg2': 37}))
self.assertEqual('/newapp1/inner/+%5C$*/', reverse('new-ns1:urlobject-special-view'))
def test_app_object_default_namespace(self):
"Namespace defaults to app_name when including a (pattern, app_name) 2-tuple"
self.assertEqual('/new-default/inner/', reverse('newapp:urlobject-view'))
self.assertEqual('/new-default/inner/37/42/', reverse('newapp:urlobject-view', args=[37, 42]))
self.assertEqual('/new-default/inner/42/37/', reverse('newapp:urlobject-view', kwargs={'arg1': 42, 'arg2': 37}))
self.assertEqual('/new-default/inner/+%5C$*/', reverse('newapp:urlobject-special-view'))
def test_embedded_namespace_object(self):
"Namespaces can be installed anywhere in the URL pattern tree"
self.assertEqual('/included/test3/inner/', reverse('test-ns3:urlobject-view'))
self.assertEqual('/included/test3/inner/37/42/', reverse('test-ns3:urlobject-view', args=[37, 42]))
self.assertEqual('/included/test3/inner/42/37/', reverse('test-ns3:urlobject-view', kwargs={'arg1': 42, 'arg2': 37}))
self.assertEqual('/included/test3/inner/+%5C$*/', reverse('test-ns3:urlobject-special-view'))
def test_namespace_pattern(self):
"Namespaces can be applied to include()'d urlpatterns"
self.assertEqual('/ns-included1/normal/', reverse('inc-ns1:inc-normal-view'))
self.assertEqual('/ns-included1/normal/37/42/', reverse('inc-ns1:inc-normal-view', args=[37, 42]))
self.assertEqual('/ns-included1/normal/42/37/', reverse('inc-ns1:inc-normal-view', kwargs={'arg1': 42, 'arg2': 37}))
self.assertEqual('/ns-included1/+%5C$*/', reverse('inc-ns1:inc-special-view'))
def test_app_name_pattern(self):
"Namespaces can be applied to include()'d urlpatterns that set an app_name attribute"
self.assertEqual('/app-included1/normal/', reverse('app-ns1:inc-normal-view'))
self.assertEqual('/app-included1/normal/37/42/', reverse('app-ns1:inc-normal-view', args=[37, 42]))
self.assertEqual('/app-included1/normal/42/37/', reverse('app-ns1:inc-normal-view', kwargs={'arg1': 42, 'arg2': 37}))
self.assertEqual('/app-included1/+%5C$*/', reverse('app-ns1:inc-special-view'))
def test_namespace_pattern_with_variable_prefix(self):
"When using an include with namespaces when there is a regex variable in front of it"
self.assertEqual('/ns-outer/42/normal/', reverse('inc-outer:inc-normal-view', kwargs={'outer': 42}))
self.assertEqual('/ns-outer/42/normal/', reverse('inc-outer:inc-normal-view', args=[42]))
self.assertEqual('/ns-outer/42/normal/37/4/', reverse('inc-outer:inc-normal-view', kwargs={'outer': 42, 'arg1': 37, 'arg2': 4}))
self.assertEqual('/ns-outer/42/normal/37/4/', reverse('inc-outer:inc-normal-view', args=[42, 37, 4]))
self.assertEqual('/ns-outer/42/+%5C$*/', reverse('inc-outer:inc-special-view', kwargs={'outer': 42}))
self.assertEqual('/ns-outer/42/+%5C$*/', reverse('inc-outer:inc-special-view', args=[42]))
def test_multiple_namespace_pattern(self):
"Namespaces can be embedded"
self.assertEqual('/ns-included1/test3/inner/', reverse('inc-ns1:test-ns3:urlobject-view'))
self.assertEqual('/ns-included1/test3/inner/37/42/', reverse('inc-ns1:test-ns3:urlobject-view', args=[37, 42]))
self.assertEqual('/ns-included1/test3/inner/42/37/', reverse('inc-ns1:test-ns3:urlobject-view', kwargs={'arg1': 42, 'arg2': 37}))
self.assertEqual('/ns-included1/test3/inner/+%5C$*/', reverse('inc-ns1:test-ns3:urlobject-special-view'))
def test_nested_namespace_pattern(self):
"Namespaces can be nested"
self.assertEqual('/ns-included1/ns-included4/ns-included1/test3/inner/', reverse('inc-ns1:inc-ns4:inc-ns1:test-ns3:urlobject-view'))
self.assertEqual('/ns-included1/ns-included4/ns-included1/test3/inner/37/42/', reverse('inc-ns1:inc-ns4:inc-ns1:test-ns3:urlobject-view', args=[37, 42]))
self.assertEqual('/ns-included1/ns-included4/ns-included1/test3/inner/42/37/', reverse('inc-ns1:inc-ns4:inc-ns1:test-ns3:urlobject-view', kwargs={'arg1': 42, 'arg2': 37}))
self.assertEqual('/ns-included1/ns-included4/ns-included1/test3/inner/+%5C$*/', reverse('inc-ns1:inc-ns4:inc-ns1:test-ns3:urlobject-special-view'))
def test_app_lookup_object(self):
"A default application namespace can be used for lookup"
self.assertEqual('/default/inner/', reverse('testapp:urlobject-view'))
self.assertEqual('/default/inner/37/42/', reverse('testapp:urlobject-view', args=[37, 42]))
self.assertEqual('/default/inner/42/37/', reverse('testapp:urlobject-view', kwargs={'arg1': 42, 'arg2': 37}))
self.assertEqual('/default/inner/+%5C$*/', reverse('testapp:urlobject-special-view'))
def test_app_lookup_object_with_default(self):
"A default application namespace is sensitive to the 'current' app can be used for lookup"
self.assertEqual('/included/test3/inner/', reverse('testapp:urlobject-view', current_app='test-ns3'))
self.assertEqual('/included/test3/inner/37/42/', reverse('testapp:urlobject-view', args=[37, 42], current_app='test-ns3'))
self.assertEqual('/included/test3/inner/42/37/', reverse('testapp:urlobject-view', kwargs={'arg1': 42, 'arg2': 37}, current_app='test-ns3'))
self.assertEqual('/included/test3/inner/+%5C$*/', reverse('testapp:urlobject-special-view', current_app='test-ns3'))
def test_app_lookup_object_without_default(self):
"An application namespace without a default is sensitive to the 'current' app can be used for lookup"
self.assertEqual('/other2/inner/', reverse('nodefault:urlobject-view'))
self.assertEqual('/other2/inner/37/42/', reverse('nodefault:urlobject-view', args=[37, 42]))
self.assertEqual('/other2/inner/42/37/', reverse('nodefault:urlobject-view', kwargs={'arg1': 42, 'arg2': 37}))
self.assertEqual('/other2/inner/+%5C$*/', reverse('nodefault:urlobject-special-view'))
self.assertEqual('/other1/inner/', reverse('nodefault:urlobject-view', current_app='other-ns1'))
self.assertEqual('/other1/inner/37/42/', reverse('nodefault:urlobject-view', args=[37, 42], current_app='other-ns1'))
self.assertEqual('/other1/inner/42/37/', reverse('nodefault:urlobject-view', kwargs={'arg1': 42, 'arg2': 37}, current_app='other-ns1'))
self.assertEqual('/other1/inner/+%5C$*/', reverse('nodefault:urlobject-special-view', current_app='other-ns1'))
def test_special_chars_namespace(self):
self.assertEqual('/+%5C$*/included/normal/', reverse('special:inc-normal-view'))
self.assertEqual('/+%5C$*/included/normal/37/42/', reverse('special:inc-normal-view', args=[37, 42]))
self.assertEqual('/+%5C$*/included/normal/42/37/', reverse('special:inc-normal-view', kwargs={'arg1': 42, 'arg2': 37}))
self.assertEqual('/+%5C$*/included/+%5C$*/', reverse('special:inc-special-view'))
def test_namespaces_with_variables(self):
"Namespace prefixes can capture variables: see #15900"
self.assertEqual('/inc70/', reverse('inc-ns5:inner-nothing', kwargs={'outer': '70'}))
self.assertEqual('/inc78/extra/foobar/', reverse('inc-ns5:inner-extra', kwargs={'outer': '78', 'extra': 'foobar'}))
self.assertEqual('/inc70/', reverse('inc-ns5:inner-nothing', args=['70']))
self.assertEqual('/inc78/extra/foobar/', reverse('inc-ns5:inner-extra', args=['78', 'foobar']))
def test_nested_app_lookup(self):
"A nested current_app should be split in individual namespaces (#24904)"
self.assertEqual('/ns-included1/test4/inner/', reverse('inc-ns1:testapp:urlobject-view'))
self.assertEqual('/ns-included1/test4/inner/37/42/', reverse('inc-ns1:testapp:urlobject-view', args=[37, 42]))
self.assertEqual(
'/ns-included1/test4/inner/42/37/',
reverse('inc-ns1:testapp:urlobject-view', kwargs={'arg1': 42, 'arg2': 37})
)
self.assertEqual('/ns-included1/test4/inner/+%5C$*/', reverse('inc-ns1:testapp:urlobject-special-view'))
self.assertEqual(
'/ns-included1/test3/inner/',
reverse('inc-ns1:testapp:urlobject-view', current_app='inc-ns1:test-ns3')
)
self.assertEqual(
'/ns-included1/test3/inner/37/42/',
reverse('inc-ns1:testapp:urlobject-view', args=[37, 42], current_app='inc-ns1:test-ns3')
)
self.assertEqual(
'/ns-included1/test3/inner/42/37/',
reverse('inc-ns1:testapp:urlobject-view', kwargs={'arg1': 42, 'arg2': 37}, current_app='inc-ns1:test-ns3')
)
self.assertEqual(
'/ns-included1/test3/inner/+%5C$*/',
reverse('inc-ns1:testapp:urlobject-special-view', current_app='inc-ns1:test-ns3')
)
def test_current_app_no_partial_match(self):
"current_app should either match the whole path or shouldn't be used"
self.assertEqual(
'/ns-included1/test4/inner/',
reverse('inc-ns1:testapp:urlobject-view', current_app='non-existant:test-ns3')
)
self.assertEqual(
'/ns-included1/test4/inner/37/42/',
reverse('inc-ns1:testapp:urlobject-view', args=[37, 42], current_app='non-existant:test-ns3')
)
self.assertEqual(
'/ns-included1/test4/inner/42/37/',
reverse('inc-ns1:testapp:urlobject-view', kwargs={'arg1': 42, 'arg2': 37},
current_app='non-existant:test-ns3')
)
self.assertEqual(
'/ns-included1/test4/inner/+%5C$*/',
reverse('inc-ns1:testapp:urlobject-special-view', current_app='non-existant:test-ns3')
)
@override_settings(ROOT_URLCONF=urlconf_outer.__name__)
class RequestURLconfTests(SimpleTestCase):
def test_urlconf(self):
response = self.client.get('/test/me/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b'outer:/test/me/,inner:/inner_urlconf/second_test/')
response = self.client.get('/inner_urlconf/second_test/')
self.assertEqual(response.status_code, 200)
response = self.client.get('/second_test/')
self.assertEqual(response.status_code, 404)
@override_settings(
MIDDLEWARE_CLASSES=[
'%s.ChangeURLconfMiddleware' % middleware.__name__,
]
)
def test_urlconf_overridden(self):
response = self.client.get('/test/me/')
self.assertEqual(response.status_code, 404)
response = self.client.get('/inner_urlconf/second_test/')
self.assertEqual(response.status_code, 404)
response = self.client.get('/second_test/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b'outer:,inner:/second_test/')
@override_settings(
MIDDLEWARE_CLASSES=[
'%s.NullChangeURLconfMiddleware' % middleware.__name__,
]
)
def test_urlconf_overridden_with_null(self):
"""
Overriding request.urlconf with None will fall back to the default
URLconf.
"""
response = self.client.get('/test/me/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b'outer:/test/me/,inner:/inner_urlconf/second_test/')
response = self.client.get('/inner_urlconf/second_test/')
self.assertEqual(response.status_code, 200)
response = self.client.get('/second_test/')
self.assertEqual(response.status_code, 404)
@override_settings(
MIDDLEWARE_CLASSES=[
'%s.ChangeURLconfMiddleware' % middleware.__name__,
'%s.ReverseInnerInResponseMiddleware' % middleware.__name__,
]
)
def test_reverse_inner_in_response_middleware(self):
"""
        Test reversing a URL from the *overridden* URLconf from inside
a response middleware.
"""
response = self.client.get('/second_test/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b'/second_test/')
@override_settings(
MIDDLEWARE_CLASSES=[
'%s.ChangeURLconfMiddleware' % middleware.__name__,
'%s.ReverseOuterInResponseMiddleware' % middleware.__name__,
]
)
def test_reverse_outer_in_response_middleware(self):
"""
        Test reversing a URL from the *default* URLconf from inside
a response middleware.
"""
message = "Reverse for 'outer' with arguments '()' and keyword arguments '{}' not found."
with self.assertRaisesMessage(NoReverseMatch, message):
self.client.get('/second_test/')
@override_settings(
MIDDLEWARE_CLASSES=[
'%s.ChangeURLconfMiddleware' % middleware.__name__,
'%s.ReverseInnerInStreaming' % middleware.__name__,
]
)
def test_reverse_inner_in_streaming(self):
"""
        Test reversing a URL from the *overridden* URLconf from inside
a streaming response.
"""
response = self.client.get('/second_test/')
self.assertEqual(response.status_code, 200)
self.assertEqual(b''.join(response), b'/second_test/')
@override_settings(
MIDDLEWARE_CLASSES=[
'%s.ChangeURLconfMiddleware' % middleware.__name__,
'%s.ReverseOuterInStreaming' % middleware.__name__,
]
)
def test_reverse_outer_in_streaming(self):
"""
Test reversing an URL from the *default* URLconf from inside
a streaming response.
"""
message = "Reverse for 'outer' with arguments '()' and keyword arguments '{}' not found."
with self.assertRaisesMessage(NoReverseMatch, message):
self.client.get('/second_test/')
b''.join(self.client.get('/second_test/'))
class ErrorHandlerResolutionTests(SimpleTestCase):
"""Tests for handler400, handler404 and handler500"""
def setUp(self):
urlconf = 'urlpatterns_reverse.urls_error_handlers'
urlconf_callables = 'urlpatterns_reverse.urls_error_handlers_callables'
self.resolver = RegexURLResolver(r'^$', urlconf)
self.callable_resolver = RegexURLResolver(r'^$', urlconf_callables)
def test_named_handlers(self):
handler = (empty_view, {})
self.assertEqual(self.resolver.resolve_error_handler(400), handler)
self.assertEqual(self.resolver.resolve_error_handler(404), handler)
self.assertEqual(self.resolver.resolve_error_handler(500), handler)
    def test_callable_handlers(self):
handler = (empty_view, {})
self.assertEqual(self.callable_resolver.resolve_error_handler(400), handler)
self.assertEqual(self.callable_resolver.resolve_error_handler(404), handler)
self.assertEqual(self.callable_resolver.resolve_error_handler(500), handler)
@override_settings(ROOT_URLCONF='urlpatterns_reverse.urls_without_full_import')
class DefaultErrorHandlerTests(SimpleTestCase):
def test_default_handler(self):
"If the urls.py doesn't specify handlers, the defaults are used"
try:
response = self.client.get('/test/')
self.assertEqual(response.status_code, 404)
except AttributeError:
self.fail("Shouldn't get an AttributeError due to undefined 404 handler")
try:
self.assertRaises(ValueError, self.client.get, '/bad_view/')
except AttributeError:
self.fail("Shouldn't get an AttributeError due to undefined 500 handler")
@override_settings(ROOT_URLCONF=None)
class NoRootUrlConfTests(SimpleTestCase):
"""Tests for handler404 and handler500 if urlconf is None"""
def test_no_handler_exception(self):
self.assertRaises(ImproperlyConfigured, self.client.get, '/test/me/')
@override_settings(ROOT_URLCONF='urlpatterns_reverse.namespace_urls')
class ResolverMatchTests(SimpleTestCase):
@ignore_warnings(category=RemovedInDjango20Warning)
def test_urlpattern_resolve(self):
for path, url_name, app_name, namespace, view_name, func, args, kwargs in resolve_test_data:
# Test legacy support for extracting "function, args, kwargs"
match_func, match_args, match_kwargs = resolve(path)
self.assertEqual(match_func, func)
self.assertEqual(match_args, args)
self.assertEqual(match_kwargs, kwargs)
# Test ResolverMatch capabilities.
match = resolve(path)
self.assertEqual(match.__class__, ResolverMatch)
self.assertEqual(match.url_name, url_name)
self.assertEqual(match.app_name, app_name)
self.assertEqual(match.namespace, namespace)
self.assertEqual(match.view_name, view_name)
self.assertEqual(match.func, func)
self.assertEqual(match.args, args)
self.assertEqual(match.kwargs, kwargs)
# ... and for legacy purposes:
self.assertEqual(match[0], func)
self.assertEqual(match[1], args)
self.assertEqual(match[2], kwargs)
@ignore_warnings(category=RemovedInDjango20Warning)
def test_resolver_match_on_request(self):
response = self.client.get('/resolver_match/')
resolver_match = response.resolver_match
self.assertEqual(resolver_match.url_name, 'test-resolver-match')
def test_resolver_match_on_request_before_resolution(self):
request = HttpRequest()
self.assertIsNone(request.resolver_match)
@override_settings(ROOT_URLCONF='urlpatterns_reverse.erroneous_urls')
class ErroneousViewTests(SimpleTestCase):
def test_erroneous_resolve(self):
self.assertRaises(ImportError, self.client.get, '/erroneous_inner/')
self.assertRaises(ImportError, self.client.get, '/erroneous_outer/')
self.assertRaises(ViewDoesNotExist, self.client.get, '/missing_inner/')
self.assertRaises(ViewDoesNotExist, self.client.get, '/missing_outer/')
self.assertRaises(ViewDoesNotExist, self.client.get, '/uncallable-dotted/')
self.assertRaises(ViewDoesNotExist, self.client.get, '/uncallable-object/')
# Regression test for #21157
self.assertRaises(ImportError, self.client.get, '/erroneous_unqualified/')
def test_erroneous_reverse(self):
"""
Ensure that a useful exception is raised when a regex is invalid in the
URLConf (#6170).
"""
# The regex error will be hit before NoReverseMatch can be raised
self.assertRaises(ImproperlyConfigured, reverse, 'whatever blah blah')
class ViewLoadingTests(SimpleTestCase):
def test_view_loading(self):
self.assertEqual(get_callable('urlpatterns_reverse.views.empty_view'), empty_view)
# passing a callable should return the callable
self.assertEqual(get_callable(empty_view), empty_view)
def test_exceptions(self):
# A missing view (identified by an AttributeError) should raise
# ViewDoesNotExist, ...
with six.assertRaisesRegex(self, ViewDoesNotExist, ".*View does not exist in.*"):
get_callable('urlpatterns_reverse.views.i_should_not_exist')
# ... but if the AttributeError is caused by something else don't
# swallow it.
with self.assertRaises(AttributeError):
get_callable('urlpatterns_reverse.views_broken.i_am_broken')
class IncludeTests(SimpleTestCase):
url_patterns = [
url(r'^inner/$', views.empty_view, name='urlobject-view'),
url(r'^inner/(?P<arg1>[0-9]+)/(?P<arg2>[0-9]+)/$', views.empty_view, name='urlobject-view'),
url(r'^inner/\+\\\$\*/$', views.empty_view, name='urlobject-special-view'),
]
app_urls = URLObject('inc-app')
def test_include_app_name_but_no_namespace(self):
msg = "Must specify a namespace if specifying app_name."
with self.assertRaisesMessage(ValueError, msg):
include(self.url_patterns, app_name='bar')
def test_include_urls(self):
self.assertEqual(include(self.url_patterns), (self.url_patterns, None, None))
@ignore_warnings(category=RemovedInDjango20Warning)
def test_include_namespace(self):
# no app_name -> deprecated
self.assertEqual(include(self.url_patterns, 'namespace'), (self.url_patterns, None, 'namespace'))
@ignore_warnings(category=RemovedInDjango20Warning)
def test_include_namespace_app_name(self):
# app_name argument to include -> deprecated
self.assertEqual(
include(self.url_patterns, 'namespace', 'app_name'),
(self.url_patterns, 'app_name', 'namespace')
)
@ignore_warnings(category=RemovedInDjango20Warning)
def test_include_3_tuple(self):
# 3-tuple -> deprecated
self.assertEqual(
include((self.url_patterns, 'app_name', 'namespace')),
(self.url_patterns, 'app_name', 'namespace')
)
def test_include_2_tuple(self):
self.assertEqual(
include((self.url_patterns, 'app_name')),
(self.url_patterns, 'app_name', 'app_name')
)
def test_include_2_tuple_namespace(self):
self.assertEqual(
include((self.url_patterns, 'app_name'), namespace='namespace'),
(self.url_patterns, 'app_name', 'namespace')
)
def test_include_app_name(self):
self.assertEqual(
include(self.app_urls),
(self.app_urls, 'inc-app', 'inc-app')
)
def test_include_app_name_namespace(self):
self.assertEqual(
include(self.app_urls, 'namespace'),
(self.app_urls, 'inc-app', 'namespace')
)
@override_settings(ROOT_URLCONF='urlpatterns_reverse.urls')
class LookaheadTests(SimpleTestCase):
def test_valid_resolve(self):
test_urls = [
'/lookahead-/a-city/',
'/lookbehind-/a-city/',
'/lookahead+/a-city/',
'/lookbehind+/a-city/',
]
for test_url in test_urls:
match = resolve(test_url)
self.assertEqual(match.kwargs, {'city': 'a-city'})
def test_invalid_resolve(self):
test_urls = [
'/lookahead-/not-a-city/',
'/lookbehind-/not-a-city/',
'/lookahead+/other-city/',
'/lookbehind+/other-city/',
]
for test_url in test_urls:
with self.assertRaises(Resolver404):
resolve(test_url)
def test_valid_reverse(self):
url = reverse('lookahead-positive', kwargs={'city': 'a-city'})
self.assertEqual(url, '/lookahead+/a-city/')
url = reverse('lookahead-negative', kwargs={'city': 'a-city'})
self.assertEqual(url, '/lookahead-/a-city/')
url = reverse('lookbehind-positive', kwargs={'city': 'a-city'})
self.assertEqual(url, '/lookbehind+/a-city/')
url = reverse('lookbehind-negative', kwargs={'city': 'a-city'})
self.assertEqual(url, '/lookbehind-/a-city/')
def test_invalid_reverse(self):
with self.assertRaises(NoReverseMatch):
reverse('lookahead-positive', kwargs={'city': 'other-city'})
with self.assertRaises(NoReverseMatch):
reverse('lookahead-negative', kwargs={'city': 'not-a-city'})
with self.assertRaises(NoReverseMatch):
reverse('lookbehind-positive', kwargs={'city': 'other-city'})
with self.assertRaises(NoReverseMatch):
reverse('lookbehind-negative', kwargs={'city': 'not-a-city'})
|
bsd-3-clause
| -5,653,425,987,511,364,000 | -6,528,513,955,558,916,000 | 48.556436 | 248 | 0.622752 | false |
nagisa/Feeds
|
gdist/gschemas.py
|
1
|
2161
|
import glob
import os
from distutils.dep_util import newer
from distutils.core import Command
from distutils.spawn import find_executable
from distutils.util import change_root
class build_gschemas(Command):
"""build message catalog files
Build message catalog (.mo) files from .po files using xgettext
and intltool. These are placed directly in the build tree.
"""
description = "build gschemas used for dconf"
user_options = []
build_base = None
def initialize_options(self):
pass
def finalize_options(self):
self.gschemas_directory = self.distribution.gschemas
self.set_undefined_options('build', ('build_base', 'build_base'))
def run(self):
if find_executable("glib-compile-schemas") is None:
raise SystemExit("Error: 'glib-compile-schemas' not found.")
basepath = os.path.join(self.build_base, 'share', 'glib-2.0', 'schemas')
self.copy_tree(self.gschemas_directory, basepath)
class install_gschemas(Command):
"""install message catalog files
Copy compiled message catalog files into their installation
directory, $prefix/share/locale/$lang/LC_MESSAGES/$package.mo.
"""
description = "install message catalog files"
user_options = []
skip_build = None
build_base = None
install_base = None
root = None
def initialize_options(self):
pass
def finalize_options(self):
self.set_undefined_options('build', ('build_base', 'build_base'))
self.set_undefined_options(
'install',
('root', 'root'),
('install_base', 'install_base'),
('skip_build', 'skip_build'))
def run(self):
if not self.skip_build:
self.run_command('build_gschemas')
src = os.path.join(self.build_base, 'share', 'glib-2.0', 'schemas')
dest = os.path.join(self.install_base, 'share', 'glib-2.0', 'schemas')
        if self.root is not None:
dest = change_root(self.root, dest)
self.copy_tree(src, dest)
self.spawn(['glib-compile-schemas', dest])
__all__ = ["build_gschemas", "install_gschemas"]
|
gpl-2.0
| -2,323,140,565,499,351,600 | -7,604,138,347,301,893,000 | 28.60274 | 80 | 0.63628 | false |
Distrotech/samba
|
source4/scripting/python/samba/tests/dcerpc/registry.py
|
20
|
1923
|
#!/usr/bin/env python
# Unix SMB/CIFS implementation.
# Copyright (C) Jelmer Vernooij <[email protected]> 2008
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""Tests for samba.dcerpc.registry."""
from samba.dcerpc import winreg
from samba.tests import RpcInterfaceTestCase
class WinregTests(RpcInterfaceTestCase):
def setUp(self):
super(WinregTests, self).setUp()
self.conn = winreg.winreg("ncalrpc:", self.get_loadparm(),
self.get_credentials())
def get_hklm(self):
return self.conn.OpenHKLM(None,
winreg.KEY_QUERY_VALUE | winreg.KEY_ENUMERATE_SUB_KEYS)
def test_hklm(self):
handle = self.conn.OpenHKLM(None,
winreg.KEY_QUERY_VALUE | winreg.KEY_ENUMERATE_SUB_KEYS)
self.conn.CloseKey(handle)
def test_getversion(self):
handle = self.get_hklm()
version = self.conn.GetVersion(handle)
self.assertEquals(int, version.__class__)
self.conn.CloseKey(handle)
def test_getkeyinfo(self):
handle = self.conn.OpenHKLM(None,
winreg.KEY_QUERY_VALUE | winreg.KEY_ENUMERATE_SUB_KEYS)
x = self.conn.QueryInfoKey(handle, winreg.String())
self.assertEquals(9, len(x)) # should return a 9-tuple
self.conn.CloseKey(handle)
|
gpl-3.0
| -8,159,711,030,767,410,000 | -6,578,681,884,009,665,000 | 35.283019 | 72 | 0.676027 | false |
geekboxzone/lollipop_external_chromium_org_third_party_WebKit
|
Tools/Scripts/webkitpy/style/main_unittest.py
|
53
|
3459
|
# Copyright (C) 2010 Chris Jerdonek ([email protected])
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest
from main import change_directory
from webkitpy.common.system.filesystem_mock import MockFileSystem
from webkitpy.common.system.logtesting import LogTesting
class ChangeDirectoryTest(unittest.TestCase):
_original_directory = "/original"
_checkout_root = "/WebKit"
def setUp(self):
self._log = LogTesting.setUp(self)
self.filesystem = MockFileSystem(dirs=[self._original_directory, self._checkout_root], cwd=self._original_directory)
def tearDown(self):
self._log.tearDown()
def _change_directory(self, paths, checkout_root):
return change_directory(self.filesystem, paths=paths, checkout_root=checkout_root)
def _assert_result(self, actual_return_value, expected_return_value,
expected_log_messages, expected_current_directory):
self.assertEqual(actual_return_value, expected_return_value)
self._log.assertMessages(expected_log_messages)
self.assertEqual(self.filesystem.getcwd(), expected_current_directory)
def test_paths_none(self):
paths = self._change_directory(checkout_root=self._checkout_root, paths=None)
self._assert_result(paths, None, [], self._checkout_root)
def test_paths_convertible(self):
paths = ["/WebKit/foo1.txt", "/WebKit/foo2.txt"]
paths = self._change_directory(checkout_root=self._checkout_root, paths=paths)
self._assert_result(paths, ["foo1.txt", "foo2.txt"], [], self._checkout_root)
def test_with_scm_paths_unconvertible(self):
paths = ["/WebKit/foo1.txt", "/outside/foo2.txt"]
paths = self._change_directory(checkout_root=self._checkout_root, paths=paths)
log_messages = [
"""WARNING: Path-dependent style checks may not work correctly:
One of the given paths is outside the WebKit checkout of the current
working directory:
Path: /outside/foo2.txt
Checkout root: /WebKit
Pass only files below the checkout root to ensure correct results.
See the help documentation for more info.
"""]
self._assert_result(paths, paths, log_messages, self._original_directory)
|
bsd-3-clause
| -9,014,878,840,772,824,000 | 182,603,253,415,172,350 | 45.12 | 124 | 0.732003 | false |
dfalt974/SickRage
|
lib/sqlalchemy/testing/config.py
|
76
|
2116
|
# testing/config.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
import collections
requirements = None
db = None
db_url = None
db_opts = None
file_config = None
_current = None
class Config(object):
def __init__(self, db, db_opts, options, file_config):
self.db = db
self.db_opts = db_opts
self.options = options
self.file_config = file_config
_stack = collections.deque()
_configs = {}
@classmethod
def register(cls, db, db_opts, options, file_config, namespace):
"""add a config as one of the global configs.
If there are no configs set up yet, this config also
gets set as the "_current".
"""
cfg = Config(db, db_opts, options, file_config)
global _current
if not _current:
cls.set_as_current(cfg, namespace)
cls._configs[cfg.db.name] = cfg
cls._configs[(cfg.db.name, cfg.db.dialect)] = cfg
cls._configs[cfg.db] = cfg
@classmethod
def set_as_current(cls, config, namespace):
global db, _current, db_url
_current = config
db_url = config.db.url
namespace.db = db = config.db
@classmethod
def push_engine(cls, db, namespace):
assert _current, "Can't push without a default Config set up"
cls.push(
Config(db, _current.db_opts, _current.options, _current.file_config),
namespace
)
@classmethod
def push(cls, config, namespace):
cls._stack.append(_current)
cls.set_as_current(config, namespace)
@classmethod
def reset(cls, namespace):
if cls._stack:
cls.set_as_current(cls._stack[0], namespace)
cls._stack.clear()
@classmethod
def all_configs(cls):
for cfg in set(cls._configs.values()):
yield cfg
@classmethod
def all_dbs(cls):
for cfg in cls.all_configs():
yield cfg.db
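# A minimal sketch of the registration flow implemented above; the in-memory
# SQLite engine and the throwaway namespace object are illustrative assumptions.
def _example_register():
    from sqlalchemy import create_engine
    class _Namespace(object):
        pass
    ns = _Namespace()
    engine = create_engine("sqlite://")
    Config.register(engine, db_opts={}, options=None, file_config=None,
                    namespace=ns)
    # the first registered config becomes "_current" and publishes its engine
    return getattr(ns, "db", None)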
|
gpl-3.0
| 2,509,864,379,988,371,000 | 2,390,337,527,246,078,000 | 26.480519 | 84 | 0.605388 | false |
tobspr/LUI
|
Demos/B_BlockText.py
|
1
|
2782
|
from DemoFramework import DemoFramework
from LUILabel import LUILabel
from LUIBlockText import LUIBlockText
from LUIScrollableRegion import LUIScrollableRegion
import random
f = DemoFramework()
f.prepare_demo("LUIBlockText")
# Constructor
f.add_constructor_parameter("text", "u'Label'")
f.add_constructor_parameter("shadow", "True")
f.add_constructor_parameter("font_size", "14")
f.add_constructor_parameter("font", "'label'")
# Functions
f.add_public_function("clear", [])
f.add_public_function("set_text", [("text", "string")])
f.add_public_function("set_wrap", [("wrap", "boolean")])
f.add_public_function("set_width", [("width", "integer")])
f.add_property("labels", "list")
# Events
f.construct_sourcecode("LUIBlockText")
text_container = LUIScrollableRegion(
parent=f.get_widget_node(),
width=340,
height=190,
padding=0,
)
#TODO: Support newlines via character code 10 ("\n")
#TODO: If a space causes a line break, don't print it
# Create a new label
label = LUIBlockText(parent=text_container, width=310)
# Paragraph with no line breaks
label.add(
text='''Lorem ipsum dolor sit amet, consectetur adipiscing elit. Sed malesuada sit amet erat non gravida. Pellentesque sit amet cursus risus Sed egestas, nulla in tempor cursus, ante felis cursus magna, nec vehicula nisi nulla eu nulla.''',
color=(0.9,0.9,.9),
wordwrap=True,
padding=5,
)
# Paragraph with some linebreaks
label.add(
text='''Lorem ipsum dolor sit amet, consectetur adipiscing elit. Sed malesuada sit amet erat non gravida.
Pellentesque sit amet cursus risus Sed egestas, nulla in tempor cursus, ante felis cursus magna, nec vehicula nisi nulla eu nulla.
Nulla sed pellentesque erat. Morbi facilisis at erat id auctor. Phasellus euismod facilisis sem, at molestie velit condimentum sit amet.
Nulla posuere rhoncus aliquam.''',
color=(0.9,0.9,.9),
wordwrap=True,
padding=5,
)
# Paragraph with no spaces or linebreaks
label.add(
text='''Loremipsumolorsitamet,consecteturadipiscingelit.Sedmalesuadasitameteratnongravida.PellentesquesitametcursusrisusSedegestas,nullaintemporcursus,antefeliscursusmagna,necvehiculanisinullaeunulla.''',
color=(0.9,0.9,.9),
wordwrap=True,
padding=5,
)
def setWidth(width):
label.set_width(width)
text_container.on_element_added()
def setWrap(wrap):
label.set_wrap(wrap)
text_container.on_element_added()
f.set_actions({
"Set Random Text": lambda: label.set_text(unicode(random.randint(100, 10000))),
"Set Random Color": lambda: label.set_color((random.random(), random.random(), random.random(), 1)),
"Clear": lambda: label.clear(),
"Smaller": lambda: setWidth(200),
"Larger": lambda: setWidth(310),
"Wrapping on": lambda: setWrap(True),
"Wrapping off": lambda: setWrap(False),
})
base.run()
|
mit
| 160,479,629,659,873,860 | 4,438,625,074,856,500,700 | 29.23913 | 242 | 0.734723 | false |
jmetzen/scikit-learn
|
sklearn/base.py
|
22
|
18131
|
"""Base classes for all estimators."""
# Author: Gael Varoquaux <[email protected]>
# License: BSD 3 clause
import copy
import warnings
import numpy as np
from scipy import sparse
from .externals import six
from .utils.fixes import signature
from .utils.deprecation import deprecated
from .exceptions import ChangedBehaviorWarning as ChangedBehaviorWarning_
class ChangedBehaviorWarning(ChangedBehaviorWarning_):
pass
ChangedBehaviorWarning = deprecated("ChangedBehaviorWarning has been moved "
"into the sklearn.exceptions module. "
"It will not be available here from "
"version 0.19")(ChangedBehaviorWarning)
##############################################################################
def clone(estimator, safe=True):
"""Constructs a new estimator with the same parameters.
Clone does a deep copy of the model in an estimator
without actually copying attached data. It yields a new estimator
with the same parameters that has not been fit on any data.
Parameters
----------
estimator: estimator object, or list, tuple or set of objects
The estimator or group of estimators to be cloned
safe: boolean, optional
If safe is false, clone will fall back to a deepcopy on objects
that are not estimators.
"""
estimator_type = type(estimator)
# XXX: not handling dictionaries
if estimator_type in (list, tuple, set, frozenset):
return estimator_type([clone(e, safe=safe) for e in estimator])
elif not hasattr(estimator, 'get_params'):
if not safe:
return copy.deepcopy(estimator)
else:
raise TypeError("Cannot clone object '%s' (type %s): "
"it does not seem to be a scikit-learn estimator "
"as it does not implement a 'get_params' methods."
% (repr(estimator), type(estimator)))
klass = estimator.__class__
new_object_params = estimator.get_params(deep=False)
for name, param in six.iteritems(new_object_params):
new_object_params[name] = clone(param, safe=False)
new_object = klass(**new_object_params)
params_set = new_object.get_params(deep=False)
# quick sanity check of the parameters of the clone
for name in new_object_params:
param1 = new_object_params[name]
param2 = params_set[name]
if isinstance(param1, np.ndarray):
# For most ndarrays, we do not test for complete equality
if not isinstance(param2, type(param1)):
equality_test = False
elif (param1.ndim > 0
and param1.shape[0] > 0
and isinstance(param2, np.ndarray)
and param2.ndim > 0
and param2.shape[0] > 0):
equality_test = (
param1.shape == param2.shape
and param1.dtype == param2.dtype
# We have to use '.flat' for 2D arrays
and param1.flat[0] == param2.flat[0]
and param1.flat[-1] == param2.flat[-1]
)
else:
equality_test = np.all(param1 == param2)
elif sparse.issparse(param1):
# For sparse matrices equality doesn't work
if not sparse.issparse(param2):
equality_test = False
elif param1.size == 0 or param2.size == 0:
equality_test = (
param1.__class__ == param2.__class__
and param1.size == 0
and param2.size == 0
)
else:
equality_test = (
param1.__class__ == param2.__class__
and param1.data[0] == param2.data[0]
and param1.data[-1] == param2.data[-1]
and param1.nnz == param2.nnz
and param1.shape == param2.shape
)
else:
new_obj_val = new_object_params[name]
params_set_val = params_set[name]
# The following construct is required to check equality on special
# singletons such as np.nan that are not equal to them-selves:
equality_test = (new_obj_val == params_set_val or
new_obj_val is params_set_val)
if not equality_test:
raise RuntimeError('Cannot clone object %s, as the constructor '
'does not seem to set parameter %s' %
(estimator, name))
return new_object
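# A minimal usage sketch for clone(); DecisionTreeClassifier is only an example
# estimator and is assumed to be importable from sklearn.tree.
def _clone_example():
    from sklearn.tree import DecisionTreeClassifier
    original = DecisionTreeClassifier(max_depth=3)
    duplicate = clone(original)
    # same constructor parameters, but a distinct, unfitted object
    assert duplicate is not original
    assert duplicate.get_params()["max_depth"] == 3
    return duplicate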
###############################################################################
def _pprint(params, offset=0, printer=repr):
"""Pretty print the dictionary 'params'
Parameters
----------
params: dict
The dictionary to pretty print
offset: int
The offset in characters to add at the begin of each line.
printer:
The function to convert entries to strings, typically
the builtin str or repr
"""
# Do a multi-line justified repr:
options = np.get_printoptions()
np.set_printoptions(precision=5, threshold=64, edgeitems=2)
params_list = list()
this_line_length = offset
line_sep = ',\n' + (1 + offset // 2) * ' '
for i, (k, v) in enumerate(sorted(six.iteritems(params))):
if type(v) is float:
# use str for representing floating point numbers
# this way we get consistent representation across
# architectures and versions.
this_repr = '%s=%s' % (k, str(v))
else:
# use repr of the rest
this_repr = '%s=%s' % (k, printer(v))
if len(this_repr) > 500:
this_repr = this_repr[:300] + '...' + this_repr[-100:]
if i > 0:
if (this_line_length + len(this_repr) >= 75 or '\n' in this_repr):
params_list.append(line_sep)
this_line_length = len(line_sep)
else:
params_list.append(', ')
this_line_length += 2
params_list.append(this_repr)
this_line_length += len(this_repr)
np.set_printoptions(**options)
lines = ''.join(params_list)
# Strip trailing space to avoid nightmare in doctests
lines = '\n'.join(l.rstrip(' ') for l in lines.split('\n'))
return lines
###############################################################################
class BaseEstimator(object):
"""Base class for all estimators in scikit-learn
Notes
-----
All estimators should specify all the parameters that can be set
at the class level in their ``__init__`` as explicit keyword
arguments (no ``*args`` or ``**kwargs``).
"""
@classmethod
def _get_param_names(cls):
"""Get parameter names for the estimator"""
# fetch the constructor or the original constructor before
# deprecation wrapping if any
init = getattr(cls.__init__, 'deprecated_original', cls.__init__)
if init is object.__init__:
# No explicit constructor to introspect
return []
# introspect the constructor arguments to find the model parameters
# to represent
init_signature = signature(init)
# Consider the constructor parameters excluding 'self'
parameters = [p for p in init_signature.parameters.values()
if p.name != 'self' and p.kind != p.VAR_KEYWORD]
for p in parameters:
if p.kind == p.VAR_POSITIONAL:
raise RuntimeError("scikit-learn estimators should always "
"specify their parameters in the signature"
" of their __init__ (no varargs)."
" %s with constructor %s doesn't "
" follow this convention."
% (cls, init_signature))
# Extract and sort argument names excluding 'self'
return sorted([p.name for p in parameters])
def get_params(self, deep=True):
"""Get parameters for this estimator.
Parameters
----------
deep: boolean, optional
If True, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns
-------
params : mapping of string to any
Parameter names mapped to their values.
"""
out = dict()
for key in self._get_param_names():
# We need deprecation warnings to always be on in order to
# catch deprecated param values.
# This is set in utils/__init__.py but it gets overwritten
# when running under python3 somehow.
warnings.simplefilter("always", DeprecationWarning)
try:
with warnings.catch_warnings(record=True) as w:
value = getattr(self, key, None)
if len(w) and w[0].category == DeprecationWarning:
# if the parameter is deprecated, don't show it
continue
finally:
warnings.filters.pop(0)
# XXX: should we rather test if instance of estimator?
if deep and hasattr(value, 'get_params'):
deep_items = value.get_params().items()
out.update((key + '__' + k, val) for k, val in deep_items)
out[key] = value
return out
def set_params(self, **params):
"""Set the parameters of this estimator.
The method works on simple estimators as well as on nested objects
(such as pipelines). The former have parameters of the form
``<component>__<parameter>`` so that it's possible to update each
component of a nested object.
Returns
-------
self
"""
if not params:
# Simple optimisation to gain speed (inspect is slow)
return self
valid_params = self.get_params(deep=True)
for key, value in six.iteritems(params):
split = key.split('__', 1)
if len(split) > 1:
# nested objects case
name, sub_name = split
if name not in valid_params:
raise ValueError('Invalid parameter %s for estimator %s. '
'Check the list of available parameters '
'with `estimator.get_params().keys()`.' %
(name, self))
sub_object = valid_params[name]
sub_object.set_params(**{sub_name: value})
else:
# simple objects case
if key not in valid_params:
raise ValueError('Invalid parameter %s for estimator %s. '
'Check the list of available parameters '
'with `estimator.get_params().keys()`.' %
(key, self.__class__.__name__))
setattr(self, key, value)
return self
def __repr__(self):
class_name = self.__class__.__name__
return '%s(%s)' % (class_name, _pprint(self.get_params(deep=False),
offset=len(class_name),),)
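# A minimal sketch of an estimator honouring the conventions above: every
# constructor argument is an explicit keyword and is stored verbatim, so
# get_params()/set_params() work with no extra code. The class name and its
# parameters are hypothetical.
class _ExampleEstimator(BaseEstimator):
    def __init__(self, alpha=1.0, fit_intercept=True):
        self.alpha = alpha
        self.fit_intercept = fit_intercept
# _ExampleEstimator(alpha=0.5).set_params(alpha=2.0).get_params()
# -> {'alpha': 2.0, 'fit_intercept': True}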
###############################################################################
class ClassifierMixin(object):
"""Mixin class for all classifiers in scikit-learn."""
_estimator_type = "classifier"
def score(self, X, y, sample_weight=None):
"""Returns the mean accuracy on the given test data and labels.
In multi-label classification, this is the subset accuracy
which is a harsh metric since you require for each sample that
each label set be correctly predicted.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Test samples.
y : array-like, shape = (n_samples) or (n_samples, n_outputs)
True labels for X.
sample_weight : array-like, shape = [n_samples], optional
Sample weights.
Returns
-------
score : float
Mean accuracy of self.predict(X) wrt. y.
"""
from .metrics import accuracy_score
return accuracy_score(y, self.predict(X), sample_weight=sample_weight)
###############################################################################
class RegressorMixin(object):
"""Mixin class for all regression estimators in scikit-learn."""
_estimator_type = "regressor"
def score(self, X, y, sample_weight=None):
"""Returns the coefficient of determination R^2 of the prediction.
        The coefficient R^2 is defined as (1 - u/v), where u is the residual
        sum of squares ((y_true - y_pred) ** 2).sum() and v is the total
        sum of squares ((y_true - y_true.mean()) ** 2).sum().
Best possible score is 1.0 and it can be negative (because the
model can be arbitrarily worse). A constant model that always
predicts the expected value of y, disregarding the input features,
would get a R^2 score of 0.0.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Test samples.
y : array-like, shape = (n_samples) or (n_samples, n_outputs)
True values for X.
sample_weight : array-like, shape = [n_samples], optional
Sample weights.
Returns
-------
score : float
R^2 of self.predict(X) wrt. y.
"""
from .metrics import r2_score
return r2_score(y, self.predict(X), sample_weight=sample_weight,
multioutput='variance_weighted')
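# A small worked example of the R^2 definition quoted above, computed directly
# from u and v rather than through an estimator; the numbers are arbitrary.
def _r2_by_hand():
    y_true = np.array([1.0, 2.0, 3.0, 4.0])
    y_pred = np.array([1.1, 1.9, 3.2, 3.8])
    u = ((y_true - y_pred) ** 2).sum()         # residual sum of squares: 0.10
    v = ((y_true - y_true.mean()) ** 2).sum()  # total sum of squares: 5.0
    return 1 - u / v                           # 0.98; 1.0 is a perfect fit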
###############################################################################
class ClusterMixin(object):
"""Mixin class for all cluster estimators in scikit-learn."""
_estimator_type = "clusterer"
def fit_predict(self, X, y=None):
"""Performs clustering on X and returns cluster labels.
Parameters
----------
X : ndarray, shape (n_samples, n_features)
Input data.
Returns
-------
y : ndarray, shape (n_samples,)
cluster labels
"""
# non-optimized default implementation; override when a better
# method is possible for a given clustering algorithm
self.fit(X)
return self.labels_
class BiclusterMixin(object):
"""Mixin class for all bicluster estimators in scikit-learn"""
@property
def biclusters_(self):
"""Convenient way to get row and column indicators together.
Returns the ``rows_`` and ``columns_`` members.
"""
return self.rows_, self.columns_
def get_indices(self, i):
"""Row and column indices of the i'th bicluster.
Only works if ``rows_`` and ``columns_`` attributes exist.
Returns
-------
row_ind : np.array, dtype=np.intp
Indices of rows in the dataset that belong to the bicluster.
col_ind : np.array, dtype=np.intp
Indices of columns in the dataset that belong to the bicluster.
"""
rows = self.rows_[i]
columns = self.columns_[i]
return np.nonzero(rows)[0], np.nonzero(columns)[0]
def get_shape(self, i):
"""Shape of the i'th bicluster.
Returns
-------
shape : (int, int)
Number of rows and columns (resp.) in the bicluster.
"""
indices = self.get_indices(i)
return tuple(len(i) for i in indices)
def get_submatrix(self, i, data):
"""Returns the submatrix corresponding to bicluster `i`.
Works with sparse matrices. Only works if ``rows_`` and
``columns_`` attributes exist.
"""
from .utils.validation import check_array
data = check_array(data, accept_sparse='csr')
row_ind, col_ind = self.get_indices(i)
return data[row_ind[:, np.newaxis], col_ind]
###############################################################################
class TransformerMixin(object):
"""Mixin class for all transformers in scikit-learn."""
def fit_transform(self, X, y=None, **fit_params):
"""Fit to data, then transform it.
Fits transformer to X and y with optional parameters fit_params
and returns a transformed version of X.
Parameters
----------
X : numpy array of shape [n_samples, n_features]
Training set.
y : numpy array of shape [n_samples]
Target values.
Returns
-------
X_new : numpy array of shape [n_samples, n_features_new]
Transformed array.
"""
# non-optimized default implementation; override when a better
# method is possible for a given clustering algorithm
if y is None:
# fit method of arity 1 (unsupervised transformation)
return self.fit(X, **fit_params).transform(X)
else:
# fit method of arity 2 (supervised transformation)
return self.fit(X, y, **fit_params).transform(X)
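# A minimal transformer sketch that relies on the fit_transform() provided by
# the mixin above; the max-scaling logic is illustrative only.
class _ExampleScaler(BaseEstimator, TransformerMixin):
    def fit(self, X, y=None):
        self.max_ = X.max(axis=0)
        return self
    def transform(self, X):
        return X / self.max_
# _ExampleScaler().fit_transform(np.array([[2.0], [4.0]])) -> array([[0.5], [1.0]])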
###############################################################################
class MetaEstimatorMixin(object):
"""Mixin class for all meta estimators in scikit-learn."""
# this is just a tag for the moment
###############################################################################
def is_classifier(estimator):
"""Returns True if the given estimator is (probably) a classifier."""
return getattr(estimator, "_estimator_type", None) == "classifier"
def is_regressor(estimator):
"""Returns True if the given estimator is (probably) a regressor."""
return getattr(estimator, "_estimator_type", None) == "regressor"
|
bsd-3-clause
| 3,684,395,435,732,382,000 | 485,463,683,305,251,900 | 36.538302 | 79 | 0.540069 | false |
bioinformatics-ua/montra
|
emif/notifications/models.py
|
2
|
1769
|
# -*- coding: utf-8 -*-
# Copyright (C) 2014 Universidade de Aveiro, DETI/IEETA, Bioinformatics Group - http://bioinformatics.ua.pt/
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.db import models
from django.contrib.auth.models import User
# Model for a new notification
class Notification(models.Model):
MESSAGE = 0
SYSTEM = 1
NOTIFICATION_TYPES = (
(MESSAGE, 'Private Message'),
(SYSTEM, 'System Notification'),
)
    destiny = models.ForeignKey(User, related_name="destiny") # recipient user of the notification
    origin = models.ForeignKey(User, related_name="origin") # every notification has an origin (this way we may later use this as a messaging system as well)
type = models.IntegerField(choices=NOTIFICATION_TYPES, default=SYSTEM)
    href = models.TextField(null=True) # the notification can carry a link to a related page
notification = models.TextField()
created_date = models.DateTimeField(auto_now_add=True)
read_date = models.DateTimeField(null=True)
read = models.BooleanField(default=False)
removed = models.BooleanField(default=False)
def __str__(self):
return str(self.notification)
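# A minimal sketch of creating and flagging a notification with the model above;
# the sender/recipient arguments are assumed to be existing User instances.
def _notify_example(sender, recipient, text):
    from django.utils import timezone
    note = Notification.objects.create(origin=sender, destiny=recipient,
                                       type=Notification.SYSTEM,
                                       notification=text)
    note.read = True
    note.read_date = timezone.now()
    note.save()
    return note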
|
gpl-3.0
| 7,448,595,419,114,039,000 | -7,824,169,553,204,832,000 | 44.358974 | 151 | 0.733183 | false |
weblyzard/ewrt
|
src/eWRT/ws/yahoo/__init__.py
|
1
|
7712
|
#!/usr/bin/env python
""" @package eWRT.ws.yahoo
support for the yahoo! search
@remarks
this module is based on yahoo's boss search service
"""
from __future__ import print_function
# (C)opyrights 2008-2010 by Albert Weichselbraun <[email protected]>
# Heinz Peter Lang <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from future import standard_library
standard_library.install_aliases()
from builtins import map
from builtins import object
__version__ = "$Header$"
from urllib.request import urlopen
from urllib.parse import urlencode, quote
from urllib.error import URLError
from nose.plugins.attrib import attr
from socket import setdefaulttimeout, timeout
from eWRT.ws.TagInfoService import TagInfoService
from eWRT.config import YAHOO_APP_ID, YAHOO_SEARCH_URL
from eWRT.input.conv.html import HtmlToText
from eWRT.access.http import Retrieve
setdefaulttimeout(60)
class Yahoo(TagInfoService):
""" interfaces with yahoo's search service
* Search: Yahoo! BOSS
(see http://developer.yahoo.com/search/boss)
"""
__slots__ = ('r', )
def __init__(self):
self.r = Retrieve( Yahoo.__name__, sleep_time=0 )
def query(self, terms, count=0, queryParams={} ):
""" returns search results for the given terms
@param[in] terms ... a list of search terms
@param[in] count ... number of results to return (0 if we are
interested on the search meta data only).
@param[in] queryParams ... a dictionary of query parameters to add to
the request
@returns the search results
"""
assert ( isinstance(terms, tuple) or isinstance(terms, list) )
queryParams.update( {'appid': YAHOO_APP_ID,
'count': count,
'format': 'json'
} )
params = urlencode( queryParams )
url = YAHOO_SEARCH_URL % "%2B".join(map( quote, terms) ) +"?"+ params
print(url)
try:
result = eval( self.r.open(url).read().replace("\\/", "/" ))
return result['ysearchresponse']
except (timeout, URLError):
return ""
@staticmethod
def getSearchResults(query_result):
""" returns a list of all search results returned by the given
query result.
@param[in] query_result Result of the query
"""
return [ YahooSearchResult(r) for r in query_result['resultset_web'] ] \
if 'resultset_web' in query_result else []
def getTagInfo(self, tag):
""" @Override """
return int( self.query(tag)['totalhits'] )
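# A minimal sketch of querying total hit counts with the class above; it assumes
# valid YAHOO_APP_ID / YAHOO_SEARCH_URL settings and network access.
def _example_hit_count(term):
    engine = Yahoo()
    return engine.getTagInfo((term, ))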
class YahooSearchResult(object):
""" Perfom manipulations on yahoo search results """
    __slots__ = ('search_result', )
def __init__(self, search_result):
""" @param[in] search_result ... search result to query """
self.search_result = search_result
def getKeywords(self):
""" @returns the keywords for the given search_result """
return self.search_result['keywords']['terms']
def getPageContent(self):
""" @returns the content of the found web page """
return urlopen( self.search_result['url'] ).read()
def getPageText(self):
""" @returns the text of the found web page """
try:
return HtmlToText.getText( self.getPageContent() )
except:
return ""
class TestYahoo(object):
""" tests the yahoo search API """
SEARCH_QUERIES = {
'energy': ( ('energy', 'coal'), ('energy', 'sustainable') ),
'latex' : ( ('latex', 'bibtex'), ('latex', 'knutz') )
}
def __init__(self):
self.y = Yahoo()
@attr("remote")
def testSearchCounts(self):
for query, refinedQueries in self.SEARCH_QUERIES.items():
qCount = int(self.y.query( (query, ) )['totalhits'])
for q in refinedQueries:
print(query, q, "**",qCount, int(self.y.query( q )['totalhits']))
assert qCount > int(self.y.query( q )['totalhits'])
@attr("remote")
def testTagInfo(self):
""" tests the tag info service """
assert self.y.getTagInfo( ('weblyzard',)) > 10
assert self.y.getTagInfo( ('a_query_which_should_not_appear_at_all', )) == 0
@attr("remote")
def testYahooSearchResult(self):
""" tests the Yahoo Search Result objects """
for resultSite in Yahoo.getSearchResults(self.y.query( ("linux", "firefox", ), \
count=1, queryParams={'view':'keyterms', 'abstract': 'long'} )):
print(resultSite.search_result['keyterms']['terms'])
assert len( resultSite.getPageText() ) > len(resultSite.search_result['abstract'])
assert 'http' in resultSite.search_result['url']
@attr("remote")
def testBorderlineYahooSearchResult(self):
""" tests borderline cases such as empty search results """
assert len( Yahoo.getSearchResults(self.y.query( ('ksdaf', 'sadfj93', 'kd9', ), count=10, queryParams={'view':'keyterms', 'abstract': 'long'}) ) ) == 0
@attr("remote")
def testMultiProcessingRetrieve(self):
""" tests the multi processing capabilities of this module """
from multiprocessing import Pool
p = Pool(4)
TEST_URLS = ['http://www.derstandard.at',
'http://www.dilbert.com',
'http://www.wetter.at',
'http://www.wu.ac.at',
'http://www.ai.wu.ac.at',
'http://www.tuwien.ac.at',
'http://www.boku.ac.at',
'http://www.univie.ac.at',
]
# f=open("/tmp/aw", "w")
for res in p.map( p_fetchWebPage, TEST_URLS ):
# f.write(res)
# f.write("\n-----\n\n\n")
assert len(res) > 20
# f.close()
@attr("remote")
def testFailingUrls(self):
""" tests the module with URLs known to fail(!) """
TEST_URLS = ['http://www.mfsa.com.mt/insguide/english/glossarysearch.jsp?letter=all',
]
for url in TEST_URLS:
assert len( p_fetchWebPage(url).strip() ) > 0
def p_fetchWebPage(url):
""" fetches the web page specified in the given yahoo result object
@param[in] url the url to fetch
@remarks
helper function for the testMultiProcessing test
"""
r = YahooSearchResult( {'url': url} )
return r.getPageText()
if __name__ == '__main__':
y = Yahoo()
#print y.query( ("energy",) )
#print y.query( ("energy", "coal") )
#print y.query( ("d'alembert", "law") )
r = y.query( ("linux", "python", ), count=5, queryParams={'view': 'keyterms', 'abstract': 'long'} )
print("***", r)
for entry in r['resultset_web']:
print(list(entry.keys()))
print(entry['keyterms']['terms'])
print(entry['url'])
print(entry['abstract'])
|
gpl-3.0
| -72,229,230,926,106,480 | 739,073,464,559,219,500 | 34.869767 | 159 | 0.58584 | false |
numba/numba
|
numba/tests/test_svml.py
|
3
|
17178
|
import math
import numpy as np
import subprocess
import numbers
import importlib
import sys
import re
import traceback
import multiprocessing as mp
from itertools import chain, combinations
import numba
from numba.core import config, cpu
from numba import prange, njit
from numba.core.compiler import compile_isolated, Flags
from numba.tests.support import TestCase, tag, override_env_config
import unittest
needs_svml = unittest.skipUnless(config.USING_SVML,
"SVML tests need SVML to be present")
# a map of float64 vector lengths with corresponding CPU architecture
vlen2cpu = {2: 'nehalem', 4: 'haswell', 8: 'skylake-avx512'}
# force LLVM to use AVX512 registers for vectorization
# https://reviews.llvm.org/D67259
vlen2cpu_features = {2: '', 4: '', 8: '-prefer-256-bit'}
# K: SVML functions, V: python functions which are expected to be SIMD-vectorized
# using SVML; explicit references to Python functions here are mostly for the
# sake of instant import checks.
# TODO: [] and comments below mean an unused/untested SVML function; it is to be
#       either enabled or replaced with an explanation of why the function
#       cannot be used in Numba
# TODO: this test does not support functions with more than one argument yet
# The test logic should be modified if there is an SVML function being used
# under a different name or module in Python
svml_funcs = {
"sin": [np.sin, math.sin],
"cos": [np.cos, math.cos],
"pow": [], # pow, math.pow],
"exp": [np.exp, math.exp],
"log": [np.log, math.log],
"acos": [math.acos],
"acosh": [math.acosh],
"asin": [math.asin],
"asinh": [math.asinh],
"atan2": [], # math.atan2],
"atan": [math.atan],
"atanh": [math.atanh],
"cbrt": [], # np.cbrt],
"cdfnorm": [],
"cdfnorminv": [],
"ceil": [], # np.ceil, math.ceil],
"cosd": [],
"cosh": [np.cosh, math.cosh],
"erf": [math.erf], # np.erf is available in Intel Distribution
"erfc": [math.erfc],
"erfcinv": [],
"erfinv": [],
"exp10": [],
"exp2": [], # np.exp2],
"expm1": [np.expm1, math.expm1],
"floor": [], # np.floor, math.floor],
"fmod": [], # np.fmod, math.fmod],
"hypot": [], # np.hypot, math.hypot],
"invsqrt": [], # available in Intel Distribution
"log10": [np.log10, math.log10],
"log1p": [np.log1p, math.log1p],
"log2": [], # np.log2],
"logb": [],
"nearbyint": [],
"rint": [], # np.rint],
"round": [], # round],
"sind": [],
"sinh": [np.sinh, math.sinh],
"sqrt": [np.sqrt, math.sqrt],
"tan": [np.tan, math.tan],
"tanh": [np.tanh, math.tanh],
"trunc": [], # np.trunc, math.trunc],
}
# TODO: these functions are not vectorizable with complex types
complex_funcs_exclude = ["sqrt", "tan", "log10", "expm1", "log1p", "tanh", "log"]
# remove untested entries
svml_funcs = {k: v for k, v in svml_funcs.items() if len(v) > 0}
# lists for functions which belong to numpy and math modules correspondingly
numpy_funcs = [f for f, v in svml_funcs.items() if "<ufunc" in \
[str(p).split(' ')[0] for p in v]]
other_funcs = [f for f, v in svml_funcs.items() if "<built-in" in \
[str(p).split(' ')[0] for p in v]]
def func_patterns(func, args, res, dtype, mode, vlen, fastmath, pad=' '*8):
"""
For a given function and its usage modes,
returns python code and assembly patterns it should and should not generate
"""
# generate a function call according to the usecase
if mode == "scalar":
arg_list = ','.join([a+'[0]' for a in args])
body = '%s%s[0] += math.%s(%s)\n' % (pad, res, func, arg_list)
elif mode == "numpy":
body = '%s%s += np.%s(%s)' % (pad, res, func, ','.join(args))
body += '.astype(np.%s)\n' % dtype if dtype.startswith('int') else '\n'
else:
assert mode == "range" or mode == "prange"
arg_list = ','.join([a+'[i]' for a in args])
body = '{pad}for i in {mode}({res}.size):\n' \
'{pad}{pad}{res}[i] += math.{func}({arg_list})\n'. \
format(**locals())
# TODO: refactor so this for-loop goes into umbrella function,
# 'mode' can be 'numpy', '0', 'i' instead
# TODO: it will enable mixed usecases like prange + numpy
# type specialization
is_f32 = dtype == 'float32' or dtype == 'complex64'
f = func+'f' if is_f32 else func
v = vlen*2 if is_f32 else vlen
# general expectations
prec_suff = '' if fastmath else '_ha'
scalar_func = '$_'+f if config.IS_OSX else '$'+f
svml_func = '__svml_%s%d%s,' % (f, v, prec_suff)
if mode == "scalar":
contains = [scalar_func]
avoids = ['__svml_', svml_func]
else: # will vectorize
contains = [svml_func]
avoids = [] # [scalar_func] - TODO: if possible, force LLVM to prevent
# generating the failsafe scalar paths
if vlen != 8 and (is_f32 or dtype == 'int32'): # Issue #3016
avoids += ['%zmm', '__svml_%s%d%s,' % (f, v*2, prec_suff)]
# special handling
if func == 'sqrt':
if mode == "scalar":
contains = ['sqrts']
avoids = [scalar_func, svml_func] # LLVM uses CPU instruction
elif vlen == 8:
contains = ['vsqrtp']
avoids = [scalar_func, svml_func] # LLVM uses CPU instruction
# else expect use of SVML for older architectures
return body, contains, avoids
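# A small illustration of what func_patterns() yields for one configuration;
# the values noted in the comments follow directly from the logic above.
def _example_patterns():
    body, contains, avoids = func_patterns(
        "sin", ["x"], "ret", "float64", "numpy", vlen=8, fastmath=False)
    # body is a line like "ret += np.sin(x)" and contains holds "__svml_sin8_ha,"
    return body, contains, avoids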
def usecase_name(dtype, mode, vlen, name):
""" Returns pretty name for given set of modes """
return f"{dtype}_{mode}{vlen}_{name}"
def combo_svml_usecase(dtype, mode, vlen, fastmath, name):
""" Combine multiple function calls under single umbrella usecase """
name = usecase_name(dtype, mode, vlen, name)
body = """def {name}(n):
x = np.empty(n*8, dtype=np.{dtype})
ret = np.empty_like(x)\n""".format(**locals())
funcs = set(numpy_funcs if mode == "numpy" else other_funcs)
if dtype.startswith('complex'):
funcs = funcs.difference(complex_funcs_exclude)
contains = set()
avoids = set()
# fill body and expectation patterns
for f in funcs:
b, c, a = func_patterns(f, ['x'], 'ret', dtype, mode, vlen, fastmath)
avoids.update(a)
body += b
contains.update(c)
body += " "*8 + "return ret"
# now compile and return it along with its body in __doc__ and patterns
ldict = {}
exec(body, globals(), ldict)
ldict[name].__doc__ = body
return ldict[name], contains, avoids
@needs_svml
class TestSVMLGeneration(TestCase):
""" Tests all SVML-generating functions produce desired calls """
# env mutating, must not run in parallel
_numba_parallel_test_ = False
# RE for a generic symbol reference and for each particular SVML function
    asm_filter = re.compile('|'.join([r'\$[a-z_]\w+,'] + list(svml_funcs)))
@classmethod
def mp_runner(cls, testname, outqueue):
method = getattr(cls, testname)
try:
ok, msg = method()
except Exception:
msg = traceback.format_exc()
ok = False
outqueue.put({'status': ok, 'msg': msg})
@classmethod
def _inject_test(cls, dtype, mode, vlen, flags):
# unsupported combinations
if dtype.startswith('complex') and mode != 'numpy':
return
# TODO: address skipped tests below
skipped = dtype.startswith('int') and vlen == 2
sig = (numba.int64,)
# unit test body template
@staticmethod
def run_template():
fn, contains, avoids = combo_svml_usecase(dtype, mode, vlen,
flags['fastmath'],
flags['name'])
# look for specific patters in the asm for a given target
with override_env_config('NUMBA_CPU_NAME', vlen2cpu[vlen]), \
override_env_config('NUMBA_CPU_FEATURES', vlen2cpu_features[vlen]):
# recompile for overridden CPU
try:
jitted_fn = njit(sig, fastmath=flags['fastmath'],
error_model=flags['error_model'],)(fn)
except:
raise Exception("raised while compiling "+fn.__doc__)
asm = jitted_fn.inspect_asm(sig)
missed = [pattern for pattern in contains if not pattern in asm]
found = [pattern for pattern in avoids if pattern in asm]
ok = not missed and not found
detail = '\n'.join(
[line for line in asm.split('\n')
if cls.asm_filter.search(line) and not '"' in line])
msg = (
f"While expecting {missed} and not {found},\n"
f"it contains:\n{detail}\n"
f"when compiling {fn.__doc__}"
)
return ok, msg
# inject it into the class
postfix = usecase_name(dtype, mode, vlen, flags['name'])
testname = f"run_{postfix}"
setattr(cls, testname, run_template)
@unittest.skipUnless(not skipped, "Not implemented")
def test_runner(self):
ctx = mp.get_context("spawn")
q = ctx.Queue()
p = ctx.Process(target=type(self).mp_runner, args=[testname, q])
p.start()
# timeout to avoid hanging and long enough to avoid bailing too early
p.join(timeout=10)
self.assertEqual(p.exitcode, 0, msg="process ended unexpectedly")
out = q.get()
status = out['status']
msg = out['msg']
self.assertTrue(status, msg=msg)
setattr(cls, f"test_{postfix}", test_runner)
@classmethod
def autogenerate(cls):
flag_list = [{'fastmath':False, 'error_model':'numpy',
'name':'usecase'},
{'fastmath':True, 'error_model':'numpy',
'name':'fastmath_usecase'},]
# main loop covering all the modes and use-cases
for dtype in ('complex64', 'float64', 'float32', 'int32', ):
for vlen in vlen2cpu:
for flags in flag_list:
for mode in "scalar", "range", "prange", "numpy":
cls._inject_test(dtype, mode, vlen, dict(flags))
# mark important
for n in ( "test_int32_range4_usecase", # issue #3016
):
setattr(cls, n, tag("important")(getattr(cls, n)))
TestSVMLGeneration.autogenerate()
def math_sin_scalar(x):
return math.sin(x)
def math_sin_loop(n):
ret = np.empty(n, dtype=np.float64)
for x in range(n):
ret[x] = math.sin(np.float64(x))
return ret
@needs_svml
class TestSVML(TestCase):
""" Tests SVML behaves as expected """
# env mutating, must not run in parallel
_numba_parallel_test_ = False
def __init__(self, *args):
self.flags = Flags()
self.flags.nrt = True
# flags for njit(fastmath=True)
self.fastflags = Flags()
self.fastflags.nrt = True
self.fastflags.fastmath = cpu.FastMathOptions(True)
super(TestSVML, self).__init__(*args)
def compile(self, func, *args, **kwargs):
assert not kwargs
sig = tuple([numba.typeof(x) for x in args])
std = compile_isolated(func, sig, flags=self.flags)
fast = compile_isolated(func, sig, flags=self.fastflags)
return std, fast
def copy_args(self, *args):
if not args:
return tuple()
new_args = []
for x in args:
if isinstance(x, np.ndarray):
new_args.append(x.copy('k'))
elif isinstance(x, np.number):
new_args.append(x.copy())
elif isinstance(x, numbers.Number):
new_args.append(x)
else:
raise ValueError('Unsupported argument type encountered')
return tuple(new_args)
def check(self, pyfunc, *args, **kwargs):
jitstd, jitfast = self.compile(pyfunc, *args)
std_pattern = kwargs.pop('std_pattern', None)
fast_pattern = kwargs.pop('fast_pattern', None)
cpu_name = kwargs.pop('cpu_name', 'skylake-avx512')
# force LLVM to use AVX512 registers for vectorization
# https://reviews.llvm.org/D67259
cpu_features = kwargs.pop('cpu_features', '-prefer-256-bit')
# python result
py_expected = pyfunc(*self.copy_args(*args))
# jit result
jitstd_result = jitstd.entry_point(*self.copy_args(*args))
# fastmath result
jitfast_result = jitfast.entry_point(*self.copy_args(*args))
# assert numerical equality
np.testing.assert_almost_equal(jitstd_result, py_expected, **kwargs)
np.testing.assert_almost_equal(jitfast_result, py_expected, **kwargs)
# look for specific patters in the asm for a given target
with override_env_config('NUMBA_CPU_NAME', cpu_name), \
override_env_config('NUMBA_CPU_FEATURES', cpu_features):
# recompile for overridden CPU
jitstd, jitfast = self.compile(pyfunc, *args)
if std_pattern:
self.check_svml_presence(jitstd, std_pattern)
if fast_pattern:
self.check_svml_presence(jitfast, fast_pattern)
def check_svml_presence(self, func, pattern):
asm = func.library.get_asm_str()
self.assertIn(pattern, asm)
def test_scalar_context(self):
# SVML will not be used.
pat = '$_sin' if config.IS_OSX else '$sin'
self.check(math_sin_scalar, 7., std_pattern=pat)
self.check(math_sin_scalar, 7., fast_pattern=pat)
def test_svml(self):
# loops both with and without fastmath should use SVML.
# The high accuracy routines are dropped if `fastmath` is set
std = "__svml_sin8_ha,"
fast = "__svml_sin8," # No `_ha`!
self.check(math_sin_loop, 10, std_pattern=std, fast_pattern=fast)
def test_svml_disabled(self):
code = """if 1:
import os
import numpy as np
import math
def math_sin_loop(n):
ret = np.empty(n, dtype=np.float64)
for x in range(n):
ret[x] = math.sin(np.float64(x))
return ret
def check_no_svml():
try:
# ban the use of SVML
os.environ['NUMBA_DISABLE_INTEL_SVML'] = '1'
# delay numba imports to account for env change as
# numba.__init__ picks up SVML and it is too late by
# then to override using `numba.config`
import numba
from numba import config
from numba.core import cpu
from numba.tests.support import override_env_config
from numba.core.compiler import compile_isolated, Flags
# compile for overridden CPU, with and without fastmath
with override_env_config('NUMBA_CPU_NAME', 'skylake-avx512'), \
override_env_config('NUMBA_CPU_FEATURES', ''):
sig = (numba.int32,)
f = Flags()
f.nrt = True
std = compile_isolated(math_sin_loop, sig, flags=f)
f.fastmath = cpu.FastMathOptions(True)
fast = compile_isolated(math_sin_loop, sig, flags=f)
fns = std, fast
# assert no SVML call is present in the asm
for fn in fns:
asm = fn.library.get_asm_str()
assert '__svml_sin' not in asm
finally:
# not really needed as process is separate
os.environ['NUMBA_DISABLE_INTEL_SVML'] = '0'
config.reload_config()
check_no_svml()
"""
popen = subprocess.Popen(
[sys.executable, "-c", code],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = popen.communicate()
if popen.returncode != 0:
raise AssertionError(
"process failed with code %s: stderr follows\n%s\n" %
(popen.returncode, err.decode()))
def test_svml_working_in_non_isolated_context(self):
@njit(fastmath={'fast'}, error_model="numpy")
def impl(n):
x = np.empty(n * 8, dtype=np.float64)
ret = np.empty_like(x)
for i in range(ret.size):
ret[i] += math.cosh(x[i])
return ret
impl(1)
self.assertTrue('intel_svmlcc' in impl.inspect_llvm(impl.signatures[0]))
if __name__ == '__main__':
unittest.main()
|
bsd-2-clause
| 7,166,739,711,290,618,000 | 2,163,685,815,290,817,800 | 37.515695 | 84 | 0.550413 | false |
loco-odoo/localizacion_co
|
openerp/sql_db.py
|
39
|
23723
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
# Copyright (C) 2010-2014 OpenERP s.a. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
"""
The PostgreSQL connector is a connectivity layer between the OpenERP code and
the database, *not* a database abstraction toolkit. Database abstraction is what
the ORM does, in fact.
"""
from contextlib import contextmanager
from functools import wraps
import logging
import urlparse
import uuid
import psycopg2.extras
import psycopg2.extensions
from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT, ISOLATION_LEVEL_READ_COMMITTED, ISOLATION_LEVEL_REPEATABLE_READ
from psycopg2.pool import PoolError
psycopg2.extensions.register_type(psycopg2.extensions.UNICODE)
_logger = logging.getLogger(__name__)
types_mapping = {
'date': (1082,),
'time': (1083,),
'datetime': (1114,),
}
def unbuffer(symb, cr):
if symb is None:
return None
return str(symb)
def undecimalize(symb, cr):
if symb is None:
return None
return float(symb)
for name, typeoid in types_mapping.items():
psycopg2.extensions.register_type(psycopg2.extensions.new_type(typeoid, name, lambda x, cr: x))
psycopg2.extensions.register_type(psycopg2.extensions.new_type((700, 701, 1700,), 'float', undecimalize))
import tools
from tools.func import frame_codeinfo
from datetime import datetime as mdt
from datetime import timedelta
import threading
from inspect import currentframe
import re
re_from = re.compile('.* from "?([a-zA-Z_0-9]+)"? .*$')
re_into = re.compile('.* into "?([a-zA-Z_0-9]+)"? .*$')
sql_counter = 0
class Cursor(object):
"""Represents an open transaction to the PostgreSQL DB backend,
acting as a lightweight wrapper around psycopg2's
``cursor`` objects.
``Cursor`` is the object behind the ``cr`` variable used all
over the OpenERP code.
.. rubric:: Transaction Isolation
One very important property of database transactions is the
level of isolation between concurrent transactions.
The SQL standard defines four levels of transaction isolation,
ranging from the most strict *Serializable* level, to the least
strict *Read Uncommitted* level. These levels are defined in
terms of the phenomena that must not occur between concurrent
transactions, such as *dirty read*, etc.
In the context of a generic business data management software
such as OpenERP, we need the best guarantees that no data
        corruption can ever be caused by simply running multiple
transactions in parallel. Therefore, the preferred level would
be the *serializable* level, which ensures that a set of
transactions is guaranteed to produce the same effect as
running them one at a time in some order.
However, most database management systems implement a limited
serializable isolation in the form of
`snapshot isolation <http://en.wikipedia.org/wiki/Snapshot_isolation>`_,
providing most of the same advantages as True Serializability,
with a fraction of the performance cost.
With PostgreSQL up to version 9.0, this snapshot isolation was
the implementation of both the ``REPEATABLE READ`` and
``SERIALIZABLE`` levels of the SQL standard.
As of PostgreSQL 9.1, the previous snapshot isolation implementation
was kept for ``REPEATABLE READ``, while a new ``SERIALIZABLE``
level was introduced, providing some additional heuristics to
detect a concurrent update by parallel transactions, and forcing
one of them to rollback.
OpenERP implements its own level of locking protection
for transactions that are highly likely to provoke concurrent
updates, such as stock reservations or document sequences updates.
Therefore we mostly care about the properties of snapshot isolation,
but we don't really need additional heuristics to trigger transaction
rollbacks, as we are taking care of triggering instant rollbacks
ourselves when it matters (and we can save the additional performance
hit of these heuristics).
As a result of the above, we have selected ``REPEATABLE READ`` as
the default transaction isolation level for OpenERP cursors, as
it will be mapped to the desired ``snapshot isolation`` level for
all supported PostgreSQL version (8.3 - 9.x).
Note: up to psycopg2 v.2.4.2, psycopg2 itself remapped the repeatable
read level to serializable before sending it to the database, so it would
actually select the new serializable mode on PostgreSQL 9.1. Make
sure you use psycopg2 v2.4.2 or newer if you use PostgreSQL 9.1 and
the performance hit is a concern for you.
.. attribute:: cache
Cache dictionary with a "request" (-ish) lifecycle, only lives as
        long as the cursor itself does and is proactively cleared when the
cursor is closed.
This cache should *only* be used to store repeatable reads as it
ignores rollbacks and savepoints, it should not be used to store
*any* data which may be modified during the life of the cursor.
"""
IN_MAX = 1000 # decent limit on size of IN queries - guideline = Oracle limit
def check(f):
@wraps(f)
def wrapper(self, *args, **kwargs):
if self._closed:
msg = 'Unable to use a closed cursor.'
if self.__closer:
msg += ' It was closed at %s, line %s' % self.__closer
raise psycopg2.OperationalError(msg)
return f(self, *args, **kwargs)
return wrapper
def __init__(self, pool, dbname, dsn, serialized=True):
self.sql_from_log = {}
self.sql_into_log = {}
# default log level determined at cursor creation, could be
# overridden later for debugging purposes
self.sql_log = _logger.isEnabledFor(logging.DEBUG)
self.sql_log_count = 0
# avoid the call of close() (by __del__) if an exception
# is raised by any of the following initialisations
self._closed = True
self.__pool = pool
self.dbname = dbname
# Whether to enable snapshot isolation level for this cursor.
# see also the docstring of Cursor.
self._serialized = serialized
self._cnx = pool.borrow(dsn)
self._obj = self._cnx.cursor()
if self.sql_log:
self.__caller = frame_codeinfo(currentframe(), 2)
else:
self.__caller = False
self._closed = False # real initialisation value
self.autocommit(False)
self.__closer = False
self._default_log_exceptions = True
self.cache = {}
def __build_dict(self, row):
return {d.name: row[i] for i, d in enumerate(self._obj.description)}
def dictfetchone(self):
row = self._obj.fetchone()
return row and self.__build_dict(row)
def dictfetchmany(self, size):
return map(self.__build_dict, self._obj.fetchmany(size))
def dictfetchall(self):
return map(self.__build_dict, self._obj.fetchall())
def __del__(self):
if not self._closed and not self._cnx.closed:
# Oops. 'self' has not been closed explicitly.
# The cursor will be deleted by the garbage collector,
# but the database connection is not put back into the connection
# pool, preventing some operation on the database like dropping it.
# This can also lead to a server overload.
msg = "Cursor not closed explicitly\n"
if self.__caller:
msg += "Cursor was created at %s:%s" % self.__caller
else:
msg += "Please enable sql debugging to trace the caller."
_logger.warning(msg)
self._close(True)
@check
def execute(self, query, params=None, log_exceptions=None):
if '%d' in query or '%f' in query:
_logger.warning(query)
_logger.warning("SQL queries cannot contain %d or %f anymore. Use only %s")
if params and not isinstance(params, (tuple, list, dict)):
_logger.error("SQL query parameters should be a tuple, list or dict; got %r", params)
raise ValueError("SQL query parameters should be a tuple, list or dict; got %r" % (params,))
if self.sql_log:
now = mdt.now()
try:
params = params or None
res = self._obj.execute(query, params)
except psycopg2.ProgrammingError, pe:
if self._default_log_exceptions if log_exceptions is None else log_exceptions:
_logger.error("Programming error: %s, in query %s", pe, query)
raise
except Exception:
if self._default_log_exceptions if log_exceptions is None else log_exceptions:
_logger.exception("bad query: %s", self._obj.query or query)
raise
# simple query count is always computed
self.sql_log_count += 1
# advanced stats only if sql_log is enabled
if self.sql_log:
delay = mdt.now() - now
delay = delay.seconds * 1E6 + delay.microseconds
_logger.debug("query: %s", self._obj.query)
res_from = re_from.match(query.lower())
if res_from:
self.sql_from_log.setdefault(res_from.group(1), [0, 0])
self.sql_from_log[res_from.group(1)][0] += 1
self.sql_from_log[res_from.group(1)][1] += delay
res_into = re_into.match(query.lower())
if res_into:
self.sql_into_log.setdefault(res_into.group(1), [0, 0])
self.sql_into_log[res_into.group(1)][0] += 1
self.sql_into_log[res_into.group(1)][1] += delay
return res
def split_for_in_conditions(self, ids):
"""Split a list of identifiers into one or more smaller tuples
safe for IN conditions, after uniquifying them."""
return tools.misc.split_every(self.IN_MAX, set(ids))
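    # Hedged usage sketch (not part of the original source; the table name is
    # illustrative): with IN_MAX = 1000, a call such as
    #   for chunk in cr.split_for_in_conditions(ids):
    #       cr.execute('SELECT id FROM res_partner WHERE id IN %s', (chunk,))
    # iterates over de-duplicated tuples of at most 1000 identifiers each.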
def print_log(self):
global sql_counter
if not self.sql_log:
return
def process(type):
sqllogs = {'from': self.sql_from_log, 'into': self.sql_into_log}
sum = 0
if sqllogs[type]:
sqllogitems = sqllogs[type].items()
sqllogitems.sort(key=lambda k: k[1][1])
_logger.debug("SQL LOG %s:", type)
sqllogitems.sort(lambda x, y: cmp(x[1][0], y[1][0]))
for r in sqllogitems:
delay = timedelta(microseconds=r[1][1])
_logger.debug("table: %s: %s/%s", r[0], delay, r[1][0])
sum += r[1][1]
sqllogs[type].clear()
sum = timedelta(microseconds=sum)
_logger.debug("SUM %s:%s/%d [%d]", type, sum, self.sql_log_count, sql_counter)
sqllogs[type].clear()
process('from')
process('into')
self.sql_log_count = 0
self.sql_log = False
@check
def close(self):
return self._close(False)
def _close(self, leak=False):
global sql_counter
if not self._obj:
return
del self.cache
if self.sql_log:
self.__closer = frame_codeinfo(currentframe(), 3)
# simple query count is always computed
sql_counter += self.sql_log_count
# advanced stats only if sql_log is enabled
self.print_log()
self._obj.close()
        # This forces the cursor to be freed, and thus available again. It is
        # important because otherwise we can overload the server very easily
        # because of a cursor shortage (because cursors are not garbage
        # collected as fast as they should). The problem is probably due in
        # part to browse records keeping a reference to the cursor.
del self._obj
self._closed = True
# Clean the underlying connection.
self._cnx.rollback()
if leak:
self._cnx.leaked = True
else:
chosen_template = tools.config['db_template']
templates_list = tuple(set(['template0', 'template1', 'postgres', chosen_template]))
keep_in_pool = self.dbname not in templates_list
self.__pool.give_back(self._cnx, keep_in_pool=keep_in_pool)
@check
def autocommit(self, on):
if on:
isolation_level = ISOLATION_LEVEL_AUTOCOMMIT
else:
# If a serializable cursor was requested, we
            # use the appropriate PostgreSQL isolation level
            # that maps to snapshot isolation.
# For all supported PostgreSQL versions (8.3-9.x),
# this is currently the ISOLATION_REPEATABLE_READ.
# See also the docstring of this class.
# NOTE: up to psycopg 2.4.2, repeatable read
# is remapped to serializable before being
# sent to the database, so it is in fact
# unavailable for use with pg 9.1.
isolation_level = \
ISOLATION_LEVEL_REPEATABLE_READ \
if self._serialized \
else ISOLATION_LEVEL_READ_COMMITTED
self._cnx.set_isolation_level(isolation_level)
@check
def commit(self):
""" Perform an SQL `COMMIT`
"""
return self._cnx.commit()
@check
def rollback(self):
""" Perform an SQL `ROLLBACK`
"""
return self._cnx.rollback()
def __enter__(self):
""" Using the cursor as a contextmanager automatically commits and
closes it::
with cr:
cr.execute(...)
# cr is committed if no failure occurred
# cr is closed in any case
"""
return self
def __exit__(self, exc_type, exc_value, traceback):
if exc_type is None:
self.commit()
self.close()
@contextmanager
@check
def savepoint(self):
"""context manager entering in a new savepoint"""
name = uuid.uuid1().hex
self.execute('SAVEPOINT "%s"' % name)
try:
yield
self.execute('RELEASE SAVEPOINT "%s"' % name)
except:
self.execute('ROLLBACK TO SAVEPOINT "%s"' % name)
raise
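    # Hedged usage sketch (not part of the original source; the table name is
    # illustrative): the savepoint lets one block fail without rolling back
    # the whole transaction, e.g.
    #   with cr.savepoint():
    #       cr.execute('INSERT INTO some_log (name) VALUES (%s)', ('x',))
    # An exception inside the block rolls back to the savepoint and re-raises.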
@check
def __getattr__(self, name):
return getattr(self._obj, name)
class TestCursor(Cursor):
""" A cursor to be used for tests. It keeps the transaction open across
several requests, and simulates committing, rolling back, and closing.
"""
def __init__(self, *args, **kwargs):
super(TestCursor, self).__init__(*args, **kwargs)
# in order to simulate commit and rollback, the cursor maintains a
# savepoint at its last commit
self.execute("SAVEPOINT test_cursor")
# we use a lock to serialize concurrent requests
self._lock = threading.RLock()
def acquire(self):
self._lock.acquire()
def release(self):
self._lock.release()
def force_close(self):
super(TestCursor, self).close()
def close(self):
if not self._closed:
self.rollback() # for stuff that has not been committed
self.release()
def autocommit(self, on):
_logger.debug("TestCursor.autocommit(%r) does nothing", on)
def commit(self):
self.execute("RELEASE SAVEPOINT test_cursor")
self.execute("SAVEPOINT test_cursor")
def rollback(self):
self.execute("ROLLBACK TO SAVEPOINT test_cursor")
self.execute("SAVEPOINT test_cursor")
class PsycoConnection(psycopg2.extensions.connection):
pass
class ConnectionPool(object):
""" The pool of connections to database(s)
Keep a set of connections to pg databases open, and reuse them
to open cursors for all transactions.
The connections are *not* automatically closed. Only a close_db()
can trigger that.
"""
def locked(fun):
@wraps(fun)
def _locked(self, *args, **kwargs):
self._lock.acquire()
try:
return fun(self, *args, **kwargs)
finally:
self._lock.release()
return _locked
def __init__(self, maxconn=64):
self._connections = []
self._maxconn = max(maxconn, 1)
self._lock = threading.Lock()
def __repr__(self):
used = len([1 for c, u in self._connections[:] if u])
count = len(self._connections)
return "ConnectionPool(used=%d/count=%d/max=%d)" % (used, count, self._maxconn)
def _debug(self, msg, *args):
_logger.debug(('%r ' + msg), self, *args)
@locked
def borrow(self, dsn):
# free dead and leaked connections
for i, (cnx, _) in tools.reverse_enumerate(self._connections):
if cnx.closed:
self._connections.pop(i)
self._debug('Removing closed connection at index %d: %r', i, cnx.dsn)
continue
if getattr(cnx, 'leaked', False):
delattr(cnx, 'leaked')
self._connections.pop(i)
self._connections.append((cnx, False))
_logger.warning('%r: Free leaked connection to %r', self, cnx.dsn)
for i, (cnx, used) in enumerate(self._connections):
if not used and cnx._original_dsn == dsn:
try:
cnx.reset()
except psycopg2.OperationalError:
self._debug('Cannot reset connection at index %d: %r', i, cnx.dsn)
# psycopg2 2.4.4 and earlier do not allow closing a closed connection
if not cnx.closed:
cnx.close()
continue
self._connections.pop(i)
self._connections.append((cnx, True))
self._debug('Borrow existing connection to %r at index %d', cnx.dsn, i)
return cnx
if len(self._connections) >= self._maxconn:
# try to remove the oldest connection not used
for i, (cnx, used) in enumerate(self._connections):
if not used:
self._connections.pop(i)
if not cnx.closed:
cnx.close()
self._debug('Removing old connection at index %d: %r', i, cnx.dsn)
break
else:
# note: this code is called only if the for loop has completed (no break)
raise PoolError('The Connection Pool Is Full')
try:
result = psycopg2.connect(dsn=dsn, connection_factory=PsycoConnection)
except psycopg2.Error:
_logger.exception('Connection to the database failed')
raise
result._original_dsn = dsn
self._connections.append((result, True))
self._debug('Create new connection')
return result
@locked
def give_back(self, connection, keep_in_pool=True):
self._debug('Give back connection to %r', connection.dsn)
for i, (cnx, used) in enumerate(self._connections):
if cnx is connection:
self._connections.pop(i)
if keep_in_pool:
self._connections.append((cnx, False))
self._debug('Put connection to %r in pool', cnx.dsn)
else:
self._debug('Forgot connection to %r', cnx.dsn)
cnx.close()
break
else:
            raise PoolError('This connection does not belong to the pool')
@locked
def close_all(self, dsn=None):
count = 0
last = None
for i, (cnx, used) in tools.reverse_enumerate(self._connections):
if dsn is None or cnx._original_dsn == dsn:
cnx.close()
last = self._connections.pop(i)[0]
count += 1
_logger.info('%r: Closed %d connections %s', self, count,
(dsn and last and 'to %r' % last.dsn) or '')
class Connection(object):
""" A lightweight instance of a connection to postgres
"""
def __init__(self, pool, dbname, dsn):
self.dbname = dbname
self.dsn = dsn
self.__pool = pool
def cursor(self, serialized=True):
cursor_type = serialized and 'serialized ' or ''
_logger.debug('create %scursor to %r', cursor_type, self.dsn)
return Cursor(self.__pool, self.dbname, self.dsn, serialized=serialized)
def test_cursor(self, serialized=True):
cursor_type = serialized and 'serialized ' or ''
_logger.debug('create test %scursor to %r', cursor_type, self.dsn)
return TestCursor(self.__pool, self.dbname, self.dsn, serialized=serialized)
# serialized_cursor is deprecated - cursors are serialized by default
serialized_cursor = cursor
def __nonzero__(self):
"""Check if connection is possible"""
try:
_logger.warning("__nonzero__() is deprecated. (It is too expensive to test a connection.)")
cr = self.cursor()
cr.close()
return True
except Exception:
return False
def dsn(db_or_uri):
"""parse the given `db_or_uri` and return a 2-tuple (dbname, uri)"""
if db_or_uri.startswith(('postgresql://', 'postgres://')):
# extract db from uri
us = urlparse.urlsplit(db_or_uri)
if len(us.path) > 1:
db_name = us.path[1:]
elif us.username:
db_name = us.username
else:
db_name = us.hostname
return db_name, db_or_uri
_dsn = ''
for p in ('host', 'port', 'user', 'password'):
cfg = tools.config['db_' + p]
if cfg:
_dsn += '%s=%s ' % (p, cfg)
return db_or_uri, '%sdbname=%s' % (_dsn, db_or_uri)
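# Hedged examples (not part of the original source; values are illustrative):
# dsn() accepts either a plain database name or a postgres URI, roughly
#   dsn('mydb')                   -> ('mydb', 'host=... dbname=mydb')
#   dsn('postgresql://host/mydb') -> ('mydb', 'postgresql://host/mydb')
# The first form depends on the db_host/db_port/db_user/db_password entries
# found in tools.config.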
_Pool = None
def db_connect(to, allow_uri=False):
global _Pool
if _Pool is None:
_Pool = ConnectionPool(int(tools.config['db_maxconn']))
db, uri = dsn(to)
if not allow_uri and db != to:
raise ValueError('URI connections not allowed')
return Connection(_Pool, db, uri)
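# Hedged usage sketch (not part of the original source; the database name is
# illustrative): a typical call site obtains a connection, then a cursor:
#   cr = db_connect('mydb').cursor()
#   try:
#       cr.execute('SELECT 1')
#   finally:
#       cr.close()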
def close_db(db_name):
""" You might want to call openerp.modules.registry.RegistryManager.delete(db_name) along this function."""
global _Pool
if _Pool:
_Pool.close_all(dsn(db_name)[1])
def close_all():
global _Pool
if _Pool:
_Pool.close_all()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
| 8,960,451,368,528,406,000 | 315,663,243,339,181,400 | 36.359055 | 123 | 0.592632 | false |
haiwen/pyes
|
tests/test_rivers.py
|
5
|
2907
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import unittest
from pyes.tests import ESTestCase
from pyes.rivers import CouchDBRiver, RabbitMQRiver, TwitterRiver
class RiversTestCase(ESTestCase):
def setUp(self):
super(RiversTestCase, self).setUp()
def testCreateCouchDBRiver(self):
"""
        Testing creating a river
"""
test_river = CouchDBRiver(index_name='text_index', index_type='test_type')
result = self.conn.create_river(test_river, river_name='test_index')
self.assertResultContains(result, {'ok': True})
def testDeleteCouchDBRiver(self):
"""
Testing deleting a river
"""
test_river = CouchDBRiver(index_name='text_index', index_type='test_type')
result = self.conn.delete_river(test_river, river_name='test_index')
self.assertResultContains(result, {'ok': True})
def testCreateRabbitMQRiver(self):
"""
        Testing creating a river
"""
test_river = RabbitMQRiver(index_name='text_index', index_type='test_type')
result = self.conn.create_river(test_river, river_name='test_index')
self.assertResultContains(result, {'ok': True})
def testDeleteRabbitMQRiver(self):
"""
Delete RabbitMQ river
"""
test_river = RabbitMQRiver(index_name='text_index', index_type='test_type')
result = self.conn.create_river(test_river, river_name='test_index')
result = self.conn.delete_river(test_river, river_name='test_index')
self.assertResultContains(result, {'ok': True})
def testCreateTwitterRiver(self):
"""
Create twitter river
"""
test_river = TwitterRiver('test', 'test', index_name='text_index', index_type='status')
result = self.conn.create_river(test_river, river_name='test_index')
self.assertResultContains(result, {'ok': True})
def testDeleteTwitterRiver(self):
"""
Delete Twitter river
"""
test_river = TwitterRiver('test', 'test', index_name='text_index', index_type='status')
result = self.conn.create_river(test_river, river_name='test_index')
result = self.conn.delete_river(test_river, river_name='test_index')
self.assertResultContains(result, {'ok': True})
def testCreateTwitterRiverOAuth(self):
test_river = TwitterRiver('test', 'test', index_name='text_index', index_type='test_type',
consumer_key="aaa",
consumer_secret="aaa",
access_token="aaa",
access_token_secret="aaa",
)
result = self.conn.create_river(test_river, river_name='test_index')
self.assertResultContains(result, {'ok': True})
if __name__ == "__main__":
unittest.main()
|
bsd-3-clause
| -3,525,943,027,657,839,600 | 7,371,952,396,761,766,000 | 38.283784 | 98 | 0.604747 | false |
htygithub/bokeh
|
bokeh/sampledata/gapminder.py
|
41
|
2655
|
from __future__ import absolute_import
import pandas as pd
from os.path import join
import sys
from . import _data_dir
'''
This module provides pandas DataFrame instances of four
of the datasets from gapminder.org.
These are read in from csvs that have been downloaded from Bokeh's
sample data on S3. But the original code that generated the csvs from the
raw gapminder data is available at the bottom of this file.
'''
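# Hedged usage sketch (not part of the original source; the country label is
# illustrative): each dataset below becomes a module-level DataFrame indexed
# by country, e.g.
#   from bokeh.sampledata.gapminder import fertility, regions
#   fertility.loc['Australia']   # one row of fertility rates, 1964-2013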
data_dir = _data_dir()
datasets = [
'fertility',
'life_expectancy',
'population',
'regions',
]
for dataset in datasets:
filename = join(data_dir, 'gapminder_%s.csv' % dataset)
try:
setattr(
sys.modules[__name__],
dataset,
pd.read_csv(filename, index_col='Country')
)
except (IOError, OSError):
raise RuntimeError('Could not load gapminder data file "%s". Please execute bokeh.sampledata.download()' % filename)
__all__ = datasets
# ====================================================
# Original data is from Gapminder - www.gapminder.org.
# The google docs links are maintained by gapminder
# The following script was used to get the data from gapminder
# and process it into the csvs stored in bokeh's sampledata.
"""
population_url = "http://spreadsheets.google.com/pub?key=phAwcNAVuyj0XOoBL_n5tAQ&output=xls"
fertility_url = "http://spreadsheets.google.com/pub?key=phAwcNAVuyj0TAlJeCEzcGQ&output=xls"
life_expectancy_url = "http://spreadsheets.google.com/pub?key=tiAiXcrneZrUnnJ9dBU-PAw&output=xls"
regions_url = "https://docs.google.com/spreadsheets/d/1OxmGUNWeADbPJkQxVPupSOK5MbAECdqThnvyPrwG5Os/pub?gid=1&output=xls"
def _get_data(url):
    # Get the data from the url and return only 1964 - 2013
df = pd.read_excel(url, index_col=0)
df = df.unstack().unstack()
df = df[(df.index >= 1964) & (df.index <= 2013)]
df = df.unstack().unstack()
return df
fertility_df = _get_data(fertility_url)
life_expectancy_df = _get_data(life_expectancy_url)
population_df = _get_data(population_url)
regions_df = pd.read_excel(regions_url, index_col=0)
# have common countries across all data
fertility_df = fertility_df.drop(fertility_df.index.difference(life_expectancy_df.index))
population_df = population_df.drop(population_df.index.difference(life_expectancy_df.index))
regions_df = regions_df.drop(regions_df.index.difference(life_expectancy_df.index))
fertility_df.to_csv('gapminder_fertility.csv')
population_df.to_csv('gapminder_population.csv')
life_expectancy_df.to_csv('gapminder_life_expectancy.csv')
regions_df.to_csv('gapminder_regions.csv')
"""
# ======================================================
|
bsd-3-clause
| -713,295,233,231,854,100 | -1,605,256,454,928,422,700 | 33.038462 | 124 | 0.693409 | false |
chirilo/remo
|
vendor-local/lib/python/unidecode/x0c2.py
|
253
|
4710
|
data = (
'syon', # 0x00
'syonj', # 0x01
'syonh', # 0x02
'syod', # 0x03
'syol', # 0x04
'syolg', # 0x05
'syolm', # 0x06
'syolb', # 0x07
'syols', # 0x08
'syolt', # 0x09
'syolp', # 0x0a
'syolh', # 0x0b
'syom', # 0x0c
'syob', # 0x0d
'syobs', # 0x0e
'syos', # 0x0f
'syoss', # 0x10
'syong', # 0x11
'syoj', # 0x12
'syoc', # 0x13
'syok', # 0x14
'syot', # 0x15
'syop', # 0x16
'syoh', # 0x17
'su', # 0x18
'sug', # 0x19
'sugg', # 0x1a
'sugs', # 0x1b
'sun', # 0x1c
'sunj', # 0x1d
'sunh', # 0x1e
'sud', # 0x1f
'sul', # 0x20
'sulg', # 0x21
'sulm', # 0x22
'sulb', # 0x23
'suls', # 0x24
'sult', # 0x25
'sulp', # 0x26
'sulh', # 0x27
'sum', # 0x28
'sub', # 0x29
'subs', # 0x2a
'sus', # 0x2b
'suss', # 0x2c
'sung', # 0x2d
'suj', # 0x2e
'suc', # 0x2f
'suk', # 0x30
'sut', # 0x31
'sup', # 0x32
'suh', # 0x33
'sweo', # 0x34
'sweog', # 0x35
'sweogg', # 0x36
'sweogs', # 0x37
'sweon', # 0x38
'sweonj', # 0x39
'sweonh', # 0x3a
'sweod', # 0x3b
'sweol', # 0x3c
'sweolg', # 0x3d
'sweolm', # 0x3e
'sweolb', # 0x3f
'sweols', # 0x40
'sweolt', # 0x41
'sweolp', # 0x42
'sweolh', # 0x43
'sweom', # 0x44
'sweob', # 0x45
'sweobs', # 0x46
'sweos', # 0x47
'sweoss', # 0x48
'sweong', # 0x49
'sweoj', # 0x4a
'sweoc', # 0x4b
'sweok', # 0x4c
'sweot', # 0x4d
'sweop', # 0x4e
'sweoh', # 0x4f
'swe', # 0x50
'sweg', # 0x51
'swegg', # 0x52
'swegs', # 0x53
'swen', # 0x54
'swenj', # 0x55
'swenh', # 0x56
'swed', # 0x57
'swel', # 0x58
'swelg', # 0x59
'swelm', # 0x5a
'swelb', # 0x5b
'swels', # 0x5c
'swelt', # 0x5d
'swelp', # 0x5e
'swelh', # 0x5f
'swem', # 0x60
'sweb', # 0x61
'swebs', # 0x62
'swes', # 0x63
'swess', # 0x64
'sweng', # 0x65
'swej', # 0x66
'swec', # 0x67
'swek', # 0x68
'swet', # 0x69
'swep', # 0x6a
'sweh', # 0x6b
'swi', # 0x6c
'swig', # 0x6d
'swigg', # 0x6e
'swigs', # 0x6f
'swin', # 0x70
'swinj', # 0x71
'swinh', # 0x72
'swid', # 0x73
'swil', # 0x74
'swilg', # 0x75
'swilm', # 0x76
'swilb', # 0x77
'swils', # 0x78
'swilt', # 0x79
'swilp', # 0x7a
'swilh', # 0x7b
'swim', # 0x7c
'swib', # 0x7d
'swibs', # 0x7e
'swis', # 0x7f
'swiss', # 0x80
'swing', # 0x81
'swij', # 0x82
'swic', # 0x83
'swik', # 0x84
'swit', # 0x85
'swip', # 0x86
'swih', # 0x87
'syu', # 0x88
'syug', # 0x89
'syugg', # 0x8a
'syugs', # 0x8b
'syun', # 0x8c
'syunj', # 0x8d
'syunh', # 0x8e
'syud', # 0x8f
'syul', # 0x90
'syulg', # 0x91
'syulm', # 0x92
'syulb', # 0x93
'syuls', # 0x94
'syult', # 0x95
'syulp', # 0x96
'syulh', # 0x97
'syum', # 0x98
'syub', # 0x99
'syubs', # 0x9a
'syus', # 0x9b
'syuss', # 0x9c
'syung', # 0x9d
'syuj', # 0x9e
'syuc', # 0x9f
'syuk', # 0xa0
'syut', # 0xa1
'syup', # 0xa2
'syuh', # 0xa3
'seu', # 0xa4
'seug', # 0xa5
'seugg', # 0xa6
'seugs', # 0xa7
'seun', # 0xa8
'seunj', # 0xa9
'seunh', # 0xaa
'seud', # 0xab
'seul', # 0xac
'seulg', # 0xad
'seulm', # 0xae
'seulb', # 0xaf
'seuls', # 0xb0
'seult', # 0xb1
'seulp', # 0xb2
'seulh', # 0xb3
'seum', # 0xb4
'seub', # 0xb5
'seubs', # 0xb6
'seus', # 0xb7
'seuss', # 0xb8
'seung', # 0xb9
'seuj', # 0xba
'seuc', # 0xbb
'seuk', # 0xbc
'seut', # 0xbd
'seup', # 0xbe
'seuh', # 0xbf
'syi', # 0xc0
'syig', # 0xc1
'syigg', # 0xc2
'syigs', # 0xc3
'syin', # 0xc4
'syinj', # 0xc5
'syinh', # 0xc6
'syid', # 0xc7
'syil', # 0xc8
'syilg', # 0xc9
'syilm', # 0xca
'syilb', # 0xcb
'syils', # 0xcc
'syilt', # 0xcd
'syilp', # 0xce
'syilh', # 0xcf
'syim', # 0xd0
'syib', # 0xd1
'syibs', # 0xd2
'syis', # 0xd3
'syiss', # 0xd4
'sying', # 0xd5
'syij', # 0xd6
'syic', # 0xd7
'syik', # 0xd8
'syit', # 0xd9
'syip', # 0xda
'syih', # 0xdb
'si', # 0xdc
'sig', # 0xdd
'sigg', # 0xde
'sigs', # 0xdf
'sin', # 0xe0
'sinj', # 0xe1
'sinh', # 0xe2
'sid', # 0xe3
'sil', # 0xe4
'silg', # 0xe5
'silm', # 0xe6
'silb', # 0xe7
'sils', # 0xe8
'silt', # 0xe9
'silp', # 0xea
'silh', # 0xeb
'sim', # 0xec
'sib', # 0xed
'sibs', # 0xee
'sis', # 0xef
'siss', # 0xf0
'sing', # 0xf1
'sij', # 0xf2
'sic', # 0xf3
'sik', # 0xf4
'sit', # 0xf5
'sip', # 0xf6
'sih', # 0xf7
'ssa', # 0xf8
'ssag', # 0xf9
'ssagg', # 0xfa
'ssags', # 0xfb
'ssan', # 0xfc
'ssanj', # 0xfd
'ssanh', # 0xfe
'ssad', # 0xff
)
|
bsd-3-clause
| -1,378,079,770,810,230,800 | 284,670,654,189,439,940 | 17.255814 | 19 | 0.454989 | false |
crafty78/ansible
|
lib/ansible/modules/network/ios/ios_facts.py
|
28
|
13900
|
#!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'core',
'version': '1.0'}
DOCUMENTATION = """
---
module: ios_facts
version_added: "2.2"
author: "Peter Sprygada (@privateip)"
short_description: Collect facts from remote devices running IOS
description:
- Collects a base set of device facts from a remote device that
is running IOS. This module prepends all of the
base network fact keys with C(ansible_net_<fact>). The facts
module will always collect a base set of facts from the device
and can enable or disable collection of additional facts.
extends_documentation_fragment: ios
options:
gather_subset:
description:
- When supplied, this argument will restrict the facts collected
to a given subset. Possible values for this argument include
all, hardware, config, and interfaces. Can specify a list of
values to include a larger subset. Values can also be used
with an initial C(M(!)) to specify that a specific subset should
not be collected.
required: false
default: '!config'
"""
EXAMPLES = """
# Note: examples below use the following provider dict to handle
# transport and authentication to the node.
vars:
cli:
host: "{{ inventory_hostname }}"
username: cisco
password: cisco
transport: cli
# Collect all facts from the device
- ios_facts:
gather_subset: all
provider: "{{ cli }}"
# Collect only the config and default facts
- ios_facts:
gather_subset:
- config
provider: "{{ cli }}"
# Do not collect hardware facts
- ios_facts:
gather_subset:
- "!hardware"
provider: "{{ cli }}"
"""
RETURN = """
ansible_net_gather_subset:
description: The list of fact subsets collected from the device
returned: always
type: list
# default
ansible_net_model:
description: The model name returned from the device
returned: always
type: str
ansible_net_serialnum:
description: The serial number of the remote device
returned: always
type: str
ansible_net_version:
description: The operating system version running on the remote device
returned: always
type: str
ansible_net_hostname:
description: The configured hostname of the device
returned: always
type: string
ansible_net_image:
description: The image file the device is running
returned: always
type: string
# hardware
ansible_net_filesystems:
description: All file system names available on the device
returned: when hardware is configured
type: list
ansible_net_memfree_mb:
description: The available free memory on the remote device in Mb
returned: when hardware is configured
type: int
ansible_net_memtotal_mb:
description: The total memory on the remote device in Mb
returned: when hardware is configured
type: int
# config
ansible_net_config:
description: The current active config from the device
returned: when config is configured
type: str
# interfaces
ansible_net_all_ipv4_addresses:
description: All IPv4 addresses configured on the device
returned: when interfaces is configured
type: list
ansible_net_all_ipv6_addresses:
description: All IPv6 addresses configured on the device
returned: when interfaces is configured
type: list
ansible_net_interfaces:
description: A hash of all interfaces running on the system
returned: when interfaces is configured
type: dict
ansible_net_neighbors:
description: The list of LLDP neighbors from the remote device
returned: when interfaces is configured
type: dict
"""
import re
import itertools
import ansible.module_utils.ios
from ansible.module_utils.network import NetworkModule
from ansible.module_utils.six import iteritems
from ansible.module_utils.six.moves import zip
from ansible.module_utils.pycompat24 import get_exception
class FactsBase(object):
def __init__(self, module):
self.module = module
self.facts = dict()
self.failed_commands = list()
def run(self, cmd):
try:
return self.module.cli(cmd)[0]
except:
self.failed_commands.append(cmd)
class Default(FactsBase):
def populate(self):
data = self.run('show version')
if data:
self.facts['version'] = self.parse_version(data)
self.facts['serialnum'] = self.parse_serialnum(data)
self.facts['model'] = self.parse_model(data)
self.facts['image'] = self.parse_image(data)
self.facts['hostname'] = self.parse_hostname(data)
def parse_version(self, data):
match = re.search(r'Version (\S+),', data)
if match:
return match.group(1)
def parse_hostname(self, data):
match = re.search(r'^(.+) uptime', data, re.M)
if match:
return match.group(1)
def parse_model(self, data):
match = re.search(r'^Cisco (.+) \(revision', data, re.M)
if match:
return match.group(1)
def parse_image(self, data):
match = re.search(r'image file is "(.+)"', data)
if match:
return match.group(1)
def parse_serialnum(self, data):
match = re.search(r'board ID (\S+)', data)
if match:
return match.group(1)
class Hardware(FactsBase):
def populate(self):
data = self.run('dir | include Directory')
if data:
self.facts['filesystems'] = self.parse_filesystems(data)
data = self.run('show memory statistics | include Processor')
if data:
match = re.findall(r'\s(\d+)\s', data)
if match:
self.facts['memtotal_mb'] = int(match[0]) / 1024
self.facts['memfree_mb'] = int(match[1]) / 1024
def parse_filesystems(self, data):
return re.findall(r'^Directory of (\S+)/', data, re.M)
class Config(FactsBase):
def populate(self):
data = self.run('show running-config')
if data:
self.facts['config'] = data
class Interfaces(FactsBase):
def populate(self):
self.facts['all_ipv4_addresses'] = list()
self.facts['all_ipv6_addresses'] = list()
data = self.run('show interfaces')
if data:
interfaces = self.parse_interfaces(data)
self.facts['interfaces'] = self.populate_interfaces(interfaces)
data = self.run('show ipv6 interface')
if data:
data = self.parse_interfaces(data)
self.populate_ipv6_interfaces(data)
data = self.run('show lldp')
        if data and 'LLDP is not enabled' not in data:
neighbors = self.run('show lldp neighbors detail')
if neighbors:
self.facts['neighbors'] = self.parse_neighbors(neighbors)
def populate_interfaces(self, interfaces):
facts = dict()
for key, value in iteritems(interfaces):
intf = dict()
intf['description'] = self.parse_description(value)
intf['macaddress'] = self.parse_macaddress(value)
ipv4 = self.parse_ipv4(value)
intf['ipv4'] = self.parse_ipv4(value)
if ipv4:
self.add_ip_address(ipv4['address'], 'ipv4')
intf['mtu'] = self.parse_mtu(value)
intf['bandwidth'] = self.parse_bandwidth(value)
intf['mediatype'] = self.parse_mediatype(value)
intf['duplex'] = self.parse_duplex(value)
intf['lineprotocol'] = self.parse_lineprotocol(value)
intf['operstatus'] = self.parse_operstatus(value)
intf['type'] = self.parse_type(value)
facts[key] = intf
return facts
def populate_ipv6_interfaces(self, data):
for key, value in iteritems(data):
self.facts['interfaces'][key]['ipv6'] = list()
addresses = re.findall(r'\s+(.+), subnet', value, re.M)
subnets = re.findall(r', subnet is (.+)$', value, re.M)
for addr, subnet in zip(addresses, subnets):
ipv6 = dict(address=addr.strip(), subnet=subnet.strip())
self.add_ip_address(addr.strip(), 'ipv6')
self.facts['interfaces'][key]['ipv6'].append(ipv6)
def add_ip_address(self, address, family):
if family == 'ipv4':
self.facts['all_ipv4_addresses'].append(address)
else:
self.facts['all_ipv6_addresses'].append(address)
def parse_neighbors(self, neighbors):
facts = dict()
for entry in neighbors.split('------------------------------------------------'):
if entry == '':
continue
intf = self.parse_lldp_intf(entry)
if intf not in facts:
facts[intf] = list()
fact = dict()
fact['host'] = self.parse_lldp_host(entry)
fact['port'] = self.parse_lldp_port(entry)
facts[intf].append(fact)
return facts
def parse_interfaces(self, data):
parsed = dict()
key = ''
for line in data.split('\n'):
if len(line) == 0:
continue
elif line[0] == ' ':
parsed[key] += '\n%s' % line
else:
match = re.match(r'^(\S+)', line)
if match:
key = match.group(1)
parsed[key] = line
return parsed
def parse_description(self, data):
match = re.search(r'Description: (.+)$', data, re.M)
if match:
return match.group(1)
def parse_macaddress(self, data):
match = re.search(r'address is (\S+)', data)
if match:
return match.group(1)
def parse_ipv4(self, data):
match = re.search(r'Internet address is (\S+)', data)
if match:
addr, masklen = match.group(1).split('/')
return dict(address=addr, masklen=int(masklen))
def parse_mtu(self, data):
match = re.search(r'MTU (\d+)', data)
if match:
return int(match.group(1))
def parse_bandwidth(self, data):
match = re.search(r'BW (\d+)', data)
if match:
return int(match.group(1))
def parse_duplex(self, data):
match = re.search(r'(\w+) Duplex', data, re.M)
if match:
return match.group(1)
def parse_mediatype(self, data):
match = re.search(r'media type is (.+)$', data, re.M)
if match:
return match.group(1)
def parse_type(self, data):
match = re.search(r'Hardware is (.+),', data, re.M)
if match:
return match.group(1)
def parse_lineprotocol(self, data):
match = re.search(r'line protocol is (.+)$', data, re.M)
if match:
return match.group(1)
def parse_operstatus(self, data):
match = re.search(r'^(?:.+) is (.+),', data, re.M)
if match:
return match.group(1)
def parse_lldp_intf(self, data):
match = re.search(r'^Local Intf: (.+)$', data, re.M)
if match:
return match.group(1)
def parse_lldp_host(self, data):
match = re.search(r'System Name: (.+)$', data, re.M)
if match:
return match.group(1)
def parse_lldp_port(self, data):
match = re.search(r'Port id: (.+)$', data, re.M)
if match:
return match.group(1)
FACT_SUBSETS = dict(
default=Default,
hardware=Hardware,
interfaces=Interfaces,
config=Config,
)
VALID_SUBSETS = frozenset(FACT_SUBSETS.keys())
def main():
spec = dict(
gather_subset=dict(default=['!config'], type='list')
)
module = NetworkModule(argument_spec=spec, supports_check_mode=True)
gather_subset = module.params['gather_subset']
runable_subsets = set()
exclude_subsets = set()
for subset in gather_subset:
if subset == 'all':
runable_subsets.update(VALID_SUBSETS)
continue
if subset.startswith('!'):
subset = subset[1:]
if subset == 'all':
exclude_subsets.update(VALID_SUBSETS)
continue
exclude = True
else:
exclude = False
if subset not in VALID_SUBSETS:
module.fail_json(msg='Bad subset')
if exclude:
exclude_subsets.add(subset)
else:
runable_subsets.add(subset)
if not runable_subsets:
runable_subsets.update(VALID_SUBSETS)
runable_subsets.difference_update(exclude_subsets)
runable_subsets.add('default')
facts = dict()
facts['gather_subset'] = list(runable_subsets)
instances = list()
for key in runable_subsets:
instances.append(FACT_SUBSETS[key](module))
failed_commands = list()
try:
for inst in instances:
inst.populate()
failed_commands.extend(inst.failed_commands)
facts.update(inst.facts)
except Exception:
exc = get_exception()
module.fail_json(msg=str(exc))
ansible_facts = dict()
for key, value in iteritems(facts):
key = 'ansible_net_%s' % key
ansible_facts[key] = value
module.exit_json(ansible_facts=ansible_facts, failed_commands=failed_commands)
if __name__ == '__main__':
main()
|
gpl-3.0
| 414,345,731,725,042,800 | -3,683,423,377,942,166,000 | 29.151844 | 89 | 0.604317 | false |
Kazade/NeHe-Website
|
google_appengine/lib/django-1.4/tests/regressiontests/fixtures_regress/models.py
|
33
|
5387
|
from __future__ import absolute_import
from django.contrib.auth.models import User
from django.db import models
class Animal(models.Model):
name = models.CharField(max_length=150)
latin_name = models.CharField(max_length=150)
count = models.IntegerField()
weight = models.FloatField()
# use a non-default name for the default manager
specimens = models.Manager()
def __unicode__(self):
return self.name
class Plant(models.Model):
name = models.CharField(max_length=150)
class Meta:
# For testing when upper case letter in app name; regression for #4057
db_table = "Fixtures_regress_plant"
class Stuff(models.Model):
name = models.CharField(max_length=20, null=True)
owner = models.ForeignKey(User, null=True)
def __unicode__(self):
return unicode(self.name) + u' is owned by ' + unicode(self.owner)
class Absolute(models.Model):
name = models.CharField(max_length=40)
load_count = 0
def __init__(self, *args, **kwargs):
super(Absolute, self).__init__(*args, **kwargs)
Absolute.load_count += 1
class Parent(models.Model):
name = models.CharField(max_length=10)
class Meta:
ordering = ('id',)
class Child(Parent):
data = models.CharField(max_length=10)
# Models to regression test #7572
class Channel(models.Model):
name = models.CharField(max_length=255)
class Article(models.Model):
title = models.CharField(max_length=255)
channels = models.ManyToManyField(Channel)
class Meta:
ordering = ('id',)
# Models to regression test #11428
class Widget(models.Model):
name = models.CharField(max_length=255)
class Meta:
ordering = ('name',)
def __unicode__(self):
return self.name
class WidgetProxy(Widget):
class Meta:
proxy = True
# Check for forward references in FKs and M2Ms with natural keys
class TestManager(models.Manager):
def get_by_natural_key(self, key):
return self.get(name=key)
class Store(models.Model):
objects = TestManager()
name = models.CharField(max_length=255)
class Meta:
ordering = ('name',)
def __unicode__(self):
return self.name
def natural_key(self):
return (self.name,)
class Person(models.Model):
objects = TestManager()
name = models.CharField(max_length=255)
class Meta:
ordering = ('name',)
def __unicode__(self):
return self.name
# Person doesn't actually have a dependency on store, but we need to define
# one to test the behavior of the dependency resolution algorithm.
def natural_key(self):
return (self.name,)
natural_key.dependencies = ['fixtures_regress.store']
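    # Hedged note (not part of the original source): declaring this dependency
    # makes the serializer emit Store objects before Person when dumping with
    # natural keys, e.g. roughly
    #   manage.py dumpdata --natural fixtures_regress
    # even though Person has no actual foreign key to Store.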
class Book(models.Model):
name = models.CharField(max_length=255)
author = models.ForeignKey(Person)
stores = models.ManyToManyField(Store)
class Meta:
ordering = ('name',)
def __unicode__(self):
return u'%s by %s (available at %s)' % (
self.name,
self.author.name,
', '.join(s.name for s in self.stores.all())
)
class NKManager(models.Manager):
def get_by_natural_key(self, data):
return self.get(data=data)
class NKChild(Parent):
data = models.CharField(max_length=10, unique=True)
objects = NKManager()
def natural_key(self):
return self.data
def __unicode__(self):
return u'NKChild %s:%s' % (self.name, self.data)
class RefToNKChild(models.Model):
text = models.CharField(max_length=10)
nk_fk = models.ForeignKey(NKChild, related_name='ref_fks')
nk_m2m = models.ManyToManyField(NKChild, related_name='ref_m2ms')
def __unicode__(self):
return u'%s: Reference to %s [%s]' % (
self.text,
self.nk_fk,
', '.join(str(o) for o in self.nk_m2m.all())
)
# Some models with pathological circular dependencies
class Circle1(models.Model):
name = models.CharField(max_length=255)
def natural_key(self):
return self.name
natural_key.dependencies = ['fixtures_regress.circle2']
class Circle2(models.Model):
name = models.CharField(max_length=255)
def natural_key(self):
return self.name
natural_key.dependencies = ['fixtures_regress.circle1']
class Circle3(models.Model):
name = models.CharField(max_length=255)
def natural_key(self):
return self.name
natural_key.dependencies = ['fixtures_regress.circle3']
class Circle4(models.Model):
name = models.CharField(max_length=255)
def natural_key(self):
return self.name
natural_key.dependencies = ['fixtures_regress.circle5']
class Circle5(models.Model):
name = models.CharField(max_length=255)
def natural_key(self):
return self.name
natural_key.dependencies = ['fixtures_regress.circle6']
class Circle6(models.Model):
name = models.CharField(max_length=255)
def natural_key(self):
return self.name
natural_key.dependencies = ['fixtures_regress.circle4']
class ExternalDependency(models.Model):
name = models.CharField(max_length=255)
def natural_key(self):
return self.name
natural_key.dependencies = ['fixtures_regress.book']
# Model for regression test of #11101
class Thingy(models.Model):
name = models.CharField(max_length=255)
|
bsd-3-clause
| 3,083,839,765,638,054,000 | -2,680,810,055,019,055,000 | 22.836283 | 79 | 0.652125 | false |
inveniosoftware/invenio-collections
|
invenio_collections/alembic/97faa437d867_create_collections_tables.py
|
3
|
2576
|
#
# This file is part of Invenio.
# Copyright (C) 2016 CERN.
#
# Invenio is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Create collections tables."""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = '97faa437d867'
down_revision = 'ce7adcbe1c6c'
branch_labels = ()
depends_on = None
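# Hedged usage note (not part of the original migration): with a configured
# Alembic environment, this revision is applied or reverted with, roughly,
#   alembic upgrade 97faa437d867
#   alembic downgrade ce7adcbe1c6c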
def upgrade():
"""Upgrade database."""
op.create_table(
'collection',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=255), nullable=False),
sa.Column('dbquery', sa.Text(), nullable=True),
sa.Column('rgt', sa.Integer(), nullable=False),
sa.Column('lft', sa.Integer(), nullable=False),
sa.Column('level', sa.Integer(), nullable=False),
sa.Column('parent_id', sa.Integer(), nullable=True),
sa.Column('tree_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(
['parent_id'], ['collection.id'], ondelete='CASCADE'
),
sa.PrimaryKeyConstraint('id')
)
op.create_index(
'collection_level_idx', 'collection', ['level'], unique=False
)
op.create_index('collection_lft_idx', 'collection', ['lft'], unique=False)
op.create_index('collection_rgt_idx', 'collection', ['rgt'], unique=False)
op.create_index(
op.f('ix_collection_name'), 'collection', ['name'], unique=True
)
def downgrade():
"""Downgrade database."""
op.drop_index(op.f('ix_collection_name'), table_name='collection')
op.drop_index('collection_rgt_idx', table_name='collection')
op.drop_index('collection_lft_idx', table_name='collection')
op.drop_index('collection_level_idx', table_name='collection')
op.drop_table('collection')
|
gpl-2.0
| 430,517,983,844,147,100 | -257,311,494,262,747,040 | 36.333333 | 78 | 0.680901 | false |
eezee-it/project-service
|
service_desk_issue/__openerp__.py
|
8
|
1526
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2012-2013 Daniel Reis
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Service Desk for Issues',
'summary': 'Use Project Issues for Service Desks and service teams',
'version': '8.0.1.1.0',
"category": "Project Management",
'description': """\
This module extends the ``service_desk`` module to also work with Issues.
Please refer to that module's description.
""",
'author': "Daniel Reis,Odoo Community Association (OCA)",
'website': '',
'license': 'AGPL-3',
'depends': [
'project_issue',
'service_desk',
],
'data': [
'service_desk_view.xml',
],
'installable': True,
'auto_install': True,
}
|
agpl-3.0
| -2,943,720,333,004,728,300 | 5,888,633,854,632,266,000 | 36.219512 | 78 | 0.591743 | false |
sobomax/virtualbox_64bit_edd
|
src/libs/xpcom18a4/python/xpt.py
|
26
|
17606
|
# ***** BEGIN LICENSE BLOCK *****
# Version: MPL 1.1/GPL 2.0/LGPL 2.1
#
# The contents of this file are subject to the Mozilla Public License Version
# 1.1 (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
# http://www.mozilla.org/MPL/
#
# Software distributed under the License is distributed on an "AS IS" basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
# for the specific language governing rights and limitations under the
# License.
#
# The Original Code is the Python XPCOM language bindings.
#
# The Initial Developer of the Original Code is
# ActiveState Tool Corp.
# Portions created by the Initial Developer are Copyright (C) 2000, 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# David Ascher <[email protected]> (original author)
# Mark Hammond <[email protected]>
#
# Alternatively, the contents of this file may be used under the terms of
# either the GNU General Public License Version 2 or later (the "GPL"), or
# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
# in which case the provisions of the GPL or the LGPL are applicable instead
# of those above. If you wish to allow use of your version of this file only
# under the terms of either the GPL or the LGPL, and not to allow others to
# use your version of this file under the terms of the MPL, indicate your
# decision by deleting the provisions above and replace them with the notice
# and other provisions required by the GPL or the LGPL. If you do not delete
# the provisions above, a recipient may use your version of this file under
# the terms of any one of the MPL, the GPL or the LGPL.
#
# ***** END LICENSE BLOCK *****
"""
Program: xpt.py
Task: describe interfaces etc using XPCOM reflection.
Subtasks:
output (nearly) exactly the same stuff as xpt_dump, for verification
output Python source code that can be used as a template for an interface
Status: Works pretty well if you ask me :-)
Author:
David Ascher did an original version that parsed XPT files
directly. Mark Hammond changed it to use the reflection interfaces,
but kept most of the printing logic.
Revision:
0.1: March 6, 2000
0.2: April 2000 - Mark removed lots of Davids lovely parsing code in favour
of the new xpcom interfaces that provide this info.
May 2000 - Moved into Perforce - track the log there!
Early 2001 - Moved into the Mozilla CVS tree - track the log there!
Todo:
Fill out this todo list.
"""
import string, sys
import xpcom
import xpcom._xpcom
from xpcom_consts import *
class Interface:
def __init__(self, iid):
iim = xpcom._xpcom.XPTI_GetInterfaceInfoManager()
if hasattr(iid, "upper"): # Is it a stringy thing.
item = iim.GetInfoForName(iid)
else:
item = iim.GetInfoForIID(iid)
self.interface_info = item
self.namespace = "" # where does this come from?
self.methods = Methods(item)
self.constants = Constants(item)
# delegate attributes to the real interface
def __getattr__(self, attr):
return getattr(self.interface_info, attr)
def GetParent(self):
try:
raw_parent = self.interface_info.GetParent()
if raw_parent is None:
return None
return Interface(raw_parent.GetIID())
except xpcom.Exception:
# Parent interface is probably not scriptable - assume nsISupports.
if xpcom.verbose:
# The user may be confused as to why this is happening!
print "The parent interface of IID '%s' can not be located - assuming nsISupports"
return Interface(xpcom._xpcom.IID_nsISupports)
def Describe_Python(self):
method_reprs = []
methods = filter(lambda m: not m.IsNotXPCOM(), self.methods)
for m in methods:
method_reprs.append(m.Describe_Python())
method_joiner = "\n"
methods_repr = method_joiner.join(method_reprs)
return \
"""class %s:
_com_interfaces_ = xpcom.components.interfaces.%s
# If this object needs to be registered, the following 2 are also needed.
# _reg_clsid_ = "{a new clsid generated for this object}"
# _reg_contractid_ = "The.Object.Name"\n%s""" % (self.GetName(), self.GetIID().name, methods_repr)
def Describe(self):
# Make the IID look like xtp_dump - "(" instead of "{"
iid_use = "(" + str(self.GetIID())[1:-1] + ")"
s = ' - '+self.namespace+'::'+ self.GetName() + ' ' + iid_use + ':\n'
parent = self.GetParent()
if parent is not None:
s = s + ' Parent: ' + parent.namespace + '::' + parent.GetName() + '\n'
s = s + ' Flags:\n'
if self.IsScriptable(): word = 'TRUE'
else: word = 'FALSE'
s = s + ' Scriptable: ' + word + '\n'
s = s + ' Methods:\n'
methods = filter(lambda m: not m.IsNotXPCOM(), self.methods)
if len(methods):
for m in methods:
s = s + ' ' + m.Describe() + '\n'
else:
s = s + ' No Methods\n'
s = s + ' Constants:\n'
if self.constants:
for c in self.constants:
s = s + ' ' + c.Describe() + '\n'
else:
s = s + ' No Constants\n'
return s
# A class that allows caching and iterating of methods.
class Methods:
def __init__(self, interface_info):
self.interface_info = interface_info
try:
self.items = [None] * interface_info.GetMethodCount()
except xpcom.Exception:
if xpcom.verbose:
print "** GetMethodCount failed?? - assuming no methods"
self.items = []
def __len__(self):
return len(self.items)
def __getitem__(self, index):
ret = self.items[index]
if ret is None:
mi = self.interface_info.GetMethodInfo(index)
ret = self.items[index] = Method(mi, index, self.interface_info)
return ret
class Method:
def __init__(self, method_info, method_index, interface_info = None):
self.interface_info = interface_info
self.method_index = method_index
self.flags, self.name, param_descs, self.result_desc = method_info
# Build the params.
self.params = []
pi=0
for pd in param_descs:
self.params.append( Parameter(pd, pi, method_index, interface_info) )
pi = pi + 1
# Run over the params setting the "sizeof" params to hidden.
for p in self.params:
td = p.type_desc
tag = XPT_TDP_TAG(td[0])
if tag==T_ARRAY and p.IsIn():
self.params[td[1]].hidden_indicator = 2
elif tag in [T_PSTRING_SIZE_IS, T_PWSTRING_SIZE_IS] and p.IsIn():
self.params[td[1]].hidden_indicator = 1
def IsGetter(self):
return (self.flags & XPT_MD_GETTER)
def IsSetter(self):
return (self.flags & XPT_MD_SETTER)
def IsNotXPCOM(self):
return (self.flags & XPT_MD_NOTXPCOM)
def IsConstructor(self):
return (self.flags & XPT_MD_CTOR)
def IsHidden(self):
return (self.flags & XPT_MD_HIDDEN)
def Describe_Python(self):
if self.method_index < 3: # Ignore QI etc
return ""
base_name = self.name
if self.IsGetter():
name = "get_%s" % (base_name,)
elif self.IsSetter():
name = "set_%s" % (base_name,)
else:
name = base_name
param_decls = ["self"]
in_comments = []
out_descs = []
result_comment = "Result: void - None"
for p in self.params:
in_desc, in_desc_comments, out_desc, this_result_comment = p.Describe_Python()
if in_desc is not None:
param_decls.append(in_desc)
if in_desc_comments is not None:
in_comments.append(in_desc_comments)
if out_desc is not None:
out_descs.append(out_desc)
if this_result_comment is not None:
result_comment = this_result_comment
joiner = "\n # "
in_comment = out_desc = ""
if in_comments: in_comment = joiner + joiner.join(in_comments)
if out_descs: out_desc = joiner + joiner.join(out_descs)
return """ def %s( %s ):
# %s%s%s
pass""" % (name, ", ".join(param_decls), result_comment, in_comment, out_desc)
def Describe(self):
s = ''
if self.IsGetter():
G = 'G'
else:
G = ' '
if self.IsSetter():
S = 'S'
else: S = ' '
if self.IsHidden():
H = 'H'
else:
H = ' '
if self.IsNotXPCOM():
N = 'N'
else:
N = ' '
if self.IsConstructor():
C = 'C'
else:
C = ' '
def desc(a): return a.Describe()
method_desc = string.join(map(desc, self.params), ', ')
result_type = TypeDescriber(self.result_desc[0], None)
return_desc = result_type.Describe()
i = string.find(return_desc, 'retval ')
if i != -1:
return_desc = return_desc[:i] + return_desc[i+len('retval '):]
return G+S+H+N+C+' '+return_desc+' '+self.name + '('+ method_desc + ');'
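# A Describe() line is therefore of the form "GSHNC <return type> <name>(<params>);"
# where each flag column is either its letter or a space - e.g. a getter might
# print roughly "G     int32 value();". (Illustrative only; the exact text
# depends on the interface being dumped.)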
class Parameter:
def __init__(self, param_desc, param_index, method_index, interface_info = None):
self.param_flags, self.type_desc = param_desc
self.hidden_indicator = 0 # Is this a special "size" type param that will be hidden from Python?
self.param_index = param_index
self.method_index= method_index
self.interface_info = interface_info
def __repr__(self):
return "<param %(param_index)d (method %(method_index)d) - flags = 0x%(param_flags)x, type = %(type_desc)s>" % self.__dict__
def IsIn(self):
return XPT_PD_IS_IN(self.param_flags)
def IsOut(self):
return XPT_PD_IS_OUT(self.param_flags)
def IsInOut(self):
return self.IsIn() and self.IsOut()
def IsRetval(self):
return XPT_PD_IS_RETVAL(self.param_flags)
def IsShared(self):
return XPT_PD_IS_SHARED(self.param_flags)
def IsDipper(self):
return XPT_PD_IS_DIPPER(self.param_flags)
def Describe_Python(self):
name = "param%d" % (self.param_index,)
if self.hidden_indicator:
            # Could remove the comment - I'm trying to tell the user where that param has
# gone from the signature!
return None, "%s is a hidden parameter" % (name,), None, None
t = TypeDescriber(self.type_desc[0], self)
decl = in_comment = out_comment = result_comment = None
type_desc = t.Describe()
if self.IsIn() and not self.IsDipper():
decl = name
extra=""
if self.IsOut():
extra = "Out"
in_comment = "In%s: %s: %s" % (extra, name, type_desc)
elif self.IsOut() or self.IsDipper():
if self.IsRetval():
result_comment = "Result: %s" % (type_desc,)
else:
out_comment = "Out: %s" % (type_desc,)
return decl, in_comment, out_comment, result_comment
def Describe(self):
parts = []
if self.IsInOut():
parts.append('inout')
elif self.IsIn():
parts.append('in')
elif self.IsOut():
parts.append('out')
if self.IsDipper(): parts.append("dipper")
if self.IsRetval(): parts.append('retval')
if self.IsShared(): parts.append('shared')
t = TypeDescriber(self.type_desc[0], self)
type_str = t.Describe()
parts.append(type_str)
return string.join(parts)
# A class that allows caching and iterating of constants.
class Constants:
def __init__(self, interface_info):
self.interface_info = interface_info
try:
self.items = [None] * interface_info.GetConstantCount()
except xpcom.Exception:
if xpcom.verbose:
print "** GetConstantCount failed?? - assuming no constants"
self.items = []
def __len__(self):
return len(self.items)
def __getitem__(self, index):
ret = self.items[index]
if ret is None:
ci = self.interface_info.GetConstant(index)
ret = self.items[index] = Constant(ci)
return ret
class Constant:
def __init__(self, ci):
self.name, self.type, self.value = ci
def Describe(self):
return TypeDescriber(self.type, None).Describe() + ' ' +self.name+' = '+str(self.value)+';'
__str__ = Describe
def MakeReprForInvoke(param):
tag = param.type_desc[0] & XPT_TDP_TAGMASK
if tag == T_INTERFACE:
i_info = param.interface_info
try:
iid = i_info.GetIIDForParam(param.method_index, param.param_index)
except xpcom.Exception:
# IID not available (probably not scriptable) - just use nsISupports.
iid = xpcom._xpcom.IID_nsISupports
return param.type_desc[0], 0, 0, str(iid)
elif tag == T_ARRAY:
i_info = param.interface_info
array_desc = i_info.GetTypeForParam(param.method_index, param.param_index, 1)
return param.type_desc[:-1] + array_desc[:1]
return param.type_desc
class TypeDescriber:
def __init__(self, type_flags, param):
self.type_flags = type_flags
self.tag = XPT_TDP_TAG(self.type_flags)
self.param = param
def IsPointer(self):
return XPT_TDP_IS_POINTER(self.type_flags)
def IsUniquePointer(self):
return XPT_TDP_IS_UNIQUE_POINTER(self.type_flags)
def IsReference(self):
return XPT_TDP_IS_REFERENCE(self.type_flags)
def repr_for_invoke(self):
return (self.type_flags,)
def GetName(self):
is_ptr = self.IsPointer()
data = type_info_map.get(self.tag)
if data is None:
data = ("unknown",)
if self.IsReference():
if len(data) > 2:
return data[2]
return data[0] + " &"
if self.IsPointer():
if len(data)>1:
return data[1]
return data[0] + " *"
return data[0]
def Describe(self):
if self.tag == T_ARRAY:
# NOTE - Adding a type specifier to the array is different from xpt_dump.exe
if self.param is None or self.param.interface_info is None:
type_desc = "" # Dont have explicit info about the array type :-(
else:
i_info = self.param.interface_info
type_code = i_info.GetTypeForParam(self.param.method_index, self.param.param_index, 1)
type_desc = TypeDescriber( type_code[0], None).Describe()
return self.GetName() + "[" + type_desc + "]"
elif self.tag == T_INTERFACE:
if self.param is None or self.param.interface_info is None:
return "nsISomething" # Dont have explicit info about the IID :-(
i_info = self.param.interface_info
m_index = self.param.method_index
p_index = self.param.param_index
try:
iid = i_info.GetIIDForParam(m_index, p_index)
return iid.name
except xpcom.Exception:
return "nsISomething"
return self.GetName()
# These are just for output purposes, so should be
# the same as xpt_dump uses
type_info_map = {
T_I8 : ("int8",),
T_I16 : ("int16",),
T_I32 : ("int32",),
T_I64 : ("int64",),
T_U8 : ("uint8",),
T_U16 : ("uint16",),
T_U32 : ("uint32",),
T_U64 : ("uint64",),
T_FLOAT : ("float",),
T_DOUBLE : ("double",),
T_BOOL : ("boolean",),
T_CHAR : ("char",),
T_WCHAR : ("wchar_t", "wstring"),
T_VOID : ("void",),
T_IID : ("reserved", "nsIID *", "nsIID &"),
T_DOMSTRING : ("DOMString",),
T_CHAR_STR : ("reserved", "string"),
T_WCHAR_STR : ("reserved", "wstring"),
T_INTERFACE : ("reserved", "Interface"),
T_INTERFACE_IS : ("reserved", "InterfaceIs *"),
T_ARRAY : ("reserved", "Array"),
T_PSTRING_SIZE_IS : ("reserved", "string_s"),
T_PWSTRING_SIZE_IS : ("reserved", "wstring_s"),
}
def dump_interface(iid, mode):
interface = Interface(iid)
describer_name = "Describe"
if mode == "xptinfo": mode = None
if mode is not None:
describer_name = describer_name + "_" + mode.capitalize()
describer = getattr(interface, describer_name)
print describer()
if __name__=='__main__':
if len(sys.argv) == 1:
print "Usage: xpt.py [-xptinfo] interface_name, ..."
print " -info: Dump in a style similar to the xptdump tool"
print "Dumping nsISupports and nsIInterfaceInfo"
sys.argv.append('nsIInterfaceInfo')
sys.argv.append('-xptinfo')
sys.argv.append('nsISupports')
sys.argv.append('nsIInterfaceInfo')
mode = "Python"
for i in sys.argv[1:]:
if i[0] == "-":
mode = i[1:]
else:
dump_interface(i, mode)
|
gpl-2.0
| 2,814,357,227,243,330,600 | 649,219,478,632,684,500 | 36.380042 | 132 | 0.569749 | false |
yuruofeifei/mxnet
|
python/mxnet/gluon/model_zoo/vision/densenet.py
|
10
|
7848
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
# pylint: disable= arguments-differ
"""DenseNet, implemented in Gluon."""
__all__ = ['DenseNet', 'densenet121', 'densenet161', 'densenet169', 'densenet201']
from ....context import cpu
from ...block import HybridBlock
from ... import nn
from ..custom_layers import HybridConcurrent, Identity
# Helpers
def _make_dense_block(num_layers, bn_size, growth_rate, dropout, stage_index):
out = nn.HybridSequential(prefix='stage%d_'%stage_index)
with out.name_scope():
for _ in range(num_layers):
out.add(_make_dense_layer(growth_rate, bn_size, dropout))
return out
def _make_dense_layer(growth_rate, bn_size, dropout):
new_features = nn.HybridSequential(prefix='')
new_features.add(nn.BatchNorm())
new_features.add(nn.Activation('relu'))
new_features.add(nn.Conv2D(bn_size * growth_rate, kernel_size=1, use_bias=False))
new_features.add(nn.BatchNorm())
new_features.add(nn.Activation('relu'))
new_features.add(nn.Conv2D(growth_rate, kernel_size=3, padding=1, use_bias=False))
if dropout:
new_features.add(nn.Dropout(dropout))
out = HybridConcurrent(concat_dim=1, prefix='')
out.add(Identity())
out.add(new_features)
return out
def _make_transition(num_output_features):
out = nn.HybridSequential(prefix='')
out.add(nn.BatchNorm())
out.add(nn.Activation('relu'))
out.add(nn.Conv2D(num_output_features, kernel_size=1, use_bias=False))
out.add(nn.AvgPool2D(pool_size=2, strides=2))
return out
# Net
class DenseNet(HybridBlock):
r"""Densenet-BC model from the
`"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_ paper.
Parameters
----------
num_init_features : int
Number of filters to learn in the first convolution layer.
growth_rate : int
Number of filters to add each layer (`k` in the paper).
block_config : list of int
List of integers for numbers of layers in each pooling block.
bn_size : int, default 4
        Multiplicative factor for the number of bottleneck layers.
(i.e. bn_size * k features in the bottleneck layer)
dropout : float, default 0
Rate of dropout after each dense layer.
classes : int, default 1000
Number of classification classes.
"""
def __init__(self, num_init_features, growth_rate, block_config,
bn_size=4, dropout=0, classes=1000, **kwargs):
super(DenseNet, self).__init__(**kwargs)
with self.name_scope():
self.features = nn.HybridSequential(prefix='')
self.features.add(nn.Conv2D(num_init_features, kernel_size=7,
strides=2, padding=3, use_bias=False))
self.features.add(nn.BatchNorm())
self.features.add(nn.Activation('relu'))
self.features.add(nn.MaxPool2D(pool_size=3, strides=2, padding=1))
# Add dense blocks
num_features = num_init_features
for i, num_layers in enumerate(block_config):
self.features.add(_make_dense_block(num_layers, bn_size, growth_rate, dropout, i+1))
num_features = num_features + num_layers * growth_rate
if i != len(block_config) - 1:
self.features.add(_make_transition(num_features // 2))
num_features = num_features // 2
self.features.add(nn.BatchNorm())
self.features.add(nn.Activation('relu'))
self.features.add(nn.AvgPool2D(pool_size=7))
self.features.add(nn.Flatten())
self.output = nn.Dense(classes)
def hybrid_forward(self, F, x):
x = self.features(x)
x = self.output(x)
return x
# Specification
densenet_spec = {121: (64, 32, [6, 12, 24, 16]),
161: (96, 48, [6, 12, 36, 24]),
169: (64, 32, [6, 12, 32, 32]),
201: (64, 32, [6, 12, 48, 32])}
# Constructor
def get_densenet(num_layers, pretrained=False, ctx=cpu(), root='~/.mxnet/models', **kwargs):
r"""Densenet-BC model from the
`"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_ paper.
Parameters
----------
num_layers : int
Number of layers for the variant of densenet. Options are 121, 161, 169, 201.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
num_init_features, growth_rate, block_config = densenet_spec[num_layers]
net = DenseNet(num_init_features, growth_rate, block_config, **kwargs)
if pretrained:
from ..model_store import get_model_file
net.load_params(get_model_file('densenet%d'%(num_layers), root=root), ctx=ctx)
return net
def densenet121(**kwargs):
r"""Densenet-BC 121-layer model from the
`"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_ paper.
Parameters
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_densenet(121, **kwargs)
def densenet161(**kwargs):
r"""Densenet-BC 161-layer model from the
`"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_ paper.
Parameters
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_densenet(161, **kwargs)
def densenet169(**kwargs):
r"""Densenet-BC 169-layer model from the
`"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_ paper.
Parameters
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_densenet(169, **kwargs)
def densenet201(**kwargs):
r"""Densenet-BC 201-layer model from the
`"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_ paper.
Parameters
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_densenet(201, **kwargs)
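# Minimal usage sketch (not part of the original file; assumes a working
# MXNet/Gluon install - the values below are illustrative):
#   import mxnet as mx
#   net = densenet121()
#   net.initialize(ctx=mx.cpu())
#   x = mx.nd.zeros((1, 3, 224, 224))
#   y = net(x)  # (1, 1000) class scores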
|
apache-2.0
| -7,705,459,179,846,439,000 | -4,606,947,119,919,871,500 | 37.851485 | 100 | 0.650102 | false |
maestrano/openerp
|
openerp/addons/account_payment/wizard/account_payment_populate_statement.py
|
40
|
6057
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from lxml import etree
from openerp.osv import fields, osv
class account_payment_populate_statement(osv.osv_memory):
_name = "account.payment.populate.statement"
_description = "Account Payment Populate Statement"
_columns = {
'lines': fields.many2many('payment.line', 'payment_line_rel_', 'payment_id', 'line_id', 'Payment Lines')
}
def fields_view_get(self, cr, uid, view_id=None, view_type='form', context=None, toolbar=False, submenu=False):
line_obj = self.pool.get('payment.line')
res = super(account_payment_populate_statement, self).fields_view_get(cr, uid, view_id=view_id, view_type=view_type, context=context, toolbar=toolbar, submenu=False)
line_ids = line_obj.search(cr, uid, [
('move_line_id.reconcile_id', '=', False),
('bank_statement_line_id', '=', False),
('move_line_id.state','=','valid')])
line_ids.extend(line_obj.search(cr, uid, [
('move_line_id.reconcile_id', '=', False),
('order_id.mode', '=', False),
('move_line_id.state','=','valid')]))
domain = '[("id", "in", '+ str(line_ids)+')]'
doc = etree.XML(res['arch'])
nodes = doc.xpath("//field[@name='lines']")
for node in nodes:
node.set('domain', domain)
res['arch'] = etree.tostring(doc)
return res
def populate_statement(self, cr, uid, ids, context=None):
line_obj = self.pool.get('payment.line')
statement_obj = self.pool.get('account.bank.statement')
statement_line_obj = self.pool.get('account.bank.statement.line')
currency_obj = self.pool.get('res.currency')
voucher_obj = self.pool.get('account.voucher')
voucher_line_obj = self.pool.get('account.voucher.line')
move_line_obj = self.pool.get('account.move.line')
if context is None:
context = {}
data = self.read(cr, uid, ids, [], context=context)[0]
line_ids = data['lines']
if not line_ids:
return {'type': 'ir.actions.act_window_close'}
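        # For each selected payment line: convert its amount to the statement
        # currency, create a payment voucher (plus the matching voucher line),
        # then add a bank statement line and link it back to the payment line.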
statement = statement_obj.browse(cr, uid, context['active_id'], context=context)
for line in line_obj.browse(cr, uid, line_ids, context=context):
ctx = context.copy()
            ctx['date'] = line.ml_maturity_date  # this was value_date earlier, but that field no longer exists
amount = currency_obj.compute(cr, uid, line.currency.id,
statement.currency.id, line.amount_currency, context=ctx)
if not line.move_line_id.id:
continue
context.update({'move_line_ids': [line.move_line_id.id]})
result = voucher_obj.onchange_partner_id(cr, uid, [], partner_id=line.partner_id.id, journal_id=statement.journal_id.id, amount=abs(amount), currency_id= statement.currency.id, ttype='payment', date=line.ml_maturity_date, context=context)
if line.move_line_id:
voucher_res = {
'type': 'payment',
'name': line.name,
'partner_id': line.partner_id.id,
'journal_id': statement.journal_id.id,
'account_id': result['value'].get('account_id', statement.journal_id.default_credit_account_id.id),
'company_id': statement.company_id.id,
'currency_id': statement.currency.id,
'date': line.date or time.strftime('%Y-%m-%d'),
'amount': abs(amount),
'period_id': statement.period_id.id,
}
voucher_id = voucher_obj.create(cr, uid, voucher_res, context=context)
voucher_line_dict = {}
for line_dict in result['value']['line_cr_ids'] + result['value']['line_dr_ids']:
move_line = move_line_obj.browse(cr, uid, line_dict['move_line_id'], context)
if line.move_line_id.move_id.id == move_line.move_id.id:
voucher_line_dict = line_dict
if voucher_line_dict:
voucher_line_dict.update({'voucher_id': voucher_id})
voucher_line_obj.create(cr, uid, voucher_line_dict, context=context)
st_line_id = statement_line_obj.create(cr, uid, {
'name': line.order_id.reference or '?',
'amount': - amount,
'type': 'supplier',
'partner_id': line.partner_id.id,
'account_id': line.move_line_id.account_id.id,
'statement_id': statement.id,
'ref': line.communication,
'voucher_id': voucher_id,
}, context=context)
line_obj.write(cr, uid, [line.id], {'bank_statement_line_id': st_line_id})
return {'type': 'ir.actions.act_window_close'}
account_payment_populate_statement()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
| -8,153,124,896,560,761,000 | -5,121,296,370,671,850,000 | 48.243902 | 250 | 0.56348 | false |
ammaradil/fibonacci
|
Lib/site-packages/django/contrib/gis/db/models/aggregates.py
|
414
|
2395
|
from django.contrib.gis.db.models.fields import ExtentField
from django.db.models.aggregates import Aggregate
__all__ = ['Collect', 'Extent', 'Extent3D', 'MakeLine', 'Union']
class GeoAggregate(Aggregate):
function = None
is_extent = False
def as_sql(self, compiler, connection):
# this will be called again in parent, but it's needed now - before
# we get the spatial_aggregate_name
connection.ops.check_expression_support(self)
self.function = connection.ops.spatial_aggregate_name(self.name)
return super(GeoAggregate, self).as_sql(compiler, connection)
def as_oracle(self, compiler, connection):
if not hasattr(self, 'tolerance'):
self.tolerance = 0.05
self.extra['tolerance'] = self.tolerance
if not self.is_extent:
self.template = '%(function)s(SDOAGGRTYPE(%(expressions)s,%(tolerance)s))'
return self.as_sql(compiler, connection)
def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
c = super(GeoAggregate, self).resolve_expression(query, allow_joins, reuse, summarize, for_save)
for expr in c.get_source_expressions():
if not hasattr(expr.field, 'geom_type'):
raise ValueError('Geospatial aggregates only allowed on geometry fields.')
return c
def convert_value(self, value, expression, connection, context):
return connection.ops.convert_geom(value, self.output_field)
class Collect(GeoAggregate):
name = 'Collect'
class Extent(GeoAggregate):
name = 'Extent'
is_extent = '2D'
def __init__(self, expression, **extra):
super(Extent, self).__init__(expression, output_field=ExtentField(), **extra)
def convert_value(self, value, expression, connection, context):
return connection.ops.convert_extent(value, context.get('transformed_srid'))
class Extent3D(GeoAggregate):
name = 'Extent3D'
is_extent = '3D'
def __init__(self, expression, **extra):
super(Extent3D, self).__init__(expression, output_field=ExtentField(), **extra)
def convert_value(self, value, expression, connection, context):
return connection.ops.convert_extent3d(value, context.get('transformed_srid'))
class MakeLine(GeoAggregate):
name = 'MakeLine'
class Union(GeoAggregate):
name = 'Union'
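# Example usage (illustrative; assumes a GeoDjango model "City" with a
# geometry field named "point"):
#   from django.contrib.gis.db.models import Extent, Union
#   City.objects.aggregate(Extent('point'))  # -> {'point__extent': (xmin, ymin, xmax, ymax)}
#   City.objects.aggregate(Union('point'))   # -> {'point__union': <GEOSGeometry>}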
|
mit
| 7,080,334,992,157,257,000 | 8,186,818,255,283,014,000 | 34.220588 | 108 | 0.673904 | false |
lihui7115/ChromiumGStreamerBackend
|
tools/telemetry/telemetry/web_perf/metrics/blob_timeline_unittest.py
|
14
|
5994
|
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from collections import namedtuple
from telemetry.internal.results import page_test_results
from telemetry.page import page
from telemetry.web_perf.metrics import blob_timeline
from telemetry.web_perf import timeline_interaction_record
FakeEvent = namedtuple('Event', 'name, start, end, thread_duration, args')
Interaction = timeline_interaction_record.TimelineInteractionRecord
TEST_INTERACTION_LABEL = 'Action_TestInteraction'
WRITE_EVENT_NAME = 'Registry::RegisterBlob'
READ_EVENT_NAME = 'BlobRequest'
def GetBlobMetrics(events, interactions):
results = page_test_results.PageTestResults()
test_page = page.Page('file://blank.html')
results.WillRunPage(test_page)
blob_timeline.BlobTimelineMetric()._AddWriteResultsInternal(
events, interactions, results) # pylint:disable=protected-access
blob_timeline.BlobTimelineMetric()._AddReadResultsInternal(
events, interactions, results) # pylint:disable=protected-access
return_dict = dict((value.name, value.values) for value in
results.current_page_run.values)
results.DidRunPage(test_page)
return return_dict
def FakeWriteEvent(start, end, thread_duration=None):
if not thread_duration:
thread_duration = end - start
return FakeEvent(blob_timeline.WRITE_EVENT_NAME,
start, end, thread_duration, {'uuid':'fakeuuid'})
def FakeReadEvent(start, end, uuid, thread_duration=None):
if not thread_duration:
thread_duration = end - start
return FakeEvent(blob_timeline.READ_EVENT_NAME,
start, end, thread_duration, {'uuid': uuid})
def TestInteraction(start, end):
return Interaction(TEST_INTERACTION_LABEL, start, end)
class BlobTimelineMetricUnitTest(unittest.TestCase):
def testWriteMetric(self):
events = [FakeWriteEvent(0, 1),
FakeWriteEvent(9, 11),
FakeWriteEvent(10, 13),
FakeWriteEvent(20, 24),
FakeWriteEvent(21, 26),
FakeWriteEvent(29, 35),
FakeWriteEvent(30, 37),
FakeWriteEvent(40, 48),
FakeWriteEvent(41, 50),
FakeEvent('something', 10, 13, 3, {}),
FakeEvent('FrameView::something', 20, 24, 4, {}),
FakeEvent('SomeThing::performLayout', 30, 37, 7, {}),
FakeEvent('something else', 40, 48, 8, {})]
interactions = [TestInteraction(10, 20),
TestInteraction(30, 40)]
self.assertEqual({'blob-reads': None, 'blob-writes': None},
GetBlobMetrics(events, []))
self.assertEqual({'blob-reads': None, 'blob-writes': None},
GetBlobMetrics([], interactions))
# The first event starts before the first interaction, so it is ignored.
# The second event starts before the first interaction, so it is ignored.
# The third event starts during the first interaction, and its duration is
# 13 - 10 = 3.
# The fourth event starts during the first interaction, and its duration is
# 24 - 20 = 4.
# The fifth event starts between the two interactions, so it is ignored.
# The sixth event starts between the two interactions, so it is ignored.
# The seventh event starts during the second interaction, and its duration
# is 37 - 30 = 7.
# The eighth event starts during the second interaction and its duration is
# 48 - 40 = 8.
# The ninth event starts after the last interaction, so it is ignored.
# The rest of the events are not layout events, so they are ignored.
self.assertEqual({'blob-reads': None, 'blob-writes': [3, 4, 7, 8]},
GetBlobMetrics(events, interactions))
def testReadMetric(self):
events = [FakeReadEvent(0, 1, 'a'),
FakeReadEvent(9, 11, 'a'),
FakeReadEvent(10, 13, 'b', 1), # counts
FakeReadEvent(15, 18, 'b'), # counts
FakeReadEvent(21, 26, 'b'),
FakeReadEvent(29, 35, 'c'),
FakeReadEvent(31, 32, 'e'), # counts
FakeReadEvent(34, 36, 'e', 1), # counts
FakeReadEvent(32, 37, 'd'), # counts
FakeEvent('something', 10, 13, 3, {}),
FakeEvent('something else', 40, 48, 8, {})]
interactions = [TestInteraction(10, 20),
TestInteraction(30, 40)]
self.assertEqual({'blob-reads': None, 'blob-writes': None},
GetBlobMetrics(events, []))
self.assertEqual({'blob-reads': None, 'blob-writes': None},
GetBlobMetrics([], interactions))
# We ignore events outside of the interaction intervals, and we use the
    # beginning of the first event of the interval and the end of the last
# event.
# 18 - 10 = 8
# 37 - 32 = 5
self.assertEqual({'blob-reads': [4, 2, 5], 'blob-writes': None},
GetBlobMetrics(events, interactions))
def testReadAndWriteMetrics(self):
events = [FakeReadEvent(0, 1, 'a'),
FakeReadEvent(9, 11, 'a'),
FakeReadEvent(10, 13, 'b'), # counts
FakeWriteEvent(15, 18), # counts
FakeReadEvent(21, 26, 'c'),
FakeReadEvent(29, 35, 'd'),
FakeWriteEvent(31, 34, 1), # counts
FakeReadEvent(32, 33, 'e'), # counts
FakeReadEvent(34, 35, 'e'), # counts
FakeEvent('something', 31, 33, 2, {})]
interactions = [TestInteraction(10, 20),
TestInteraction(30, 35)]
self.assertEqual({'blob-reads': None, 'blob-writes': None},
GetBlobMetrics(events, []))
self.assertEqual({'blob-reads': None, 'blob-writes': None},
GetBlobMetrics([], interactions))
# We use the read events in the interactions, so the same as the test above.
self.assertEqual({'blob-reads': [3, 2], 'blob-writes': [3, 1]},
GetBlobMetrics(events, interactions))
|
bsd-3-clause
| 1,700,911,770,882,989,300 | -4,897,666,850,931,097,000 | 42.122302 | 80 | 0.634801 | false |
huongttlan/bokeh
|
bokeh/compat/mplexporter/renderers/base.py
|
44
|
14355
|
import warnings
import itertools
from contextlib import contextmanager
import numpy as np
from matplotlib import transforms
from .. import utils
from .. import _py3k_compat as py3k
class Renderer(object):
@staticmethod
def ax_zoomable(ax):
return bool(ax and ax.get_navigate())
@staticmethod
def ax_has_xgrid(ax):
        return bool(ax and ax.xaxis._gridOnMajor and ax.xaxis.get_gridlines())
@staticmethod
def ax_has_ygrid(ax):
return bool(ax and ax.yaxis._gridOnMajor and ax.yaxis.get_gridlines())
@property
def current_ax_zoomable(self):
return self.ax_zoomable(self._current_ax)
@property
def current_ax_has_xgrid(self):
return self.ax_has_xgrid(self._current_ax)
@property
def current_ax_has_ygrid(self):
return self.ax_has_ygrid(self._current_ax)
@contextmanager
def draw_figure(self, fig, props):
if hasattr(self, "_current_fig") and self._current_fig is not None:
warnings.warn("figure embedded in figure: something is wrong")
self._current_fig = fig
self._fig_props = props
self.open_figure(fig=fig, props=props)
yield
self.close_figure(fig=fig)
self._current_fig = None
self._fig_props = {}
@contextmanager
def draw_axes(self, ax, props):
if hasattr(self, "_current_ax") and self._current_ax is not None:
warnings.warn("axes embedded in axes: something is wrong")
self._current_ax = ax
self._ax_props = props
self.open_axes(ax=ax, props=props)
yield
self.close_axes(ax=ax)
self._current_ax = None
self._ax_props = {}
@contextmanager
def draw_legend(self, legend, props):
self._current_legend = legend
self._legend_props = props
self.open_legend(legend=legend, props=props)
yield
self.close_legend(legend=legend)
self._current_legend = None
self._legend_props = {}
# Following are the functions which should be overloaded in subclasses
def open_figure(self, fig, props):
"""
Begin commands for a particular figure.
Parameters
----------
fig : matplotlib.Figure
The Figure which will contain the ensuing axes and elements
props : dictionary
The dictionary of figure properties
"""
pass
def close_figure(self, fig):
"""
Finish commands for a particular figure.
Parameters
----------
fig : matplotlib.Figure
The figure which is finished being drawn.
"""
pass
def open_axes(self, ax, props):
"""
Begin commands for a particular axes.
Parameters
----------
ax : matplotlib.Axes
The Axes which will contain the ensuing axes and elements
props : dictionary
The dictionary of axes properties
"""
pass
def close_axes(self, ax):
"""
Finish commands for a particular axes.
Parameters
----------
ax : matplotlib.Axes
The Axes which is finished being drawn.
"""
pass
def open_legend(self, legend, props):
"""
        Begin commands for a particular legend.
Parameters
----------
legend : matplotlib.legend.Legend
The Legend that will contain the ensuing elements
props : dictionary
The dictionary of legend properties
"""
pass
def close_legend(self, legend):
"""
Finish commands for a particular legend.
Parameters
----------
legend : matplotlib.legend.Legend
The Legend which is finished being drawn
"""
pass
def draw_marked_line(self, data, coordinates, linestyle, markerstyle,
label, mplobj=None):
"""Draw a line that also has markers.
If this isn't reimplemented by a renderer object, by default, it will
make a call to BOTH draw_line and draw_markers when both markerstyle
and linestyle are not None in the same Line2D object.
"""
if linestyle is not None:
self.draw_line(data, coordinates, linestyle, label, mplobj)
if markerstyle is not None:
self.draw_markers(data, coordinates, markerstyle, label, mplobj)
def draw_line(self, data, coordinates, style, label, mplobj=None):
"""
Draw a line. By default, draw the line via the draw_path() command.
Some renderers might wish to override this and provide more
fine-grained behavior.
In matplotlib, lines are generally created via the plt.plot() command,
though this command also can create marker collections.
Parameters
----------
data : array_like
A shape (N, 2) array of datapoints.
coordinates : string
A string code, which should be either 'data' for data coordinates,
or 'figure' for figure (pixel) coordinates.
style : dictionary
a dictionary specifying the appearance of the line.
mplobj : matplotlib object
the matplotlib plot element which generated this line
"""
pathcodes = ['M'] + (data.shape[0] - 1) * ['L']
pathstyle = dict(facecolor='none', **style)
pathstyle['edgecolor'] = pathstyle.pop('color')
pathstyle['edgewidth'] = pathstyle.pop('linewidth')
self.draw_path(data=data, coordinates=coordinates,
pathcodes=pathcodes, style=pathstyle, mplobj=mplobj)
@staticmethod
def _iter_path_collection(paths, path_transforms, offsets, styles):
"""Build an iterator over the elements of the path collection"""
N = max(len(paths), len(offsets))
if not path_transforms:
path_transforms = [np.eye(3)]
edgecolor = styles['edgecolor']
if np.size(edgecolor) == 0:
edgecolor = ['none']
facecolor = styles['facecolor']
if np.size(facecolor) == 0:
facecolor = ['none']
elements = [paths, path_transforms, offsets,
edgecolor, styles['linewidth'], facecolor]
it = itertools
return it.islice(py3k.zip(*py3k.map(it.cycle, elements)), N)
def draw_path_collection(self, paths, path_coordinates, path_transforms,
offsets, offset_coordinates, offset_order,
styles, mplobj=None):
"""
Draw a collection of paths. The paths, offsets, and styles are all
iterables, and the number of paths is max(len(paths), len(offsets)).
By default, this is implemented via multiple calls to the draw_path()
function. For efficiency, Renderers may choose to customize this
implementation.
Examples of path collections created by matplotlib are scatter plots,
histograms, contour plots, and many others.
Parameters
----------
paths : list
list of tuples, where each tuple has two elements:
(data, pathcodes). See draw_path() for a description of these.
path_coordinates: string
the coordinates code for the paths, which should be either
'data' for data coordinates, or 'figure' for figure (pixel)
coordinates.
path_transforms: array_like
an array of shape (*, 3, 3), giving a series of 2D Affine
transforms for the paths. These encode translations, rotations,
and scalings in the standard way.
offsets: array_like
An array of offsets of shape (N, 2)
offset_coordinates : string
the coordinates code for the offsets, which should be either
'data' for data coordinates, or 'figure' for figure (pixel)
coordinates.
offset_order : string
either "before" or "after". This specifies whether the offset
is applied before the path transform, or after. The matplotlib
backend equivalent is "before"->"data", "after"->"screen".
styles: dictionary
A dictionary in which each value is a list of length N, containing
the style(s) for the paths.
mplobj : matplotlib object
the matplotlib plot element which generated this collection
"""
if offset_order == "before":
raise NotImplementedError("offset before transform")
for tup in self._iter_path_collection(paths, path_transforms,
offsets, styles):
(path, path_transform, offset, ec, lw, fc) = tup
vertices, pathcodes = path
path_transform = transforms.Affine2D(path_transform)
vertices = path_transform.transform(vertices)
# This is a hack:
if path_coordinates == "figure":
path_coordinates = "points"
style = {"edgecolor": utils.color_to_hex(ec),
"facecolor": utils.color_to_hex(fc),
"edgewidth": lw,
"dasharray": "10,0",
"alpha": styles['alpha'],
"zorder": styles['zorder']}
self.draw_path(data=vertices, coordinates=path_coordinates,
pathcodes=pathcodes, style=style, offset=offset,
offset_coordinates=offset_coordinates,
mplobj=mplobj)
def draw_markers(self, data, coordinates, style, label, mplobj=None):
"""
Draw a set of markers. By default, this is done by repeatedly
calling draw_path(), but renderers should generally overload
this method to provide a more efficient implementation.
In matplotlib, markers are created using the plt.plot() command.
Parameters
----------
data : array_like
A shape (N, 2) array of datapoints.
coordinates : string
A string code, which should be either 'data' for data coordinates,
or 'figure' for figure (pixel) coordinates.
style : dictionary
a dictionary specifying the appearance of the markers.
mplobj : matplotlib object
the matplotlib plot element which generated this marker collection
"""
vertices, pathcodes = style['markerpath']
pathstyle = dict((key, style[key]) for key in ['alpha', 'edgecolor',
'facecolor', 'zorder',
'edgewidth'])
pathstyle['dasharray'] = "10,0"
for vertex in data:
self.draw_path(data=vertices, coordinates="points",
pathcodes=pathcodes, style=pathstyle,
offset=vertex, offset_coordinates=coordinates,
mplobj=mplobj)
def draw_text(self, text, position, coordinates, style,
text_type=None, mplobj=None):
"""
Draw text on the image.
Parameters
----------
text : string
The text to draw
position : tuple
The (x, y) position of the text
coordinates : string
A string code, which should be either 'data' for data coordinates,
or 'figure' for figure (pixel) coordinates.
style : dictionary
a dictionary specifying the appearance of the text.
text_type : string or None
if specified, a type of text such as "xlabel", "ylabel", "title"
mplobj : matplotlib object
the matplotlib plot element which generated this text
"""
raise NotImplementedError()
def draw_path(self, data, coordinates, pathcodes, style,
offset=None, offset_coordinates="data", mplobj=None):
"""
Draw a path.
In matplotlib, paths are created by filled regions, histograms,
contour plots, patches, etc.
Parameters
----------
data : array_like
A shape (N, 2) array of datapoints.
coordinates : string
A string code, which should be either 'data' for data coordinates,
'figure' for figure (pixel) coordinates, or "points" for raw
point coordinates (useful in conjunction with offsets, below).
pathcodes : list
A list of single-character SVG pathcodes associated with the data.
Path codes are one of ['M', 'm', 'L', 'l', 'Q', 'q', 'T', 't',
'S', 's', 'C', 'c', 'Z', 'z']
See the SVG specification for details. Note that some path codes
consume more than one datapoint (while 'Z' consumes none), so
in general, the length of the pathcodes list will not be the same
as that of the data array.
style : dictionary
a dictionary specifying the appearance of the line.
offset : list (optional)
the (x, y) offset of the path. If not given, no offset will
be used.
offset_coordinates : string (optional)
A string code, which should be either 'data' for data coordinates,
or 'figure' for figure (pixel) coordinates.
mplobj : matplotlib object
the matplotlib plot element which generated this path
"""
raise NotImplementedError()
def draw_image(self, imdata, extent, coordinates, style, mplobj=None):
"""
Draw an image.
Parameters
----------
imdata : string
base64 encoded png representation of the image
extent : list
the axes extent of the image: [xmin, xmax, ymin, ymax]
coordinates: string
A string code, which should be either 'data' for data coordinates,
or 'figure' for figure (pixel) coordinates.
style : dictionary
a dictionary specifying the appearance of the image
mplobj : matplotlib object
the matplotlib plot object which generated this image
"""
raise NotImplementedError()
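# A minimal sketch (not part of the original module, names illustrative) of
# how a concrete renderer plugs in: subclass Renderer and implement the
# primitive hooks above that raise NotImplementedError.
#
#   class LoggingRenderer(Renderer):
#       def draw_path(self, data, coordinates, pathcodes, style,
#                     offset=None, offset_coordinates="data", mplobj=None):
#           print("path: %d vertices in %s coords" % (len(data), coordinates))
#       def draw_text(self, text, position, coordinates, style,
#                     text_type=None, mplobj=None):
#           print("text %r at %s" % (text, position))
#       def draw_image(self, imdata, extent, coordinates, style, mplobj=None):
#           print("image with extent %s" % (extent,))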
|
bsd-3-clause
| -4,197,095,628,390,846,500 | 8,781,523,822,893,984,000 | 36.480418 | 78 | 0.5814 | false |
edison7500/dugong
|
apps/images/migrations/0001_initial.py
|
1
|
1994
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.17 on 2018-12-25 06:14
from __future__ import unicode_literals
import apps.images.handlers
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [("contenttypes", "0002_remove_content_type_name")]
operations = [
migrations.CreateModel(
name="Image",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
(
"file",
models.ImageField(
upload_to=apps.images.handlers.UUIDFilename("images/")
),
),
("description", models.CharField(blank=True, max_length=255)),
("is_cover", models.BooleanField(default=False)),
(
"object_id",
models.PositiveIntegerField(db_index=True, null=True),
),
(
"uploaded_at",
models.DateTimeField(default=django.utils.timezone.now),
),
(
"content_type",
models.ForeignKey(
null=True,
on_delete=django.db.models.deletion.CASCADE,
to="contenttypes.ContentType",
),
),
],
options={
"verbose_name": "photo",
"verbose_name_plural": "photos",
"db_table": "generic_image",
"ordering": ["-uploaded_at"],
"abstract": False,
},
)
]
|
gpl-3.0
| -6,324,386,594,268,345,000 | -4,208,346,311,055,290,400 | 30.650794 | 78 | 0.427282 | false |
bawaaaaah/Phoenix
|
tools/xml2sqlite.py
|
1
|
4763
|
'''
This file is used to combine multiple no-intro xml files into one
sqlite file. This is used for checksum lookups to set up the artwork scraper.
'''
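# Typical invocation (illustrative; the -i/-o flags are parsed in main() below):
#   python xml2sqlite.py -i ./no-intro-xml-dir -o gamedatabase.db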
import xml.etree.ElementTree as ET
import sqlite3
import os
import sys
import getopt
UNKNOWN_VALUE = ""
TABLE_VALUE = "NOINTRO"
VERSION_FILE = "console_database_version.txt"
def xml2sqlite(file, connection):
'''
    Opens an XML file and inserts the elements found into the SQLite database.
'''
with open(file, 'r') as open_file:
tree = ET.parse(open_file)
root = tree.getroot()
c = connection.cursor()
try:
for child in root: #(child.tag == game), (child.attrib == [name])
if child.tag != "game":
continue
gamename = UNKNOWN_VALUE
description = UNKNOWN_VALUE
romname = UNKNOWN_VALUE
size = UNKNOWN_VALUE
crc = UNKNOWN_VALUE
sha1 = UNKNOWN_VALUE
if "name" in child.attrib.keys():
gamename = child.attrib['name']
for subchild in child: #(subchild.tag == ["description", "rom"]), (subchild.attrib == [name, size, crc, sha1])
keys = subchild.attrib.keys()
if subchild.tag == "description":
print("[ADD] ", subchild.text)
description = subchild.text
if "name" in keys:
romname = subchild.attrib['name']
if "size" in keys:
size = subchild.attrib['size']
if "crc" in keys:
crc = subchild.attrib['crc']
if "sha1" in keys:
sha1 = subchild.attrib['sha1']
params = (gamename, description, romname, size, crc, sha1)
if "" in params:
continue;
c.execute("INSERT INTO " + TABLE_VALUE + " VALUES (?, ?, ?, ?, ?, ?)", params)
connection.commit()
c.close()
return True;
except sqlite3.Error as err:
for i in err.args:
print(i)
c.close()
return False
def create_version_file(files_list):
with open(VERSION_FILE, "w") as out_file:
for i in files_list:
out_file.write("{:}\n".format(i))
def main(argv):
files_list = list()
out_file = ""
try:
opts, args = getopt.getopt(argv,"hi:o:",["input=","output="])
except getopt.GetoptError:
print("xml2sqlite.py -i '[<input_file>, <input_file>]' -o <output_file>")
sys.exit(2)
for opt, arg in opts:
if opt == "-h":
print("\nxml2sqlite.py -i <input_directory> -o <output_file>")
print("\n-i, --input = Takes in directory to where the xml files are located")
print("-o, --output = Is a single file")
sys.exit()
elif opt in ("-i", "--input"):
if not os.path.isdir(arg):
print("Input directory does not exist.")
sys.exit(2)
files_list = [os.path.join(arg, i) for i in os.listdir(arg) if os.path.isfile(os.path.join(arg, i)) ]
elif opt in ("-o", "--output"):
out_file = arg
if len(files_list) == 0 or out_file == "":
print("args aren't correct")
sys.exit(2)
if os.path.isfile(out_file):
os.remove(out_file)
connection = sqlite3.connect(out_file)
c = connection.cursor()
c.execute("CREATE TABLE "
+ TABLE_VALUE
+ " (gamename TEXT, description TEXT, romname TEXT, size TEXT, crc TEXT, sha1 TEXT)")
print("Generating database...")
results = list()
for i in files_list:
if ".xml" not in i:
print("\nSkipping ", i, "\n")
continue
status = xml2sqlite(i, connection)
if status:
results.append("[OK] {:}".format(i))
else:
results.append("[Error] {:}".format(i))
c.close()
create_version_file(results)
print()
for i in results:
print(i)
def test_read():
'''
    This is used to test the created database with single lookups.
'''
out_file = os.path.dirname(os.path.realpath(__file__)) + "/gamedatabase.db"
connection = sqlite3.connect(out_file)
c = connection.cursor()
c.execute("SELECT * FROM " + TABLE_VALUE)
for row in c:
print(row)
c.close()
if __name__ == "__main__":
main(sys.argv[1:])
|
gpl-2.0
| -945,065,449,372,287,100 | -2,464,423,542,722,176,000 | 31.401361 | 126 | 0.498425 | false |
xiandiancloud/edx-platform
|
lms/djangoapps/certificates/migrations/0002_auto__add_field_generatedcertificate_download_url.py
|
188
|
6807
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'GeneratedCertificate.download_url'
db.add_column('certificates_generatedcertificate', 'download_url',
self.gf('django.db.models.fields.CharField')(max_length=128, null=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'GeneratedCertificate.download_url'
db.delete_column('certificates_generatedcertificate', 'download_url')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'about': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'avatar_type': ('django.db.models.fields.CharField', [], {'default': "'n'", 'max_length': '1'}),
'bronze': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'consecutive_days_visit_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'country': ('django_countries.fields.CountryField', [], {'max_length': '2', 'blank': 'True'}),
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_of_birth': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'display_tag_filter_strategy': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'email_isvalid': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'email_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'email_tag_filter_strategy': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'gold': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'gravatar': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ignored_tags': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'interesting_tags': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'location': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'new_response_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'questions_per_page': ('django.db.models.fields.SmallIntegerField', [], {'default': '10'}),
'real_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'reputation': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'seen_response_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'show_country': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'silver': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'w'", 'max_length': '2'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
},
'certificates.generatedcertificate': {
'Meta': {'object_name': 'GeneratedCertificate'},
'certificate_id': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'download_url': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['certificates']
|
agpl-3.0
| -2,573,561,553,004,483,000 | -3,397,872,523,385,458,700 | 73.802198 | 182 | 0.563684 | false |
chvrga/outdoor-explorer
|
java/play-1.4.4/python/Lib/xml/dom/minicompat.py
|
7
|
3439
|
"""Python version compatibility support for minidom."""
# This module should only be imported using "import *".
#
# The following names are defined:
#
# NodeList -- lightest possible NodeList implementation
#
# EmptyNodeList -- lightest possible NodeList that is guarateed to
# remain empty (immutable)
#
# StringTypes -- tuple of defined string types
#
# defproperty -- function used in conjunction with GetattrMagic;
# using these together is needed to make them work
# as efficiently as possible in both Python 2.2+
# and older versions. For example:
#
# class MyClass(GetattrMagic):
# def _get_myattr(self):
# return something
#
# defproperty(MyClass, "myattr",
# "return some value")
#
# For Python 2.2 and newer, this will construct a
# property object on the class, which avoids
# needing to override __getattr__(). It will only
# work for read-only attributes.
#
# For older versions of Python, inheriting from
# GetattrMagic will use the traditional
# __getattr__() hackery to achieve the same effect,
# but less efficiently.
#
# defproperty() should be used for each version of
# the relevant _get_<property>() function.
__all__ = ["NodeList", "EmptyNodeList", "StringTypes", "defproperty"]
import xml.dom
try:
unicode
except NameError:
StringTypes = type(''),
else:
StringTypes = type(''), type(unicode(''))
class NodeList(list):
__slots__ = ()
def item(self, index):
if 0 <= index < len(self):
return self[index]
def _get_length(self):
return len(self)
def _set_length(self, value):
raise xml.dom.NoModificationAllowedErr(
"attempt to modify read-only attribute 'length'")
length = property(_get_length, _set_length,
doc="The number of nodes in the NodeList.")
def __getstate__(self):
return list(self)
def __setstate__(self, state):
self[:] = state
class EmptyNodeList(tuple):
__slots__ = ()
def __add__(self, other):
NL = NodeList()
NL.extend(other)
return NL
def __radd__(self, other):
NL = NodeList()
NL.extend(other)
return NL
def item(self, index):
return None
def _get_length(self):
return 0
def _set_length(self, value):
raise xml.dom.NoModificationAllowedErr(
"attempt to modify read-only attribute 'length'")
length = property(_get_length, _set_length,
doc="The number of nodes in the NodeList.")
def defproperty(klass, name, doc):
get = getattr(klass, ("_get_" + name)).im_func
def set(self, value, name=name):
raise xml.dom.NoModificationAllowedErr(
"attempt to modify read-only attribute " + repr(name))
assert not hasattr(klass, "_set_" + name), \
"expected not to find _set_" + name
prop = property(get, set, doc=doc)
setattr(klass, name, prop)
|
mit
| -3,473,143,189,785,212,400 | 3,429,667,984,579,515,400 | 29.263636 | 70 | 0.540273 | false |
codelucas/shorten.tv
|
bg.py
|
1
|
2146
|
#!/usr/bin/env python2.7
"""
Here is shorten.tv's main background task to re-load
and cache popular youtube videos so users have less
wait time when using the webapp.
"""
import requests
import string
import backend
import urllib
letters = list(string.lowercase) # a, b, c ... z
popular = ["Rihanna", "Usher", "Katy Perry", "Eminem", "Shakira",
"Taylor Swift", "Akon", "Lady Gaga", "Paramore", "Jay Z",
"Led Zepplin", "Guns N Roses", "Aerosmith", "Borat",
"Fallout Boy", "Blink 182", "Justin Bieber", "Drake"]
searches = letters + popular
numb_thumbs = "5"
numb_queries = 5
def encodeURIComponent(input_str):
"""
Python equivalent of javascript's encodeURIComponent
"""
return urllib.quote(unicode(input_str).encode('utf-8'), safe='~()*!.\'')
def top_query(term):
"""
    Retrieves the top Google autocomplete API query.
"""
url = "http://suggestqueries.google.com/complete/search?" + \
"hl=en&ds=yt&client=youtube&json=t&q=" + \
encodeURIComponent(term) + "&cp=1"
results = requests.get(url).json()
queries = results[1][:5]
print "Autocomplete results for", results[0], "are", queries
return queries[0] # top query
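# For example, top_query("riha") would typically return something like
# "rihanna"; the actual suggestion depends on Google's live autocomplete data.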
def youtube_top_five(query):
"""
    Retrieves the top five YouTube video ids based on
    a Google autocomplete query.
"""
url = "http://gdata.youtube.com/feeds/api/videos?q=" + \
encodeURIComponent(query) + "&format=5&max-results=" + \
numb_thumbs + "&v=2&alt=jsonc"
resp = requests.get(url).json()
data = resp["data"]
items = data["items"]
ids = [video["id"] for video in items]
return ids
if __name__ == '__main__':
for search in searches:
query = top_query(search)
ids = youtube_top_five(query)
for yt_id in ids:
clips, duration = backend.check_youtube(yt_id)
yt_dat = {'hotclips': clips, 'duration': duration}
backend.redis.setex(yt_id, yt_dat, backend.HOTCLIP_CACHE_TIME)
print 'Summarization data cached for id', yt_id, \
'~~~~ hotclips:', clips, 'duration:', duration
|
mit
| 1,835,887,741,974,329,600 | -3,554,947,924,737,621,000 | 28.805556 | 76 | 0.609972 | false |
knxd/PyKNyX
|
pyknyx/stack/multicastSocket.py
|
2
|
5201
|
# -*- coding: utf-8 -*-
""" Python KNX framework
License
=======
- B{PyKNyX} (U{https://github.com/knxd/pyknyx}) is Copyright:
- © 2016-2017 Matthias Urlichs
- PyKNyX is a fork of pKNyX
- © 2013-2015 Frédéric Mantegazza
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
or see:
- U{http://www.gnu.org/licenses/gpl.html}
Module purpose
==============
UDP Multicast support.
Implements
==========
- B{McastSockValueError}
- B{MulticastSocketBase}
- B{MulticastSocketReceive}
- B{MulticastSocketTransmit}
Documentation
=============
See U{http://www.tldp.org/HOWTO/Multicast-HOWTO.html}
Usage
=====
@author: Frédéric Mantegazza
@author: Jakub Wroniecki
@copyright: (C) 2013-2015 Frédéric Mantegazza
@copyright: (C) 2009 Jakub Wroniecki, STANSAT
@license: GPL
"""
import socket
import struct
import six
from pyknyx.common.exception import PyKNyXValueError
from pyknyx.services.logger import logging; logger = logging.getLogger(__name__)
class McastSockValueError(PyKNyXValueError):
"""
"""
class MulticastSocketBase(socket.socket):
""" Multicast socket
"""
def __init__(self, localAddr, localPort, ttl=32, loop=1):
""" Init the multicast socket base class
@param localAddr: IP address used as local address
@type: localAddr: str
@param localPort: port used as local port
@type: localPort: int
@param ttl: 0 Restricted to the same host (won't be output by any interface)
1 Restricted to the same subnet (won't be forwarded by a router)
<32 Restricted to the same site, organization or department
<64 Restricted to the same region
<128 Restricted to the same continent
<255 Unrestricted in scope. Global
@type ttl: int
@param loop:
@type loop: int
"""
super(MulticastSocketBase, self).__init__(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
self._localAddr = localAddr
self._localPort = localPort
self._ttl= ttl
self._loop = loop
self.setsockopt(socket.SOL_IP, socket.IP_MULTICAST_TTL, ttl)
self.setsockopt(socket.SOL_IP, socket.IP_MULTICAST_LOOP, loop)
self._bind()
def _bind(self):
"""
"""
raise NotImplementedError
@property
def localAddress(self):
return self._localAddr
@property
def localPort(self):
return self._localPort
class MulticastSocketReceive(MulticastSocketBase):
"""
"""
def __init__(self, localAddr, localPort, mcastAddr, mcastPort, timeout=1, ttl=32, loop=1):
"""
"""
multicast = six.byte2int(socket.inet_aton(mcastAddr)) in range(224, 240)
if not multicast:
raise McastSockValueError("address is not a multicast destination (%s)" % repr(mcastAddr))
self._mcastAddr = mcastAddr
super(MulticastSocketReceive, self).__init__(localAddr, mcastPort, ttl, loop)
self.setsockopt(socket.SOL_IP, socket.IP_MULTICAST_IF, socket.inet_aton(self._localAddr))
value = struct.pack("=4sl", socket.inet_aton(mcastAddr), socket.INADDR_ANY)
self.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, value)
self.settimeout(timeout)
def _bind(self):
"""
@todo: use mcastAddr, instead of ""?
"""
self.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        try:
            # SO_REUSEPORT is not available on every platform
            self.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
        except (AttributeError, socket.error):
            logger.exception("MulticastSocketReceive._bind(): system doesn't support SO_REUSEPORT")
self.bind(("", self._localPort))
def receive(self):
"""
"""
return self.recvfrom(1024)
class MulticastSocketTransmit(MulticastSocketBase):
"""
"""
def __init__(self, localAddr, localPort, mcastAddr, mcastPort, ttl=32, loop=1):
"""
"""
super(MulticastSocketTransmit, self).__init__(localAddr, localPort, ttl, loop)
self._mcastAddr = mcastAddr
self._mcastPort = mcastPort
def _bind(self):
"""
"""
self.bind((self._localAddr, self._localPort))
if self._localPort == 0:
self._localPort = self.getsockname()[1]
def transmit(self, data):
"""
"""
l = self.sendto(data, (self._mcastAddr, self._mcastPort))
if l > 0 and l < len(data):
            raise IOError("partial transmit: %d of %d to %s" % (l, len(data), self))
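# --- Hedged usage sketch (added for illustration; not part of the original
# module). It shows how the receive and transmit sockets above are typically
# paired. 224.0.23.12:3671 is the standard KNXnet/IP multicast group; the
# local interface address "192.168.1.10" is only a placeholder assumption.
if __name__ == "__main__":
    receiveSock = MulticastSocketReceive("192.168.1.10", 0, "224.0.23.12", 3671, timeout=1)
    transmitSock = MulticastSocketTransmit("192.168.1.10", 0, "224.0.23.12", 3671)
    transmitSock.transmit(b"\x06\x10\x05\x30")  # arbitrary example payload
    try:
        data, (fromAddr, fromPort) = receiveSock.receive()
        print("received %d bytes from %s:%d" % (len(data), fromAddr, fromPort))
    except socket.timeout:
        print("no multicast frame received within the timeout")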
|
gpl-3.0
| 979,176,448,004,965,500 | -2,838,921,451,733,641,000 | 27.222826 | 104 | 0.636819 | false |
shingonoide/odoo
|
openerp/tools/import_email.py
|
337
|
6376
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import os, sys
import re
import smtplib
import email, mimetypes
from email.header import decode_header
from email.mime.text import MIMEText
import xmlrpclib
warn_msg = """
Bonjour,
Le message avec le sujet "%s" n'a pu être archivé dans l'ERP.
""".decode('utf-8')
class EmailParser(object):
def __init__(self, headers, dispatcher):
self.headers = headers
self.dispatcher = dispatcher
def parse(self, msg):
        self.dispatcher((self.headers, msg))
class CommandDispatcher(object):
def __init__(self, receiver):
self.receiver = receiver
def __call__(self, request):
return self.receiver(request)
class RPCProxy(object):
def __init__(self, uid, passwd, host='localhost', port=8069, path='object'):
self.rpc = xmlrpclib.ServerProxy('http://%s:%s/%s' % (host, port, path))
self.user_id = uid
self.passwd = passwd
def __call__(self, request):
return self.rpc.execute(self.user_id, self.passwd, *request)
class ReceiverEmail2Event(object):
email_re = re.compile(r"""
([a-zA-Z][\w\.-]*[a-zA-Z0-9] # username part
@ # mandatory @ sign
[a-zA-Z0-9][\w\.-]* # domain must start with a letter
\.
[a-z]{2,3} # TLD
)
""", re.VERBOSE)
project_re = re.compile(r"^ *\[?(\d{4}\.?\d{0,3})\]?", re.UNICODE)
def __init__(self, rpc):
self.rpc = rpc
def get_addresses(self, headers, msg):
hcontent = ''
for header in [h for h in headers if msg.has_key(h)]:
hcontent += msg[header]
return self.email_re.findall(hcontent)
def get_partners(self, headers, msg):
alladdresses = self.get_addresses(headers, msg)
address_ids = self.rpc(('res.partner', 'search', [('email', 'in', alladdresses)]))
addresses = self.rpc(('res.partner', 'read', address_ids))
return [x['partner_id'][0] for x in addresses]
def __call__(self, request):
headers, msg = request
partners = self.get_partners(headers, msg)
subject = u''
for string, charset in decode_header(msg['Subject']):
if charset:
subject += string.decode(charset)
else:
subject += unicode(string)
if partners:
self.save_mail(msg, subject, partners)
else:
warning = MIMEText((warn_msg % (subject,)).encode('utf-8'), 'plain', 'utf-8')
warning['Subject'] = 'Message de OpenERP'
warning['From'] = '[email protected]'
warning['To'] = msg['From']
s = smtplib.SMTP()
s.connect()
s.sendmail('[email protected]', self.email_re.findall(msg['From']), warning.as_string())
s.close()
if msg.is_multipart():
for message in [m for m in msg.get_payload() if m.get_content_type() == 'message/rfc822']:
self((headers, message.get_payload()[0]))
def save_mail(self, msg, subject, partners):
counter, description = 1, u''
if msg.is_multipart():
for part in msg.get_payload():
stockdir = os.path.join('emails', msg['Message-Id'][1:-1])
newdir = os.path.join('/tmp', stockdir)
filename = part.get_filename()
if not filename:
ext = mimetypes.guess_extension(part.get_type())
if not ext:
ext = '.bin'
filename = 'part-%03d%s' % (counter, ext)
if part.get_content_maintype() == 'multipart':
continue
elif part.get_content_maintype() == 'text':
if part.get_content_subtype() == 'plain':
description += part.get_payload(decode=1).decode(part.get_charsets()[0])
description += u'\n\nVous trouverez les éventuels fichiers dans le répertoire: %s' % stockdir
continue
else:
description += u'\n\nCe message est en "%s", vous trouverez ce texte dans le répertoire: %s' % (part.get_content_type(), stockdir)
elif part.get_content_type() == 'message/rfc822':
continue
if not os.path.isdir(newdir):
os.mkdir(newdir)
counter += 1
fd = file(os.path.join(newdir, filename), 'w')
fd.write(part.get_payload(decode=1))
fd.close()
else:
description = msg.get_payload(decode=1).decode(msg.get_charsets()[0])
project = self.project_re.search(subject)
if project:
project = project.groups()[0]
else:
project = ''
for partner in partners:
self.rpc(('res.partner.event', 'create', {'name' : subject, 'partner_id' : partner, 'description' : description, 'project' : project}))
if __name__ == '__main__':
rpc_dispatcher = CommandDispatcher(RPCProxy(4, 'admin'))
dispatcher = CommandDispatcher(ReceiverEmail2Event(rpc_dispatcher))
parser = EmailParser(['To', 'Cc', 'From'], dispatcher)
parser.parse(email.message_from_file(sys.stdin))
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
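# --- Hedged usage sketch (added for illustration; not part of the original
# script). The parser expects a raw RFC-2822 message on stdin, so it is
# normally wired to a mail delivery agent; the procmail recipe below is only
# an assumed example, with the path to this script as a placeholder.
#
#   :0 c
#   | python /path/to/import_email.py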
|
agpl-3.0
| -776,345,507,677,271,000 | 8,505,793,779,916,086,000 | 36.698225 | 154 | 0.549521 | false |
google-research/language
|
language/xsp/data_preprocessing/compute_asql_coverage_spider.py
|
1
|
3106
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
r"""Compute coverage for Abstract SQL for Spider.
Example usage:
${PATH_TO_BINARY} \
--spider_examples_json=${SPIDER_DIR}/train_spider.json \
--spider_tables_json=${SPIDER_DIR}/tables.json \
--alsologtostderr
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
from absl import app
from absl import flags
from language.xsp.data_preprocessing import abstract_sql
from language.xsp.data_preprocessing import abstract_sql_converters
FLAGS = flags.FLAGS
flags.DEFINE_string('spider_examples_json', '', 'Path to Spider json examples')
flags.DEFINE_string('spider_tables_json', '', 'Path to Spider json tables')
def _load_json(filename):
with open(filename) as json_file:
return json.load(json_file)
def compute_spider_coverage(spider_examples_json, spider_tables_json):
"""Prints out statistics for asql conversions."""
table_json = _load_json(spider_tables_json)
  # Map of database id to a list of ForeignKeyRelation tuples.
foreign_key_map = abstract_sql_converters.spider_foreign_keys_map(table_json)
table_schema_map = abstract_sql_converters.spider_table_schemas_map(
table_json)
examples = _load_json(spider_examples_json)
num_examples = 0
num_conversion_failures = 0
  num_reconstruction_failures = 0
for example in examples:
num_examples += 1
print('Parsing example number %s: %s' % (num_examples, example['query']))
gold_sql_query = example['query']
foreign_keys = foreign_key_map[example['db_id']]
table_schema = table_schema_map[example['db_id']]
try:
sql_spans = abstract_sql.sql_to_sql_spans(gold_sql_query, table_schema)
sql_spans = abstract_sql.replace_from_clause(sql_spans)
except abstract_sql.UnsupportedSqlError as e:
print('Error converting:\n%s\n%s' % (gold_sql_query, e))
num_conversion_failures += 1
else:
try:
sql_spans = abstract_sql.restore_from_clause(sql_spans, foreign_keys)
except abstract_sql.UnsupportedSqlError as e:
        print('Error reconstructing:\n%s\n%s' % (gold_sql_query, e))
        num_reconstruction_failures += 1
print('Examples: %s' % num_examples)
print('Failed conversions: %s' % num_conversion_failures)
  print('Failed reconstructions: %s' % num_reconstruction_failures)
def main(unused_argv):
compute_spider_coverage(FLAGS.spider_examples_json, FLAGS.spider_tables_json)
if __name__ == '__main__':
app.run(main)
|
apache-2.0
| 7,039,120,731,607,400,000 | -4,233,088,448,339,561,000 | 33.511111 | 79 | 0.720541 | false |
io7m/jcalcium
|
io7m-jcalcium-blender/src/main/resources/__init__.py
|
1
|
3191
|
#
# Copyright © 2016 <[email protected]> http://io7m.com
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
bl_info = {
"name": "Calcium JSON format",
"author": "io7m",
"version": (0, 1, 0),
"blender": (2, 66, 0),
"location": "File > Export > Calcium JSON (.csj)",
"description": "Export armatures to Calcium format",
"warning": "",
"wiki_url": "",
"tracker_url": "https://github.com/io7m/jcalcium/issues",
"category": "Import-Export"
}
import bpy
import bpy_extras.io_utils
import mathutils
CalciumOrientationHelper = bpy_extras.io_utils.orientation_helper_factory("CalciumOrientationHelper", axis_forward='-Z', axis_up='Y')
class ExportCalcium(bpy.types.Operator, bpy_extras.io_utils.ExportHelper, CalciumOrientationHelper):
bl_idname = "export_scene.csj"
bl_label = "Export Calcium"
# The filename_ext field is accessed by ExportHelper.
filename_ext = ".csj"
filepath = bpy.props.StringProperty(subtype='FILE_PATH')
verbose = bpy.props.BoolProperty(name="Verbose logging",description="Enable verbose debug logging",default=True)
def execute(self, context):
self.filepath = bpy.path.ensure_ext(self.filepath, ".csj")
args = {}
args['verbose'] = self.verbose
assert type(args['verbose']) == bool
args['conversion_matrix'] = bpy_extras.io_utils.axis_conversion(to_forward=self.axis_forward, to_up=self.axis_up).to_4x4()
        assert type(args['conversion_matrix']) == mathutils.Matrix
from . import export
e = export.CalciumExporter(args)
try:
e.write(self.filepath)
except export.CalciumNoArmatureSelected as ex:
self.report({'ERROR'}, ex.value)
except export.CalciumTooManyArmaturesSelected as ex:
self.report({'ERROR'}, ex.value)
except export.CalciumExportFailed as ex:
self.report({'ERROR'}, ex.value)
#endtry
return {'FINISHED'}
#end
def invoke(self, context, event):
if not self.filepath:
self.filepath = bpy.path.ensure_ext(bpy.data.filepath, ".csj")
context.window_manager.fileselect_add(self)
return {'RUNNING_MODAL'}
#end
#endclass
def menuFunction(self, context):
self.layout.operator(ExportCalcium.bl_idname, text="Calcium JSON (.csj)")
#end
def register():
bpy.utils.register_class(ExportCalcium)
bpy.types.INFO_MT_file_export.append(menuFunction)
#end
def unregister():
bpy.utils.unregister_class(ExportCalcium)
bpy.types.INFO_MT_file_export.remove(menuFunction)
#end
if __name__ == "__main__":
register()
#endif
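# --- Hedged usage sketch (added for illustration; not part of the original
# add-on). Once the add-on is registered, the exporter can be driven from
# Blender's python console roughly like this; the output path is a placeholder.
#
#   bpy.ops.export_scene.csj(filepath="/tmp/armature.csj", verbose=True)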
|
isc
| 7,109,596,851,445,676,000 | -7,420,023,086,650,390,000 | 32.229167 | 133 | 0.708464 | false |
rbarlow/pulp
|
client_lib/pulp/client/commands/repo/history.py
|
17
|
7546
|
"""
Commands for showing a repository's sync and publish history
"""
from gettext import gettext as _
from pulp.client.commands.options import OPTION_REPO_ID
from pulp.client.extensions.extensions import PulpCliOption, PulpCliFlag, PulpCliCommand
from pulp.client import validators
# The default limit on the number of history entries to display
REPO_HISTORY_LIMIT = 5
# Descriptions
DESC_DETAILS = _('if specified, all history information is displayed')
DESC_DISTRIBUTOR_ID = _('the distributor id to display history entries for')
DESC_END_DATE = _('only return entries that occur on or before the given date in iso8601 format'
' (yyyy-mm-ddThh:mm:ssZ)')
DESC_LIMIT = _(
'limits displayed history entries to the given amount (must be greater than zero); the default'
' is %(limit)s' % {'limit': REPO_HISTORY_LIMIT})
DESC_PUBLISH_HISTORY = _('displays the history of publish operations on a repository')
DESC_SORT = _('indicates the sort direction ("ascending" or "descending") based on the timestamp')
DESC_SYNC_HISTORY = _('displays the history of sync operations on a repository')
DESC_START_DATE = _('only return entries that occur on or after the given date in iso8601 format'
' (yyyy-mm-ddThh:mm:ssZ)')
# Options
OPTION_END_DATE = PulpCliOption('--end-date', DESC_END_DATE, required=False,
validate_func=validators.iso8601_datetime_validator)
OPTION_LIMIT = PulpCliOption('--limit', DESC_LIMIT, required=False,
validate_func=validators.positive_int_validator)
OPTION_SORT = PulpCliOption('--sort', DESC_SORT, required=False)
OPTION_DISTRIBUTOR_ID = PulpCliOption('--distributor-id', DESC_DISTRIBUTOR_ID, required=True,
validate_func=validators.id_validator)
OPTION_START_DATE = PulpCliOption('--start-date', DESC_START_DATE, required=False,
validate_func=validators.iso8601_datetime_validator)
# Flags
FLAG_DETAILS = PulpCliFlag('--details', DESC_DETAILS, aliases='-d')
class SyncHistoryCommand(PulpCliCommand):
"""
Displays the sync history of a given repository
"""
def __init__(self, context, name='sync', description=DESC_SYNC_HISTORY):
"""
:param context: The client context used to interact with the client framework and server
:type context: pulp.client.extensions.core.ClientContext
:param name: The name of the command in the history section
:type name: str
:param description: The description to use in the cli
:type description: str
"""
# The context is used to access the server and prompt.
self.context = context
super(SyncHistoryCommand, self).__init__(name, description, self.run)
self.add_option(OPTION_REPO_ID)
self.add_option(OPTION_LIMIT)
self.add_option(OPTION_SORT)
self.add_option(OPTION_START_DATE)
self.add_option(OPTION_END_DATE)
self.add_flag(FLAG_DETAILS)
self.fields_to_display = ['repo_id', 'result', 'started', 'completed', 'added_count',
'removed_count', 'updated_count']
def run(self, **user_input):
"""
The action to take when the sync history command is executed
:param user_input: the options and flags provided by the user
:type user_input: dict
"""
# Collect input
repo_id = user_input[OPTION_REPO_ID.keyword]
if user_input[OPTION_LIMIT.keyword] is not None:
limit = int(user_input[OPTION_LIMIT.keyword])
else:
limit = REPO_HISTORY_LIMIT
start_date = user_input[OPTION_START_DATE.keyword]
end_date = user_input[OPTION_END_DATE.keyword]
sort = user_input[OPTION_SORT.keyword]
details = user_input[FLAG_DETAILS.keyword]
# Request the sync history from the server
sync_list = self.context.server.repo_history.sync_history(repo_id, limit, sort, start_date,
end_date).response_body
# Filter the fields to show and define the order in which they are displayed
if details is True:
self.fields_to_display.append('summary')
self.fields_to_display.append('details')
filters = order = self.fields_to_display
# Render results
title = _('Sync History [ %(repo)s ]') % {'repo': repo_id}
self.context.prompt.render_title(title)
self.context.prompt.render_document_list(sync_list, filters=filters, order=order)
class PublishHistoryCommand(PulpCliCommand):
"""
Displays the publish history of a given repository and publisher
"""
def __init__(self, context, name='publish', description=DESC_PUBLISH_HISTORY):
"""
:param context: The client context used to interact with the client framework and server
:type context: pulp.client.extensions.core.ClientContext
:param name: The name of the command in the history section
:type name: str
:param description: The description to use in the cli
:type description: str
"""
# The context is used to access the server and prompt.
self.context = context
super(PublishHistoryCommand, self).__init__(name, description, self.run)
# History is given for a repo id and distributor id pair, so these are mandatory
self.add_option(OPTION_REPO_ID)
self.add_option(OPTION_DISTRIBUTOR_ID)
self.add_option(OPTION_LIMIT)
self.add_option(OPTION_SORT)
self.add_option(OPTION_START_DATE)
self.add_option(OPTION_END_DATE)
self.add_flag(FLAG_DETAILS)
# Set the default fields to display
self.fields_to_display = ['repo_id', 'distributor_id', 'result', 'started', 'completed']
def run(self, **user_input):
"""
The action to take when the sync history command is executed
:param user_input: the options and flags provided by the user
:type user_input: dict
"""
# Collect input
repo_id = user_input[OPTION_REPO_ID.keyword]
distributor_id = user_input[OPTION_DISTRIBUTOR_ID.keyword]
if user_input[OPTION_LIMIT.keyword] is not None:
limit = int(user_input[OPTION_LIMIT.keyword])
else:
limit = REPO_HISTORY_LIMIT
start_date = user_input[OPTION_START_DATE.keyword]
end_date = user_input[OPTION_END_DATE.keyword]
sort = user_input[OPTION_SORT.keyword]
details = user_input[FLAG_DETAILS.keyword]
# Request the publish history from the server
publish_list = self.context.server.repo_history.publish_history(repo_id, distributor_id,
limit, sort, start_date,
end_date)
publish_list = publish_list.response_body
# Filter the fields to show and define the order in which they are displayed
if details is True:
self.fields_to_display.append('summary')
self.fields_to_display.append('details')
filters = order = self.fields_to_display
# Render results
title = _('Publish History [ %(repo)s ]') % {'repo': repo_id}
self.context.prompt.render_title(title)
self.context.prompt.render_document_list(publish_list, filters=filters, order=order)
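# --- Hedged usage sketch (added for illustration; not part of the original
# module). A Pulp CLI extension would typically attach these commands to a
# section roughly like this; the section name and the attributes assumed on
# "context" (context.cli) follow the usual extension conventions and are not
# taken from this file.
def initialize(context):
    history_section = context.cli.create_section('history', _('repository sync and publish history'))
    history_section.add_command(SyncHistoryCommand(context))
    history_section.add_command(PublishHistoryCommand(context))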
|
gpl-2.0
| 4,035,282,380,748,270,000 | -4,158,968,428,160,569,300 | 41.393258 | 99 | 0.637159 | false |
nathanaevitas/odoo
|
openerp/addons/hr_recruitment/res_config.py
|
352
|
3627
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (C) 2004-Today OpenERP S.A. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import SUPERUSER_ID
from openerp.osv import fields, osv
class hr_applicant_settings(osv.TransientModel):
_name = 'hr.config.settings'
_inherit = ['hr.config.settings', 'fetchmail.config.settings']
_columns = {
'module_document': fields.boolean('Allow the automatic indexation of resumes',
help='Manage your CV\'s and motivation letter related to all applicants.\n'
'-This installs the module document_ftp. This will install the knowledge management module in order to allow you to search using specific keywords through the content of all documents (PDF, .DOCx...)'),
'alias_prefix': fields.char('Default Alias Name for Jobs'),
'alias_domain': fields.char('Alias Domain'),
}
_defaults = {
'alias_domain': lambda self, cr, uid, context: self.pool['mail.alias']._get_alias_domain(cr, SUPERUSER_ID, [1], None, None)[1],
}
def _find_default_job_alias_id(self, cr, uid, context=None):
alias_id = self.pool['ir.model.data'].xmlid_to_res_id(cr, uid, 'hr_recruitment.mail_alias_jobs')
if not alias_id:
alias_ids = self.pool['mail.alias'].search(
cr, uid, [
('alias_model_id.model', '=', 'hr.applicant'),
('alias_force_thread_id', '=', False),
('alias_parent_model_id.model', '=', 'hr.job'),
('alias_parent_thread_id', '=', False),
('alias_defaults', '=', '{}')
], context=context)
alias_id = alias_ids and alias_ids[0] or False
return alias_id
def get_default_alias_prefix(self, cr, uid, ids, context=None):
alias_name = False
alias_id = self._find_default_job_alias_id(cr, uid, context=context)
if alias_id:
alias_name = self.pool['mail.alias'].browse(cr, uid, alias_id, context=context).alias_name
return {'alias_prefix': alias_name}
def set_default_alias_prefix(self, cr, uid, ids, context=None):
mail_alias = self.pool.get('mail.alias')
for record in self.browse(cr, uid, ids, context=context):
alias_id = self._find_default_job_alias_id(cr, uid, context=context)
if not alias_id:
create_ctx = dict(context, alias_model_name='hr.applicant', alias_parent_model_name='hr.job')
alias_id = self.pool['mail.alias'].create(cr, uid, {'alias_name': record.alias_prefix}, context=create_ctx)
else:
mail_alias.write(cr, uid, alias_id, {'alias_name': record.alias_prefix}, context=context)
return True
|
agpl-3.0
| 6,843,545,876,884,332,000 | 1,013,212,360,465,126,100 | 49.375 | 221 | 0.600221 | false |
joelfrederico/SciSalt
|
scisalt/qt/mplwidget.py
|
1
|
13557
|
from PyQt4 import QtGui
from PyQt4 import QtCore
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as _FigureCanvas
from matplotlib.backends.backend_qt4 import NavigationToolbar2QT as _NavigationToolbar
import matplotlib as _mpl
import numpy as _np
from .Rectangle import Rectangle
import pdb
import traceback
import logging
loggerlevel = logging.DEBUG
logger = logging.getLogger(__name__)
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Slider_and_Text(QtGui.QWidget):
valueChanged = QtCore.pyqtSignal(int)
sliderReleased = QtCore.pyqtSignal(int)
def __init__(self, parent=None):
QtGui.QWidget.__init__(self)
self.setMaximumHeight(40)
# Enable tracking by default
self._tracking = True
self.hLayout = QtGui.QHBoxLayout()
self.slider = QtGui.QSlider()
self.leftbutton = QtGui.QPushButton()
self.leftbutton.setText("<")
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Minimum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.leftbutton.sizePolicy().hasHeightForWidth())
# self.leftbutton.setSizePolicy(sizePolicy)
self.leftbutton.clicked.connect(self._subone)
self.rightbutton = QtGui.QPushButton()
self.rightbutton.setText(">")
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Minimum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.rightbutton.sizePolicy().hasHeightForWidth())
# self.rightbutton.setSizePolicy(sizePolicy)
self.rightbutton.clicked.connect(self._addone)
self.v = QtGui.QIntValidator()
self.box = QtGui.QLineEdit()
self.box.setValidator(self.v)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Minimum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.box.sizePolicy().hasHeightForWidth())
# self.box.setSizePolicy(sizePolicy)
self.hLayout.addWidget(self.leftbutton)
self.hLayout.addWidget(self.slider)
self.hLayout.addWidget(self.box)
self.hLayout.addWidget(self.rightbutton)
self.setLayout(self.hLayout)
self.slider.valueChanged.connect(self._sliderChanged)
self.box.editingFinished.connect(self._textChanged)
self.setOrientation(QtCore.Qt.Horizontal)
# Connect release so tracking works as expected
self.slider.sliderReleased.connect(self._sliderReleased)
def _addone(self):
self.value = self.value + 1
self.valueChanged.emit(self.value)
def _subone(self):
self.value = self.value - 1
self.valueChanged.emit(self.value)
def _sliderReleased(self):
print('Released')
        self.sliderReleased.emit(self.slider.value())
def setTracking(self, val):
print('Tracking set to {}'.format(val))
self._tracking = val
def setMaximum(self, val):
self.slider.setMaximum(val)
self.v.setRange(self.slider.minimum(), self.slider.maximum())
self.box.setValidator(self.v)
def setMinimum(self, val):
self.slider.setMinimum(val)
self.v.setRange(self.slider.minimum(), self.slider.maximum())
self.box.setValidator(self.v)
def _sliderChanged(self, val):
self.box.setText(str(val))
if self._tracking:
try:
self.slider.sliderReleased.disconnect()
except:
pass
self.valueChanged.emit(val)
else:
try:
self.slider.sliderReleased.disconnect()
except:
pass
self.slider.sliderReleased.connect(self._sliderChanged_notracking)
def _sliderChanged_notracking(self):
val = self.slider.value()
# print('Value to be emitted is {}'.format(val))
self.valueChanged.emit(val)
def _textChanged(self):
val = self.box.text()
self.slider.setValue(int(val))
self._sliderChanged_notracking()
def setOrientation(self, *args, **kwargs):
self.slider.setOrientation(*args, **kwargs)
def _getValue(self):
return self.slider.value()
def _setValue(self, val):
self.slider.setValue(val)
self.box.setText(str(val))
value = property(_getValue, _setValue)
def setValue(self, val):
self.slider.setValue(val)
self.box.setText(str(val))
# self.valueChanged.emit(val)
class Mpl_Plot(_FigureCanvas):
def __init__(self, parent=None):
# Initialize things
self.fig = _mpl.figure.Figure()
_FigureCanvas.__init__(self, self.fig)
_FigureCanvas.setSizePolicy(self, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
_FigureCanvas.updateGeometry(self)
# Create axes
self.ax = self.fig.add_subplot(111)
def plot(self, *args, **kwargs):
self.ax.clear()
self.ax.plot(*args, **kwargs)
self.ax.ticklabel_format(style='sci', scilimits=(0, 0), axis='y')
self.ax.figure.canvas.draw()
class Mpl_Image(QtGui.QWidget):
# Signal for when the rectangle is changed
rectChanged = QtCore.pyqtSignal(Rectangle)
def __init__(self, parent=None, rectbool = True, toolbarbool=False, image=None):
# Initialize things
QtGui.QWidget.__init__(self)
self.rectbool = rectbool
self._clim_min = 0
self._clim_max = 3600
self._pressed = False
# Add a vertical layout
self.vLayout = QtGui.QVBoxLayout()
# Add a figure
self.fig = _mpl.figure.Figure()
# Add a canvas containing the fig
self.canvas = _FigureCanvas(self.fig)
_FigureCanvas.setSizePolicy(self.canvas, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
_FigureCanvas.updateGeometry(self.canvas)
# Setup the layout
if toolbarbool:
self.toolbar = _NavigationToolbar(self.canvas, self)
self.toolbar.setMaximumHeight(20)
self.vLayout.addWidget(self.toolbar)
self.vLayout.addWidget(self.canvas)
self.setLayout(self.vLayout)
# Create axes
self.ax = self.fig.add_subplot(111)
# Include rectangle functionality
if rectbool:
self.fig.canvas.mpl_connect('button_press_event', self.on_press)
self.fig.canvas.mpl_connect('button_release_event', self.on_release)
self.Rectangle = Rectangle(
x = -10 ,
y = 0 ,
width = 0 ,
height = 3 ,
axes = self.ax
)
# Add image
self.image = image
def _get_img(self):
return self._image
def _set_img(self, image):
self.ax.clear()
self._image = image
if image is not None:
self._imgplot = self.ax.imshow(image, interpolation='none')
if self.rectbool:
self.ax.add_patch(self.Rectangle.get_rect())
# imagemax = _np.max(_np.max(image))
self.set_clim(self._clim_min, self._clim_max)
image = property(_get_img, _set_img)
def set_clim(self, clim_min, clim_max):
if self.image is not None:
self._clim_min = clim_min
self._clim_max = clim_max
self._imgplot.set_clim(clim_min, clim_max)
self.ax.figure.canvas.draw()
def on_press(self, event):
if self.toolbar._active is None:
self._pressed = True
self.x0 = event.xdata
self.y0 = event.ydata
logger.log(level=loggerlevel, msg='Pressed: x0: {}, y0: {}'.format(self.x0, self.y0))
def on_release(self, event):
if self._pressed:
self._pressed = False
print('release')
self.x1 = event.xdata
self.y1 = event.ydata
width = self.x1 - self.x0
height = self.y1 - self.y0
logger.log(level=loggerlevel, msg='Released: x0: {}, y0: {}, x1: {}, y1: {}, width: {}, height: {}'.format(
self.x0 ,
self.y0 ,
self.x1 ,
self.y1 ,
width ,
height
)
)
self.Rectangle.set_xy((self.x0, self.y0))
self.Rectangle.set_width(width)
self.Rectangle.set_height(height)
self.ax.figure.canvas.draw()
self.rectChanged.emit(self.Rectangle)
# print(self.rect)
def zoom_rect(self, border=None, border_px=None):
# ======================================
# Get x coordinates
# ======================================
x0 = self.Rectangle.get_x()
width = self.Rectangle.get_width()
x1 = x0+width
# ======================================
# Get y coordinates
# ======================================
y0 = self.Rectangle.get_y()
height = self.Rectangle.get_height()
y1 = y0+height
# ======================================
# Validate borders
# ======================================
if (border_px is None) and (border is not None):
xborder = border[0]*width
yborder = border[1]*height
elif (border_px is not None) and (border is None):
xborder = border_px[0]
yborder = border_px[1]
elif (border_px is None) and (border is None):
raise IOError('No border info specified!')
elif (border_px is not None) and (border is not None):
raise IOError('Too much border info specified, both border_px and border!')
else:
raise IOError('End of the line!')
# ======================================
# Add borders
# ======================================
x0 = x0 - xborder
x1 = x1 + xborder
y0 = y0 - yborder
y1 = y1 + yborder
# ======================================
# Validate coordinates to prevent
# unPythonic crash
# ======================================
if not ((0 <= x0 and x0 <= self.image.shape[1]) and (0 <= x1 and x1 <= self.image.shape[1])):
print('X issue')
print('Requested: x=({}, {})'.format(x0, x1))
x0 = 0
x1 = self.image.shape[1]
if not ((0 <= y0 and y0 <= self.image.shape[0]) and (0 <= y1 and y1 <= self.image.shape[0])):
print('y issue')
print('Requested: y=({}, {})'.format(y0, y1))
y0 = 0
y1 = self.image.shape[0]
# ======================================
# Set viewable area
# ======================================
self.ax.set_xlim(x0, x1)
self.ax.set_ylim(y0, y1)
# ======================================
# Redraw canvas to show updates
# ======================================
self.ax.figure.canvas.draw()
class Mpl_Image_Plus_Slider(QtGui.QWidget):
# def __init__(self, parent=None, **kwargs):
def __init__(self, parent=None, **kwargs):
# Initialize self as a widget
QtGui.QWidget.__init__(self, parent)
# Add a vertical layout with parent self
self.vLayout = QtGui.QVBoxLayout(self)
self.vLayout.setObjectName(_fromUtf8("vLayout"))
# Add an Mpl_Image widget to vLayout,
# save it to self._img
# Pass arguments through to Mpl_Image.
self._img = Mpl_Image(parent=parent, toolbarbool=True, **kwargs)
self._img.setObjectName(_fromUtf8("_img"))
self.vLayout.addWidget(self._img)
# Add a slider to vLayout,
# save it to self.max_slider
# self.max_slider = QtGui.QSlider(self)
self.max_slider = Slider_and_Text(self)
self.max_slider.setObjectName(_fromUtf8("max_slider"))
self.max_slider.setOrientation(QtCore.Qt.Horizontal)
self.vLayout.addWidget(self.max_slider)
# Setup slider to work with _img's clims
self.max_slider.valueChanged.connect(lambda val: self.set_clim(0, val))
def _get_image(self):
return self._img.image
def _set_image(self, image):
self._img.image = image
maximage = _np.max(_np.max(image))
self.max_slider.setMaximum(maximage)
image = property(_get_image, _set_image)
def _get_ax(self):
return self._img.ax
ax = property(_get_ax)
def _get_Rectangle(self):
return self._img.Rectangle
# def _set_rect(self, rect):
# self._img.rect(rect)
Rectangle = property(_get_Rectangle)
def zoom_rect(self, border=None, border_px=None):
self._img.zoom_rect(border, border_px)
def set_clim(self, *args, **kwargs):
self._img.set_clim(*args, **kwargs)
def setSliderValue(self, val):
self.max_slider.setValue(val)
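# --- Hedged usage sketch (added for illustration; not part of the original
# module). It embeds the combined image-plus-slider widget in a bare Qt
# application with random placeholder data; a real caller would supply its
# own image array and parent widget.
if __name__ == "__main__":
    import sys
    app = QtGui.QApplication(sys.argv)
    widget = Mpl_Image_Plus_Slider()
    widget.image = _np.random.randint(0, 3600, (480, 640))  # placeholder data
    widget.setSliderValue(1800)
    widget.show()
    sys.exit(app.exec_())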
|
mit
| -1,184,258,922,883,475,200 | -7,959,421,400,368,360,000 | 33.234848 | 119 | 0.570628 | false |
wangyixiaohuihui/spark2-annotation
|
python/pyspark/streaming/flume.py
|
1
|
6047
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
if sys.version >= "3":
from io import BytesIO
else:
from StringIO import StringIO
from py4j.protocol import Py4JJavaError
from pyspark.storagelevel import StorageLevel
from pyspark.serializers import PairDeserializer, NoOpSerializer, UTF8Deserializer, read_int
from pyspark.streaming import DStream
__all__ = ['FlumeUtils', 'utf8_decoder']
def utf8_decoder(s):
""" Decode the unicode as UTF-8 """
if s is None:
return None
return s.decode('utf-8')
class FlumeUtils(object):
@staticmethod
def createStream(ssc, hostname, port,
storageLevel=StorageLevel.MEMORY_AND_DISK_2,
enableDecompression=False,
bodyDecoder=utf8_decoder):
"""
Create an input stream that pulls events from Flume.
:param ssc: StreamingContext object
:param hostname: Hostname of the slave machine to which the flume data will be sent
:param port: Port of the slave machine to which the flume data will be sent
:param storageLevel: Storage level to use for storing the received objects
:param enableDecompression: Should netty server decompress input stream
:param bodyDecoder: A function used to decode body (default is utf8_decoder)
:return: A DStream object
"""
jlevel = ssc._sc._getJavaStorageLevel(storageLevel)
helper = FlumeUtils._get_helper(ssc._sc)
jstream = helper.createStream(ssc._jssc, hostname, port, jlevel, enableDecompression)
return FlumeUtils._toPythonDStream(ssc, jstream, bodyDecoder)
@staticmethod
def createPollingStream(ssc, addresses,
storageLevel=StorageLevel.MEMORY_AND_DISK_2,
maxBatchSize=1000,
parallelism=5,
bodyDecoder=utf8_decoder):
"""
Creates an input stream that is to be used with the Spark Sink deployed on a Flume agent.
This stream will poll the sink for data and will pull events as they are available.
:param ssc: StreamingContext object
:param addresses: List of (host, port)s on which the Spark Sink is running.
:param storageLevel: Storage level to use for storing the received objects
:param maxBatchSize: The maximum number of events to be pulled from the Spark sink
in a single RPC call
:param parallelism: Number of concurrent requests this stream should send to the sink.
Note that having a higher number of requests concurrently being pulled
will result in this stream using more threads
:param bodyDecoder: A function used to decode body (default is utf8_decoder)
:return: A DStream object
"""
jlevel = ssc._sc._getJavaStorageLevel(storageLevel)
hosts = []
ports = []
for (host, port) in addresses:
hosts.append(host)
ports.append(port)
helper = FlumeUtils._get_helper(ssc._sc)
jstream = helper.createPollingStream(
ssc._jssc, hosts, ports, jlevel, maxBatchSize, parallelism)
return FlumeUtils._toPythonDStream(ssc, jstream, bodyDecoder)
@staticmethod
def _toPythonDStream(ssc, jstream, bodyDecoder):
ser = PairDeserializer(NoOpSerializer(), NoOpSerializer())
stream = DStream(jstream, ssc, ser)
def func(event):
headersBytes = BytesIO(event[0]) if sys.version >= "3" else StringIO(event[0])
headers = {}
strSer = UTF8Deserializer()
for i in range(0, read_int(headersBytes)):
key = strSer.loads(headersBytes)
value = strSer.loads(headersBytes)
headers[key] = value
body = bodyDecoder(event[1])
return (headers, body)
return stream.map(func)
@staticmethod
def _get_helper(sc):
try:
return sc._jvm.org.apache.spark.streaming.flume.FlumeUtilsPythonHelper()
except TypeError as e:
if str(e) == "'JavaPackage' object is not callable":
FlumeUtils._printErrorMsg(sc)
raise
@staticmethod
def _printErrorMsg(sc):
print("""
________________________________________________________________________________________________
Spark Streaming's Flume libraries not found in class path. Try one of the following.
1. Include the Flume library and its dependencies with in the
spark-submit command as
$ bin/spark-submit --packages org.apache.spark:spark-streaming-flume:%s ...
2. Download the JAR of the artifact from Maven Central http://search.maven.org/,
Group Id = org.apache.spark, Artifact Id = spark-streaming-flume-assembly, Version = %s.
Then, include the jar in the spark-submit command as
$ bin/spark-submit --jars <spark-streaming-flume-assembly.jar> ...
________________________________________________________________________________________________
""" % (sc.version, sc.version))
|
apache-2.0
| 306,486,198,019,352,640 | 7,487,296,184,435,431,000 | 41.192857 | 99 | 0.61915 | false |
Alwnikrotikz/hooke
|
mfp_igor_scripts/FMjoin.py
|
1
|
1503
|
#!/usr/bin/env python
'''
FMjoin.py
Copies all .ibw files contained in a folder and its subfolders into a single folder. Useful for force maps.
Usage:
python FMjoin.py origindir destdir
Alberto Gomez-Casado (c) 2010, University of Twente (The Netherlands)
This program is released under the GNU General Public License version 2.
'''
import os
import shutil
import sys
def main(*args):
    if len(sys.argv) < 3:
print 'You must at least specify origin and destination folders.'
return 0
origin=sys.argv[1]
dest=sys.argv[2]
if os.path.exists(origin):
if os.path.exists(dest):
if os.listdir(dest)!=[]:
print 'Destination folder is not empty! Use another folder.'
return 0
else:
print 'Destination folder does not exist, will create it'
os.mkdir(dest)
    else:
        print 'You provided a wrong origin folder name, try again.'
        return 0
origin=os.path.abspath(origin)
dest=os.path.abspath(dest)
for root, dirs, files in os.walk(origin):
for filename in files:
if filename.split('.')[1]!="ibw":
continue
filepath=os.path.join(root,filename)
#to avoid overwriting, we collapse unique paths into filenames
rawdest=filepath.split(os.path.commonprefix([origin, filepath]))[1]
rawdest=rawdest.replace('/','') #for linux
rawdest=rawdest.replace('\\','') #for windows
destfile=os.path.join(dest,rawdest)
print 'Copying '+rawdest
shutil.copy(filepath,destfile)
return 0
if __name__ == '__main__':
sys.exit(main(*sys.argv))
|
lgpl-3.0
| -6,278,480,197,284,280,000 | -7,174,778,657,462,480,000 | 24.913793 | 107 | 0.69328 | false |
Yong-Lee/django
|
django/core/management/commands/squashmigrations.py
|
132
|
7265
|
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from django.db import DEFAULT_DB_ALIAS, connections, migrations
from django.db.migrations.loader import AmbiguityError, MigrationLoader
from django.db.migrations.migration import SwappableTuple
from django.db.migrations.optimizer import MigrationOptimizer
from django.db.migrations.writer import MigrationWriter
from django.utils import six
from django.utils.version import get_docs_version
class Command(BaseCommand):
help = "Squashes an existing set of migrations (from first until specified) into a single new one."
def add_arguments(self, parser):
parser.add_argument('app_label',
help='App label of the application to squash migrations for.')
parser.add_argument('migration_name',
help='Migrations will be squashed until and including this migration.')
parser.add_argument('--no-optimize', action='store_true', dest='no_optimize', default=False,
help='Do not try to optimize the squashed operations.')
parser.add_argument('--noinput', action='store_false', dest='interactive', default=True,
help='Tells Django to NOT prompt the user for input of any kind.')
def handle(self, **options):
self.verbosity = options.get('verbosity')
self.interactive = options.get('interactive')
app_label = options['app_label']
migration_name = options['migration_name']
no_optimize = options['no_optimize']
# Load the current graph state, check the app and migration they asked for exists
loader = MigrationLoader(connections[DEFAULT_DB_ALIAS])
if app_label not in loader.migrated_apps:
raise CommandError(
"App '%s' does not have migrations (so squashmigrations on "
"it makes no sense)" % app_label
)
try:
migration = loader.get_migration_by_prefix(app_label, migration_name)
except AmbiguityError:
raise CommandError(
"More than one migration matches '%s' in app '%s'. Please be "
"more specific." % (migration_name, app_label)
)
except KeyError:
raise CommandError(
"Cannot find a migration matching '%s' from app '%s'." %
(migration_name, app_label)
)
# Work out the list of predecessor migrations
migrations_to_squash = [
loader.get_migration(al, mn)
for al, mn in loader.graph.forwards_plan((migration.app_label, migration.name))
if al == migration.app_label
]
# Tell them what we're doing and optionally ask if we should proceed
if self.verbosity > 0 or self.interactive:
self.stdout.write(self.style.MIGRATE_HEADING("Will squash the following migrations:"))
for migration in migrations_to_squash:
self.stdout.write(" - %s" % migration.name)
if self.interactive:
answer = None
while not answer or answer not in "yn":
answer = six.moves.input("Do you wish to proceed? [yN] ")
if not answer:
answer = "n"
break
else:
answer = answer[0].lower()
if answer != "y":
return
# Load the operations from all those migrations and concat together,
# along with collecting external dependencies and detecting
# double-squashing
operations = []
dependencies = set()
for smigration in migrations_to_squash:
if smigration.replaces:
raise CommandError(
"You cannot squash squashed migrations! Please transition "
"it to a normal migration first: "
"https://docs.djangoproject.com/en/%s/topics/migrations/#squashing-migrations" % get_docs_version()
)
operations.extend(smigration.operations)
for dependency in smigration.dependencies:
if isinstance(dependency, SwappableTuple):
if settings.AUTH_USER_MODEL == dependency.setting:
dependencies.add(("__setting__", "AUTH_USER_MODEL"))
else:
dependencies.add(dependency)
elif dependency[0] != smigration.app_label:
dependencies.add(dependency)
if no_optimize:
if self.verbosity > 0:
self.stdout.write(self.style.MIGRATE_HEADING("(Skipping optimization.)"))
new_operations = operations
else:
if self.verbosity > 0:
self.stdout.write(self.style.MIGRATE_HEADING("Optimizing..."))
optimizer = MigrationOptimizer()
new_operations = optimizer.optimize(operations, migration.app_label)
if self.verbosity > 0:
if len(new_operations) == len(operations):
self.stdout.write(" No optimizations possible.")
else:
self.stdout.write(
" Optimized from %s operations to %s operations." %
(len(operations), len(new_operations))
)
# Work out the value of replaces (any squashed ones we're re-squashing)
# need to feed their replaces into ours
replaces = []
for migration in migrations_to_squash:
if migration.replaces:
replaces.extend(migration.replaces)
else:
replaces.append((migration.app_label, migration.name))
# Make a new migration with those operations
subclass = type("Migration", (migrations.Migration, ), {
"dependencies": dependencies,
"operations": new_operations,
"replaces": replaces,
"initial": True,
})
new_migration = subclass("0001_squashed_%s" % migration.name, app_label)
# Write out the new migration file
writer = MigrationWriter(new_migration)
with open(writer.path, "wb") as fh:
fh.write(writer.as_string())
if self.verbosity > 0:
self.stdout.write(self.style.MIGRATE_HEADING("Created new squashed migration %s" % writer.path))
self.stdout.write(" You should commit this migration but leave the old ones in place;")
self.stdout.write(" the new migration will be used for new installs. Once you are sure")
self.stdout.write(" all instances of the codebase have applied the migrations you squashed,")
self.stdout.write(" you can delete them.")
if writer.needs_manual_porting:
self.stdout.write(self.style.MIGRATE_HEADING("Manual porting required"))
self.stdout.write(" Your migrations contained functions that must be manually copied over,")
self.stdout.write(" as we could not safely copy their implementation.")
self.stdout.write(" See the comment at the top of the squashed migration for details.")
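# --- Hedged usage sketch (added for illustration; not part of the original
# module). Typical invocations of this management command; the app label and
# migration prefix are placeholders.
#
#   python manage.py squashmigrations myapp 0004
#   python manage.py squashmigrations --no-optimize myapp 0004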
|
bsd-3-clause
| -8,829,258,377,856,775,000 | -209,692,287,117,221,800 | 46.48366 | 119 | 0.598624 | false |
russss/Diamond
|
src/collectors/dseopscenter/dseopscenter.py
|
16
|
7115
|
# coding=utf-8
"""
Collect the DataStax OpsCenter metrics
"""
import urllib2
import datetime
try:
import json
except ImportError:
import simplejson as json
import diamond.collector
class DseOpsCenterCollector(diamond.collector.Collector):
last_run_time = 0
column_families = None
last_schema_sync_time = 0
def get_default_config_help(self):
config_help = super(DseOpsCenterCollector,
self).get_default_config_help()
config_help.update({
'host': "",
'port': "",
'cluster_id': "Set cluster ID/name.\n",
            'metrics': "You can list explicit metrics if you like,\n"
            " by default all known metrics are included.\n",
'node_group': "Set node group name, any by default\n",
            'default_tail_opts': "Changing these is not recommended.",
})
return config_help
def get_default_config(self):
"""
Returns the default collector settings
"""
config = super(DseOpsCenterCollector, self).get_default_config()
metrics = [
'cf-bf-false-positives',
'cf-bf-false-ratio',
'cf-bf-space-used',
'cf-keycache-hit-rate',
'cf-keycache-hits',
'cf-keycache-requests',
'cf-live-disk-used',
'cf-live-sstables',
'cf-pending-tasks',
'cf-read-latency-op',
'cf-read-ops',
'cf-rowcache-hit-rate',
'cf-rowcache-hits',
'cf-rowcache-requests',
'cf-total-disk-used',
'cf-write-latency-op',
'cf-write-ops',
'cms-collection-count',
'cms-collection-time',
'data-load',
'heap-committed',
'heap-max',
'heap-used',
'key-cache-hit-rate',
'key-cache-hits',
'key-cache-requests',
'nonheap-committed',
'nonheap-max',
'nonheap-used',
'pending-compaction-tasks',
'pending-flush-sorter-tasks',
'pending-flushes',
'pending-gossip-tasks',
'pending-hinted-handoff',
'pending-internal-responses',
'pending-memtable-post-flushers',
'pending-migrations',
'pending-misc-tasks',
'pending-read-ops',
'pending-read-repair-tasks',
'pending-repair-tasks',
'pending-repl-on-write-tasks',
'pending-request-responses',
'pending-streams',
'pending-write-ops',
'read-latency-op',
'read-ops',
'row-cache-hit-rate',
'row-cache-hits',
'row-cache-requests',
'solr-avg-time-per-req',
'solr-errors',
'solr-requests',
'solr-timeouts',
'total-bytes-compacted',
'total-compactions-completed',
'write-latency-op',
'write-ops',
]
config.update({
'host': '127.0.0.1',
'port': 8888,
'path': 'cassandra',
'node_group': '*',
'metrics': ','.join(metrics),
'default_tail_opts': '&forecast=0&node_aggregation=1',
})
return config
def _get_schema(self):
time_now = int(datetime.datetime.utcnow().strftime('%s'))
        # Skip the refresh only when a schema is already cached and is still
        # fresh; otherwise fall through and fetch it from OpsCenter.
        if (self.column_families is not None and
                (time_now - self.last_schema_sync_time < 3600)):
            return False
url = 'http://%s:%i/%s/keyspaces' % (self.config['host'],
int(self.config['port']),
self.config['cluster_id'])
try:
response = urllib2.urlopen(url)
except Exception, err:
self.log.error('%s: %s', url, err)
return False
try:
result = json.load(response)
column_families = []
for ks in result:
i = []
for cf in result[ks]['column_families']:
i.append("%s.%s" % (ks, cf))
column_families.append(i)
self.column_families = ','.join(sum(column_families, []))
self.log.debug('DseOpsCenterCollector columnfamilies = %s',
self.column_families)
self.last_schema_sync_time = time_now
return True
except (TypeError, ValueError):
self.log.error(
"Unable to parse response from opscenter as a json object")
return False
def _get(self, start, end, step=60):
self._get_schema()
url = ('http://%s:%i/%s/new-metrics?node_group=%s&columnfamilies=%s'
'&metrics=%s&start=%i&end=%i&step=%i%s') % (
self.config['host'],
int(self.config['port']),
self.config['cluster_id'],
self.config['node_group'],
self.column_families,
self.config['metrics'],
start, end, step,
self.config['default_tail_opts'])
try:
response = urllib2.urlopen(url)
except Exception, err:
self.log.error('%s: %s', url, err)
return False
self.log.debug('DseOpsCenterCollector metrics url = %s', url)
try:
return json.load(response)
except (TypeError, ValueError):
self.log.error(
"Unable to parse response from opscenter as a json object")
return False
def collect(self):
metrics = {}
if json is None:
self.log.error('Unable to import json')
return None
time_now = int(datetime.datetime.utcnow().strftime('%s'))
self.log.debug('DseOpsCenterCollector last_run_time = %i',
self.last_run_time)
if self.last_run_time == 0:
self.last_run_time = time_now - 60
if time_now - self.last_run_time >= 60:
result = self._get(self.last_run_time, time_now)
self.last_run_time = time_now
if not result:
return None
self.log.debug('DseOpsCenterCollector result = %s', result)
for data in result['data'][self.config['node_group']]:
if data['data-points'][0][0] is not None:
if 'columnfamily' in data:
k = '.'.join([data['metric'],
data['columnfamily']])
metrics[k] = data['data-points'][0][0]
else:
metrics[data['metric']] = data['data-points'][0][0]
self.log.debug('DseOpsCenterCollector metrics = %s', metrics)
for key in metrics:
self.publish(key, metrics[key])
else:
self.log.debug(
"DseOpsCenterCollector can only run once every minute")
return None
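# --- Hedged configuration sketch (added for illustration; not part of the
# original module). A typical DseOpsCenterCollector.conf; host and cluster_id
# are placeholders, and only keys defined in get_default_config() are used.
#
#   enabled = True
#   host = opscenter.example.com
#   port = 8888
#   cluster_id = MyCluster
#   node_group = *
#   metrics = read-ops,write-ops,read-latency-op,write-latency-op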
|
mit
| -2,450,354,958,458,295,000 | -3,563,577,015,054,976,500 | 33.371981 | 76 | 0.493043 | false |
adityacs/ansible
|
lib/ansible/modules/network/avi/avi_virtualservice.py
|
8
|
20685
|
#!/usr/bin/python
#
# Created on Aug 25, 2016
# @author: Gaurav Rastogi ([email protected])
# Eric Anderson ([email protected])
# module_check: supported
# Avi Version: 16.3.8
#
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'status': ['preview'], 'supported_by': 'community', 'version': '1.0'}
DOCUMENTATION = '''
---
module: avi_virtualservice
author: Gaurav Rastogi ([email protected])
short_description: Module for setup of VirtualService Avi RESTful Object
description:
- This module is used to configure VirtualService object
- more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.3"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent","present"]
active_standby_se_tag:
description:
- This configuration only applies if the virtualservice is in legacy active standby ha mode and load distribution among active standby is enabled.
- This field is used to tag the virtualservice so that virtualservices with the same tag will share the same active serviceengine.
- Virtualservices with different tags will have different active serviceengines.
- If one of the serviceengine's in the serviceenginegroup fails, all virtualservices will end up using the same active serviceengine.
- Redistribution of the virtualservices can be either manual or automated when the failed serviceengine recovers.
- Redistribution is based on the auto redistribute property of the serviceenginegroup.
- Default value when not specified in API or module is interpreted by Avi Controller as ACTIVE_STANDBY_SE_1.
analytics_policy:
description:
- Determines analytics settings for the application.
analytics_profile_ref:
description:
- Specifies settings related to analytics.
- It is a reference to an object of type analyticsprofile.
application_profile_ref:
description:
- Enable application layer specific features for the virtual service.
- It is a reference to an object of type applicationprofile.
auto_allocate_floating_ip:
description:
- Auto-allocate floating/elastic ip from the cloud infrastructure.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
auto_allocate_ip:
description:
- Auto-allocate vip from the provided subnet.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
availability_zone:
description:
- Availability-zone to place the virtual service.
avi_allocated_fip:
description:
- (internal-use) fip allocated by avi in the cloud infrastructure.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
avi_allocated_vip:
description:
- (internal-use) vip allocated by avi in the cloud infrastructure.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
client_auth:
description:
- Http authentication configuration for protected resources.
cloud_config_cksum:
description:
- Checksum of cloud configuration for vs.
- Internally set by cloud connector.
cloud_ref:
description:
- It is a reference to an object of type cloud.
cloud_type:
description:
- Cloud_type of virtualservice.
- Default value when not specified in API or module is interpreted by Avi Controller as CLOUD_NONE.
connections_rate_limit:
description:
- Rate limit the incoming connections to this virtual service.
content_rewrite:
description:
- Profile used to match and rewrite strings in request and/or response body.
created_by:
description:
- Creator name.
delay_fairness:
description:
- Select the algorithm for qos fairness.
- This determines how multiple virtual services sharing the same service engines will prioritize traffic over a congested network.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
description:
description:
- User defined description for the object.
discovered_network_ref:
description:
- (internal-use) discovered networks providing reachability for client facing virtual service ip.
- This field is deprecated.
- It is a reference to an object of type network.
discovered_networks:
description:
- (internal-use) discovered networks providing reachability for client facing virtual service ip.
- This field is used internally by avi, not editable by the user.
discovered_subnet:
description:
- (internal-use) discovered subnets providing reachability for client facing virtual service ip.
- This field is deprecated.
dns_info:
description:
- Service discovery specific data including fully qualified domain name, type and time-to-live of the dns record.
- Note that only one of fqdn and dns_info setting is allowed.
east_west_placement:
description:
- Force placement on all se's in service group (mesos mode only).
- Default value when not specified in API or module is interpreted by Avi Controller as False.
enable_autogw:
description:
- Response traffic to clients will be sent back to the source mac address of the connection, rather than statically sent to a default gateway.
- Default value when not specified in API or module is interpreted by Avi Controller as True.
enable_rhi:
description:
- Enable route health injection using the bgp config in the vrf context.
enable_rhi_snat:
description:
- Enable route health injection for source nat'ted floating ip address using the bgp config in the vrf context.
enabled:
description:
- Enable or disable the virtual service.
- Default value when not specified in API or module is interpreted by Avi Controller as True.
floating_ip:
description:
- Floating ip to associate with this virtual service.
floating_subnet_uuid:
description:
- If auto_allocate_floating_ip is true and more than one floating-ip subnets exist, then the subnet for the floating ip address allocation.
- This field is applicable only if the virtualservice belongs to an openstack or aws cloud.
- In openstack or aws cloud it is required when auto_allocate_floating_ip is selected.
flow_dist:
description:
- Criteria for flow distribution among ses.
- Default value when not specified in API or module is interpreted by Avi Controller as LOAD_AWARE.
flow_label_type:
description:
- Criteria for flow labelling.
- Default value when not specified in API or module is interpreted by Avi Controller as NO_LABEL.
fqdn:
description:
- Dns resolvable, fully qualified domain name of the virtualservice.
- Only one of 'fqdn' and 'dns_info' configuration is allowed.
host_name_xlate:
description:
- Translate the host name sent to the servers to this value.
- Translate the host name sent from servers back to the value used by the client.
http_policies:
description:
- Http policies applied on the data traffic of the virtual service.
ign_pool_net_reach:
description:
- Ignore pool servers network reachability constraints for virtual service placement.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
ip_address:
description:
- Ip address of the virtual service.
ipam_network_subnet:
description:
- Subnet and/or network for allocating virtualservice ip by ipam provider module.
limit_doser:
description:
- Limit potential dos attackers who exceed max_cps_per_client significantly to a fraction of max_cps_per_client for a while.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
max_cps_per_client:
description:
- Maximum connections per second per client ip.
- Default value when not specified in API or module is interpreted by Avi Controller as 0.
microservice_ref:
description:
- Microservice representing the virtual service.
- It is a reference to an object of type microservice.
name:
description:
- Name for the virtual service.
required: true
network_profile_ref:
description:
- Determines network settings such as protocol, tcp or udp, and related options for the protocol.
- It is a reference to an object of type networkprofile.
network_ref:
description:
- Manually override the network on which the virtual service is placed.
- It is a reference to an object of type network.
network_security_policy_ref:
description:
- Network security policies for the virtual service.
- It is a reference to an object of type networksecuritypolicy.
performance_limits:
description:
        - Optional settings that determine performance limits such as maximum connections or bandwidth.
pool_group_ref:
description:
- The pool group is an object that contains pools.
- It is a reference to an object of type poolgroup.
pool_ref:
description:
- The pool is an object that contains destination servers and related attributes such as load-balancing and persistence.
- It is a reference to an object of type pool.
port_uuid:
description:
- (internal-use) network port assigned to the virtual service ip address.
remove_listening_port_on_vs_down:
description:
- Remove listening port if virtualservice is down.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
requests_rate_limit:
description:
- Rate limit the incoming requests to this virtual service.
scaleout_ecmp:
description:
- Disable re-distribution of flows across service engines for a virtual service.
- Enable if the network itself performs flow hashing with ecmp in environments such as gcp.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
se_group_ref:
description:
- The service engine group to use for this virtual service.
- Moving to a new se group is disruptive to existing connections for this vs.
- It is a reference to an object of type serviceenginegroup.
server_network_profile_ref:
description:
- Determines the network settings profile for the server side of tcp proxied connections.
- Leave blank to use the same settings as the client to vs side of the connection.
- It is a reference to an object of type networkprofile.
service_pool_select:
description:
- Select pool based on destination port.
services:
description:
- List of services defined for this virtual service.
snat_ip:
description:
- Nat'ted floating source ip address(es) for upstream connection to servers.
ssl_key_and_certificate_refs:
description:
- Select or create one or two certificates, ec and/or rsa, that will be presented to ssl/tls terminated connections.
- It is a reference to an object of type sslkeyandcertificate.
ssl_profile_ref:
description:
- Determines the set of ssl versions and ciphers to accept for ssl/tls terminated connections.
- It is a reference to an object of type sslprofile.
ssl_sess_cache_avg_size:
description:
- Expected number of ssl session cache entries (may be exceeded).
- Default value when not specified in API or module is interpreted by Avi Controller as 1024.
static_dns_records:
description:
- List of static dns records applied to this virtual service.
- These are static entries and no health monitoring is performed against the ip addresses.
subnet:
description:
- Subnet providing reachability for client facing virtual service ip.
subnet_uuid:
description:
        - It represents the subnet for virtual service ip address allocation when auto_allocate_ip is true. It is only applicable in an openstack or aws cloud.
- This field is required if auto_allocate_ip is true.
tenant_ref:
description:
- It is a reference to an object of type tenant.
type:
description:
- Specify if this is a normal virtual service, or if it is the parent or child of an sni-enabled virtual hosted virtual service.
- Default value when not specified in API or module is interpreted by Avi Controller as VS_TYPE_NORMAL.
url:
description:
- Avi controller URL of the object.
use_bridge_ip_as_vip:
description:
- Use bridge ip as vip on each host in mesos deployments.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
uuid:
description:
- Uuid of the virtualservice.
vh_domain_name:
description:
- The exact name requested from the client's sni-enabled tls hello domain name field.
- If this is a match, the parent vs will forward the connection to this child vs.
vh_parent_vs_uuid:
description:
- Specifies the virtual service acting as virtual hosting (sni) parent.
vrf_context_ref:
description:
- Virtual routing context that the virtual service is bound to.
- This is used to provide the isolation of the set of networks the application is attached to.
- It is a reference to an object of type vrfcontext.
vs_datascripts:
description:
- Datascripts applied on the data traffic of the virtual service.
weight:
description:
- The quality of service weight to assign to traffic transmitted from this virtual service.
- A higher weight will prioritize traffic versus other virtual services sharing the same service engines.
- Default value when not specified in API or module is interpreted by Avi Controller as 1.
extends_documentation_fragment:
- avi
'''
EXAMPLES = '''
- name: Create SSL Virtual Service using Pool testpool2
avi_virtualservice:
controller: 10.10.27.90
username: admin
password: AviNetworks123!
name: newtestvs
state: present
performance_limits:
max_concurrent_connections: 1000
services:
- port: 443
enable_ssl: true
- port: 80
ssl_profile_ref: '/api/sslprofile?name=System-Standard'
application_profile_ref: '/api/applicationprofile?name=System-Secure-HTTP'
ssl_key_and_certificate_refs:
- '/api/sslkeyandcertificate?name=System-Default-Cert'
ip_address:
addr: 10.90.131.103
type: V4
pool_ref: '/api/pool?name=testpool2'
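# A second, minimal task (added as an illustrative sketch, not from the original
# module docs): a plain HTTP virtual service on port 80. The controller address
# and credentials are reused from the example above; the VS name, IP address and
# pool name below are assumed placeholder values.
- name: Create basic HTTP Virtual Service using Pool testpool1
  avi_virtualservice:
    controller: 10.10.27.90
    username: admin
    password: AviNetworks123!
    name: basichttpvs
    state: present
    services:
    - port: 80
    ip_address:
      addr: 10.90.131.104
      type: V4
    pool_ref: '/api/pool?name=testpool1'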
'''
RETURN = '''
obj:
description: VirtualService (api/virtualservice) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
from ansible.module_utils.avi import (
avi_common_argument_spec, HAS_AVI, avi_ansible_api)
except ImportError:
HAS_AVI = False
def main():
argument_specs = dict(
state=dict(default='present',
choices=['absent', 'present']),
active_standby_se_tag=dict(type='str',),
analytics_policy=dict(type='dict',),
analytics_profile_ref=dict(type='str',),
application_profile_ref=dict(type='str',),
auto_allocate_floating_ip=dict(type='bool',),
auto_allocate_ip=dict(type='bool',),
availability_zone=dict(type='str',),
avi_allocated_fip=dict(type='bool',),
avi_allocated_vip=dict(type='bool',),
client_auth=dict(type='dict',),
cloud_config_cksum=dict(type='str',),
cloud_ref=dict(type='str',),
cloud_type=dict(type='str',),
connections_rate_limit=dict(type='dict',),
content_rewrite=dict(type='dict',),
created_by=dict(type='str',),
delay_fairness=dict(type='bool',),
description=dict(type='str',),
discovered_network_ref=dict(type='list',),
discovered_networks=dict(type='list',),
discovered_subnet=dict(type='list',),
dns_info=dict(type='list',),
east_west_placement=dict(type='bool',),
enable_autogw=dict(type='bool',),
enable_rhi=dict(type='bool',),
enable_rhi_snat=dict(type='bool',),
enabled=dict(type='bool',),
floating_ip=dict(type='dict',),
floating_subnet_uuid=dict(type='str',),
flow_dist=dict(type='str',),
flow_label_type=dict(type='str',),
fqdn=dict(type='str',),
host_name_xlate=dict(type='str',),
http_policies=dict(type='list',),
ign_pool_net_reach=dict(type='bool',),
ip_address=dict(type='dict',),
ipam_network_subnet=dict(type='dict',),
limit_doser=dict(type='bool',),
max_cps_per_client=dict(type='int',),
microservice_ref=dict(type='str',),
name=dict(type='str', required=True),
network_profile_ref=dict(type='str',),
network_ref=dict(type='str',),
network_security_policy_ref=dict(type='str',),
performance_limits=dict(type='dict',),
pool_group_ref=dict(type='str',),
pool_ref=dict(type='str',),
port_uuid=dict(type='str',),
remove_listening_port_on_vs_down=dict(type='bool',),
requests_rate_limit=dict(type='dict',),
scaleout_ecmp=dict(type='bool',),
se_group_ref=dict(type='str',),
server_network_profile_ref=dict(type='str',),
service_pool_select=dict(type='list',),
services=dict(type='list',),
snat_ip=dict(type='list',),
ssl_key_and_certificate_refs=dict(type='list',),
ssl_profile_ref=dict(type='str',),
ssl_sess_cache_avg_size=dict(type='int',),
static_dns_records=dict(type='list',),
subnet=dict(type='dict',),
subnet_uuid=dict(type='str',),
tenant_ref=dict(type='str',),
type=dict(type='str',),
url=dict(type='str',),
use_bridge_ip_as_vip=dict(type='bool',),
uuid=dict(type='str',),
vh_domain_name=dict(type='list',),
vh_parent_vs_uuid=dict(type='str',),
vrf_context_ref=dict(type='str',),
vs_datascripts=dict(type='list',),
weight=dict(type='int',),
)
argument_specs.update(avi_common_argument_spec())
module = AnsibleModule(
argument_spec=argument_specs, supports_check_mode=True)
if not HAS_AVI:
return module.fail_json(msg=(
'Avi python API SDK (avisdk>=16.3.5.post1) is not installed. '
'For more details visit https://github.com/avinetworks/sdk.'))
return avi_ansible_api(module, 'virtualservice',
set([]))
if __name__ == '__main__':
main()
|
gpl-3.0
| 1,558,439,495,983,872,300 | 8,668,086,054,460,079,000 | 44.662252 | 159 | 0.652889 | false |
tpsatish95/Python-Workshop
|
Python Environment Setup/Alternate/1. Python/1. Installer/Python-3.4.0(Linux)/Tools/stringbench/stringbench.py
|
51
|
44018
|
# Various microbenchmarks comparing unicode and byte string performance
# Please keep this file both 2.x and 3.x compatible!
import timeit
import itertools
import operator
import re
import sys
import datetime
import optparse
VERSION = '2.0'
def p(*args):
sys.stdout.write(' '.join(str(s) for s in args) + '\n')
if sys.version_info >= (3,):
BYTES = bytes_from_str = lambda x: x.encode('ascii')
UNICODE = unicode_from_str = lambda x: x
else:
BYTES = bytes_from_str = lambda x: x
UNICODE = unicode_from_str = lambda x: x.decode('ascii')
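# Added note (not in the original file): every benchmark below receives one of
# these constructors as its STR argument, so STR("AB") is b"AB" for the bytes
# run and "AB"/u"AB" for the unicode run; the same benchmark body is therefore
# timed once per string type.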
class UnsupportedType(TypeError):
pass
p('stringbench v%s' % VERSION)
p(sys.version)
p(datetime.datetime.now())
REPEAT = 1
REPEAT = 3
#REPEAT = 7
if __name__ != "__main__":
raise SystemExit("Must run as main program")
parser = optparse.OptionParser()
parser.add_option("-R", "--skip-re", dest="skip_re",
action="store_true",
help="skip regular expression tests")
parser.add_option("-8", "--8-bit", dest="bytes_only",
action="store_true",
help="only do 8-bit string benchmarks")
parser.add_option("-u", "--unicode", dest="unicode_only",
action="store_true",
help="only do Unicode string benchmarks")
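# Illustrative invocations (added; the flags are defined above, and positional
# test-name arguments filter benchmarks by substring match against their group
# string in main() below):
#   python stringbench.py              # every benchmark, bytes and unicode
#   python stringbench.py -u split     # unicode-only, groups mentioning "split"
#   python stringbench.py -8 -R        # bytes-only, skipping the re-based tests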
_RANGE_1000 = list(range(1000))
_RANGE_100 = list(range(100))
_RANGE_10 = list(range(10))
dups = {}
def bench(s, group, repeat_count):
def blah(f):
if f.__name__ in dups:
raise AssertionError("Multiple functions with same name: %r" %
(f.__name__,))
dups[f.__name__] = 1
f.comment = s
f.is_bench = True
f.group = group
f.repeat_count = repeat_count
return f
return blah
def uses_re(f):
    f.uses_re = True
    # Return the function so that stacking @uses_re on top of @bench keeps the
    # benchmark callable (otherwise the decorated name would become None and
    # the benchmark would be silently skipped).
    return f
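# Added sketch (not part of the original file): main() below discovers the
# benchmarks by scanning module globals for the is_bench attribute, roughly
#   for name, fn in globals().items():
#       if hasattr(fn, "is_bench"):
#           ...  # time fn(BYTES) and fn(UNICODE), grouped by fn.group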
####### 'in' comparisons
@bench('"A" in "A"*1000', "early match, single character", 1000)
def in_test_quick_match_single_character(STR):
s1 = STR("A" * 1000)
s2 = STR("A")
for x in _RANGE_1000:
s2 in s1
@bench('"B" in "A"*1000', "no match, single character", 1000)
def in_test_no_match_single_character(STR):
s1 = STR("A" * 1000)
s2 = STR("B")
for x in _RANGE_1000:
s2 in s1
@bench('"AB" in "AB"*1000', "early match, two characters", 1000)
def in_test_quick_match_two_characters(STR):
s1 = STR("AB" * 1000)
s2 = STR("AB")
for x in _RANGE_1000:
s2 in s1
@bench('"BC" in "AB"*1000', "no match, two characters", 1000)
def in_test_no_match_two_character(STR):
s1 = STR("AB" * 1000)
s2 = STR("BC")
for x in _RANGE_1000:
s2 in s1
@bench('"BC" in ("AB"*300+"C")', "late match, two characters", 1000)
def in_test_slow_match_two_characters(STR):
s1 = STR("AB" * 300+"C")
s2 = STR("BC")
for x in _RANGE_1000:
s2 in s1
@bench('s="ABC"*33; (s+"E") in ((s+"D")*300+s+"E")',
"late match, 100 characters", 100)
def in_test_slow_match_100_characters(STR):
m = STR("ABC"*33)
d = STR("D")
e = STR("E")
s1 = (m+d)*300 + m+e
s2 = m+e
for x in _RANGE_100:
s2 in s1
# Try with regex
@uses_re
@bench('s="ABC"*33; re.compile(s+"D").search((s+"D")*300+s+"E")',
"late match, 100 characters", 100)
def re_test_slow_match_100_characters(STR):
m = STR("ABC"*33)
d = STR("D")
e = STR("E")
s1 = (m+d)*300 + m+e
s2 = m+e
pat = re.compile(s2)
search = pat.search
for x in _RANGE_100:
search(s1)
#### same tests as 'in' but use 'find'
@bench('("A"*1000).find("A")', "early match, single character", 1000)
def find_test_quick_match_single_character(STR):
s1 = STR("A" * 1000)
s2 = STR("A")
s1_find = s1.find
for x in _RANGE_1000:
s1_find(s2)
@bench('("A"*1000).find("B")', "no match, single character", 1000)
def find_test_no_match_single_character(STR):
s1 = STR("A" * 1000)
s2 = STR("B")
s1_find = s1.find
for x in _RANGE_1000:
s1_find(s2)
@bench('("AB"*1000).find("AB")', "early match, two characters", 1000)
def find_test_quick_match_two_characters(STR):
s1 = STR("AB" * 1000)
s2 = STR("AB")
s1_find = s1.find
for x in _RANGE_1000:
s1_find(s2)
@bench('("AB"*1000).find("BC")', "no match, two characters", 1000)
def find_test_no_match_two_character(STR):
s1 = STR("AB" * 1000)
s2 = STR("BC")
s1_find = s1.find
for x in _RANGE_1000:
s1_find(s2)
@bench('("AB"*1000).find("CA")', "no match, two characters", 1000)
def find_test_no_match_two_character_bis(STR):
s1 = STR("AB" * 1000)
s2 = STR("CA")
s1_find = s1.find
for x in _RANGE_1000:
s1_find(s2)
@bench('("AB"*300+"C").find("BC")', "late match, two characters", 1000)
def find_test_slow_match_two_characters(STR):
s1 = STR("AB" * 300+"C")
s2 = STR("BC")
s1_find = s1.find
for x in _RANGE_1000:
s1_find(s2)
@bench('("AB"*300+"CA").find("CA")', "late match, two characters", 1000)
def find_test_slow_match_two_characters_bis(STR):
s1 = STR("AB" * 300+"CA")
s2 = STR("CA")
s1_find = s1.find
for x in _RANGE_1000:
s1_find(s2)
@bench('s="ABC"*33; ((s+"D")*500+s+"E").find(s+"E")',
"late match, 100 characters", 100)
def find_test_slow_match_100_characters(STR):
m = STR("ABC"*33)
d = STR("D")
e = STR("E")
s1 = (m+d)*500 + m+e
s2 = m+e
s1_find = s1.find
for x in _RANGE_100:
s1_find(s2)
@bench('s="ABC"*33; ((s+"D")*500+"E"+s).find("E"+s)',
"late match, 100 characters", 100)
def find_test_slow_match_100_characters_bis(STR):
m = STR("ABC"*33)
d = STR("D")
e = STR("E")
s1 = (m+d)*500 + e+m
s2 = e+m
s1_find = s1.find
for x in _RANGE_100:
s1_find(s2)
#### Same tests for 'rfind'
@bench('("A"*1000).rfind("A")', "early match, single character", 1000)
def rfind_test_quick_match_single_character(STR):
s1 = STR("A" * 1000)
s2 = STR("A")
s1_rfind = s1.rfind
for x in _RANGE_1000:
s1_rfind(s2)
@bench('("A"*1000).rfind("B")', "no match, single character", 1000)
def rfind_test_no_match_single_character(STR):
s1 = STR("A" * 1000)
s2 = STR("B")
s1_rfind = s1.rfind
for x in _RANGE_1000:
s1_rfind(s2)
@bench('("AB"*1000).rfind("AB")', "early match, two characters", 1000)
def rfind_test_quick_match_two_characters(STR):
s1 = STR("AB" * 1000)
s2 = STR("AB")
s1_rfind = s1.rfind
for x in _RANGE_1000:
s1_rfind(s2)
@bench('("AB"*1000).rfind("BC")', "no match, two characters", 1000)
def rfind_test_no_match_two_character(STR):
s1 = STR("AB" * 1000)
s2 = STR("BC")
s1_rfind = s1.rfind
for x in _RANGE_1000:
s1_rfind(s2)
@bench('("AB"*1000).rfind("CA")', "no match, two characters", 1000)
def rfind_test_no_match_two_character_bis(STR):
s1 = STR("AB" * 1000)
s2 = STR("CA")
s1_rfind = s1.rfind
for x in _RANGE_1000:
s1_rfind(s2)
@bench('("C"+"AB"*300).rfind("CA")', "late match, two characters", 1000)
def rfind_test_slow_match_two_characters(STR):
s1 = STR("C" + "AB" * 300)
s2 = STR("CA")
s1_rfind = s1.rfind
for x in _RANGE_1000:
s1_rfind(s2)
@bench('("BC"+"AB"*300).rfind("BC")', "late match, two characters", 1000)
def rfind_test_slow_match_two_characters_bis(STR):
s1 = STR("BC" + "AB" * 300)
s2 = STR("BC")
s1_rfind = s1.rfind
for x in _RANGE_1000:
s1_rfind(s2)
@bench('s="ABC"*33; ("E"+s+("D"+s)*500).rfind("E"+s)',
"late match, 100 characters", 100)
def rfind_test_slow_match_100_characters(STR):
m = STR("ABC"*33)
d = STR("D")
e = STR("E")
s1 = e+m + (d+m)*500
s2 = e+m
s1_rfind = s1.rfind
for x in _RANGE_100:
s1_rfind(s2)
@bench('s="ABC"*33; (s+"E"+("D"+s)*500).rfind(s+"E")',
"late match, 100 characters", 100)
def rfind_test_slow_match_100_characters_bis(STR):
m = STR("ABC"*33)
d = STR("D")
e = STR("E")
s1 = m+e + (d+m)*500
s2 = m+e
s1_rfind = s1.rfind
for x in _RANGE_100:
s1_rfind(s2)
#### Now with index.
# Skip the ones which fail because that would include exception overhead.
@bench('("A"*1000).index("A")', "early match, single character", 1000)
def index_test_quick_match_single_character(STR):
s1 = STR("A" * 1000)
s2 = STR("A")
s1_index = s1.index
for x in _RANGE_1000:
s1_index(s2)
@bench('("AB"*1000).index("AB")', "early match, two characters", 1000)
def index_test_quick_match_two_characters(STR):
s1 = STR("AB" * 1000)
s2 = STR("AB")
s1_index = s1.index
for x in _RANGE_1000:
s1_index(s2)
@bench('("AB"*300+"C").index("BC")', "late match, two characters", 1000)
def index_test_slow_match_two_characters(STR):
s1 = STR("AB" * 300+"C")
s2 = STR("BC")
s1_index = s1.index
for x in _RANGE_1000:
s1_index(s2)
@bench('s="ABC"*33; ((s+"D")*500+s+"E").index(s+"E")',
"late match, 100 characters", 100)
def index_test_slow_match_100_characters(STR):
m = STR("ABC"*33)
d = STR("D")
e = STR("E")
s1 = (m+d)*500 + m+e
s2 = m+e
s1_index = s1.index
for x in _RANGE_100:
s1_index(s2)
#### Same for rindex
@bench('("A"*1000).rindex("A")', "early match, single character", 1000)
def rindex_test_quick_match_single_character(STR):
s1 = STR("A" * 1000)
s2 = STR("A")
s1_rindex = s1.rindex
for x in _RANGE_1000:
s1_rindex(s2)
@bench('("AB"*1000).rindex("AB")', "early match, two characters", 1000)
def rindex_test_quick_match_two_characters(STR):
s1 = STR("AB" * 1000)
s2 = STR("AB")
s1_rindex = s1.rindex
for x in _RANGE_1000:
s1_rindex(s2)
@bench('("C"+"AB"*300).rindex("CA")', "late match, two characters", 1000)
def rindex_test_slow_match_two_characters(STR):
s1 = STR("C" + "AB" * 300)
s2 = STR("CA")
s1_rindex = s1.rindex
for x in _RANGE_1000:
s1_rindex(s2)
@bench('s="ABC"*33; ("E"+s+("D"+s)*500).rindex("E"+s)',
"late match, 100 characters", 100)
def rindex_test_slow_match_100_characters(STR):
m = STR("ABC"*33)
d = STR("D")
e = STR("E")
s1 = e + m + (d+m)*500
s2 = e + m
s1_rindex = s1.rindex
for x in _RANGE_100:
s1_rindex(s2)
#### Same for partition
@bench('("A"*1000).partition("A")', "early match, single character", 1000)
def partition_test_quick_match_single_character(STR):
s1 = STR("A" * 1000)
s2 = STR("A")
s1_partition = s1.partition
for x in _RANGE_1000:
s1_partition(s2)
@bench('("A"*1000).partition("B")', "no match, single character", 1000)
def partition_test_no_match_single_character(STR):
s1 = STR("A" * 1000)
s2 = STR("B")
s1_partition = s1.partition
for x in _RANGE_1000:
s1_partition(s2)
@bench('("AB"*1000).partition("AB")', "early match, two characters", 1000)
def partition_test_quick_match_two_characters(STR):
s1 = STR("AB" * 1000)
s2 = STR("AB")
s1_partition = s1.partition
for x in _RANGE_1000:
s1_partition(s2)
@bench('("AB"*1000).partition("BC")', "no match, two characters", 1000)
def partition_test_no_match_two_character(STR):
s1 = STR("AB" * 1000)
s2 = STR("BC")
s1_partition = s1.partition
for x in _RANGE_1000:
s1_partition(s2)
@bench('("AB"*300+"C").partition("BC")', "late match, two characters", 1000)
def partition_test_slow_match_two_characters(STR):
s1 = STR("AB" * 300+"C")
s2 = STR("BC")
s1_partition = s1.partition
for x in _RANGE_1000:
s1_partition(s2)
@bench('s="ABC"*33; ((s+"D")*500+s+"E").partition(s+"E")',
"late match, 100 characters", 100)
def partition_test_slow_match_100_characters(STR):
m = STR("ABC"*33)
d = STR("D")
e = STR("E")
s1 = (m+d)*500 + m+e
s2 = m+e
s1_partition = s1.partition
for x in _RANGE_100:
s1_partition(s2)
#### Same for rpartition
@bench('("A"*1000).rpartition("A")', "early match, single character", 1000)
def rpartition_test_quick_match_single_character(STR):
s1 = STR("A" * 1000)
s2 = STR("A")
s1_rpartition = s1.rpartition
for x in _RANGE_1000:
s1_rpartition(s2)
@bench('("A"*1000).rpartition("B")', "no match, single character", 1000)
def rpartition_test_no_match_single_character(STR):
s1 = STR("A" * 1000)
s2 = STR("B")
s1_rpartition = s1.rpartition
for x in _RANGE_1000:
s1_rpartition(s2)
@bench('("AB"*1000).rpartition("AB")', "early match, two characters", 1000)
def rpartition_test_quick_match_two_characters(STR):
s1 = STR("AB" * 1000)
s2 = STR("AB")
s1_rpartition = s1.rpartition
for x in _RANGE_1000:
s1_rpartition(s2)
@bench('("AB"*1000).rpartition("BC")', "no match, two characters", 1000)
def rpartition_test_no_match_two_character(STR):
s1 = STR("AB" * 1000)
s2 = STR("BC")
s1_rpartition = s1.rpartition
for x in _RANGE_1000:
s1_rpartition(s2)
@bench('("C"+"AB"*300).rpartition("CA")', "late match, two characters", 1000)
def rpartition_test_slow_match_two_characters(STR):
s1 = STR("C" + "AB" * 300)
s2 = STR("CA")
s1_rpartition = s1.rpartition
for x in _RANGE_1000:
s1_rpartition(s2)
@bench('s="ABC"*33; ("E"+s+("D"+s)*500).rpartition("E"+s)',
"late match, 100 characters", 100)
def rpartition_test_slow_match_100_characters(STR):
m = STR("ABC"*33)
d = STR("D")
e = STR("E")
s1 = e + m + (d+m)*500
s2 = e + m
s1_rpartition = s1.rpartition
for x in _RANGE_100:
s1_rpartition(s2)
#### Same for split(s, 1)
@bench('("A"*1000).split("A", 1)', "early match, single character", 1000)
def split_test_quick_match_single_character(STR):
s1 = STR("A" * 1000)
s2 = STR("A")
s1_split = s1.split
for x in _RANGE_1000:
s1_split(s2, 1)
@bench('("A"*1000).split("B", 1)', "no match, single character", 1000)
def split_test_no_match_single_character(STR):
s1 = STR("A" * 1000)
s2 = STR("B")
s1_split = s1.split
for x in _RANGE_1000:
s1_split(s2, 1)
@bench('("AB"*1000).split("AB", 1)', "early match, two characters", 1000)
def split_test_quick_match_two_characters(STR):
s1 = STR("AB" * 1000)
s2 = STR("AB")
s1_split = s1.split
for x in _RANGE_1000:
s1_split(s2, 1)
@bench('("AB"*1000).split("BC", 1)', "no match, two characters", 1000)
def split_test_no_match_two_character(STR):
s1 = STR("AB" * 1000)
s2 = STR("BC")
s1_split = s1.split
for x in _RANGE_1000:
s1_split(s2, 1)
@bench('("AB"*300+"C").split("BC", 1)', "late match, two characters", 1000)
def split_test_slow_match_two_characters(STR):
s1 = STR("AB" * 300+"C")
s2 = STR("BC")
s1_split = s1.split
for x in _RANGE_1000:
s1_split(s2, 1)
@bench('s="ABC"*33; ((s+"D")*500+s+"E").split(s+"E", 1)',
"late match, 100 characters", 100)
def split_test_slow_match_100_characters(STR):
m = STR("ABC"*33)
d = STR("D")
e = STR("E")
s1 = (m+d)*500 + m+e
s2 = m+e
s1_split = s1.split
for x in _RANGE_100:
s1_split(s2, 1)
#### Same for rsplit(s, 1)
@bench('("A"*1000).rsplit("A", 1)', "early match, single character", 1000)
def rsplit_test_quick_match_single_character(STR):
s1 = STR("A" * 1000)
s2 = STR("A")
s1_rsplit = s1.rsplit
for x in _RANGE_1000:
s1_rsplit(s2, 1)
@bench('("A"*1000).rsplit("B", 1)', "no match, single character", 1000)
def rsplit_test_no_match_single_character(STR):
s1 = STR("A" * 1000)
s2 = STR("B")
s1_rsplit = s1.rsplit
for x in _RANGE_1000:
s1_rsplit(s2, 1)
@bench('("AB"*1000).rsplit("AB", 1)', "early match, two characters", 1000)
def rsplit_test_quick_match_two_characters(STR):
s1 = STR("AB" * 1000)
s2 = STR("AB")
s1_rsplit = s1.rsplit
for x in _RANGE_1000:
s1_rsplit(s2, 1)
@bench('("AB"*1000).rsplit("BC", 1)', "no match, two characters", 1000)
def rsplit_test_no_match_two_character(STR):
s1 = STR("AB" * 1000)
s2 = STR("BC")
s1_rsplit = s1.rsplit
for x in _RANGE_1000:
s1_rsplit(s2, 1)
@bench('("C"+"AB"*300).rsplit("CA", 1)', "late match, two characters", 1000)
def rsplit_test_slow_match_two_characters(STR):
s1 = STR("C" + "AB" * 300)
s2 = STR("CA")
s1_rsplit = s1.rsplit
for x in _RANGE_1000:
s1_rsplit(s2, 1)
@bench('s="ABC"*33; ("E"+s+("D"+s)*500).rsplit("E"+s, 1)',
"late match, 100 characters", 100)
def rsplit_test_slow_match_100_characters(STR):
m = STR("ABC"*33)
d = STR("D")
e = STR("E")
s1 = e + m + (d+m)*500
s2 = e + m
s1_rsplit = s1.rsplit
for x in _RANGE_100:
s1_rsplit(s2, 1)
#### Benchmark the operator-based methods
@bench('"A"*10', "repeat 1 character 10 times", 1000)
def repeat_single_10_times(STR):
s = STR("A")
for x in _RANGE_1000:
s * 10
@bench('"A"*1000', "repeat 1 character 1000 times", 1000)
def repeat_single_1000_times(STR):
s = STR("A")
for x in _RANGE_1000:
s * 1000
@bench('"ABCDE"*10', "repeat 5 characters 10 times", 1000)
def repeat_5_10_times(STR):
s = STR("ABCDE")
for x in _RANGE_1000:
s * 10
@bench('"ABCDE"*1000', "repeat 5 characters 1000 times", 1000)
def repeat_5_1000_times(STR):
s = STR("ABCDE")
for x in _RANGE_1000:
s * 1000
# + for concat
@bench('"Andrew"+"Dalke"', "concat two strings", 1000)
def concat_two_strings(STR):
s1 = STR("Andrew")
s2 = STR("Dalke")
for x in _RANGE_1000:
s1+s2
@bench('s1+s2+s3+s4+...+s20', "concat 20 strings of words length 4 to 15",
1000)
def concat_many_strings(STR):
s1=STR('TIXSGYNREDCVBHJ')
s2=STR('PUMTLXBZVDO')
s3=STR('FVZNJ')
s4=STR('OGDXUW')
s5=STR('WEIMRNCOYVGHKB')
s6=STR('FCQTNMXPUZH')
s7=STR('TICZJYRLBNVUEAK')
s8=STR('REYB')
s9=STR('PWUOQ')
s10=STR('EQHCMKBS')
s11=STR('AEVDFOH')
s12=STR('IFHVD')
s13=STR('JGTCNLXWOHQ')
s14=STR('ITSKEPYLROZAWXF')
s15=STR('THEK')
s16=STR('GHPZFBUYCKMNJIT')
s17=STR('JMUZ')
s18=STR('WLZQMTB')
s19=STR('KPADCBW')
s20=STR('TNJHZQAGBU')
for x in _RANGE_1000:
(s1 + s2+ s3+ s4+ s5+ s6+ s7+ s8+ s9+s10+
s11+s12+s13+s14+s15+s16+s17+s18+s19+s20)
#### Benchmark join
def get_bytes_yielding_seq(STR, arg):
if STR is BYTES and sys.version_info >= (3,):
raise UnsupportedType
return STR(arg)
@bench('"A".join("")',
"join empty string, with 1 character sep", 100)
def join_empty_single(STR):
sep = STR("A")
s2 = get_bytes_yielding_seq(STR, "")
sep_join = sep.join
for x in _RANGE_100:
sep_join(s2)
@bench('"ABCDE".join("")',
"join empty string, with 5 character sep", 100)
def join_empty_5(STR):
sep = STR("ABCDE")
s2 = get_bytes_yielding_seq(STR, "")
sep_join = sep.join
for x in _RANGE_100:
sep_join(s2)
@bench('"A".join("ABC..Z")',
"join string with 26 characters, with 1 character sep", 1000)
def join_alphabet_single(STR):
sep = STR("A")
s2 = get_bytes_yielding_seq(STR, "ABCDEFGHIJKLMnOPQRSTUVWXYZ")
sep_join = sep.join
for x in _RANGE_1000:
sep_join(s2)
@bench('"ABCDE".join("ABC..Z")',
"join string with 26 characters, with 5 character sep", 1000)
def join_alphabet_5(STR):
sep = STR("ABCDE")
s2 = get_bytes_yielding_seq(STR, "ABCDEFGHIJKLMnOPQRSTUVWXYZ")
sep_join = sep.join
for x in _RANGE_1000:
sep_join(s2)
@bench('"A".join(list("ABC..Z"))',
"join list of 26 characters, with 1 character sep", 1000)
def join_alphabet_list_single(STR):
sep = STR("A")
s2 = [STR(x) for x in "ABCDEFGHIJKLMnOPQRSTUVWXYZ"]
sep_join = sep.join
for x in _RANGE_1000:
sep_join(s2)
@bench('"ABCDE".join(list("ABC..Z"))',
"join list of 26 characters, with 5 character sep", 1000)
def join_alphabet_list_five(STR):
sep = STR("ABCDE")
s2 = [STR(x) for x in "ABCDEFGHIJKLMnOPQRSTUVWXYZ"]
sep_join = sep.join
for x in _RANGE_1000:
sep_join(s2)
@bench('"A".join(["Bob"]*100)',
"join list of 100 words, with 1 character sep", 1000)
def join_100_words_single(STR):
sep = STR("A")
s2 = [STR("Bob")]*100
sep_join = sep.join
for x in _RANGE_1000:
sep_join(s2)
@bench('"ABCDE".join(["Bob"]*100)',
"join list of 100 words, with 5 character sep", 1000)
def join_100_words_5(STR):
sep = STR("ABCDE")
s2 = [STR("Bob")]*100
sep_join = sep.join
for x in _RANGE_1000:
sep_join(s2)
#### split tests
@bench('("Here are some words. "*2).split()', "split whitespace (small)", 1000)
def whitespace_split(STR):
s = STR("Here are some words. "*2)
s_split = s.split
for x in _RANGE_1000:
s_split()
@bench('("Here are some words. "*2).rsplit()', "split whitespace (small)", 1000)
def whitespace_rsplit(STR):
s = STR("Here are some words. "*2)
s_rsplit = s.rsplit
for x in _RANGE_1000:
s_rsplit()
@bench('("Here are some words. "*2).split(None, 1)',
"split 1 whitespace", 1000)
def whitespace_split_1(STR):
s = STR("Here are some words. "*2)
s_split = s.split
N = None
for x in _RANGE_1000:
s_split(N, 1)
@bench('("Here are some words. "*2).rsplit(None, 1)',
"split 1 whitespace", 1000)
def whitespace_rsplit_1(STR):
s = STR("Here are some words. "*2)
s_rsplit = s.rsplit
N = None
for x in _RANGE_1000:
s_rsplit(N, 1)
@bench('("Here are some words. "*2).partition(" ")',
"split 1 whitespace", 1000)
def whitespace_partition(STR):
sep = STR(" ")
s = STR("Here are some words. "*2)
s_partition = s.partition
for x in _RANGE_1000:
s_partition(sep)
@bench('("Here are some words. "*2).rpartition(" ")',
"split 1 whitespace", 1000)
def whitespace_rpartition(STR):
sep = STR(" ")
s = STR("Here are some words. "*2)
s_rpartition = s.rpartition
for x in _RANGE_1000:
s_rpartition(sep)
human_text = """\
Python is a dynamic object-oriented programming language that can be
used for many kinds of software development. It offers strong support
for integration with other languages and tools, comes with extensive
standard libraries, and can be learned in a few days. Many Python
programmers report substantial productivity gains and feel the language
encourages the development of higher quality, more maintainable code.
Python runs on Windows, Linux/Unix, Mac OS X, Amiga, Palm
Handhelds, and Nokia mobile phones. Python has also been ported to the
Java and .NET virtual machines.
Python is distributed under an OSI-approved open source license that
makes it free to use, even for commercial products.
"""*25
human_text_bytes = bytes_from_str(human_text)
human_text_unicode = unicode_from_str(human_text)
def _get_human_text(STR):
if STR is UNICODE:
return human_text_unicode
if STR is BYTES:
return human_text_bytes
raise AssertionError
@bench('human_text.split()', "split whitespace (huge)", 10)
def whitespace_split_huge(STR):
s = _get_human_text(STR)
s_split = s.split
for x in _RANGE_10:
s_split()
@bench('human_text.rsplit()', "split whitespace (huge)", 10)
def whitespace_rsplit_huge(STR):
s = _get_human_text(STR)
s_rsplit = s.rsplit
for x in _RANGE_10:
s_rsplit()
@bench('"this\\nis\\na\\ntest\\n".split("\\n")', "split newlines", 1000)
def newlines_split(STR):
s = STR("this\nis\na\ntest\n")
s_split = s.split
nl = STR("\n")
for x in _RANGE_1000:
s_split(nl)
@bench('"this\\nis\\na\\ntest\\n".rsplit("\\n")', "split newlines", 1000)
def newlines_rsplit(STR):
s = STR("this\nis\na\ntest\n")
s_rsplit = s.rsplit
nl = STR("\n")
for x in _RANGE_1000:
s_rsplit(nl)
@bench('"this\\nis\\na\\ntest\\n".splitlines()', "split newlines", 1000)
def newlines_splitlines(STR):
s = STR("this\nis\na\ntest\n")
s_splitlines = s.splitlines
for x in _RANGE_1000:
s_splitlines()
## split text with 2000 newlines
def _make_2000_lines():
import random
r = random.Random(100)
chars = list(map(chr, range(32, 128)))
i = 0
while i < len(chars):
chars[i] = " "
i += r.randrange(9)
s = "".join(chars)
s = s*4
words = []
for i in range(2000):
start = r.randrange(96)
n = r.randint(5, 65)
words.append(s[start:start+n])
return "\n".join(words)+"\n"
_text_with_2000_lines = _make_2000_lines()
_text_with_2000_lines_bytes = bytes_from_str(_text_with_2000_lines)
_text_with_2000_lines_unicode = unicode_from_str(_text_with_2000_lines)
def _get_2000_lines(STR):
if STR is UNICODE:
return _text_with_2000_lines_unicode
if STR is BYTES:
return _text_with_2000_lines_bytes
raise AssertionError
@bench('"...text...".split("\\n")', "split 2000 newlines", 10)
def newlines_split_2000(STR):
s = _get_2000_lines(STR)
s_split = s.split
nl = STR("\n")
for x in _RANGE_10:
s_split(nl)
@bench('"...text...".rsplit("\\n")', "split 2000 newlines", 10)
def newlines_rsplit_2000(STR):
s = _get_2000_lines(STR)
s_rsplit = s.rsplit
nl = STR("\n")
for x in _RANGE_10:
s_rsplit(nl)
@bench('"...text...".splitlines()', "split 2000 newlines", 10)
def newlines_splitlines_2000(STR):
s = _get_2000_lines(STR)
s_splitlines = s.splitlines
for x in _RANGE_10:
s_splitlines()
## split text on "--" characters
@bench(
'"this--is--a--test--of--the--emergency--broadcast--system".split("--")',
"split on multicharacter separator (small)", 1000)
def split_multichar_sep_small(STR):
s = STR("this--is--a--test--of--the--emergency--broadcast--system")
s_split = s.split
pat = STR("--")
for x in _RANGE_1000:
s_split(pat)
@bench(
'"this--is--a--test--of--the--emergency--broadcast--system".rsplit("--")',
"split on multicharacter separator (small)", 1000)
def rsplit_multichar_sep_small(STR):
s = STR("this--is--a--test--of--the--emergency--broadcast--system")
s_rsplit = s.rsplit
pat = STR("--")
for x in _RANGE_1000:
s_rsplit(pat)
## split dna text on "ACTAT" characters
@bench('dna.split("ACTAT")',
"split on multicharacter separator (dna)", 10)
def split_multichar_sep_dna(STR):
s = _get_dna(STR)
s_split = s.split
pat = STR("ACTAT")
for x in _RANGE_10:
s_split(pat)
@bench('dna.rsplit("ACTAT")',
"split on multicharacter separator (dna)", 10)
def rsplit_multichar_sep_dna(STR):
s = _get_dna(STR)
s_rsplit = s.rsplit
pat = STR("ACTAT")
for x in _RANGE_10:
s_rsplit(pat)
## split with limits
GFF3_example = "\t".join([
"I", "Genomic_canonical", "region", "357208", "396183", ".", "+", ".",
"ID=Sequence:R119;note=Clone R119%3B Genbank AF063007;Name=R119"])
@bench('GFF3_example.split("\\t")', "tab split", 1000)
def tab_split_no_limit(STR):
sep = STR("\t")
s = STR(GFF3_example)
s_split = s.split
for x in _RANGE_1000:
s_split(sep)
@bench('GFF3_example.split("\\t", 8)', "tab split", 1000)
def tab_split_limit(STR):
sep = STR("\t")
s = STR(GFF3_example)
s_split = s.split
for x in _RANGE_1000:
s_split(sep, 8)
@bench('GFF3_example.rsplit("\\t")', "tab split", 1000)
def tab_rsplit_no_limit(STR):
sep = STR("\t")
s = STR(GFF3_example)
s_rsplit = s.rsplit
for x in _RANGE_1000:
s_rsplit(sep)
@bench('GFF3_example.rsplit("\\t", 8)', "tab split", 1000)
def tab_rsplit_limit(STR):
sep = STR("\t")
s = STR(GFF3_example)
s_rsplit = s.rsplit
for x in _RANGE_1000:
s_rsplit(sep, 8)
#### Count characters
@bench('...text.with.2000.newlines.count("\\n")',
"count newlines", 10)
def count_newlines(STR):
s = _get_2000_lines(STR)
s_count = s.count
nl = STR("\n")
for x in _RANGE_10:
s_count(nl)
# Orchid sequences concatenated, from Biopython
_dna = """
CGTAACAAGGTTTCCGTAGGTGAACCTGCGGAAGGATCATTGTTGAGATCACATAATAATTGATCGGGTT
AATCTGGAGGATCTGTTTACTTTGGTCACCCATGAGCATTTGCTGTTGAAGTGACCTAGAATTGCCATCG
AGCCTCCTTGGGAGCTTTCTTGTTGGCGAGATCTAAACCCTTGCCCGGCGCAGTTTTGCTCCAAGTCGTT
TGACACATAATTGGTGAAGGGGGTGGCATCCTTCCCTGACCCTCCCCCAACTATTTTTTTAACAACTCTC
AGCAACGGAGACTCAGTCTTCGGCAAATGCGATAAATGGTGTGAATTGCAGAATCCCGTGCACCATCGAG
TCTTTGAACGCAAGTTGCGCCCGAGGCCATCAGGCCAAGGGCACGCCTGCCTGGGCATTGCGAGTCATAT
CTCTCCCTTAACGAGGCTGTCCATACATACTGTTCAGCCGGTGCGGATGTGAGTTTGGCCCCTTGTTCTT
TGGTACGGGGGGTCTAAGAGCTGCATGGGCTTTTGATGGTCCTAAATACGGCAAGAGGTGGACGAACTAT
GCTACAACAAAATTGTTGTGCAGAGGCCCCGGGTTGTCGTATTAGATGGGCCACCGTAATCTGAAGACCC
TTTTGAACCCCATTGGAGGCCCATCAACCCATGATCAGTTGATGGCCATTTGGTTGCGACCCCAGGTCAG
GTGAGCAACAGCTGTCGTAACAAGGTTTCCGTAGGGTGAACTGCGGAAGGATCATTGTTGAGATCACATA
ATAATTGATCGAGTTAATCTGGAGGATCTGTTTACTTGGGTCACCCATGGGCATTTGCTGTTGAAGTGAC
CTAGATTTGCCATCGAGCCTCCTTGGGAGCATCCTTGTTGGCGATATCTAAACCCTCAATTTTTCCCCCA
ATCAAATTACACAAAATTGGTGGAGGGGGTGGCATTCTTCCCTTACCCTCCCCCAAATATTTTTTTAACA
ACTCTCAGCAACGGATATCTCAGCTCTTGCATCGATGAAGAACCCACCGAAATGCGATAAATGGTGTGAA
TTGCAGAATCCCGTGAACCATCGAGTCTTTGAACGCAAGTTGCGCCCGAGGCCATCAGGCCAAGGGCACG
CCTGCCTGGGCATTGCGAGTCATATCTCTCCCTTAACGAGGCTGTCCATACATACTGTTCAGCCGGTGCG
GATGTGAGTTTGGCCCCTTGTTCTTTGGTACGGGGGGTCTAAGAGATGCATGGGCTTTTGATGGTCCTAA
ATACGGCAAGAGGTGGACGAACTATGCTACAACAAAATTGTTGTGCAAAGGCCCCGGGTTGTCGTATAAG
ATGGGCCACCGATATCTGAAGACCCTTTTGGACCCCATTGGAGCCCATCAACCCATGTCAGTTGATGGCC
ATTCGTAACAAGGTTTCCGTAGGTGAACCTGCGGAAGGATCATTGTTGAGATCACATAATAATTGATCGA
GTTAATCTGGAGGATCTGTTTACTTGGGTCACCCATGGGCATTTGCTGTTGAAGTGACCTAGATTTGCCA
TCGAGCCTCCTTGGGAGCTTTCTTGTTGGCGATATCTAAACCCTTGCCCGGCAGAGTTTTGGGAATCCCG
TGAACCATCGAGTCTTTGAACGCAAGTTGCGCCCGAGGCCATCAGGCCAAGGGCACGCCTGCCTGGGCAT
TGCGAGTCATATCTCTCCCTTAACGAGGCTGTCCATACACACCTGTTCAGCCGGTGCGGATGTGAGTTTG
GCCCCTTGTTCTTTGGTACGGGGGGTCTAAGAGCTGCATGGGCTTTTGATGGTCCTAAATACGGCAAGAG
GTGGACGAACTATGCTACAACAAAATTGTTGTGCAAAGGCCCCGGGTTGTCGTATTAGATGGGCCACCAT
AATCTGAAGACCCTTTTGAACCCCATTGGAGGCCCATCAACCCATGATCAGTTGATGGCCATTTGGTTGC
GACCCAGTCAGGTGAGGGTAGGTGAACCTGCGGAAGGATCATTGTTGAGATCACATAATAATTGATCGAG
TTAATCTGGAGGATCTGTTTACTTTGGTCACCCATGGGCATTTGCTGTTGAAGTGACCTAGATTTGCCAT
CGAGCCTCCTTGGGAGCTTTCTTGTTGGCGAGATCTAAACCCTTGCCCGGCGGAGTTTGGCGCCAAGTCA
TATGACACATAATTGGTGAAGGGGGTGGCATCCTGCCCTGACCCTCCCCAAATTATTTTTTTAACAACTC
TCAGCAACGGATATCTCGGCTCTTGCATCGATGAAGAACGCAGCGAAATGCGATAAATGGTGTGAATTGC
AGAATCCCGTGAACCATCGAGTCTTTGGAACGCAAGTTGCGCCCGAGGCCATCAGGCCAAGGGCACGCCT
GCCTGGGCATTGGGAATCATATCTCTCCCCTAACGAGGCTATCCAAACATACTGTTCATCCGGTGCGGAT
GTGAGTTTGGCCCCTTGTTCTTTGGTACCGGGGGTCTAAGAGCTGCATGGGCATTTGATGGTCCTCAAAA
CGGCAAGAGGTGGACGAACTATGCCACAACAAAATTGTTGTCCCAAGGCCCCGGGTTGTCGTATTAGATG
GGCCACCGTAACCTGAAGACCCTTTTGAACCCCATTGGAGGCCCATCAACCCATGATCAGTTGATGACCA
TTTGTTGCGACCCCAGTCAGCTGAGCAACCCGCTGAGTGGAAGGTCATTGCCGATATCACATAATAATTG
ATCGAGTTAATCTGGAGGATCTGTTTACTTGGTCACCCATGAGCATTTGCTGTTGAAGTGACCTAGATTT
GCCATCGAGCCTCCTTGGGAGTTTTCTTGTTGGCGAGATCTAAACCCTTGCCCGGCGGAGTTGTGCGCCA
AGTCATATGACACATAATTGGTGAAGGGGGTGGCATCCTGCCCTGACCCTCCCCAAATTATTTTTTTAAC
AACTCTCAGCAACGGATATCTCGGCTCTTGCATCGATGAAGAACGCAGCGAAATGCGATAAATGGTGTGA
ATTGCAGAATCCCGTGAACCATCGAGTCTTTGAACGCAAGTTGCGCCCGAGGCCATCAGGCCAAGGGCAC
GCCTGCCTGGGCATTGCGAGTCATATCTCTCCCTTAACGAGGCTGTCCATACATACTGTTCATCCGGTGC
GGATGTGAGTTTGGCCCCTTGTTCTTTGGTACGGGGGGTCTAAGAGCTGCATGGGCATTTGATGGTCCTC
AAAACGGCAAGAGGTGGACGAACTATGCTACAACCAAATTGTTGTCCCAAGGCCCCGGGTTGTCGTATTA
GATGGGCCACCGTAACCTGAAGACCCTTTTGAACCCCATTGGAGGCCCATCAACCCATGATCAGTTGATG
ACCATGTGTTGCGACCCCAGTCAGCTGAGCAACGCGCTGAGCGTAACAAGGTTTCCGTAGGTGGACCTCC
GGGAGGATCATTGTTGAGATCACATAATAATTGATCGAGGTAATCTGGAGGATCTGCATATTTTGGTCAC
"""
_dna = "".join(_dna.splitlines())
_dna = _dna * 25
_dna_bytes = bytes_from_str(_dna)
_dna_unicode = unicode_from_str(_dna)
def _get_dna(STR):
if STR is UNICODE:
return _dna_unicode
if STR is BYTES:
return _dna_bytes
raise AssertionError
@bench('dna.count("AACT")', "count AACT substrings in DNA example", 10)
def count_aact(STR):
seq = _get_dna(STR)
seq_count = seq.count
needle = STR("AACT")
for x in _RANGE_10:
seq_count(needle)
##### startswith and endswith
@bench('"Andrew".startswith("A")', 'startswith single character', 1000)
def startswith_single(STR):
s1 = STR("Andrew")
s2 = STR("A")
s1_startswith = s1.startswith
for x in _RANGE_1000:
s1_startswith(s2)
@bench('"Andrew".startswith("Andrew")', 'startswith multiple characters',
1000)
def startswith_multiple(STR):
s1 = STR("Andrew")
s2 = STR("Andrew")
s1_startswith = s1.startswith
for x in _RANGE_1000:
s1_startswith(s2)
@bench('"Andrew".startswith("Anders")',
'startswith multiple characters - not!', 1000)
def startswith_multiple_not(STR):
s1 = STR("Andrew")
s2 = STR("Anders")
s1_startswith = s1.startswith
for x in _RANGE_1000:
s1_startswith(s2)
# endswith
@bench('"Andrew".endswith("w")', 'endswith single character', 1000)
def endswith_single(STR):
s1 = STR("Andrew")
s2 = STR("w")
s1_endswith = s1.endswith
for x in _RANGE_1000:
s1_endswith(s2)
@bench('"Andrew".endswith("Andrew")', 'endswith multiple characters', 1000)
def endswith_multiple(STR):
s1 = STR("Andrew")
s2 = STR("Andrew")
s1_endswith = s1.endswith
for x in _RANGE_1000:
s1_endswith(s2)
@bench('"Andrew".endswith("Anders")',
'endswith multiple characters - not!', 1000)
def endswith_multiple_not(STR):
s1 = STR("Andrew")
s2 = STR("Anders")
s1_endswith = s1.endswith
for x in _RANGE_1000:
s1_endswith(s2)
#### Strip
@bench('"Hello!\\n".strip()', 'strip terminal newline', 1000)
def terminal_newline_strip_right(STR):
s = STR("Hello!\n")
s_strip = s.strip
for x in _RANGE_1000:
s_strip()
@bench('"Hello!\\n".rstrip()', 'strip terminal newline', 1000)
def terminal_newline_rstrip(STR):
s = STR("Hello!\n")
s_rstrip = s.rstrip
for x in _RANGE_1000:
s_rstrip()
@bench('"\\nHello!".strip()', 'strip terminal newline', 1000)
def terminal_newline_strip_left(STR):
s = STR("\nHello!")
s_strip = s.strip
for x in _RANGE_1000:
s_strip()
@bench('"\\nHello!\\n".strip()', 'strip terminal newline', 1000)
def terminal_newline_strip_both(STR):
s = STR("\nHello!\n")
s_strip = s.strip
for x in _RANGE_1000:
s_strip()
@bench('"\\nHello!".lstrip()', 'strip terminal newline', 1000)
def terminal_newline_lstrip(STR):
s = STR("\nHello!")
s_lstrip = s.lstrip
for x in _RANGE_1000:
s_lstrip()
@bench('s="Hello!\\n"; s[:-1] if s[-1]=="\\n" else s',
'strip terminal newline', 1000)
def terminal_newline_if_else(STR):
s = STR("Hello!\n")
NL = STR("\n")
for x in _RANGE_1000:
s[:-1] if (s[-1] == NL) else s
# Strip multiple spaces or tabs
@bench('"Hello!\\t \\t".strip()', 'strip terminal spaces and tabs', 1000)
def terminal_space_strip(STR):
    s = STR("Hello!\t \t")
s_strip = s.strip
for x in _RANGE_1000:
s_strip()
@bench('"Hello!\\t \\t".rstrip()', 'strip terminal spaces and tabs', 1000)
def terminal_space_rstrip(STR):
s = STR("Hello!\t \t")
s_rstrip = s.rstrip
for x in _RANGE_1000:
s_rstrip()
@bench('"\\t \\tHello!".lstrip()', 'strip terminal spaces and tabs', 1000)
def terminal_space_lstrip(STR):
s = STR("\t \tHello!")
s_lstrip = s.lstrip
for x in _RANGE_1000:
s_lstrip()
#### replace
@bench('"This is a test".replace(" ", "\\t")', 'replace single character',
1000)
def replace_single_character(STR):
s = STR("This is a test!")
from_str = STR(" ")
to_str = STR("\t")
s_replace = s.replace
for x in _RANGE_1000:
s_replace(from_str, to_str)
@uses_re
@bench('re.sub(" ", "\\t", "This is a test")', 'replace single character',
1000)
def replace_single_character_re(STR):
s = STR("This is a test!")
pat = re.compile(STR(" "))
to_str = STR("\t")
pat_sub = pat.sub
for x in _RANGE_1000:
pat_sub(to_str, s)
@bench('"...text.with.2000.lines...".replace("\\n", " ")',
'replace single character, big string', 10)
def replace_single_character_big(STR):
s = _get_2000_lines(STR)
from_str = STR("\n")
to_str = STR(" ")
s_replace = s.replace
for x in _RANGE_10:
s_replace(from_str, to_str)
@uses_re
@bench('re.sub("\\n", " ", "...text.with.2000.lines...")',
'replace single character, big string', 10)
def replace_single_character_big_re(STR):
s = _get_2000_lines(STR)
pat = re.compile(STR("\n"))
to_str = STR(" ")
pat_sub = pat.sub
for x in _RANGE_10:
pat_sub(to_str, s)
@bench('dna.replace("ATC", "ATT")',
'replace multiple characters, dna', 10)
def replace_multiple_characters_dna(STR):
seq = _get_dna(STR)
from_str = STR("ATC")
to_str = STR("ATT")
seq_replace = seq.replace
for x in _RANGE_10:
seq_replace(from_str, to_str)
# This increases the character count
@bench('"...text.with.2000.newlines...".replace("\\n", "\\r\\n")',
'replace and expand multiple characters, big string', 10)
def replace_multiple_character_big(STR):
s = _get_2000_lines(STR)
from_str = STR("\n")
to_str = STR("\r\n")
s_replace = s.replace
for x in _RANGE_10:
s_replace(from_str, to_str)
# This decreases the character count
@bench('"When shall we three meet again?".replace("ee", "")',
'replace/remove multiple characters', 1000)
def replace_multiple_character_remove(STR):
s = STR("When shall we three meet again?")
from_str = STR("ee")
to_str = STR("")
s_replace = s.replace
for x in _RANGE_1000:
s_replace(from_str, to_str)
big_s = "A" + ("Z"*128*1024)
big_s_bytes = bytes_from_str(big_s)
big_s_unicode = unicode_from_str(big_s)
def _get_big_s(STR):
if STR is UNICODE: return big_s_unicode
if STR is BYTES: return big_s_bytes
raise AssertionError
# The older replace implementation counted all matches in
# the string even when it only needed to make one replacement.
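# Added note: big_s is a single "A" followed by 128 KiB of "Z"s, so
# replace(..., 1) only has one match to make; an implementation that counts
# every occurrence up front still scans the whole string, which is the cost
# the two "quick replace" benchmarks below are designed to expose.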
@bench('("A" + ("Z"*128*1024)).replace("A", "BB", 1)',
'quick replace single character match', 10)
def quick_replace_single_match(STR):
s = _get_big_s(STR)
from_str = STR("A")
to_str = STR("BB")
s_replace = s.replace
for x in _RANGE_10:
s_replace(from_str, to_str, 1)
@bench('("A" + ("Z"*128*1024)).replace("AZZ", "BBZZ", 1)',
'quick replace multiple character match', 10)
def quick_replace_multiple_match(STR):
s = _get_big_s(STR)
from_str = STR("AZZ")
to_str = STR("BBZZ")
s_replace = s.replace
for x in _RANGE_10:
s_replace(from_str, to_str, 1)
####
# CCP does a lot of this, for internationalisation of ingame messages.
_format = "The %(thing)s is %(place)s the %(location)s."
_format_dict = { "thing":"THING", "place":"PLACE", "location":"LOCATION", }
_format_bytes = bytes_from_str(_format)
_format_unicode = unicode_from_str(_format)
_format_dict_bytes = dict((bytes_from_str(k), bytes_from_str(v)) for (k,v) in _format_dict.items())
_format_dict_unicode = dict((unicode_from_str(k), unicode_from_str(v)) for (k,v) in _format_dict.items())
def _get_format(STR):
if STR is UNICODE:
return _format_unicode
if STR is BYTES:
if sys.version_info >= (3,):
raise UnsupportedType
return _format_bytes
raise AssertionError
def _get_format_dict(STR):
if STR is UNICODE:
return _format_dict_unicode
if STR is BYTES:
if sys.version_info >= (3,):
raise UnsupportedType
return _format_dict_bytes
raise AssertionError
# Formatting.
@bench('"The %(k1)s is %(k2)s the %(k3)s."%{"k1":"x","k2":"y","k3":"z",}',
'formatting a string type with a dict', 1000)
def format_with_dict(STR):
s = _get_format(STR)
d = _get_format_dict(STR)
for x in _RANGE_1000:
s % d
#### Upper- and lower- case conversion
@bench('("Where in the world is Carmen San Deigo?"*10).lower()',
"case conversion -- rare", 1000)
def lower_conversion_rare(STR):
s = STR("Where in the world is Carmen San Deigo?"*10)
s_lower = s.lower
for x in _RANGE_1000:
s_lower()
@bench('("WHERE IN THE WORLD IS CARMEN SAN DEIGO?"*10).lower()',
"case conversion -- dense", 1000)
def lower_conversion_dense(STR):
s = STR("WHERE IN THE WORLD IS CARMEN SAN DEIGO?"*10)
s_lower = s.lower
for x in _RANGE_1000:
s_lower()
@bench('("wHERE IN THE WORLD IS cARMEN sAN dEIGO?"*10).upper()',
"case conversion -- rare", 1000)
def upper_conversion_rare(STR):
s = STR("Where in the world is Carmen San Deigo?"*10)
s_upper = s.upper
for x in _RANGE_1000:
s_upper()
@bench('("where in the world is carmen san deigo?"*10).upper()',
"case conversion -- dense", 1000)
def upper_conversion_dense(STR):
s = STR("where in the world is carmen san deigo?"*10)
s_upper = s.upper
for x in _RANGE_1000:
s_upper()
# end of benchmarks
#################
class BenchTimer(timeit.Timer):
    def best(self, repeat=1):
        # Auto-calibrate: grow the iteration count by powers of ten until a
        # single timeit() run takes longer than 20 ms, then report the best
        # (minimum) per-iteration time over `repeat` runs.
        for i in range(1, 10):
            number = 10**i
            x = self.timeit(number)
            if x > 0.02:
                break
        times = [x]
        for i in range(1, repeat):
            times.append(self.timeit(number))
        return min(times) / number
def main():
(options, test_names) = parser.parse_args()
if options.bytes_only and options.unicode_only:
raise SystemExit("Only one of --8-bit and --unicode are allowed")
bench_functions = []
for (k,v) in globals().items():
if hasattr(v, "is_bench"):
if test_names:
for name in test_names:
if name in v.group:
break
else:
# Not selected, ignore
continue
if options.skip_re and hasattr(v, "uses_re"):
continue
bench_functions.append( (v.group, k, v) )
bench_functions.sort()
p("bytes\tunicode")
p("(in ms)\t(in ms)\t%\tcomment")
bytes_total = uni_total = 0.0
for title, group in itertools.groupby(bench_functions,
operator.itemgetter(0)):
# Flush buffer before each group
sys.stdout.flush()
p("="*10, title)
for (_, k, v) in group:
if hasattr(v, "is_bench"):
bytes_time = 0.0
bytes_time_s = " - "
if not options.unicode_only:
try:
bytes_time = BenchTimer("__main__.%s(__main__.BYTES)" % (k,),
"import __main__").best(REPEAT)
bytes_time_s = "%.2f" % (1000 * bytes_time)
bytes_total += bytes_time
except UnsupportedType:
bytes_time_s = "N/A"
uni_time = 0.0
uni_time_s = " - "
if not options.bytes_only:
try:
uni_time = BenchTimer("__main__.%s(__main__.UNICODE)" % (k,),
"import __main__").best(REPEAT)
uni_time_s = "%.2f" % (1000 * uni_time)
uni_total += uni_time
except UnsupportedType:
uni_time_s = "N/A"
try:
average = bytes_time/uni_time
except (TypeError, ZeroDivisionError):
average = 0.0
p("%s\t%s\t%.1f\t%s (*%d)" % (
bytes_time_s, uni_time_s, 100.*average,
v.comment, v.repeat_count))
if bytes_total == uni_total == 0.0:
p("That was zippy!")
else:
try:
ratio = bytes_total/uni_total
except ZeroDivisionError:
ratio = 0.0
p("%.2f\t%.2f\t%.1f\t%s" % (
1000*bytes_total, 1000*uni_total, 100.*ratio,
"TOTAL"))
if __name__ == "__main__":
main()
|
apache-2.0
| -5,229,531,482,914,257,000 | -5,764,400,875,337,187,000 | 28.701754 | 105 | 0.609864 | false |