repo_name (string, 5-100 chars) | path (string, 4-299 chars) | copies (990 classes) | size (string, 4-7 chars) | content (string, 666-1.03M chars) | license (15 classes) | hash (int64) | line_mean (float64, 3.17-100) | line_max (int64, 7-1k) | alpha_frac (float64, 0.25-0.98) | autogenerated (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---|
nwjs/chromium.src | content/test/gpu/gpu_tests/gpu_integration_test_unittest.py | 2 | 16019 |
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import os
import shutil
import tempfile
import unittest
import mock
import sys
import run_gpu_integration_test
import gpu_project_config
from gpu_tests import context_lost_integration_test
from gpu_tests import gpu_helper
from gpu_tests import gpu_integration_test
from gpu_tests import path_util
from gpu_tests import webgl_conformance_integration_test
from telemetry.testing import browser_test_runner
from telemetry.testing import fakes
from telemetry.internal.platform import system_info
path_util.AddDirToPathIfNeeded(path_util.GetChromiumSrcDir(), 'tools', 'perf')
from chrome_telemetry_build import chromium_config
VENDOR_NVIDIA = 0x10DE
VENDOR_AMD = 0x1002
VENDOR_INTEL = 0x8086
VENDOR_STRING_IMAGINATION = 'Imagination Technologies'
DEVICE_STRING_SGX = 'PowerVR SGX 554'
def _GetSystemInfo(
gpu='', device='', vendor_string='',
device_string='', passthrough=False, gl_renderer=''):
sys_info = {
'model_name': '',
'gpu': {
'devices': [
{'vendor_id': gpu, 'device_id': device,
'vendor_string': vendor_string, 'device_string': device_string},
],
'aux_attributes': {'passthrough_cmd_decoder': passthrough}
}
}
if gl_renderer:
sys_info['gpu']['aux_attributes']['gl_renderer'] = gl_renderer
return system_info.SystemInfo.FromDict(sys_info)
def _GetTagsToTest(browser, test_class=None, args=None):
test_class = test_class or gpu_integration_test.GpuIntegrationTest
tags = None
with mock.patch.object(
test_class, 'ExpectationsFiles', return_value=['exp.txt']):
possible_browser = fakes.FakePossibleBrowser()
possible_browser._returned_browser = browser
args = args or gpu_helper.GetMockArgs()
tags = set(test_class.GenerateTags(args, possible_browser))
return tags
def _GenerateNvidiaExampleTagsForTestClassAndArgs(test_class, args):
tags = None
with mock.patch.object(
test_class, 'ExpectationsFiles', return_value=['exp.txt']):
_ = [_ for _ in test_class.GenerateGpuTests(args)]
platform = fakes.FakePlatform('win', 'win10')
browser = fakes.FakeBrowser(platform, 'release')
browser._returned_system_info = _GetSystemInfo(
gpu=VENDOR_NVIDIA, device=0x1cb3, gl_renderer='ANGLE Direct3D9')
tags = _GetTagsToTest(browser, test_class)
return tags
class GpuIntegrationTestUnittest(unittest.TestCase):
def setUp(self):
self._test_state = {}
self._test_result = {}
def _RunGpuIntegrationTests(self, test_name, extra_args=None):
extra_args = extra_args or []
temp_file = tempfile.NamedTemporaryFile(delete=False)
temp_file.close()
test_argv = [
run_gpu_integration_test.__file__, test_name,
'--write-full-results-to=%s' % temp_file.name] + extra_args
unittest_config = chromium_config.ChromiumConfig(
top_level_dir=path_util.GetGpuTestDir(),
benchmark_dirs=[
os.path.join(path_util.GetGpuTestDir(), 'unittest_data')])
with mock.patch.object(sys, 'argv', test_argv):
with mock.patch.object(gpu_project_config, 'CONFIG', unittest_config):
try:
run_gpu_integration_test.main()
with open(temp_file.name) as f:
self._test_result = json.load(f)
finally:
temp_file.close()
def testOverrideDefaultRetryArgumentsinRunGpuIntegrationTests(self):
self._RunGpuIntegrationTests(
'run_tests_with_expectations_files', ['--retry-limit=1'])
self.assertEqual(
self._test_result['tests']['a']['b']
['unexpected-fail.html']['actual'],
'FAIL FAIL')
def testDefaultRetryArgumentsinRunGpuIntegrationTests(self):
self._RunGpuIntegrationTests('run_tests_with_expectations_files')
self.assertEqual(
self._test_result['tests']['a']['b']['expected-flaky.html']['actual'],
'FAIL FAIL FAIL')
def testTestNamePrefixGenerationInRunGpuIntegrationTests(self):
self._RunGpuIntegrationTests('simple_integration_unittest')
self.assertIn('expected_failure', self._test_result['tests'])
def testWithoutExpectationsFilesGenerateTagsReturnsEmptyList(self):
# We need to make sure that GenerateTags() returns an empty list if there
# are no expectations files returned from ExpectationsFiles(); otherwise
# Typ will raise an exception.
args = gpu_helper.GetMockArgs()
possible_browser = mock.MagicMock()
self.assertFalse(gpu_integration_test.GpuIntegrationTest.GenerateTags(
args, possible_browser))
def _TestTagGenerationForMockPlatform(self, test_class, args):
tag_set = _GenerateNvidiaExampleTagsForTestClassAndArgs(
webgl_conformance_integration_test.WebGLConformanceIntegrationTest,
args)
self.assertTrue(
set(['win', 'win10', 'd3d9', 'release',
'nvidia', 'nvidia-0x1cb3', 'no-passthrough']).issubset(tag_set))
return tag_set
def testGenerateContextLostExampleTagsForAsan(self):
args = gpu_helper.GetMockArgs(is_asan=True)
tag_set = self._TestTagGenerationForMockPlatform(
context_lost_integration_test.ContextLostIntegrationTest,
args)
self.assertIn('asan', tag_set)
self.assertNotIn('no-asan', tag_set)
def testGenerateContextLostExampleTagsForNoAsan(self):
args = gpu_helper.GetMockArgs()
tag_set = self._TestTagGenerationForMockPlatform(
context_lost_integration_test.ContextLostIntegrationTest,
args)
self.assertIn('no-asan', tag_set)
self.assertNotIn('asan', tag_set)
def testGenerateWebglConformanceExampleTagsForWebglVersion1andAsan(self):
args = gpu_helper.GetMockArgs(is_asan=True, webgl_version='1.0.0')
tag_set = self._TestTagGenerationForMockPlatform(
webgl_conformance_integration_test.WebGLConformanceIntegrationTest,
args)
self.assertTrue(set(['asan', 'webgl-version-1']).issubset(tag_set))
self.assertFalse(set(['no-asan', 'webgl-version-2']) & tag_set)
def testGenerateWebglConformanceExampleTagsForWebglVersion2andNoAsan(self):
args = gpu_helper.GetMockArgs(is_asan=False, webgl_version='2.0.0')
tag_set = self._TestTagGenerationForMockPlatform(
webgl_conformance_integration_test.WebGLConformanceIntegrationTest,
args)
self.assertTrue(set(['no-asan', 'webgl-version-2']).issubset(tag_set))
self.assertFalse(set(['asan', 'webgl-version-1']) & tag_set)
def testGenerateNvidiaExampleTags(self):
platform = fakes.FakePlatform('win', 'win10')
browser = fakes.FakeBrowser(platform, 'release')
browser._returned_system_info = _GetSystemInfo(
gpu=VENDOR_NVIDIA, device=0x1cb3, gl_renderer='ANGLE Direct3D9')
self.assertEqual(
_GetTagsToTest(browser),
set(['win', 'win10', 'release', 'nvidia', 'nvidia-0x1cb3',
'd3d9', 'no-passthrough']))
def testGenerateVendorTagUsingVendorString(self):
platform = fakes.FakePlatform('mac', 'mojave')
browser = fakes.FakeBrowser(platform, 'release')
browser._returned_system_info = _GetSystemInfo(
vendor_string=VENDOR_STRING_IMAGINATION,
device_string=DEVICE_STRING_SGX,
passthrough=True, gl_renderer='ANGLE OpenGL ES')
self.assertEqual(
_GetTagsToTest(browser),
set(['mac', 'mojave', 'release', 'imagination',
'imagination-PowerVR-SGX-554',
'opengles', 'passthrough']))
def testGenerateVendorTagUsingDeviceString(self):
platform = fakes.FakePlatform('mac', 'mojave')
browser = fakes.FakeBrowser(platform, 'release')
browser._returned_system_info = _GetSystemInfo(
vendor_string='illegal vendor string',
device_string='ANGLE (Imagination, Triangle Monster 3000, 1.0)')
self.assertEqual(
_GetTagsToTest(browser),
set(['mac', 'mojave', 'release', 'imagination',
'imagination-Triangle-Monster-3000',
'no-angle', 'no-passthrough']))
def testSimpleIntegrationTest(self):
self._RunIntegrationTest(
'simple_integration_unittest',
['unexpected_error',
'unexpected_failure'],
['expected_flaky',
'expected_failure'],
['expected_skip'],
['--retry-only-retry-on-failure', '--retry-limit=3',
'--test-name-prefix=unittest_data.integration_tests.SimpleTest.'])
# The number of browser starts includes the one call to StartBrowser at the
# beginning of the test suite run plus one for each RestartBrowser call,
# which happens after every failure.
self.assertEquals(self._test_state['num_browser_starts'], 6)
def testIntegrationTestWithBrowserFailure(self):
self._RunIntegrationTest(
'browser_start_failure_integration_unittest', [],
['unittest_data.integration_tests.BrowserStartFailureTest.restart'],
[], [])
self.assertEquals(self._test_state['num_browser_crashes'], 2)
self.assertEquals(self._test_state['num_browser_starts'], 3)
def testIntegrationTestWithBrowserCrashUponStart(self):
self._RunIntegrationTest(
'browser_crash_after_start_integration_unittest', [],
[('unittest_data.integration_tests.BrowserCrashAfterStartTest.restart')],
[], [])
self.assertEquals(self._test_state['num_browser_crashes'], 2)
self.assertEquals(self._test_state['num_browser_starts'], 3)
def testRetryLimit(self):
self._RunIntegrationTest(
'test_retry_limit',
['unittest_data.integration_tests.TestRetryLimit.unexpected_failure'],
[],
[],
['--retry-limit=2'])
# The number of attempted runs is 1 + the retry limit.
self.assertEquals(self._test_state['num_test_runs'], 3)
def _RunTestsWithExpectationsFiles(self):
self._RunIntegrationTest(
'run_tests_with_expectations_files',
['a/b/unexpected-fail.html'],
['a/b/expected-fail.html', 'a/b/expected-flaky.html'],
['should_skip'],
['--retry-limit=3', '--retry-only-retry-on-failure-tests',
('--test-name-prefix=unittest_data.integration_tests.'
'RunTestsWithExpectationsFiles.')])
def testTestFilterCommandLineArg(self):
self._RunIntegrationTest(
'run_tests_with_expectations_files',
['a/b/unexpected-fail.html'],
['a/b/expected-fail.html'],
['should_skip'],
['--retry-limit=3', '--retry-only-retry-on-failure-tests',
('--test-filter=a/b/unexpected-fail.html::a/b/expected-fail.html::'
'should_skip'),
('--test-name-prefix=unittest_data.integration_tests.'
'RunTestsWithExpectationsFiles.')])
def testUseTestExpectationsFileToHandleExpectedSkip(self):
self._RunTestsWithExpectationsFiles()
results = self._test_result['tests']['should_skip']
self.assertEqual(results['expected'], 'SKIP')
self.assertEqual(results['actual'], 'SKIP')
self.assertNotIn('is_regression', results)
def testUseTestExpectationsFileToHandleUnexpectedTestFailure(self):
self._RunTestsWithExpectationsFiles()
results = self._test_result['tests']['a']['b']['unexpected-fail.html']
self.assertEqual(results['expected'], 'PASS')
self.assertEqual(results['actual'], 'FAIL')
self.assertIn('is_regression', results)
def testUseTestExpectationsFileToHandleExpectedFailure(self):
self._RunTestsWithExpectationsFiles()
results = self._test_result['tests']['a']['b']['expected-fail.html']
self.assertEqual(results['expected'], 'FAIL')
self.assertEqual(results['actual'], 'FAIL')
self.assertNotIn('is_regression', results)
def testUseTestExpectationsFileToHandleExpectedFlakyTest(self):
self._RunTestsWithExpectationsFiles()
results = self._test_result['tests']['a']['b']['expected-flaky.html']
self.assertEqual(results['expected'], 'PASS')
self.assertEqual(results['actual'], 'FAIL FAIL FAIL PASS')
self.assertNotIn('is_regression', results)
def testRepeat(self):
self._RunIntegrationTest(
'test_repeat',
[],
['unittest_data.integration_tests.TestRepeat.success'],
[],
['--repeat=3'])
self.assertEquals(self._test_state['num_test_runs'], 3)
def testAlsoRunDisabledTests(self):
self._RunIntegrationTest(
'test_also_run_disabled_tests',
['skip', 'flaky'],
# Tests that are expected to fail and do fail are treated as test passes
['expected_failure'],
[],
['--all', '--test-name-prefix',
'unittest_data.integration_tests.TestAlsoRunDisabledTests.',
'--retry-limit=3', '--retry-only-retry-on-failure'])
self.assertEquals(self._test_state['num_flaky_test_runs'], 4)
self.assertEquals(self._test_state['num_test_runs'], 6)
def testStartBrowser_Retries(self):
class TestException(Exception):
pass
def SetBrowserAndRaiseTestException():
gpu_integration_test.GpuIntegrationTest.browser = (
mock.MagicMock())
raise TestException
gpu_integration_test.GpuIntegrationTest.browser = None
gpu_integration_test.GpuIntegrationTest.platform = None
with mock.patch.object(
gpu_integration_test.serially_executed_browser_test_case.\
SeriallyExecutedBrowserTestCase,
'StartBrowser',
side_effect=SetBrowserAndRaiseTestException) as mock_start_browser:
with mock.patch.object(
gpu_integration_test.GpuIntegrationTest,
'StopBrowser') as mock_stop_browser:
with self.assertRaises(TestException):
gpu_integration_test.GpuIntegrationTest.StartBrowser()
self.assertEqual(mock_start_browser.call_count,
gpu_integration_test._START_BROWSER_RETRIES)
self.assertEqual(mock_stop_browser.call_count,
gpu_integration_test._START_BROWSER_RETRIES)
def _RunIntegrationTest(self, test_name, failures, successes, skips,
additional_args):
config = chromium_config.ChromiumConfig(
top_level_dir=path_util.GetGpuTestDir(),
benchmark_dirs=[
os.path.join(path_util.GetGpuTestDir(), 'unittest_data')])
temp_dir = tempfile.mkdtemp()
test_results_path = os.path.join(temp_dir, 'test_results.json')
test_state_path = os.path.join(temp_dir, 'test_state.json')
try:
browser_test_runner.Run(
config,
[test_name,
'--write-full-results-to=%s' % test_results_path,
'--test-state-json-path=%s' % test_state_path] + additional_args)
with open(test_results_path) as f:
self._test_result = json.load(f)
with open(test_state_path) as f:
self._test_state = json.load(f)
actual_successes, actual_failures, actual_skips = (
self._ExtractTestResults(self._test_result))
self.assertEquals(set(actual_failures), set(failures))
self.assertEquals(set(actual_successes), set(successes))
self.assertEquals(set(actual_skips), set(skips))
finally:
shutil.rmtree(temp_dir)
def _ExtractTestResults(self, test_result):
delimiter = test_result['path_delimiter']
failures = []
successes = []
skips = []
def _IsLeafNode(node):
test_dict = node[1]
return ('expected' in test_dict and
isinstance(test_dict['expected'], basestring))
node_queues = []
for t in test_result['tests']:
node_queues.append((t, test_result['tests'][t]))
while node_queues:
node = node_queues.pop()
full_test_name, test_dict = node
if _IsLeafNode(node):
if all(res not in test_dict['expected'].split() for res in
test_dict['actual'].split()):
failures.append(full_test_name)
elif test_dict['expected'] == test_dict['actual'] == 'SKIP':
skips.append(full_test_name)
else:
successes.append(full_test_name)
else:
for k in test_dict:
node_queues.append(
('%s%s%s' % (full_test_name, delimiter, k),
test_dict[k]))
return successes, failures, skips
| bsd-3-clause | 1,460,405,917,800,884,500 | 39.350126 | 79 | 0.678694 | false |
gino3a/tm-boilerplate | tailbone/turn/__init__.py | 34 | 3644 |
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from tailbone import BaseHandler
from tailbone import as_json
from tailbone import AppError
from tailbone import DEBUG
from tailbone import PREFIX
from tailbone.compute_engine import LoadBalancer
from tailbone.compute_engine import TailboneCEInstance
from tailbone.compute_engine import STARTUP_SCRIPT_BASE
import binascii
from hashlib import sha1
import hmac
import md5
import time
import webapp2
from google.appengine.api import lib_config
from google.appengine.ext import ndb
class _ConfigDefaults(object):
SECRET = "notasecret"
REALM = "localhost"
RESTRICTED_DOMAINS = ["localhost"]
SOURCE_SNAPSHOT = None
PARAMS = {}
_config = lib_config.register('tailboneTurn', _ConfigDefaults.__dict__)
# Prefixing internal models with Tailbone to avoid clobbering when using RESTful API
class TailboneTurnInstance(TailboneCEInstance):
SOURCE_SNAPSHOT = _config.SOURCE_SNAPSHOT
PARAMS = dict(dict(TailboneCEInstance.PARAMS, **{
"name": "turn-id",
"metadata": {
"items": [
{
"key": "startup-script",
"value": STARTUP_SCRIPT_BASE + """
# load turnserver
curl -O http://rfc5766-turn-server.googlecode.com/files/turnserver-1.8.7.0-binary-linux-wheezy-ubuntu-mint-x86-64bits.tar.gz
tar xvfz turnserver-1.8.7.0-binary-linux-wheezy-ubuntu-mint-x86-64bits.tar.gz
dpkg -i rfc5766-turn-server_1.8.7.0-1_amd64.deb
apt-get -fy install
IP=$(gcutil getinstance $(hostname) 2>&1 | grep external-ip | grep -oEi "[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}")
turnserver --use-auth-secret -v -a -X $IP -f --static-auth-secret %s -r %s
""" % (_config.SECRET, _config.REALM)
},
],
}
}), **_config.PARAMS)
secret = ndb.StringProperty(default=_config.SECRET)
def credentials(username, secret=None):
timestamp = str(time.mktime(time.gmtime())).split('.')[0]
username = "{}:{}".format(username, timestamp)
if not secret:
secret = _config.SECRET
# force string
secret = str(secret)
password = hmac.new(secret, username, sha1)
password = binascii.b2a_base64(password.digest())[:-1]
return username, password
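# Editor's illustration, not part of the original module: a sketch of how the
# credential pair produced by credentials() above is typically consumed. The
# username 'alice' is a hypothetical placeholder; the secret defaults to
# _config.SECRET exactly as in the function above.
def _example_credentials_usage():  # pragma: no cover
  username, password = credentials('alice')
  # username looks like 'alice:<unix timestamp>'; password is the
  # base64-encoded HMAC-SHA1 of that username under the shared secret, the
  # pair rfc5766-turn-server expects in --use-auth-secret mode.
  return {'username': username, 'password': password}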
class TurnHandler(BaseHandler):
@as_json
def get(self):
if _config.RESTRICTED_DOMAINS:
if self.request.host_url not in _config.RESTRICTED_DOMAINS:
raise AppError("Invalid host.")
username = self.request.get("username")
if not username:
raise AppError("Must provide username.")
instance = LoadBalancer.find(TailboneTurnInstance)
if not instance:
raise AppError('Instance not found, try again later.')
username, password = credentials(username, instance.secret)
return {
"username": username,
"password": password,
"uris": [
"turn:{}:3478?transport=udp".format(instance.address),
"turn:{}:3478?transport=tcp".format(instance.address),
"turn:{}:3479?transport=udp".format(instance.address),
"turn:{}:3479?transport=tcp".format(instance.address),
],
}
app = webapp2.WSGIApplication([
(r"{}turn/?.*".format(PREFIX), TurnHandler),
], debug=DEBUG)
| apache-2.0 | -2,226,313,581,117,824,500 | 32.740741 | 124 | 0.705269 | false |
markoshorro/gem5 | src/arch/x86/isa/insts/simd64/integer/exit_media_state.py | 72 | 2182 |
# Copyright (c) 2007 The Hewlett-Packard Development Company
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
microcode = '''
def macroop EMMS {
emms
};
# FEMMS
'''
| bsd-3-clause | -8,181,221,059,882,605,000 | 48.590909 | 72 | 0.790559 | false |
srvg/ansible | lib/ansible/utils/unicode.py | 158 | 1166 |
# (c) 2012-2014, Toshio Kuratomi <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.module_utils._text import to_text
__all__ = ('unicode_wrap',)
def unicode_wrap(func, *args, **kwargs):
"""If a function returns a string, force it to be a text string.
Use with partial to ensure that filter plugins will return text values.
"""
return to_text(func(*args, **kwargs), nonstring='passthru')
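# Editor's sketch, not part of the original module: a minimal example of the
# "use with partial" pattern mentioned in the docstring above. str.upper is an
# arbitrary stand-in for a filter plugin callable.
def _example_unicode_wrap_usage():  # pragma: no cover
    from functools import partial
    upper_filter = partial(unicode_wrap, str.upper)
    # Returns u'ANSIBLE' as a text (not bytes) value.
    return upper_filter('ansible')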
| gpl-3.0 | 5,315,954,398,911,828,000 | 34.333333 | 75 | 0.730703 | false |
rickerc/ceilometer_audit | tests/storage/test_get_engine.py | 3 | 1507 |
# -*- encoding: utf-8 -*-
#
# Copyright © 2012 New Dream Network, LLC (DreamHost)
#
# Author: Doug Hellmann <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for ceilometer/storage/
"""
import mox
import testtools
from ceilometer import storage
from ceilometer.storage import impl_log
class EngineTest(testtools.TestCase):
def test_get_engine(self):
conf = mox.Mox().CreateMockAnything()
conf.database = mox.Mox().CreateMockAnything()
conf.database.connection = 'log://localhost'
engine = storage.get_engine(conf)
self.assertIsInstance(engine, impl_log.LogStorage)
def test_get_engine_no_such_engine(self):
conf = mox.Mox().CreateMockAnything()
conf.database = mox.Mox().CreateMockAnything()
conf.database.connection = 'no-such-engine://localhost'
try:
storage.get_engine(conf)
except RuntimeError as err:
self.assertIn('no-such-engine', unicode(err))
| apache-2.0 | 298,780,702,891,013,570 | 33.227273 | 75 | 0.702523 | false |
tehguy/dndtools | dndtools/dnd/spells/urls.py | 3 | 2039 |
# -*- coding: utf-8 -*-
from django.conf.urls import patterns, url
urlpatterns = patterns(
'dnd.spells.views',
# spells
url(
r'^$',
'spell_index',
name='spell_index',
),
# spells > rulebook
url(
r'^(?P<rulebook_slug>[^/]+)--(?P<rulebook_id>\d+)/$',
'spells_in_rulebook',
name='spells_in_rulebook',
),
# spells > rulebook > spell
url(
r'^(?P<rulebook_slug>[^/]+)--(?P<rulebook_id>\d+)/(?P<spell_slug>[^/]+)--(?P<spell_id>\d+)/$',
'spell_detail',
name='spell_detail',
),
# spells > descriptors
url(
r'^descriptors/$',
'spell_descriptor_list',
name='spell_descriptor_list',
),
# spells > descriptors > descriptor
url(
r'^descriptors/(?P<spell_descriptor_slug>[^/]+)/$',
'spell_descriptor_detail',
name='spell_descriptor_detail',
),
# spells > schools
url(
r'^schools/$',
'spell_school_list',
name='spell_school_list',
),
# spells > schools > detail
url(
r'^schools/(?P<spell_school_slug>[^/]+)/$',
'spell_school_detail',
name='spell_school_detail',
),
# spells > sub_schools > detail
url(
r'^sub-schools/(?P<spell_sub_school_slug>[^/]+)/$',
'spell_sub_school_detail',
name='spell_sub_school_detail',
),
# spells > domains
url(
r'^domains/$',
'spell_domain_list',
name='spell_domain_list',
),
# spells > domains > detail
url(
r'^domains/(?P<spell_domain_slug>[^/]+)/$',
'spell_domain_detail',
name='spell_domain_detail',
),
# spells > domains > detail (variant)
url(
r'^domains/(?P<rulebook_slug>[^/]+)--(?P<rulebook_id>\d+)/(?P<spell_domain_slug>[^/]+)/$',
'spell_domain_detail',
name='spell_variant_domain_detail',
),
url(
r'^verify/spell/(?P<spell_id>\d+)/$',
'spell_verify',
name='spell_verify',
),
)
| mit | -7,851,849,655,267,112,000 | 23.865854 | 102 | 0.501226 | false |
thecaffiend/jupyternb_to_c_over_websockets | server_side/driverclient/client.py | 1 | 3402 |
"""
Adapted from: https://docs.python.org/3.4/howto/sockets.html
TODO: Get this based on tornado TCPClient class instead of this half baked
thing
TODO: Do co-routines *or* callbacks. This goes for the whole thing, not just
this class.
"""
import socket
from tornado import (
gen,
)
class DriverClient:
"""
Client for talking to the c driver socket server.
TODO: Send/receive both need MSGLEN, and need to format the msgs right
(bytes-like objects).
TODO: Clean up use of coroutines vs callbacks (everywhere)
"""
def __init__(self, sock=None):
"""
Create the driver socket
sock: An already created socket to use
"""
if sock is None:
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
else:
self.sock = sock
def connect(self, host, port):
"""
Connect to the driver's socket.
host: Host IP address
port: Port to connect to
"""
self.sock.connect((host, port))
# TODO: use select to determine when to read, otherwise this will throw
# an occasional exception on read...
self.sock.setblocking(0)
@gen.coroutine
def drvsend(self, msg):
"""
Send to the driver.
msg: String message to convert to a bytes-like object and send to the
server.
"""
totalsent = 0
# TODO: for now this is a string, so just encode it and send. make more
# robust
# while totalsent < MSGLEN:
# sent = self.sock.send(msg[totalsent:])
# if sent == 0:
# raise RuntimeError("socket connection broken")
# totalsent = totalsent + sent
sent = self.sock.send(msg.encode())
return sent
@gen.coroutine
def drvreceive(self):
"""
Receive from the driver.
"""
chunks = []
bytes_recd = 0
# TODO: hack so MSGLEN is defined. fix
MSGLEN = 2048
# TODO: get chunked read working
# while bytes_recd < MSGLEN:
# chunk = self.sock.recv(min(MSGLEN - bytes_recd, 2048))
# if chunk == b'':
# raise RuntimeError("socket connection broken")
# chunks.append(chunk)
# bytes_recd = bytes_recd + len(chunk)
# return b''.join(chunks)
ret = self.sock.recv(2048)
print('Received %s from the API server' % (ret))
return ret
def close(self):
"""
Close our socket.
"""
self.sock.close()
@gen.coroutine
def handle_ws_command(self, cmd, cmd_val):
"""
Handle a command from the wsserver.
"""
print('DriverClient is handling (%s, %s)' % (cmd, cmd_val))
sent = self.drvsend("{%s, %s}" % (cmd, cmd_val))
return sent
@gen.coroutine
def handle_ws_msg(self, msg):
"""
Handle a message (dict) from the wsserver.
"""
print('DriverClient is handling %s' % (msg))
sent = self.drvsend("%s" % (msg))
return sent
# TODO: just for testing, remove
def test_echo(self):
self.connect("127.0.0.1", 60002)
self.drvsend("test")
self.drvreceive()
self.sock.close()
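# Editor's sketch, not part of the original module: a minimal end-to-end
# session with DriverClient. The host, port, and message are hypothetical
# placeholders, and drvsend/drvreceive are Tornado coroutines, so they are
# yielded from inside another coroutine here.
@gen.coroutine
def _example_session():  # pragma: no cover
    client = DriverClient()
    client.connect("127.0.0.1", 60002)
    yield client.drvsend("hello")      # send a string command to the driver
    reply = yield client.drvreceive()  # read the raw response bytes
    client.close()
    raise gen.Return(reply)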
| mit | -4,579,463,662,026,024,400 | 27.830508 | 79 | 0.554968 | false |
Endi1/Penguin | penguin/tests/tests.py | 1 | 1539 |
import unittest
import os
import shutil
from penguin.main import newSite, buildSite, publishPosts
from penguin.penguin import Penguin
class TestContentCreation(unittest.TestCase):
def test_build_project(self):
newSite('test_site')
os.chdir('test_site')
site = Penguin()
buildSite(site)
self.assertTrue(os.path.exists('site'))
self.assertTrue(os.path.exists('site/index.html'))
self.assertTrue(os.path.exists('site/about/index.html'))
os.chdir('..')
shutil.rmtree('test_site')
def test_new_page(self):
newSite('test_site')
os.chdir('test_site')
site = Penguin()
buildSite(site)
with open('new.html', 'w') as f:
f.write('This is a new page')
f.close()
buildSite(site)
self.assertTrue(os.path.exists('site/new/index.html'))
with open('site/new/index.html', 'r') as f:
self.assertEqual(f.read(), 'This is a new page')
f.close()
os.chdir('..')
shutil.rmtree('test_site')
def test_publish(self):
newSite('test_site')
os.chdir('test_site')
site = Penguin()
buildSite(site)
publishPosts()
self.assertTrue(os.path.exists('site/posts'))
self.assertTrue(os.path.exists('site/posts/first.html'))
os.chdir('..')
shutil.rmtree('test_site')
suite = unittest.TestLoader().loadTestsFromTestCase(TestContentCreation)
unittest.TextTestRunner(verbosity=2).run(suite)
| mit | -5,073,080,997,238,541,000 | 29.176471 | 72 | 0.604938 | false |
mrbean-bremen/pyfakefs | pyfakefs/extra_packages.py | 1 | 1253 |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Imports external packages that replace or emulate internal packages.
If the external module is not present, the built-in module is imported.
"""
try:
import pathlib2
pathlib = pathlib2
except ImportError:
pathlib2 = None
try:
import pathlib
except ImportError:
pathlib = None
try:
import scandir
use_scandir_package = True
use_builtin_scandir = False
except ImportError:
try:
from os import scandir # noqa: F401
use_builtin_scandir = True
use_scandir_package = False
except ImportError:
use_builtin_scandir = False
use_scandir_package = False
use_scandir = use_scandir_package or use_builtin_scandir
| apache-2.0 | 7,204,931,398,368,746,000 | 27.477273 | 74 | 0.71269 | false |
girving/tensorflow | tensorflow/python/keras/optimizer_v2/optimizer_v2.py | 1 | 55544 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Version 2 of class Optimizer."""
# pylint: disable=g-bad-name
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gradients
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.training import distribute as distribute_lib
from tensorflow.python.training import distribution_strategy_context
from tensorflow.python.training import optimizer as optimizer_v1
from tensorflow.python.training import slot_creator
from tensorflow.python.training.checkpointable import base as checkpointable
from tensorflow.python.util import nest
class _OptimizableVariable(object):
"""Interface for abstracting over variables in the optimizers."""
@abc.abstractmethod
def target(self):
"""Returns the optimization target for this variable."""
raise NotImplementedError("Calling an abstract method.")
@abc.abstractmethod
def update_op(self, optimizer, g, *args):
"""Returns the update ops for updating the variable."""
raise NotImplementedError("Calling an abstract method.")
class _RefVariableProcessor(_OptimizableVariable):
"""Processor for Variable."""
def __init__(self, v):
self._v = v
def target(self):
return self._v._ref() # pylint: disable=protected-access
def update_op(self, optimizer, g, *args):
if isinstance(g, ops.Tensor):
update_op = optimizer._apply_dense(g, self._v, *args) # pylint: disable=protected-access
if self._v.constraint is not None:
with ops.control_dependencies([update_op]):
return self._v.assign(self._v.constraint(self._v))
else:
return update_op
else:
assert isinstance(g, ops.IndexedSlices), ("Gradient ", g, " is neither a "
"tensor nor IndexedSlices.")
if self._v.constraint is not None:
raise RuntimeError(
"Cannot use a constraint function on a sparse variable.")
# pylint: disable=protected-access
return optimizer._apply_sparse_duplicate_indices(g, self._v, *args)
class _DenseReadResourceVariableProcessor(_OptimizableVariable):
"""Processor for dense ResourceVariables."""
def __init__(self, v):
self._v = v
def target(self):
return self._v
def update_op(self, optimizer, g, *args):
# pylint: disable=protected-access
update_op = optimizer._resource_apply_dense(g, self._v.op.inputs[0], *args)
if self._v.constraint is not None:
with ops.control_dependencies([update_op]):
return self._v.assign(self._v.constraint(self._v))
else:
return update_op
class _DenseResourceVariableProcessor(_OptimizableVariable):
"""Processor for dense ResourceVariables."""
def __init__(self, v):
self._v = v
def target(self):
return self._v
def update_op(self, optimizer, g, *args):
# pylint: disable=protected-access
if isinstance(g, ops.IndexedSlices):
if self._v.constraint is not None:
raise RuntimeError(
"Cannot use a constraint function on a sparse variable.")
return optimizer._resource_apply_sparse_duplicate_indices(
g.values, self._v, g.indices, *args)
update_op = optimizer._resource_apply_dense(g, self._v, *args)
if self._v.constraint is not None:
with ops.control_dependencies([update_op]):
return self._v.assign(self._v.constraint(self._v))
else:
return update_op
class _TensorProcessor(_OptimizableVariable):
"""Processor for ordinary Tensors.
Even though a Tensor can't really be updated, sometimes it is useful to
compute the gradients with respect to a Tensor using the optimizer. Updating
the Tensor is, of course, unsupported.
"""
def __init__(self, v):
self._v = v
def target(self):
return self._v
def update_op(self, optimizer, g, *args):
raise NotImplementedError("Trying to update a Tensor ", self._v)
def _get_processor(v):
"""The processor of v."""
if context.executing_eagerly():
if isinstance(v, ops.Tensor):
return _TensorProcessor(v)
else:
return _DenseResourceVariableProcessor(v)
if v.op.type == "VarHandleOp":
return _DenseResourceVariableProcessor(v)
if isinstance(v, variables.Variable):
return _RefVariableProcessor(v)
if isinstance(v, ops.Tensor):
return _TensorProcessor(v)
raise NotImplementedError("Trying to optimize unsupported type ", v)
def _var_key_v2(var):
"""Key for representing a primary variable, for looking up slots."""
# pylint: disable=protected-access
if hasattr(var, "_distributed_container"):
distributed_container = var._distributed_container()
assert distributed_container is not None
if context.executing_eagerly():
return distributed_container._unique_id
return distributed_container._shared_name
if context.executing_eagerly():
return var._unique_id
return var.op.name
def _resolve(value, name):
if callable(value):
value = value()
return ops.convert_to_tensor(value, name=name)
def _is_dynamic(value):
"""Returns true if __init__ arg `value` should be re-evaluated each step."""
if callable(value): return True
# Don't need to do anything special in graph mode, since dynamic values
# will propagate correctly automatically.
# TODO(josh11b): Add per-device caching across steps using variables for
# truly static values once we add distributed support.
if context.executing_eagerly() and isinstance(
value, resource_variable_ops.ResourceVariable):
return True
return False
class _OptimizerV2State(object):
"""Holds per-graph and per-step optimizer state.
Use _init_with_static_hyper() to create the state for a graph, and then
_copy_with_dynamic_hyper() to convert that to state for a particular step.
The difference between the two is that the former only has hyper
parameter values that are static and the latter also has values that
can change every step (according to _is_dynamic()).
"""
def __init__(self, op_name):
self._op_name = op_name
def _init_with_static_hyper(self, hyper):
"""Initialize a fresh state object from hyper dict."""
# self._hyper contains a dict from name to a dict with the Tensor values.
# This dict starts with a single item with key "None" with the hyper
# parameter value converted to a Tensor. Other items have dtype keys
# with that Tensor cast to that dtype.
with ops.init_scope():
self._hyper = {name: {None: ops.convert_to_tensor(value, name=name)}
for name, (dynamic, value) in sorted(hyper.items())
if not dynamic}
self._slots = {}
self._non_slot_dict = {}
# Extra state to help Optimizers implement Checkpointable. Holds information
# about variables which will be restored as soon as they're created.
self._deferred_dependencies = {} # Non-slot variables
self._deferred_slot_restorations = {} # Slot variables
def _copy_with_dynamic_hyper(self, hyper, distribution, non_slot_devices):
"""Create a new state object for a particular step."""
ret = _OptimizerV2State(self._op_name)
# pylint: disable=protected-access
ret._slots = self._slots
ret._non_slot_dict = self._non_slot_dict
ret._deferred_dependencies = self._deferred_dependencies
ret._deferred_slot_restorations = self._deferred_slot_restorations
ret._hyper = {name: {None: _resolve(value, name)}
for name, (dynamic, value) in sorted(hyper.items())
if dynamic}
ret._hyper.update(self._hyper)
ret._non_slot_devices = non_slot_devices
ret._distribution = distribution
return ret
def _variables(self):
"""Returns a list of all variables held by self."""
optimizer_variables = list(self._non_slot_dict.values())
for variable_dict in self._slots.values():
for slot_for_variable in variable_dict.values():
optimizer_variables.append(slot_for_variable)
# Sort variables by name so that the return is deterministic.
return sorted(optimizer_variables, key=lambda v: v.name)
def _slot_dict(self, slot_name):
"""Returns a dict for caching slots created under the given name.
Args:
slot_name: Name for the slot.
Returns:
A dict that maps primary `Variable` objects to the slot created
for that variable, under the given slot name.
"""
named_slots = self._slots.get(slot_name, None)
if named_slots is None:
named_slots = {}
self._slots[slot_name] = named_slots
return named_slots
def create_slot(self, var, val, slot_name, optional_op_name=None):
"""Find or create a slot for a variable.
Args:
var: A `Variable` object.
val: A `Tensor`. The initial value of the slot.
slot_name: Name for the slot.
optional_op_name: Name to use when scoping the Variable that
needs to be created for the slot.
Returns:
A `Variable` object.
"""
named_slots = self._slot_dict(slot_name)
var_key = _var_key_v2(var)
if var_key not in named_slots:
new_slot_variable = slot_creator.create_slot(
var, val, optional_op_name or self._op_name)
self._restore_slot_variable(
slot_name=slot_name, variable=var,
slot_variable=new_slot_variable)
named_slots[var_key] = new_slot_variable
return named_slots[var_key]
def create_slot_with_initializer(self, var, initializer, shape, dtype,
slot_name, optional_op_name=None):
"""Find or create a slot for a variable, using an Initializer.
Args:
var: A `Variable` object.
initializer: An `Initializer`. The initial value of the slot.
shape: Shape of the initial value of the slot.
dtype: Type of the value of the slot.
slot_name: Name for the slot.
optional_op_name: Name to use when scoping the Variable that
needs to be created for the slot.
Returns:
A `Variable` object.
"""
named_slots = self._slot_dict(slot_name)
var_key = _var_key_v2(var)
if var_key not in named_slots:
new_slot_variable = slot_creator.create_slot_with_initializer(
var, initializer, shape, dtype, optional_op_name or self._op_name)
self._restore_slot_variable(
slot_name=slot_name, variable=var,
slot_variable=new_slot_variable)
named_slots[var_key] = new_slot_variable
return named_slots[var_key]
def zeros_slot(self, var, slot_name, optional_op_name=None):
"""Find or create a slot initialized with 0.0.
Args:
var: A `Variable` object.
slot_name: Name for the slot.
optional_op_name: Name to use when scoping the Variable that
needs to be created for the slot.
Returns:
A `Variable` object.
"""
named_slots = self._slot_dict(slot_name)
var_key = _var_key_v2(var)
if var_key not in named_slots:
new_slot_variable = slot_creator.create_zeros_slot(
var, optional_op_name or self._op_name)
self._restore_slot_variable(
slot_name=slot_name, variable=var,
slot_variable=new_slot_variable)
named_slots[var_key] = new_slot_variable
return named_slots[var_key]
def _create_or_restore_slot_variable(
self, slot_variable_position, slot_name, variable,
optional_op_name=None):
"""Restore a slot variable's value, possibly creating it.
Called when a variable which has an associated slot variable is created or
restored. When executing eagerly, we create the slot variable with a
restoring initializer.
No new variables are created when graph building. Instead,
_restore_slot_variable catches these after normal creation and adds restore
ops to the graph. This method is nonetheless important when graph building
for the case when a slot variable has already been created but `variable`
has just been added to a dependency graph (causing us to realize that the
slot variable needs to be restored).
Args:
slot_variable_position: A `checkpointable._CheckpointPosition` object
indicating the slot variable `Checkpointable` object to be restored.
slot_name: The name of this `Optimizer`'s slot to restore into.
variable: The variable object this slot is being created for.
optional_op_name: Name to use when scoping the Variable that
needs to be created for the slot.
"""
slot_variable = self.get_slot(var=variable, name=slot_name)
if (slot_variable is None and context.executing_eagerly() and
slot_variable_position.is_simple_variable()
# Defer slot variable creation if there is an active variable creator
# scope. Generally we'd like to eagerly create/restore slot variables
# when possible, but this may mean that scopes intended to catch
# `variable` also catch its eagerly created slot variable
# unintentionally (specifically make_template would add a dependency on
# a slot variable if not for this case). Deferring is mostly harmless
# (aside from double initialization), and makes variable creator scopes
# behave the same way they do when graph building.
and not ops.get_default_graph()._variable_creator_stack): # pylint: disable=protected-access
initializer = checkpointable.CheckpointInitialValue(
checkpoint_position=slot_variable_position)
slot_variable = self.create_slot(
var=variable,
val=initializer,
slot_name=slot_name,
optional_op_name=optional_op_name)
# Optimizers do not have unconditional dependencies on their slot
# variables (nor do any other objects). They are only saved if the
# variables they were created for are also saved.
if slot_variable is not None:
# If we've either made this slot variable, or if we've pulled out an
# existing slot variable, we should restore it.
slot_variable_position.restore(slot_variable)
else:
# We didn't make the slot variable. Defer restoring until it gets created
# normally. We keep a list rather than the one with the highest restore
# UID in case slot variables have their own dependencies, in which case
# those could differ between restores.
variable_key = _var_key_v2(variable)
self._deferred_slot_restorations.setdefault(
slot_name, {}).setdefault(variable_key, []).append(
slot_variable_position)
def get_slot(self, var, name):
"""Return a slot named `name` created for `var` by the Optimizer.
Some `Optimizer` subclasses use additional variables. For example
`Momentum` and `Adagrad` use variables to accumulate updates. This method
gives access to these `Variable` objects if for some reason you need them.
Use `get_slot_names()` to get the list of slot names created by the
`Optimizer`.
Args:
var: A variable passed to `minimize()` or `apply_gradients()`.
name: A string.
Returns:
The `Variable` for the slot if it was created, `None` otherwise.
"""
named_slots = self._slots.get(name, None)
if not named_slots:
return None
return named_slots.get(_var_key_v2(var), None)
def get_slot_names(self):
"""Return a list of the names of slots created by the `Optimizer`.
See `get_slot()`.
Returns:
A list of strings.
"""
return sorted(self._slots.keys())
def create_non_slot(self, initial_value, name, colocate_with=None):
"""Add an extra variable, not associated with a slot."""
v = self._non_slot_dict.get(name, None)
if v is None:
if colocate_with is None: colocate_with = self._non_slot_devices
with self._distribution.colocate_vars_with(colocate_with):
# TODO(josh11b): Use get_variable() except for the legacy Adam use case.
v = variable_scope.variable(initial_value, name=name, trainable=False)
self._non_slot_dict[name] = v
deferred_dependencies_list = self._deferred_dependencies.pop(name, ())
for checkpoint_position in sorted(
deferred_dependencies_list,
key=lambda restore: restore.checkpoint.restore_uid,
reverse=True):
checkpoint_position.restore(v)
return v
def _restore_slot_variable(self, slot_name, variable, slot_variable):
"""Restore a newly created slot variable's value."""
variable_key = _var_key_v2(variable)
deferred_restorations = self._deferred_slot_restorations.get(
slot_name, {}).pop(variable_key, [])
# Iterate over restores, highest restore UID first to minimize the number
# of assignments.
deferred_restorations.sort(key=lambda position: position.restore_uid,
reverse=True)
for checkpoint_position in deferred_restorations:
checkpoint_position.restore(slot_variable)
def get_non_slot(self, name):
"""Returns the non-slot variable identified by `name`."""
return self._non_slot_dict.get(name, None)
def get_hyper(self, name, dtype=None):
"""Returns the `name` hyper parameter, optionally cast to `dtype`."""
dtype_dict = self._hyper[name]
# Do we have the value cast to dtype already cached? This should always
# succeed when dtype is None.
if dtype in dtype_dict:
return dtype_dict[dtype]
# Not cached, cast to dtype and save the result in the cache.
result = math_ops.cast(dtype_dict[None], dtype)
dtype_dict[dtype] = result
return result
class OptimizerV2(optimizer_v1.Optimizer):
"""Updated base class for optimizers.
This class defines the API to add Ops to train a model. You never use this
class directly, but instead instantiate one of its subclasses such as
`GradientDescentOptimizer`, `AdagradOptimizer`, or `MomentumOptimizer`.
### Usage
```python
# Create an optimizer with the desired parameters.
opt = GradientDescentOptimizer(learning_rate=0.1)
# Add Ops to the graph to minimize a cost by updating a list of variables.
# "cost" is a Tensor, and the list of variables contains tf.Variable
# objects.
opt_op = opt.minimize(cost, var_list=<list of variables>)
```
In the training program you will just have to run the returned Op.
```python
# Execute opt_op to do one step of training:
opt_op.run()
```
### Processing gradients before applying them.
Calling `minimize()` takes care of both computing the gradients and
applying them to the variables. If you want to process the gradients
before applying them you can instead use the optimizer in three steps:
1. Compute the gradients with `compute_gradients()`.
2. Process the gradients as you wish.
3. Apply the processed gradients with `apply_gradients()`.
Example:
```python
# Create an optimizer.
opt = GradientDescentOptimizer(learning_rate=0.1)
# Compute the gradients for a list of variables.
grads_and_vars = opt.compute_gradients(loss, <list of variables>)
# grads_and_vars is a list of tuples (gradient, variable). Do whatever you
# need to the 'gradient' part, for example cap them, etc.
capped_grads_and_vars = [(MyCapper(gv[0]), gv[1]) for gv in grads_and_vars]
# Ask the optimizer to apply the capped gradients.
opt.apply_gradients(capped_grads_and_vars)
```
### Gating Gradients
Both `minimize()` and `compute_gradients()` accept a `gate_gradients`
argument that controls the degree of parallelism during the application of
the gradients.
The possible values are: `GATE_NONE`, `GATE_OP`, and `GATE_GRAPH`.
<b>`GATE_NONE`</b>: Compute and apply gradients in parallel. This provides
the maximum parallelism in execution, at the cost of some non-reproducibility
in the results. For example the two gradients of `matmul` depend on the input
values: With `GATE_NONE` one of the gradients could be applied to one of the
inputs _before_ the other gradient is computed resulting in non-reproducible
results.
<b>`GATE_OP`</b>: For each Op, make sure all gradients are computed before
they are used. This prevents race conditions for Ops that generate gradients
for multiple inputs where the gradients depend on the inputs.
<b>`GATE_GRAPH`</b>: Make sure all gradients for all variables are computed
before any one of them is used. This provides the least parallelism but can
be useful if you want to process all gradients before applying any of them.
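For example (an illustrative call that reuses `opt`, `cost` and the variable
list from the usage sketch above):
```python
# Make sure all gradients are computed before any of them are applied.
opt_op = opt.minimize(cost, var_list=<list of variables>,
                      gate_gradients=GradientDescentOptimizer.GATE_GRAPH)
```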
### Slots
Some optimizer subclasses, such as `MomentumOptimizer` and `AdagradOptimizer`
allocate and manage additional variables associated with the variables to
train. These are called <i>Slots</i>. Slots have names and you can ask the
optimizer for the names of the slots that it uses. Once you have a slot name
you can ask the optimizer for the variable it created to hold the slot value.
This can be useful if you want to log or debug a training algorithm, report
stats about the slots, etc.
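For example (an illustrative sketch; `var` stands for one of the variables
being trained and `opt` for the optimizer from the usage sketch above):
```python
# Inspect the accumulator slots the optimizer created for `var`.
for slot_name in opt.get_slot_names():
  slot_variable = opt.get_slot(var, slot_name)
```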
### Non-slot variables
Some optimizer subclasses, such as `AdamOptimizer` have variables that
are not associated with the variables to train, just the step itself.
### Hyper parameters
These are arguments passed to the optimizer subclass constructor
(the `__init__` method), and then passed to `self._set_hyper()`.
They can be either regular Python values (like 1.0), tensors, or
callables. If they are callable, the callable will be called during
`apply_gradients()` to get the value for the hyper parameter.
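A minimal sketch of the pattern (illustrative only; `MyOptimizer` is a
hypothetical subclass, not one shipped with TensorFlow):
```python
class MyOptimizer(OptimizerV2):
  def __init__(self, learning_rate=0.1, name="MyOptimizer"):
    super(MyOptimizer, self).__init__(name)
    self._set_hyper("learning_rate", learning_rate)

  def _resource_apply_dense(self, grad, var, state):
    lr = state.get_hyper("learning_rate", var.dtype.base_dtype)
    return var.assign_sub(lr * grad)
```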
### State
Internal methods are passed a `state` argument with the correct
values to use for the slot and non-slot variables, and the hyper
parameters.
"""
# Values for gate_gradients.
GATE_NONE = 0
GATE_OP = 1
GATE_GRAPH = 2
def __init__(self, name):
"""Create a new Optimizer.
This must be called by the constructors of subclasses.
Note that Optimizer instances should not bind to a single graph,
and so shouldn't keep Tensors as member variables. Generally
you should be able to use the _set_hyper()/state.get_hyper()
facility instead.
Args:
name: A non-empty string. The name to use for accumulators created
for the optimizer.
Raises:
ValueError: If name is malformed.
RuntimeError: If _create_slots has been overridden instead of
_create_vars.
"""
# Note: We intentionally don't call parent __init__.
# Optimizer._create_slots was replaced by _create_vars in OptimizerV2.
if (self.__class__._create_slots.__code__ is not # pylint: disable=protected-access
OptimizerV2._create_slots.__code__):
raise RuntimeError("Override _create_vars instead of _create_slots when "
"descending from OptimizerV2 (class %s)" %
self.__class__.__name__)
if not name:
raise ValueError("Must specify the optimizer name")
self._use_locking = False
self._name = name
# Map from graph_key to state for that graph. We use the graph_key
# since it works in both eager and graph mode, and gives the outer
# graph inside functions.
tower_context = distribution_strategy_context.get_tower_context()
if tower_context is None:
# In a cross-tower context for a DistributionStrategy, which means
# only one Optimizer will be created, not one per tower.
self._per_graph_state = {}
else:
# We use get_tower_context().merge_call() to get a single dict
# shared across all model replicas when running with a
# DistributionStrategy.
self._per_graph_state = tower_context.merge_call(lambda _: {})
# Hyper parameters, and whether they should be re-evaluated every step.
self._hyper = {}
def _set_hyper(self, name, value):
self._hyper[name] = (_is_dynamic(value), value)
def minimize(self, loss, global_step=None, var_list=None,
gate_gradients=GATE_OP, aggregation_method=None,
colocate_gradients_with_ops=False, name=None,
grad_loss=None, stop_gradients=None,
scale_loss_by_num_towers=None):
"""Add operations to minimize `loss` by updating `var_list`.
This method simply combines calls `compute_gradients()` and
`apply_gradients()`. If you want to process the gradient before applying
them call `compute_gradients()` and `apply_gradients()` explicitly instead
of using this function.
Args:
loss: A `Tensor` containing the value to minimize.
global_step: Optional `Variable` to increment by one after the
variables have been updated.
var_list: Optional list or tuple of `Variable` objects to update to
minimize `loss`. Defaults to the list of variables collected in
the graph under the key `GraphKeys.TRAINABLE_VARIABLES`.
gate_gradients: How to gate the computation of gradients. Can be
`GATE_NONE`, `GATE_OP`, or `GATE_GRAPH`.
aggregation_method: Specifies the method used to combine gradient terms.
Valid values are defined in the class `AggregationMethod`.
colocate_gradients_with_ops: If True, try colocating gradients with
the corresponding op.
name: Optional name for the returned operation.
grad_loss: Optional. A `Tensor` holding the gradient computed for `loss`.
stop_gradients: Optional. A Tensor or list of tensors not to differentiate
through.
scale_loss_by_num_towers: Optional boolean. If true, scale the loss
down by the number of towers. By default, auto-detects whether this
is needed.
Returns:
An Operation that updates the variables in `var_list`. If `global_step`
was not `None`, that operation also increments `global_step`.
Raises:
ValueError: If some of the variables are not `Variable` objects.
@compatibility(eager)
When eager execution is enabled, `loss` should be a Python function that
takes elements of `var_list` as arguments and computes the value to be
minimized. If `var_list` is None, `loss` should take no arguments.
Minimization (and gradient computation) is done with respect to the
elements of `var_list` if not None, else with respect to any trainable
variables created during the execution of the `loss` function.
`gate_gradients`, `aggregation_method`, `colocate_gradients_with_ops` and
`grad_loss` are ignored when eager execution is enabled.
@end_compatibility
"""
grads_and_vars = self.compute_gradients(
loss, var_list=var_list, gate_gradients=gate_gradients,
aggregation_method=aggregation_method,
colocate_gradients_with_ops=colocate_gradients_with_ops,
grad_loss=grad_loss, stop_gradients=stop_gradients,
scale_loss_by_num_towers=scale_loss_by_num_towers)
vars_with_grad = [v for g, v in grads_and_vars if g is not None]
if not vars_with_grad:
raise ValueError(
"No gradients provided for any variable, check your graph for ops"
" that do not support gradients, between variables %s and loss %s." %
([str(v) for _, v in grads_and_vars], loss))
return self.apply_gradients(grads_and_vars, global_step=global_step,
name=name)
def compute_gradients(self, loss, var_list=None,
gate_gradients=GATE_OP,
aggregation_method=None,
colocate_gradients_with_ops=False,
grad_loss=None, stop_gradients=None,
scale_loss_by_num_towers=None):
"""Compute gradients of `loss` for the variables in `var_list`.
This is the first part of `minimize()`. It returns a list
of (gradient, variable) pairs where "gradient" is the gradient
for "variable". Note that "gradient" can be a `Tensor`, an
`IndexedSlices`, or `None` if there is no gradient for the
given variable.
Args:
loss: A Tensor containing the value to minimize or a callable taking
no arguments which returns the value to minimize. When eager execution
is enabled it must be a callable.
var_list: Optional list or tuple of `tf.Variable` to update to minimize
`loss`. Defaults to the list of variables collected in the graph
under the key `GraphKeys.TRAINABLE_VARIABLES`.
gate_gradients: How to gate the computation of gradients. Can be
`GATE_NONE`, `GATE_OP`, or `GATE_GRAPH`.
aggregation_method: Specifies the method used to combine gradient terms.
Valid values are defined in the class `AggregationMethod`.
colocate_gradients_with_ops: If True, try colocating gradients with
the corresponding op.
grad_loss: Optional. A `Tensor` holding the gradient computed for `loss`.
stop_gradients: Optional. A Tensor or list of tensors not to differentiate
through.
scale_loss_by_num_towers: Optional boolean. If true, scale the loss
down by the number of towers. By default, auto-detects whether this
is needed.
Returns:
A list of (gradient, variable) pairs. Variable is always present, but
gradient can be `None`.
Raises:
TypeError: If `var_list` contains anything else than `Variable` objects.
ValueError: If some arguments are invalid.
RuntimeError: If called with eager execution enabled and `loss` is
not callable.
@compatibility(eager)
When eager execution is enabled, `gate_gradients`, `aggregation_method`,
and `colocate_gradients_with_ops` are ignored.
@end_compatibility
"""
# TODO(josh11b): Test that we handle weight decay in a reasonable way.
if callable(loss):
with backprop.GradientTape() as tape:
if var_list is not None:
tape.watch(var_list)
loss_value = loss()
# Scale loss for number of towers (callable-loss case). In this case,
# we have to be careful to call distribute_lib.get_loss_reduction()
# *after* loss() is evaluated, so we know what loss reduction it uses.
if scale_loss_by_num_towers is None:
scale_loss_by_num_towers = (
distribute_lib.get_loss_reduction() ==
variable_scope.VariableAggregation.MEAN)
if scale_loss_by_num_towers:
num_towers = distribution_strategy_context.get_distribution_strategy(
).num_towers
if num_towers > 1:
loss_value *= 1. / num_towers
if var_list is None:
var_list = tape.watched_variables()
grads = tape.gradient(loss_value, var_list, grad_loss)
return list(zip(grads, var_list))
if context.executing_eagerly():
raise RuntimeError(
"`loss` passed to Optimizer.compute_gradients should "
"be a function when eager execution is enabled.")
# Scale loss for number of towers (non-callable-loss case).
if scale_loss_by_num_towers is None:
scale_loss_by_num_towers = (
distribute_lib.get_loss_reduction() ==
variable_scope.VariableAggregation.MEAN)
if scale_loss_by_num_towers:
num_towers = distribution_strategy_context.get_distribution_strategy(
).num_towers
if num_towers > 1:
loss *= 1. / num_towers
if gate_gradients not in [optimizer_v1.Optimizer.GATE_NONE,
optimizer_v1.Optimizer.GATE_OP,
optimizer_v1.Optimizer.GATE_GRAPH]:
raise ValueError("gate_gradients must be one of: Optimizer.GATE_NONE, "
"Optimizer.GATE_OP, Optimizer.GATE_GRAPH. Not %s" %
gate_gradients)
self._assert_valid_dtypes([loss])
if grad_loss is not None:
self._assert_valid_dtypes([grad_loss])
if var_list is None:
var_list = (
variables.trainable_variables() +
ops.get_collection(ops.GraphKeys.TRAINABLE_RESOURCE_VARIABLES))
else:
var_list = nest.flatten(var_list)
# pylint: disable=protected-access
var_list += ops.get_collection(ops.GraphKeys._STREAMING_MODEL_PORTS)
# pylint: enable=protected-access
processors = [_get_processor(v) for v in var_list]
if not var_list:
raise ValueError("No variables to optimize.")
var_refs = [p.target() for p in processors]
grads = gradients.gradients(
loss, var_refs, grad_ys=grad_loss,
gate_gradients=(gate_gradients == optimizer_v1.Optimizer.GATE_OP),
aggregation_method=aggregation_method,
colocate_gradients_with_ops=colocate_gradients_with_ops,
stop_gradients=stop_gradients)
if gate_gradients == optimizer_v1.Optimizer.GATE_GRAPH:
grads = control_flow_ops.tuple(grads)
grads_and_vars = list(zip(grads, var_list))
self._assert_valid_dtypes(
[v for g, v in grads_and_vars
if g is not None and v.dtype != dtypes.resource])
return grads_and_vars
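  # Added sketch: under eager execution `loss` must be a zero-argument
  # callable (enforced above); `model`, `features` and `labels` are assumed
  # placeholders, not names defined in this file.
  #
  #   def loss_fn():
  #     return tf.reduce_mean(tf.square(model(features) - labels))
  #   grads_and_vars = opt.compute_gradients(loss_fn,
  #                                          var_list=model.trainable_variables)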
def apply_gradients(self, grads_and_vars, global_step=None, name=None):
"""Apply gradients to variables.
This is the second part of `minimize()`. It returns an `Operation` that
applies gradients.
Args:
grads_and_vars: List of (gradient, variable) pairs as returned by
`compute_gradients()`.
global_step: Optional `Variable` to increment by one after the
variables have been updated.
name: Optional name for the returned operation. Default to the
name passed to the `Optimizer` constructor.
Returns:
An `Operation` that applies the specified gradients. If `global_step`
was not None, that operation also increments `global_step`.
Raises:
TypeError: If `grads_and_vars` is malformed.
ValueError: If none of the variables have gradients.
"""
# This is a default implementation of apply_gradients() that can be shared
# by most optimizers. It relies on the subclass implementing the following
# methods: _create_vars(), _prepare(), _apply_dense(), and _apply_sparse().
# Filter out variables with gradients of `None`.
grads_and_vars = tuple(grads_and_vars) # Make sure repeat iteration works.
if not grads_and_vars:
raise ValueError("No variables provided.")
filtered = tuple((g, v) for (g, v) in grads_and_vars if g is not None)
if not filtered:
raise ValueError("No gradients provided for any variable: %s." %
([str(v) for _, v in grads_and_vars],))
return distribution_strategy_context.get_tower_context().merge_call(
self._distributed_apply, filtered, global_step=global_step, name=name)
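  # Added example of why apply_gradients() is exposed separately: gradients
  # can be transformed between the two phases, e.g. clipped. `clip_norm` is an
  # assumed hyperparameter; `tf.clip_by_norm` is the standard TensorFlow op.
  #
  #   grads_and_vars = opt.compute_gradients(loss)
  #   clipped = [(tf.clip_by_norm(g, clip_norm), v)
  #              for g, v in grads_and_vars if g is not None]
  #   train_op = opt.apply_gradients(clipped, global_step=global_step)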
def _get_or_create_state(self, var_list=None):
"""Either looks up or creates `_OptimizerV2State`.
If any variables are available, they should be passed via the `var_list`
argument, and these will be used to determine the graph to create/retrieve
state for. Otherwise the returned state is for the current default graph.
Args:
var_list: A list of variables to extract a graph from.
Returns:
An `_OptimizerV2State` object.
"""
# Determine the graph_key from the current graph.
eager_execution = context.executing_eagerly()
if eager_execution or var_list is None:
graph = ops.get_default_graph()
else:
graph = ops._get_graph_from_inputs(var_list) # pylint: disable=protected-access
assert graph is not None
graph_key = graph._graph_key # pylint: disable=protected-access
# Get the per graph state by looking up the graph_key.
if graph_key in self._per_graph_state:
per_graph_state = self._per_graph_state[graph_key]
else:
per_graph_state = _OptimizerV2State(self._name)
per_graph_state._init_with_static_hyper(self._hyper) # pylint: disable=protected-access
self._per_graph_state[graph_key] = per_graph_state
return per_graph_state
def _distributed_apply(self, distribution, grads_and_vars, global_step, name):
"""`apply_gradients` for use with a `DistributionStrategy`."""
reduced_grads = distribution.batch_reduce(
variable_scope.VariableAggregation.SUM, grads_and_vars)
var_list = [v for _, v in grads_and_vars]
grads_and_vars = zip(reduced_grads, var_list)
unwrapped_var_list = [x for v in var_list for x in distribution.unwrap(v)]
eager_execution = context.executing_eagerly()
if eager_execution:
# Give a clear error in this case instead of "name not supported
# for Eager Tensors" when we compute non_slot_devices.
for v in unwrapped_var_list:
if isinstance(v, ops.Tensor):
raise NotImplementedError("Trying to update a Tensor ", v)
with ops.name_scope(name, self._name) as name:
per_graph_state = self._get_or_create_state(var_list=unwrapped_var_list)
# Include the current value of any dynamic hyper parameters in `state`.
non_slot_devices = distribution.non_slot_devices(var_list)
state = per_graph_state._copy_with_dynamic_hyper( # pylint: disable=protected-access
self._hyper, distribution, non_slot_devices)
# Create any slot and non-slot variables we need in `state`.
with ops.init_scope():
self._create_vars(var_list, state)
with ops.name_scope(name): # Re-enter name_scope created above
# Give the child class a chance to do something before we start
# applying gradients.
self._prepare(state)
def update(v, g):
"""Update variable `v` using gradient `g`."""
assert v is not None
# Convert the grad to Tensor or IndexedSlices if necessary, and
# look up a processor for each variable's type.
try:
g = ops.convert_to_tensor_or_indexed_slices(g)
except TypeError:
raise TypeError(
"Gradient must be convertible to a Tensor"
" or IndexedSlices, or None: %s" % g)
if not isinstance(g, (ops.Tensor, ops.IndexedSlices)):
raise TypeError(
"Gradient must be a Tensor, IndexedSlices, or None: %s" % g)
processor = _get_processor(v)
# We colocate all ops created in _apply_dense or _apply_sparse
# on the same device as the variable.
# TODO(apassos): figure out how to get the variable name here.
scope_name = "" if eager_execution else v.op.name
# device_policy is set because non-mirrored tensors will be read in
# `update_op`.
# TODO(josh11b): Make different state objects for each device to
# avoid needing to set the device_policy.
with ops.name_scope("update_" + scope_name), \
context.context().device_policy(context.DEVICE_PLACEMENT_SILENT):
return processor.update_op(self, g, state)
# Use the processors to update the variables.
update_ops = []
for grad, var in grads_and_vars:
update_ops.extend(distribution.update(var, update, grad, grouped=False))
# Give the child class a chance to do something after applying
# gradients
def finish():
# TODO(josh11b): Make different state objects for each device to
# avoid needing to set the device_policy.
with context.context().device_policy(context.DEVICE_PLACEMENT_SILENT):
return self._finish(state)
update_ops = control_flow_ops.group(update_ops)
with ops.control_dependencies([update_ops]):
finish_updates = distribution.update_non_slot(
non_slot_devices, finish, grouped=False)
# We said grouped=False, which means finish_updates is always a list.
# It will be [None] when finish() returns None.
if finish_updates == [None]:
finish_updates = [update_ops]
# Update `global_step` (if any).
if global_step is None:
apply_updates = distribution.group(finish_updates, name=name)
else:
with ops.control_dependencies(finish_updates):
def update_global_step(global_step, name):
return global_step.assign_add(1, read_value=False, name=name)
apply_updates = distribution.update(global_step, update_global_step,
name)
# Add the training op to the TRAIN_OP graph collection in graph mode.
if not eager_execution:
if isinstance(apply_updates, ops.Tensor):
apply_updates = apply_updates.op
train_op = ops.get_collection_ref(ops.GraphKeys.TRAIN_OP)
if apply_updates not in train_op:
train_op.append(apply_updates)
return apply_updates
def get_slot(self, var, name):
"""Return a slot named `name` created for `var` by the Optimizer.
Some `Optimizer` subclasses use additional variables. For example
`Momentum` and `Adagrad` use variables to accumulate updates. This method
gives access to these `Variable` objects if for some reason you need them.
Use `get_slot_names()` to get the list of slot names created by the
`Optimizer`.
Args:
var: A variable passed to `minimize()` or `apply_gradients()`.
name: A string.
Returns:
The `Variable` for the slot if it was created, `None` otherwise.
"""
state = self._get_state_for_var(var)
return state.get_slot(var, name) if state is not None else None
def get_slot_names(self):
"""Return a list of the names of slots created by the `Optimizer`.
See `get_slot()`.
Returns:
A list of strings.
"""
state = self._get_per_graph_state()
return state.get_slot_names() if state is not None else []
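  # Added sketch of the slot API: for a subclass that keeps per-variable
  # accumulators (e.g. a Momentum-style optimizer), slots can be inspected as
  # below; the slot name "momentum" is illustrative only.
  #
  #   opt.get_slot_names()                # e.g. ["momentum"]
  #   m = opt.get_slot(var, "momentum")   # None if no such slot was created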
def variables(self):
"""A list of variables which encode the current state of `Optimizer`.
Includes slot variables and additional global variables created by the
optimizer in the current default graph.
Returns:
A list of variables.
"""
state = self._get_per_graph_state()
return state._variables() if state is not None else [] # pylint: disable=protected-access
# --------------
# Methods to be implemented by subclasses if they want to use the
# inherited implementation of apply_gradients() or compute_gradients().
# --------------
def _create_vars(self, var_list, state):
"""Create all slots needed by the variables and any non-slot variables.
Args:
var_list: A list of `Variable` objects.
state: An object with these methods:
`create_slot(var, val, slot_name, optional_op_name)`,
`create_slot_with_initializer(`
`var, initializer, shape, dtype, slot_name, optional_op_name)`,
`zeros_slot(var, slot_name, optional_op_name)`,
`create_non_slot_variable(initial_value, name, colocate_with)`,
`get_hyper(name)`
"""
# No slots needed by default
pass
def _prepare(self, state):
"""Code to execute before applying gradients.
    Note that most uses of _prepare() in Optimizer have been subsumed
    by explicit support for hyper parameters in OptimizerV2.
Args:
state: An object with a `get_hyper(name)` method.
Returns:
Return value will be ignored.
"""
pass
def _apply_dense(self, grad, var, state):
"""Add ops to apply dense gradients to `var`.
Args:
grad: A `Tensor`.
var: A `Variable` object.
state: An object with `get_slot(var, name)`, `get_non_slot(self, name)`,
and `get_hyper(name)` methods.
Returns:
An `Operation`.
"""
raise NotImplementedError()
def _resource_apply_dense(self, grad, handle, state):
"""Add ops to apply dense gradients to the variable `handle`.
Args:
grad: a `Tensor` representing the gradient.
handle: a `Tensor` of dtype `resource` which points to the variable
to be updated.
state: An object with `get_slot(var, name)`, `get_non_slot(self, name)`,
and `get_hyper(name)` methods.
Returns:
An `Operation` which updates the value of the variable.
"""
raise NotImplementedError()
def _resource_apply_sparse_duplicate_indices(
self, grad, handle, indices, state):
"""Add ops to apply sparse gradients to `handle`, with repeated indices.
Optimizers which override this method must deal with repeated indices. See
the docstring of `_apply_sparse_duplicate_indices` for details. By default
the correct behavior, to sum non-unique indices and their associated
gradients, is enforced by first pre-processing `grad` and `indices` and
passing them on to `_resource_apply_sparse`. Optimizers which deal correctly
with duplicate indices may instead override this method to avoid the
overhead of summing.
Args:
grad: a `Tensor` representing the gradient for the affected indices.
handle: a `Tensor` of dtype `resource` which points to the variable
to be updated.
indices: a `Tensor` of integral type representing the indices for
which the gradient is nonzero. Indices may be repeated.
state: An object with `get_slot(var, name)`, `get_non_slot(self, name)`,
and `get_hyper(name)` methods.
Returns:
An `Operation` which updates the value of the variable.
"""
# pylint: disable=protected-access
summed_grad, unique_indices = optimizer_v1._deduplicate_indexed_slices(
values=grad, indices=indices)
# pylint: enable=protected-access
return self._resource_apply_sparse(
summed_grad, handle, unique_indices, state)
def _resource_apply_sparse(self, grad, handle, indices, state):
"""Add ops to apply sparse gradients to the variable `handle`.
Similar to `_apply_sparse`, the `indices` argument to this method has been
de-duplicated. Optimizers which deal correctly with non-unique indices may
instead override `_resource_apply_sparse_duplicate_indices` to avoid this
overhead.
Args:
grad: a `Tensor` representing the gradient for the affected indices.
handle: a `Tensor` of dtype `resource` which points to the variable
to be updated.
indices: a `Tensor` of integral type representing the indices for
which the gradient is nonzero. Indices are unique.
state: An object with `get_slot(var, name)`, `get_non_slot(self, name)`,
and `get_hyper(name)` methods.
Returns:
An `Operation` which updates the value of the variable.
"""
raise NotImplementedError()
def _apply_sparse_duplicate_indices(self, grad, var, state):
"""Add ops to apply sparse gradients to `var`, with repeated sparse indices.
Optimizers which override this method must deal with IndexedSlices objects
such as the following:
IndexedSlicesValue(values=[1, 1], indices=[0, 0], dense_shape=[1])
The correct interpretation is:
IndexedSlicesValue(values=[2], indices=[0], dense_shape=[1])
Many optimizers deal incorrectly with repeated indices when updating based
on sparse gradients (e.g. summing squares rather than squaring the sum, or
applying momentum terms multiple times). Adding first is always the correct
behavior, so this is enforced here by reconstructing the IndexedSlices to
have only unique indices, then calling _apply_sparse.
Optimizers which deal correctly with repeated indices may instead override
this method to avoid the overhead of summing indices.
Args:
grad: `IndexedSlices`.
var: A `Variable` object.
state: An object with `get_slot(var, name)`, `get_non_slot(self, name)`,
and `get_hyper(name)` methods.
Returns:
An `Operation`.
"""
# pylint: disable=protected-access
summed_values, unique_indices = optimizer_v1._deduplicate_indexed_slices(
values=grad.values, indices=grad.indices)
# pylint: enable=protected-access
gradient_no_duplicate_indices = ops.IndexedSlices(
indices=unique_indices,
values=summed_values,
dense_shape=grad.dense_shape)
return self._apply_sparse(gradient_no_duplicate_indices, var, state)
def _apply_sparse(self, grad, var, state):
"""Add ops to apply sparse gradients to `var`.
The IndexedSlices object passed to `grad` in this function is by default
pre-processed in `_apply_sparse_duplicate_indices` to remove duplicate
indices (see its docstring for details). Optimizers which can tolerate or
have correct special cases for duplicate sparse indices may override
`_apply_sparse_duplicate_indices` instead of this function, avoiding that
overhead.
Args:
grad: `IndexedSlices`, with no repeated indices.
var: A `Variable` object.
state: An object with `get_slot(var, name)`, `get_non_slot(self, name)`,
and `get_hyper(name)` methods.
Returns:
An `Operation`.
"""
raise NotImplementedError()
def _finish(self, state):
"""Do what is needed to finish the update.
This is called inside a scope colocated with any non-slot variables.
Args:
state: An object with `get_slot(var, name)`, `get_non_slot(self, name)`,
and `get_hyper(name)` methods.
Returns:
The operation to apply updates, or None if no updates.
"""
return None
# --------------
# Utility methods for subclasses.
# --------------
def _get_per_graph_state(self):
# pylint: disable=protected-access
return self._per_graph_state.get(ops.get_default_graph()._graph_key, None)
def _get_state_for_var(self, var):
# pylint: disable=protected-access
return self._per_graph_state.get(var._graph_key, None)
# --------------
# Overridden methods from Checkpointable.
# --------------
def _track_checkpointable(self, *args, **kwargs):
"""Optimizers may not track dependencies. Raises an error."""
raise NotImplementedError(
"Optimizers may not have dependencies. File a feature request if this "
"limitation bothers you.")
@property
def _checkpoint_dependencies(self):
"""From Checkpointable. Gather graph-specific non-slot variables to save."""
current_graph_non_slot_variables = []
state = self._get_per_graph_state()
if state is not None:
for name, variable_object in sorted(
state._non_slot_dict.items(), # pylint: disable=protected-access
# Avoid comparing variables
key=lambda item: item[0]):
current_graph_non_slot_variables.append(
checkpointable.CheckpointableReference(
name=name, ref=variable_object))
# Note: ignores super(); Optimizers may not have any dependencies outside of
# state objects.
return current_graph_non_slot_variables
def _lookup_dependency(self, name):
"""From Checkpointable. Find a non-slot variable in the current graph."""
state = self._get_per_graph_state()
if state is None:
return None
else:
return state.get_non_slot(name)
@property
def _deferred_dependencies(self):
"""Lets Checkpointable know where non-slot variables are created.
If necessary, creates a new state object for the current default graph.
Checkpointable will then add entries to that state's deferred dependency
dictionary. The state object will check that dictionary when creating
non-slot variables, restoring their value if an entry is found.
Returns:
A dictionary which holds deferred dependencies for the current default
graph.
"""
state = self._get_or_create_state()
return state._deferred_dependencies # pylint: disable=protected-access
def _create_or_restore_slot_variable(
self, slot_variable_position, slot_name, variable):
"""Checkpointable: Restore a slot variable's value, possibly creating it.
Called when a variable which has an associated slot variable is created or
restored.
Args:
slot_variable_position: A `checkpointable._CheckpointPosition` object
indicating the slot variable `Checkpointable` object to be restored.
slot_name: The name of this `Optimizer`'s slot to restore into.
variable: The variable object this slot is being created for.
"""
state = self._get_or_create_state(var_list=[variable])
state._create_or_restore_slot_variable( # pylint: disable=protected-access
slot_variable_position=slot_variable_position,
slot_name=slot_name,
variable=variable,
optional_op_name=self._name)
  def get_config(self):
    """Returns the config of the optimizer.
An optimizer config is a Python dictionary (serializable)
containing the configuration of an optimizer.
The same optimizer can be reinstantiated later
(without any saved state) from this configuration.
Returns:
Python dictionary.
"""
return {"name": self._name}
@classmethod
def from_config(cls, config, custom_objects=None):
"""Creates an optimizer from its config.
This method is the reverse of `get_config`,
capable of instantiating the same optimizer from the config
dictionary.
Arguments:
config: A Python dictionary, typically the output of get_config.
custom_objects: A Python dictionary mapping names to additional Python
objects used to create this optimizer, such as a function used for a
hyperparameter.
Returns:
An optimizer instance.
"""
return cls(**config)
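  # Added round-trip sketch for get_config()/from_config(); `MyOptimizer` is a
  # hypothetical subclass used only for illustration.
  #
  #   config = opt.get_config()            # e.g. {"name": "MyOptimizer"}
  #   restored = MyOptimizer.from_config(config)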
def _serialize_hyperparameter(self, hyperparameter_name):
"""Serialize a hyperparameter that can be a float, callable, or Tensor."""
return self._hyper[hyperparameter_name][1]
# --------------
# Unsupported parent methods
# --------------
def _slot_dict(self, slot_name):
raise NotImplementedError(
"_slot_dict() method unsupported in OptimizerV2")
def _get_or_make_slot(self, var, val, slot_name, op_name):
raise NotImplementedError(
"_get_or_make_slot() method unsupported in OptimizerV2")
def _get_or_make_slot_with_initializer(self, var, initializer, shape, dtype,
slot_name, op_name):
raise NotImplementedError(
"_get_or_make_slot_with_initializer() method unsupported in "
"OptimizerV2")
def _create_non_slot_variable(self, initial_value, name, colocate_with):
raise NotImplementedError(
"_create_non_slot_variable() method unsupported in OptimizerV2")
def _get_non_slot_variable(self, name, graph=None):
raise NotImplementedError(
"_get_non_slot_variable() method unsupported in OptimizerV2")
def _non_slot_variables(self):
raise NotImplementedError(
"_non_slot_variables() method unsupported in OptimizerV2")
| apache-2.0 | -3,316,315,807,232,272,400 | 39.103971 | 101 | 0.677337 | false |
prasadtalasila/INET-Vagrant-Demos | Nonce_Demo/impacket-0.9.12/impacket/testcases/dot11/test_FrameManagementAssociationResponse.py | 6 | 7476 | #!/usr/bin/env python
# sorry, this is very ugly, but I'm in python 2.5
import sys
sys.path.insert(0,"../..")
from dot11 import Dot11, Dot11Types, Dot11ManagementFrame, Dot11ManagementAssociationResponse
from ImpactDecoder import RadioTapDecoder
from binascii import hexlify
import unittest
class TestDot11ManagementAssociationResponseFrames(unittest.TestCase):
def setUp(self):
# 802.11 Management Frame
#
self.rawframe="\x00\x00\x1c\x00\xef\x18\x00\x00\xc2L\xfa\x00<\x00\x00\x00\x10\x02\x85\t\xa0\x00\xb4\x9e_\x00\x00\x16\x10\x00:\x01p\x1a\x04T\xe3\x86\x00\x18\xf8lvB\x00\x18\xf8lvB\xf0\x02\x11\x04\x00\x00\x04\xc0\x01\x08\x82\x84\x8b\x96$0Hl2\x04\x0c\x12\x18`\xdd\t\x00\x10\x18\x02\x02\xf0\x00\x00\x00f%\xdf7"
self.radiotap_decoder = RadioTapDecoder()
radiotap=self.radiotap_decoder.decode(self.rawframe)
self.assertEqual(str(radiotap.__class__), "dot11.RadioTap")
self.dot11=radiotap.child()
self.assertEqual(str(self.dot11.__class__), "dot11.Dot11")
type = self.dot11.get_type()
self.assertEqual(type,Dot11Types.DOT11_TYPE_MANAGEMENT)
subtype = self.dot11.get_subtype()
self.assertEqual(subtype,Dot11Types.DOT11_SUBTYPE_MANAGEMENT_ASSOCIATION_RESPONSE)
typesubtype = self.dot11.get_type_n_subtype()
self.assertEqual(typesubtype,Dot11Types.DOT11_TYPE_MANAGEMENT_SUBTYPE_ASSOCIATION_RESPONSE)
self.management_base=self.dot11.child()
self.assertEqual(str(self.management_base.__class__), "dot11.Dot11ManagementFrame")
self.management_association_response=self.management_base.child()
self.assertEqual(str(self.management_association_response.__class__), "dot11.Dot11ManagementAssociationResponse")
def test_01(self):
'Test Header and Tail Size field'
self.assertEqual(self.management_base.get_header_size(), 22)
self.assertEqual(self.management_base.get_tail_size(), 0)
self.assertEqual(self.management_association_response.get_header_size(), 33)
self.assertEqual(self.management_association_response.get_tail_size(), 0)
def test_02(self):
'Test Duration field'
self.assertEqual(self.management_base.get_duration(), 0x013a)
self.management_base.set_duration(0x1234)
self.assertEqual(self.management_base.get_duration(), 0x1234)
def test_03(self):
'Test Destination Address field'
addr=self.management_base.get_destination_address()
self.assertEqual(addr.tolist(), [0x70,0x1a,0x04,0x54,0xe3,0x86])
addr[0]=0x12
addr[5]=0x34
self.management_base.set_destination_address(addr)
self.assertEqual(self.management_base.get_destination_address().tolist(), [0x12,0x1a,0x04,0x54,0xe3,0x34])
def test_04(self):
'Test Source Address field'
addr=self.management_base.get_source_address()
self.assertEqual(addr.tolist(), [0x00,0x18,0xF8,0x6C,0x76,0x42])
addr[0]=0x12
addr[5]=0x34
self.management_base.set_source_address(addr)
self.assertEqual(self.management_base.get_source_address().tolist(), [0x12,0x18,0xF8,0x6C,0x76,0x34])
def test_05(self):
'Test BSSID Address field'
addr=self.management_base.get_bssid()
self.assertEqual(addr.tolist(), [0x00,0x18,0xF8,0x6C,0x76,0x42])
addr[0]=0x12
addr[5]=0x34
self.management_base.set_bssid(addr)
self.assertEqual(self.management_base.get_bssid().tolist(), [0x12,0x18,0xF8,0x6C,0x76,0x34])
def test_06(self):
'Test Sequence control field'
self.assertEqual(self.management_base.get_sequence_control(), 0x02f0)
self.management_base.set_sequence_control(0x1234)
self.assertEqual(self.management_base.get_sequence_control(), 0x1234)
def test_07(self):
'Test Fragment number field'
self.assertEqual(self.management_base.get_fragment_number(), 0x00)
        self.management_base.set_fragment_number(0xF1) # It is a 4-bit field
self.assertEqual(self.management_base.get_fragment_number(), 0x01)
def test_08(self):
'Test Sequence number field'
self.assertEqual(self.management_base.get_sequence_number(), 47)
        self.management_base.set_sequence_number(0xF234) # It is a 12-bit field
self.assertEqual(self.management_base.get_sequence_number(), 0x0234)
def test_09(self):
'Test Management Frame Data field'
frame_body="\x11\x04\x00\x00\x04\xc0\x01\x08\x82\x84\x8b\x96$0Hl2\x04\x0c\x12\x18`\xdd\t\x00\x10\x18\x02\x02\xf0\x00\x00\x00"
self.assertEqual(self.management_base.get_frame_body(), frame_body)
def test_10(self):
'Test Management Association Response Capabilities field'
self.assertEqual(self.management_association_response.get_capabilities(), 0x0411)
self.management_association_response.set_capabilities(0x4321)
self.assertEqual(self.management_association_response.get_capabilities(), 0x4321)
def test_11(self):
'Test Management Association Response Status Code field'
self.assertEqual(self.management_association_response.get_status_code(), 0x0000)
self.management_association_response.set_status_code(0x4321)
self.assertEqual(self.management_association_response.get_status_code(), 0x4321)
def test_12(self):
'Test Management Association Response Association ID field'
self.assertEqual(self.management_association_response.get_association_id(), 0xc004)
self.management_association_response.set_association_id(0x4321)
self.assertEqual(self.management_association_response.get_association_id(), 0x4321)
def test_13(self):
'Test Management Association Response Supported_rates getter/setter methods'
self.assertEqual(self.management_association_response.get_supported_rates(), (0x82, 0x84, 0x8b, 0x96, 0x24, 0x30, 0x48, 0x6c))
self.assertEqual(self.management_association_response.get_supported_rates(human_readable=True), (1.0, 2.0, 5.5, 11.0, 18.0, 24.0, 36.0, 54.0))
self.management_association_response.set_supported_rates((0x12, 0x98, 0x24, 0xb0, 0x48, 0x60))
self.assertEqual(self.management_association_response.get_supported_rates(), (0x12, 0x98, 0x24, 0xb0, 0x48, 0x60))
self.assertEqual(self.management_association_response.get_supported_rates(human_readable=True), (9.0, 12.0, 18.0, 24.0, 36.0, 48.0))
self.assertEqual(self.management_association_response.get_header_size(), 33-2)
def test_14(self):
'Test Management Vendor Specific getter/setter methods'
self.assertEqual(self.management_association_response.get_vendor_specific(), [("\x00\x10\x18","\x02\x02\xf0\x00\x00\x00")])
self.management_association_response.add_vendor_specific("\x00\x00\x40", "\x04\x04\x04\x04\x04\x04")
self.assertEqual(self.management_association_response.get_vendor_specific(),
[("\x00\x10\x18", "\x02\x02\xf0\x00\x00\x00"),
("\x00\x00\x40", "\x04\x04\x04\x04\x04\x04"),
])
self.assertEqual(self.management_association_response.get_header_size(), 33+11)
suite = unittest.TestLoader().loadTestsFromTestCase(TestDot11ManagementAssociationResponseFrames)
unittest.TextTestRunner(verbosity=2).run(suite)
| gpl-2.0 | -2,053,607,938,761,711,900 | 47.545455 | 313 | 0.686731 | false |
bcl/anaconda | tests/glade/check_markup.py | 5 | 5424 | #!/usr/bin/python3
#
# Copyright (C) 2014 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation; either version 2.1 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Author: David Shea <[email protected]>
#
"""
Python script to check that properties in glade using Pango markup contain
valid markup.
"""
# Ignore any interruptible calls
# pylint: disable=interruptible-system-call
import sys
import argparse
# Import translation methods if needed
if ('-t' in sys.argv) or ('--translate' in sys.argv):
try:
from pocketlint.translatepo import translate_all
except ImportError:
print("Unable to load po translation module")
sys.exit(99)
from pocketlint.pangocheck import markup_nodes, markup_match, markup_necessary
try:
from lxml import etree
except ImportError:
print("You need to install the python-lxml package to use check_markup.py")
sys.exit(99)
class PangoElementException(Exception):
def __init__(self, element):
Exception.__init__(self)
self.element = element
def __str__(self):
return "Invalid element %s" % self.element
def _validate_pango_markup(root):
"""Validate parsed pango markup.
:param etree.ElementTree root: The pango markup parsed as an XML ElementTree
:raises PangoElementException: If the pango markup contains unknown elements
"""
if root.tag not in markup_nodes:
raise PangoElementException(root.tag)
for child in root:
_validate_pango_markup(child)
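# Illustrative use of the helper above (not part of the original script): a
# label string is wrapped in <markup> tags exactly as check_glade_file() does
# below, parsed with lxml, then validated; the markup text here is made up.
#
#   tree = etree.fromstring("<markup><b>Bold</b> and <i>italic</i></markup>")
#   _validate_pango_markup(tree)  # raises PangoElementException on bad tags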
def check_glade_file(glade_file_path, po_map=None):
glade_success = True
with open(glade_file_path) as glade_file:
# Parse the XML
glade_tree = etree.parse(glade_file)
# Search for label properties on objects that have use_markup set to True
for label in glade_tree.xpath(".//property[@name='label' and ../property[@name='use_markup']/text() = 'True']"):
if po_map:
try:
label_texts = po_map.get(label.text, label.get("context"))
except KeyError:
continue
lang_str = " for language %s" % po_map.metadata['Language']
else:
label_texts = (label.text,)
lang_str = ""
# Wrap the label text in <markup> tags and parse the tree
for label_text in label_texts:
try:
# pylint: disable=unescaped-markup
pango_tree = etree.fromstring("<markup>%s</markup>" % label_text)
_validate_pango_markup(pango_tree)
# Check if the markup is necessary
if not markup_necessary(pango_tree):
print("Markup could be expressed as attributes at %s%s:%d" % \
(glade_file_path, lang_str, label.sourceline))
glade_success = False
except etree.XMLSyntaxError:
print("Unable to parse pango markup at %s%s:%d" % \
(glade_file_path, lang_str, label.sourceline))
glade_success = False
except PangoElementException as px:
print("Invalid pango element %s at %s%s:%d" % \
(px.element, glade_file_path, lang_str, label.sourceline))
glade_success = False
else:
if po_map:
# Check that translated markup has the same elements and attributes
if not markup_match(label.text, label_text):
print("Translated markup does not contain the same elements and attributes at %s%s:%d" % \
(glade_file_path, lang_str, label.sourceline))
glade_success = False
return glade_success
if __name__ == "__main__":
parser = argparse.ArgumentParser("Check Pango markup validity")
parser.add_argument("-t", "--translate", action='store_true',
help="Check translated strings")
parser.add_argument("-p", "--podir", action='store', type=str,
metavar='PODIR', help='Directory containing po files', default='./po')
parser.add_argument("glade_files", nargs="+", metavar="GLADE-FILE",
help='The glade file to check')
args = parser.parse_args(args=sys.argv[1:])
success = True
for file_path in args.glade_files:
if not check_glade_file(file_path):
success = False
# Now loop over all of the translations
if args.translate:
podicts = translate_all(args.podir)
for po_dict in podicts.values():
for file_path in args.glade_files:
if not check_glade_file(file_path, po_dict):
success = False
sys.exit(0 if success else 1)
| gpl-2.0 | -5,156,641,110,452,370,000 | 38.304348 | 120 | 0.603429 | false |
J861449197/edx-platform | common/djangoapps/embargo/tests/test_views.py | 136 | 3286 | """Tests for embargo app views. """
import unittest
from mock import patch
from django.test import TestCase
from django.core.urlresolvers import reverse
from django.conf import settings
from mako.exceptions import TopLevelLookupException
import ddt
from util.testing import UrlResetMixin
from embargo import messages
@unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in lms')
@ddt.ddt
class CourseAccessMessageViewTest(UrlResetMixin, TestCase):
"""Tests for the courseware access message view.
These end-points serve static content.
While we *could* check the text on each page,
    that would require changes to the test every time
the text on the page changes.
Instead, we load each page we expect to be available
(based on the configuration in `embargo.messages`)
and verify that we get the correct status code.
This will catch errors in the message configuration
(for example, moving a template and forgetting to
update the configuration appropriately).
"""
@patch.dict(settings.FEATURES, {'EMBARGO': True})
def setUp(self):
super(CourseAccessMessageViewTest, self).setUp('embargo')
@ddt.data(*messages.ENROLL_MESSAGES.keys())
def test_enrollment_messages(self, msg_key):
self._load_page('enrollment', msg_key)
@ddt.data(*messages.COURSEWARE_MESSAGES.keys())
def test_courseware_messages(self, msg_key):
self._load_page('courseware', msg_key)
@ddt.data('enrollment', 'courseware')
def test_invalid_message_key(self, access_point):
self._load_page(access_point, 'invalid', expected_status=404)
@patch.dict(settings.FEATURES, {'USE_CUSTOM_THEME': True})
@ddt.data('enrollment', 'courseware')
def test_custom_theme_override(self, access_point):
# Custom override specified for the "embargo" message
# for backwards compatibility with previous versions
# of the embargo app.
# This template isn't available by default, but we can at least
# verify that the view will look for it when the USE_CUSTOM_THEME
# feature flag is specified.
with self.assertRaisesRegexp(TopLevelLookupException, 'static_templates/theme-embargo.html'):
self._load_page(access_point, 'embargo')
@patch.dict(settings.FEATURES, {'USE_CUSTOM_THEME': True})
@ddt.data('enrollment', 'courseware')
def test_custom_theme_override_not_specified(self, access_point):
# No custom override specified for the "default" message
self._load_page(access_point, 'default')
def _load_page(self, access_point, message_key, expected_status=200):
"""Load the message page and check the status code. """
url = reverse('embargo_blocked_message', kwargs={
'access_point': access_point,
'message_key': message_key
})
response = self.client.get(url)
self.assertEqual(
response.status_code, expected_status,
msg=(
u"Unexpected status code when loading '{url}': "
u"expected {expected} but got {actual}"
).format(
url=url,
expected=expected_status,
actual=response.status_code
)
)
| agpl-3.0 | 8,906,891,392,949,215,000 | 37.209302 | 101 | 0.672246 | false |
Titan-C/sympy | sympy/concrete/products.py | 4 | 15193 | from __future__ import print_function, division
from sympy.tensor.indexed import Idx
from sympy.core.mul import Mul
from sympy.core.singleton import S
from sympy.core.symbol import symbols
from sympy.concrete.expr_with_intlimits import ExprWithIntLimits
from sympy.functions.elementary.exponential import exp, log
from sympy.polys import quo, roots
from sympy.simplify import powsimp
from sympy.core.compatibility import range
class Product(ExprWithIntLimits):
r"""Represents unevaluated products.
``Product`` represents a finite or infinite product, with the first
argument being the general form of terms in the series, and the second
argument being ``(dummy_variable, start, end)``, with ``dummy_variable``
taking all integer values from ``start`` through ``end``. In accordance
with long-standing mathematical convention, the end term is included in
the product.
Finite products
===============
For finite products (and products with symbolic limits assumed to be finite)
we follow the analogue of the summation convention described by Karr [1],
especially definition 3 of section 1.4. The product:
.. math::
\prod_{m \leq i < n} f(i)
has *the obvious meaning* for `m < n`, namely:
.. math::
\prod_{m \leq i < n} f(i) = f(m) f(m+1) \cdot \ldots \cdot f(n-2) f(n-1)
with the upper limit value `f(n)` excluded. The product over an empty set is
one if and only if `m = n`:
.. math::
\prod_{m \leq i < n} f(i) = 1 \quad \mathrm{for} \quad m = n
Finally, for all other products over empty sets we assume the following
definition:
.. math::
\prod_{m \leq i < n} f(i) = \frac{1}{\prod_{n \leq i < m} f(i)} \quad \mathrm{for} \quad m > n
It is important to note that above we define all products with the upper
limit being exclusive. This is in contrast to the usual mathematical notation,
but does not affect the product convention. Indeed we have:
.. math::
\prod_{m \leq i < n} f(i) = \prod_{i = m}^{n - 1} f(i)
where the difference in notation is intentional to emphasize the meaning,
with limits typeset on the top being inclusive.
Examples
========
>>> from sympy.abc import a, b, i, k, m, n, x
>>> from sympy import Product, factorial, oo
>>> Product(k, (k, 1, m))
Product(k, (k, 1, m))
>>> Product(k, (k, 1, m)).doit()
factorial(m)
>>> Product(k**2,(k, 1, m))
Product(k**2, (k, 1, m))
>>> Product(k**2,(k, 1, m)).doit()
factorial(m)**2
Wallis' product for pi:
>>> W = Product(2*i/(2*i-1) * 2*i/(2*i+1), (i, 1, oo))
>>> W
Product(4*i**2/((2*i - 1)*(2*i + 1)), (i, 1, oo))
Direct computation currently fails:
>>> W.doit()
Product(4*i**2/((2*i - 1)*(2*i + 1)), (i, 1, oo))
But we can approach the infinite product by a limit of finite products:
>>> from sympy import limit
>>> W2 = Product(2*i/(2*i-1)*2*i/(2*i+1), (i, 1, n))
>>> W2
Product(4*i**2/((2*i - 1)*(2*i + 1)), (i, 1, n))
>>> W2e = W2.doit()
>>> W2e
2**(-2*n)*4**n*factorial(n)**2/(RisingFactorial(1/2, n)*RisingFactorial(3/2, n))
>>> limit(W2e, n, oo)
pi/2
By the same formula we can compute sin(pi/2):
>>> from sympy import pi, gamma, simplify
>>> P = pi * x * Product(1 - x**2/k**2, (k, 1, n))
>>> P = P.subs(x, pi/2)
>>> P
pi**2*Product(1 - pi**2/(4*k**2), (k, 1, n))/2
>>> Pe = P.doit()
>>> Pe
pi**2*RisingFactorial(1 + pi/2, n)*RisingFactorial(-pi/2 + 1, n)/(2*factorial(n)**2)
>>> Pe = Pe.rewrite(gamma)
>>> Pe
pi**2*gamma(n + 1 + pi/2)*gamma(n - pi/2 + 1)/(2*gamma(1 + pi/2)*gamma(-pi/2 + 1)*gamma(n + 1)**2)
>>> Pe = simplify(Pe)
>>> Pe
sin(pi**2/2)*gamma(n + 1 + pi/2)*gamma(n - pi/2 + 1)/gamma(n + 1)**2
>>> limit(Pe, n, oo)
sin(pi**2/2)
Products with the lower limit being larger than the upper one:
>>> Product(1/i, (i, 6, 1)).doit()
120
>>> Product(i, (i, 2, 5)).doit()
120
The empty product:
>>> Product(i, (i, n, n-1)).doit()
1
An example showing that the symbolic result of a product is still
valid for seemingly nonsensical values of the limits. Then the Karr
convention allows us to give a perfectly valid interpretation to
those products by interchanging the limits according to the above rules:
>>> P = Product(2, (i, 10, n)).doit()
>>> P
2**(n - 9)
>>> P.subs(n, 5)
1/16
>>> Product(2, (i, 10, 5)).doit()
1/16
>>> 1/Product(2, (i, 6, 9)).doit()
1/16
An explicit example of the Karr summation convention applied to products:
>>> P1 = Product(x, (i, a, b)).doit()
>>> P1
x**(-a + b + 1)
>>> P2 = Product(x, (i, b+1, a-1)).doit()
>>> P2
x**(a - b - 1)
>>> simplify(P1 * P2)
1
And another one:
>>> P1 = Product(i, (i, b, a)).doit()
>>> P1
RisingFactorial(b, a - b + 1)
>>> P2 = Product(i, (i, a+1, b-1)).doit()
>>> P2
RisingFactorial(a + 1, -a + b - 1)
>>> P1 * P2
RisingFactorial(b, a - b + 1)*RisingFactorial(a + 1, -a + b - 1)
>>> simplify(P1 * P2)
1
See Also
========
Sum, summation
product
References
==========
.. [1] Michael Karr, "Summation in Finite Terms", Journal of the ACM,
Volume 28 Issue 2, April 1981, Pages 305-350
http://dl.acm.org/citation.cfm?doid=322248.322255
.. [2] http://en.wikipedia.org/wiki/Multiplication#Capital_Pi_notation
.. [3] http://en.wikipedia.org/wiki/Empty_product
"""
__slots__ = ['is_commutative']
def __new__(cls, function, *symbols, **assumptions):
obj = ExprWithIntLimits.__new__(cls, function, *symbols, **assumptions)
return obj
def _eval_rewrite_as_Sum(self, *args):
from sympy.concrete.summations import Sum
return exp(Sum(log(self.function), *self.limits))
@property
def term(self):
return self._args[0]
function = term
def _eval_is_zero(self):
# a Product is zero only if its term is zero.
return self.term.is_zero
def doit(self, **hints):
f = self.function
for index, limit in enumerate(self.limits):
i, a, b = limit
dif = b - a
if dif.is_Integer and dif < 0:
a, b = b + 1, a - 1
f = 1 / f
if isinstance(i, Idx):
i = i.label
g = self._eval_product(f, (i, a, b))
if g in (None, S.NaN):
return self.func(powsimp(f), *self.limits[index:])
else:
f = g
if hints.get('deep', True):
return f.doit(**hints)
else:
return powsimp(f)
def _eval_adjoint(self):
if self.is_commutative:
return self.func(self.function.adjoint(), *self.limits)
return None
def _eval_conjugate(self):
return self.func(self.function.conjugate(), *self.limits)
def _eval_product(self, term, limits):
from sympy.concrete.delta import deltaproduct, _has_simple_delta
from sympy.concrete.summations import summation
from sympy.functions import KroneckerDelta, RisingFactorial
(k, a, n) = limits
if k not in term.free_symbols:
if (term - 1).is_zero:
return S.One
return term**(n - a + 1)
if a == n:
return term.subs(k, a)
if term.has(KroneckerDelta) and _has_simple_delta(term, limits[0]):
return deltaproduct(term, limits)
dif = n - a
if dif.is_Integer:
return Mul(*[term.subs(k, a + i) for i in range(dif + 1)])
elif term.is_polynomial(k):
poly = term.as_poly(k)
A = B = Q = S.One
all_roots = roots(poly)
M = 0
for r, m in all_roots.items():
M += m
A *= RisingFactorial(a - r, n - a + 1)**m
Q *= (n - r)**m
if M < poly.degree():
arg = quo(poly, Q.as_poly(k))
B = self.func(arg, (k, a, n)).doit()
return poly.LC()**(n - a + 1) * A * B
elif term.is_Add:
p, q = term.as_numer_denom()
p = self._eval_product(p, (k, a, n))
q = self._eval_product(q, (k, a, n))
return p / q
elif term.is_Mul:
exclude, include = [], []
for t in term.args:
p = self._eval_product(t, (k, a, n))
if p is not None:
exclude.append(p)
else:
include.append(t)
if not exclude:
return None
else:
arg = term._new_rawargs(*include)
A = Mul(*exclude)
B = self.func(arg, (k, a, n)).doit()
return A * B
elif term.is_Pow:
if not term.base.has(k):
s = summation(term.exp, (k, a, n))
return term.base**s
elif not term.exp.has(k):
p = self._eval_product(term.base, (k, a, n))
if p is not None:
return p**term.exp
elif isinstance(term, Product):
evaluated = term.doit()
f = self._eval_product(evaluated, limits)
if f is None:
return self.func(evaluated, limits)
else:
return f
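    # Added worked illustration of the polynomial branch above: for term `k`
    # with limits (k, 1, n) the only root is r = 0 with multiplicity 1, so
    # A = RisingFactorial(1, n) and the whole product evaluates to
    # factorial(n), matching the doctest in the class docstring:
    #
    #   >>> from sympy import Product
    #   >>> from sympy.abc import k, n
    #   >>> Product(k, (k, 1, n)).doit()
    #   factorial(n)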
def _eval_simplify(self, ratio, measure):
from sympy.simplify.simplify import product_simplify
return product_simplify(self)
def _eval_transpose(self):
if self.is_commutative:
return self.func(self.function.transpose(), *self.limits)
return None
def is_convergent(self):
r"""
See docs of Sum.is_convergent() for explanation of convergence
in SymPy.
The infinite product:
.. math::
\prod_{1 \leq i < \infty} f(i)
is defined by the sequence of partial products:
.. math::
\prod_{i=1}^{n} f(i) = f(1) f(2) \cdots f(n)
as n increases without bound. The product converges to a non-zero
value if and only if the sum:
.. math::
\sum_{1 \leq i < \infty} \log{f(n)}
converges.
References
==========
.. [1] https://en.wikipedia.org/wiki/Infinite_product
Examples
========
>>> from sympy import Interval, S, Product, Symbol, cos, pi, exp, oo
>>> n = Symbol('n', integer=True)
>>> Product(n/(n + 1), (n, 1, oo)).is_convergent()
False
>>> Product(1/n**2, (n, 1, oo)).is_convergent()
False
>>> Product(cos(pi/n), (n, 1, oo)).is_convergent()
True
>>> Product(exp(-n**2), (n, 1, oo)).is_convergent()
False
"""
from sympy.concrete.summations import Sum
sequence_term = self.function
log_sum = log(sequence_term)
lim = self.limits
try:
is_conv = Sum(log_sum, *lim).is_convergent()
except NotImplementedError:
if Sum(sequence_term - 1, *lim).is_absolutely_convergent() is S.true:
return S.true
raise NotImplementedError("The algorithm to find the product convergence of %s "
"is not yet implemented" % (sequence_term))
return is_conv
def reverse_order(expr, *indices):
"""
Reverse the order of a limit in a Product.
Usage
=====
``reverse_order(expr, *indices)`` reverses some limits in the expression
``expr`` which can be either a ``Sum`` or a ``Product``. The selectors in
the argument ``indices`` specify some indices whose limits get reversed.
These selectors are either variable names or numerical indices counted
starting from the inner-most limit tuple.
Examples
========
>>> from sympy import Product, simplify, RisingFactorial, gamma, Sum
>>> from sympy.abc import x, y, a, b, c, d
>>> P = Product(x, (x, a, b))
>>> Pr = P.reverse_order(x)
>>> Pr
Product(1/x, (x, b + 1, a - 1))
>>> Pr = Pr.doit()
>>> Pr
1/RisingFactorial(b + 1, a - b - 1)
>>> simplify(Pr)
gamma(b + 1)/gamma(a)
>>> P = P.doit()
>>> P
RisingFactorial(a, -a + b + 1)
>>> simplify(P)
gamma(b + 1)/gamma(a)
While one should prefer variable names when specifying which limits
to reverse, the index counting notation comes in handy in case there
are several symbols with the same name.
>>> S = Sum(x*y, (x, a, b), (y, c, d))
>>> S
Sum(x*y, (x, a, b), (y, c, d))
>>> S0 = S.reverse_order(0)
>>> S0
Sum(-x*y, (x, b + 1, a - 1), (y, c, d))
>>> S1 = S0.reverse_order(1)
>>> S1
Sum(x*y, (x, b + 1, a - 1), (y, d + 1, c - 1))
Of course we can mix both notations:
>>> Sum(x*y, (x, a, b), (y, 2, 5)).reverse_order(x, 1)
Sum(x*y, (x, b + 1, a - 1), (y, 6, 1))
>>> Sum(x*y, (x, a, b), (y, 2, 5)).reverse_order(y, x)
Sum(x*y, (x, b + 1, a - 1), (y, 6, 1))
See Also
========
index, reorder_limit, reorder
References
==========
.. [1] Michael Karr, "Summation in Finite Terms", Journal of the ACM,
Volume 28 Issue 2, April 1981, Pages 305-350
http://dl.acm.org/citation.cfm?doid=322248.322255
"""
l_indices = list(indices)
for i, indx in enumerate(l_indices):
if not isinstance(indx, int):
l_indices[i] = expr.index(indx)
e = 1
limits = []
for i, limit in enumerate(expr.limits):
l = limit
if i in l_indices:
e = -e
l = (limit[0], limit[2] + 1, limit[1] - 1)
limits.append(l)
return Product(expr.function ** e, *limits)
def product(*args, **kwargs):
r"""
Compute the product.
The notation for symbols is similar to the notation used in Sum or
Integral. product(f, (i, a, b)) computes the product of f with
respect to i from a to b, i.e.,
::
                                     b
                                   _____
        product(f(n), (i, a, b)) = |   | f(n)
                                   |   |
                                   i = a
If it cannot compute the product, it returns an unevaluated Product object.
    Repeated products can be computed by introducing additional symbol tuples::
>>> from sympy import product, symbols
>>> i, n, m, k = symbols('i n m k', integer=True)
>>> product(i, (i, 1, k))
factorial(k)
>>> product(m, (i, 1, k))
m**k
>>> product(i, (i, 1, k), (k, 1, n))
Product(factorial(k), (k, 1, n))
"""
prod = Product(*args, **kwargs)
if isinstance(prod, Product):
return prod.doit(deep=False)
else:
return prod
| bsd-3-clause | -3,797,335,564,918,823,400 | 28.558366 | 104 | 0.519713 | false |
sentinelleader/limbo | limbo/plugins/emojicodedict.py | 14 | 46328 | #
# This file is based on emoji (https://github.com/kyokomi/emoji).
#
# The MIT License (MIT)
#
# Copyright (c) 2014 kyokomi
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
emojiCodeDict = {
":capricorn:": u"\U00002651",
":end:": u"\U0001f51a",
":no_mobile_phones:": u"\U0001f4f5",
":couple:": u"\U0001f46b",
":snowman:": u"\U000026c4",
":sunrise_over_mountains:": u"\U0001f304",
":suspension_railway:": u"\U0001f69f",
":arrows_counterclockwise:": u"\U0001f504",
":bug:": u"\U0001f41b",
":confused:": u"\U0001f615",
":dress:": u"\U0001f457",
":honeybee:": u"\U0001f41d",
":waning_crescent_moon:": u"\U0001f318",
":balloon:": u"\U0001f388",
":bus:": u"\U0001f68c",
":package:": u"\U0001f4e6",
":pencil2:": u"\U0000270f",
":rage:": u"\U0001f621",
":space_invader:": u"\U0001f47e",
":white_medium_small_square:": u"\U000025fd",
":fast_forward:": u"\U000023e9",
":rice_cracker:": u"\U0001f358",
":incoming_envelope:": u"\U0001f4e8",
":sa:": u"\U0001f202",
":womens:": u"\U0001f6ba",
":arrow_right:": u"\U000027a1",
":construction_worker:": u"\U0001f477",
":notes:": u"\U0001f3b6",
":goat:": u"\U0001f410",
":grey_question:": u"\U00002754",
":lantern:": u"\U0001f3ee",
":rice_scene:": u"\U0001f391",
":running:": u"\U0001f3c3",
":ferris_wheel:": u"\U0001f3a1",
":musical_score:": u"\U0001f3bc",
":sparkle:": u"\U00002747",
":wink:": u"\U0001f609",
":art:": u"\U0001f3a8",
":clock330:": u"\U0001f55e",
":minidisc:": u"\U0001f4bd",
":no_entry_sign:": u"\U0001f6ab",
":wind_chime:": u"\U0001f390",
":cyclone:": u"\U0001f300",
":herb:": u"\U0001f33f",
":leopard:": u"\U0001f406",
":banana:": u"\U0001f34c",
":handbag:": u"\U0001f45c",
":honey_pot:": u"\U0001f36f",
":ok:": u"\U0001f197",
":hearts:": u"\U00002665",
":passport_control:": u"\U0001f6c2",
":moyai:": u"\U0001f5ff",
":smile:": u"\U0001f604",
":tiger2:": u"\U0001f405",
":twisted_rightwards_arrows:": u"\U0001f500",
":children_crossing:": u"\U0001f6b8",
":cow:": u"\U0001f42e",
":point_up:": u"\U0000261d",
":house:": u"\U0001f3e0",
":man_with_turban:": u"\U0001f473",
":mountain_railway:": u"\U0001f69e",
":vibration_mode:": u"\U0001f4f3",
":blowfish:": u"\U0001f421",
":it:": u"\U0001f1ee\U0001f1f9",
":oden:": u"\U0001f362",
":clock3:": u"\U0001f552",
":lollipop:": u"\U0001f36d",
":train:": u"\U0001f68b",
":scissors:": u"\U00002702",
":triangular_ruler:": u"\U0001f4d0",
":wedding:": u"\U0001f492",
":flashlight:": u"\U0001f526",
":secret:": u"\U00003299",
":sushi:": u"\U0001f363",
":blue_car:": u"\U0001f699",
":cd:": u"\U0001f4bf",
":milky_way:": u"\U0001f30c",
":mortar_board:": u"\U0001f393",
":crown:": u"\U0001f451",
":speech_balloon:": u"\U0001f4ac",
":bento:": u"\U0001f371",
":grey_exclamation:": u"\U00002755",
":hotel:": u"\U0001f3e8",
":keycap_ten:": u"\U0001f51f",
":newspaper:": u"\U0001f4f0",
":outbox_tray:": u"\U0001f4e4",
":racehorse:": u"\U0001f40e",
":laughing:": u"\U0001f606",
":black_large_square:": u"\U00002b1b",
":books:": u"\U0001f4da",
":eight_spoked_asterisk:": u"\U00002733",
":heavy_check_mark:": u"\U00002714",
":m:": u"\U000024c2",
":wave:": u"\U0001f44b",
":bicyclist:": u"\U0001f6b4",
":cocktail:": u"\U0001f378",
":european_castle:": u"\U0001f3f0",
":point_down:": u"\U0001f447",
":tokyo_tower:": u"\U0001f5fc",
":battery:": u"\U0001f50b",
":dancer:": u"\U0001f483",
":repeat:": u"\U0001f501",
":ru:": u"\U0001f1f7\U0001f1fa",
":new_moon:": u"\U0001f311",
":church:": u"\U000026ea",
":date:": u"\U0001f4c5",
":earth_americas:": u"\U0001f30e",
":footprints:": u"\U0001f463",
":libra:": u"\U0000264e",
":mountain_cableway:": u"\U0001f6a0",
":small_red_triangle_down:": u"\U0001f53b",
":top:": u"\U0001f51d",
":sunglasses:": u"\U0001f60e",
":abcd:": u"\U0001f521",
":cl:": u"\U0001f191",
":ski:": u"\U0001f3bf",
":book:": u"\U0001f4d6",
":hourglass_flowing_sand:": u"\U000023f3",
":stuck_out_tongue_closed_eyes:": u"\U0001f61d",
":cold_sweat:": u"\U0001f630",
":headphones:": u"\U0001f3a7",
":confetti_ball:": u"\U0001f38a",
":gemini:": u"\U0000264a",
":new:": u"\U0001f195",
":pray:": u"\U0001f64f",
":watch:": u"\U0000231a",
":coffee:": u"\U00002615",
":ghost:": u"\U0001f47b",
":on:": u"\U0001f51b",
":pouch:": u"\U0001f45d",
":taxi:": u"\U0001f695",
":hocho:": u"\U0001f52a",
":yum:": u"\U0001f60b",
":heavy_plus_sign:": u"\U00002795",
":tada:": u"\U0001f389",
":arrow_heading_down:": u"\U00002935",
":clock530:": u"\U0001f560",
":poultry_leg:": u"\U0001f357",
":elephant:": u"\U0001f418",
":gb:": u"\U0001f1ec\U0001f1e7",
":mahjong:": u"\U0001f004",
":rice:": u"\U0001f35a",
":musical_note:": u"\U0001f3b5",
":beginner:": u"\U0001f530",
":small_red_triangle:": u"\U0001f53a",
":tomato:": u"\U0001f345",
":clock1130:": u"\U0001f566",
":japanese_castle:": u"\U0001f3ef",
":sun_with_face:": u"\U0001f31e",
":four:": u"\U00000034\U000020e3",
":microphone:": u"\U0001f3a4",
":tennis:": u"\U0001f3be",
":arrow_up_down:": u"\U00002195",
":cn:": u"\U0001f1e8\U0001f1f3",
":horse_racing:": u"\U0001f3c7",
":no_bicycles:": u"\U0001f6b3",
":snail:": u"\U0001f40c",
":free:": u"\U0001f193",
":beetle:": u"\U0001f41e",
":black_small_square:": u"\U000025aa",
":file_folder:": u"\U0001f4c1",
":hushed:": u"\U0001f62f",
":skull:": u"\U0001f480",
":ab:": u"\U0001f18e",
":rocket:": u"\U0001f680",
":sweet_potato:": u"\U0001f360",
":guitar:": u"\U0001f3b8",
":poodle:": u"\U0001f429",
":tulip:": u"\U0001f337",
":large_orange_diamond:": u"\U0001f536",
":-1:": u"\U0001f44e",
":chart_with_upwards_trend:": u"\U0001f4c8",
":de:": u"\U0001f1e9\U0001f1ea",
":grapes:": u"\U0001f347",
":ideograph_advantage:": u"\U0001f250",
":japanese_ogre:": u"\U0001f479",
":telephone:": u"\U0000260e",
":clock230:": u"\U0001f55d",
":hourglass:": u"\U0000231b",
":leftwards_arrow_with_hook:": u"\U000021a9",
":sparkler:": u"\U0001f387",
":black_joker:": u"\U0001f0cf",
":clock730:": u"\U0001f562",
":first_quarter_moon_with_face:": u"\U0001f31b",
":man:": u"\U0001f468",
":clock4:": u"\U0001f553",
":fishing_pole_and_fish:": u"\U0001f3a3",
":tophat:": u"\U0001f3a9",
":white_medium_square:": u"\U000025fb",
":mega:": u"\U0001f4e3",
":spaghetti:": u"\U0001f35d",
":dart:": u"\U0001f3af",
":girl:": u"\U0001f467",
":womans_hat:": u"\U0001f452",
":bullettrain_front:": u"\U0001f685",
":department_store:": u"\U0001f3ec",
":heartbeat:": u"\U0001f493",
":palm_tree:": u"\U0001f334",
":swimmer:": u"\U0001f3ca",
":yellow_heart:": u"\U0001f49b",
":arrow_upper_right:": u"\U00002197",
":clock2:": u"\U0001f551",
":high_heel:": u"\U0001f460",
":arrow_double_up:": u"\U000023eb",
":cry:": u"\U0001f622",
":dvd:": u"\U0001f4c0",
":e-mail:": u"\U0001f4e7",
":baby_bottle:": u"\U0001f37c",
":cool:": u"\U0001f192",
":floppy_disk:": u"\U0001f4be",
":iphone:": u"\U0001f4f1",
":minibus:": u"\U0001f690",
":rooster:": u"\U0001f413",
":three:": u"\U00000033\U000020e3",
":white_small_square:": u"\U000025ab",
":cancer:": u"\U0000264b",
":question:": u"\U00002753",
":sake:": u"\U0001f376",
":birthday:": u"\U0001f382",
":dog2:": u"\U0001f415",
":loudspeaker:": u"\U0001f4e2",
":arrow_up_small:": u"\U0001f53c",
":camel:": u"\U0001f42b",
":koala:": u"\U0001f428",
":mag_right:": u"\U0001f50e",
":soccer:": u"\U000026bd",
":bike:": u"\U0001f6b2",
":ear_of_rice:": u"\U0001f33e",
":shit:": u"\U0001f4a9",
":u7981:": u"\U0001f232",
":bath:": u"\U0001f6c0",
":baby:": u"\U0001f476",
":lock_with_ink_pen:": u"\U0001f50f",
":necktie:": u"\U0001f454",
":bikini:": u"\U0001f459",
":blush:": u"\U0001f60a",
":heartpulse:": u"\U0001f497",
":pig_nose:": u"\U0001f43d",
":straight_ruler:": u"\U0001f4cf",
":u6e80:": u"\U0001f235",
":gift:": u"\U0001f381",
":traffic_light:": u"\U0001f6a5",
":hibiscus:": u"\U0001f33a",
":couple_with_heart:": u"\U0001f491",
":pushpin:": u"\U0001f4cc",
":u6709:": u"\U0001f236",
":walking:": u"\U0001f6b6",
":grinning:": u"\U0001f600",
":hash:": u"\U00000023\U000020e3",
":radio_button:": u"\U0001f518",
":raised_hand:": u"\U0000270b",
":shaved_ice:": u"\U0001f367",
":barber:": u"\U0001f488",
":cat:": u"\U0001f431",
":heavy_exclamation_mark:": u"\U00002757",
":ice_cream:": u"\U0001f368",
":mask:": u"\U0001f637",
":pig2:": u"\U0001f416",
":triangular_flag_on_post:": u"\U0001f6a9",
":arrow_upper_left:": u"\U00002196",
":bee:": u"\U0001f41d",
":beer:": u"\U0001f37a",
":black_nib:": u"\U00002712",
":exclamation:": u"\U00002757",
":dog:": u"\U0001f436",
":fire:": u"\U0001f525",
":ant:": u"\U0001f41c",
":broken_heart:": u"\U0001f494",
":chart:": u"\U0001f4b9",
":clock1:": u"\U0001f550",
":bomb:": u"\U0001f4a3",
":virgo:": u"\U0000264d",
":a:": u"\U0001f170",
":fork_and_knife:": u"\U0001f374",
":copyright:": u"\U000000a9",
":curly_loop:": u"\U000027b0",
":full_moon:": u"\U0001f315",
":shoe:": u"\U0001f45e",
":european_post_office:": u"\U0001f3e4",
":ng:": u"\U0001f196",
":office:": u"\U0001f3e2",
":raising_hand:": u"\U0001f64b",
":revolving_hearts:": u"\U0001f49e",
":aquarius:": u"\U00002652",
":electric_plug:": u"\U0001f50c",
":meat_on_bone:": u"\U0001f356",
":mens:": u"\U0001f6b9",
":briefcase:": u"\U0001f4bc",
":ship:": u"\U0001f6a2",
":anchor:": u"\U00002693",
":ballot_box_with_check:": u"\U00002611",
":bear:": u"\U0001f43b",
":beers:": u"\U0001f37b",
":dromedary_camel:": u"\U0001f42a",
":nut_and_bolt:": u"\U0001f529",
":construction:": u"\U0001f6a7",
":golf:": u"\U000026f3",
":toilet:": u"\U0001f6bd",
":blue_book:": u"\U0001f4d8",
":boom:": u"\U0001f4a5",
":deciduous_tree:": u"\U0001f333",
":kissing_closed_eyes:": u"\U0001f61a",
":smiley_cat:": u"\U0001f63a",
":fuelpump:": u"\U000026fd",
":kiss:": u"\U0001f48b",
":clock10:": u"\U0001f559",
":sheep:": u"\U0001f411",
":white_flower:": u"\U0001f4ae",
":boar:": u"\U0001f417",
":currency_exchange:": u"\U0001f4b1",
":facepunch:": u"\U0001f44a",
":flower_playing_cards:": u"\U0001f3b4",
":person_frowning:": u"\U0001f64d",
":poop:": u"\U0001f4a9",
":satisfied:": u"\U0001f606",
":8ball:": u"\U0001f3b1",
":disappointed_relieved:": u"\U0001f625",
":panda_face:": u"\U0001f43c",
":ticket:": u"\U0001f3ab",
":us:": u"\U0001f1fa\U0001f1f8",
":waxing_crescent_moon:": u"\U0001f312",
":dragon:": u"\U0001f409",
":gun:": u"\U0001f52b",
":mount_fuji:": u"\U0001f5fb",
":new_moon_with_face:": u"\U0001f31a",
":star2:": u"\U0001f31f",
":grimacing:": u"\U0001f62c",
":confounded:": u"\U0001f616",
":congratulations:": u"\U00003297",
":custard:": u"\U0001f36e",
":frowning:": u"\U0001f626",
":maple_leaf:": u"\U0001f341",
":police_car:": u"\U0001f693",
":cloud:": u"\U00002601",
":jeans:": u"\U0001f456",
":fish:": u"\U0001f41f",
":wavy_dash:": u"\U00003030",
":clock5:": u"\U0001f554",
":santa:": u"\U0001f385",
":japan:": u"\U0001f5fe",
":oncoming_taxi:": u"\U0001f696",
":whale:": u"\U0001f433",
":arrow_forward:": u"\U000025b6",
":kissing_heart:": u"\U0001f618",
":bullettrain_side:": u"\U0001f684",
":fearful:": u"\U0001f628",
":moneybag:": u"\U0001f4b0",
":runner:": u"\U0001f3c3",
":mailbox:": u"\U0001f4eb",
":sandal:": u"\U0001f461",
":zzz:": u"\U0001f4a4",
":apple:": u"\U0001f34e",
":arrow_heading_up:": u"\U00002934",
":family:": u"\U0001f46a",
":heavy_minus_sign:": u"\U00002796",
":saxophone:": u"\U0001f3b7",
":u5272:": u"\U0001f239",
":black_square_button:": u"\U0001f532",
":bouquet:": u"\U0001f490",
":love_letter:": u"\U0001f48c",
":metro:": u"\U0001f687",
":small_blue_diamond:": u"\U0001f539",
":thought_balloon:": u"\U0001f4ad",
":arrow_up:": u"\U00002b06",
":no_pedestrians:": u"\U0001f6b7",
":smirk:": u"\U0001f60f",
":blue_heart:": u"\U0001f499",
":large_blue_diamond:": u"\U0001f537",
":vs:": u"\U0001f19a",
":v:": u"\U0000270c",
":wheelchair:": u"\U0000267f",
":couplekiss:": u"\U0001f48f",
":tent:": u"\U000026fa",
":purple_heart:": u"\U0001f49c",
":relaxed:": u"\U0000263a",
":accept:": u"\U0001f251",
":green_heart:": u"\U0001f49a",
":pouting_cat:": u"\U0001f63e",
":tram:": u"\U0001f68a",
":bangbang:": u"\U0000203c",
":collision:": u"\U0001f4a5",
":convenience_store:": u"\U0001f3ea",
":person_with_blond_hair:": u"\U0001f471",
":uk:": u"\U0001f1ec\U0001f1e7",
":peach:": u"\U0001f351",
":tired_face:": u"\U0001f62b",
":bread:": u"\U0001f35e",
":mailbox_closed:": u"\U0001f4ea",
":open_mouth:": u"\U0001f62e",
":pig:": u"\U0001f437",
":put_litter_in_its_place:": u"\U0001f6ae",
":u7a7a:": u"\U0001f233",
":bulb:": u"\U0001f4a1",
":clock9:": u"\U0001f558",
":envelope_with_arrow:": u"\U0001f4e9",
":pisces:": u"\U00002653",
":baggage_claim:": u"\U0001f6c4",
":egg:": u"\U0001f373",
":sweat_smile:": u"\U0001f605",
":boat:": u"\U000026f5",
":fr:": u"\U0001f1eb\U0001f1f7",
":heavy_division_sign:": u"\U00002797",
":muscle:": u"\U0001f4aa",
":paw_prints:": u"\U0001f43e",
":arrow_left:": u"\U00002b05",
":black_circle:": u"\U000026ab",
":kissing_smiling_eyes:": u"\U0001f619",
":star:": u"\U00002b50",
":steam_locomotive:": u"\U0001f682",
":1234:": u"\U0001f522",
":clock130:": u"\U0001f55c",
":kr:": u"\U0001f1f0\U0001f1f7",
":monorail:": u"\U0001f69d",
":school:": u"\U0001f3eb",
":seven:": u"\U00000037\U000020e3",
":baby_chick:": u"\U0001f424",
":bridge_at_night:": u"\U0001f309",
":hotsprings:": u"\U00002668",
":rose:": u"\U0001f339",
":love_hotel:": u"\U0001f3e9",
":princess:": u"\U0001f478",
":ramen:": u"\U0001f35c",
":scroll:": u"\U0001f4dc",
":tropical_fish:": u"\U0001f420",
":heart_eyes_cat:": u"\U0001f63b",
":information_desk_person:": u"\U0001f481",
":mouse:": u"\U0001f42d",
":no_smoking:": u"\U0001f6ad",
":post_office:": u"\U0001f3e3",
":stars:": u"\U0001f320",
":arrow_double_down:": u"\U000023ec",
":unlock:": u"\U0001f513",
":arrow_backward:": u"\U000025c0",
":hand:": u"\U0000270b",
":hospital:": u"\U0001f3e5",
":ocean:": u"\U0001f30a",
":mountain_bicyclist:": u"\U0001f6b5",
":octopus:": u"\U0001f419",
":sos:": u"\U0001f198",
":dizzy_face:": u"\U0001f635",
":tongue:": u"\U0001f445",
":train2:": u"\U0001f686",
":checkered_flag:": u"\U0001f3c1",
":orange_book:": u"\U0001f4d9",
":sound:": u"\U0001f509",
":aerial_tramway:": u"\U0001f6a1",
":bell:": u"\U0001f514",
":dragon_face:": u"\U0001f432",
":flipper:": u"\U0001f42c",
":ok_woman:": u"\U0001f646",
":performing_arts:": u"\U0001f3ad",
":postal_horn:": u"\U0001f4ef",
":clock1030:": u"\U0001f565",
":email:": u"\U00002709",
":green_book:": u"\U0001f4d7",
":point_up_2:": u"\U0001f446",
":high_brightness:": u"\U0001f506",
":running_shirt_with_sash:": u"\U0001f3bd",
":bookmark:": u"\U0001f516",
":sob:": u"\U0001f62d",
":arrow_lower_right:": u"\U00002198",
":point_left:": u"\U0001f448",
":purse:": u"\U0001f45b",
":sparkles:": u"\U00002728",
":black_medium_small_square:": u"\U000025fe",
":pound:": u"\U0001f4b7",
":rabbit:": u"\U0001f430",
":woman:": u"\U0001f469",
":negative_squared_cross_mark:": u"\U0000274e",
":open_book:": u"\U0001f4d6",
":smiling_imp:": u"\U0001f608",
":spades:": u"\U00002660",
":baseball:": u"\U000026be",
":fountain:": u"\U000026f2",
":joy:": u"\U0001f602",
":lipstick:": u"\U0001f484",
":partly_sunny:": u"\U000026c5",
":ram:": u"\U0001f40f",
":red_circle:": u"\U0001f534",
":cop:": u"\U0001f46e",
":green_apple:": u"\U0001f34f",
":registered:": u"\U000000ae",
":+1:": u"\U0001f44d",
":crying_cat_face:": u"\U0001f63f",
":innocent:": u"\U0001f607",
":mobile_phone_off:": u"\U0001f4f4",
":underage:": u"\U0001f51e",
":dolphin:": u"\U0001f42c",
":busts_in_silhouette:": u"\U0001f465",
":umbrella:": u"\U00002614",
":angel:": u"\U0001f47c",
":small_orange_diamond:": u"\U0001f538",
":sunflower:": u"\U0001f33b",
":link:": u"\U0001f517",
":notebook:": u"\U0001f4d3",
":oncoming_bus:": u"\U0001f68d",
":bookmark_tabs:": u"\U0001f4d1",
":calendar:": u"\U0001f4c6",
":izakaya_lantern:": u"\U0001f3ee",
":mans_shoe:": u"\U0001f45e",
":name_badge:": u"\U0001f4db",
":closed_lock_with_key:": u"\U0001f510",
":fist:": u"\U0000270a",
":id:": u"\U0001f194",
":ambulance:": u"\U0001f691",
":musical_keyboard:": u"\U0001f3b9",
":ribbon:": u"\U0001f380",
":seedling:": u"\U0001f331",
":tv:": u"\U0001f4fa",
":football:": u"\U0001f3c8",
":nail_care:": u"\U0001f485",
":seat:": u"\U0001f4ba",
":alarm_clock:": u"\U000023f0",
":money_with_wings:": u"\U0001f4b8",
":relieved:": u"\U0001f60c",
":womans_clothes:": u"\U0001f45a",
":lips:": u"\U0001f444",
":clubs:": u"\U00002663",
":house_with_garden:": u"\U0001f3e1",
":sunrise:": u"\U0001f305",
":monkey:": u"\U0001f412",
":six:": u"\U00000036\U000020e3",
":smiley:": u"\U0001f603",
":feet:": u"\U0001f43e",
":waning_gibbous_moon:": u"\U0001f316",
":yen:": u"\U0001f4b4",
":baby_symbol:": u"\U0001f6bc",
":signal_strength:": u"\U0001f4f6",
":boy:": u"\U0001f466",
":busstop:": u"\U0001f68f",
":computer:": u"\U0001f4bb",
":night_with_stars:": u"\U0001f303",
":older_woman:": u"\U0001f475",
":parking:": u"\U0001f17f",
":trumpet:": u"\U0001f3ba",
":100:": u"\U0001f4af",
":sweat_drops:": u"\U0001f4a6",
":wc:": u"\U0001f6be",
":b:": u"\U0001f171",
":cupid:": u"\U0001f498",
":five:": u"\U00000035\U000020e3",
":part_alternation_mark:": u"\U0000303d",
":snowboarder:": u"\U0001f3c2",
":warning:": u"\U000026a0",
":white_large_square:": u"\U00002b1c",
":zap:": u"\U000026a1",
":arrow_down_small:": u"\U0001f53d",
":clock430:": u"\U0001f55f",
":expressionless:": u"\U0001f611",
":phone:": u"\U0000260e",
":roller_coaster:": u"\U0001f3a2",
":lemon:": u"\U0001f34b",
":one:": u"\U00000031\U000020e3",
":christmas_tree:": u"\U0001f384",
":hankey:": u"\U0001f4a9",
":hatched_chick:": u"\U0001f425",
":u7533:": u"\U0001f238",
":large_blue_circle:": u"\U0001f535",
":up:": u"\U0001f199",
":wine_glass:": u"\U0001f377",
":x:": u"\U0000274c",
":nose:": u"\U0001f443",
":rewind:": u"\U000023ea",
":two_hearts:": u"\U0001f495",
":envelope:": u"\U00002709",
":oncoming_automobile:": u"\U0001f698",
":ophiuchus:": u"\U000026ce",
":ring:": u"\U0001f48d",
":tropical_drink:": u"\U0001f379",
":turtle:": u"\U0001f422",
":crescent_moon:": u"\U0001f319",
":koko:": u"\U0001f201",
":microscope:": u"\U0001f52c",
":rugby_football:": u"\U0001f3c9",
":smoking:": u"\U0001f6ac",
":anger:": u"\U0001f4a2",
":aries:": u"\U00002648",
":city_sunset:": u"\U0001f306",
":clock1230:": u"\U0001f567",
":mailbox_with_no_mail:": u"\U0001f4ed",
":movie_camera:": u"\U0001f3a5",
":pager:": u"\U0001f4df",
":zero:": u"\U00000030\U000020e3",
":bank:": u"\U0001f3e6",
":eight_pointed_black_star:": u"\U00002734",
":knife:": u"\U0001f52a",
":u7121:": u"\U0001f21a",
":customs:": u"\U0001f6c3",
":melon:": u"\U0001f348",
":rowboat:": u"\U0001f6a3",
":corn:": u"\U0001f33d",
":eggplant:": u"\U0001f346",
":heart_decoration:": u"\U0001f49f",
":rotating_light:": u"\U0001f6a8",
":round_pushpin:": u"\U0001f4cd",
":cat2:": u"\U0001f408",
":chocolate_bar:": u"\U0001f36b",
":no_bell:": u"\U0001f515",
":radio:": u"\U0001f4fb",
":droplet:": u"\U0001f4a7",
":hamburger:": u"\U0001f354",
":fire_engine:": u"\U0001f692",
":heart:": u"\U00002764",
":potable_water:": u"\U0001f6b0",
":telephone_receiver:": u"\U0001f4de",
":dash:": u"\U0001f4a8",
":globe_with_meridians:": u"\U0001f310",
":guardsman:": u"\U0001f482",
":heavy_multiplication_x:": u"\U00002716",
":chart_with_downwards_trend:": u"\U0001f4c9",
":imp:": u"\U0001f47f",
":earth_asia:": u"\U0001f30f",
":mouse2:": u"\U0001f401",
":notebook_with_decorative_cover:": u"\U0001f4d4",
":telescope:": u"\U0001f52d",
":trolleybus:": u"\U0001f68e",
":card_index:": u"\U0001f4c7",
":euro:": u"\U0001f4b6",
":dollar:": u"\U0001f4b5",
":fax:": u"\U0001f4e0",
":mailbox_with_mail:": u"\U0001f4ec",
":raised_hands:": u"\U0001f64c",
":disappointed:": u"\U0001f61e",
":foggy:": u"\U0001f301",
":person_with_pouting_face:": u"\U0001f64e",
":statue_of_liberty:": u"\U0001f5fd",
":dolls:": u"\U0001f38e",
":light_rail:": u"\U0001f688",
":pencil:": u"\U0001f4dd",
":speak_no_evil:": u"\U0001f64a",
":calling:": u"\U0001f4f2",
":clock830:": u"\U0001f563",
":cow2:": u"\U0001f404",
":hear_no_evil:": u"\U0001f649",
":scream_cat:": u"\U0001f640",
":smile_cat:": u"\U0001f638",
":tractor:": u"\U0001f69c",
":clock11:": u"\U0001f55a",
":doughnut:": u"\U0001f369",
":hammer:": u"\U0001f528",
":loop:": u"\U000027bf",
":moon:": u"\U0001f314",
":soon:": u"\U0001f51c",
":cinema:": u"\U0001f3a6",
":factory:": u"\U0001f3ed",
":flushed:": u"\U0001f633",
":mute:": u"\U0001f507",
":neutral_face:": u"\U0001f610",
":scorpius:": u"\U0000264f",
":wolf:": u"\U0001f43a",
":clapper:": u"\U0001f3ac",
":joy_cat:": u"\U0001f639",
":pensive:": u"\U0001f614",
":sleeping:": u"\U0001f634",
":credit_card:": u"\U0001f4b3",
":leo:": u"\U0000264c",
":man_with_gua_pi_mao:": u"\U0001f472",
":open_hands:": u"\U0001f450",
":tea:": u"\U0001f375",
":arrow_down:": u"\U00002b07",
":nine:": u"\U00000039\U000020e3",
":punch:": u"\U0001f44a",
":slot_machine:": u"\U0001f3b0",
":clap:": u"\U0001f44f",
":information_source:": u"\U00002139",
":tiger:": u"\U0001f42f",
":city_sunrise:": u"\U0001f307",
":dango:": u"\U0001f361",
":thumbsdown:": u"\U0001f44e",
":u6307:": u"\U0001f22f",
":curry:": u"\U0001f35b",
":cherries:": u"\U0001f352",
":clock6:": u"\U0001f555",
":clock7:": u"\U0001f556",
":older_man:": u"\U0001f474",
":oncoming_police_car:": u"\U0001f694",
":syringe:": u"\U0001f489",
":heavy_dollar_sign:": u"\U0001f4b2",
":open_file_folder:": u"\U0001f4c2",
":arrow_right_hook:": u"\U000021aa",
":articulated_lorry:": u"\U0001f69b",
":dancers:": u"\U0001f46f",
":kissing_cat:": u"\U0001f63d",
":rainbow:": u"\U0001f308",
":u5408:": u"\U0001f234",
":boot:": u"\U0001f462",
":carousel_horse:": u"\U0001f3a0",
":fried_shrimp:": u"\U0001f364",
":lock:": u"\U0001f512",
":non-potable_water:": u"\U0001f6b1",
":o:": u"\U00002b55",
":persevere:": u"\U0001f623",
":diamond_shape_with_a_dot_inside:": u"\U0001f4a0",
":fallen_leaf:": u"\U0001f342",
":massage:": u"\U0001f486",
":volcano:": u"\U0001f30b",
":gem:": u"\U0001f48e",
":shower:": u"\U0001f6bf",
":speaker:": u"\U0001f508",
":last_quarter_moon_with_face:": u"\U0001f31c",
":mag:": u"\U0001f50d",
":anguished:": u"\U0001f627",
":monkey_face:": u"\U0001f435",
":sunny:": u"\U00002600",
":tangerine:": u"\U0001f34a",
":point_right:": u"\U0001f449",
":railway_car:": u"\U0001f683",
":triumph:": u"\U0001f624",
":two:": u"\U00000032\U000020e3",
":gift_heart:": u"\U0001f49d",
":ledger:": u"\U0001f4d2",
":sagittarius:": u"\U00002650",
":snowflake:": u"\U00002744",
":abc:": u"\U0001f524",
":horse:": u"\U0001f434",
":ok_hand:": u"\U0001f44c",
":video_camera:": u"\U0001f4f9",
":sparkling_heart:": u"\U0001f496",
":taurus:": u"\U00002649",
":frog:": u"\U0001f438",
":hamster:": u"\U0001f439",
":helicopter:": u"\U0001f681",
":fries:": u"\U0001f35f",
":mushroom:": u"\U0001f344",
":penguin:": u"\U0001f427",
":truck:": u"\U0001f69a",
":bar_chart:": u"\U0001f4ca",
":evergreen_tree:": u"\U0001f332",
":bow:": u"\U0001f647",
":clock12:": u"\U0001f55b",
":four_leaf_clover:": u"\U0001f340",
":inbox_tray:": u"\U0001f4e5",
":smirk_cat:": u"\U0001f63c",
":two_men_holding_hands:": u"\U0001f46c",
":water_buffalo:": u"\U0001f403",
":alien:": u"\U0001f47d",
":video_game:": u"\U0001f3ae",
":candy:": u"\U0001f36c",
":page_facing_up:": u"\U0001f4c4",
":watermelon:": u"\U0001f349",
":white_check_mark:": u"\U00002705",
":blossom:": u"\U0001f33c",
":crocodile:": u"\U0001f40a",
":no_mouth:": u"\U0001f636",
":o2:": u"\U0001f17e",
":shirt:": u"\U0001f455",
":clock8:": u"\U0001f557",
":eyes:": u"\U0001f440",
":rabbit2:": u"\U0001f407",
":tanabata_tree:": u"\U0001f38b",
":wrench:": u"\U0001f527",
":es:": u"\U0001f1ea\U0001f1f8",
":trophy:": u"\U0001f3c6",
":two_women_holding_hands:": u"\U0001f46d",
":clock630:": u"\U0001f561",
":pineapple:": u"\U0001f34d",
":stuck_out_tongue:": u"\U0001f61b",
":angry:": u"\U0001f620",
":athletic_shoe:": u"\U0001f45f",
":cookie:": u"\U0001f36a",
":flags:": u"\U0001f38f",
":game_die:": u"\U0001f3b2",
":bird:": u"\U0001f426",
":jack_o_lantern:": u"\U0001f383",
":ox:": u"\U0001f402",
":paperclip:": u"\U0001f4ce",
":sleepy:": u"\U0001f62a",
":astonished:": u"\U0001f632",
":back:": u"\U0001f519",
":closed_book:": u"\U0001f4d5",
":hatching_chick:": u"\U0001f423",
":arrows_clockwise:": u"\U0001f503",
":car:": u"\U0001f697",
":ear:": u"\U0001f442",
":haircut:": u"\U0001f487",
":icecream:": u"\U0001f366",
":bust_in_silhouette:": u"\U0001f464",
":diamonds:": u"\U00002666",
":no_good:": u"\U0001f645",
":pizza:": u"\U0001f355",
":chicken:": u"\U0001f414",
":eyeglasses:": u"\U0001f453",
":see_no_evil:": u"\U0001f648",
":earth_africa:": u"\U0001f30d",
":fireworks:": u"\U0001f386",
":page_with_curl:": u"\U0001f4c3",
":rice_ball:": u"\U0001f359",
":white_square_button:": u"\U0001f533",
":cake:": u"\U0001f370",
":red_car:": u"\U0001f697",
":tm:": u"\U00002122",
":unamused:": u"\U0001f612",
":fish_cake:": u"\U0001f365",
":key:": u"\U0001f511",
":speedboat:": u"\U0001f6a4",
":closed_umbrella:": u"\U0001f302",
":pear:": u"\U0001f350",
":satellite:": u"\U0001f4e1",
":scream:": u"\U0001f631",
":first_quarter_moon:": u"\U0001f313",
":jp:": u"\U0001f1ef\U0001f1f5",
":repeat_one:": u"\U0001f502",
":shell:": u"\U0001f41a",
":interrobang:": u"\U00002049",
":trident:": u"\U0001f531",
":u55b6:": u"\U0001f23a",
":atm:": u"\U0001f3e7",
":door:": u"\U0001f6aa",
":kissing:": u"\U0001f617",
":six_pointed_star:": u"\U0001f52f",
":thumbsup:": u"\U0001f44d",
":u6708:": u"\U0001f237",
":do_not_litter:": u"\U0001f6af",
":whale2:": u"\U0001f40b",
":school_satchel:": u"\U0001f392",
":cactus:": u"\U0001f335",
":clipboard:": u"\U0001f4cb",
":dizzy:": u"\U0001f4ab",
":waxing_gibbous_moon:": u"\U0001f314",
":camera:": u"\U0001f4f7",
":capital_abcd:": u"\U0001f520",
":leaves:": u"\U0001f343",
":left_luggage:": u"\U0001f6c5",
":bamboo:": u"\U0001f38d",
":bowling:": u"\U0001f3b3",
":eight:": u"\U00000038\U000020e3",
":kimono:": u"\U0001f458",
":left_right_arrow:": u"\U00002194",
":stuck_out_tongue_winking_eye:": u"\U0001f61c",
":surfer:": u"\U0001f3c4",
":sweat:": u"\U0001f613",
":violin:": u"\U0001f3bb",
":postbox:": u"\U0001f4ee",
":bride_with_veil:": u"\U0001f470",
":recycle:": u"\U0000267b",
":station:": u"\U0001f689",
":vhs:": u"\U0001f4fc",
":crossed_flags:": u"\U0001f38c",
":memo:": u"\U0001f4dd",
":no_entry:": u"\U000026d4",
":white_circle:": u"\U000026aa",
":arrow_lower_left:": u"\U00002199",
":chestnut:": u"\U0001f330",
":crystal_ball:": u"\U0001f52e",
":last_quarter_moon:": u"\U0001f317",
":loud_sound:": u"\U0001f50a",
":strawberry:": u"\U0001f353",
":worried:": u"\U0001f61f",
":circus_tent:": u"\U0001f3aa",
":weary:": u"\U0001f629",
":bathtub:": u"\U0001f6c1",
":snake:": u"\U0001f40d",
":grin:": u"\U0001f601",
":symbols:": u"\U0001f523",
":airplane:": u"\U00002708",
":heart_eyes:": u"\U0001f60d",
":sailboat:": u"\U000026f5",
":stew:": u"\U0001f372",
":tshirt:": u"\U0001f455",
":rat:": u"\U0001f400",
":black_medium_square:": u"\U000025fc",
":clock930:": u"\U0001f564",
":full_moon_with_face:": u"\U0001f31d",
":japanese_goblin:": u"\U0001f47a",
":restroom:": u"\U0001f6bb",
":vertical_traffic_light:": u"\U0001f6a6",
":basketball:": u"\U0001f3c0",
":cherry_blossom:": u"\U0001f338",
":low_brightness:": u"\U0001f505",
":pill:": u"\U0001f48a",
}
| mit | -7,657,151,947,005,909,000 | 50.5902 | 80 | 0.372172 | false |
juneJuly/backfuzz | plugins/imap/imap.py | 3 | 1323 | from functions import *
"""IMAP Fuzzer"""
PROPERTY={}
PROPERTY['PROTOCOL']="IMAP"
PROPERTY['NAME']=": IMAP Fuzzer"
PROPERTY['DESC']="Fuzz an IMAP server"
PROPERTY['AUTHOR']='localh0t'
user_stage = ['. login']
pass_stage = ['. login [email protected]']
stage_1 = ['. list ""','. lsub ""', '. status INBOX','. examine','. select','. create','. delete', '. rename INBOX','. fetch 1','. store 1 flags', '. copy 1:2','. subscribe','. unsubscribe','. getquotaroot','. getacl']
stage_2 = ['. list', '. status','. rename','. fetch','. store 1','. copy','. lsub']
stage_3 = ['. store']
class FuzzerClass:
def fuzzer(self):
(username,password) = createUser()
# Stage 0
fuzzTCP()
# User Stage
sock = createSocketTCP(0,0)
fuzzCommands(sock,user_stage,"test","DoubleCommand")
# Pass Stage
sock = createSocketTCP(0,0)
fuzzCommands(sock,pass_stage,0,"SingleCommand")
# Stage 1
login = ". login " + str(username)
sock = createSocketTCP(0,0)
sendCredential(sock,login,password)
fuzzCommands(sock,stage_1,0,"SingleCommand")
# Stage 2
sock = createSocketTCP(0,0)
sendCredential(sock,login,password)
fuzzCommands(sock,stage_2,1,"DoubleCommand")
# Stage 3
sock = createSocketTCP(0,0)
sendCredential(sock,login,password)
fuzzCommands(sock,stage_3,"+flags NonJunk","DoubleCommand")
exitProgram(2) | gpl-3.0 | -8,654,868,588,709,718,000 | 32.948718 | 218 | 0.666667 | false |
tempbottle/kbengine | kbe/src/lib/python/Lib/idlelib/Percolator.py | 82 | 3244 | from idlelib.WidgetRedirector import WidgetRedirector
from idlelib.Delegator import Delegator
class Percolator:
def __init__(self, text):
# XXX would be nice to inherit from Delegator
self.text = text
self.redir = WidgetRedirector(text)
self.top = self.bottom = Delegator(text)
self.bottom.insert = self.redir.register("insert", self.insert)
self.bottom.delete = self.redir.register("delete", self.delete)
self.filters = []
def close(self):
while self.top is not self.bottom:
self.removefilter(self.top)
self.top = None
self.bottom.setdelegate(None); self.bottom = None
self.redir.close(); self.redir = None
self.text = None
def insert(self, index, chars, tags=None):
# Could go away if inheriting from Delegator
self.top.insert(index, chars, tags)
def delete(self, index1, index2=None):
# Could go away if inheriting from Delegator
self.top.delete(index1, index2)
def insertfilter(self, filter):
# Perhaps rename to pushfilter()?
assert isinstance(filter, Delegator)
assert filter.delegate is None
filter.setdelegate(self.top)
self.top = filter
def removefilter(self, filter):
# XXX Perhaps should only support popfilter()?
assert isinstance(filter, Delegator)
assert filter.delegate is not None
f = self.top
if f is filter:
self.top = filter.delegate
filter.setdelegate(None)
else:
while f.delegate is not filter:
assert f is not self.bottom
f.resetcache()
f = f.delegate
f.setdelegate(filter.delegate)
filter.setdelegate(None)
def _percolator(parent):
import tkinter as tk
import re
class Tracer(Delegator):
def __init__(self, name):
self.name = name
Delegator.__init__(self, None)
def insert(self, *args):
print(self.name, ": insert", args)
self.delegate.insert(*args)
def delete(self, *args):
print(self.name, ": delete", args)
self.delegate.delete(*args)
root = tk.Tk()
root.title("Test Percolator")
width, height, x, y = list(map(int, re.split('[x+]', parent.geometry())))
root.geometry("+%d+%d"%(x, y + 150))
text = tk.Text(root)
p = Percolator(text)
t1 = Tracer("t1")
t2 = Tracer("t2")
def toggle1():
if var1.get() == 0:
var1.set(1)
p.insertfilter(t1)
elif var1.get() == 1:
var1.set(0)
p.removefilter(t1)
def toggle2():
if var2.get() == 0:
var2.set(1)
p.insertfilter(t2)
elif var2.get() == 1:
var2.set(0)
p.removefilter(t2)
text.pack()
var1 = tk.IntVar()
cb1 = tk.Checkbutton(root, text="Tracer1", command=toggle1, variable=var1)
cb1.pack()
var2 = tk.IntVar()
cb2 = tk.Checkbutton(root, text="Tracer2", command=toggle2, variable=var2)
cb2.pack()
root.mainloop()
if __name__ == "__main__":
from idlelib.idle_test.htest import run
run(_percolator)
| lgpl-3.0 | 8,063,651,408,356,150,000 | 30.192308 | 78 | 0.577682 | false |
jbobotek/elcano | Vision/OpticalMouse/ADNS3080ImageGrabber.py | 4 | 6354 |
import serial
import string
import math
import time
from Tkinter import *
from threading import Timer
comPort = '/dev/ttyACM0' #default com port
comPortBaud = 38400
class App:
grid_size = 15
num_pixels = 30
image_started = FALSE
image_current_row = 0;
ser = serial.Serial(comPort, comPortBaud)
pixel_dictionary = {}
def __init__(self, master):
# set main window's title
master.title("ADNS3080ImageGrabber")
frame = Frame(master)
frame.grid(row=0,column=0)
self.comPortStr = StringVar()
self.comPort = Entry(frame,textvariable=self.comPortStr)
self.comPort.grid(row=0,column=0)
self.comPort.delete(0, END)
self.comPort.insert(0,comPort)
self.button = Button(frame, text="Open", fg="red", command=self.open_serial)
self.button.grid(row=0,column=1)
self.entryStr = StringVar()
self.entry = Entry(frame,textvariable=self.entryStr)
self.entry.grid(row=0,column=2)
self.entry.delete(0, END)
self.entry.insert(0,"I")
self.send_button = Button(frame, text="Send", command=self.send_to_serial)
self.send_button.grid(row=0,column=3)
self.canvas = Canvas(master, width=self.grid_size*self.num_pixels, height=self.grid_size*self.num_pixels)
self.canvas.grid(row=1)
## start attempts to read from serial port
self.read_loop()
def __del__(self):
self.stop_read_loop()
def open_serial(self):
# close the serial port
if( self.ser.isOpen() ):
try:
self.ser.close()
except:
        pass  # do nothing
# open the serial port
try:
self.ser = serial.Serial(port=self.comPortStr.get(),baudrate=comPortBaud, timeout=1)
print("serial port '" + self.comPortStr.get() + "' opened!")
except:
print("failed to open serial port '" + self.comPortStr.get() + "'")
def send_to_serial(self):
if self.ser.isOpen():
self.ser.write(self.entryStr.get())
print "sent '" + self.entryStr.get() + "' to " + self.ser.portstr
else:
print "Serial port not open!"
def read_loop(self):
try:
self.t.cancel()
except:
aVar = 1 # do nothing
#print("reading")
if( self.ser.isOpen() ) :
self.read_from_serial();
self.t = Timer(0.0,self.read_loop)
self.t.start()
def stop_read_loop(self):
try:
self.t.cancel()
except:
print("failed to cancel timer")
# do nothing
def read_from_serial(self):
if( self.ser.isOpen() ):
while( self.ser.inWaiting() > 0 ):
self.line_processed = FALSE
line = self.ser.readline()
# process the line read
print("line starts")
if( line.find("-------------------------") == 0 ):
self.line_processed = TRUE
self.image_started = FALSE
self.image_current_row = 0
else:
self.image_started= TRUE
if( self.image_started == TRUE ):
if( self.image_current_row >= self.num_pixels ):
self.image_started == FALSE
else:
words = line.split()
if len(words) >= 30:
self.line_processed = TRUE
x = 0
for v in words:
try:
colour = int(v)
except:
colour = 0;
#self.display_pixel(x,self.image_current_row,colour)
self.display_pixel(self.num_pixels-1-self.image_current_row,self.num_pixels-1-x,colour)
x += 1
self.image_current_row += 1
else:
print("line " + str(self.image_current_row) + "incomplete (" + str(len(words)) + " of " + str(self.num_pixels) + "), ignoring")
#print("bad line: " + line);
if( line.find("image data") >= 0 ):
self.line_processed = TRUE
self.image_started = TRUE
self.image_current_row = 0
# clear canvas
#self.canvas.delete(ALL) # remove all items
#display the line if we couldn't understand it
# if( self.line_processed == FALSE ):
# print( line )
def display_default_image(self):
# display the grid
for x in range(0, self.num_pixels-1):
for y in range(0, self.num_pixels-1):
colour = x * y / 3.53
self.display_pixel(x,y,colour)
def display_pixel(self, x, y, colour):
if( x >= 0 and x < self.num_pixels and y >= 0 and y < self.num_pixels ) :
#find the old pixel if it exists and delete it
if self.pixel_dictionary.has_key(x+y*self.num_pixels) :
self.old_pixel = self.pixel_dictionary[x+y*self.num_pixels]
self.canvas.delete(self.old_pixel)
del(self.old_pixel)
fillColour = "#%02x%02x%02x" % (colour, colour, colour)
#draw a new pixel and add to pixel_array
self.new_pixel = self.canvas.create_rectangle(x*self.grid_size, y*self.grid_size, (x+1)*self.grid_size, (y+1)*self.grid_size, fill=fillColour)
self.pixel_dictionary[x+y*self.num_pixels] = self.new_pixel
## main loop ##
root = Tk()
#root.withdraw()
#serPort = SerialHandler(comPort,comPortBaud)
# create main display
app = App(root)
app.display_default_image()
print("entering main loop!")
root.mainloop()
app.stop_read_loop()
print("exiting")
| mit | 4,718,260,182,726,765,000 | 33.104972 | 155 | 0.489613 | false |
remyroy/uwsgi | contrib/runuwsgi.py | 17 | 2577 | import django
from django.core.management.base import BaseCommand
from django.conf import settings
import os
import sys
class Command(BaseCommand):
help = "Runs this project as a uWSGI application. Requires the uwsgi binary in system path."
http_port = '8000'
socket_addr = None
def handle(self, *args, **options):
for arg in args:
k, v = arg.split('=')
if k == 'http':
if self.http_port:
self.http_port = v
elif k == 'socket':
self.http_port = None
self.socket_addr = v
# load http and python plugin: first the specific version, otherwise try with the generic one
if self.http_port:
os.environ['UWSGI_PLUGINS'] = 'http,python%d%d:python' % (sys.version_info[0], sys.version_info[1])
else:
os.environ['UWSGI_PLUGINS'] = 'python%d%d:python' % (sys.version_info[0], sys.version_info[1])
# load the Django WSGI handler
os.environ['UWSGI_MODULE'] = 'django.core.handlers.wsgi:WSGIHandler()'
# DJANGO settings
if options['settings']:
os.environ['DJANGO_SETTINGS_MODULE'] = options['settings']
else:
os.environ['DJANGO_SETTINGS_MODULE'] = 'settings'
# bind the http server to the default port
if self.http_port:
os.environ['UWSGI_HTTP'] = ':%s' % self.http_port
elif self.socket_addr:
os.environ['UWSGI_SOCKET'] = self.socket_addr
# map admin static files
os.environ['UWSGI_STATIC_MAP'] = '%s=%s' % (settings.ADMIN_MEDIA_PREFIX, os.path.join(django.__path__[0], 'contrib', 'admin', 'media'))
# remove sockets/pidfile at exit
os.environ['UWSGI_VACUUM'] = '1'
# retrieve/set the PythonHome
os.environ['UWSGI_PYHOME'] = sys.prefix
# increase buffer size a bit
os.environ['UWSGI_BUFFER_SIZE'] = '8192'
# add threads for concurrency
os.environ['UWSGI_THREADS'] = '8'
# enable the master process
os.environ['UWSGI_MASTER'] = '1'
# use uWSGI python module aliasing to fix the PYTHONPATH
os.environ['UWSGI_PYMODULE_ALIAS'] = '%s=./' % os.path.basename(os.getcwd())
# exec the uwsgi binary
os.execvp('uwsgi', ('uwsgi',))
    def usage(self, subcommand):
return r"""
run this project on the uWSGI server
http=PORT run the embedded http server on port PORT
socket=ADDR bind the uwsgi server on address ADDR (this will disable the http server)
"""
| gpl-2.0 | -9,208,600,569,160,983,000 | 37.462687 | 143 | 0.597982 | false |
SimonSapin/servo | tests/wpt/web-platform-tests/tools/third_party/pytest/testing/test_compat.py | 30 | 2615 | from __future__ import absolute_import, division, print_function
import sys
import pytest
from _pytest.compat import is_generator, get_real_func, safe_getattr
from _pytest.outcomes import OutcomeException
def test_is_generator():
def zap():
yield
def foo():
pass
assert is_generator(zap)
assert not is_generator(foo)
def test_real_func_loop_limit():
class Evil(object):
def __init__(self):
self.left = 1000
def __repr__(self):
return "<Evil left={left}>".format(left=self.left)
def __getattr__(self, attr):
if not self.left:
raise RuntimeError("its over")
self.left -= 1
return self
evil = Evil()
with pytest.raises(ValueError):
res = get_real_func(evil)
print(res)
@pytest.mark.skipif(
sys.version_info < (3, 4), reason="asyncio available in Python 3.4+"
)
def test_is_generator_asyncio(testdir):
testdir.makepyfile(
"""
from _pytest.compat import is_generator
import asyncio
@asyncio.coroutine
def baz():
yield from [1,2,3]
def test_is_generator_asyncio():
assert not is_generator(baz)
"""
)
# avoid importing asyncio into pytest's own process,
# which in turn imports logging (#8)
result = testdir.runpytest_subprocess()
result.stdout.fnmatch_lines(["*1 passed*"])
@pytest.mark.skipif(
sys.version_info < (3, 5), reason="async syntax available in Python 3.5+"
)
def test_is_generator_async_syntax(testdir):
testdir.makepyfile(
"""
from _pytest.compat import is_generator
def test_is_generator_py35():
async def foo():
await foo()
async def bar():
pass
assert not is_generator(foo)
assert not is_generator(bar)
"""
)
result = testdir.runpytest()
result.stdout.fnmatch_lines(["*1 passed*"])
class ErrorsHelper(object):
@property
def raise_exception(self):
raise Exception("exception should be catched")
@property
def raise_fail(self):
pytest.fail("fail should be catched")
def test_helper_failures():
helper = ErrorsHelper()
with pytest.raises(Exception):
helper.raise_exception
with pytest.raises(OutcomeException):
helper.raise_fail
def test_safe_getattr():
helper = ErrorsHelper()
assert safe_getattr(helper, "raise_exception", "default") == "default"
assert safe_getattr(helper, "raise_fail", "default") == "default"
| mpl-2.0 | 3,648,248,295,932,064,300 | 22.772727 | 77 | 0.604207 | false |
dvliman/jaikuengine | .google_appengine/lib/django-1.2/django/contrib/gis/sitemaps/views.py | 45 | 4353 | from django.http import HttpResponse, Http404
from django.template import loader
from django.contrib.sites.models import get_current_site
from django.core import urlresolvers
from django.core.paginator import EmptyPage, PageNotAnInteger
from django.contrib.gis.db.models.fields import GeometryField
from django.db import connections, DEFAULT_DB_ALIAS
from django.db.models import get_model
from django.utils.encoding import smart_str
from django.contrib.gis.shortcuts import render_to_kml, render_to_kmz
def index(request, sitemaps):
"""
This view generates a sitemap index that uses the proper view
for resolving geographic section sitemap URLs.
"""
current_site = get_current_site(request)
sites = []
protocol = request.is_secure() and 'https' or 'http'
for section, site in sitemaps.items():
if callable(site):
pages = site().paginator.num_pages
else:
pages = site.paginator.num_pages
sitemap_url = urlresolvers.reverse('django.contrib.gis.sitemaps.views.sitemap', kwargs={'section': section})
sites.append('%s://%s%s' % (protocol, current_site.domain, sitemap_url))
if pages > 1:
for page in range(2, pages+1):
sites.append('%s://%s%s?p=%s' % (protocol, current_site.domain, sitemap_url, page))
xml = loader.render_to_string('sitemap_index.xml', {'sitemaps': sites})
return HttpResponse(xml, mimetype='application/xml')
def sitemap(request, sitemaps, section=None):
"""
This view generates a sitemap with additional geographic
elements defined by Google.
"""
maps, urls = [], []
if section is not None:
if section not in sitemaps:
raise Http404("No sitemap available for section: %r" % section)
maps.append(sitemaps[section])
else:
maps = sitemaps.values()
page = request.GET.get("p", 1)
current_site = get_current_site(request)
for site in maps:
try:
if callable(site):
urls.extend(site().get_urls(page=page, site=current_site))
else:
urls.extend(site.get_urls(page=page, site=current_site))
except EmptyPage:
raise Http404("Page %s empty" % page)
except PageNotAnInteger:
raise Http404("No page '%s'" % page)
xml = smart_str(loader.render_to_string('gis/sitemaps/geo_sitemap.xml', {'urlset': urls}))
return HttpResponse(xml, mimetype='application/xml')
def kml(request, label, model, field_name=None, compress=False, using=DEFAULT_DB_ALIAS):
"""
This view generates KML for the given app label, model, and field name.
The model's default manager must be GeoManager, and the field name
must be that of a geographic field.
"""
placemarks = []
klass = get_model(label, model)
if not klass:
raise Http404('You must supply a valid app label and module name. Got "%s.%s"' % (label, model))
if field_name:
try:
info = klass._meta.get_field_by_name(field_name)
if not isinstance(info[0], GeometryField):
raise Exception
except:
raise Http404('Invalid geometry field.')
connection = connections[using]
if connection.ops.postgis:
# PostGIS will take care of transformation.
placemarks = klass._default_manager.using(using).kml(field_name=field_name)
else:
# There's no KML method on Oracle or MySQL, so we use the `kml`
# attribute of the lazy geometry instead.
placemarks = []
if connection.ops.oracle:
qs = klass._default_manager.using(using).transform(4326, field_name=field_name)
else:
qs = klass._default_manager.using(using).all()
for mod in qs:
setattr(mod, 'kml', getattr(mod, field_name).kml)
placemarks.append(mod)
# Getting the render function and rendering to the correct.
if compress:
render = render_to_kmz
else:
render = render_to_kml
return render('gis/kml/placemarks.kml', {'places' : placemarks})
def kmz(request, label, model, field_name=None, using=DEFAULT_DB_ALIAS):
"""
This view returns KMZ for the given app label, model, and field name.
"""
return kml(request, label, model, field_name, compress=True, using=using)
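# Illustrative URLconf wiring (not part of this module; the 'sitemaps' dict and
# the URL patterns below are assumptions, kept as comments so the module's
# behaviour is unchanged):
#
#   from django.conf.urls.defaults import patterns, url
#
#   urlpatterns = patterns('django.contrib.gis.sitemaps.views',
#       url(r'^sitemap\.xml$', 'index', {'sitemaps': sitemaps}),
#       url(r'^sitemap-(?P<section>\w+)\.xml$', 'sitemap', {'sitemaps': sitemaps}),
#       url(r'^gis/kml/(?P<label>\w+)/(?P<model>\w+)\.kml$', 'kml'),
#   )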
| apache-2.0 | -3,984,534,456,684,396,500 | 38.216216 | 116 | 0.648289 | false |
hrashk/sympy | sympy/assumptions/refine.py | 7 | 6862 | from __future__ import print_function, division
from sympy.core import S, Add, Expr
from sympy.assumptions import Q, ask
from sympy.core.logic import fuzzy_not
def refine(expr, assumptions=True):
"""
Simplify an expression using assumptions.
Gives the form of expr that would be obtained if symbols
in it were replaced by explicit numerical expressions satisfying
the assumptions.
Examples
========
>>> from sympy import refine, sqrt, Q
>>> from sympy.abc import x
>>> refine(sqrt(x**2), Q.real(x))
Abs(x)
>>> refine(sqrt(x**2), Q.positive(x))
x
"""
if not expr.is_Atom:
args = [refine(arg, assumptions) for arg in expr.args]
# TODO: this will probably not work with Integral or Polynomial
expr = expr.func(*args)
name = expr.__class__.__name__
handler = handlers_dict.get(name, None)
if handler is None:
return expr
new_expr = handler(expr, assumptions)
if (new_expr is None) or (expr == new_expr):
return expr
if not isinstance(new_expr, Expr):
return new_expr
return refine(new_expr, assumptions)
def refine_abs(expr, assumptions):
"""
Handler for the absolute value.
Examples
========
>>> from sympy import Symbol, Q, refine, Abs
>>> from sympy.assumptions.refine import refine_abs
>>> from sympy.abc import x
>>> refine_abs(Abs(x), Q.real(x))
>>> refine_abs(Abs(x), Q.positive(x))
x
>>> refine_abs(Abs(x), Q.negative(x))
-x
"""
arg = expr.args[0]
if ask(Q.real(arg), assumptions) and \
fuzzy_not(ask(Q.negative(arg), assumptions)):
# if it's nonnegative
return arg
if ask(Q.negative(arg), assumptions):
return -arg
def refine_Pow(expr, assumptions):
"""
Handler for instances of Pow.
>>> from sympy import Symbol, Q
>>> from sympy.assumptions.refine import refine_Pow
>>> from sympy.abc import x,y,z
>>> refine_Pow((-1)**x, Q.real(x))
>>> refine_Pow((-1)**x, Q.even(x))
1
>>> refine_Pow((-1)**x, Q.odd(x))
-1
For powers of -1, even parts of the exponent can be simplified:
>>> refine_Pow((-1)**(x+y), Q.even(x))
(-1)**y
>>> refine_Pow((-1)**(x+y+z), Q.odd(x) & Q.odd(z))
(-1)**y
>>> refine_Pow((-1)**(x+y+2), Q.odd(x))
(-1)**(y + 1)
>>> refine_Pow((-1)**(x+3), True)
(-1)**(x + 1)
"""
from sympy.core import Pow, Rational
from sympy.functions.elementary.complexes import Abs
from sympy.functions import sign
if isinstance(expr.base, Abs):
if ask(Q.real(expr.base.args[0]), assumptions) and \
ask(Q.even(expr.exp), assumptions):
return expr.base.args[0] ** expr.exp
if ask(Q.real(expr.base), assumptions):
if expr.base.is_number:
if ask(Q.even(expr.exp), assumptions):
return abs(expr.base) ** expr.exp
if ask(Q.odd(expr.exp), assumptions):
return sign(expr.base) * abs(expr.base) ** expr.exp
if isinstance(expr.exp, Rational):
if type(expr.base) is Pow:
return abs(expr.base.base) ** (expr.base.exp * expr.exp)
if expr.base is S.NegativeOne:
if expr.exp.is_Add:
old = expr
# For powers of (-1) we can remove
# - even terms
# - pairs of odd terms
# - a single odd term + 1
# - A numerical constant N can be replaced with mod(N,2)
coeff, terms = expr.exp.as_coeff_add()
terms = set(terms)
even_terms = set([])
odd_terms = set([])
initial_number_of_terms = len(terms)
for t in terms:
if ask(Q.even(t), assumptions):
even_terms.add(t)
elif ask(Q.odd(t), assumptions):
odd_terms.add(t)
terms -= even_terms
if len(odd_terms) % 2:
terms -= odd_terms
new_coeff = (coeff + S.One) % 2
else:
terms -= odd_terms
new_coeff = coeff % 2
if new_coeff != coeff or len(terms) < initial_number_of_terms:
terms.add(new_coeff)
expr = expr.base**(Add(*terms))
# Handle (-1)**((-1)**n/2 + m/2)
e2 = 2*expr.exp
if ask(Q.even(e2), assumptions):
if e2.could_extract_minus_sign():
e2 *= expr.base
if e2.is_Add:
i, p = e2.as_two_terms()
if p.is_Pow and p.base is S.NegativeOne:
if ask(Q.integer(p.exp), assumptions):
i = (i + 1)/2
if ask(Q.even(i), assumptions):
return expr.base**p.exp
elif ask(Q.odd(i), assumptions):
return expr.base**(p.exp + 1)
else:
return expr.base**(p.exp + i)
if old != expr:
return expr
def refine_exp(expr, assumptions):
"""
Handler for exponential function.
>>> from sympy import Symbol, Q, exp, I, pi
>>> from sympy.assumptions.refine import refine_exp
>>> from sympy.abc import x
>>> refine_exp(exp(pi*I*2*x), Q.real(x))
>>> refine_exp(exp(pi*I*2*x), Q.integer(x))
1
"""
arg = expr.args[0]
if arg.is_Mul:
coeff = arg.as_coefficient(S.Pi*S.ImaginaryUnit)
if coeff:
if ask(Q.integer(2*coeff), assumptions):
if ask(Q.even(coeff), assumptions):
return S.One
elif ask(Q.odd(coeff), assumptions):
return S.NegativeOne
elif ask(Q.even(coeff + S.Half), assumptions):
return -S.ImaginaryUnit
elif ask(Q.odd(coeff + S.Half), assumptions):
return S.ImaginaryUnit
def refine_Relational(expr, assumptions):
"""
Handler for Relational
>>> from sympy.assumptions.refine import refine_Relational
>>> from sympy.assumptions.ask import Q
>>> from sympy.abc import x
>>> refine_Relational(x<0, ~Q.is_true(x<0))
False
"""
return ask(Q.is_true(expr), assumptions)
handlers_dict = {
'Abs': refine_abs,
'Pow': refine_Pow,
'exp': refine_exp,
'Equality' : refine_Relational,
'Unequality' : refine_Relational,
'GreaterThan' : refine_Relational,
'LessThan' : refine_Relational,
'StrictGreaterThan' : refine_Relational,
'StrictLessThan' : refine_Relational
}
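# Dispatch illustration (not part of the original module): refine() looks up an
# expression's class name in handlers_dict, so refining an Abs instance is
# routed to refine_abs above.  A minimal doctest-style sketch:
#
#   >>> from sympy import Q, refine, Abs
#   >>> from sympy.abc import x
#   >>> refine(Abs(x), Q.positive(x))
#   x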
| bsd-3-clause | 5,804,006,793,148,654,000 | 30.768519 | 78 | 0.518071 | false |
bwrsandman/GitPython | git/test/test_commit.py | 12 | 12863 | # -*- coding: utf-8 -*-
# test_commit.py
# Copyright (C) 2008, 2009 Michael Trier ([email protected]) and contributors
#
# This module is part of GitPython and is released under
# the BSD License: http://www.opensource.org/licenses/bsd-license.php
from __future__ import print_function
from git.test.lib import (
TestBase,
assert_equal,
assert_not_equal,
with_rw_repo,
fixture_path,
StringProcessAdapter
)
from git import (
Commit,
Actor,
)
from gitdb import IStream
from gitdb.test.lib import with_rw_directory
from git.compat import (
string_types,
text_type
)
from git import Repo
from git.repo.fun import touch
from io import BytesIO
import time
import sys
import re
import os
def assert_commit_serialization(rwrepo, commit_id, print_performance_info=False):
"""traverse all commits in the history of commit identified by commit_id and check
if the serialization works.
:param print_performance_info: if True, we will show how fast we are"""
ns = 0 # num serializations
nds = 0 # num deserializations
st = time.time()
for cm in rwrepo.commit(commit_id).traverse():
nds += 1
# assert that we deserialize commits correctly, hence we get the same
# sha on serialization
stream = BytesIO()
cm._serialize(stream)
ns += 1
streamlen = stream.tell()
stream.seek(0)
istream = rwrepo.odb.store(IStream(Commit.type, streamlen, stream))
assert istream.hexsha == cm.hexsha.encode('ascii')
nc = Commit(rwrepo, Commit.NULL_BIN_SHA, cm.tree,
cm.author, cm.authored_date, cm.author_tz_offset,
cm.committer, cm.committed_date, cm.committer_tz_offset,
cm.message, cm.parents, cm.encoding)
assert nc.parents == cm.parents
stream = BytesIO()
nc._serialize(stream)
ns += 1
streamlen = stream.tell()
stream.seek(0)
# reuse istream
istream.size = streamlen
istream.stream = stream
istream.binsha = None
nc.binsha = rwrepo.odb.store(istream).binsha
# if it worked, we have exactly the same contents !
assert nc.hexsha == cm.hexsha
# END check commits
elapsed = time.time() - st
if print_performance_info:
print("Serialized %i and deserialized %i commits in %f s ( (%f, %f) commits / s"
% (ns, nds, elapsed, ns / elapsed, nds / elapsed), file=sys.stderr)
# END handle performance info
class TestCommit(TestBase):
def test_bake(self):
commit = self.rorepo.commit('2454ae89983a4496a445ce347d7a41c0bb0ea7ae')
# commits have no dict
self.failUnlessRaises(AttributeError, setattr, commit, 'someattr', 1)
commit.author # bake
assert_equal("Sebastian Thiel", commit.author.name)
assert_equal("[email protected]", commit.author.email)
assert commit.author == commit.committer
assert isinstance(commit.authored_date, int) and isinstance(commit.committed_date, int)
assert isinstance(commit.author_tz_offset, int) and isinstance(commit.committer_tz_offset, int)
assert commit.message == "Added missing information to docstrings of commit and stats module\n"
def test_stats(self):
commit = self.rorepo.commit('33ebe7acec14b25c5f84f35a664803fcab2f7781')
stats = commit.stats
def check_entries(d):
assert isinstance(d, dict)
for key in ("insertions", "deletions", "lines"):
assert key in d
# END assertion helper
assert stats.files
assert stats.total
check_entries(stats.total)
assert "files" in stats.total
for filepath, d in stats.files.items():
check_entries(d)
# END for each stated file
# assure data is parsed properly
michael = Actor._from_string("Michael Trier <[email protected]>")
assert commit.author == michael
assert commit.committer == michael
assert commit.authored_date == 1210193388
assert commit.committed_date == 1210193388
assert commit.author_tz_offset == 14400, commit.author_tz_offset
assert commit.committer_tz_offset == 14400, commit.committer_tz_offset
assert commit.message == "initial project\n"
def test_unicode_actor(self):
# assure we can parse unicode actors correctly
name = u"Üäöß ÄußÉ"
assert len(name) == 9
special = Actor._from_string(u"%s <[email protected]>" % name)
assert special.name == name
assert isinstance(special.name, text_type)
def test_traversal(self):
start = self.rorepo.commit("a4d06724202afccd2b5c54f81bcf2bf26dea7fff")
first = self.rorepo.commit("33ebe7acec14b25c5f84f35a664803fcab2f7781")
p0 = start.parents[0]
p1 = start.parents[1]
p00 = p0.parents[0]
p10 = p1.parents[0]
# basic branch first, depth first
dfirst = start.traverse(branch_first=False)
bfirst = start.traverse(branch_first=True)
assert next(dfirst) == p0
assert next(dfirst) == p00
assert next(bfirst) == p0
assert next(bfirst) == p1
assert next(bfirst) == p00
assert next(bfirst) == p10
# at some point, both iterations should stop
assert list(bfirst)[-1] == first
stoptraverse = self.rorepo.commit("254d04aa3180eb8b8daf7b7ff25f010cd69b4e7d").traverse(as_edge=True)
l = list(stoptraverse)
assert len(l[0]) == 2
# ignore self
assert next(start.traverse(ignore_self=False)) == start
# depth
assert len(list(start.traverse(ignore_self=False, depth=0))) == 1
# prune
assert next(start.traverse(branch_first=1, prune=lambda i, d: i == p0)) == p1
# predicate
assert next(start.traverse(branch_first=1, predicate=lambda i, d: i == p1)) == p1
# traversal should stop when the beginning is reached
self.failUnlessRaises(StopIteration, next, first.traverse())
# parents of the first commit should be empty ( as the only parent has a null
# sha )
assert len(first.parents) == 0
def test_iteration(self):
# we can iterate commits
all_commits = Commit.list_items(self.rorepo, self.rorepo.head)
assert all_commits
assert all_commits == list(self.rorepo.iter_commits())
# this includes merge commits
mcomit = self.rorepo.commit('d884adc80c80300b4cc05321494713904ef1df2d')
assert mcomit in all_commits
# we can limit the result to paths
ltd_commits = list(self.rorepo.iter_commits(paths='CHANGES'))
assert ltd_commits and len(ltd_commits) < len(all_commits)
# show commits of multiple paths, resulting in a union of commits
less_ltd_commits = list(Commit.iter_items(self.rorepo, 'master', paths=('CHANGES', 'AUTHORS')))
assert len(ltd_commits) < len(less_ltd_commits)
def test_iter_items(self):
# pretty not allowed
self.failUnlessRaises(ValueError, Commit.iter_items, self.rorepo, 'master', pretty="raw")
def test_rev_list_bisect_all(self):
"""
'git rev-list --bisect-all' returns additional information
in the commit header. This test ensures that we properly parse it.
"""
revs = self.rorepo.git.rev_list('933d23bf95a5bd1624fbcdf328d904e1fa173474',
first_parent=True,
bisect_all=True)
commits = Commit._iter_from_process_or_stream(self.rorepo, StringProcessAdapter(revs.encode('ascii')))
expected_ids = (
'7156cece3c49544abb6bf7a0c218eb36646fad6d',
'1f66cfbbce58b4b552b041707a12d437cc5f400a',
'33ebe7acec14b25c5f84f35a664803fcab2f7781',
'933d23bf95a5bd1624fbcdf328d904e1fa173474'
)
for sha1, commit in zip(expected_ids, commits):
assert_equal(sha1, commit.hexsha)
@with_rw_directory
def test_ambiguous_arg_iteration(self, rw_dir):
rw_repo = Repo.init(os.path.join(rw_dir, 'test_ambiguous_arg'))
path = os.path.join(rw_repo.working_tree_dir, 'master')
touch(path)
rw_repo.index.add([path])
rw_repo.index.commit('initial commit')
list(rw_repo.iter_commits(rw_repo.head.ref)) # should fail unless bug is fixed
def test_count(self):
assert self.rorepo.tag('refs/tags/0.1.5').commit.count() == 143
def test_list(self):
# This doesn't work anymore, as we will either attempt getattr with bytes, or compare 20 byte string
# with actual 20 byte bytes. This usage makes no sense anyway
assert isinstance(Commit.list_items(self.rorepo, '0.1.5', max_count=5)[
'5117c9c8a4d3af19a9958677e45cda9269de1541'], Commit)
def test_str(self):
commit = Commit(self.rorepo, Commit.NULL_BIN_SHA)
assert_equal(Commit.NULL_HEX_SHA, str(commit))
def test_repr(self):
commit = Commit(self.rorepo, Commit.NULL_BIN_SHA)
assert_equal('<git.Commit "%s">' % Commit.NULL_HEX_SHA, repr(commit))
def test_equality(self):
commit1 = Commit(self.rorepo, Commit.NULL_BIN_SHA)
commit2 = Commit(self.rorepo, Commit.NULL_BIN_SHA)
commit3 = Commit(self.rorepo, "\1" * 20)
assert_equal(commit1, commit2)
assert_not_equal(commit2, commit3)
def test_iter_parents(self):
# should return all but ourselves, even if skip is defined
c = self.rorepo.commit('0.1.5')
for skip in (0, 1):
piter = c.iter_parents(skip=skip)
first_parent = next(piter)
assert first_parent != c
assert first_parent == c.parents[0]
# END for each
def test_name_rev(self):
name_rev = self.rorepo.head.commit.name_rev
assert isinstance(name_rev, string_types)
@with_rw_repo('HEAD', bare=True)
def test_serialization(self, rwrepo):
# create all commits of our repo
assert_commit_serialization(rwrepo, '0.1.6')
def test_serialization_unicode_support(self):
assert Commit.default_encoding.lower() == 'utf-8'
# create a commit with unicode in the message, and the author's name
# Verify its serialization and deserialization
cmt = self.rorepo.commit('0.1.6')
assert isinstance(cmt.message, text_type) # it automatically decodes it as such
assert isinstance(cmt.author.name, text_type) # same here
cmt.message = u"üäêèß"
assert len(cmt.message) == 5
cmt.author.name = u"äüß"
assert len(cmt.author.name) == 3
cstream = BytesIO()
cmt._serialize(cstream)
cstream.seek(0)
assert len(cstream.getvalue())
ncmt = Commit(self.rorepo, cmt.binsha)
ncmt._deserialize(cstream)
assert cmt.author.name == ncmt.author.name
assert cmt.message == ncmt.message
# actually, it can't be printed in a shell as repr wants to have ascii only
# it appears
cmt.author.__repr__()
def test_gpgsig(self):
cmt = self.rorepo.commit()
cmt._deserialize(open(fixture_path('commit_with_gpgsig'), 'rb'))
fixture_sig = """-----BEGIN PGP SIGNATURE-----
Version: GnuPG v1.4.11 (GNU/Linux)
iQIcBAABAgAGBQJRk8zMAAoJEG5mS6x6i9IjsTEP/0v2Wx/i7dqyKban6XMIhVdj
uI0DycfXqnCCZmejidzeao+P+cuK/ZAA/b9fU4MtwkDm2USvnIOrB00W0isxsrED
sdv6uJNa2ybGjxBolLrfQcWutxGXLZ1FGRhEvkPTLMHHvVriKoNFXcS7ewxP9MBf
NH97K2wauqA+J4BDLDHQJgADCOmLrGTAU+G1eAXHIschDqa6PZMH5nInetYZONDh
3SkOOv8VKFIF7gu8X7HC+7+Y8k8U0TW0cjlQ2icinwCc+KFoG6GwXS7u/VqIo1Yp
Tack6sxIdK7NXJhV5gAeAOMJBGhO0fHl8UUr96vGEKwtxyZhWf8cuIPOWLk06jA0
g9DpLqmy/pvyRfiPci+24YdYRBua/vta+yo/Lp85N7Hu/cpIh+q5WSLvUlv09Dmo
TTTG8Hf6s3lEej7W8z2xcNZoB6GwXd8buSDU8cu0I6mEO9sNtAuUOHp2dBvTA6cX
PuQW8jg3zofnx7CyNcd3KF3nh2z8mBcDLgh0Q84srZJCPRuxRcp9ylggvAG7iaNd
XMNvSK8IZtWLkx7k3A3QYt1cN4y1zdSHLR2S+BVCEJea1mvUE+jK5wiB9S4XNtKm
BX/otlTa8pNE3fWYBxURvfHnMY4i3HQT7Bc1QjImAhMnyo2vJk4ORBJIZ1FTNIhJ
JzJMZDRLQLFvnzqZuCjE
=przd
-----END PGP SIGNATURE-----"""
assert cmt.gpgsig == fixture_sig
cmt.gpgsig = "<test\ndummy\nsig>"
assert cmt.gpgsig != fixture_sig
cstream = BytesIO()
cmt._serialize(cstream)
assert re.search(r"^gpgsig <test\n dummy\n sig>$", cstream.getvalue().decode('ascii'), re.MULTILINE)
cstream.seek(0)
cmt.gpgsig = None
cmt._deserialize(cstream)
assert cmt.gpgsig == "<test\ndummy\nsig>"
cmt.gpgsig = None
cstream = BytesIO()
cmt._serialize(cstream)
assert not re.search(r"^gpgsig ", cstream.getvalue().decode('ascii'), re.MULTILINE)
| bsd-3-clause | -8,771,092,237,728,249,000 | 36.24058 | 110 | 0.648272 | false |
shacker/django | tests/template_tests/filter_tests/test_striptags.py | 197 | 1632 | from django.template.defaultfilters import striptags
from django.test import SimpleTestCase
from django.utils.functional import lazystr
from django.utils.safestring import mark_safe
from ..utils import setup
class StriptagsTests(SimpleTestCase):
@setup({'striptags01': '{{ a|striptags }} {{ b|striptags }}'})
def test_striptags01(self):
output = self.engine.render_to_string(
'striptags01',
{
'a': '<a>x</a> <p><b>y</b></p>',
'b': mark_safe('<a>x</a> <p><b>y</b></p>'),
},
)
self.assertEqual(output, 'x y x y')
@setup({'striptags02': '{% autoescape off %}{{ a|striptags }} {{ b|striptags }}{% endautoescape %}'})
def test_striptags02(self):
output = self.engine.render_to_string(
'striptags02',
{
'a': '<a>x</a> <p><b>y</b></p>',
'b': mark_safe('<a>x</a> <p><b>y</b></p>'),
},
)
self.assertEqual(output, 'x y x y')
class FunctionTests(SimpleTestCase):
def test_strip(self):
self.assertEqual(
striptags('some <b>html</b> with <script>alert("You smell")</script> disallowed <img /> tags'),
'some html with alert("You smell") disallowed tags',
)
def test_non_string_input(self):
self.assertEqual(striptags(123), '123')
def test_strip_lazy_string(self):
self.assertEqual(
striptags(lazystr('some <b>html</b> with <script>alert("Hello")</script> disallowed <img /> tags')),
'some html with alert("Hello") disallowed tags',
)
| bsd-3-clause | -5,463,942,984,996,876,000 | 32.306122 | 112 | 0.554534 | false |
xq262144/hue | desktop/core/ext-py/pysaml2-2.4.0/example/idp2_repoze/modules/login.mako.py | 31 | 2690 | # -*- encoding:utf-8 -*-
from mako import runtime, filters, cache
UNDEFINED = runtime.UNDEFINED
__M_dict_builtin = dict
__M_locals_builtin = locals
_magic_number = 6
_modified_time = 1367126126.936375
_template_filename='htdocs/login.mako'
_template_uri='login.mako'
_template_cache=cache.Cache(__name__, _modified_time)
_source_encoding='utf-8'
_exports = []
def _mako_get_namespace(context, name):
try:
return context.namespaces[(__name__, name)]
except KeyError:
_mako_generate_namespaces(context)
return context.namespaces[(__name__, name)]
def _mako_generate_namespaces(context):
pass
def _mako_inherit(template, context):
_mako_generate_namespaces(context)
return runtime._inherit_from(context, u'root.mako', _template_uri)
def render_body(context,**pageargs):
context.caller_stack._push_frame()
try:
__M_locals = __M_dict_builtin(pageargs=pageargs)
redirect_uri = context.get('redirect_uri', UNDEFINED)
key = context.get('key', UNDEFINED)
action = context.get('action', UNDEFINED)
authn_reference = context.get('authn_reference', UNDEFINED)
login = context.get('login', UNDEFINED)
password = context.get('password', UNDEFINED)
__M_writer = context.writer()
# SOURCE LINE 1
__M_writer(u'\n\n<h1>Please log in</h1>\n<p class="description">\n To register it\'s quite simple: enter a login and a password\n</p>\n\n<form action="')
# SOURCE LINE 8
__M_writer(unicode(action))
__M_writer(u'" method="post">\n <input type="hidden" name="key" value="')
# SOURCE LINE 9
__M_writer(unicode(key))
__M_writer(u'"/>\n <input type="hidden" name="authn_reference" value="')
# SOURCE LINE 10
__M_writer(unicode(authn_reference))
__M_writer(u'"/>\n <input type="hidden" name="redirect_uri" value="')
# SOURCE LINE 11
__M_writer(unicode(redirect_uri))
__M_writer(u'"/>\n\n <div class="label">\n <label for="login">Username</label>\n </div>\n <div>\n <input type="text" name="login" value="')
# SOURCE LINE 17
__M_writer(unicode(login))
__M_writer(u'"/><br/>\n </div>\n\n <div class="label">\n <label for="password">Password</label>\n </div>\n <div>\n <input type="password" name="password"\n value="')
# SOURCE LINE 25
__M_writer(unicode(password))
__M_writer(u'"/>\n </div>\n\n <input class="submit" type="submit" name="form.submitted" value="Log In"/>\n</form>\n')
return ''
finally:
context.caller_stack._pop_frame()
| apache-2.0 | -73,144,192,529,359,500 | 43.098361 | 213 | 0.60223 | false |
chirilo/mozillians | vendor-local/lib/python/tablib/packages/odf/elementtypes.py | 83 | 10218 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2008 Søren Roug, European Environment Agency
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Contributor(s):
#
from namespaces import *
# Inline elements don't cause a box
# They are analogous to the HTML elements SPAN, B, I etc.
inline_elements = (
(TEXTNS,u'a'),
(TEXTNS,u'author-initials'),
(TEXTNS,u'author-name'),
(TEXTNS,u'bibliography-mark'),
(TEXTNS,u'bookmark-ref'),
(TEXTNS,u'chapter'),
(TEXTNS,u'character-count'),
(TEXTNS,u'conditional-text'),
(TEXTNS,u'creation-date'),
(TEXTNS,u'creation-time'),
(TEXTNS,u'creator'),
(TEXTNS,u'database-display'),
(TEXTNS,u'database-name'),
(TEXTNS,u'database-next'),
(TEXTNS,u'database-row-number'),
(TEXTNS,u'database-row-select'),
(TEXTNS,u'date'),
(TEXTNS,u'dde-connection'),
(TEXTNS,u'description'),
(TEXTNS,u'editing-cycles'),
(TEXTNS,u'editing-duration'),
(TEXTNS,u'execute-macro'),
(TEXTNS,u'expression'),
(TEXTNS,u'file-name'),
(TEXTNS,u'hidden-paragraph'),
(TEXTNS,u'hidden-text'),
(TEXTNS,u'image-count'),
(TEXTNS,u'initial-creator'),
(TEXTNS,u'keywords'),
(TEXTNS,u'measure'),
(TEXTNS,u'modification-date'),
(TEXTNS,u'modification-time'),
(TEXTNS,u'note-ref'),
(TEXTNS,u'object-count'),
(TEXTNS,u'page-continuation'),
(TEXTNS,u'page-count'),
(TEXTNS,u'page-number'),
(TEXTNS,u'page-variable-get'),
(TEXTNS,u'page-variable-set'),
(TEXTNS,u'paragraph-count'),
(TEXTNS,u'placeholder'),
(TEXTNS,u'print-date'),
(TEXTNS,u'printed-by'),
(TEXTNS,u'print-time'),
(TEXTNS,u'reference-ref'),
(TEXTNS,u'ruby'),
(TEXTNS,u'ruby-base'),
(TEXTNS,u'ruby-text'),
(TEXTNS,u'script'),
(TEXTNS,u'sender-city'),
(TEXTNS,u'sender-company'),
(TEXTNS,u'sender-country'),
(TEXTNS,u'sender-email'),
(TEXTNS,u'sender-fax'),
(TEXTNS,u'sender-firstname'),
(TEXTNS,u'sender-initials'),
(TEXTNS,u'sender-lastname'),
(TEXTNS,u'sender-phone-private'),
(TEXTNS,u'sender-phone-work'),
(TEXTNS,u'sender-position'),
(TEXTNS,u'sender-postal-code'),
(TEXTNS,u'sender-state-or-province'),
(TEXTNS,u'sender-street'),
(TEXTNS,u'sender-title'),
(TEXTNS,u'sequence'),
(TEXTNS,u'sequence-ref'),
(TEXTNS,u'sheet-name'),
(TEXTNS,u'span'),
(TEXTNS,u'subject'),
(TEXTNS,u'table-count'),
(TEXTNS,u'table-formula'),
(TEXTNS,u'template-name'),
(TEXTNS,u'text-input'),
(TEXTNS,u'time'),
(TEXTNS,u'title'),
(TEXTNS,u'user-defined'),
(TEXTNS,u'user-field-get'),
(TEXTNS,u'user-field-input'),
(TEXTNS,u'variable-get'),
(TEXTNS,u'variable-input'),
(TEXTNS,u'variable-set'),
(TEXTNS,u'word-count'),
)
# It is almost impossible to determine what elements are block elements.
# There are so many that don't fit the form
block_elements = (
(TEXTNS,u'h'),
(TEXTNS,u'p'),
(TEXTNS,u'list'),
(TEXTNS,u'list-item'),
(TEXTNS,u'section'),
)
declarative_elements = (
(OFFICENS,u'font-face-decls'),
(PRESENTATIONNS,u'date-time-decl'),
(PRESENTATIONNS,u'footer-decl'),
(PRESENTATIONNS,u'header-decl'),
(TABLENS,u'table-template'),
(TEXTNS,u'alphabetical-index-entry-template'),
(TEXTNS,u'alphabetical-index-source'),
(TEXTNS,u'bibliography-entry-template'),
(TEXTNS,u'bibliography-source'),
(TEXTNS,u'dde-connection-decls'),
(TEXTNS,u'illustration-index-entry-template'),
(TEXTNS,u'illustration-index-source'),
(TEXTNS,u'index-source-styles'),
(TEXTNS,u'index-title-template'),
(TEXTNS,u'note-continuation-notice-backward'),
(TEXTNS,u'note-continuation-notice-forward'),
(TEXTNS,u'notes-configuration'),
(TEXTNS,u'object-index-entry-template'),
(TEXTNS,u'object-index-source'),
(TEXTNS,u'sequence-decls'),
(TEXTNS,u'table-index-entry-template'),
(TEXTNS,u'table-index-source'),
(TEXTNS,u'table-of-content-entry-template'),
(TEXTNS,u'table-of-content-source'),
(TEXTNS,u'user-field-decls'),
(TEXTNS,u'user-index-entry-template'),
(TEXTNS,u'user-index-source'),
(TEXTNS,u'variable-decls'),
)
empty_elements = (
(ANIMNS,u'animate'),
(ANIMNS,u'animateColor'),
(ANIMNS,u'animateMotion'),
(ANIMNS,u'animateTransform'),
(ANIMNS,u'audio'),
(ANIMNS,u'param'),
(ANIMNS,u'set'),
(ANIMNS,u'transitionFilter'),
(CHARTNS,u'categories'),
(CHARTNS,u'data-point'),
(CHARTNS,u'domain'),
(CHARTNS,u'error-indicator'),
(CHARTNS,u'floor'),
(CHARTNS,u'grid'),
(CHARTNS,u'legend'),
(CHARTNS,u'mean-value'),
(CHARTNS,u'regression-curve'),
(CHARTNS,u'stock-gain-marker'),
(CHARTNS,u'stock-loss-marker'),
(CHARTNS,u'stock-range-line'),
(CHARTNS,u'symbol-image'),
(CHARTNS,u'wall'),
(DR3DNS,u'cube'),
(DR3DNS,u'extrude'),
(DR3DNS,u'light'),
(DR3DNS,u'rotate'),
(DR3DNS,u'sphere'),
(DRAWNS,u'contour-path'),
(DRAWNS,u'contour-polygon'),
(DRAWNS,u'equation'),
(DRAWNS,u'fill-image'),
(DRAWNS,u'floating-frame'),
(DRAWNS,u'glue-point'),
(DRAWNS,u'gradient'),
(DRAWNS,u'handle'),
(DRAWNS,u'hatch'),
(DRAWNS,u'layer'),
(DRAWNS,u'marker'),
(DRAWNS,u'opacity'),
(DRAWNS,u'page-thumbnail'),
(DRAWNS,u'param'),
(DRAWNS,u'stroke-dash'),
(FORMNS,u'connection-resource'),
(FORMNS,u'list-value'),
(FORMNS,u'property'),
(MANIFESTNS,u'algorithm'),
(MANIFESTNS,u'key-derivation'),
(METANS,u'auto-reload'),
(METANS,u'document-statistic'),
(METANS,u'hyperlink-behaviour'),
(METANS,u'template'),
(NUMBERNS,u'am-pm'),
(NUMBERNS,u'boolean'),
(NUMBERNS,u'day'),
(NUMBERNS,u'day-of-week'),
(NUMBERNS,u'era'),
(NUMBERNS,u'fraction'),
(NUMBERNS,u'hours'),
(NUMBERNS,u'minutes'),
(NUMBERNS,u'month'),
(NUMBERNS,u'quarter'),
(NUMBERNS,u'scientific-number'),
(NUMBERNS,u'seconds'),
(NUMBERNS,u'text-content'),
(NUMBERNS,u'week-of-year'),
(NUMBERNS,u'year'),
(OFFICENS,u'dde-source'),
(PRESENTATIONNS,u'date-time'),
(PRESENTATIONNS,u'footer'),
(PRESENTATIONNS,u'header'),
(PRESENTATIONNS,u'placeholder'),
(PRESENTATIONNS,u'play'),
(PRESENTATIONNS,u'show'),
(PRESENTATIONNS,u'sound'),
(SCRIPTNS,u'event-listener'),
(STYLENS,u'column'),
(STYLENS,u'column-sep'),
(STYLENS,u'drop-cap'),
(STYLENS,u'footnote-sep'),
(STYLENS,u'list-level-properties'),
(STYLENS,u'map'),
(STYLENS,u'ruby-properties'),
(STYLENS,u'table-column-properties'),
(STYLENS,u'tab-stop'),
(STYLENS,u'text-properties'),
(SVGNS,u'definition-src'),
(SVGNS,u'font-face-format'),
(SVGNS,u'font-face-name'),
(SVGNS,u'stop'),
(TABLENS,u'body'),
(TABLENS,u'cell-address'),
(TABLENS,u'cell-range-source'),
(TABLENS,u'change-deletion'),
(TABLENS,u'consolidation'),
(TABLENS,u'database-source-query'),
(TABLENS,u'database-source-sql'),
(TABLENS,u'database-source-table'),
(TABLENS,u'data-pilot-display-info'),
(TABLENS,u'data-pilot-field-reference'),
(TABLENS,u'data-pilot-group-member'),
(TABLENS,u'data-pilot-layout-info'),
(TABLENS,u'data-pilot-member'),
(TABLENS,u'data-pilot-sort-info'),
(TABLENS,u'data-pilot-subtotal'),
(TABLENS,u'dependency'),
(TABLENS,u'error-macro'),
(TABLENS,u'even-columns'),
(TABLENS,u'even-rows'),
(TABLENS,u'filter-condition'),
(TABLENS,u'first-column'),
(TABLENS,u'first-row'),
(TABLENS,u'highlighted-range'),
(TABLENS,u'insertion-cut-off'),
(TABLENS,u'iteration'),
(TABLENS,u'label-range'),
(TABLENS,u'last-column'),
(TABLENS,u'last-row'),
(TABLENS,u'movement-cut-off'),
(TABLENS,u'named-expression'),
(TABLENS,u'named-range'),
(TABLENS,u'null-date'),
(TABLENS,u'odd-columns'),
(TABLENS,u'odd-rows'),
(TABLENS,u'operation'),
(TABLENS,u'scenario'),
(TABLENS,u'sort-by'),
(TABLENS,u'sort-groups'),
(TABLENS,u'source-range-address'),
(TABLENS,u'source-service'),
(TABLENS,u'subtotal-field'),
(TABLENS,u'table-column'),
(TABLENS,u'table-source'),
(TABLENS,u'target-range-address'),
(TEXTNS,u'alphabetical-index-auto-mark-file'),
(TEXTNS,u'alphabetical-index-mark'),
(TEXTNS,u'alphabetical-index-mark-end'),
(TEXTNS,u'alphabetical-index-mark-start'),
(TEXTNS,u'bookmark'),
(TEXTNS,u'bookmark-end'),
(TEXTNS,u'bookmark-start'),
(TEXTNS,u'change'),
(TEXTNS,u'change-end'),
(TEXTNS,u'change-start'),
(TEXTNS,u'dde-connection-decl'),
(TEXTNS,u'index-entry-bibliography'),
(TEXTNS,u'index-entry-chapter'),
(TEXTNS,u'index-entry-link-end'),
(TEXTNS,u'index-entry-link-start'),
(TEXTNS,u'index-entry-page-number'),
(TEXTNS,u'index-entry-tab-stop'),
(TEXTNS,u'index-entry-text'),
(TEXTNS,u'index-source-style'),
(TEXTNS,u'line-break'),
(TEXTNS,u'page'),
(TEXTNS,u'reference-mark'),
(TEXTNS,u'reference-mark-end'),
(TEXTNS,u'reference-mark-start'),
(TEXTNS,u's'),
(TEXTNS,u'section-source'),
(TEXTNS,u'sequence-decl'),
(TEXTNS,u'soft-page-break'),
(TEXTNS,u'sort-key'),
(TEXTNS,u'tab'),
(TEXTNS,u'toc-mark'),
(TEXTNS,u'toc-mark-end'),
(TEXTNS,u'toc-mark-start'),
(TEXTNS,u'user-field-decl'),
(TEXTNS,u'user-index-mark'),
(TEXTNS,u'user-index-mark-end'),
(TEXTNS,u'user-index-mark-start'),
(TEXTNS,u'variable-decl')
)
| bsd-3-clause | -8,691,467,549,983,235,000 | 30.436923 | 80 | 0.632084 | false |
huichen-cs/learnsorting | quick_sort_unittest.py | 1 | 1092 | import unittest
from quick_sort_concept import quick_sort
class QuickSortTest(unittest.TestCase):
def test_quick_sort_random_1(self):
data = [4, 1, 10, 4, 4, 3, 9, 4, 1, 9]
expected = [1, 1, 3, 4, 4, 4, 4, 9, 9, 10]
output = quick_sort(data)
self.assertEqual(expected, output)
def test_quick_sort_random_2(self):
data = [10, 3, 10, 9, 7, 9, 6, 2, 7, 7]
expected = [2, 3, 6, 7, 7, 7, 9, 9, 10, 10]
output = quick_sort(data)
self.assertEqual(expected, output)
def test_quick_sort_sorted_asc(self):
data = [2, 3, 6, 7, 7, 7, 9, 9, 10, 10]
expected = [2, 3, 6, 7, 7, 7, 9, 9, 10, 10]
output = quick_sort(data)
self.assertEqual(expected, output)
def test_quick_sort_sorted_des(self):
data = [10, 10, 9, 9, 7, 7, 7, 6, 3, 2]
expected = [2, 3, 6, 7, 7, 7, 9, 9, 10, 10]
output = quick_sort(data)
self.assertEqual(expected, output)
if __name__ == "__main__":
unittest.main() | gpl-3.0 | -1,180,622,017,240,610,800 | 31.151515 | 51 | 0.504579 | false |
OpenTrons/opentrons_sdk | api/src/opentrons/system/wifi.py | 3 | 6702 | import hashlib
import logging
import os
import shutil
from typing import Generator, Optional, Dict, Any
from dataclasses import dataclass
from opentrons.config import CONFIG
from opentrons.system import nmcli
log = logging.getLogger(__name__)
class ConfigureArgsError(Exception):
pass
EAP_CONFIG_SHAPE = {
'options': [
{'name': method.qualified_name(),
'displayName': method.display_name(),
'options': [{k: v for k, v in arg.items()
if k in ['name',
'displayName',
'required',
'type']}
for arg in method.args()]}
for method in nmcli.EAP_TYPES]
}
@dataclass(frozen=True)
class Key:
directory: str
file: str
@dataclass(frozen=True)
class AddKeyResult:
created: bool
key: Key
def add_key(key_file_name: str, key_contents: bytes) -> AddKeyResult:
"""
Add a key file (for later use in EAP config) to the system.
"""
keys_dir = CONFIG['wifi_keys_dir']
hasher = hashlib.sha256()
hasher.update(key_contents)
key_hash = hasher.hexdigest()
if key_hash in os.listdir(keys_dir):
files = os.listdir(os.path.join(keys_dir, key_hash))
if files:
return AddKeyResult(created=False,
key=Key(directory=key_hash,
file=files[0]))
else:
log.warning(
"Key directory with nothing in it: {}"
.format(key_hash))
os.rmdir(os.path.join(keys_dir, key_hash))
key_hash_path = os.path.join(keys_dir, key_hash)
os.mkdir(key_hash_path)
with open(os.path.join(key_hash_path,
os.path.basename(key_file_name)), 'wb') as f:
f.write(key_contents)
return AddKeyResult(created=True,
key=Key(directory=key_hash,
file=key_file_name))
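# Illustrative sketch (not part of the original module): keys are stored under
# a SHA-256 hash of their contents, so uploading the same bytes twice reuses
# the existing directory instead of creating a duplicate. For example:
#   first = add_key('client.pem', b'...key bytes...')   # first.created is True
#   again = add_key('client.pem', b'...key bytes...')   # again.created is False
#   assert first.key.directory == again.key.directory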
def list_keys() -> Generator[Key, None, None]:
"""
List wifi keys known to the system.
:return: A generator yielding Key objects
"""
keys_dir = CONFIG['wifi_keys_dir']
# TODO(mc, 2018-10-24): add last modified info to keys for sort purposes
for path in os.listdir(keys_dir):
full_path = os.path.join(keys_dir, path)
if os.path.isdir(full_path):
in_path = os.listdir(full_path)
if len(in_path) > 1:
log.warning("Garbage in key dir for key {}".format(path))
yield Key(directory=path,
file=in_path[0])
else:
log.warning("Garbage in wifi keys dir: {}".format(full_path))
def remove_key(requested_hash: str) -> Optional[str]:
"""
Try to delete key file
:param requested_hash: The hash to delete
:return: The name of the deleted file or None if not found
"""
keys_dir = CONFIG['wifi_keys_dir']
available_keys = os.listdir(keys_dir)
if requested_hash not in available_keys:
return None
key_path = os.path.join(keys_dir, requested_hash)
name = os.listdir(key_path)[0]
shutil.rmtree(key_path)
return name
def get_key_file(key: str) -> str:
"""
Get the full path of a key file
:param key: The key to look for
:return: the path
"""
keys_dir = CONFIG['wifi_keys_dir']
available_keys = os.listdir(keys_dir)
if key not in available_keys:
raise ConfigureArgsError(f'Key ID {key} is not valid on the system')
files_in_dir = os.listdir(os.path.join(keys_dir, key))
if len(files_in_dir) > 1:
raise OSError(
f'Key ID {key} has multiple files, try deleting and re-uploading'
)
return os.path.join(keys_dir, key, files_in_dir[0])
def _eap_check_no_extra_args(
config: Dict[str, Any], options: Any):
# options is an Any because the type annotation for EAP_CONFIG_SHAPE itself
# can’t quite express the type properly because of the inference from the
# dict annotation.
"""Check for args that are not required for this method (to aid debugging)
``config`` should be the user config.
``options`` should be the options sub-member for the eap method.
Before this method is called, the validity of the 'eapType' key should be
established.
"""
arg_names = [k for k in config.keys() if k != 'eapType']
valid_names = [o['name'] for o in options]
for an in arg_names:
if an not in valid_names:
raise ConfigureArgsError(
'Option {} is not valid for EAP method {}'
.format(an, config['eapType']))
def _eap_check_option_ok(opt: Dict[str, str], config: Dict[str, Any]):
"""
Check that a given EAP option is in the user config (if required)
and, if specified, is the right type.
``opt`` should be an options dict from EAP_CONFIG_SHAPE.
``config`` should be the user config dict.
Before this method is called, the validity of the eapType key should be
established.
"""
if opt['name'] not in config:
if opt['required']:
raise ConfigureArgsError(
'Required argument {} for eap method {} not present'
.format(opt['displayName'], config['eapType']))
else:
return
name = opt['name']
o_type = opt['type']
arg = config[name]
if name in config:
if o_type in ('string', 'password') and not isinstance(arg, str):
raise ConfigureArgsError('Option {} should be a str'
.format(name))
elif o_type == 'file' and not isinstance(arg, str):
raise ConfigureArgsError('Option {} must be a str'
.format(name))
def eap_check_config(eap_config: Dict[str, Any]) -> Dict[str, Any]:
"""Check the eap specific args, and replace values where needed."""
eap_type = eap_config.get('eapType')
for method in EAP_CONFIG_SHAPE['options']:
if method['name'] == eap_type:
options = method['options']
break
else:
raise ConfigureArgsError('EAP method {} is not valid'.format(eap_type))
_eap_check_no_extra_args(eap_config, options)
for opt in options: # type: ignore
# Ignoring most types to do with EAP_CONFIG_SHAPE because of issues
# wth type inference for dict comprehensions
_eap_check_option_ok(opt, eap_config)
if opt['type'] == 'file' and opt['name'] in eap_config:
# Special work for file: rewrite from key id to path
eap_config[opt['name']] = get_key_file(eap_config[opt['name']])
return eap_config
| apache-2.0 | 2,438,871,421,344,143,400 | 32.5 | 79 | 0.588507 | false |
fitermay/intellij-community | python/lib/Lib/site-packages/django/contrib/syndication/views.py | 87 | 8404 | import datetime
from django.conf import settings
from django.contrib.sites.models import get_current_site
from django.core.exceptions import ImproperlyConfigured, ObjectDoesNotExist
from django.http import HttpResponse, Http404
from django.template import loader, Template, TemplateDoesNotExist, RequestContext
from django.utils import feedgenerator, tzinfo
from django.utils.encoding import force_unicode, iri_to_uri, smart_unicode
from django.utils.html import escape
def add_domain(domain, url, secure=False):
if not (url.startswith('http://')
or url.startswith('https://')
or url.startswith('mailto:')):
# 'url' must already be ASCII and URL-quoted, so no need for encoding
# conversions here.
if secure:
protocol = 'https'
else:
protocol = 'http'
url = iri_to_uri(u'%s://%s%s' % (protocol, domain, url))
return url
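# Illustrative sketch (not part of the original module): add_domain() prefixes
# relative URLs with the current site's domain and scheme, while absolute
# http://, https:// and mailto: URLs pass through unchanged. For example:
#   add_domain('example.com', '/blog/feed/')               # 'http://example.com/blog/feed/'
#   add_domain('example.com', '/blog/feed/', secure=True)  # 'https://example.com/blog/feed/'
#   add_domain('example.com', 'https://other.org/feed/')   # 'https://other.org/feed/'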
class FeedDoesNotExist(ObjectDoesNotExist):
pass
class Feed(object):
feed_type = feedgenerator.DefaultFeed
title_template = None
description_template = None
def __call__(self, request, *args, **kwargs):
try:
obj = self.get_object(request, *args, **kwargs)
except ObjectDoesNotExist:
raise Http404('Feed object does not exist.')
feedgen = self.get_feed(obj, request)
response = HttpResponse(mimetype=feedgen.mime_type)
feedgen.write(response, 'utf-8')
return response
def item_title(self, item):
# Titles should be double escaped by default (see #6533)
return escape(force_unicode(item))
def item_description(self, item):
return force_unicode(item)
def item_link(self, item):
try:
return item.get_absolute_url()
except AttributeError:
raise ImproperlyConfigured('Give your %s class a get_absolute_url() method, or define an item_link() method in your Feed class.' % item.__class__.__name__)
def __get_dynamic_attr(self, attname, obj, default=None):
try:
attr = getattr(self, attname)
except AttributeError:
return default
if callable(attr):
# Check func_code.co_argcount rather than try/excepting the
# function and catching the TypeError, because something inside
# the function may raise the TypeError. This technique is more
# accurate.
if hasattr(attr, 'func_code'):
argcount = attr.func_code.co_argcount
else:
argcount = attr.__call__.func_code.co_argcount
if argcount == 2: # one argument is 'self'
return attr(obj)
else:
return attr()
return attr
def feed_extra_kwargs(self, obj):
"""
Returns an extra keyword arguments dictionary that is used when
initializing the feed generator.
"""
return {}
def item_extra_kwargs(self, item):
"""
Returns an extra keyword arguments dictionary that is used with
the `add_item` call of the feed generator.
"""
return {}
def get_object(self, request, *args, **kwargs):
return None
def get_feed(self, obj, request):
"""
Returns a feedgenerator.DefaultFeed object, fully populated, for
this feed. Raises FeedDoesNotExist for invalid parameters.
"""
current_site = get_current_site(request)
link = self.__get_dynamic_attr('link', obj)
link = add_domain(current_site.domain, link, request.is_secure())
feed = self.feed_type(
title = self.__get_dynamic_attr('title', obj),
subtitle = self.__get_dynamic_attr('subtitle', obj),
link = link,
description = self.__get_dynamic_attr('description', obj),
language = settings.LANGUAGE_CODE.decode(),
feed_url = add_domain(
current_site.domain,
self.__get_dynamic_attr('feed_url', obj) or request.path,
request.is_secure(),
),
author_name = self.__get_dynamic_attr('author_name', obj),
author_link = self.__get_dynamic_attr('author_link', obj),
author_email = self.__get_dynamic_attr('author_email', obj),
categories = self.__get_dynamic_attr('categories', obj),
feed_copyright = self.__get_dynamic_attr('feed_copyright', obj),
feed_guid = self.__get_dynamic_attr('feed_guid', obj),
ttl = self.__get_dynamic_attr('ttl', obj),
**self.feed_extra_kwargs(obj)
)
title_tmp = None
if self.title_template is not None:
try:
title_tmp = loader.get_template(self.title_template)
except TemplateDoesNotExist:
pass
description_tmp = None
if self.description_template is not None:
try:
description_tmp = loader.get_template(self.description_template)
except TemplateDoesNotExist:
pass
for item in self.__get_dynamic_attr('items', obj):
if title_tmp is not None:
title = title_tmp.render(RequestContext(request, {'obj': item, 'site': current_site}))
else:
title = self.__get_dynamic_attr('item_title', item)
if description_tmp is not None:
description = description_tmp.render(RequestContext(request, {'obj': item, 'site': current_site}))
else:
description = self.__get_dynamic_attr('item_description', item)
link = add_domain(
current_site.domain,
self.__get_dynamic_attr('item_link', item),
request.is_secure(),
)
enc = None
enc_url = self.__get_dynamic_attr('item_enclosure_url', item)
if enc_url:
enc = feedgenerator.Enclosure(
url = smart_unicode(enc_url),
length = smart_unicode(self.__get_dynamic_attr('item_enclosure_length', item)),
mime_type = smart_unicode(self.__get_dynamic_attr('item_enclosure_mime_type', item))
)
author_name = self.__get_dynamic_attr('item_author_name', item)
if author_name is not None:
author_email = self.__get_dynamic_attr('item_author_email', item)
author_link = self.__get_dynamic_attr('item_author_link', item)
else:
author_email = author_link = None
pubdate = self.__get_dynamic_attr('item_pubdate', item)
if pubdate and not pubdate.tzinfo:
ltz = tzinfo.LocalTimezone(pubdate)
pubdate = pubdate.replace(tzinfo=ltz)
feed.add_item(
title = title,
link = link,
description = description,
unique_id = self.__get_dynamic_attr('item_guid', item, link),
enclosure = enc,
pubdate = pubdate,
author_name = author_name,
author_email = author_email,
author_link = author_link,
categories = self.__get_dynamic_attr('item_categories', item),
item_copyright = self.__get_dynamic_attr('item_copyright', item),
**self.item_extra_kwargs(item)
)
return feed
def feed(request, url, feed_dict=None):
"""Provided for backwards compatibility."""
import warnings
warnings.warn('The syndication feed() view is deprecated. Please use the '
'new class based view API.',
category=DeprecationWarning)
if not feed_dict:
raise Http404("No feeds are registered.")
try:
slug, param = url.split('/', 1)
except ValueError:
slug, param = url, ''
try:
f = feed_dict[slug]
except KeyError:
raise Http404("Slug %r isn't registered." % slug)
try:
feedgen = f(slug, request).get_feed(param)
except FeedDoesNotExist:
raise Http404("Invalid feed parameters. Slug %r is valid, but other parameters, or lack thereof, are not." % slug)
response = HttpResponse(mimetype=feedgen.mime_type)
feedgen.write(response, 'utf-8')
return response
| apache-2.0 | -7,074,891,198,409,065,000 | 37.728111 | 167 | 0.57901 | false |
alienity/three.js | utils/exporters/blender/addons/io_three/exporter/api/mesh.py | 124 | 23228 | """
Blender API for querying mesh data. Animation data is also
handled here since Three.js associates the animation (skeletal,
morph targets) with the geometry nodes.
"""
import operator
from bpy import data, types, context
from . import material, texture, animation
from . import object as object_
from .. import constants, utilities, logger, exceptions
def _mesh(func):
"""
:param func:
"""
def inner(name, *args, **kwargs):
"""
:param name:
:param *args:
:param **kwargs:
"""
if isinstance(name, types.Mesh):
mesh = name
else:
mesh = data.meshes[name]
return func(mesh, *args, **kwargs)
return inner
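# Illustrative note (not part of the original module): because of the _mesh
# decorator above, every helper below accepts either a mesh name or the mesh
# datablock itself, so (assuming a mesh called 'Cube' exists) these two calls
# operate on the same data:
#   vertices('Cube')
#   vertices(data.meshes['Cube'])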
@_mesh
def skeletal_animation(mesh, options):
"""
:param mesh:
:param options:
:rtype: []
"""
logger.debug("mesh.animation(%s, %s)", mesh, options)
armature = _armature(mesh)
if not armature:
logger.warning("No armature found (%s)", mesh)
return []
anim_type = options.get(constants.ANIMATION)
# pose_position = armature.data.pose_position
dispatch = {
constants.POSE: animation.pose_animation,
constants.REST: animation.rest_animation
}
func = dispatch[anim_type]
# armature.data.pose_position = anim_type.upper()
animations = func(armature, options)
# armature.data.pose_position = pose_position
return animations
@_mesh
def bones(mesh, options):
"""
:param mesh:
:param options:
:rtype: [], {}
"""
logger.debug("mesh.bones(%s)", mesh)
armature = _armature(mesh)
if not armature:
return [], {}
anim_type = options.get(constants.ANIMATION)
# pose_position = armature.data.pose_position
if anim_type == constants.OFF:
logger.info("Animation type not set, defaulting "
"to using REST position for the armature.")
func = _rest_bones
# armature.data.pose_position = "REST"
else:
dispatch = {
constants.REST: _rest_bones,
constants.POSE: _pose_bones
}
logger.info("Using %s for the armature", anim_type)
func = dispatch[anim_type]
# armature.data.pose_position = anim_type.upper()
bones_, bone_map = func(armature)
# armature.data.pose_position = pose_position
return (bones_, bone_map)
@_mesh
def buffer_normal(mesh):
"""
:param mesh:
:rtype: []
"""
normals_ = []
for face in mesh.tessfaces:
vert_count = len(face.vertices)
        if vert_count != 3:
msg = "Non-triangulated face detected"
raise exceptions.BufferGeometryError(msg)
for vertex_index in face.vertices:
normal = mesh.vertices[vertex_index].normal
vector = (normal.x, normal.y, normal.z)
normals_.extend(vector)
return normals_
@_mesh
def buffer_position(mesh):
"""
:param mesh:
:rtype: []
"""
position = []
for face in mesh.tessfaces:
vert_count = len(face.vertices)
        if vert_count != 3:
msg = "Non-triangulated face detected"
raise exceptions.BufferGeometryError(msg)
for vertex_index in face.vertices:
vertex = mesh.vertices[vertex_index]
vector = (vertex.co.x, vertex.co.y, vertex.co.z)
position.extend(vector)
return position
@_mesh
def buffer_uv(mesh):
"""
:param mesh:
:rtype: []
"""
uvs_ = []
    if len(mesh.uv_layers) == 0:
return uvs_
elif len(mesh.uv_layers) > 1:
# if memory serves me correctly buffer geometry
# only uses one UV layer
logger.warning("%s has more than 1 UV layer", mesh.name)
for uv_data in mesh.uv_layers[0].data:
uv_tuple = (uv_data.uv[0], uv_data.uv[1])
uvs_.extend(uv_tuple)
return uvs_
@_mesh
def faces(mesh, options):
"""
:param mesh:
:param options:
"""
logger.debug("mesh.faces(%s, %s)", mesh, options)
vertex_uv = len(mesh.uv_textures) > 0
has_colors = len(mesh.vertex_colors) > 0
logger.info("Has UVs = %s", vertex_uv)
logger.info("Has vertex colours = %s", has_colors)
opt_colours = options[constants.COLORS] and has_colors
opt_uvs = options[constants.UVS] and vertex_uv
opt_materials = options.get(constants.FACE_MATERIALS)
opt_normals = options[constants.NORMALS]
logger.debug("Vertex colours enabled = %s", opt_colours)
logger.debug("UVS enabled = %s", opt_uvs)
logger.debug("Materials enabled = %s", opt_materials)
logger.debug("Normals enabled = %s", opt_normals)
uv_layers = _uvs(mesh) if opt_uvs else None
vertex_normals = _normals(mesh) if opt_normals else None
vertex_colours = vertex_colors(mesh) if opt_colours else None
faces_data = []
colour_indices = {}
if vertex_colours:
logger.debug("Indexing colours")
for index, colour in enumerate(vertex_colours):
colour_indices[str(colour)] = index
normal_indices = {}
if vertex_normals:
logger.debug("Indexing normals")
for index, normal in enumerate(vertex_normals):
normal_indices[str(normal)] = index
logger.info("Parsing %d faces", len(mesh.tessfaces))
for face in mesh.tessfaces:
vert_count = len(face.vertices)
if vert_count not in (3, 4):
logger.error("%d vertices for face %d detected",
vert_count,
face.index)
raise exceptions.NGonError("ngons are not supported")
mat_index = face.material_index is not None and opt_materials
mask = {
            constants.QUAD: vert_count == 4,
constants.MATERIALS: mat_index,
constants.UVS: False,
constants.NORMALS: False,
constants.COLORS: False
}
face_data = []
face_data.extend([v for v in face.vertices])
if mask[constants.MATERIALS]:
face_data.append(face.material_index)
# @TODO: this needs the same optimization as what
# was done for colours and normals
if uv_layers:
for index, uv_layer in enumerate(uv_layers):
layer = mesh.tessface_uv_textures[index]
for uv_data in layer.data[face.index].uv:
uv_tuple = (uv_data[0], uv_data[1])
face_data.append(uv_layer.index(uv_tuple))
mask[constants.UVS] = True
if vertex_normals:
for vertex in face.vertices:
normal = mesh.vertices[vertex].normal
normal = (normal.x, normal.y, normal.z)
face_data.append(normal_indices[str(normal)])
mask[constants.NORMALS] = True
if vertex_colours:
colours = mesh.tessface_vertex_colors.active.data[face.index]
for each in (colours.color1, colours.color2, colours.color3):
each = utilities.rgb2int(each)
face_data.append(colour_indices[str(each)])
mask[constants.COLORS] = True
if mask[constants.QUAD]:
colour = utilities.rgb2int(colours.color4)
face_data.append(colour_indices[str(colour)])
face_data.insert(0, utilities.bit_mask(mask))
faces_data.extend(face_data)
return faces_data
@_mesh
def morph_targets(mesh, options):
"""
:param mesh:
:param options:
"""
logger.debug("mesh.morph_targets(%s, %s)", mesh, options)
obj = object_.objects_using_mesh(mesh)[0]
original_frame = context.scene.frame_current
frame_step = options.get(constants.FRAME_STEP, 1)
scene_frames = range(context.scene.frame_start,
context.scene.frame_end+1,
frame_step)
morphs = []
for frame in scene_frames:
logger.info("Processing data at frame %d", frame)
context.scene.frame_set(frame, 0.0)
morphs.append([])
vertices_ = object_.extract_mesh(obj, options).vertices[:]
for vertex in vertices_:
morphs[-1].extend([vertex.co.x, vertex.co.y, vertex.co.z])
context.scene.frame_set(original_frame, 0.0)
morphs_detected = False
for index, each in enumerate(morphs):
        if index == 0:
continue
morphs_detected = morphs[index-1] != each
if morphs_detected:
logger.info("Valid morph target data detected")
break
else:
logger.info("No valid morph data detected")
return []
manifest = []
for index, morph in enumerate(morphs):
manifest.append({
constants.NAME: 'animation_%06d' % index,
constants.VERTICES: morph
})
return manifest
@_mesh
def materials(mesh, options):
"""
:param mesh:
:param options:
"""
logger.debug("mesh.materials(%s, %s)", mesh, options)
indices = set([face.material_index for face in mesh.tessfaces])
material_sets = [(mesh.materials[index], index) for index in indices]
materials_ = []
maps = options.get(constants.MAPS)
mix = options.get(constants.MIX_COLORS)
use_colors = options.get(constants.COLORS)
logger.info("Colour mix is set to %s", mix)
logger.info("Vertex colours set to %s", use_colors)
for mat, index in material_sets:
try:
dbg_color = constants.DBG_COLORS[index]
except IndexError:
dbg_color = constants.DBG_COLORS[0]
logger.info("Compiling attributes for %s", mat.name)
attributes = {
constants.COLOR_AMBIENT: material.ambient_color(mat),
constants.COLOR_EMISSIVE: material.emissive_color(mat),
constants.SHADING: material.shading(mat),
constants.OPACITY: material.opacity(mat),
constants.TRANSPARENT: material.transparent(mat),
constants.VISIBLE: material.visible(mat),
constants.WIREFRAME: material.wireframe(mat),
constants.BLENDING: material.blending(mat),
constants.DEPTH_TEST: material.depth_test(mat),
constants.DEPTH_WRITE: material.depth_write(mat),
constants.DBG_NAME: mat.name,
constants.DBG_COLOR: dbg_color,
constants.DBG_INDEX: index
}
if use_colors:
colors = material.use_vertex_colors(mat)
attributes[constants.VERTEX_COLORS] = colors
if (use_colors and mix) or (not use_colors):
colors = material.diffuse_color(mat)
attributes[constants.COLOR_DIFFUSE] = colors
if attributes[constants.SHADING] == constants.PHONG:
logger.info("Adding specular attributes")
attributes.update({
constants.SPECULAR_COEF: material.specular_coef(mat),
constants.COLOR_SPECULAR: material.specular_color(mat)
})
if mesh.show_double_sided:
logger.info("Double sided is on")
attributes[constants.DOUBLE_SIDED] = True
materials_.append(attributes)
if not maps:
continue
diffuse = _diffuse_map(mat)
if diffuse:
logger.info("Diffuse map found")
attributes.update(diffuse)
light = _light_map(mat)
if light:
logger.info("Light map found")
attributes.update(light)
specular = _specular_map(mat)
if specular:
logger.info("Specular map found")
attributes.update(specular)
if attributes[constants.SHADING] == constants.PHONG:
normal = _normal_map(mat)
if normal:
logger.info("Normal map found")
attributes.update(normal)
bump = _bump_map(mat)
if bump:
logger.info("Bump map found")
attributes.update(bump)
return materials_
@_mesh
def normals(mesh):
"""
:param mesh:
:rtype: []
"""
logger.debug("mesh.normals(%s)", mesh)
normal_vectors = []
for vector in _normals(mesh):
normal_vectors.extend(vector)
return normal_vectors
@_mesh
def skin_weights(mesh, bone_map, influences):
"""
:param mesh:
:param bone_map:
:param influences:
"""
logger.debug("mesh.skin_weights(%s)", mesh)
return _skinning_data(mesh, bone_map, influences, 1)
@_mesh
def skin_indices(mesh, bone_map, influences):
"""
:param mesh:
:param bone_map:
:param influences:
"""
logger.debug("mesh.skin_indices(%s)", mesh)
return _skinning_data(mesh, bone_map, influences, 0)
@_mesh
def texture_registration(mesh):
"""
:param mesh:
"""
logger.debug("mesh.texture_registration(%s)", mesh)
materials_ = mesh.materials or []
registration = {}
funcs = (
(constants.MAP_DIFFUSE, material.diffuse_map),
(constants.SPECULAR_MAP, material.specular_map),
(constants.LIGHT_MAP, material.light_map),
(constants.BUMP_MAP, material.bump_map),
(constants.NORMAL_MAP, material.normal_map)
)
def _registration(file_path, file_name):
"""
:param file_path:
:param file_name:
"""
return {
'file_path': file_path,
'file_name': file_name,
'maps': []
}
logger.info("found %d materials", len(materials_))
for mat in materials_:
for (key, func) in funcs:
tex = func(mat)
if tex is None:
continue
logger.info("%s has texture %s", key, tex.name)
file_path = texture.file_path(tex)
file_name = texture.file_name(tex)
reg = registration.setdefault(
utilities.hash(file_path),
_registration(file_path, file_name))
reg["maps"].append(key)
return registration
@_mesh
def uvs(mesh):
"""
:param mesh:
:rtype: []
"""
logger.debug("mesh.uvs(%s)", mesh)
uvs_ = []
for layer in _uvs(mesh):
uvs_.append([])
logger.info("Parsing UV layer %d", len(uvs_))
for pair in layer:
uvs_[-1].extend(pair)
return uvs_
@_mesh
def vertex_colors(mesh):
"""
:param mesh:
"""
logger.debug("mesh.vertex_colors(%s)", mesh)
vertex_colours = []
try:
vertex_colour = mesh.tessface_vertex_colors.active.data
except AttributeError:
logger.info("No vertex colours found")
return
for face in mesh.tessfaces:
colours = (vertex_colour[face.index].color1,
vertex_colour[face.index].color2,
vertex_colour[face.index].color3,
vertex_colour[face.index].color4)
for colour in colours:
colour = utilities.rgb2int((colour.r, colour.g, colour.b))
if colour not in vertex_colours:
vertex_colours.append(colour)
return vertex_colours
@_mesh
def vertices(mesh):
"""
:param mesh:
:rtype: []
"""
logger.debug("mesh.vertices(%s)", mesh)
vertices_ = []
for vertex in mesh.vertices:
vertices_.extend((vertex.co.x, vertex.co.y, vertex.co.z))
return vertices_
def _normal_map(mat):
"""
:param mat:
"""
tex = material.normal_map(mat)
if tex is None:
return
logger.info("Found normal texture map %s", tex.name)
normal = {
constants.MAP_NORMAL:
texture.file_name(tex),
constants.MAP_NORMAL_FACTOR:
material.normal_scale(mat),
constants.MAP_NORMAL_ANISOTROPY:
texture.anisotropy(tex),
constants.MAP_NORMAL_WRAP: texture.wrap(tex),
constants.MAP_NORMAL_REPEAT: texture.repeat(tex)
}
return normal
def _bump_map(mat):
"""
:param mat:
"""
tex = material.bump_map(mat)
if tex is None:
return
logger.info("Found bump texture map %s", tex.name)
bump = {
constants.MAP_BUMP:
texture.file_name(tex),
constants.MAP_BUMP_ANISOTROPY:
texture.anisotropy(tex),
constants.MAP_BUMP_WRAP: texture.wrap(tex),
constants.MAP_BUMP_REPEAT: texture.repeat(tex),
constants.MAP_BUMP_SCALE:
material.bump_scale(mat),
}
return bump
def _specular_map(mat):
"""
:param mat:
"""
tex = material.specular_map(mat)
if tex is None:
return
logger.info("Found specular texture map %s", tex.name)
specular = {
constants.MAP_SPECULAR:
texture.file_name(tex),
constants.MAP_SPECULAR_ANISOTROPY:
texture.anisotropy(tex),
constants.MAP_SPECULAR_WRAP: texture.wrap(tex),
constants.MAP_SPECULAR_REPEAT: texture.repeat(tex)
}
return specular
def _light_map(mat):
"""
:param mat:
"""
tex = material.light_map(mat)
if tex is None:
return
logger.info("Found light texture map %s", tex.name)
light = {
constants.MAP_LIGHT:
texture.file_name(tex),
constants.MAP_LIGHT_ANISOTROPY:
texture.anisotropy(tex),
constants.MAP_LIGHT_WRAP: texture.wrap(tex),
constants.MAP_LIGHT_REPEAT: texture.repeat(tex)
}
return light
def _diffuse_map(mat):
"""
:param mat:
"""
tex = material.diffuse_map(mat)
if tex is None:
return
logger.info("Found diffuse texture map %s", tex.name)
diffuse = {
constants.MAP_DIFFUSE:
texture.file_name(tex),
constants.MAP_DIFFUSE_ANISOTROPY:
texture.anisotropy(tex),
constants.MAP_DIFFUSE_WRAP: texture.wrap(tex),
constants.MAP_DIFFUSE_REPEAT: texture.repeat(tex)
}
return diffuse
def _normals(mesh):
"""
:param mesh:
:rtype: []
"""
vectors = []
vectors_ = {}
for face in mesh.tessfaces:
for vertex_index in face.vertices:
normal = mesh.vertices[vertex_index].normal
vector = (normal.x, normal.y, normal.z)
str_vec = str(vector)
try:
vectors_[str_vec]
except KeyError:
vectors.append(vector)
vectors_[str_vec] = True
return vectors
def _uvs(mesh):
"""
:param mesh:
"""
uv_layers = []
for layer in mesh.uv_layers:
uv_layers.append([])
for uv_data in layer.data:
uv_tuple = (uv_data.uv[0], uv_data.uv[1])
if uv_tuple not in uv_layers[-1]:
uv_layers[-1].append(uv_tuple)
return uv_layers
def _armature(mesh):
"""
:param mesh:
"""
obj = object_.objects_using_mesh(mesh)[0]
armature = obj.find_armature()
if armature:
logger.info("Found armature %s for %s", armature.name, obj.name)
else:
logger.info("Found no armature for %s", obj.name)
return armature
def _skinning_data(mesh, bone_map, influences, array_index):
"""
:param mesh:
:param bone_map:
:param influences:
:param array_index:
"""
armature = _armature(mesh)
manifest = []
if not armature:
return manifest
obj = object_.objects_using_mesh(mesh)[0]
logger.debug("Skinned object found %s", obj.name)
for vertex in mesh.vertices:
bone_array = []
for group in vertex.groups:
bone_array.append((group.group, group.weight))
bone_array.sort(key=operator.itemgetter(1), reverse=True)
for index in range(influences):
if index >= len(bone_array):
manifest.append(0)
continue
name = obj.vertex_groups[bone_array[index][0]].name
for bone_index, bone in enumerate(armature.pose.bones):
if bone.name != name:
continue
                if array_index == 0:
entry = bone_map.get(bone_index, -1)
else:
entry = bone_array[index][1]
manifest.append(entry)
break
else:
manifest.append(0)
return manifest
def _pose_bones(armature):
"""
:param armature:
:rtype: [], {}
"""
bones_ = []
bone_map = {}
bone_count = 0
armature_matrix = armature.matrix_world
for bone_count, pose_bone in enumerate(armature.pose.bones):
armature_bone = pose_bone.bone
bone_index = None
if armature_bone.parent is None:
bone_matrix = armature_matrix * armature_bone.matrix_local
bone_index = -1
else:
parent_bone = armature_bone.parent
parent_matrix = armature_matrix * parent_bone.matrix_local
bone_matrix = armature_matrix * armature_bone.matrix_local
bone_matrix = parent_matrix.inverted() * bone_matrix
bone_index = index = 0
for pose_parent in armature.pose.bones:
armature_parent = pose_parent.bone.name
if armature_parent == parent_bone.name:
bone_index = index
index += 1
bone_map[bone_count] = bone_count
pos, rot, scl = bone_matrix.decompose()
bones_.append({
constants.PARENT: bone_index,
constants.NAME: armature_bone.name,
constants.POS: (pos.x, pos.z, -pos.y),
constants.ROTQ: (rot.x, rot.z, -rot.y, rot.w),
constants.SCL: (scl.x, scl.z, scl.y)
})
return bones_, bone_map
def _rest_bones(armature):
"""
:param armature:
:rtype: [], {}
"""
bones_ = []
bone_map = {}
bone_count = 0
bone_index_rel = 0
for bone in armature.data.bones:
logger.info("Parsing bone %s", bone.name)
if not bone.use_deform:
logger.debug("Ignoring bone %s at: %d",
bone.name, bone_index_rel)
continue
if bone.parent is None:
bone_pos = bone.head_local
bone_index = -1
else:
bone_pos = bone.head_local - bone.parent.head_local
bone_index = 0
index = 0
for parent in armature.data.bones:
if parent.name == bone.parent.name:
bone_index = bone_map.get(index)
index += 1
bone_world_pos = armature.matrix_world * bone_pos
x_axis = bone_world_pos.x
y_axis = bone_world_pos.z
z_axis = -bone_world_pos.y
logger.debug("Adding bone %s at: %s, %s",
bone.name, bone_index, bone_index_rel)
bone_map[bone_count] = bone_index_rel
bone_index_rel += 1
# @TODO: the rotq probably should not have these
# hard coded values
bones_.append({
constants.PARENT: bone_index,
constants.NAME: bone.name,
constants.POS: (x_axis, y_axis, z_axis),
constants.ROTQ: (0, 0, 0, 1)
})
bone_count += 1
return (bones_, bone_map)
| mit | 5,771,859,406,008,319,000 | 24.275299 | 73 | 0.568107 | false |
andersonvom/python_koans | python3/koans/about_classes.py | 22 | 4779 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from runner.koan import *
class AboutClasses(Koan):
class Dog:
"Dogs need regular walkies. Never, ever let them drive."
def test_instances_of_classes_can_be_created_adding_parentheses(self):
# NOTE: The .__name__ attribute will convert the class
# into a string value.
fido = self.Dog()
self.assertEqual(__, fido.__class__.__name__)
def test_classes_have_docstrings(self):
self.assertRegexpMatches(self.Dog.__doc__, __)
# ------------------------------------------------------------------
class Dog2:
def __init__(self):
self._name = 'Paul'
def set_name(self, a_name):
self._name = a_name
def test_init_method_is_the_constructor(self):
dog = self.Dog2()
self.assertEqual(__, dog._name)
def test_private_attributes_are_not_really_private(self):
dog = self.Dog2()
dog.set_name("Fido")
self.assertEqual(__, dog._name)
# The _ prefix in _name implies private ownership, but nothing is truly
# private in Python.
def test_you_can_also_access_the_value_out_using_getattr_and_dict(self):
fido = self.Dog2()
fido.set_name("Fido")
self.assertEqual(__, getattr(fido, "_name"))
# getattr(), setattr() and delattr() are a way of accessing attributes
# by method rather than through assignment operators
self.assertEqual(__, fido.__dict__["_name"])
# Yes, this works here, but don't rely on the __dict__ object! Some
# class implementations use optimization which result in __dict__ not
# showing everything.
# ------------------------------------------------------------------
class Dog3:
def __init__(self):
self._name = None
def set_name(self, a_name):
self._name = a_name
def get_name(self):
return self._name
name = property(get_name, set_name)
def test_that_name_can_be_read_as_a_property(self):
fido = self.Dog3()
fido.set_name("Fido")
# access as method
self.assertEqual(__, fido.get_name())
# access as property
self.assertEqual(__, fido.name)
# ------------------------------------------------------------------
class Dog4:
def __init__(self):
self._name = None
@property
def name(self):
return self._name
@name.setter
def name(self, a_name):
self._name = a_name
def test_creating_properties_with_decorators_is_slightly_easier(self):
fido = self.Dog4()
fido.name = "Fido"
self.assertEqual(__, fido.name)
# ------------------------------------------------------------------
class Dog5:
def __init__(self, initial_name):
self._name = initial_name
@property
def name(self):
return self._name
def test_init_provides_initial_values_for_instance_variables(self):
fido = self.Dog5("Fido")
self.assertEqual(__, fido.name)
def test_args_must_match_init(self):
with self.assertRaises(___):
self.Dog5()
# THINK ABOUT IT:
# Why is this so?
def test_different_objects_have_difference_instance_variables(self):
fido = self.Dog5("Fido")
rover = self.Dog5("Rover")
self.assertEqual(__, rover.name == fido.name)
# ------------------------------------------------------------------
class Dog6:
def __init__(self, initial_name):
self._name = initial_name
def get_self(self):
return self
def __str__(self):
#
# Implement this!
#
return __
def __repr__(self):
return "<Dog named '" + self._name + "'>"
def test_inside_a_method_self_refers_to_the_containing_object(self):
fido = self.Dog6("Fido")
self.assertEqual(__, fido.get_self()) # Not a string!
def test_str_provides_a_string_version_of_the_object(self):
fido = self.Dog6("Fido")
self.assertEqual("Fido", str(fido))
def test_str_is_used_explicitly_in_string_interpolation(self):
fido = self.Dog6("Fido")
self.assertEqual(__, "My dog is " + str(fido))
def test_repr_provides_a_more_complete_string_version(self):
fido = self.Dog6("Fido")
self.assertEqual(__, repr(fido))
def test_all_objects_support_str_and_repr(self):
seq = [1, 2, 3]
self.assertEqual(__, str(seq))
self.assertEqual(__, repr(seq))
self.assertEqual(__, str("STRING"))
self.assertEqual(__, repr("STRING"))
| mit | -7,448,872,620,996,882,000 | 27.446429 | 79 | 0.521657 | false |
inspirehep/invenio | modules/bibcheck/lib/plugins/doi.py | 6 | 2215 | # -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2013 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
""" Bibcheck plugin add the DOIs (from crossref) """
from invenio.bibrecord import record_add_field
from invenio.crossrefutils import get_doi_for_records
from invenio.bibupload import find_record_from_doi
def check_records(records, doi_field="0247_a", extra_subfields=(("2", "DOI"), ("9", "bibcheck"))):
"""
Find the DOI for the records using crossref and add it to the specified
field.
This plugin won't ask for the DOI if it's already set.
"""
records_to_check = {}
for record in records:
has_doi = False
for position, value in record.iterfield("0247_2"):
if value.lower() == "doi":
has_doi = True
break
if not has_doi:
records_to_check[record.record_id] = record
dois = get_doi_for_records(records_to_check.values())
for record_id, doi in dois.iteritems():
record = records_to_check[record_id]
dup_doi_recid = find_record_from_doi(doi)
if dup_doi_recid:
record.warn("DOI %s to be added to record %s already exists in record/s %s" % (doi, record_id, dup_doi_recid))
continue
subfields = [(doi_field[5], doi.encode("utf-8"))] + map(tuple, extra_subfields)
record_add_field(record, tag=doi_field[:3], ind1=doi_field[3],
ind2=doi_field[4], subfields=subfields)
record.set_amended("Added DOI in field %s" % doi_field)
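# Illustrative note (not part of the original plugin): doi_field is sliced
# positionally as a MARC-style field spec, so the default "0247_a" means
# tag '024', ind1 '7', ind2 '_' and subfield 'a'. A hypothetical run such as
#   check_records(records, doi_field="0247_a")
# therefore writes the DOI into 024 7_ $a, plus the extra_subfields
# ($2 DOI, $9 bibcheck) on the same field.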
| gpl-2.0 | -324,546,823,726,813,630 | 40.018519 | 122 | 0.658691 | false |
otmaneJai/Zipline | zipline/utils/memoize.py | 7 | 2540 | """
Tools for memoization of function results.
"""
from functools import wraps
from six import iteritems
from weakref import WeakKeyDictionary
class lazyval(object):
"""
Decorator that marks that an attribute should not be computed until
needed, and that the value should be memoized.
Example
-------
>>> from zipline.utils.memoize import lazyval
>>> class C(object):
... def __init__(self):
... self.count = 0
... @lazyval
... def val(self):
... self.count += 1
... return "val"
...
>>> c = C()
>>> c.count
0
>>> c.val, c.count
('val', 1)
>>> c.val, c.count
('val', 1)
"""
def __init__(self, get):
self._get = get
self._cache = WeakKeyDictionary()
def __get__(self, instance, owner):
if instance is None:
return self
try:
return self._cache[instance]
except KeyError:
self._cache[instance] = val = self._get(instance)
return val
def remember_last(f):
"""
Decorator that remembers the last computed value of a function and doesn't
recompute it when called with the same inputs multiple times.
Parameters
----------
f : The function to be memoized. All arguments to f should be hashable.
Example
-------
>>> counter = 0
>>> @remember_last
... def foo(x):
... global counter
... counter += 1
... return x, counter
>>> foo(1)
(1, 1)
>>> foo(1)
(1, 1)
>>> foo(0)
(0, 2)
>>> foo(1)
(1, 3)
Notes
-----
This decorator is equivalent to `lru_cache(1)` in Python 3, but with less
bells and whistles for handling things like threadsafety. If we ever
decide we need such bells and whistles, we should just make functools32 a
dependency.
"""
# This needs to be a mutable data structure so we can change it from inside
# the function. In pure Python 3, we'd use the nonlocal keyword for this.
_previous = [None, None]
KEY, VALUE = 0, 1
    # sentinel wrapped in a single-element tuple so it can be concatenated
    # onto the positional-args key below
    _kwd_mark = (object(),)
@wraps(f)
def memoized_f(*args, **kwds):
# Hashing logic taken from functools32.lru_cache.
key = args
if kwds:
key += _kwd_mark + tuple(sorted(iteritems(kwds)))
key_hash = hash(key)
if key_hash != _previous[KEY]:
_previous[VALUE] = f(*args, **kwds)
_previous[KEY] = key_hash
return _previous[VALUE]
return memoized_f
| apache-2.0 | 7,487,738,887,531,389,000 | 24.148515 | 79 | 0.554724 | false |
amousset/ansible | lib/ansible/template/safe_eval.py | 47 | 4154 | # (c) 2012, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import ast
import sys
from six.moves import builtins
from ansible import constants as C
from ansible.plugins import filter_loader, test_loader
def safe_eval(expr, locals={}, include_exceptions=False):
'''
This is intended for allowing things like:
with_items: a_list_variable
Where Jinja2 would return a string but we do not want to allow it to
call functions (outside of Jinja2, where the env is constrained). If
the input data to this function came from an untrusted (remote) source,
it should first be run through _clean_data_struct() to ensure the data
is further sanitized prior to evaluation.
Based on:
http://stackoverflow.com/questions/12523516/using-ast-and-whitelists-to-make-pythons-eval-safe
'''
# this is the whitelist of AST nodes we are going to
# allow in the evaluation. Any node type other than
# those listed here will raise an exception in our custom
# visitor class defined below.
SAFE_NODES = set(
(
ast.Add,
ast.BinOp,
ast.Call,
ast.Compare,
ast.Dict,
ast.Div,
ast.Expression,
ast.List,
ast.Load,
ast.Mult,
ast.Num,
ast.Name,
ast.Str,
ast.Sub,
ast.Tuple,
ast.UnaryOp,
)
)
# AST node types were expanded after 2.6
if not sys.version.startswith('2.6'):
        # set.union returns a new set, so rebind SAFE_NODES to include ast.Set
        SAFE_NODES = SAFE_NODES.union(
            set(
                (ast.Set,)
            )
        )
filter_list = []
for filter in filter_loader.all():
filter_list.extend(filter.filters().keys())
test_list = []
for test in test_loader.all():
test_list.extend(test.tests().keys())
CALL_WHITELIST = C.DEFAULT_CALLABLE_WHITELIST + filter_list + test_list
class CleansingNodeVisitor(ast.NodeVisitor):
def generic_visit(self, node, inside_call=False):
if type(node) not in SAFE_NODES:
raise Exception("invalid expression (%s)" % expr)
elif isinstance(node, ast.Call):
inside_call = True
elif isinstance(node, ast.Name) and inside_call:
if hasattr(builtins, node.id) and node.id not in CALL_WHITELIST:
raise Exception("invalid function: %s" % node.id)
# iterate over all child nodes
for child_node in ast.iter_child_nodes(node):
self.generic_visit(child_node, inside_call)
if not isinstance(expr, basestring):
# already templated to a datastructure, perhaps?
if include_exceptions:
return (expr, None)
return expr
cnv = CleansingNodeVisitor()
try:
parsed_tree = ast.parse(expr, mode='eval')
cnv.visit(parsed_tree)
compiled = compile(parsed_tree, expr, 'eval')
result = eval(compiled, {}, locals)
if include_exceptions:
return (result, None)
else:
return result
except SyntaxError as e:
# special handling for syntax errors, we just return
# the expression string back as-is
if include_exceptions:
return (expr, None)
return expr
except Exception as e:
if include_exceptions:
return (expr, e)
return expr
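# Illustrative sketch (not part of the original module) of how the AST
# whitelist behaves with the default configuration:
#   safe_eval("[1, 2, 3]")          # -> [1, 2, 3]; literals are allowed
#   safe_eval("{'a': 1 + 2}")       # -> {'a': 3}; arithmetic on literals is allowed
#   safe_eval("__import__('os')")   # non-whitelisted builtin call: the raw
#                                   # expression string is returned unchanged
#   safe_eval("oops((", include_exceptions=True)   # syntax error -> ("oops((", None)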
| gpl-3.0 | 824,560,155,578,565,800 | 31.968254 | 98 | 0.62181 | false |
comptech/atrex | Software/gaussfitter.py | 1 | 23761 | """
===========
gaussfitter
===========
.. codeauthor:: Adam Ginsburg <[email protected]> 3/17/08
Latest version available at <http://code.google.com/p/agpy/source/browse/trunk/agpy/gaussfitter.py>
"""
import numpy
from numpy.ma import median
from numpy import pi
#from scipy import optimize,stats,pi
from scipy.optimize import curve_fit
from mpfit import mpfit
"""
Note about mpfit/leastsq:
I switched everything over to the Markwardt mpfit routine for a few reasons,
but foremost being the ability to set limits on parameters, not just force them
to be fixed. As far as I can tell, leastsq does not have that capability.
The version of mpfit I use can be found here:
http://code.google.com/p/agpy/source/browse/trunk/mpfit
.. todo::
-turn into a class instead of a collection of objects
-implement WCS-based gaussian fitting with correct coordinates
"""
def moments(data,circle,rotate,vheight,estimator=median,**kwargs):
"""Returns (height, amplitude, x, y, width_x, width_y, rotation angle)
the gaussian parameters of a 2D distribution by calculating its
moments. Depending on the input parameters, will only output
a subset of the above.
If using masked arrays, pass estimator=numpy.ma.median
"""
total = numpy.abs(data).sum()
Y, X = numpy.indices(data.shape) # python convention: reverse x,y numpy.indices
y = numpy.argmax((X*numpy.abs(data)).sum(axis=1)/total)
x = numpy.argmax((Y*numpy.abs(data)).sum(axis=0)/total)
col = data[int(y),:]
# FIRST moment, not second!
width_x = numpy.sqrt(numpy.abs((numpy.arange(col.size)-y)*col).sum()/numpy.abs(col).sum())
row = data[:, int(x)]
width_y = numpy.sqrt(numpy.abs((numpy.arange(row.size)-x)*row).sum()/numpy.abs(row).sum())
width = ( width_x + width_y ) / 2.
height = estimator(data.ravel())
amplitude = data.max()-height
mylist = [amplitude,x,y]
if numpy.isnan(width_y) or numpy.isnan(width_x) or numpy.isnan(height) or numpy.isnan(amplitude):
raise ValueError("something is nan")
if vheight==1:
mylist = [height] + mylist
if circle==0:
mylist = mylist + [width_x,width_y]
if rotate==1:
mylist = mylist + [0.] #rotation "moment" is just zero...
# also, circles don't rotate.
else:
mylist = mylist + [width]
return mylist
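# Illustrative note (not part of the original file): with vheight=1, circle=0
# and rotate=1 the list returned by moments() is ordered
#   [height, amplitude, x, y, width_x, width_y, 0.]
# which is the same layout twodgaussian() and gaussfit() below expect for
# their parameter vectors; the trailing 0. is the unestimated rotation angle.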
def twodgaussian(inpars, circle=False, rotate=True, vheight=True, shape=None):
"""Returns a 2d gaussian function of the form:
x' = numpy.cos(rota) * x - numpy.sin(rota) * y
y' = numpy.sin(rota) * x + numpy.cos(rota) * y
(rota should be in degrees)
g = b + a * numpy.exp ( - ( ((x-center_x)/width_x)**2 +
((y-center_y)/width_y)**2 ) / 2 )
inpars = [b,a,center_x,center_y,width_x,width_y,rota]
(b is background height, a is peak amplitude)
where x and y are the input parameters of the returned function,
and all other parameters are specified by this function
However, the above values are passed by list. The list should be:
inpars = (height,amplitude,center_x,center_y,width_x,width_y,rota)
You can choose to ignore / neglect some of the above input parameters
    using the following options:
circle=0 - default is an elliptical gaussian (different x, y
widths), but can reduce the input by one parameter if it's a
circular gaussian
rotate=1 - default allows rotation of the gaussian ellipse. Can
remove last parameter by setting rotate=0
vheight=1 - default allows a variable height-above-zero, i.e. an
additive constant for the Gaussian function. Can remove first
parameter by setting this to 0
shape=None - if shape is set (to a 2-parameter list) then returns
an image with the gaussian defined by inpars
"""
inpars_old = inpars
inpars = list(inpars)
if vheight == 1:
height = inpars.pop(0)
height = float(height)
else:
height = float(0)
amplitude, center_y, center_x = inpars.pop(0),inpars.pop(0),inpars.pop(0)
amplitude = float(amplitude)
center_x = float(center_x)
center_y = float(center_y)
if circle == 1:
width = inpars.pop(0)
width_x = float(width)
width_y = float(width)
rotate = 0
else:
width_x, width_y = inpars.pop(0),inpars.pop(0)
width_x = float(width_x)
width_y = float(width_y)
if rotate == 1:
rota = inpars.pop(0)
rota = pi/180. * float(rota)
rcen_x = center_x * numpy.cos(rota) - center_y * numpy.sin(rota)
rcen_y = center_x * numpy.sin(rota) + center_y * numpy.cos(rota)
else:
rcen_x = center_x
rcen_y = center_y
if len(inpars) > 0:
raise ValueError("There are still input parameters:" + str(inpars) + \
" and you've input: " + str(inpars_old) + \
" circle=%d, rotate=%d, vheight=%d" % (circle,rotate,vheight) )
def rotgauss(x,y):
if rotate==1:
xp = x * numpy.cos(rota) - y * numpy.sin(rota)
yp = x * numpy.sin(rota) + y * numpy.cos(rota)
else:
xp = x
yp = y
g = height+amplitude*numpy.exp(
-(((rcen_x-xp)/width_x)**2+
((rcen_y-yp)/width_y)**2)/2.)
return g
if shape is not None:
return rotgauss(*numpy.indices(shape))
else:
return rotgauss
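# Editor's note: the following example is an illustrative sketch added for
# clarity; it is not part of the original module. It shows one way to build a
# model image with twodgaussian(); the grid size and parameter values are
# arbitrary assumptions.
def _example_twodgaussian_image():
    """Return a 64x64 model image of an elliptical, rotated gaussian."""
    # inpars = (height, amplitude, center_x, center_y, width_x, width_y, rota)
    return twodgaussian([0., 1., 32., 32., 5., 8., 30.],
                        circle=0, rotate=1, vheight=1, shape=(64, 64))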
def gaussfit(data,err=None,params=(),autoderiv=True,return_all=False,circle=False,
fixed=numpy.repeat(False,7),limitedmin=[False,False,False,False,True,True,True],
limitedmax=[False,False,False,False,False,False,True],
usemoment=numpy.array([],dtype='bool'),
minpars=numpy.repeat(0,7),maxpars=[0,0,0,0,0,0,360],
rotate=1,vheight=1,quiet=True,returnmp=False,
returnfitimage=False,**kwargs):
"""
Gaussian fitter with the ability to fit a variety of different forms of
2-dimensional gaussian.
Input Parameters:
data - 2-dimensional data array
err=None - error array with same size as data array
params=[] - initial input parameters for Gaussian function.
(height, amplitude, x, y, width_x, width_y, rota)
if not input, these will be determined from the moments of the system,
assuming no rotation
autoderiv=1 - use the autoderiv provided in the lmder.f function (the
alternative is to use an analytic derivative with lmdif.f: this method
is less robust)
return_all=0 - Default is to return only the Gaussian parameters.
1 - fit params, fit error
returnfitimage - returns (best fit params,best fit image)
returnmp - returns the full mpfit struct
circle=0 - default is an elliptical gaussian (different x, y widths),
but can reduce the input by one parameter if it's a circular gaussian
rotate=1 - default allows rotation of the gaussian ellipse. Can remove
last parameter by setting rotate=0. Expects angle in DEGREES
vheight=1 - default allows a variable height-above-zero, i.e. an
additive constant for the Gaussian function. Can remove first
parameter by setting this to 0
usemoment - can choose which parameters to use a moment estimation for.
Other parameters will be taken from params. Needs to be a boolean
array.
Output:
Default output is a set of Gaussian parameters with the same shape as
the input parameters
Can also output the covariance matrix, 'infodict' that contains a lot
more detail about the fit (see scipy.optimize.leastsq), and a message
from leastsq telling what the exit status of the fitting routine was
Warning: Does NOT necessarily output a rotation angle between 0 and 360 degrees.
"""
usemoment=numpy.array(usemoment,dtype='bool')
params=numpy.array(params,dtype='float')
if usemoment.any() and len(params)==len(usemoment):
moment = numpy.array(moments(data,circle,rotate,vheight,**kwargs),dtype='float')
params[usemoment] = moment[usemoment]
elif len(params) == 0:
params = (moments(data,circle,rotate,vheight,**kwargs))
if vheight==0:
vheight=1
params = numpy.concatenate([[0],params])
fixed[0] = 1
# mpfit will fail if it is given a start parameter outside the allowed range:
for i in xrange(len(params)):
if params[i] > maxpars[i] and limitedmax[i]: params[i] = maxpars[i]
if params[i] < minpars[i] and limitedmin[i]: params[i] = minpars[i]
if err is None:
errorfunction = lambda p: numpy.ravel((twodgaussian(p,circle,rotate,vheight)\
(*numpy.indices(data.shape)) - data))
else:
errorfunction = lambda p: numpy.ravel((twodgaussian(p,circle,rotate,vheight)\
(*numpy.indices(data.shape)) - data)/err)
def mpfitfun(data,err):
if err is None:
def f(p,fjac=None): return [0,numpy.ravel(data-twodgaussian(p,circle,rotate,vheight)\
(*numpy.indices(data.shape)))]
else:
def f(p,fjac=None): return [0,numpy.ravel((data-twodgaussian(p,circle,rotate,vheight)\
(*numpy.indices(data.shape)))/err)]
return f
parinfo = [
{'n':1,'value':params[1],'limits':[minpars[1],maxpars[1]],'limited':[limitedmin[1],limitedmax[1]],'fixed':fixed[1],'parname':"AMPLITUDE",'error':0},
{'n':2,'value':params[2],'limits':[minpars[2],maxpars[2]],'limited':[limitedmin[2],limitedmax[2]],'fixed':fixed[2],'parname':"XSHIFT",'error':0},
{'n':3,'value':params[3],'limits':[minpars[3],maxpars[3]],'limited':[limitedmin[3],limitedmax[3]],'fixed':fixed[3],'parname':"YSHIFT",'error':0},
{'n':4,'value':params[4],'limits':[minpars[4],maxpars[4]],'limited':[limitedmin[4],limitedmax[4]],'fixed':fixed[4],'parname':"XWIDTH",'error':0} ]
if vheight == 1:
parinfo.insert(0,{'n':0,'value':params[0],'limits':[minpars[0],maxpars[0]],'limited':[limitedmin[0],limitedmax[0]],'fixed':fixed[0],'parname':"HEIGHT",'error':0})
if circle == 0:
parinfo.append({'n':5,'value':params[5],'limits':[minpars[5],maxpars[5]],'limited':[limitedmin[5],limitedmax[5]],'fixed':fixed[5],'parname':"YWIDTH",'error':0})
if rotate == 1:
parinfo.append({'n':6,'value':params[6],'limits':[minpars[6],maxpars[6]],'limited':[limitedmin[6],limitedmax[6]],'fixed':fixed[6],'parname':"ROTATION",'error':0})
if autoderiv == 0:
# the analytic derivative, while not terribly difficult, is less
# efficient and useful. I only bothered putting it here because I was
# instructed to do so for a class project - please ask if you would
# like this feature implemented
raise ValueError("I'm sorry, I haven't implemented this feature yet.")
else:
# p, cov, infodict, errmsg, success = optimize.leastsq(errorfunction,\
# params, full_output=1)
mp = mpfit(mpfitfun(data,err),parinfo=parinfo,quiet=quiet)
if returnmp:
returns = (mp)
elif return_all == 0:
returns = mp.params
elif return_all == 1:
returns = mp.params,mp.perror
if returnfitimage:
fitimage = twodgaussian(mp.params,circle,rotate,vheight)(*numpy.indices(data.shape))
returns = (returns,fitimage)
return returns
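# Editor's note: illustrative sketch added for clarity, not part of the
# original module. It shows a simple round trip: synthesise an image with
# twodgaussian() and recover the parameters with gaussfit(). It assumes mpfit
# is importable; the parameter values are arbitrary.
def _example_gaussfit_roundtrip():
    """Fit a noiseless synthetic gaussian; returns the best-fit parameters."""
    truth = [0., 1., 20., 25., 3., 5., 0.]
    data = twodgaussian(truth, circle=0, rotate=1, vheight=1, shape=(50, 60))
    # With no initial params given, moments() supplies the starting guess.
    return gaussfit(data)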
def onedmoments(Xax,data,vheight=True,estimator=median,negamp=None,
veryverbose=False, **kwargs):
"""Returns (height, amplitude, x, width_x)
the gaussian parameters of a 1D distribution by calculating its
moments. Depending on the input parameters, will only output
a subset of the above.
If using masked arrays, pass estimator=numpy.ma.median
'estimator' is used to measure the background level (height)
negamp can be used to force the peak negative (True), positive (False),
or it will be "autodetected" (negamp=None)
"""
dx = numpy.mean(Xax[1:] - Xax[:-1]) # assume a regular grid
integral = (data*dx).sum()
height = estimator(data)
# try to figure out whether pos or neg based on the minimum width of the pos/neg peaks
Lpeakintegral = integral - height*len(Xax)*dx - (data[data>height]*dx).sum()
Lamplitude = data.min()-height
Lwidth_x = 0.5*(numpy.abs(Lpeakintegral / Lamplitude))
Hpeakintegral = integral - height*len(Xax)*dx - (data[data<height]*dx).sum()
Hamplitude = data.max()-height
Hwidth_x = 0.5*(numpy.abs(Hpeakintegral / Hamplitude))
Lstddev = Xax[data<data.mean()].std()
Hstddev = Xax[data>data.mean()].std()
#print "Lstddev: %10.3g Hstddev: %10.3g" % (Lstddev,Hstddev)
#print "Lwidth_x: %10.3g Hwidth_x: %10.3g" % (Lwidth_x,Hwidth_x)
if negamp: # can force the guess to be negative
xcen,amplitude,width_x = Xax[numpy.argmin(data)],Lamplitude,Lwidth_x
elif negamp is None:
if Hstddev < Lstddev:
xcen,amplitude,width_x, = Xax[numpy.argmax(data)],Hamplitude,Hwidth_x
else:
xcen,amplitude,width_x, = Xax[numpy.argmin(data)],Lamplitude,Lwidth_x
else: # if negamp==False, make positive
xcen,amplitude,width_x = Xax[numpy.argmax(data)],Hamplitude,Hwidth_x
if veryverbose:
print "negamp: %s amp,width,cen Lower: %g, %g Upper: %g, %g Center: %g" %\
(negamp,Lamplitude,Lwidth_x,Hamplitude,Hwidth_x,xcen)
mylist = [amplitude,xcen,width_x]
if numpy.isnan(width_x) or numpy.isnan(height) or numpy.isnan(amplitude):
raise ValueError("something is nan")
if vheight:
mylist = [height] + mylist
return mylist
def onedgaussian(x,H,A,dx,w):
"""
Returns a 1-dimensional gaussian of form
H+A*numpy.exp(-(x-dx)**2/(2*w**2))
"""
return H+A*numpy.exp(-(x-dx)**2/(2*w**2))
def onedgaussfit(xax, data, err=None,
params=[0,1,0,1],fixed=[False,False,False,False],
limitedmin=[False,False,False,True],
limitedmax=[False,False,False,False], minpars=[0,0,0,0],
maxpars=[0,0,0,0], quiet=True, shh=True,
veryverbose=False,
vheight=True, negamp=False,
usemoments=False):
"""
Inputs:
xax - x axis
data - y axis
err - error corresponding to data
params - Fit parameters: Height of background, Amplitude, Shift, Width
fixed - Is parameter fixed?
limitedmin/minpars - set lower limits on each parameter (default: width>0)
limitedmax/maxpars - set upper limits on each parameter
quiet - should MPFIT output each iteration?
shh - output final parameters?
usemoments - replace default parameters with moments
Returns:
Fit parameters
Model
Fit errors
chi2
"""
def mpfitfun(x,y,err):
if err is None:
def f(p,fjac=None): return [0,(y-onedgaussian(x,*p))]
else:
def f(p,fjac=None): return [0,(y-onedgaussian(x,*p))/err]
return f
if xax is None:
xax = numpy.arange(len(data))
if vheight is False:
height = params[0]
fixed[0] = True
if usemoments:
params = onedmoments(xax,data,vheight=vheight,negamp=negamp, veryverbose=veryverbose)
if vheight is False: params = [height]+params
if veryverbose: print "OneD moments: h: %g a: %g c: %g w: %g" % tuple(params)
parinfo = [ {'n':0,'value':params[0],'limits':[minpars[0],maxpars[0]],'limited':[limitedmin[0],limitedmax[0]],'fixed':fixed[0],'parname':"HEIGHT",'error':0} ,
{'n':1,'value':params[1],'limits':[minpars[1],maxpars[1]],'limited':[limitedmin[1],limitedmax[1]],'fixed':fixed[1],'parname':"AMPLITUDE",'error':0},
{'n':2,'value':params[2],'limits':[minpars[2],maxpars[2]],'limited':[limitedmin[2],limitedmax[2]],'fixed':fixed[2],'parname':"SHIFT",'error':0},
{'n':3,'value':params[3],'limits':[minpars[3],maxpars[3]],'limited':[limitedmin[3],limitedmax[3]],'fixed':fixed[3],'parname':"WIDTH",'error':0}]
mp = mpfit(mpfitfun(xax,data,err),parinfo=parinfo,quiet=quiet)
mpp = mp.params
mpperr = mp.perror
chi2 = mp.fnorm
if mp.status == 0:
raise Exception(mp.errmsg)
if (not shh) or veryverbose:
print "Fit status: ",mp.status
for i,p in enumerate(mpp):
parinfo[i]['value'] = p
print parinfo[i]['parname'],p," +/- ",mpperr[i]
print "Chi2: ",mp.fnorm," Reduced Chi2: ",mp.fnorm/len(data)," DOF:",len(data)-len(mpp)
return mpp,onedgaussian(xax,*mpp),mpperr,chi2
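# Editor's note: illustrative sketch added for clarity, not part of the
# original module. It demonstrates onedgaussfit() on a synthetic profile made
# with onedgaussian(); the axis range and parameters are arbitrary assumptions
# and mpfit must be importable.
def _example_onedgaussfit():
    """Fit a synthetic 1D gaussian; returns (params, model, errors, chi2)."""
    x = numpy.linspace(-5., 5., 200)
    y = onedgaussian(x, 0.0, 2.0, 1.0, 0.5)
    return onedgaussfit(x, y, usemoments=True)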
def n_gaussian(pars=None,a=None,dx=None,sigma=None):
"""
Returns a function that sums over N gaussians, where N is the length of
a,dx,sigma *OR* N = len(pars) / 3
The background "height" is assumed to be zero (you must "baseline" your
spectrum before fitting)
pars - a list with len(pars) = 3n, assuming a,dx,sigma repeated
dx - offset (velocity center) values
sigma - line widths
a - amplitudes
"""
if len(pars) % 3 == 0:
a = [pars[ii] for ii in xrange(0,len(pars),3)]
dx = [pars[ii] for ii in xrange(1,len(pars),3)]
sigma = [pars[ii] for ii in xrange(2,len(pars),3)]
elif not(len(dx) == len(sigma) == len(a)):
raise ValueError("Wrong array lengths! dx: %i sigma: %i a: %i" % (len(dx),len(sigma),len(a)))
def g(x):
v = numpy.zeros(len(x))
for i in range(len(dx)):
v += a[i] * numpy.exp( - ( x - dx[i] )**2 / (2.0*sigma[i]**2) )
return v
return g
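# Editor's note: illustrative sketch added for clarity, not part of the
# original module. It evaluates a two-component model returned by
# n_gaussian(); the (amplitude, centre, sigma) triplets are arbitrary.
def _example_n_gaussian():
    """Return a two-gaussian model evaluated on a small grid."""
    x = numpy.linspace(-5., 5., 100)
    return n_gaussian(pars=[1.0, -2.0, 0.5, 0.7, 2.0, 0.3])(x)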
def multigaussfit(xax, data, ngauss=1, err=None, params=[1,0,1],
fixed=[False,False,False], limitedmin=[False,False,True],
limitedmax=[False,False,False], minpars=[0,0,0], maxpars=[0,0,0],
quiet=True, shh=True, veryverbose=False):
"""
An improvement on onedgaussfit. Lets you fit multiple gaussians.
Inputs:
xax - x axis
data - y axis
ngauss - How many gaussians to fit? Default 1 (this could supersede onedgaussfit)
err - error corresponding to data
These parameters need to have length = 3*ngauss. If ngauss > 1 and length = 3, they will
be replicated ngauss times, otherwise they will be reset to defaults:
params - Fit parameters: [amplitude, offset, width] * ngauss
If len(params) % 3 == 0, ngauss will be set to len(params) / 3
fixed - Is parameter fixed?
limitedmin/minpars - set lower limits on each parameter (default: width>0)
limitedmax/maxpars - set upper limits on each parameter
quiet - should MPFIT output each iteration?
shh - output final parameters?
Returns:
Fit parameters
Model
Fit errors
chi2
"""
if len(params) != ngauss and (len(params) / 3) > ngauss:
ngauss = len(params) / 3
if isinstance(params,numpy.ndarray): params=params.tolist()
# make sure all various things are the right length; if they're not, fix them using the defaults
for parlist in (params,fixed,limitedmin,limitedmax,minpars,maxpars):
if len(parlist) != 3*ngauss:
# if you leave the defaults, or enter something that can be multiplied by 3 to get to the
# right number of gaussians, it will just replicate
if len(parlist) == 3:
parlist *= ngauss
elif parlist==params:
parlist[:] = [1,0,1] * ngauss
elif parlist==fixed or parlist==limitedmax:
parlist[:] = [False,False,False] * ngauss
elif parlist==limitedmin:
parlist[:] = [False,False,True] * ngauss
elif parlist==minpars or parlist==maxpars:
parlist[:] = [0,0,0] * ngauss
def mpfitfun(x,y,err):
if err is None:
def f(p,fjac=None): return [0,(y-n_gaussian(pars=p)(x))]
else:
def f(p,fjac=None): return [0,(y-n_gaussian(pars=p)(x))/err]
return f
if xax is None:
xax = numpy.arange(len(data))
parnames = {0:"AMPLITUDE",1:"SHIFT",2:"WIDTH"}
parinfo = [ {'n':ii, 'value':params[ii],
'limits':[minpars[ii],maxpars[ii]],
'limited':[limitedmin[ii],limitedmax[ii]], 'fixed':fixed[ii],
'parname':parnames[ii%3]+str(ii%3), 'error':ii}
for ii in xrange(len(params)) ]
if veryverbose:
print "GUESSES: "
print "\n".join(["%s: %s" % (p['parname'],p['value']) for p in parinfo])
mp = mpfit(mpfitfun(xax,data,err),parinfo=parinfo,quiet=quiet)
mpp = mp.params
mpperr = mp.perror
chi2 = mp.fnorm
if mp.status == 0:
raise Exception(mp.errmsg)
if not shh:
print "Final fit values: "
for i,p in enumerate(mpp):
parinfo[i]['value'] = p
print parinfo[i]['parname'],p," +/- ",mpperr[i]
print "Chi2: ",mp.fnorm," Reduced Chi2: ",mp.fnorm/len(data)," DOF:",len(data)-len(mpp)
return mpp,n_gaussian(pars=mpp)(xax),mpperr,chi2
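# Editor's note: illustrative sketch added for clarity, not part of the
# original module. It fits two components to a synthetic two-gaussian profile;
# the starting guesses are arbitrary assumptions and mpfit must be importable.
def _example_multigaussfit():
    """Fit two gaussians; returns (params, model, errors, chi2)."""
    x = numpy.linspace(-5., 5., 300)
    y = n_gaussian(pars=[1.0, -1.5, 0.4, 0.6, 1.5, 0.3])(x)
    return multigaussfit(x, y, ngauss=2, params=[1., -1., 0.5, 0.5, 1., 0.5])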
def collapse_gaussfit(cube,xax=None,axis=2,negamp=False,usemoments=True,nsigcut=1.0,mppsigcut=1.0,
return_errors=False, **kwargs):
import time
std_coll = cube.std(axis=axis)
std_coll[std_coll==0] = numpy.nan # must eliminate all-zero spectra
mean_std = median(std_coll[std_coll==std_coll])
if axis > 0:
cube = cube.swapaxes(0,axis)
width_arr = numpy.zeros(cube.shape[1:]) + numpy.nan
amp_arr = numpy.zeros(cube.shape[1:]) + numpy.nan
chi2_arr = numpy.zeros(cube.shape[1:]) + numpy.nan
offset_arr = numpy.zeros(cube.shape[1:]) + numpy.nan
width_err = numpy.zeros(cube.shape[1:]) + numpy.nan
amp_err = numpy.zeros(cube.shape[1:]) + numpy.nan
offset_err = numpy.zeros(cube.shape[1:]) + numpy.nan
if xax is None:
xax = numpy.arange(cube.shape[0])
starttime = time.time()
print "Cube shape: ",cube.shape
if negamp: extremum=numpy.min
else: extremum=numpy.max
print "Fitting a total of %i spectra with peak signal above %f" % ((numpy.abs(extremum(cube,axis=0)) > (mean_std*nsigcut)).sum(),mean_std*nsigcut)
for i in xrange(cube.shape[1]):
t0 = time.time()
nspec = (numpy.abs(extremum(cube[:,i,:],axis=0)) > (mean_std*nsigcut)).sum()
print "Working on row %d with %d spectra to fit" % (i,nspec) ,
for j in xrange(cube.shape[2]):
if numpy.abs(extremum(cube[:,i,j])) > (mean_std*nsigcut):
mpp,gfit,mpperr,chi2 = onedgaussfit(xax,cube[:,i,j],err=numpy.ones(cube.shape[0])*mean_std,negamp=negamp,usemoments=usemoments,**kwargs)
if numpy.abs(mpp[1]) > (mpperr[1]*mppsigcut):
width_arr[i,j] = mpp[3]
offset_arr[i,j] = mpp[2]
chi2_arr[i,j] = chi2
amp_arr[i,j] = mpp[1]
width_err[i,j] = mpperr[3]
offset_err[i,j] = mpperr[2]
amp_err[i,j] = mpperr[1]
dt = time.time()-t0
if nspec > 0:
print "in %f seconds (average: %f)" % (dt,dt/float(nspec))
else:
print "in %f seconds" % (dt)
print "Total time %f seconds" % (time.time()-starttime)
if return_errors:
return width_arr,offset_arr,amp_arr,width_err,offset_err,amp_err,chi2_arr
else:
return width_arr,offset_arr,amp_arr,chi2_arr
| lgpl-3.0 | -7,847,861,698,600,784,000 | 41.96745 | 174 | 0.608434 | false |
Mazecreator/tensorflow | tensorflow/contrib/timeseries/examples/predict.py | 69 | 5579 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""An example of training and predicting with a TFTS estimator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import numpy as np
import tensorflow as tf
try:
import matplotlib # pylint: disable=g-import-not-at-top
matplotlib.use("TkAgg") # Need Tk for interactive plots.
from matplotlib import pyplot # pylint: disable=g-import-not-at-top
HAS_MATPLOTLIB = True
except ImportError:
# Plotting requires matplotlib, but the unit test running this code may
# execute in an environment without it (i.e. matplotlib is not a build
# dependency). We'd still like to test the TensorFlow-dependent parts of this
# example, namely train_and_predict.
HAS_MATPLOTLIB = False
FLAGS = None
def structural_ensemble_train_and_predict(csv_file_name):
# Cycle between 5 latent values over a period of 100. This leads to a very
# smooth periodic component (and a small model), which is a good fit for our
# example data. Modeling high-frequency periodic variations will require a
# higher cycle_num_latent_values.
structural = tf.contrib.timeseries.StructuralEnsembleRegressor(
periodicities=100, num_features=1, cycle_num_latent_values=5)
return train_and_predict(structural, csv_file_name, training_steps=150)
def ar_train_and_predict(csv_file_name):
# An autoregressive model, with periodicity handled as a time-based
# regression. Note that this requires windows of size 16 (input_window_size +
# output_window_size) for training.
ar = tf.contrib.timeseries.ARRegressor(
periodicities=100, input_window_size=10, output_window_size=6,
num_features=1,
# Use the (default) normal likelihood loss to adaptively fit the
# variance. SQUARED_LOSS overestimates variance when there are trends in
# the series.
loss=tf.contrib.timeseries.ARModel.NORMAL_LIKELIHOOD_LOSS)
return train_and_predict(ar, csv_file_name, training_steps=600)
def train_and_predict(estimator, csv_file_name, training_steps):
"""A simple example of training and predicting."""
# Read data in the default "time,value" CSV format with no header
reader = tf.contrib.timeseries.CSVReader(csv_file_name)
# Set up windowing and batching for training
train_input_fn = tf.contrib.timeseries.RandomWindowInputFn(
reader, batch_size=16, window_size=16)
# Fit model parameters to data
estimator.train(input_fn=train_input_fn, steps=training_steps)
# Evaluate on the full dataset sequentially, collecting in-sample predictions
# for a qualitative evaluation. Note that this loads the whole dataset into
# memory. For quantitative evaluation, use RandomWindowChunker.
evaluation_input_fn = tf.contrib.timeseries.WholeDatasetInputFn(reader)
evaluation = estimator.evaluate(input_fn=evaluation_input_fn, steps=1)
# Predict starting after the evaluation
(predictions,) = tuple(estimator.predict(
input_fn=tf.contrib.timeseries.predict_continuation_input_fn(
evaluation, steps=200)))
times = evaluation["times"][0]
observed = evaluation["observed"][0, :, 0]
mean = np.squeeze(np.concatenate(
[evaluation["mean"][0], predictions["mean"]], axis=0))
variance = np.squeeze(np.concatenate(
[evaluation["covariance"][0], predictions["covariance"]], axis=0))
all_times = np.concatenate([times, predictions["times"]], axis=0)
upper_limit = mean + np.sqrt(variance)
lower_limit = mean - np.sqrt(variance)
return times, observed, all_times, mean, upper_limit, lower_limit
def make_plot(name, training_times, observed, all_times, mean,
upper_limit, lower_limit):
"""Plot a time series in a new figure."""
pyplot.figure()
pyplot.plot(training_times, observed, "b", label="training series")
pyplot.plot(all_times, mean, "r", label="forecast")
pyplot.plot(all_times, upper_limit, "g", label="forecast upper bound")
pyplot.plot(all_times, lower_limit, "g", label="forecast lower bound")
pyplot.fill_between(all_times, lower_limit, upper_limit, color="grey",
alpha=0.2)
pyplot.axvline(training_times[-1], color="k", linestyle="--")
pyplot.xlabel("time")
pyplot.ylabel("observations")
pyplot.legend(loc=0)
pyplot.title(name)
def main(unused_argv):
if not HAS_MATPLOTLIB:
raise ImportError(
"Please install matplotlib to generate a plot from this example.")
make_plot("Structural ensemble",
*structural_ensemble_train_and_predict(FLAGS.input_filename))
make_plot("AR", *ar_train_and_predict(FLAGS.input_filename))
pyplot.show()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--input_filename",
type=str,
required=True,
help="Input csv file.")
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| apache-2.0 | 4,242,549,670,582,883,300 | 41.265152 | 80 | 0.715003 | false |
krsjoseph/youtube-dl | youtube_dl/extractor/tinypic.py | 126 | 1893 | from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import ExtractorError
class TinyPicIE(InfoExtractor):
IE_NAME = 'tinypic'
IE_DESC = 'tinypic.com videos'
_VALID_URL = r'http://(?:.+?\.)?tinypic\.com/player\.php\?v=(?P<id>[^&]+)&s=\d+'
_TESTS = [
{
'url': 'http://tinypic.com/player.php?v=6xw7tc%3E&s=5#.UtqZmbRFCM8',
'md5': '609b74432465364e72727ebc6203f044',
'info_dict': {
'id': '6xw7tc',
'ext': 'flv',
'title': 'shadow phenomenon weird',
},
},
{
'url': 'http://de.tinypic.com/player.php?v=dy90yh&s=8',
'only_matching': True,
}
]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
webpage = self._download_webpage(url, video_id, 'Downloading page')
mobj = re.search(r'(?m)fo\.addVariable\("file",\s"(?P<fileid>[\da-z]+)"\);\n'
'\s+fo\.addVariable\("s",\s"(?P<serverid>\d+)"\);', webpage)
if mobj is None:
raise ExtractorError('Video %s does not exist' % video_id, expected=True)
file_id = mobj.group('fileid')
server_id = mobj.group('serverid')
KEYWORDS_SUFFIX = ', Video, images, photos, videos, myspace, ebay, video hosting, photo hosting'
keywords = self._html_search_meta('keywords', webpage, 'title')
title = keywords[:-len(KEYWORDS_SUFFIX)] if keywords.endswith(KEYWORDS_SUFFIX) else ''
video_url = 'http://v%s.tinypic.com/%s.flv' % (server_id, file_id)
thumbnail = 'http://v%s.tinypic.com/%s_th.jpg' % (server_id, file_id)
return {
'id': file_id,
'url': video_url,
'thumbnail': thumbnail,
'title': title
}
| unlicense | -4,758,051,863,561,678,000 | 32.803571 | 104 | 0.536714 | false |
qilicun/python | python2/diveintopythonzh-cn-5.4b/soundex/stage1/soundex1d.py | 4 | 2390 | """Soundex algorithm
This program is part of "Dive Into Python", a free Python book for
experienced programmers. Visit http://diveintopython.org/ for the
latest version.
"""
__author__ = "Mark Pilgrim ([email protected])"
__version__ = "$Revision: 1.2 $"
__date__ = "$Date: 2004/05/06 21:36:36 $"
__copyright__ = "Copyright (c) 2004 Mark Pilgrim"
__license__ = "Python"
import string, re
charToSoundex = {"A": "9",
"B": "1",
"C": "2",
"D": "3",
"E": "9",
"F": "1",
"G": "2",
"H": "9",
"I": "9",
"J": "2",
"K": "2",
"L": "4",
"M": "5",
"N": "5",
"O": "9",
"P": "1",
"Q": "2",
"R": "6",
"S": "2",
"T": "3",
"U": "9",
"V": "1",
"W": "9",
"X": "2",
"Y": "9",
"Z": "2"}
def soundex(source):
"convert string to Soundex equivalent"
# Soundex requirements:
# source string must be at least 1 character
# and must consist entirely of letters
if not source:
return "0000"
for c in source:
if not ('A' <= c <= 'Z') and not ('a' <= c <= 'z'):
return "0000"
# Soundex algorithm:
# 1. make first character uppercase
source = source[0].upper() + source[1:]
# 2. translate all other characters to Soundex digits
digits = source[0]
for s in source[1:]:
s = s.upper()
digits += charToSoundex[s]
# 3. remove consecutive duplicates
digits2 = digits[0]
for d in digits[1:]:
if digits2[-1] != d:
digits2 += d
# 4. remove all "9"s
digits3 = re.sub('9', '', digits2)
# 5. pad end with "0"s to 4 characters
while len(digits3) < 4:
digits3 += "0"
# 6. return first 4 characters
return digits3[:4]
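# Editor's note: worked examples added for clarity (not part of the original
# file). Following the numbered steps above, the sample names map to:
#   soundex('Woo')             -> 'W000'
#   soundex('Pilgrim')         -> 'P426'
#   soundex('Flingjingwaller') -> 'F452'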
if __name__ == '__main__':
from timeit import Timer
names = ('Woo', 'Pilgrim', 'Flingjingwaller')
for name in names:
statement = "soundex('%s')" % name
t = Timer(statement, "from __main__ import soundex")
print name.ljust(15), soundex(name), min(t.repeat())
| gpl-3.0 | -3,036,519,781,077,432,300 | 26.471264 | 66 | 0.442259 | false |
kizyle502/collegiatemasters | collegiatemasters/players/models.py | 1 | 1433 | from django.db import models
from django.core.urlresolvers import reverse
from autoslug import AutoSlugField
from model_utils.models import TimeStampedModel
from django_countries.fields import CountryField
class Player(TimeStampedModel):
GROUP_UNSPECIFIED = "unspecified"
GROUP_FIRST = "first"
GROUP_SECOND = "second"
GROUP_THIRD = "third"
GROUP_FOURTH = "fourth"
GROUP_CHOICES = (
(GROUP_UNSPECIFIED, "Unspecified"),
(GROUP_FIRST, "First"),
(GROUP_SECOND, "Second"),
(GROUP_THIRD, "Third"),
(GROUP_FOURTH, "Fourth"),
)
name = models.CharField("Player Name", max_length=255, unique=True)
slug = AutoSlugField("Player Address", unique=True, always_update=False, populate_from='name')
round1 = models.IntegerField("Round 1", null=True)
round2 = models.IntegerField("Round 2", null=True)
round3 = models.IntegerField("Round 3", null=True)
round4 = models.IntegerField("Round 4", null=True)
group = models.CharField("Group (Based on Golfweek world rank)",
choices=GROUP_CHOICES,
default=GROUP_UNSPECIFIED,
max_length=255)
home_country = CountryField("Home Country", null=True, blank=True)
def __str__(self):
return self.name
def get_absolute_url(self):
return reverse('players:detail', kwargs={'name': self.name})
| bsd-3-clause | 79,830,393,452,765,250 | 30.844444 | 98 | 0.641312 | false |
Kupoman/thor | src/appdirs.py | 1 | 22475 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2005-2010 ActiveState Software Inc.
# Copyright (c) 2013 Eddy Petrișor
"""Utilities for determining application-specific dirs.
See <http://github.com/ActiveState/appdirs> for details and usage.
"""
# Dev Notes:
# - MSDN on where to store app data files:
# http://support.microsoft.com/default.aspx?scid=kb;en-us;310294#XSLTH3194121123120121120120
# - Mac OS X: http://developer.apple.com/documentation/MacOSX/Conceptual/BPFileSystem/index.html
# - XDG spec for Un*x: http://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html
__version_info__ = (1, 4, 1)
__version__ = '.'.join(map(str, __version_info__))
import sys
import os
PY3 = sys.version_info[0] == 3
if PY3:
unicode = str
if sys.platform.startswith('java'):
import platform
os_name = platform.java_ver()[3][0]
if os_name.startswith('Windows'): # "Windows XP", "Windows 7", etc.
system = 'win32'
elif os_name.startswith('Mac'): # "Mac OS X", etc.
system = 'darwin'
else: # "Linux", "SunOS", "FreeBSD", etc.
# Setting this to "linux2" is not ideal, but only Windows or Mac
# are actually checked for and the rest of the module expects
# *sys.platform* style strings.
system = 'linux2'
else:
system = sys.platform
def user_data_dir(appname=None, appauthor=None, version=None, roaming=False):
r"""Return full path to the user-specific data dir for this application.
"appname" is the name of application.
If None, just the system directory is returned.
"appauthor" (only used on Windows) is the name of the
appauthor or distributing body for this application. Typically
it is the owning company name. This falls back to appname. You may
pass False to disable it.
"version" is an optional version path element to append to the
path. You might want to use this if you want multiple versions
of your app to be able to run independently. If used, this
would typically be "<major>.<minor>".
Only applied when appname is present.
"roaming" (boolean, default False) can be set True to use the Windows
roaming appdata directory. That means that for users on a Windows
network setup for roaming profiles, this user data will be
sync'd on login. See
<http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
for a discussion of issues.
Typical user data directories are:
Mac OS X: ~/Library/Application Support/<AppName>
Unix: ~/.local/share/<AppName> # or in $XDG_DATA_HOME, if defined
Win XP (not roaming): C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>
Win XP (roaming): C:\Documents and Settings\<username>\Application Data\<AppAuthor>\<AppName>
Win 7 (not roaming): C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>
Win 7 (roaming): C:\Users\<username>\AppData\Roaming\<AppAuthor>\<AppName>
For Unix, we follow the XDG spec and support $XDG_DATA_HOME.
That means, by default "~/.local/share/<AppName>".
"""
if system == "win32":
if appauthor is None:
appauthor = appname
const = roaming and "CSIDL_APPDATA" or "CSIDL_LOCAL_APPDATA"
path = os.path.normpath(_get_win_folder(const))
if appname:
if appauthor is not False:
path = os.path.join(path, appauthor, appname)
else:
path = os.path.join(path, appname)
elif system == 'darwin':
path = os.path.expanduser('~/Library/Application Support/')
if appname:
path = os.path.join(path, appname)
else:
path = os.getenv('XDG_DATA_HOME', os.path.expanduser("~/.local/share"))
if appname:
path = os.path.join(path, appname)
if appname and version:
path = os.path.join(path, version)
return path
def site_data_dir(appname=None, appauthor=None, version=None, multipath=False):
"""Return full path to the user-shared data dir for this application.
"appname" is the name of application.
If None, just the system directory is returned.
"appauthor" (only used on Windows) is the name of the
appauthor or distributing body for this application. Typically
it is the owning company name. This falls back to appname. You may
pass False to disable it.
"version" is an optional version path element to append to the
path. You might want to use this if you want multiple versions
of your app to be able to run independently. If used, this
would typically be "<major>.<minor>".
Only applied when appname is present.
"multipath" is an optional parameter only applicable to *nix
which indicates that the entire list of data dirs should be
returned. By default, the first item from XDG_DATA_DIRS is
returned, or '/usr/local/share/<AppName>',
if XDG_DATA_DIRS is not set
Typical user data directories are:
Mac OS X: /Library/Application Support/<AppName>
Unix: /usr/local/share/<AppName> or /usr/share/<AppName>
Win XP: C:\Documents and Settings\All Users\Application Data\<AppAuthor>\<AppName>
Vista: (Fail! "C:\ProgramData" is a hidden *system* directory on Vista.)
Win 7: C:\ProgramData\<AppAuthor>\<AppName> # Hidden, but writeable on Win 7.
For Unix, this is using the $XDG_DATA_DIRS[0] default.
WARNING: Do not use this on Windows. See the Vista-Fail note above for why.
"""
if system == "win32":
if appauthor is None:
appauthor = appname
path = os.path.normpath(_get_win_folder("CSIDL_COMMON_APPDATA"))
if appname:
if appauthor is not False:
path = os.path.join(path, appauthor, appname)
else:
path = os.path.join(path, appname)
elif system == 'darwin':
path = os.path.expanduser('/Library/Application Support')
if appname:
path = os.path.join(path, appname)
else:
# XDG default for $XDG_DATA_DIRS
# only first, if multipath is False
path = os.getenv('XDG_DATA_DIRS',
os.pathsep.join(['/usr/local/share', '/usr/share']))
pathlist = [os.path.expanduser(x.rstrip(os.sep)) for x in path.split(os.pathsep)]
if appname:
if version:
appname = os.path.join(appname, version)
pathlist = [os.sep.join([x, appname]) for x in pathlist]
if multipath:
path = os.pathsep.join(pathlist)
else:
path = pathlist[0]
return path
if appname and version:
path = os.path.join(path, version)
return path
def user_config_dir(appname=None, appauthor=None, version=None, roaming=False):
r"""Return full path to the user-specific config dir for this application.
"appname" is the name of application.
If None, just the system directory is returned.
"appauthor" (only used on Windows) is the name of the
appauthor or distributing body for this application. Typically
it is the owning company name. This falls back to appname. You may
pass False to disable it.
"version" is an optional version path element to append to the
path. You might want to use this if you want multiple versions
of your app to be able to run independently. If used, this
would typically be "<major>.<minor>".
Only applied when appname is present.
"roaming" (boolean, default False) can be set True to use the Windows
roaming appdata directory. That means that for users on a Windows
network setup for roaming profiles, this user data will be
sync'd on login. See
<http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
for a discussion of issues.
Typical user data directories are:
Mac OS X: same as user_data_dir
Unix: ~/.config/<AppName> # or in $XDG_CONFIG_HOME, if defined
Win *: same as user_data_dir
For Unix, we follow the XDG spec and support $XDG_CONFIG_HOME.
That means, by default "~/.config/<AppName>".
"""
if system in ["win32", "darwin"]:
path = user_data_dir(appname, appauthor, None, roaming)
else:
path = os.getenv('XDG_CONFIG_HOME', os.path.expanduser("~/.config"))
if appname:
path = os.path.join(path, appname)
if appname and version:
path = os.path.join(path, version)
return path
def site_config_dir(appname=None, appauthor=None, version=None, multipath=False):
"""Return full path to the user-shared data dir for this application.
"appname" is the name of application.
If None, just the system directory is returned.
"appauthor" (only used on Windows) is the name of the
appauthor or distributing body for this application. Typically
it is the owning company name. This falls back to appname. You may
pass False to disable it.
"version" is an optional version path element to append to the
path. You might want to use this if you want multiple versions
of your app to be able to run independently. If used, this
would typically be "<major>.<minor>".
Only applied when appname is present.
"multipath" is an optional parameter only applicable to *nix
which indicates that the entire list of config dirs should be
returned. By default, the first item from XDG_CONFIG_DIRS is
returned, or '/etc/xdg/<AppName>', if XDG_CONFIG_DIRS is not set
Typical user data directories are:
Mac OS X: same as site_data_dir
Unix: /etc/xdg/<AppName> or $XDG_CONFIG_DIRS[i]/<AppName> for each value in
$XDG_CONFIG_DIRS
Win *: same as site_data_dir
Vista: (Fail! "C:\ProgramData" is a hidden *system* directory on Vista.)
For Unix, this is using the $XDG_CONFIG_DIRS[0] default, if multipath=False
WARNING: Do not use this on Windows. See the Vista-Fail note above for why.
"""
if system in ["win32", "darwin"]:
path = site_data_dir(appname, appauthor)
if appname and version:
path = os.path.join(path, version)
else:
# XDG default for $XDG_CONFIG_DIRS
# only first, if multipath is False
path = os.getenv('XDG_CONFIG_DIRS', '/etc/xdg')
pathlist = [os.path.expanduser(x.rstrip(os.sep)) for x in path.split(os.pathsep)]
if appname:
if version:
appname = os.path.join(appname, version)
pathlist = [os.sep.join([x, appname]) for x in pathlist]
if multipath:
path = os.pathsep.join(pathlist)
else:
path = pathlist[0]
return path
def user_cache_dir(appname=None, appauthor=None, version=None, opinion=True):
r"""Return full path to the user-specific cache dir for this application.
"appname" is the name of application.
If None, just the system directory is returned.
"appauthor" (only used on Windows) is the name of the
appauthor or distributing body for this application. Typically
it is the owning company name. This falls back to appname. You may
pass False to disable it.
"version" is an optional version path element to append to the
path. You might want to use this if you want multiple versions
of your app to be able to run independently. If used, this
would typically be "<major>.<minor>".
Only applied when appname is present.
"opinion" (boolean) can be False to disable the appending of
"Cache" to the base app data dir for Windows. See
discussion below.
Typical user cache directories are:
Mac OS X: ~/Library/Caches/<AppName>
Unix: ~/.cache/<AppName> (XDG default)
Win XP: C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>\Cache
Vista: C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>\Cache
On Windows the only suggestion in the MSDN docs is that local settings go in
the `CSIDL_LOCAL_APPDATA` directory. This is identical to the non-roaming
app data dir (the default returned by `user_data_dir` above). Apps typically
put cache data somewhere *under* the given dir here. Some examples:
...\Mozilla\Firefox\Profiles\<ProfileName>\Cache
...\Acme\SuperApp\Cache\1.0
OPINION: This function appends "Cache" to the `CSIDL_LOCAL_APPDATA` value.
This can be disabled with the `opinion=False` option.
"""
if system == "win32":
if appauthor is None:
appauthor = appname
path = os.path.normpath(_get_win_folder("CSIDL_LOCAL_APPDATA"))
if appname:
if appauthor is not False:
path = os.path.join(path, appauthor, appname)
else:
path = os.path.join(path, appname)
if opinion:
path = os.path.join(path, "Cache")
elif system == 'darwin':
path = os.path.expanduser('~/Library/Caches')
if appname:
path = os.path.join(path, appname)
else:
path = os.getenv('XDG_CACHE_HOME', os.path.expanduser('~/.cache'))
if appname:
path = os.path.join(path, appname)
if appname and version:
path = os.path.join(path, version)
return path
def user_log_dir(appname=None, appauthor=None, version=None, opinion=True):
r"""Return full path to the user-specific log dir for this application.
"appname" is the name of application.
If None, just the system directory is returned.
"appauthor" (only used on Windows) is the name of the
appauthor or distributing body for this application. Typically
it is the owning company name. This falls back to appname. You may
pass False to disable it.
"version" is an optional version path element to append to the
path. You might want to use this if you want multiple versions
of your app to be able to run independently. If used, this
would typically be "<major>.<minor>".
Only applied when appname is present.
"opinion" (boolean) can be False to disable the appending of
"Logs" to the base app data dir for Windows, and "log" to the
base cache dir for Unix. See discussion below.
Typical user cache directories are:
Mac OS X: ~/Library/Logs/<AppName>
Unix: ~/.cache/<AppName>/log # or under $XDG_CACHE_HOME if defined
Win XP: C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>\Logs
Vista: C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>\Logs
On Windows the only suggestion in the MSDN docs is that local settings
go in the `CSIDL_LOCAL_APPDATA` directory. (Note: I'm interested in
examples of what some windows apps use for a logs dir.)
OPINION: This function appends "Logs" to the `CSIDL_LOCAL_APPDATA`
value for Windows and appends "log" to the user cache dir for Unix.
This can be disabled with the `opinion=False` option.
"""
if system == "darwin":
path = os.path.join(
os.path.expanduser('~/Library/Logs'),
appname)
elif system == "win32":
path = user_data_dir(appname, appauthor, version)
version = False
if opinion:
path = os.path.join(path, "Logs")
else:
path = user_cache_dir(appname, appauthor, version)
version = False
if opinion:
path = os.path.join(path, "log")
if appname and version:
path = os.path.join(path, version)
return path
class AppDirs(object):
"""Convenience wrapper for getting application dirs."""
def __init__(self, appname, appauthor=None, version=None, roaming=False,
multipath=False):
self.appname = appname
self.appauthor = appauthor
self.version = version
self.roaming = roaming
self.multipath = multipath
@property
def user_data_dir(self):
return user_data_dir(self.appname, self.appauthor,
version=self.version, roaming=self.roaming)
@property
def site_data_dir(self):
return site_data_dir(self.appname, self.appauthor,
version=self.version, multipath=self.multipath)
@property
def user_config_dir(self):
return user_config_dir(self.appname, self.appauthor,
version=self.version, roaming=self.roaming)
@property
def site_config_dir(self):
return site_config_dir(self.appname, self.appauthor,
version=self.version, multipath=self.multipath)
@property
def user_cache_dir(self):
return user_cache_dir(self.appname, self.appauthor,
version=self.version)
@property
def user_log_dir(self):
return user_log_dir(self.appname, self.appauthor,
version=self.version)
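# Editor's note: illustrative sketch added for clarity, not part of the
# original module. Typical use is to create one AppDirs instance and read the
# directory properties; "SuperApp" and "Acme" are made-up names.
#
#     dirs = AppDirs("SuperApp", "Acme", version="1.0")
#     config_file = os.path.join(dirs.user_config_dir, "settings.ini")
#     cache_dir = dirs.user_cache_dir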
#---- internal support stuff
def _get_win_folder_from_registry(csidl_name):
"""This is a fallback technique at best. I'm not sure if using the
registry for this guarantees us the correct answer for all CSIDL_*
names.
"""
if PY3:
import winreg as _winreg
else:
import _winreg
shell_folder_name = {
"CSIDL_APPDATA": "AppData",
"CSIDL_COMMON_APPDATA": "Common AppData",
"CSIDL_LOCAL_APPDATA": "Local AppData",
}[csidl_name]
key = _winreg.OpenKey(
_winreg.HKEY_CURRENT_USER,
r"Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders"
)
dir, type = _winreg.QueryValueEx(key, shell_folder_name)
return dir
def _get_win_folder_with_pywin32(csidl_name):
from win32com.shell import shellcon, shell
dir = shell.SHGetFolderPath(0, getattr(shellcon, csidl_name), 0, 0)
# Try to make this a unicode path because SHGetFolderPath does
# not return unicode strings when there is unicode data in the
# path.
try:
dir = unicode(dir)
# Downgrade to short path name if have highbit chars. See
# <http://bugs.activestate.com/show_bug.cgi?id=85099>.
has_high_char = False
for c in dir:
if ord(c) > 255:
has_high_char = True
break
if has_high_char:
try:
import win32api
dir = win32api.GetShortPathName(dir)
except ImportError:
pass
except UnicodeError:
pass
return dir
def _get_win_folder_with_ctypes(csidl_name):
import ctypes
csidl_const = {
"CSIDL_APPDATA": 26,
"CSIDL_COMMON_APPDATA": 35,
"CSIDL_LOCAL_APPDATA": 28,
}[csidl_name]
buf = ctypes.create_unicode_buffer(1024)
ctypes.windll.shell32.SHGetFolderPathW(None, csidl_const, None, 0, buf)
# Downgrade to short path name if have highbit chars. See
# <http://bugs.activestate.com/show_bug.cgi?id=85099>.
has_high_char = False
for c in buf:
if ord(c) > 255:
has_high_char = True
break
if has_high_char:
buf2 = ctypes.create_unicode_buffer(1024)
if ctypes.windll.kernel32.GetShortPathNameW(buf.value, buf2, 1024):
buf = buf2
return buf.value
def _get_win_folder_with_jna(csidl_name):
import array
from com.sun import jna
from com.sun.jna.platform import win32
buf_size = win32.WinDef.MAX_PATH * 2
buf = array.zeros('c', buf_size)
shell = win32.Shell32.INSTANCE
shell.SHGetFolderPath(None, getattr(win32.ShlObj, csidl_name), None, win32.ShlObj.SHGFP_TYPE_CURRENT, buf)
dir = jna.Native.toString(buf.tostring()).rstrip("\0")
# Downgrade to short path name if have highbit chars. See
# <http://bugs.activestate.com/show_bug.cgi?id=85099>.
has_high_char = False
for c in dir:
if ord(c) > 255:
has_high_char = True
break
if has_high_char:
buf = array.zeros('c', buf_size)
kernel = win32.Kernel32.INSTANCE
if kernel.GetShortPathName(dir, buf, buf_size):
dir = jna.Native.toString(buf.tostring()).rstrip("\0")
return dir
if system == "win32":
try:
import win32com.shell
_get_win_folder = _get_win_folder_with_pywin32
except ImportError:
try:
from ctypes import windll
_get_win_folder = _get_win_folder_with_ctypes
except ImportError:
try:
import com.sun.jna
_get_win_folder = _get_win_folder_with_jna
except ImportError:
_get_win_folder = _get_win_folder_from_registry
#---- self test code
if __name__ == "__main__":
appname = "MyApp"
appauthor = "MyCompany"
props = ("user_data_dir", "site_data_dir",
"user_config_dir", "site_config_dir",
"user_cache_dir", "user_log_dir")
print("-- app dirs %s --" % __version__)
print("-- app dirs (with optional 'version')")
dirs = AppDirs(appname, appauthor, version="1.0")
for prop in props:
print("%s: %s" % (prop, getattr(dirs, prop)))
print("\n-- app dirs (without optional 'version')")
dirs = AppDirs(appname, appauthor)
for prop in props:
print("%s: %s" % (prop, getattr(dirs, prop)))
print("\n-- app dirs (without optional 'appauthor')")
dirs = AppDirs(appname)
for prop in props:
print("%s: %s" % (prop, getattr(dirs, prop)))
print("\n-- app dirs (with disabled 'appauthor')")
dirs = AppDirs(appname, appauthor=False)
for prop in props:
print("%s: %s" % (prop, getattr(dirs, prop)))
| apache-2.0 | -727,148,747,689,605,200 | 39.348294 | 122 | 0.617158 | false |
Weuxel/cjdns | node_build/dependencies/libuv/build/gyp/test/mac/gyptest-depend-on-bundle.py | 303 | 1186 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that a dependency on a bundle causes the whole bundle to be built.
"""
import TestGyp
import sys
if sys.platform == 'darwin':
test = TestGyp.TestGyp(formats=['ninja', 'make', 'xcode'])
test.run_gyp('test.gyp', chdir='depend-on-bundle')
test.build('test.gyp', 'dependent_on_bundle', chdir='depend-on-bundle')
# Binary itself.
test.built_file_must_exist('dependent_on_bundle', chdir='depend-on-bundle')
# Bundle dependency.
test.built_file_must_exist(
'my_bundle.framework/Versions/A/my_bundle',
chdir='depend-on-bundle')
test.built_file_must_exist( # package_framework
'my_bundle.framework/my_bundle',
chdir='depend-on-bundle')
test.built_file_must_exist( # plist
'my_bundle.framework/Versions/A/Resources/Info.plist',
chdir='depend-on-bundle')
test.built_file_must_exist(
'my_bundle.framework/Versions/A/Resources/English.lproj/' # Resources
'InfoPlist.strings',
chdir='depend-on-bundle')
test.pass_test()
| gpl-3.0 | 7,009,291,564,742,722,000 | 28.65 | 77 | 0.690556 | false |
jeremiahmarks/sl4a | python/src/Lib/multiprocessing/util.py | 59 | 7839 | #
# Module providing various facilities to other parts of the package
#
# multiprocessing/util.py
#
# Copyright (c) 2006-2008, R Oudkerk --- see COPYING.txt
#
import itertools
import weakref
import atexit
import threading # we want threading to install its
# cleanup function before multiprocessing does
from multiprocessing.process import current_process, active_children
__all__ = [
'sub_debug', 'debug', 'info', 'sub_warning', 'get_logger',
'log_to_stderr', 'get_temp_dir', 'register_after_fork',
'is_exiting', 'Finalize', 'ForkAwareThreadLock', 'ForkAwareLocal',
'SUBDEBUG', 'SUBWARNING',
]
#
# Logging
#
NOTSET = 0
SUBDEBUG = 5
DEBUG = 10
INFO = 20
SUBWARNING = 25
LOGGER_NAME = 'multiprocessing'
DEFAULT_LOGGING_FORMAT = '[%(levelname)s/%(processName)s] %(message)s'
_logger = None
_log_to_stderr = False
def sub_debug(msg, *args):
if _logger:
_logger.log(SUBDEBUG, msg, *args)
def debug(msg, *args):
if _logger:
_logger.log(DEBUG, msg, *args)
def info(msg, *args):
if _logger:
_logger.log(INFO, msg, *args)
def sub_warning(msg, *args):
if _logger:
_logger.log(SUBWARNING, msg, *args)
def get_logger():
'''
Returns logger used by multiprocessing
'''
global _logger
import logging, atexit
logging._acquireLock()
try:
if not _logger:
_logger = logging.getLogger(LOGGER_NAME)
_logger.propagate = 0
logging.addLevelName(SUBDEBUG, 'SUBDEBUG')
logging.addLevelName(SUBWARNING, 'SUBWARNING')
# XXX multiprocessing should cleanup before logging
if hasattr(atexit, 'unregister'):
atexit.unregister(_exit_function)
atexit.register(_exit_function)
else:
atexit._exithandlers.remove((_exit_function, (), {}))
atexit._exithandlers.append((_exit_function, (), {}))
finally:
logging._releaseLock()
return _logger
def log_to_stderr(level=None):
'''
Turn on logging and add a handler which prints to stderr
'''
global _log_to_stderr
import logging
logger = get_logger()
formatter = logging.Formatter(DEFAULT_LOGGING_FORMAT)
handler = logging.StreamHandler()
handler.setFormatter(formatter)
logger.addHandler(handler)
if level:
logger.setLevel(level)
_log_to_stderr = True
return _logger
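# Editor's note: illustrative sketch added for clarity, not part of the
# original module. Typical use from client code (assumes the standard logging
# module supplies the level constant):
#
#     import logging, multiprocessing
#     logger = multiprocessing.log_to_stderr(logging.INFO)
#     logger.info('starting workers')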
#
# Function returning a temp directory which will be removed on exit
#
def get_temp_dir():
# get name of a temp directory which will be automatically cleaned up
if current_process()._tempdir is None:
import shutil, tempfile
tempdir = tempfile.mkdtemp(prefix='pymp-')
info('created temp directory %s', tempdir)
Finalize(None, shutil.rmtree, args=[tempdir], exitpriority=-100)
current_process()._tempdir = tempdir
return current_process()._tempdir
#
# Support for reinitialization of objects when bootstrapping a child process
#
_afterfork_registry = weakref.WeakValueDictionary()
_afterfork_counter = itertools.count()
def _run_after_forkers():
items = list(_afterfork_registry.items())
items.sort()
for (index, ident, func), obj in items:
try:
func(obj)
except Exception, e:
info('after forker raised exception %s', e)
def register_after_fork(obj, func):
_afterfork_registry[(_afterfork_counter.next(), id(obj), func)] = obj
#
# Finalization using weakrefs
#
_finalizer_registry = {}
_finalizer_counter = itertools.count()
class Finalize(object):
'''
Class which supports object finalization using weakrefs
'''
def __init__(self, obj, callback, args=(), kwargs=None, exitpriority=None):
assert exitpriority is None or type(exitpriority) is int
if obj is not None:
self._weakref = weakref.ref(obj, self)
else:
assert exitpriority is not None
self._callback = callback
self._args = args
self._kwargs = kwargs or {}
self._key = (exitpriority, _finalizer_counter.next())
_finalizer_registry[self._key] = self
def __call__(self, wr=None):
'''
Run the callback unless it has already been called or cancelled
'''
try:
del _finalizer_registry[self._key]
except KeyError:
sub_debug('finalizer no longer registered')
else:
sub_debug('finalizer calling %s with args %s and kwargs %s',
self._callback, self._args, self._kwargs)
res = self._callback(*self._args, **self._kwargs)
self._weakref = self._callback = self._args = \
self._kwargs = self._key = None
return res
def cancel(self):
'''
Cancel finalization of the object
'''
try:
del _finalizer_registry[self._key]
except KeyError:
pass
else:
self._weakref = self._callback = self._args = \
self._kwargs = self._key = None
def still_active(self):
'''
Return whether this finalizer is still waiting to invoke callback
'''
return self._key in _finalizer_registry
def __repr__(self):
try:
obj = self._weakref()
except (AttributeError, TypeError):
obj = None
if obj is None:
return '<Finalize object, dead>'
x = '<Finalize object, callback=%s' % \
getattr(self._callback, '__name__', self._callback)
if self._args:
x += ', args=' + str(self._args)
if self._kwargs:
x += ', kwargs=' + str(self._kwargs)
if self._key[0] is not None:
x += ', exitpriority=' + str(self._key[0])
return x + '>'
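# Editor's note: illustrative sketch added for clarity, not part of the
# original module. A typical pattern registers cleanup of a resource so that
# it runs when the owner is garbage collected or at interpreter exit, much
# like get_temp_dir() above (shutil would need to be imported):
#
#     class _TempResource(object):
#         def __init__(self, path):
#             self.path = path
#             self._finalizer = Finalize(self, shutil.rmtree, args=[path],
#                                        exitpriority=-100)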
def _run_finalizers(minpriority=None):
'''
Run all finalizers whose exit priority is not None and at least minpriority
Finalizers with highest priority are called first; finalizers with
the same priority will be called in reverse order of creation.
'''
if minpriority is None:
f = lambda p : p[0][0] is not None
else:
f = lambda p : p[0][0] is not None and p[0][0] >= minpriority
items = [x for x in _finalizer_registry.items() if f(x)]
items.sort(reverse=True)
for key, finalizer in items:
sub_debug('calling %s', finalizer)
try:
finalizer()
except Exception:
import traceback
traceback.print_exc()
if minpriority is None:
_finalizer_registry.clear()
#
# Clean up on exit
#
def is_exiting():
'''
Returns true if the process is shutting down
'''
return _exiting or _exiting is None
_exiting = False
def _exit_function():
global _exiting
info('process shutting down')
debug('running all "atexit" finalizers with priority >= 0')
_run_finalizers(0)
for p in active_children():
if p._daemonic:
info('calling terminate() for daemon %s', p.name)
p._popen.terminate()
for p in active_children():
info('calling join() for process %s', p.name)
p.join()
debug('running the remaining "atexit" finalizers')
_run_finalizers()
atexit.register(_exit_function)
#
# Some fork aware types
#
class ForkAwareThreadLock(object):
def __init__(self):
self._lock = threading.Lock()
self.acquire = self._lock.acquire
self.release = self._lock.release
register_after_fork(self, ForkAwareThreadLock.__init__)
class ForkAwareLocal(threading.local):
def __init__(self):
register_after_fork(self, lambda obj : obj.__dict__.clear())
def __reduce__(self):
return type(self), ()
| apache-2.0 | -8,884,693,694,074,977,000 | 25.938144 | 79 | 0.600587 | false |
uclaros/QGIS | tests/src/python/test_qgsmapcanvas.py | 25 | 23167 | # -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsMapCanvas
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Nyall Dawson'
__date__ = '24/1/2017'
__copyright__ = 'Copyright 2017, The QGIS Project'
import qgis # NOQA
from qgis.core import (QgsMapSettings,
QgsCoordinateReferenceSystem,
QgsRectangle,
QgsVectorLayer,
QgsFeature,
QgsGeometry,
QgsMultiRenderChecker,
QgsFillSymbol,
QgsSingleSymbolRenderer,
QgsMapThemeCollection,
QgsProject, QgsAnnotationPolygonItem,
QgsPolygon,
QgsLineString,
QgsPoint,
QgsPointXY,
QgsApplication)
from qgis.gui import (QgsMapCanvas)
from qgis.PyQt.QtCore import (Qt,
QDir)
from qgis.PyQt.QtXml import (QDomDocument, QDomElement)
import time
from qgis.testing import start_app, unittest
app = start_app()
class TestQgsMapCanvas(unittest.TestCase):
def setUp(self):
self.report = "<h1>Python QgsMapCanvas Tests</h1>\n"
def tearDown(self):
report_file_path = "%s/qgistest.html" % QDir.tempPath()
with open(report_file_path, 'a') as report_file:
report_file.write(self.report)
def testGettersSetters(self):
canvas = QgsMapCanvas()
# should be disabled by default
self.assertFalse(canvas.previewJobsEnabled())
canvas.setPreviewJobsEnabled(True)
self.assertTrue(canvas.previewJobsEnabled())
def testDeferredUpdate(self):
""" test that map canvas doesn't auto refresh on deferred layer update """
canvas = QgsMapCanvas()
canvas.setDestinationCrs(QgsCoordinateReferenceSystem(4326))
canvas.setFrameStyle(0)
canvas.resize(600, 400)
self.assertEqual(canvas.width(), 600)
self.assertEqual(canvas.height(), 400)
layer = QgsVectorLayer("Polygon?crs=epsg:4326&field=fldtxt:string",
"layer", "memory")
canvas.setLayers([layer])
canvas.setExtent(QgsRectangle(10, 30, 20, 35))
canvas.show()
# need to wait until first redraw can occur (note that we first need to wait till drawing starts!)
while not canvas.isDrawing():
app.processEvents()
canvas.waitWhileRendering()
self.assertTrue(self.canvasImageCheck('empty_canvas', 'empty_canvas', canvas))
# add polygon to layer
f = QgsFeature()
f.setGeometry(QgsGeometry.fromRect(QgsRectangle(5, 25, 25, 45)))
self.assertTrue(layer.dataProvider().addFeatures([f]))
# deferred update - so expect that canvas will not been refreshed
layer.triggerRepaint(True)
timeout = time.time() + 0.1
while time.time() < timeout:
# messy, but only way to check that canvas redraw doesn't occur
self.assertFalse(canvas.isDrawing())
# canvas should still be empty
self.assertTrue(self.canvasImageCheck('empty_canvas', 'empty_canvas', canvas))
# refresh canvas
canvas.refresh()
canvas.waitWhileRendering()
# now we expect the canvas check to fail (since they'll be a new polygon rendered over it)
self.assertFalse(self.canvasImageCheck('empty_canvas', 'empty_canvas', canvas))
def testRefreshOnTimer(self):
""" test that map canvas refreshes with auto refreshing layers """
canvas = QgsMapCanvas()
canvas.setDestinationCrs(QgsCoordinateReferenceSystem(4326))
canvas.setFrameStyle(0)
canvas.resize(600, 400)
self.assertEqual(canvas.width(), 600)
self.assertEqual(canvas.height(), 400)
layer = QgsVectorLayer("Polygon?crs=epsg:4326&field=fldtxt:string",
"layer", "memory")
canvas.setLayers([layer])
canvas.setExtent(QgsRectangle(10, 30, 20, 35))
canvas.show()
# need to wait until first redraw can occur (note that we first need to wait till drawing starts!)
while not canvas.isDrawing():
app.processEvents()
canvas.waitWhileRendering()
self.assertTrue(self.canvasImageCheck('empty_canvas', 'empty_canvas', canvas))
# add polygon to layer
f = QgsFeature()
f.setGeometry(QgsGeometry.fromRect(QgsRectangle(5, 25, 25, 45)))
self.assertTrue(layer.dataProvider().addFeatures([f]))
# set auto refresh on layer
layer.setAutoRefreshInterval(100)
layer.setAutoRefreshEnabled(True)
timeout = time.time() + 1
# expect canvas to auto refresh...
while not canvas.isDrawing():
app.processEvents()
self.assertTrue(time.time() < timeout)
while canvas.isDrawing():
app.processEvents()
self.assertTrue(time.time() < timeout)
# add a polygon to layer
f = QgsFeature()
f.setGeometry(QgsGeometry.fromRect(QgsRectangle(5, 25, 25, 45)))
self.assertTrue(layer.dataProvider().addFeatures([f]))
# wait for canvas auto refresh
while not canvas.isDrawing():
app.processEvents()
self.assertTrue(time.time() < timeout)
while canvas.isDrawing():
app.processEvents()
self.assertTrue(time.time() < timeout)
# now canvas should look different...
self.assertFalse(self.canvasImageCheck('empty_canvas', 'empty_canvas', canvas))
# switch off auto refresh
layer.setAutoRefreshEnabled(False)
timeout = time.time() + 0.5
while time.time() < timeout:
# messy, but only way to check that canvas redraw doesn't occur
self.assertFalse(canvas.isDrawing())
def testCancelAndDestroy(self):
""" test that nothing goes wrong if we destroy a canvas while a job is canceling """
canvas = QgsMapCanvas()
canvas.setDestinationCrs(QgsCoordinateReferenceSystem(4326))
canvas.setFrameStyle(0)
canvas.resize(600, 400)
layer = QgsVectorLayer("Polygon?crs=epsg:4326&field=fldtxt:string",
"layer", "memory")
# add a ton of features
for i in range(5000):
f = QgsFeature()
f.setGeometry(QgsGeometry.fromRect(QgsRectangle(5, 25, 25, 45)))
self.assertTrue(layer.dataProvider().addFeatures([f]))
canvas.setLayers([layer])
canvas.setExtent(QgsRectangle(10, 30, 20, 35))
canvas.show()
# need to wait until first redraw can occur (note that we first need to wait till drawing starts!)
while not canvas.isDrawing():
app.processEvents()
self.assertTrue(canvas.isDrawing())
canvas.stopRendering()
del canvas
def testMapTheme(self):
canvas = QgsMapCanvas()
canvas.setDestinationCrs(QgsCoordinateReferenceSystem(4326))
canvas.setFrameStyle(0)
canvas.resize(600, 400)
self.assertEqual(canvas.width(), 600)
self.assertEqual(canvas.height(), 400)
layer = QgsVectorLayer("Polygon?crs=epsg:4326&field=fldtxt:string",
"layer", "memory")
# add a polygon to layer
f = QgsFeature()
f.setGeometry(QgsGeometry.fromRect(QgsRectangle(5, 25, 25, 45)))
self.assertTrue(layer.dataProvider().addFeatures([f]))
# create a style
sym1 = QgsFillSymbol.createSimple({'color': '#ffb200'})
renderer = QgsSingleSymbolRenderer(sym1)
layer.setRenderer(renderer)
canvas.setLayers([layer])
canvas.setExtent(QgsRectangle(10, 30, 20, 35))
canvas.show()
# need to wait until first redraw can occur (note that we first need to wait till drawing starts!)
while not canvas.isDrawing():
app.processEvents()
canvas.waitWhileRendering()
self.assertTrue(self.canvasImageCheck('theme1', 'theme1', canvas))
# add some styles
layer.styleManager().addStyleFromLayer('style1')
sym2 = QgsFillSymbol.createSimple({'color': '#00b2ff'})
renderer2 = QgsSingleSymbolRenderer(sym2)
layer.setRenderer(renderer2)
layer.styleManager().addStyleFromLayer('style2')
canvas.refresh()
canvas.waitWhileRendering()
self.assertTrue(self.canvasImageCheck('theme2', 'theme2', canvas))
layer.styleManager().setCurrentStyle('style1')
canvas.refresh()
canvas.waitWhileRendering()
self.assertTrue(self.canvasImageCheck('theme1', 'theme1', canvas))
# OK, so all good with setting/rendering map styles
# try setting canvas to a particular theme
# make some themes...
theme1 = QgsMapThemeCollection.MapThemeRecord()
record1 = QgsMapThemeCollection.MapThemeLayerRecord(layer)
record1.currentStyle = 'style1'
record1.usingCurrentStyle = True
theme1.setLayerRecords([record1])
theme2 = QgsMapThemeCollection.MapThemeRecord()
record2 = QgsMapThemeCollection.MapThemeLayerRecord(layer)
record2.currentStyle = 'style2'
record2.usingCurrentStyle = True
theme2.setLayerRecords([record2])
QgsProject.instance().mapThemeCollection().insert('theme1', theme1)
QgsProject.instance().mapThemeCollection().insert('theme2', theme2)
canvas.setTheme('theme2')
canvas.refresh()
canvas.waitWhileRendering()
self.assertTrue(self.canvasImageCheck('theme2', 'theme2', canvas))
canvas.setTheme('theme1')
canvas.refresh()
canvas.waitWhileRendering()
self.assertTrue(self.canvasImageCheck('theme1', 'theme1', canvas))
# add another layer
layer2 = QgsVectorLayer("Polygon?crs=epsg:4326&field=fldtxt:string",
"layer2", "memory")
f = QgsFeature()
f.setGeometry(QgsGeometry.fromRect(QgsRectangle(5, 25, 25, 45)))
self.assertTrue(layer2.dataProvider().addFeatures([f]))
# create a style
sym1 = QgsFillSymbol.createSimple({'color': '#b2ff00'})
renderer = QgsSingleSymbolRenderer(sym1)
layer2.setRenderer(renderer)
# rerender canvas - should NOT show new layer
canvas.refresh()
canvas.waitWhileRendering()
self.assertTrue(self.canvasImageCheck('theme1', 'theme1', canvas))
# test again - this time refresh all layers
canvas.refreshAllLayers()
canvas.waitWhileRendering()
self.assertTrue(self.canvasImageCheck('theme1', 'theme1', canvas))
# add layer 2 to theme1
record3 = QgsMapThemeCollection.MapThemeLayerRecord(layer2)
theme1.setLayerRecords([record3])
QgsProject.instance().mapThemeCollection().update('theme1', theme1)
canvas.refresh()
canvas.waitWhileRendering()
self.assertTrue(self.canvasImageCheck('theme3', 'theme3', canvas))
# change the appearance of an active style
layer2.styleManager().addStyleFromLayer('original')
layer2.styleManager().addStyleFromLayer('style4')
record3.currentStyle = 'style4'
record3.usingCurrentStyle = True
theme1.setLayerRecords([record3])
QgsProject.instance().mapThemeCollection().update('theme1', theme1)
canvas.refresh()
canvas.waitWhileRendering()
self.assertTrue(self.canvasImageCheck('theme3', 'theme3', canvas))
layer2.styleManager().setCurrentStyle('style4')
sym3 = QgsFillSymbol.createSimple({'color': '#b200b2'})
layer2.renderer().setSymbol(sym3)
canvas.refresh()
canvas.waitWhileRendering()
self.assertTrue(self.canvasImageCheck('theme4', 'theme4', canvas))
# try setting layers while a theme is in place
canvas.setLayers([layer])
canvas.refresh()
# should be no change... setLayers should be ignored if canvas is following a theme!
canvas.waitWhileRendering()
self.assertTrue(self.canvasImageCheck('theme4', 'theme4', canvas))
# setLayerStyleOverrides while theme is in place
canvas.setLayerStyleOverrides({layer2.id(): 'original'})
# should be no change... setLayerStyleOverrides should be ignored if canvas is following a theme!
canvas.refresh()
canvas.waitWhileRendering()
self.assertTrue(self.canvasImageCheck('theme4', 'theme4', canvas))
# clear theme
canvas.setTheme('')
canvas.refresh()
canvas.waitWhileRendering()
# should be different - we should now render project layers
self.assertFalse(self.canvasImageCheck('theme4', 'theme4', canvas))
# set canvas to theme1
canvas.setTheme('theme1')
canvas.refresh()
canvas.waitWhileRendering()
self.assertEqual(canvas.theme(), 'theme1')
themeLayers = theme1.layerRecords()
# rename the active theme
QgsProject.instance().mapThemeCollection().renameMapTheme('theme1', 'theme5')
# canvas theme should now be set to theme5
canvas.refresh()
canvas.waitWhileRendering()
self.assertEqual(canvas.theme(), 'theme5')
# theme5 should render as theme1
theme5 = QgsProject.instance().mapThemeCollection().mapThemeState('theme5')
theme5Layers = theme5.layerRecords()
self.assertEqual(themeLayers, theme5Layers, 'themes are different')
# self.assertTrue(self.canvasImageCheck('theme5', 'theme5', canvas))
def testMainAnnotationLayerRendered(self):
""" test that main annotation layer is rendered above all other layers """
canvas = QgsMapCanvas()
canvas.setDestinationCrs(QgsCoordinateReferenceSystem(4326))
canvas.setFrameStyle(0)
canvas.resize(600, 400)
self.assertEqual(canvas.width(), 600)
self.assertEqual(canvas.height(), 400)
layer = QgsVectorLayer("Polygon?crs=epsg:4326&field=fldtxt:string",
"layer", "memory")
sym3 = QgsFillSymbol.createSimple({'color': '#b200b2'})
layer.renderer().setSymbol(sym3)
canvas.setLayers([layer])
canvas.setExtent(QgsRectangle(10, 30, 20, 35))
canvas.show()
# need to wait until first redraw can occur (note that we first need to wait till drawing starts!)
while not canvas.isDrawing():
app.processEvents()
canvas.waitWhileRendering()
self.assertTrue(self.canvasImageCheck('empty_canvas', 'empty_canvas', canvas))
# add polygon to layer
f = QgsFeature()
f.setGeometry(QgsGeometry.fromRect(QgsRectangle(5, 25, 25, 45)))
self.assertTrue(layer.dataProvider().addFeatures([f]))
# refresh canvas
canvas.refresh()
canvas.waitWhileRendering()
# no annotation yet...
self.assertFalse(self.canvasImageCheck('main_annotation_layer', 'main_annotation_layer', canvas))
annotation_layer = QgsProject.instance().mainAnnotationLayer()
annotation_layer.setCrs(QgsCoordinateReferenceSystem(4326))
annotation_geom = QgsGeometry.fromRect(QgsRectangle(12, 30, 18, 33))
annotation = QgsAnnotationPolygonItem(annotation_geom.constGet().clone())
sym3 = QgsFillSymbol.createSimple({'color': '#ff0000', 'outline_style': 'no'})
annotation.setSymbol(sym3)
annotation_layer.addItem(annotation)
# refresh canvas
canvas.refresh()
canvas.waitWhileRendering()
# annotation must be rendered over other layers
self.assertTrue(self.canvasImageCheck('main_annotation_layer', 'main_annotation_layer', canvas))
annotation_layer.clear()
def canvasImageCheck(self, name, reference_image, canvas):
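        """Render the canvas to 'mapcanvas_<name>.png' in the system temp directory
        and compare it against the control image 'expected_<reference_image>' using
        QgsMultiRenderChecker. Returns True if the rendered image matches the control
        within the configured tolerance."""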
self.report += "<h2>Render {}</h2>\n".format(name)
temp_dir = QDir.tempPath() + '/'
file_name = temp_dir + 'mapcanvas_' + name + ".png"
print(file_name)
canvas.saveAsImage(file_name)
checker = QgsMultiRenderChecker()
checker.setControlPathPrefix("mapcanvas")
checker.setControlName("expected_" + reference_image)
checker.setRenderedImage(file_name)
checker.setColorTolerance(2)
result = checker.runTest(name, 20)
self.report += checker.report()
print((self.report))
return result
def testSaveCanvasVariablesToProject(self):
"""
Ensure that temporary canvas atlas variables are not written to project
"""
c1 = QgsMapCanvas()
c1.setObjectName('c1')
c1.expressionContextScope().setVariable('atlas_featurenumber', 1111)
c1.expressionContextScope().setVariable('atlas_pagename', 'bb')
c1.expressionContextScope().setVariable('atlas_feature', QgsFeature(1))
c1.expressionContextScope().setVariable('atlas_featureid', 22)
c1.expressionContextScope().setVariable('atlas_geometry', QgsGeometry.fromWkt('Point( 1 2 )'))
c1.expressionContextScope().setVariable('vara', 1111)
c1.expressionContextScope().setVariable('varb', 'bb')
doc = QDomDocument("testdoc")
elem = doc.createElement("qgis")
doc.appendChild(elem)
c1.writeProject(doc)
c2 = QgsMapCanvas()
c2.setObjectName('c1')
c2.readProject(doc)
self.assertCountEqual(c2.expressionContextScope().variableNames(), ['vara', 'varb'])
self.assertEqual(c2.expressionContextScope().variable('vara'), 1111)
self.assertEqual(c2.expressionContextScope().variable('varb'), 'bb')
def testSaveMultipleCanvasesToProject(self):
# test saving/restoring canvas state to project with multiple canvases
c1 = QgsMapCanvas()
c1.setObjectName('c1')
c1.setDestinationCrs(QgsCoordinateReferenceSystem('EPSG:3111'))
c1.setRotation(45)
c1.expressionContextScope().setVariable('vara', 1111)
c1.expressionContextScope().setVariable('varb', 'bb')
c2 = QgsMapCanvas()
c2.setObjectName('c2')
c2.setDestinationCrs(QgsCoordinateReferenceSystem('EPSG:4326'))
c2.setRotation(65)
c2.expressionContextScope().setVariable('vara', 2222)
c2.expressionContextScope().setVariable('varc', 'cc')
doc = QDomDocument("testdoc")
elem = doc.createElement("qgis")
doc.appendChild(elem)
c1.writeProject(doc)
c2.writeProject(doc)
c3 = QgsMapCanvas()
c3.setObjectName('c1')
c4 = QgsMapCanvas()
c4.setObjectName('c2')
c3.readProject(doc)
c4.readProject(doc)
self.assertEqual(c3.mapSettings().destinationCrs().authid(), 'EPSG:3111')
self.assertEqual(c3.rotation(), 45)
self.assertEqual(set(c3.expressionContextScope().variableNames()), {'vara', 'varb'})
self.assertEqual(c3.expressionContextScope().variable('vara'), 1111)
self.assertEqual(c3.expressionContextScope().variable('varb'), 'bb')
self.assertEqual(c4.mapSettings().destinationCrs().authid(), 'EPSG:4326')
self.assertEqual(c4.rotation(), 65)
self.assertEqual(set(c4.expressionContextScope().variableNames()), {'vara', 'varc'})
self.assertEqual(c4.expressionContextScope().variable('vara'), 2222)
self.assertEqual(c4.expressionContextScope().variable('varc'), 'cc')
def testLockedScale(self):
"""Test zoom/pan/center operations when scale lock is on"""
c = QgsMapCanvas()
dpr = c.mapSettings().devicePixelRatio()
self.assertEqual(c.size().width(), 640)
self.assertEqual(c.size().height(), 480)
c.setExtent(QgsRectangle(5, 45, 9, 47))
self.assertEqual(round(c.scale() / 100000), 13 * dpr)
c.zoomScale(2500000)
c.setScaleLocked(True)
self.assertEqual(round(c.magnificationFactor(), 1), 1)
# Test setExtent
c.setExtent(QgsRectangle(6, 45.5, 8, 46), True)
self.assertEqual(round(c.scale()), 2500000)
self.assertEqual(c.center().x(), 7.0)
self.assertEqual(c.center().y(), 45.75)
self.assertEqual(round(c.magnificationFactor()), 4 / dpr)
# Test setCenter
c.setCenter(QgsPointXY(6, 46))
self.assertEqual(c.center().x(), 6)
self.assertEqual(c.center().y(), 46)
self.assertEqual(round(c.scale()), 2500000)
# Test zoom
c.zoomByFactor(0.5, QgsPointXY(6.5, 46.5), False)
self.assertEqual(c.center().x(), 6.5)
self.assertEqual(c.center().y(), 46.5)
self.assertTrue(c.magnificationFactor() > 7 / dpr)
self.assertEqual(round(c.scale()), 2500000)
# Test zoom with center
# default zoom factor is 2, x and y are pixel coordinates, default size is 640x480
c.zoomWithCenter(300, 200, True)
self.assertEqual(round(c.center().x(), 1), 6.5)
self.assertEqual(round(c.center().y(), 1), 46.6)
self.assertEqual(round(c.scale()), 2500000)
self.assertTrue(c.magnificationFactor() > (14 / dpr) and c.magnificationFactor() < (16 / dpr))
# out ...
c.zoomWithCenter(300, 200, False)
self.assertEqual(round(c.center().x(), 1), 6.5)
self.assertEqual(round(c.center().y(), 1), 46.6)
self.assertEqual(round(c.scale()), 2500000)
self.assertTrue(c.magnificationFactor() > 7 / dpr)
# Test setExtent with different ratio
c2 = QgsMapCanvas()
c2.setExtent(QgsRectangle(5, 45, 9, 47))
c2.zoomScale(2500000)
c2.setScaleLocked(True)
c2.setExtent(QgsRectangle(3, 45, 11, 45.5), True)
self.assertEqual(round(c2.scale()), 2500000)
self.assertEqual(c2.center().x(), 7.0)
self.assertEqual(c2.center().y(), 45.25)
self.assertAlmostEqual(c2.magnificationFactor(), 1 / dpr, 0)
# Restore original
c2.setExtent(QgsRectangle(5, 45, 9, 47), True)
self.assertEqual(round(c2.scale()), 2500000)
self.assertEqual(c2.center().x(), 7.0)
self.assertEqual(c2.center().y(), 46.0)
self.assertAlmostEqual(c2.magnificationFactor(), 2 / dpr, 0)
c2.setExtent(QgsRectangle(7, 46, 11, 46.5), True)
self.assertEqual(round(c2.scale()), 2500000)
self.assertEqual(c2.center().x(), 9.0)
self.assertEqual(c2.center().y(), 46.25)
self.assertAlmostEqual(c2.magnificationFactor(), 2 / dpr, 0)
c2.setExtent(QgsRectangle(7, 46, 9, 46.5), True)
self.assertEqual(round(c2.scale()), 2500000)
self.assertEqual(c2.center().x(), 8.0)
self.assertEqual(c2.center().y(), 46.25)
self.assertAlmostEqual(c2.magnificationFactor(), 4 / dpr, 0)
if __name__ == '__main__':
unittest.main()
| gpl-2.0 | -773,378,925,655,950,000 | 39.64386 | 106 | 0.633401 | false |
liqueur/tornado | tornado/test/process_test.py | 123 | 10569 | #!/usr/bin/env python
from __future__ import absolute_import, division, print_function, with_statement
import logging
import os
import signal
import subprocess
import sys
from tornado.httpclient import HTTPClient, HTTPError
from tornado.httpserver import HTTPServer
from tornado.ioloop import IOLoop
from tornado.log import gen_log
from tornado.process import fork_processes, task_id, Subprocess
from tornado.simple_httpclient import SimpleAsyncHTTPClient
from tornado.testing import bind_unused_port, ExpectLog, AsyncTestCase, gen_test
from tornado.test.util import unittest, skipIfNonUnix
from tornado.web import RequestHandler, Application
def skip_if_twisted():
if IOLoop.configured_class().__name__.endswith(('TwistedIOLoop',
'AsyncIOMainLoop')):
raise unittest.SkipTest("Process tests not compatible with "
"TwistedIOLoop or AsyncIOMainLoop")
# Not using AsyncHTTPTestCase because we need control over the IOLoop.
@skipIfNonUnix
class ProcessTest(unittest.TestCase):
def get_app(self):
class ProcessHandler(RequestHandler):
def get(self):
if self.get_argument("exit", None):
# must use os._exit instead of sys.exit so unittest's
# exception handler doesn't catch it
os._exit(int(self.get_argument("exit")))
if self.get_argument("signal", None):
os.kill(os.getpid(),
int(self.get_argument("signal")))
self.write(str(os.getpid()))
return Application([("/", ProcessHandler)])
def tearDown(self):
if task_id() is not None:
# We're in a child process, and probably got to this point
# via an uncaught exception. If we return now, both
# processes will continue with the rest of the test suite.
# Exit now so the parent process will restart the child
# (since we don't have a clean way to signal failure to
# the parent that won't restart)
logging.error("aborting child process from tearDown")
logging.shutdown()
os._exit(1)
# In the surviving process, clear the alarm we set earlier
signal.alarm(0)
super(ProcessTest, self).tearDown()
def test_multi_process(self):
# This test can't work on twisted because we use the global reactor
# and have no way to get it back into a sane state after the fork.
skip_if_twisted()
with ExpectLog(gen_log, "(Starting .* processes|child .* exited|uncaught exception)"):
self.assertFalse(IOLoop.initialized())
sock, port = bind_unused_port()
def get_url(path):
return "http://127.0.0.1:%d%s" % (port, path)
# ensure that none of these processes live too long
signal.alarm(5) # master process
try:
id = fork_processes(3, max_restarts=3)
self.assertTrue(id is not None)
signal.alarm(5) # child processes
except SystemExit as e:
# if we exit cleanly from fork_processes, all the child processes
# finished with status 0
self.assertEqual(e.code, 0)
self.assertTrue(task_id() is None)
sock.close()
return
try:
if id in (0, 1):
self.assertEqual(id, task_id())
server = HTTPServer(self.get_app())
server.add_sockets([sock])
IOLoop.current().start()
elif id == 2:
self.assertEqual(id, task_id())
sock.close()
# Always use SimpleAsyncHTTPClient here; the curl
# version appears to get confused sometimes if the
# connection gets closed before it's had a chance to
# switch from writing mode to reading mode.
client = HTTPClient(SimpleAsyncHTTPClient)
def fetch(url, fail_ok=False):
try:
return client.fetch(get_url(url))
except HTTPError as e:
if not (fail_ok and e.code == 599):
raise
# Make two processes exit abnormally
fetch("/?exit=2", fail_ok=True)
fetch("/?exit=3", fail_ok=True)
# They've been restarted, so a new fetch will work
int(fetch("/").body)
# Now the same with signals
# Disabled because on the mac a process dying with a signal
# can trigger an "Application exited abnormally; send error
# report to Apple?" prompt.
# fetch("/?signal=%d" % signal.SIGTERM, fail_ok=True)
# fetch("/?signal=%d" % signal.SIGABRT, fail_ok=True)
# int(fetch("/").body)
# Now kill them normally so they won't be restarted
fetch("/?exit=0", fail_ok=True)
                # One process left; watch its pid change
pid = int(fetch("/").body)
fetch("/?exit=4", fail_ok=True)
pid2 = int(fetch("/").body)
self.assertNotEqual(pid, pid2)
# Kill the last one so we shut down cleanly
fetch("/?exit=0", fail_ok=True)
os._exit(0)
except Exception:
logging.error("exception in child process %d", id, exc_info=True)
raise
@skipIfNonUnix
class SubprocessTest(AsyncTestCase):
def test_subprocess(self):
if IOLoop.configured_class().__name__.endswith('LayeredTwistedIOLoop'):
# This test fails non-deterministically with LayeredTwistedIOLoop.
# (the read_until('\n') returns '\n' instead of 'hello\n')
# This probably indicates a problem with either TornadoReactor
# or TwistedIOLoop, but I haven't been able to track it down
# and for now this is just causing spurious travis-ci failures.
raise unittest.SkipTest("Subprocess tests not compatible with "
"LayeredTwistedIOLoop")
subproc = Subprocess([sys.executable, '-u', '-i'],
stdin=Subprocess.STREAM,
stdout=Subprocess.STREAM, stderr=subprocess.STDOUT,
io_loop=self.io_loop)
self.addCleanup(lambda: os.kill(subproc.pid, signal.SIGTERM))
subproc.stdout.read_until(b'>>> ', self.stop)
self.wait()
subproc.stdin.write(b"print('hello')\n")
subproc.stdout.read_until(b'\n', self.stop)
data = self.wait()
self.assertEqual(data, b"hello\n")
subproc.stdout.read_until(b">>> ", self.stop)
self.wait()
subproc.stdin.write(b"raise SystemExit\n")
subproc.stdout.read_until_close(self.stop)
data = self.wait()
self.assertEqual(data, b"")
def test_close_stdin(self):
# Close the parent's stdin handle and see that the child recognizes it.
subproc = Subprocess([sys.executable, '-u', '-i'],
stdin=Subprocess.STREAM,
stdout=Subprocess.STREAM, stderr=subprocess.STDOUT,
io_loop=self.io_loop)
self.addCleanup(lambda: os.kill(subproc.pid, signal.SIGTERM))
subproc.stdout.read_until(b'>>> ', self.stop)
self.wait()
subproc.stdin.close()
subproc.stdout.read_until_close(self.stop)
data = self.wait()
self.assertEqual(data, b"\n")
def test_stderr(self):
subproc = Subprocess([sys.executable, '-u', '-c',
r"import sys; sys.stderr.write('hello\n')"],
stderr=Subprocess.STREAM,
io_loop=self.io_loop)
self.addCleanup(lambda: os.kill(subproc.pid, signal.SIGTERM))
subproc.stderr.read_until(b'\n', self.stop)
data = self.wait()
self.assertEqual(data, b'hello\n')
def test_sigchild(self):
# Twisted's SIGCHLD handler and Subprocess's conflict with each other.
skip_if_twisted()
Subprocess.initialize(io_loop=self.io_loop)
self.addCleanup(Subprocess.uninitialize)
subproc = Subprocess([sys.executable, '-c', 'pass'],
io_loop=self.io_loop)
subproc.set_exit_callback(self.stop)
ret = self.wait()
self.assertEqual(ret, 0)
self.assertEqual(subproc.returncode, ret)
@gen_test
def test_sigchild_future(self):
skip_if_twisted()
Subprocess.initialize()
self.addCleanup(Subprocess.uninitialize)
subproc = Subprocess([sys.executable, '-c', 'pass'])
ret = yield subproc.wait_for_exit()
self.assertEqual(ret, 0)
self.assertEqual(subproc.returncode, ret)
def test_sigchild_signal(self):
skip_if_twisted()
Subprocess.initialize(io_loop=self.io_loop)
self.addCleanup(Subprocess.uninitialize)
subproc = Subprocess([sys.executable, '-c',
'import time; time.sleep(30)'],
io_loop=self.io_loop)
subproc.set_exit_callback(self.stop)
os.kill(subproc.pid, signal.SIGTERM)
ret = self.wait()
self.assertEqual(subproc.returncode, ret)
self.assertEqual(ret, -signal.SIGTERM)
@gen_test
def test_wait_for_exit_raise(self):
skip_if_twisted()
Subprocess.initialize()
self.addCleanup(Subprocess.uninitialize)
subproc = Subprocess([sys.executable, '-c', 'import sys; sys.exit(1)'])
with self.assertRaises(subprocess.CalledProcessError) as cm:
yield subproc.wait_for_exit()
self.assertEqual(cm.exception.returncode, 1)
@gen_test
def test_wait_for_exit_raise_disabled(self):
skip_if_twisted()
Subprocess.initialize()
self.addCleanup(Subprocess.uninitialize)
subproc = Subprocess([sys.executable, '-c', 'import sys; sys.exit(1)'])
ret = yield subproc.wait_for_exit(raise_error=False)
self.assertEqual(ret, 1)
| apache-2.0 | 8,805,905,003,651,502,000 | 42.493827 | 94 | 0.564576 | false |
spbguru/repo1 | tests/integration/py2/nupic/engine/network_testnode_interchangeability.py | 17 | 6158 | #!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2014, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""This test verifies that the C++ test node and py.TestNode
It creates the same two node network with all four combinations
of TestNode and py.TestNode:
1. TestNode, TestNode
2. TestNode, py.TestNode
3. py.TestNode, TestNode
4. py.TestNode, py.TestNode
Then it performs the same tests as the twonode_network demo (except the
error-message tests for the three-node network):
- Can add regions to network and set dimensions
- Linking induces dimensions correctly
- Network computation happens in correct order
- Direct (zero-copy) access to outputs
- Linking correctly maps outputs to inputs
"""
import logging
import unittest2 as unittest
from nupic.engine import Network, Dimensions
LOGGER = logging.getLogger(__name__)
class NetworkTestNodeInterchangeabilityTest(unittest.TestCase):
def testNodesPyTestNodeAndTestNode(self):
self.runNodesTest('py.TestNode', 'TestNode')
def testNodesTestNodeAndPyTestNode(self):
self.runNodesTest('TestNode', 'py.TestNode')
def testNodesTestNodeAndTestNode(self):
self.runNodesTest('TestNode', 'TestNode')
def testNodesPyTestNodeAndPyTestNode(self):
self.runNodesTest('py.TestNode', 'py.TestNode')
def runNodesTest(self, nodeType1, nodeType2):
# =====================================================
# Build and run the network
# =====================================================
LOGGER.info('test(level1: %s, level2: %s)', nodeType1, nodeType2)
net = Network()
level1 = net.addRegion("level1", nodeType1, "{int32Param: 15}")
dims = Dimensions([6, 4])
level1.setDimensions(dims)
level2 = net.addRegion("level2", nodeType2, "{real64Param: 128.23}")
net.link("level1", "level2", "TestFanIn2", "")
# Could call initialize here, but not necessary as net.run()
# initializes implicitly.
# net.initialize()
net.run(1)
LOGGER.info("Successfully created network and ran for one iteration")
# =====================================================
# Check everything
# =====================================================
dims = level1.getDimensions()
self.assertEqual(len(dims), 2)
self.assertEqual(dims[0], 6)
self.assertEqual(dims[1], 4)
dims = level2.getDimensions()
self.assertEqual(len(dims), 2)
self.assertEqual(dims[0], 3)
self.assertEqual(dims[1], 2)
# Check L1 output. "False" means don't copy, i.e.
# get a pointer to the actual output
# Actual output values are determined by the TestNode
# compute() behavior.
l1output = level1.getOutputData("bottomUpOut")
self.assertEqual(len(l1output), 48) # 24 nodes; 2 values per node
for i in xrange(24):
self.assertEqual(l1output[2*i], 0) # size of input to each node is 0
self.assertEqual(l1output[2*i+1], i) # node number
# check L2 output.
l2output = level2.getOutputData("bottomUpOut")
self.assertEqual(len(l2output), 12) # 6 nodes; 2 values per node
# Output val = node number + sum(inputs)
# Can compute from knowing L1 layout
#
# 00 01 | 02 03 | 04 05
# 06 07 | 08 09 | 10 11
# ---------------------
# 12 13 | 14 15 | 16 17
# 18 19 | 20 21 | 22 23
outputVals = []
outputVals.append(0 + (0 + 1 + 6 + 7))
outputVals.append(1 + (2 + 3 + 8 + 9))
outputVals.append(2 + (4 + 5 + 10 + 11))
outputVals.append(3 + (12 + 13 + 18 + 19))
outputVals.append(4 + (14 + 15 + 20 + 21))
outputVals.append(5 + (16 + 17 + 22 + 23))
for i in xrange(6):
if l2output[2*i] != 8:
LOGGER.info(l2output[2*i])
# from dbgp.client import brk; brk(port=9019)
self.assertEqual(l2output[2*i], 8) # size of input for each node is 8
self.assertEqual(l2output[2*i+1], outputVals[i])
# =====================================================
# Run for one more iteration
# =====================================================
LOGGER.info("Running for a second iteration")
net.run(1)
# =====================================================
# Check everything again
# =====================================================
# Outputs are all the same except that the first output is
# incremented by the iteration number
for i in xrange(24):
self.assertEqual(l1output[2*i], 1)
self.assertEqual(l1output[2*i+1], i)
for i in xrange(6):
self.assertEqual(l2output[2*i], 9)
self.assertEqual(l2output[2*i+1], outputVals[i] + 4)
# =====================================================
# Demonstrate a few other features
# =====================================================
#
# Linking can induce dimensions downward
#
net = Network()
level1 = net.addRegion("level1", nodeType1, "")
level2 = net.addRegion("level2", nodeType2, "")
dims = Dimensions([3, 2])
level2.setDimensions(dims)
net.link("level1", "level2", "TestFanIn2", "")
net.initialize()
# Level1 should now have dimensions [6, 4]
self.assertEqual(level1.getDimensions()[0], 6)
self.assertEqual(level1.getDimensions()[1], 4)
if __name__ == "__main__":
unittest.main()
| gpl-3.0 | -3,814,220,149,359,266,000 | 31.930481 | 80 | 0.594024 | false |
SergiosKar/Deep-Learning-models | Double_Deep_Q_Netowrk.py | 1 | 4525 | import random
import gym
import numpy as np
from collections import deque
from tensorflow.contrib.keras.python.keras.models import Sequential
from tensorflow.contrib.keras.python.keras.layers import Dense
from tensorflow.contrib.keras.python.keras.optimizers import Adam
from tensorflow.contrib.keras.python.keras import backend as K
EPISODES = 5000
class DQNAgent:
def __init__(self, state_size, action_size):
self.state_size = state_size
self.action_size = action_size
self.memory = deque(maxlen=2000)
self.gamma = 0.95 # discount rate
self.epsilon = 1.0 # exploration rate
self.epsilon_min = 0.01
self.epsilon_decay = 0.99
self.learning_rate = 0.001
# model network-->>action=NN.predict(state)
self.model = self._build_model()
# target network -->>target=NN.predict(state)
self.target_model = self._build_model()
self.update_target_model()
def _huber_loss(self, target, prediction):
        # pseudo-Huber loss: sqrt(1 + error^2) - 1, a smooth approximation of |error|
error = prediction - target
return K.mean(K.sqrt(1+K.square(error))-1, axis=-1)
def _build_model(self):
# Neural Net for Deep-Q learning Model
model = Sequential()
model.add(Dense(24, input_dim=self.state_size, activation='relu'))
model.add(Dense(24, activation='relu'))
model.add(Dense(self.action_size, activation='linear'))
model.compile(loss=self._huber_loss,
optimizer=Adam(lr=self.learning_rate))
return model
def update_target_model(self):
# copy weights from model to target_model
self.target_model.set_weights(self.model.get_weights())
def remember(self, state, action, reward, next_state, done):
self.memory.append((state, action, reward, next_state, done))
def act(self, state):
# select random action with prob=epsilon else action=maxQ
if np.random.rand() <= self.epsilon:
return random.randrange(self.action_size)
act_values = self.model.predict(state)
return np.argmax(act_values[0]) # returns action
def replay(self, batch_size):
#sample random transitions
minibatch = random.sample(self.memory, batch_size)
for state, action, reward, next_state, done in minibatch:
#calculate target for each minibatch
target = self.model.predict(state)
if done:
target[0][action] = reward
else:
#action from model network
a = self.model.predict(next_state)[0]
#target from target network
t = self.target_model.predict(next_state)[0]
                target[0][action] = reward + self.gamma * t[np.argmax(a)]  # Bellman update
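                # In equations, the Double DQN target computed above (for
                # non-terminal transitions) is:
                #   a* = argmax_a Q_online(next_state, a)          (model network)
                #   target = r + gamma * Q_target(next_state, a*)  (target network)
                # Selecting the action with the online net but evaluating it with
                # the target net reduces the overestimation bias of vanilla DQN.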
#train model network
self.model.fit(state, target, epochs=1, verbose=0)
if self.epsilon > self.epsilon_min:
self.epsilon *= self.epsilon_decay
def load(self, name):
self.model.load_weights(name)
def save(self, name):
self.model.save_weights(name)
if __name__ == "__main__":
env = gym.make('CartPole-v1')
state_size = env.observation_space.shape[0]
action_size = env.action_space.n
agent = DQNAgent(state_size, action_size)
# agent.load("./save/cartpole-ddqn.h5")
done = False
batch_size = 32
for e in range(EPISODES):
state = env.reset()
state = np.reshape(state, [1, state_size])
for time in range(500):
# env.render()
#e-greedy action
action = agent.act(state)
next_state, reward, done, _ = env.step(action)
reward = reward if not done else -10
next_state = np.reshape(next_state, [1, state_size])
# add to experience memory
agent.remember(state, action, reward, next_state, done)
state = next_state
if done:
                # update target network weights at the end of each episode
agent.update_target_model()
print("episode: {}/{}, score: {}, e: {:.2}"
.format(e, EPISODES, time, agent.epsilon))
break
if len(agent.memory) > batch_size:
agent.replay(batch_size)
# if e % 10 == 0:
# agent.save("./save/cartpole-ddqn.h5") | mit | 4,801,212,506,258,369,000 | 29.22069 | 81 | 0.574144 | false |
MarcosCommunity/odoo | addons/note/tests/__init__.py | 260 | 1076 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2013-TODAY OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import test_note
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 9,045,515,598,551,703,000 | 45.782609 | 78 | 0.61803 | false |
mattesno1/CouchPotatoServer | libs/caper/helpers.py | 81 | 2210 | # Copyright 2013 Dean Gardiner <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
def is_list_type(obj, element_type):
if not type(obj) is list:
return False
if len(obj) < 1:
raise ValueError("Unable to determine list element type from empty list")
return type(obj[0]) is element_type
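# Examples (hypothetical values):
#   is_list_type([1, 2, 3], int)  -> True
#   is_list_type((1, 2), tuple)   -> False (not a list)
#   is_list_type([], int)         -> raises ValueError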
def clean_dict(target, remove=None):
"""Recursively remove items matching a value 'remove' from the dictionary
:type target: dict
"""
if type(target) is not dict:
raise ValueError("Target is required to be a dict")
remove_keys = []
for key in target.keys():
if type(target[key]) is not dict:
if target[key] == remove:
remove_keys.append(key)
else:
clean_dict(target[key], remove)
for key in remove_keys:
target.pop(key)
return target
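# Example (hypothetical values):
#   clean_dict({'a': None, 'b': {'c': None, 'd': 1}}, remove=None)
#   returns {'b': {'d': 1}}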
def update_dict(a, b):
for key, value in b.items():
if key not in a:
a[key] = value
elif isinstance(a[key], dict) and isinstance(value, dict):
update_dict(a[key], value)
elif isinstance(a[key], list):
a[key].append(value)
else:
a[key] = [a[key], value]
def xrange_six(start, stop=None, step=None):
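    # Note: 'stop' is only honoured when 'step' is also provided; a two-argument
    # call such as xrange_six(0, 10) falls back to range(0)/xrange(0).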
if stop is not None and step is not None:
if PY3:
return range(start, stop, step)
else:
return xrange(start, stop, step)
else:
if PY3:
return range(start)
else:
return xrange(start)
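# delta_seconds() below is equivalent to timedelta.total_seconds(), which is
# only available on Python >= 2.7.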
def delta_seconds(td):
return (td.microseconds + (td.seconds + td.days * 24 * 3600) * 1e6) / 1e6
| gpl-3.0 | -7,800,063,707,168,705,000 | 26.625 | 81 | 0.624887 | false |
nmayorov/scipy | scipy/optimize/_linprog.py | 3 | 23457 | """
A top-level linear programming interface. Currently this interface solves
linear programming problems via the Simplex and Interior-Point methods.
.. versionadded:: 0.15.0
Functions
---------
.. autosummary::
:toctree: generated/
linprog
linprog_verbose_callback
linprog_terse_callback
"""
import numpy as np
from .optimize import OptimizeResult, OptimizeWarning
from warnings import warn
from ._linprog_ip import _linprog_ip
from ._linprog_simplex import _linprog_simplex
from ._linprog_rs import _linprog_rs
from ._linprog_util import (
_parse_linprog, _presolve, _get_Abc, _LPProblem, _autoscale,
_postsolve, _check_result, _display_summary)
from copy import deepcopy
__all__ = ['linprog', 'linprog_verbose_callback', 'linprog_terse_callback']
__docformat__ = "restructuredtext en"
def linprog_verbose_callback(res):
"""
A sample callback function demonstrating the linprog callback interface.
This callback produces detailed output to sys.stdout before each iteration
and after the final iteration of the simplex algorithm.
Parameters
----------
res : A `scipy.optimize.OptimizeResult` consisting of the following fields:
x : 1-D array
The independent variable vector which optimizes the linear
programming problem.
fun : float
Value of the objective function.
success : bool
True if the algorithm succeeded in finding an optimal solution.
slack : 1-D array
The values of the slack variables. Each slack variable corresponds
to an inequality constraint. If the slack is zero, then the
corresponding constraint is active.
con : 1-D array
The (nominally zero) residuals of the equality constraints, that is,
``b - A_eq @ x``
phase : int
The phase of the optimization being executed. In phase 1 a basic
feasible solution is sought and the T has an additional row
representing an alternate objective function.
status : int
An integer representing the exit status of the optimization::
0 : Optimization terminated successfully
1 : Iteration limit reached
2 : Problem appears to be infeasible
3 : Problem appears to be unbounded
4 : Serious numerical difficulties encountered
nit : int
The number of iterations performed.
message : str
A string descriptor of the exit status of the optimization.
"""
x = res['x']
fun = res['fun']
phase = res['phase']
status = res['status']
nit = res['nit']
message = res['message']
complete = res['complete']
saved_printoptions = np.get_printoptions()
np.set_printoptions(linewidth=500,
formatter={'float': lambda x: "{0: 12.4f}".format(x)})
if status:
        print('--------- Simplex Early Exit -------\n')
print('The simplex method exited early with status {0:d}'.format(status))
print(message)
elif complete:
print('--------- Simplex Complete --------\n')
print('Iterations required: {}'.format(nit))
else:
print('--------- Iteration {0:d} ---------\n'.format(nit))
if nit > 0:
if phase == 1:
print('Current Pseudo-Objective Value:')
else:
print('Current Objective Value:')
print('f = ', fun)
print()
print('Current Solution Vector:')
print('x = ', x)
print()
np.set_printoptions(**saved_printoptions)
def linprog_terse_callback(res):
"""
A sample callback function demonstrating the linprog callback interface.
This callback produces brief output to sys.stdout before each iteration
and after the final iteration of the simplex algorithm.
Parameters
----------
res : A `scipy.optimize.OptimizeResult` consisting of the following fields:
x : 1-D array
The independent variable vector which optimizes the linear
programming problem.
fun : float
Value of the objective function.
success : bool
True if the algorithm succeeded in finding an optimal solution.
slack : 1-D array
The values of the slack variables. Each slack variable corresponds
to an inequality constraint. If the slack is zero, then the
corresponding constraint is active.
con : 1-D array
The (nominally zero) residuals of the equality constraints, that is,
``b - A_eq @ x``.
phase : int
The phase of the optimization being executed. In phase 1 a basic
feasible solution is sought and the T has an additional row
representing an alternate objective function.
status : int
An integer representing the exit status of the optimization::
0 : Optimization terminated successfully
1 : Iteration limit reached
2 : Problem appears to be infeasible
3 : Problem appears to be unbounded
4 : Serious numerical difficulties encountered
nit : int
The number of iterations performed.
message : str
A string descriptor of the exit status of the optimization.
"""
nit = res['nit']
x = res['x']
if nit == 0:
print("Iter: X:")
print("{0: <5d} ".format(nit), end="")
print(x)
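# Example usage (sketch): pass one of the callbacks above to `linprog`, e.g.
#   res = linprog(c, A_ub=A_ub, b_ub=b_ub, callback=linprog_terse_callback)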
def linprog(c, A_ub=None, b_ub=None, A_eq=None, b_eq=None,
bounds=None, method='interior-point', callback=None,
options=None, x0=None):
r"""
Linear programming: minimize a linear objective function subject to linear
equality and inequality constraints.
Linear programming solves problems of the following form:
.. math::
\min_x \ & c^T x \\
\mbox{such that} \ & A_{ub} x \leq b_{ub},\\
& A_{eq} x = b_{eq},\\
& l \leq x \leq u ,
where :math:`x` is a vector of decision variables; :math:`c`,
:math:`b_{ub}`, :math:`b_{eq}`, :math:`l`, and :math:`u` are vectors; and
:math:`A_{ub}` and :math:`A_{eq}` are matrices.
Informally, that's:
minimize::
c @ x
such that::
A_ub @ x <= b_ub
A_eq @ x == b_eq
lb <= x <= ub
Note that by default ``lb = 0`` and ``ub = None`` unless specified with
``bounds``.
Parameters
----------
c : 1-D array
The coefficients of the linear objective function to be minimized.
A_ub : 2-D array, optional
The inequality constraint matrix. Each row of ``A_ub`` specifies the
coefficients of a linear inequality constraint on ``x``.
b_ub : 1-D array, optional
The inequality constraint vector. Each element represents an
upper bound on the corresponding value of ``A_ub @ x``.
A_eq : 2-D array, optional
The equality constraint matrix. Each row of ``A_eq`` specifies the
coefficients of a linear equality constraint on ``x``.
b_eq : 1-D array, optional
The equality constraint vector. Each element of ``A_eq @ x`` must equal
the corresponding element of ``b_eq``.
bounds : sequence, optional
A sequence of ``(min, max)`` pairs for each element in ``x``, defining
the minimum and maximum values of that decision variable. Use ``None`` to
indicate that there is no bound. By default, bounds are ``(0, None)``
(all decision variables are non-negative).
If a single tuple ``(min, max)`` is provided, then ``min`` and
``max`` will serve as bounds for all decision variables.
method : {'interior-point', 'revised simplex', 'simplex'}, optional
The algorithm used to solve the standard form problem.
:ref:`'interior-point' <optimize.linprog-interior-point>` (default),
:ref:`'revised simplex' <optimize.linprog-revised_simplex>`, and
:ref:`'simplex' <optimize.linprog-simplex>` (legacy)
are supported.
callback : callable, optional
If a callback function is provided, it will be called at least once per
iteration of the algorithm. The callback function must accept a single
`scipy.optimize.OptimizeResult` consisting of the following fields:
x : 1-D array
The current solution vector.
fun : float
The current value of the objective function ``c @ x``.
success : bool
``True`` when the algorithm has completed successfully.
slack : 1-D array
The (nominally positive) values of the slack,
``b_ub - A_ub @ x``.
con : 1-D array
The (nominally zero) residuals of the equality constraints,
``b_eq - A_eq @ x``.
phase : int
The phase of the algorithm being executed.
status : int
An integer representing the status of the algorithm.
``0`` : Optimization proceeding nominally.
``1`` : Iteration limit reached.
``2`` : Problem appears to be infeasible.
``3`` : Problem appears to be unbounded.
``4`` : Numerical difficulties encountered.
nit : int
The current iteration number.
message : str
A string descriptor of the algorithm status.
options : dict, optional
A dictionary of solver options. All methods accept the following
options:
maxiter : int
Maximum number of iterations to perform.
Default: see method-specific documentation.
disp : bool
Set to ``True`` to print convergence messages.
Default: ``False``.
autoscale : bool
Set to ``True`` to automatically perform equilibration.
Consider using this option if the numerical values in the
constraints are separated by several orders of magnitude.
Default: ``False``.
presolve : bool
Set to ``False`` to disable automatic presolve.
Default: ``True``.
rr : bool
Set to ``False`` to disable automatic redundancy removal.
Default: ``True``.
For method-specific options, see
:func:`show_options('linprog') <show_options>`.
x0 : 1-D array, optional
Guess values of the decision variables, which will be refined by
the optimization algorithm. This argument is currently used only by the
'revised simplex' method, and can only be used if `x0` represents a
basic feasible solution.
Returns
-------
res : OptimizeResult
A :class:`scipy.optimize.OptimizeResult` consisting of the fields:
x : 1-D array
            The values of the decision variables that minimize the
objective function while satisfying the constraints.
fun : float
The optimal value of the objective function ``c @ x``.
slack : 1-D array
The (nominally positive) values of the slack variables,
``b_ub - A_ub @ x``.
con : 1-D array
The (nominally zero) residuals of the equality constraints,
``b_eq - A_eq @ x``.
success : bool
``True`` when the algorithm succeeds in finding an optimal
solution.
status : int
An integer representing the exit status of the algorithm.
``0`` : Optimization terminated successfully.
``1`` : Iteration limit reached.
``2`` : Problem appears to be infeasible.
``3`` : Problem appears to be unbounded.
``4`` : Numerical difficulties encountered.
nit : int
The total number of iterations performed in all phases.
message : str
A string descriptor of the exit status of the algorithm.
See Also
--------
show_options : Additional options accepted by the solvers.
Notes
-----
This section describes the available solvers that can be selected by the
'method' parameter.
:ref:`'interior-point' <optimize.linprog-interior-point>` is the default
as it is typically the fastest and most robust method.
:ref:`'revised simplex' <optimize.linprog-revised_simplex>` is more
accurate for the problems it solves.
:ref:`'simplex' <optimize.linprog-simplex>` is the legacy method and is
included for backwards compatibility and educational purposes.
Method *interior-point* uses the primal-dual path following algorithm
as outlined in [4]_. This algorithm supports sparse constraint matrices and
is typically faster than the simplex methods, especially for large, sparse
problems. Note, however, that the solution returned may be slightly less
accurate than those of the simplex methods and will not, in general,
correspond with a vertex of the polytope defined by the constraints.
.. versionadded:: 1.0.0
Method *revised simplex* uses the revised simplex method as described in
[9]_, except that a factorization [11]_ of the basis matrix, rather than
its inverse, is efficiently maintained and used to solve the linear systems
at each iteration of the algorithm.
.. versionadded:: 1.3.0
Method *simplex* uses a traditional, full-tableau implementation of
Dantzig's simplex algorithm [1]_, [2]_ (*not* the
Nelder-Mead simplex). This algorithm is included for backwards
compatibility and educational purposes.
.. versionadded:: 0.15.0
Before applying any method, a presolve procedure based on [8]_ attempts
to identify trivial infeasibilities, trivial unboundedness, and potential
problem simplifications. Specifically, it checks for:
- rows of zeros in ``A_eq`` or ``A_ub``, representing trivial constraints;
- columns of zeros in ``A_eq`` `and` ``A_ub``, representing unconstrained
variables;
- column singletons in ``A_eq``, representing fixed variables; and
- column singletons in ``A_ub``, representing simple bounds.
If presolve reveals that the problem is unbounded (e.g. an unconstrained
and unbounded variable has negative cost) or infeasible (e.g., a row of
zeros in ``A_eq`` corresponds with a nonzero in ``b_eq``), the solver
terminates with the appropriate status code. Note that presolve terminates
as soon as any sign of unboundedness is detected; consequently, a problem
may be reported as unbounded when in reality the problem is infeasible
(but infeasibility has not been detected yet). Therefore, if it is
important to know whether the problem is actually infeasible, solve the
problem again with option ``presolve=False``.
If neither infeasibility nor unboundedness are detected in a single pass
of the presolve, bounds are tightened where possible and fixed
variables are removed from the problem. Then, linearly dependent rows
of the ``A_eq`` matrix are removed, (unless they represent an
infeasibility) to avoid numerical difficulties in the primary solve
routine. Note that rows that are nearly linearly dependent (within a
prescribed tolerance) may also be removed, which can change the optimal
solution in rare cases. If this is a concern, eliminate redundancy from
your problem formulation and run with option ``rr=False`` or
``presolve=False``.
Several potential improvements can be made here: additional presolve
checks outlined in [8]_ should be implemented, the presolve routine should
be run multiple times (until no further simplifications can be made), and
more of the efficiency improvements from [5]_ should be implemented in the
redundancy removal routines.
After presolve, the problem is transformed to standard form by converting
the (tightened) simple bounds to upper bound constraints, introducing
non-negative slack variables for inequality constraints, and expressing
unbounded variables as the difference between two non-negative variables.
Optionally, the problem is automatically scaled via equilibration [12]_.
The selected algorithm solves the standard form problem, and a
postprocessing routine converts the result to a solution to the original
problem.
References
----------
.. [1] Dantzig, George B., Linear programming and extensions. Rand
Corporation Research Study Princeton Univ. Press, Princeton, NJ,
1963
.. [2] Hillier, S.H. and Lieberman, G.J. (1995), "Introduction to
Mathematical Programming", McGraw-Hill, Chapter 4.
.. [3] Bland, Robert G. New finite pivoting rules for the simplex method.
Mathematics of Operations Research (2), 1977: pp. 103-107.
.. [4] Andersen, Erling D., and Knud D. Andersen. "The MOSEK interior point
optimizer for linear programming: an implementation of the
homogeneous algorithm." High performance optimization. Springer US,
2000. 197-232.
.. [5] Andersen, Erling D. "Finding all linearly dependent rows in
large-scale linear programming." Optimization Methods and Software
6.3 (1995): 219-227.
.. [6] Freund, Robert M. "Primal-Dual Interior-Point Methods for Linear
Programming based on Newton's Method." Unpublished Course Notes,
March 2004. Available 2/25/2017 at
https://ocw.mit.edu/courses/sloan-school-of-management/15-084j-nonlinear-programming-spring-2004/lecture-notes/lec14_int_pt_mthd.pdf
.. [7] Fourer, Robert. "Solving Linear Programs by Interior-Point Methods."
Unpublished Course Notes, August 26, 2005. Available 2/25/2017 at
http://www.4er.org/CourseNotes/Book%20B/B-III.pdf
.. [8] Andersen, Erling D., and Knud D. Andersen. "Presolving in linear
programming." Mathematical Programming 71.2 (1995): 221-245.
.. [9] Bertsimas, Dimitris, and J. Tsitsiklis. "Introduction to linear
programming." Athena Scientific 1 (1997): 997.
.. [10] Andersen, Erling D., et al. Implementation of interior point
methods for large scale linear programming. HEC/Universite de
Geneve, 1996.
.. [11] Bartels, Richard H. "A stabilization of the simplex method."
Journal in Numerische Mathematik 16.5 (1971): 414-434.
.. [12] Tomlin, J. A. "On scaling linear programming problems."
Mathematical Programming Study 4 (1975): 146-166.
Examples
--------
Consider the following problem:
.. math::
\min_{x_0, x_1} \ -x_0 + 4x_1 & \\
\mbox{such that} \ -3x_0 + x_1 & \leq 6,\\
-x_0 - 2x_1 & \geq -4,\\
x_1 & \geq -3.
The problem is not presented in the form accepted by `linprog`. This is
easily remedied by converting the "greater than" inequality
constraint to a "less than" inequality constraint by
multiplying both sides by a factor of :math:`-1`. Note also that the last
constraint is really the simple bound :math:`-3 \leq x_1 \leq \infty`.
Finally, since there are no bounds on :math:`x_0`, we must explicitly
specify the bounds :math:`-\infty \leq x_0 \leq \infty`, as the
    default is for variables to be non-negative. After collecting coefficients
into arrays and tuples, the input for this problem is:
>>> c = [-1, 4]
>>> A = [[-3, 1], [1, 2]]
>>> b = [6, 4]
>>> x0_bounds = (None, None)
>>> x1_bounds = (-3, None)
>>> from scipy.optimize import linprog
>>> res = linprog(c, A_ub=A, b_ub=b, bounds=[x0_bounds, x1_bounds])
Note that the default method for `linprog` is 'interior-point', which is
approximate by nature.
>>> print(res)
con: array([], dtype=float64)
fun: -21.99999984082494 # may vary
message: 'Optimization terminated successfully.'
nit: 6 # may vary
slack: array([3.89999997e+01, 8.46872439e-08] # may vary
status: 0
success: True
x: array([ 9.99999989, -2.99999999]) # may vary
If you need greater accuracy, try 'revised simplex'.
>>> res = linprog(c, A_ub=A, b_ub=b, bounds=[x0_bounds, x1_bounds], method='revised simplex')
>>> print(res)
con: array([], dtype=float64)
fun: -22.0 # may vary
message: 'Optimization terminated successfully.'
nit: 1 # may vary
slack: array([39., 0.]) # may vary
status: 0
success: True
x: array([10., -3.]) # may vary
"""
meth = method.lower()
if x0 is not None and meth != "revised simplex":
warning_message = "x0 is used only when method is 'revised simplex'. "
warn(warning_message, OptimizeWarning)
lp = _LPProblem(c, A_ub, b_ub, A_eq, b_eq, bounds, x0)
lp, solver_options = _parse_linprog(lp, options)
tol = solver_options.get('tol', 1e-9)
iteration = 0
complete = False # will become True if solved in presolve
undo = []
# Keep the original arrays to calculate slack/residuals for original
# problem.
lp_o = deepcopy(lp)
# Solve trivial problem, eliminate variables, tighten bounds, etc.
c0 = 0 # we might get a constant term in the objective
if solver_options.pop('presolve', True):
rr = solver_options.pop('rr', True)
(lp, c0, x, undo, complete, status, message) = _presolve(lp, rr, tol)
C, b_scale = 1, 1 # for trivial unscaling if autoscale is not used
postsolve_args = (lp_o._replace(bounds=lp.bounds), undo, C, b_scale)
if not complete:
A, b, c, c0, x0 = _get_Abc(lp, c0)
if solver_options.pop('autoscale', False):
A, b, c, x0, C, b_scale = _autoscale(A, b, c, x0)
postsolve_args = postsolve_args[:-2] + (C, b_scale)
if meth == 'simplex':
x, status, message, iteration = _linprog_simplex(
c, c0=c0, A=A, b=b, callback=callback,
postsolve_args=postsolve_args, **solver_options)
elif meth == 'interior-point':
x, status, message, iteration = _linprog_ip(
c, c0=c0, A=A, b=b, callback=callback,
postsolve_args=postsolve_args, **solver_options)
elif meth == 'revised simplex':
x, status, message, iteration = _linprog_rs(
c, c0=c0, A=A, b=b, x0=x0, callback=callback,
postsolve_args=postsolve_args, **solver_options)
else:
raise ValueError('Unknown solver %s' % method)
# Eliminate artificial variables, re-introduce presolved variables, etc.
disp = solver_options.get('disp', False)
x, fun, slack, con = _postsolve(x, postsolve_args, complete)
status, message = _check_result(x, fun, status, slack, con, lp_o.bounds, tol, message)
if disp:
_display_summary(message, status, fun, iteration)
sol = {
'x': x,
'fun': fun,
'slack': slack,
'con': con,
'status': status,
'message': message,
'nit': iteration,
'success': status == 0}
return OptimizeResult(sol)
| bsd-3-clause | 4,203,824,383,983,364,600 | 39.65338 | 143 | 0.624504 | false |
mdeemer/XlsxWriter | xlsxwriter/test/comparison/test_textbox02.py | 8 | 1108 | ###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2015, John McNamara, [email protected]
#
from ..excel_comparsion_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.maxDiff = None
filename = 'textbox02.xlsx'
test_dir = 'xlsxwriter/test/comparison/'
self.got_filename = test_dir + '_test_' + filename
self.exp_filename = test_dir + 'xlsx_files/' + filename
self.ignore_files = []
self.ignore_elements = {}
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with textbox(s)."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.insert_textbox('E9', 'This is some text')
worksheet.insert_textbox('H18', 'Some more text')
workbook.close()
self.assertExcelEqual()
| bsd-2-clause | -9,035,809,802,474,116,000 | 24.767442 | 79 | 0.594765 | false |
cbporch/perceptron | adaline.py | 1 | 1193 | import numpy as np
from perceptron import Perceptron
class Adaline(Perceptron):
"""
    Implementation of an Adaptive Linear Neuron that can be abstracted to
    various input sizes or dimensions. Displays results using pyplot.
"""
ETA = 1
def __init__(self, grph, eta, max_t):
Perceptron.__init__(self, grph)
self.ETA = eta
self.max_t = max_t
def update(self, y_t, x):
r = []
s_t = np.sign(np.inner(self.grph.w, x))
for i in range(self.DIM):
r.append(self.grph.w[i] + (self.ETA * (y_t - s_t) * x[i]))
return r
def fit(self):
t = 0
c = True
while c:
n = self.random_check()
if n == -1 or t == self.max_t:
c = False
else:
self.grph.w = self.update(self.grph.y[n], self.grph.training_matrix[n])
t += 1
print("t: {0}, w: {1}".format(t, self.grph.w))
if self.grph.PLOT:
self.grph.plot_g() # In calling g() the 0th value is 1, corresponding to w_0
self.grph.show_plot()
# and the last value is not used in calculation, so is set as 0
return t
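# Illustrative sketch (not part of the class above): a single LMS-style weight
# update on a toy augmented sample, mirroring Adaline.update(). The weights,
# input, label and learning rate below are made-up values for demonstration.
if __name__ == '__main__':
    w = np.array([0.1, -0.2, 0.4])    # current weights, w[0] acts as the bias
    x = np.array([1.0, 0.5, -1.5])    # augmented input, x[0] == 1
    y = 1                             # target label
    eta = 0.1                         # learning rate
    s = np.sign(np.inner(w, x))       # current prediction
    w = w + eta * (y - s) * x         # Adaline/LMS update rule
    print("updated weights:", w)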
| mit | -5,888,406,898,526,576,000 | 29.589744 | 89 | 0.521375 | false |
Jortolsa/l10n-spain | l10n_es_aeat_mod340/models/account_invoice.py | 9 | 1455 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2012 Acysos S.L. (http://acysos.com)
# Ignacio Ibeas <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import orm, fields
class AccountInvoice(orm.Model):
_inherit = 'account.invoice'
_columns = {
'is_ticket_summary': fields.boolean(
'Ticket Summary',
help='Check if this invoice is a ticket summary'),
'number_tickets': fields.integer('Number of tickets', digits=(12, 0)),
'first_ticket': fields.char('First ticket', size=40),
'last_ticket': fields.char('Last ticket', size=40)
}
| agpl-3.0 | 3,294,555,049,847,596,000 | 40.571429 | 78 | 0.593814 | false |
bdang2012/taiga-back-casting | taiga/projects/milestones/services.py | 1 | 1452 | # Copyright (C) 2014-2015 Andrey Antukh <[email protected]>
# Copyright (C) 2014-2015 Jesús Espino <[email protected]>
# Copyright (C) 2014-2015 David Barragán <[email protected]>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.utils import timezone
from . import models
def calculate_milestone_is_closed(milestone):
return (milestone.user_stories.all().count() > 0 and
all([task.status.is_closed for task in milestone.tasks.all()]) and
all([user_story.is_closed for user_story in milestone.user_stories.all()]))
def close_milestone(milestone):
if not milestone.closed:
milestone.closed = True
milestone.save(update_fields=["closed",])
def open_milestone(milestone):
if milestone.closed:
milestone.closed = False
milestone.save(update_fields=["closed",])
| agpl-3.0 | 2,424,713,738,919,583,000 | 37.157895 | 87 | 0.727586 | false |
FoxerLee/iOS_sitp | Pods/AVOSCloudCrashReporting/Breakpad/src/tools/gyp/test/mac/gyptest-xctest.py | 221 | 1196 | #!/usr/bin/env python
# Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that xctest targets are correctly configured.
"""
import TestGyp
import sys
if sys.platform == 'darwin':
test = TestGyp.TestGyp(formats=['xcode'])
# Ignore this test if Xcode 5 is not installed
import subprocess
job = subprocess.Popen(['xcodebuild', '-version'],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
out, err = job.communicate()
if job.returncode != 0:
raise Exception('Error %d running xcodebuild' % job.returncode)
xcode_version, build_number = out.splitlines()
# Convert the version string from 'Xcode 5.0' to ['5','0'].
xcode_version = xcode_version.split()[-1].split('.')
if xcode_version < ['5']:
test.pass_test()
CHDIR = 'xctest'
test.run_gyp('test.gyp', chdir=CHDIR)
test.build('test.gyp', chdir=CHDIR, arguments=['-scheme', 'classes', 'test'])
test.built_file_must_match('tests.xctest/Contents/Resources/resource.txt',
'foo\n', chdir=CHDIR)
test.pass_test()
| mit | 8,804,503,777,923,049,000 | 30.473684 | 79 | 0.650502 | false |
guillermo-carrasco/bcbio-nextgen | bcbio/structural/__init__.py | 1 | 5772 | """Detect structural variation in genomes using high-throughput sequencing data.
"""
import collections
import copy
import operator
import toolz as tz
from bcbio.pipeline import datadict as dd
from bcbio.structural import (battenberg, cn_mops, cnvkit, delly,
lumpy, manta, metasv, plot, validate, wham)
from bcbio.variation import vcfutils
# Stratify callers by stage -- see `run` documentation below for definitions
_CALLERS = {
"initial": {"cnvkit": cnvkit.run,
"battenberg": battenberg.run},
"standard": {"cn.mops": cn_mops.run, "manta": manta.run,
"delly": delly.run, "lumpy": lumpy.run, "wham": wham.run},
"ensemble": {"metasv": metasv.run}}
_NEEDS_BACKGROUND = set(["cn.mops"])
def _get_svcallers(data):
svs = data["config"]["algorithm"].get("svcaller")
if svs is None:
svs = []
elif isinstance(svs, basestring):
svs = [svs]
return svs
def _handle_multiple_svcallers(data, stage):
"""Retrieve configured structural variation caller, handling multiple.
"""
svs = _get_svcallers(data)
out = []
for svcaller in svs:
if svcaller in _CALLERS[stage]:
base = copy.deepcopy(data)
base["config"]["algorithm"]["svcaller_active"] = svcaller
out.append(base)
return out
def finalize_sv(samples, config):
"""Combine results from multiple sv callers into a single ordered 'sv' key.
Handles ensemble calling and plotting of results.
"""
by_bam = collections.OrderedDict()
for x in samples:
try:
by_bam[x["align_bam"]].append(x)
except KeyError:
by_bam[x["align_bam"]] = [x]
by_batch = collections.OrderedDict()
lead_batches = {}
for grouped_calls in by_bam.values():
def orig_svcaller_order(x):
return _get_svcallers(x).index(x["config"]["algorithm"]["svcaller_active"])
sorted_svcalls = sorted([x for x in grouped_calls if "sv" in x],
key=orig_svcaller_order)
final = grouped_calls[0]
if len(sorted_svcalls) > 0:
final["sv"] = reduce(operator.add, [x["sv"] for x in sorted_svcalls])
del final["config"]["algorithm"]["svcaller_active"]
batch = dd.get_batch(final) or dd.get_sample_name(final)
batches = batch if isinstance(batch, (list, tuple)) else [batch]
lead_batches[dd.get_sample_name(final)] = batches[0]
for batch in batches:
try:
by_batch[batch].append(final)
except KeyError:
by_batch[batch] = [final]
out = []
for batch, items in by_batch.items():
if any("svplots" in dd.get_tools_on(d) for d in items):
plot_items = plot.by_regions(items)
else:
plot_items = items
for data in plot_items:
if lead_batches[dd.get_sample_name(data)] == batch:
out.append([data])
return out
def validate_sv(data):
"""Validate structural variant calls for a sample.
"""
return [[validate.evaluate(data)]]
def run(samples, run_parallel, stage):
"""Run structural variation detection.
The stage indicates which level of structural variant calling to run.
- initial, run prior to other callers and variant calling
- standard, regular batch calling
    - ensemble, post-calling, combine other callers
"""
to_process = collections.OrderedDict()
extras = []
background = []
for data in (xs[0] for xs in samples):
ready_data = _handle_multiple_svcallers(data, stage)
if len(ready_data) > 0:
background.append(data)
for x in ready_data:
svcaller = x["config"]["algorithm"].get("svcaller_active")
if stage == "ensemble": # no batching for ensemble methods
batch = dd.get_sample_name(x)
else:
batch = dd.get_batch(x) or dd.get_sample_name(x)
batches = batch if isinstance(batch, (list, tuple)) else [batch]
for b in batches:
try:
to_process[(svcaller, b)].append(x)
except KeyError:
to_process[(svcaller, b)] = [x]
else:
extras.append([data])
processed = run_parallel("detect_sv", ([xs, background, xs[0]["config"], stage]
for xs in to_process.values()))
finalized = (run_parallel("finalize_sv", [([xs[0] for xs in processed], processed[0][0]["config"])])
if len(processed) > 0 else [])
return extras + finalized
def detect_sv(items, all_items, config, stage):
"""Top level parallel target for examining structural variation.
"""
svcaller = config["algorithm"].get("svcaller_active")
caller_fn = _CALLERS[stage].get(svcaller)
out = []
if svcaller and caller_fn:
if (svcaller in _NEEDS_BACKGROUND and
not vcfutils.is_paired_analysis([x.get("align_bam") for x in items], items)):
names = set([tz.get_in(["rgnames", "sample"], x) for x in items])
background = [x for x in all_items if tz.get_in(["rgnames", "sample"], x) not in names]
for svdata in caller_fn(items, background):
out.append([svdata])
else:
for svdata in caller_fn(items):
out.append([svdata])
else:
for data in items:
out.append([data])
return out
# ## configuration
def parallel_multiplier(items):
"""Use more resources (up to available limits) if we have multiple SV callers.
"""
return max([1] + [len(_get_svcallers(xs[0])) for xs in items])
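# Illustrative sketch (not part of the module): _get_svcallers expects the
# caller names under data["config"]["algorithm"], either as a single string
# or as a list, e.g.
#
#     data = {"config": {"algorithm": {"svcaller": ["lumpy", "manta"]}}}
#     _get_svcallers(data)  # -> ["lumpy", "manta"]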
| mit | 4,835,140,490,848,871,000 | 37.48 | 104 | 0.584026 | false |
openmv/openmv | scripts/examples/OpenMV/03-Drawing/text_drawing.py | 3 | 1127 | # Text Drawing
#
# This example shows off drawing text on the OpenMV Cam.
import sensor, image, time, pyb
sensor.reset()
sensor.set_pixformat(sensor.RGB565) # or GRAYSCALE...
sensor.set_framesize(sensor.QVGA) # or QQVGA...
sensor.skip_frames(time = 2000)
clock = time.clock()
while(True):
clock.tick()
img = sensor.snapshot()
for i in range(10):
x = (pyb.rng() % (2*img.width())) - (img.width()//2)
y = (pyb.rng() % (2*img.height())) - (img.height()//2)
r = (pyb.rng() % 127) + 128
g = (pyb.rng() % 127) + 128
b = (pyb.rng() % 127) + 128
# If the first argument is a scaler then this method expects
# to see x, y, and text. Otherwise, it expects a (x,y,text) tuple.
# Character and string rotation can be done at 0, 90, 180, 270, and etc. degrees.
img.draw_string(x, y, "Hello World!", color = (r, g, b), scale = 2, mono_space = False,
char_rotation = 0, char_hmirror = False, char_vflip = False,
string_rotation = 0, string_hmirror = False, string_vflip = False)
print(clock.fps())
| mit | -3,380,956,146,827,878,000 | 33.151515 | 95 | 0.578527 | false |
etingof/pyasn1 | tests/type/test_constraint.py | 2 | 11339 | #
# This file is part of pyasn1 software.
#
# Copyright (c) 2005-2020, Ilya Etingof <[email protected]>
# License: http://snmplabs.com/pyasn1/license.html
#
import sys
import unittest
from tests.base import BaseTestCase
from pyasn1.type import constraint
from pyasn1.type import error
class SingleValueConstraintTestCase(BaseTestCase):
def setUp(self):
BaseTestCase.setUp(self)
self.v1 = 1, 2
self.v2 = 3, 4
self.c1 = constraint.SingleValueConstraint(*self.v1)
self.c2 = constraint.SingleValueConstraint(*self.v2)
def testCmp(self):
assert self.c1 == self.c1, 'comparison fails'
def testHash(self):
assert hash(self.c1) != hash(self.c2), 'hash() fails'
def testGoodVal(self):
try:
self.c1(1)
except error.ValueConstraintError:
assert 0, 'constraint check fails'
def testBadVal(self):
try:
self.c1(4)
except error.ValueConstraintError:
pass
else:
assert 0, 'constraint check fails'
def testContains(self):
for v in self.v1:
assert v in self.c1
assert v not in self.c2
for v in self.v2:
assert v in self.c2
assert v not in self.c1
def testIter(self):
assert set(self.v1) == set(self.c1)
assert set(self.v2) == set(self.c2)
def testSub(self):
subconst = self.c1 - constraint.SingleValueConstraint(self.v1[0])
assert list(subconst) == [self.v1[1]]
def testAdd(self):
superconst = self.c1 + self.c2
assert set(superconst) == set(self.v1 + self.v2)
class ContainedSubtypeConstraintTestCase(BaseTestCase):
def setUp(self):
BaseTestCase.setUp(self)
self.c1 = constraint.ContainedSubtypeConstraint(
constraint.SingleValueConstraint(12)
)
def testGoodVal(self):
try:
self.c1(12)
except error.ValueConstraintError:
assert 0, 'constraint check fails'
def testBadVal(self):
try:
self.c1(4)
except error.ValueConstraintError:
pass
else:
assert 0, 'constraint check fails'
class ValueRangeConstraintTestCase(BaseTestCase):
def setUp(self):
BaseTestCase.setUp(self)
self.c1 = constraint.ValueRangeConstraint(1, 4)
def testGoodVal(self):
try:
self.c1(1)
except error.ValueConstraintError:
assert 0, 'constraint check fails'
def testBadVal(self):
try:
self.c1(-5)
except error.ValueConstraintError:
pass
else:
assert 0, 'constraint check fails'
class ValueSizeConstraintTestCase(BaseTestCase):
def setUp(self):
BaseTestCase.setUp(self)
self.c1 = constraint.ValueSizeConstraint(1, 2)
def testGoodVal(self):
try:
self.c1('a')
except error.ValueConstraintError:
assert 0, 'constraint check fails'
def testBadVal(self):
try:
self.c1('abc')
except error.ValueConstraintError:
pass
else:
assert 0, 'constraint check fails'
class PermittedAlphabetConstraintTestCase(SingleValueConstraintTestCase):
def setUp(self):
self.v1 = 'A', 'B'
self.v2 = 'C', 'D'
self.c1 = constraint.PermittedAlphabetConstraint(*self.v1)
self.c2 = constraint.PermittedAlphabetConstraint(*self.v2)
def testGoodVal(self):
try:
self.c1('A')
except error.ValueConstraintError:
assert 0, 'constraint check fails'
def testBadVal(self):
try:
self.c1('E')
except error.ValueConstraintError:
pass
else:
assert 0, 'constraint check fails'
class WithComponentsConstraintTestCase(BaseTestCase):
def testGoodVal(self):
c = constraint.WithComponentsConstraint(
('A', constraint.ComponentPresentConstraint()),
('B', constraint.ComponentAbsentConstraint()))
try:
c({'A': 1})
except error.ValueConstraintError:
assert 0, 'constraint check fails'
def testGoodValWithExtraFields(self):
c = constraint.WithComponentsConstraint(
('A', constraint.ComponentPresentConstraint()),
('B', constraint.ComponentAbsentConstraint())
)
try:
c({'A': 1, 'C': 2})
except error.ValueConstraintError:
assert 0, 'constraint check fails'
def testEmptyConstraint(self):
c = constraint.WithComponentsConstraint()
try:
c({'A': 1})
except error.ValueConstraintError:
assert 0, 'constraint check fails'
def testBadVal(self):
c = constraint.WithComponentsConstraint(
('A', constraint.ComponentPresentConstraint())
)
try:
c({'B': 2})
except error.ValueConstraintError:
pass
else:
assert 0, 'constraint check fails'
def testBadValExtraFields(self):
c = constraint.WithComponentsConstraint(
('A', constraint.ComponentPresentConstraint())
)
try:
c({'B': 2, 'C': 3})
except error.ValueConstraintError:
pass
else:
assert 0, 'constraint check fails'
class ConstraintsIntersectionTestCase(BaseTestCase):
def setUp(self):
BaseTestCase.setUp(self)
self.c1 = constraint.ConstraintsIntersection(
constraint.SingleValueConstraint(4),
constraint.ValueRangeConstraint(2, 4)
)
def testCmp1(self):
assert constraint.SingleValueConstraint(4) in self.c1, '__cmp__() fails'
def testCmp2(self):
assert constraint.SingleValueConstraint(5) not in self.c1, \
'__cmp__() fails'
def testCmp3(self):
c = constraint.ConstraintsUnion(constraint.ConstraintsIntersection(
constraint.SingleValueConstraint(4),
constraint.ValueRangeConstraint(2, 4))
)
assert self.c1 in c, '__cmp__() fails'
def testCmp4(self):
c = constraint.ConstraintsUnion(
constraint.ConstraintsIntersection(constraint.SingleValueConstraint(5))
)
assert self.c1 not in c, '__cmp__() fails'
def testGoodVal(self):
try:
self.c1(4)
except error.ValueConstraintError:
assert 0, 'constraint check fails'
def testBadVal(self):
try:
self.c1(-5)
except error.ValueConstraintError:
pass
else:
assert 0, 'constraint check fails'
class InnerTypeConstraintTestCase(BaseTestCase):
def testConst1(self):
c = constraint.InnerTypeConstraint(
constraint.SingleValueConstraint(4)
)
try:
c(4, 32)
except error.ValueConstraintError:
assert 0, 'constraint check fails'
try:
c(5, 32)
except error.ValueConstraintError:
pass
else:
assert 0, 'constraint check fails'
def testConst2(self):
c = constraint.InnerTypeConstraint(
(0, constraint.SingleValueConstraint(4), 'PRESENT'),
(1, constraint.SingleValueConstraint(4), 'ABSENT')
)
try:
c(4, 0)
except error.ValueConstraintError:
assert 0, 'constraint check fails'
try:
c(4, 1)
except error.ValueConstraintError:
pass
else:
assert 0, 'constraint check fails'
try:
c(3, 0)
except error.ValueConstraintError:
pass
else:
assert 0, 'constraint check fails'
# Constraints compositions
class ConstraintsIntersectionRangeTestCase(BaseTestCase):
def setUp(self):
BaseTestCase.setUp(self)
self.c1 = constraint.ConstraintsIntersection(
constraint.ValueRangeConstraint(1, 9),
constraint.ValueRangeConstraint(2, 5)
)
def testGoodVal(self):
try:
self.c1(3)
except error.ValueConstraintError:
assert 0, 'constraint check fails'
def testBadVal(self):
try:
self.c1(0)
except error.ValueConstraintError:
pass
else:
assert 0, 'constraint check fails'
class ConstraintsUnionTestCase(BaseTestCase):
def setUp(self):
BaseTestCase.setUp(self)
self.c1 = constraint.ConstraintsUnion(
constraint.SingleValueConstraint(5),
constraint.ValueRangeConstraint(1, 3)
)
def testGoodVal(self):
try:
self.c1(2)
self.c1(5)
except error.ValueConstraintError:
assert 0, 'constraint check fails'
def testBadVal(self):
try:
self.c1(-5)
except error.ValueConstraintError:
pass
else:
assert 0, 'constraint check fails'
class ConstraintsExclusionTestCase(BaseTestCase):
def setUp(self):
BaseTestCase.setUp(self)
self.c1 = constraint.ConstraintsExclusion(
constraint.ValueRangeConstraint(2, 4)
)
def testGoodVal(self):
try:
self.c1(6)
except error.ValueConstraintError:
assert 0, 'constraint check fails'
def testBadVal(self):
try:
self.c1(2)
except error.ValueConstraintError:
pass
else:
assert 0, 'constraint check fails'
# Constraints derivations
class DirectDerivationTestCase(BaseTestCase):
def setUp(self):
BaseTestCase.setUp(self)
self.c1 = constraint.SingleValueConstraint(5)
self.c2 = constraint.ConstraintsUnion(
self.c1, constraint.ValueRangeConstraint(1, 3)
)
def testGoodVal(self):
assert self.c1.isSuperTypeOf(self.c2), 'isSuperTypeOf failed'
assert not self.c1.isSubTypeOf(self.c2), 'isSubTypeOf failed'
def testBadVal(self):
assert not self.c2.isSuperTypeOf(self.c1), 'isSuperTypeOf failed'
assert self.c2.isSubTypeOf(self.c1), 'isSubTypeOf failed'
class IndirectDerivationTestCase(BaseTestCase):
def setUp(self):
BaseTestCase.setUp(self)
self.c1 = constraint.ConstraintsIntersection(
constraint.ValueRangeConstraint(1, 30)
)
self.c2 = constraint.ConstraintsIntersection(
self.c1, constraint.ValueRangeConstraint(1, 20)
)
self.c2 = constraint.ConstraintsIntersection(
self.c2, constraint.ValueRangeConstraint(1, 10)
)
def testGoodVal(self):
assert self.c1.isSuperTypeOf(self.c2), 'isSuperTypeOf failed'
assert not self.c1.isSubTypeOf(self.c2), 'isSubTypeOf failed'
def testBadVal(self):
assert not self.c2.isSuperTypeOf(self.c1), 'isSuperTypeOf failed'
assert self.c2.isSubTypeOf(self.c1), 'isSubTypeOf failed'
# TODO: how to apply size constraints to constructed types?
suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
if __name__ == '__main__':
unittest.TextTestRunner(verbosity=2).run(suite)
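# Illustrative sketch (not part of the test suite): outside of tests, the same
# constraint objects are typically attached to ASN.1 types through the
# ``subtypeSpec`` class attribute, e.g.
#
#     from pyasn1.type import univ
#
#     class Port(univ.Integer):
#         subtypeSpec = constraint.ValueRangeConstraint(0, 65535)
#
#     Port(8080)   # fine
#     Port(70000)  # raises an error because the constraint is violated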
| bsd-2-clause | 2,848,388,413,799,149,000 | 25.997619 | 83 | 0.600935 | false |
maartenq/ansible | lib/ansible/modules/packaging/os/package.py | 18 | 1929 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Ansible Project
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'core'}
DOCUMENTATION = '''
---
module: package
version_added: 2.0
author:
- Ansible Inc
short_description: Generic OS package manager
description:
- Installs, upgrade and removes packages using the underlying OS package manager.
- For Windows targets, use the M(win_package) module instead.
options:
name:
description:
- "Package name, or package specifier with version, like C(name-1.0)."
- "Be aware that packages are not always named the same and this module will not 'translate' them per distro."
required: true
state:
description:
- Whether to install (C(present)), or remove (C(absent)) a package. Other states depend on the underlying package module, i.e C(latest).
required: true
use:
description:
- The required package manager module to use (yum, apt, etc). The default 'auto' will use existing facts or try to autodetect it.
- You should only use this field if the automatic selection is not working for some reason.
required: false
default: auto
requirements:
- Whatever is required for the package plugins specific for each system.
notes:
- This module actually calls the pertinent package modules for each system (apt, yum, etc).
- For Windows targets, use the M(win_package) module instead.
'''
EXAMPLES = '''
- name: install ntpdate
package:
name: ntpdate
state: present
# This uses a variable as this changes per distribution.
- name: remove the apache package
package:
name: "{{ apache }}"
state: absent
'''
| gpl-3.0 | 3,288,898,828,686,812,700 | 31.15 | 142 | 0.683256 | false |
blacklin/kbengine | kbe/src/lib/python/Lib/heapq.py | 208 | 17997 | """Heap queue algorithm (a.k.a. priority queue).
Heaps are arrays for which a[k] <= a[2*k+1] and a[k] <= a[2*k+2] for
all k, counting elements from 0. For the sake of comparison,
non-existing elements are considered to be infinite. The interesting
property of a heap is that a[0] is always its smallest element.
Usage:
heap = [] # creates an empty heap
heappush(heap, item) # pushes a new item on the heap
item = heappop(heap) # pops the smallest item from the heap
item = heap[0] # smallest item on the heap without popping it
heapify(x) # transforms list into a heap, in-place, in linear time
item = heapreplace(heap, item) # pops and returns smallest item, and adds
# new item; the heap size is unchanged
Our API differs from textbook heap algorithms as follows:
- We use 0-based indexing. This makes the relationship between the
index for a node and the indexes for its children slightly less
obvious, but is more suitable since Python uses 0-based indexing.
- Our heappop() method returns the smallest item, not the largest.
These two make it possible to view the heap as a regular Python list
without surprises: heap[0] is the smallest item, and heap.sort()
maintains the heap invariant!
"""
# Original code by Kevin O'Connor, augmented by Tim Peters and Raymond Hettinger
__about__ = """Heap queues
[explanation by François Pinard]
Heaps are arrays for which a[k] <= a[2*k+1] and a[k] <= a[2*k+2] for
all k, counting elements from 0. For the sake of comparison,
non-existing elements are considered to be infinite. The interesting
property of a heap is that a[0] is always its smallest element.
The strange invariant above is meant to be an efficient memory
representation for a tournament. The numbers below are `k', not a[k]:
0
1 2
3 4 5 6
7 8 9 10 11 12 13 14
15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30
In the tree above, each cell `k' is topping `2*k+1' and `2*k+2'. In
a typical binary tournament, as seen in sports, each cell is the winner
over the two cells it tops, and we can trace the winner down the tree
to see all opponents s/he had. However, in many computer applications
of such tournaments, we do not need to trace the history of a winner.
To be more memory efficient, when a winner is promoted, we try to
replace it by something else at a lower level, and the rule becomes
that a cell and the two cells it tops contain three different items,
but the top cell "wins" over the two topped cells.
If this heap invariant is protected at all time, index 0 is clearly
the overall winner. The simplest algorithmic way to remove it and
find the "next" winner is to move some loser (let's say cell 30 in the
diagram above) into the 0 position, and then percolate this new 0 down
the tree, exchanging values, until the invariant is re-established.
This is clearly logarithmic on the total number of items in the tree.
By iterating over all items, you get an O(n ln n) sort.
A nice feature of this sort is that you can efficiently insert new
items while the sort is going on, provided that the inserted items are
not "better" than the last 0'th element you extracted. This is
especially useful in simulation contexts, where the tree holds all
incoming events, and the "win" condition means the smallest scheduled
time. When an event schedules other events for execution, they are
scheduled into the future, so they can easily go into the heap. So, a
heap is a good structure for implementing schedulers (this is what I
used for my MIDI sequencer :-).
Various structures for implementing schedulers have been extensively
studied, and heaps are good for this, as they are reasonably speedy,
the speed is almost constant, and the worst case is not much different
than the average case. However, there are other representations which
are more efficient overall, yet the worst cases might be terrible.
Heaps are also very useful in big disk sorts. You most probably all
know that a big sort implies producing "runs" (which are pre-sorted
sequences, whose size is usually related to the amount of CPU memory),
followed by merging passes for these runs; this merging is often
very cleverly organised[1]. It is very important that the initial
sort produces the longest runs possible. Tournaments are a good way
to do that. If, using all the memory available to hold a tournament, you
replace and percolate items that happen to fit the current run, you'll
produce runs which are twice the size of the memory for random input,
and much better for input fuzzily ordered.
Moreover, if you output the 0'th item on disk and get an input which
may not fit in the current tournament (because the value "wins" over
the last output value), it cannot fit in the heap, so the size of the
heap decreases. The freed memory could be cleverly reused immediately
for progressively building a second heap, which grows at exactly the
same rate the first heap is melting. When the first heap completely
vanishes, you switch heaps and start a new run. Clever and quite
effective!
In a word, heaps are useful memory structures to know. I use them in
a few applications, and I think it is good to keep a `heap' module
around. :-)
--------------------
[1] The disk balancing algorithms which are current, nowadays, are
more annoying than clever, and this is a consequence of the seeking
capabilities of the disks. On devices which cannot seek, like big
tape drives, the story was quite different, and one had to be very
clever to ensure (far in advance) that each tape movement will be the
most effective possible (that is, will best participate at
"progressing" the merge). Some tapes were even able to read
backwards, and this was also used to avoid the rewinding time.
Believe me, real good tape sorts were quite spectacular to watch!
From all times, sorting has always been a Great Art! :-)
"""
__all__ = ['heappush', 'heappop', 'heapify', 'heapreplace', 'merge',
'nlargest', 'nsmallest', 'heappushpop']
from itertools import islice, count, tee, chain
def heappush(heap, item):
"""Push item onto heap, maintaining the heap invariant."""
heap.append(item)
_siftdown(heap, 0, len(heap)-1)
def heappop(heap):
"""Pop the smallest item off the heap, maintaining the heap invariant."""
lastelt = heap.pop() # raises appropriate IndexError if heap is empty
if heap:
returnitem = heap[0]
heap[0] = lastelt
_siftup(heap, 0)
else:
returnitem = lastelt
return returnitem
def heapreplace(heap, item):
"""Pop and return the current smallest value, and add the new item.
This is more efficient than heappop() followed by heappush(), and can be
more appropriate when using a fixed-size heap. Note that the value
returned may be larger than item! That constrains reasonable uses of
this routine unless written as part of a conditional replacement:
if item > heap[0]:
item = heapreplace(heap, item)
"""
returnitem = heap[0] # raises appropriate IndexError if heap is empty
heap[0] = item
_siftup(heap, 0)
return returnitem
def heappushpop(heap, item):
"""Fast version of a heappush followed by a heappop."""
if heap and heap[0] < item:
item, heap[0] = heap[0], item
_siftup(heap, 0)
return item
def heapify(x):
"""Transform list into a heap, in-place, in O(len(x)) time."""
n = len(x)
# Transform bottom-up. The largest index there's any point to looking at
# is the largest with a child index in-range, so must have 2*i + 1 < n,
# or i < (n-1)/2. If n is even = 2*j, this is (2*j-1)/2 = j-1/2 so
# j-1 is the largest, which is n//2 - 1. If n is odd = 2*j+1, this is
# (2*j+1-1)/2 = j so j-1 is the largest, and that's again n//2-1.
for i in reversed(range(n//2)):
_siftup(x, i)
def _heappushpop_max(heap, item):
"""Maxheap version of a heappush followed by a heappop."""
if heap and item < heap[0]:
item, heap[0] = heap[0], item
_siftup_max(heap, 0)
return item
def _heapify_max(x):
"""Transform list into a maxheap, in-place, in O(len(x)) time."""
n = len(x)
for i in reversed(range(n//2)):
_siftup_max(x, i)
def nlargest(n, iterable):
"""Find the n largest elements in a dataset.
Equivalent to: sorted(iterable, reverse=True)[:n]
"""
if n < 0:
return []
it = iter(iterable)
result = list(islice(it, n))
if not result:
return result
heapify(result)
_heappushpop = heappushpop
for elem in it:
_heappushpop(result, elem)
result.sort(reverse=True)
return result
def nsmallest(n, iterable):
"""Find the n smallest elements in a dataset.
Equivalent to: sorted(iterable)[:n]
"""
if n < 0:
return []
it = iter(iterable)
result = list(islice(it, n))
if not result:
return result
_heapify_max(result)
_heappushpop = _heappushpop_max
for elem in it:
_heappushpop(result, elem)
result.sort()
return result
# 'heap' is a heap at all indices >= startpos, except possibly for pos. pos
# is the index of a leaf with a possibly out-of-order value. Restore the
# heap invariant.
def _siftdown(heap, startpos, pos):
newitem = heap[pos]
# Follow the path to the root, moving parents down until finding a place
# newitem fits.
while pos > startpos:
parentpos = (pos - 1) >> 1
parent = heap[parentpos]
if newitem < parent:
heap[pos] = parent
pos = parentpos
continue
break
heap[pos] = newitem
# The child indices of heap index pos are already heaps, and we want to make
# a heap at index pos too. We do this by bubbling the smaller child of
# pos up (and so on with that child's children, etc) until hitting a leaf,
# then using _siftdown to move the oddball originally at index pos into place.
#
# We *could* break out of the loop as soon as we find a pos where newitem <=
# both its children, but turns out that's not a good idea, and despite that
# many books write the algorithm that way. During a heap pop, the last array
# element is sifted in, and that tends to be large, so that comparing it
# against values starting from the root usually doesn't pay (= usually doesn't
# get us out of the loop early). See Knuth, Volume 3, where this is
# explained and quantified in an exercise.
#
# Cutting the # of comparisons is important, since these routines have no
# way to extract "the priority" from an array element, so that intelligence
# is likely to be hiding in custom comparison methods, or in array elements
# storing (priority, record) tuples. Comparisons are thus potentially
# expensive.
#
# On random arrays of length 1000, making this change cut the number of
# comparisons made by heapify() a little, and those made by exhaustive
# heappop() a lot, in accord with theory. Here are typical results from 3
# runs (3 just to demonstrate how small the variance is):
#
# Compares needed by heapify Compares needed by 1000 heappops
# -------------------------- --------------------------------
# 1837 cut to 1663 14996 cut to 8680
# 1855 cut to 1659 14966 cut to 8678
# 1847 cut to 1660 15024 cut to 8703
#
# Building the heap by using heappush() 1000 times instead required
# 2198, 2148, and 2219 compares: heapify() is more efficient, when
# you can use it.
#
# The total compares needed by list.sort() on the same lists were 8627,
# 8627, and 8632 (this should be compared to the sum of heapify() and
# heappop() compares): list.sort() is (unsurprisingly!) more efficient
# for sorting.
def _siftup(heap, pos):
endpos = len(heap)
startpos = pos
newitem = heap[pos]
# Bubble up the smaller child until hitting a leaf.
childpos = 2*pos + 1 # leftmost child position
while childpos < endpos:
# Set childpos to index of smaller child.
rightpos = childpos + 1
if rightpos < endpos and not heap[childpos] < heap[rightpos]:
childpos = rightpos
# Move the smaller child up.
heap[pos] = heap[childpos]
pos = childpos
childpos = 2*pos + 1
# The leaf at pos is empty now. Put newitem there, and bubble it up
# to its final resting place (by sifting its parents down).
heap[pos] = newitem
_siftdown(heap, startpos, pos)
def _siftdown_max(heap, startpos, pos):
'Maxheap variant of _siftdown'
newitem = heap[pos]
# Follow the path to the root, moving parents down until finding a place
# newitem fits.
while pos > startpos:
parentpos = (pos - 1) >> 1
parent = heap[parentpos]
if parent < newitem:
heap[pos] = parent
pos = parentpos
continue
break
heap[pos] = newitem
def _siftup_max(heap, pos):
'Maxheap variant of _siftup'
endpos = len(heap)
startpos = pos
newitem = heap[pos]
# Bubble up the larger child until hitting a leaf.
childpos = 2*pos + 1 # leftmost child position
while childpos < endpos:
# Set childpos to index of larger child.
rightpos = childpos + 1
if rightpos < endpos and not heap[rightpos] < heap[childpos]:
childpos = rightpos
# Move the larger child up.
heap[pos] = heap[childpos]
pos = childpos
childpos = 2*pos + 1
# The leaf at pos is empty now. Put newitem there, and bubble it up
# to its final resting place (by sifting its parents down).
heap[pos] = newitem
_siftdown_max(heap, startpos, pos)
# If available, use C implementation
try:
from _heapq import *
except ImportError:
pass
def merge(*iterables):
'''Merge multiple sorted inputs into a single sorted output.
Similar to sorted(itertools.chain(*iterables)) but returns a generator,
does not pull the data into memory all at once, and assumes that each of
the input streams is already sorted (smallest to largest).
>>> list(merge([1,3,5,7], [0,2,4,8], [5,10,15,20], [], [25]))
[0, 1, 2, 3, 4, 5, 5, 7, 8, 10, 15, 20, 25]
'''
_heappop, _heapreplace, _StopIteration = heappop, heapreplace, StopIteration
_len = len
h = []
h_append = h.append
for itnum, it in enumerate(map(iter, iterables)):
try:
next = it.__next__
h_append([next(), itnum, next])
except _StopIteration:
pass
heapify(h)
while _len(h) > 1:
try:
while True:
v, itnum, next = s = h[0]
yield v
s[0] = next() # raises StopIteration when exhausted
_heapreplace(h, s) # restore heap condition
except _StopIteration:
_heappop(h) # remove empty iterator
if h:
# fast case when only a single iterator remains
v, itnum, next = h[0]
yield v
yield from next.__self__
# Extend the implementations of nsmallest and nlargest to use a key= argument
_nsmallest = nsmallest
def nsmallest(n, iterable, key=None):
"""Find the n smallest elements in a dataset.
Equivalent to: sorted(iterable, key=key)[:n]
"""
# Short-cut for n==1 is to use min() when len(iterable)>0
if n == 1:
it = iter(iterable)
head = list(islice(it, 1))
if not head:
return []
if key is None:
return [min(chain(head, it))]
return [min(chain(head, it), key=key)]
# When n>=size, it's faster to use sorted()
try:
size = len(iterable)
except (TypeError, AttributeError):
pass
else:
if n >= size:
return sorted(iterable, key=key)[:n]
# When key is none, use simpler decoration
if key is None:
it = zip(iterable, count()) # decorate
result = _nsmallest(n, it)
return [r[0] for r in result] # undecorate
# General case, slowest method
in1, in2 = tee(iterable)
it = zip(map(key, in1), count(), in2) # decorate
result = _nsmallest(n, it)
return [r[2] for r in result] # undecorate
_nlargest = nlargest
def nlargest(n, iterable, key=None):
"""Find the n largest elements in a dataset.
Equivalent to: sorted(iterable, key=key, reverse=True)[:n]
"""
# Short-cut for n==1 is to use max() when len(iterable)>0
if n == 1:
it = iter(iterable)
head = list(islice(it, 1))
if not head:
return []
if key is None:
return [max(chain(head, it))]
return [max(chain(head, it), key=key)]
# When n>=size, it's faster to use sorted()
try:
size = len(iterable)
except (TypeError, AttributeError):
pass
else:
if n >= size:
return sorted(iterable, key=key, reverse=True)[:n]
# When key is none, use simpler decoration
if key is None:
it = zip(iterable, count(0,-1)) # decorate
result = _nlargest(n, it)
return [r[0] for r in result] # undecorate
# General case, slowest method
in1, in2 = tee(iterable)
it = zip(map(key, in1), count(0,-1), in2) # decorate
result = _nlargest(n, it)
return [r[2] for r in result] # undecorate
if __name__ == "__main__":
# Simple sanity test
heap = []
data = [1, 3, 5, 7, 9, 2, 4, 6, 8, 0]
for item in data:
heappush(heap, item)
sort = []
while heap:
sort.append(heappop(heap))
print(sort)
import doctest
doctest.testmod()
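# Illustrative addition (not part of the original module): tuples compare
# element-wise, so (priority, item) pairs make the heap behave as a simple
# priority queue in which the smallest priority is popped first.
if __name__ == "__main__":
    tasks = []
    heappush(tasks, (5, 'write documentation'))
    heappush(tasks, (1, 'fix critical bug'))
    heappush(tasks, (3, 'review pull request'))
    while tasks:
        print(heappop(tasks))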
| lgpl-3.0 | 291,169,621,331,679,600 | 36.806723 | 81 | 0.646477 | false |
ghickman/django | tests/template_tests/filter_tests/test_wordwrap.py | 21 | 2026 | from django.template.defaultfilters import wordwrap
from django.test import SimpleTestCase
from django.utils.functional import lazystr
from django.utils.safestring import mark_safe
from ..utils import setup
class WordwrapTests(SimpleTestCase):
@setup({'wordwrap01':
'{% autoescape off %}{{ a|wordwrap:"3" }} {{ b|wordwrap:"3" }}{% endautoescape %}'})
def test_wordwrap01(self):
output = self.engine.render_to_string('wordwrap01', {'a': 'a & b', 'b': mark_safe('a & b')})
self.assertEqual(output, 'a &\nb a &\nb')
@setup({'wordwrap02': '{{ a|wordwrap:"3" }} {{ b|wordwrap:"3" }}'})
def test_wordwrap02(self):
output = self.engine.render_to_string('wordwrap02', {'a': 'a & b', 'b': mark_safe('a & b')})
self.assertEqual(output, 'a &\nb a &\nb')
class FunctionTests(SimpleTestCase):
def test_wrap(self):
self.assertEqual(
wordwrap('this is a long paragraph of text that really needs to be wrapped I\'m afraid', 14),
'this is a long\nparagraph of\ntext that\nreally needs\nto be wrapped\nI\'m afraid',
)
def test_indent(self):
self.assertEqual(
wordwrap('this is a short paragraph of text.\n But this line should be indented', 14),
'this is a\nshort\nparagraph of\ntext.\n But this\nline should be\nindented',
)
def test_indent2(self):
self.assertEqual(
wordwrap('this is a short paragraph of text.\n But this line should be indented', 15),
'this is a short\nparagraph of\ntext.\n But this line\nshould be\nindented',
)
def test_non_string_input(self):
self.assertEqual(wordwrap(123, 2), '123')
def test_wrap_lazy_string(self):
self.assertEqual(
wordwrap(lazystr(
'this is a long paragraph of text that really needs to be wrapped I\'m afraid'
), 14),
'this is a long\nparagraph of\ntext that\nreally needs\nto be wrapped\nI\'m afraid',
)
| bsd-3-clause | -1,968,793,849,047,438,000 | 37.961538 | 105 | 0.621915 | false |
repotvsupertuga/tvsupertuga.repository | script.module.streamtvsupertuga/lib/resources/lib/modules/pyaes/util.py | 124 | 2032 | # The MIT License (MIT)
#
# Copyright (c) 2014 Richard Moore
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# Why to_bufferable?
# Python 3 is very different from Python 2.x when it comes to strings of text
# and strings of bytes; in Python 3, strings of bytes do not exist, instead to
# represent arbitrary binary data, we must use the "bytes" object. This method
# ensures the object behaves as we need it to.
def to_bufferable(binary):
return binary
def _get_byte(c):
return ord(c)
try:
xrange
except:
def to_bufferable(binary):
if isinstance(binary, bytes):
return binary
return bytes(ord(b) for b in binary)
def _get_byte(c):
return c
def append_PKCS7_padding(data):
pad = 16 - (len(data) % 16)
return data + to_bufferable(chr(pad) * pad)
def strip_PKCS7_padding(data):
if len(data) % 16 != 0:
raise ValueError("invalid length")
pad = _get_byte(data[-1])
if not pad or pad > 16:
return data
return data[:-pad]
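# Illustrative sketch (not part of the original module): PKCS#7 always pads up
# to the next 16-byte boundary, so stripping the padding recovers the exact
# original bytes.
if __name__ == '__main__':
    original = to_bufferable('secret message')
    padded = append_PKCS7_padding(original)
    assert len(padded) % 16 == 0
    assert strip_PKCS7_padding(padded) == original
    print('PKCS#7 padding round-trip OK')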
| gpl-2.0 | -2,545,621,783,375,186,400 | 32.866667 | 79 | 0.716043 | false |
cosenal/osf.io | website/addons/badges/model/badges.py | 35 | 5589 | # -*- coding: utf-8 -*-
import calendar
from bson import ObjectId
from datetime import datetime
from modularodm import fields, Q
from framework.mongo import StoredObject
from framework.guid.model import GuidStoredObject
from website.settings import DOMAIN
from website.util import web_url_for, api_url_for
from website.addons.badges.util import acquire_badge_image
class Badge(GuidStoredObject):
_id = fields.StringField(primary=True)
creator = fields.ForeignField('badgesusersettings', backref='creator')
is_system_badge = fields.BooleanField(default=False)
#Open Badge protocol
name = fields.StringField()
description = fields.StringField()
image = fields.StringField()
criteria = fields.StringField()
#TODO implement tags and alignment
alignment = fields.DictionaryField(list=True)
tags = fields.StringField(list=True)
@classmethod
def get_system_badges(cls):
return cls.find(Q('is_system_badge', 'eq', True))
@classmethod
def create(cls, user_settings, badge_data, save=True):
badge = cls()
badge.creator = user_settings
badge.name = badge_data['badgeName']
badge.description = badge_data['description']
badge.criteria = badge_data['criteria']
badge._ensure_guid()
badge.image = acquire_badge_image(badge_data['imageurl'], badge._id)
if not badge.image:
raise IOError
if save:
badge.save()
return badge
@property
def description_short(self):
words = self.description.split(' ')
if len(words) < 9:
return ' '.join(words)
return '{}...'.format(' '.join(words[:9]))
#TODO Auto link urls?
@property
def criteria_list(self):
tpl = '<ul>{}</ul>'
stpl = '<li>{}</li>'
lines = self.criteria.split('\n')
return tpl.format(' '.join([stpl.format(line) for line in lines if line])) # Please dont kill me Steve
@property
def assertions(self):
return self.badgeassertion__assertion
@property
def awarded_count(self):
return len(self.assertions)
@property
def unique_awards_count(self):
return len({assertion.node._id for assertion in self.assertions})
@property
def deep_url(self):
return web_url_for('view_badge', bid=self._id)
@property
def url(self):
return web_url_for('view_badge', bid=self._id)
def make_system_badge(self, save=True):
self.is_system_badge = True
self.save()
def to_json(self):
return {
'id': self._id,
'name': self.name,
'description': self.description,
'image': self.image,
'criteria': self.criteria,
'alignment': self.alignment,
'tags': self.tags,
}
def to_openbadge(self):
return {
'name': self.name,
'description': self.description,
'image': self.image,
'criteria': self.criteria,
'issuer': api_url_for('get_organization_json', _absolute=True, uid=self.creator.owner._id),
'url': '{0}{1}/json/'.format(DOMAIN, self._id), # web url for and GUIDs?
'alignment': self.alignment,
'tags': self.tags,
}
#TODO verification hosted and signed
class BadgeAssertion(StoredObject):
_id = fields.StringField(default=lambda: str(ObjectId()))
#Backrefs
badge = fields.ForeignField('badge', backref='assertion')
node = fields.ForeignField('node', backref='awarded')
_awarder = fields.ForeignField('badgesusersettings')
#Custom fields
revoked = fields.BooleanField(default=False)
reason = fields.StringField()
#Required
issued_on = fields.IntegerField(required=True)
#Optional
evidence = fields.StringField()
expires = fields.StringField()
@classmethod
def create(cls, badge, node, evidence=None, save=True, awarder=None):
b = cls()
b.badge = badge
b.node = node
b.evidence = evidence
b.issued_on = calendar.timegm(datetime.utctimetuple(datetime.utcnow()))
b._awarder = awarder
if save:
b.save()
return b
@property
def issued_date(self):
return datetime.fromtimestamp(self.issued_on).strftime('%Y/%m/%d')
@property
def verify(self, vtype='hosted'):
return {
'type': 'hosted',
'url': api_url_for('get_assertion_json', _absolute=True, aid=self._id)
}
@property
def recipient(self):
return {
            'identity': self.node._id,
'type': 'osfnode', # TODO Could be an email?
'hashed': False
}
@property
def awarder(self):
if self.badge.is_system_badge and self._awarder:
return self._awarder
return self.badge.creator
def to_json(self):
return {
'uid': self._id,
'recipient': self.node._id,
'badge': self.badge._id,
'verify': self.verify,
'issued_on': self.issued_date,
'evidence': self.evidence,
'expires': self.expires
}
def to_openbadge(self):
return {
'uid': self._id,
'recipient': self.recipient,
'badge': '{}{}/json/'.format(DOMAIN, self.badge._id), # GUIDs Web url for
'verify': self.verify,
'issuedOn': self.issued_on,
'evidence': self.evidence,
'expires': self.expires
}
| apache-2.0 | -9,024,829,034,988,827,000 | 27.661538 | 111 | 0.58973 | false |
idea4bsd/idea4bsd | python/helpers/pydev/third_party/pep8/autopep8.py | 34 | 125587 | #!/usr/bin/env python
#
# Copyright (C) 2010-2011 Hideo Hattori
# Copyright (C) 2011-2013 Hideo Hattori, Steven Myint
# Copyright (C) 2013-2015 Hideo Hattori, Steven Myint, Bill Wendling
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""Automatically formats Python code to conform to the PEP 8 style guide.
Fixes that only need be done once can be added by adding a function of the form
"fix_<code>(source)" to this module. They should return the fixed source code.
These fixes are picked up by apply_global_fixes().
Fixes that depend on pep8 should be added as methods to FixPEP8. See the class
documentation for more information.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import bisect
import codecs
import collections
import copy
import difflib
import fnmatch
import inspect
import io
import itertools
import keyword
import locale
import os
import re
import signal
import sys
import token
import tokenize
import pep8
def check_lib2to3():
try:
import lib2to3
except ImportError:
sys.path.append(os.path.join(os.path.dirname(__file__), 'lib2to3'))
import lib2to3
try:
unicode
except NameError:
unicode = str
__version__ = '1.1.2a0'
CR = '\r'
LF = '\n'
CRLF = '\r\n'
PYTHON_SHEBANG_REGEX = re.compile(r'^#!.*\bpython[23]?\b\s*$')
# For generating line shortening candidates.
SHORTEN_OPERATOR_GROUPS = frozenset([
frozenset([',']),
frozenset(['%']),
frozenset([',', '(', '[', '{']),
frozenset(['%', '(', '[', '{']),
frozenset([',', '(', '[', '{', '%', '+', '-', '*', '/', '//']),
frozenset(['%', '+', '-', '*', '/', '//']),
])
DEFAULT_IGNORE = 'E24'
DEFAULT_INDENT_SIZE = 4
# W602 is handled separately due to the need to avoid "with_traceback".
CODE_TO_2TO3 = {
'E231': ['ws_comma'],
'E721': ['idioms'],
'W601': ['has_key'],
'W603': ['ne'],
'W604': ['repr'],
'W690': ['apply',
'except',
'exitfunc',
'numliterals',
'operator',
'paren',
'reduce',
'renames',
'standarderror',
'sys_exc',
'throw',
'tuple_params',
'xreadlines']}
if sys.platform == 'win32': # pragma: no cover
DEFAULT_CONFIG = os.path.expanduser(r'~\.pep8')
else:
DEFAULT_CONFIG = os.path.join(os.getenv('XDG_CONFIG_HOME') or
os.path.expanduser('~/.config'), 'pep8')
PROJECT_CONFIG = ('setup.cfg', 'tox.ini', '.pep8')
def open_with_encoding(filename, encoding=None, mode='r'):
"""Return opened file with a specific encoding."""
if not encoding:
encoding = detect_encoding(filename)
return io.open(filename, mode=mode, encoding=encoding,
newline='') # Preserve line endings
def detect_encoding(filename):
"""Return file encoding."""
try:
with open(filename, 'rb') as input_file:
check_lib2to3()
from lib2to3.pgen2 import tokenize as lib2to3_tokenize
encoding = lib2to3_tokenize.detect_encoding(input_file.readline)[0]
# Check for correctness of encoding
with open_with_encoding(filename, encoding) as test_file:
test_file.read()
return encoding
except (LookupError, SyntaxError, UnicodeDecodeError):
return 'latin-1'
def readlines_from_file(filename):
"""Return contents of file."""
with open_with_encoding(filename) as input_file:
return input_file.readlines()
def extended_blank_lines(logical_line,
blank_lines,
blank_before,
indent_level,
previous_logical):
"""Check for missing blank lines after class declaration."""
if previous_logical.startswith('class '):
if logical_line.startswith(('def ', 'class ', '@')):
if indent_level and not blank_lines and not blank_before:
yield (0, 'E309 expected 1 blank line after class declaration')
elif previous_logical.startswith('def '):
if blank_lines and pep8.DOCSTRING_REGEX.match(logical_line):
yield (0, 'E303 too many blank lines ({0})'.format(blank_lines))
elif pep8.DOCSTRING_REGEX.match(previous_logical):
# Missing blank line between class docstring and method declaration.
if (
indent_level and
not blank_lines and
not blank_before and
logical_line.startswith(('def ')) and
'(self' in logical_line
):
yield (0, 'E301 expected 1 blank line, found 0')
pep8.register_check(extended_blank_lines)
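# Illustrative sketch (not part of autopep8): additional checks can be plugged
# in the same way -- a function whose first argument is pep8's logical_line (or
# physical_line) and which yields (offset, message) pairs, passed to
# pep8.register_check(), e.g.
#
#     def no_bare_print(logical_line):
#         if logical_line.startswith('print '):
#             yield (0, 'X100 print statement found')
#
#     pep8.register_check(no_bare_print)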
def continued_indentation(logical_line, tokens, indent_level, indent_char,
noqa):
"""Override pep8's function to provide indentation information."""
first_row = tokens[0][2][0]
nrows = 1 + tokens[-1][2][0] - first_row
if noqa or nrows == 1:
return
# indent_next tells us whether the next block is indented. Assuming
# that it is indented by 4 spaces, then we should not allow 4-space
# indents on the final continuation line. In turn, some other
# indents are allowed to have an extra 4 spaces.
indent_next = logical_line.endswith(':')
row = depth = 0
valid_hangs = (
(DEFAULT_INDENT_SIZE,)
if indent_char != '\t' else (DEFAULT_INDENT_SIZE,
2 * DEFAULT_INDENT_SIZE)
)
# Remember how many brackets were opened on each line.
parens = [0] * nrows
# Relative indents of physical lines.
rel_indent = [0] * nrows
# For each depth, collect a list of opening rows.
open_rows = [[0]]
# For each depth, memorize the hanging indentation.
hangs = [None]
# Visual indents.
indent_chances = {}
last_indent = tokens[0][2]
indent = [last_indent[1]]
last_token_multiline = None
line = None
last_line = ''
last_line_begins_with_multiline = False
for token_type, text, start, end, line in tokens:
newline = row < start[0] - first_row
if newline:
row = start[0] - first_row
newline = (not last_token_multiline and
token_type not in (tokenize.NL, tokenize.NEWLINE))
last_line_begins_with_multiline = last_token_multiline
if newline:
# This is the beginning of a continuation line.
last_indent = start
# Record the initial indent.
rel_indent[row] = pep8.expand_indent(line) - indent_level
# Identify closing bracket.
close_bracket = (token_type == tokenize.OP and text in ']})')
# Is the indent relative to an opening bracket line?
for open_row in reversed(open_rows[depth]):
hang = rel_indent[row] - rel_indent[open_row]
hanging_indent = hang in valid_hangs
if hanging_indent:
break
if hangs[depth]:
hanging_indent = (hang == hangs[depth])
visual_indent = (not close_bracket and hang > 0 and
indent_chances.get(start[1]))
if close_bracket and indent[depth]:
# Closing bracket for visual indent.
if start[1] != indent[depth]:
yield (start, 'E124 {0}'.format(indent[depth]))
elif close_bracket and not hang:
pass
elif indent[depth] and start[1] < indent[depth]:
# Visual indent is broken.
yield (start, 'E128 {0}'.format(indent[depth]))
elif (hanging_indent or
(indent_next and
rel_indent[row] == 2 * DEFAULT_INDENT_SIZE)):
# Hanging indent is verified.
if close_bracket:
yield (start, 'E123 {0}'.format(indent_level +
rel_indent[open_row]))
hangs[depth] = hang
elif visual_indent is True:
# Visual indent is verified.
indent[depth] = start[1]
elif visual_indent in (text, unicode):
# Ignore token lined up with matching one from a previous line.
pass
else:
one_indented = (indent_level + rel_indent[open_row] +
DEFAULT_INDENT_SIZE)
# Indent is broken.
if hang <= 0:
error = ('E122', one_indented)
elif indent[depth]:
error = ('E127', indent[depth])
elif hang > DEFAULT_INDENT_SIZE:
error = ('E126', one_indented)
else:
hangs[depth] = hang
error = ('E121', one_indented)
yield (start, '{0} {1}'.format(*error))
# Look for visual indenting.
if (
parens[row] and
token_type not in (tokenize.NL, tokenize.COMMENT) and
not indent[depth]
):
indent[depth] = start[1]
indent_chances[start[1]] = True
# Deal with implicit string concatenation.
elif (token_type in (tokenize.STRING, tokenize.COMMENT) or
text in ('u', 'ur', 'b', 'br')):
indent_chances[start[1]] = unicode
# Special case for the "if" statement because len("if (") is equal to
# 4.
elif not indent_chances and not row and not depth and text == 'if':
indent_chances[end[1] + 1] = True
elif text == ':' and line[end[1]:].isspace():
open_rows[depth].append(row)
# Keep track of bracket depth.
if token_type == tokenize.OP:
if text in '([{':
depth += 1
indent.append(0)
hangs.append(None)
if len(open_rows) == depth:
open_rows.append([])
open_rows[depth].append(row)
parens[row] += 1
elif text in ')]}' and depth > 0:
# Parent indents should not be more than this one.
prev_indent = indent.pop() or last_indent[1]
hangs.pop()
for d in range(depth):
if indent[d] > prev_indent:
indent[d] = 0
for ind in list(indent_chances):
if ind >= prev_indent:
del indent_chances[ind]
del open_rows[depth + 1:]
depth -= 1
if depth:
indent_chances[indent[depth]] = True
for idx in range(row, -1, -1):
if parens[idx]:
parens[idx] -= 1
break
assert len(indent) == depth + 1
if (
start[1] not in indent_chances and
# This is for purposes of speeding up E121 (GitHub #90).
not last_line.rstrip().endswith(',')
):
# Allow to line up tokens.
indent_chances[start[1]] = text
last_token_multiline = (start[0] != end[0])
if last_token_multiline:
rel_indent[end[0] - first_row] = rel_indent[row]
last_line = line
if (
indent_next and
not last_line_begins_with_multiline and
pep8.expand_indent(line) == indent_level + DEFAULT_INDENT_SIZE
):
pos = (start[0], indent[0] + 4)
yield (pos, 'E125 {0}'.format(indent_level +
2 * DEFAULT_INDENT_SIZE))
del pep8._checks['logical_line'][pep8.continued_indentation]
pep8.register_check(continued_indentation)
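# Added commentary: the check registered above mirrors pep8's own
# continued_indentation() logic, but every yielded message carries the target
# column as a second word (e.g. 'E128 8' rather than just 'E128').  The
# _fix_reindent() and fix_e125() methods below extract that number via
# int(result['info'].split()[1]).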
class FixPEP8(object):
"""Fix invalid code.
Fixer methods are prefixed "fix_". The _fix_source() method looks for these
automatically.
The fixer method can take either one or two arguments (in addition to
self). The first argument is "result", which is the error information from
pep8. The second argument, "logical", is required only for logical-line
fixes.
The fixer method can return the list of modified lines or None. An empty
list would mean that no changes were made. None would mean that only the
line reported in the pep8 error was modified. Note that the modified line
numbers that are returned are indexed at 1. This typically would correspond
with the line number reported in the pep8 error information.
[fixed method list]
- e121,e122,e123,e124,e125,e126,e127,e128,e129
- e201,e202,e203
- e211
- e221,e222,e223,e224,e225
- e231
- e251
- e261,e262
- e271,e272,e273,e274
- e301,e302,e303
- e401
- e502
- e701,e702
- e711
- w291
"""
def __init__(self, filename,
options,
contents=None,
long_line_ignore_cache=None):
self.filename = filename
if contents is None:
self.source = readlines_from_file(filename)
else:
sio = io.StringIO(contents)
self.source = sio.readlines()
self.options = options
self.indent_word = _get_indentword(''.join(self.source))
self.long_line_ignore_cache = (
set() if long_line_ignore_cache is None
else long_line_ignore_cache)
# Many fixers are the same even though pep8 categorizes them
# differently.
self.fix_e115 = self.fix_e112
self.fix_e116 = self.fix_e113
self.fix_e121 = self._fix_reindent
self.fix_e122 = self._fix_reindent
self.fix_e123 = self._fix_reindent
self.fix_e124 = self._fix_reindent
self.fix_e126 = self._fix_reindent
self.fix_e127 = self._fix_reindent
self.fix_e128 = self._fix_reindent
self.fix_e129 = self._fix_reindent
self.fix_e202 = self.fix_e201
self.fix_e203 = self.fix_e201
self.fix_e211 = self.fix_e201
self.fix_e221 = self.fix_e271
self.fix_e222 = self.fix_e271
self.fix_e223 = self.fix_e271
self.fix_e226 = self.fix_e225
self.fix_e227 = self.fix_e225
self.fix_e228 = self.fix_e225
self.fix_e241 = self.fix_e271
self.fix_e242 = self.fix_e224
self.fix_e261 = self.fix_e262
self.fix_e272 = self.fix_e271
self.fix_e273 = self.fix_e271
self.fix_e274 = self.fix_e271
self.fix_e309 = self.fix_e301
self.fix_e501 = (
self.fix_long_line_logically if
options and (options.aggressive >= 2 or options.experimental) else
self.fix_long_line_physically)
self.fix_e703 = self.fix_e702
self.fix_w293 = self.fix_w291
def _fix_source(self, results):
try:
(logical_start, logical_end) = _find_logical(self.source)
logical_support = True
except (SyntaxError, tokenize.TokenError): # pragma: no cover
logical_support = False
completed_lines = set()
for result in sorted(results, key=_priority_key):
if result['line'] in completed_lines:
continue
fixed_methodname = 'fix_' + result['id'].lower()
if hasattr(self, fixed_methodname):
fix = getattr(self, fixed_methodname)
line_index = result['line'] - 1
original_line = self.source[line_index]
is_logical_fix = len(inspect.getargspec(fix).args) > 2
if is_logical_fix:
logical = None
if logical_support:
logical = _get_logical(self.source,
result,
logical_start,
logical_end)
if logical and set(range(
logical[0][0] + 1,
logical[1][0] + 1)).intersection(
completed_lines):
continue
modified_lines = fix(result, logical)
else:
modified_lines = fix(result)
if modified_lines is None:
# Force logical fixes to report what they modified.
assert not is_logical_fix
if self.source[line_index] == original_line:
modified_lines = []
if modified_lines:
completed_lines.update(modified_lines)
elif modified_lines == []: # Empty list means no fix
if self.options.verbose >= 2:
print(
'---> Not fixing {f} on line {l}'.format(
f=result['id'], l=result['line']),
file=sys.stderr)
else: # We assume one-line fix when None.
completed_lines.add(result['line'])
else:
if self.options.verbose >= 3:
print(
"---> '{0}' is not defined.".format(fixed_methodname),
file=sys.stderr)
info = result['info'].strip()
print('---> {0}:{1}:{2}:{3}'.format(self.filename,
result['line'],
result['column'],
info),
file=sys.stderr)
def fix(self):
"""Return a version of the source code with PEP 8 violations fixed."""
pep8_options = {
'ignore': self.options.ignore,
'select': self.options.select,
'max_line_length': self.options.max_line_length,
}
results = _execute_pep8(pep8_options, self.source)
if self.options.verbose:
progress = {}
for r in results:
if r['id'] not in progress:
progress[r['id']] = set()
progress[r['id']].add(r['line'])
print('---> {n} issue(s) to fix {progress}'.format(
n=len(results), progress=progress), file=sys.stderr)
if self.options.line_range:
start, end = self.options.line_range
results = [r for r in results
if start <= r['line'] <= end]
self._fix_source(filter_results(source=''.join(self.source),
results=results,
aggressive=self.options.aggressive))
if self.options.line_range:
# If number of lines has changed then change line_range.
count = sum(sline.count('\n')
for sline in self.source[start - 1:end])
self.options.line_range[1] = start + count - 1
return ''.join(self.source)
def _fix_reindent(self, result):
"""Fix a badly indented line.
This is done by adding or removing from its initial indent only.
"""
num_indent_spaces = int(result['info'].split()[1])
line_index = result['line'] - 1
target = self.source[line_index]
self.source[line_index] = ' ' * num_indent_spaces + target.lstrip()
def fix_e112(self, result):
"""Fix under-indented comments."""
line_index = result['line'] - 1
target = self.source[line_index]
if not target.lstrip().startswith('#'):
# Don't screw with invalid syntax.
return []
self.source[line_index] = self.indent_word + target
def fix_e113(self, result):
"""Fix over-indented comments."""
line_index = result['line'] - 1
target = self.source[line_index]
indent = _get_indentation(target)
stripped = target.lstrip()
if not stripped.startswith('#'):
# Don't screw with invalid syntax.
return []
self.source[line_index] = indent[1:] + stripped
def fix_e125(self, result):
"""Fix indentation undistinguish from the next logical line."""
num_indent_spaces = int(result['info'].split()[1])
line_index = result['line'] - 1
target = self.source[line_index]
spaces_to_add = num_indent_spaces - len(_get_indentation(target))
indent = len(_get_indentation(target))
modified_lines = []
while len(_get_indentation(self.source[line_index])) >= indent:
self.source[line_index] = (' ' * spaces_to_add +
self.source[line_index])
modified_lines.append(1 + line_index) # Line indexed at 1.
line_index -= 1
return modified_lines
def fix_e201(self, result):
"""Remove extraneous whitespace."""
line_index = result['line'] - 1
target = self.source[line_index]
offset = result['column'] - 1
if is_probably_part_of_multiline(target):
return []
fixed = fix_whitespace(target,
offset=offset,
replacement='')
self.source[line_index] = fixed
def fix_e224(self, result):
"""Remove extraneous whitespace around operator."""
target = self.source[result['line'] - 1]
offset = result['column'] - 1
fixed = target[:offset] + target[offset:].replace('\t', ' ')
self.source[result['line'] - 1] = fixed
def fix_e225(self, result):
"""Fix missing whitespace around operator."""
target = self.source[result['line'] - 1]
offset = result['column'] - 1
fixed = target[:offset] + ' ' + target[offset:]
# Only proceed if non-whitespace characters match.
# And make sure we don't break the indentation.
if (
fixed.replace(' ', '') == target.replace(' ', '') and
_get_indentation(fixed) == _get_indentation(target)
):
self.source[result['line'] - 1] = fixed
else:
return []
def fix_e231(self, result):
"""Add missing whitespace."""
line_index = result['line'] - 1
target = self.source[line_index]
offset = result['column']
fixed = target[:offset] + ' ' + target[offset:]
self.source[line_index] = fixed
def fix_e251(self, result):
"""Remove whitespace around parameter '=' sign."""
line_index = result['line'] - 1
target = self.source[line_index]
        # This is necessary since pep8 sometimes reports columns that go
        # past the end of the physical line. This happens in cases like
        # foo(bar\n=None).
c = min(result['column'] - 1,
len(target) - 1)
if target[c].strip():
fixed = target
else:
fixed = target[:c].rstrip() + target[c:].lstrip()
# There could be an escaped newline
#
# def foo(a=\
# 1)
if fixed.endswith(('=\\\n', '=\\\r\n', '=\\\r')):
self.source[line_index] = fixed.rstrip('\n\r \t\\')
self.source[line_index + 1] = self.source[line_index + 1].lstrip()
return [line_index + 1, line_index + 2] # Line indexed at 1
self.source[result['line'] - 1] = fixed
def fix_e262(self, result):
"""Fix spacing after comment hash."""
target = self.source[result['line'] - 1]
offset = result['column']
code = target[:offset].rstrip(' \t#')
comment = target[offset:].lstrip(' \t#')
fixed = code + (' # ' + comment if comment.strip() else '\n')
self.source[result['line'] - 1] = fixed
def fix_e271(self, result):
"""Fix extraneous whitespace around keywords."""
line_index = result['line'] - 1
target = self.source[line_index]
offset = result['column'] - 1
if is_probably_part_of_multiline(target):
return []
fixed = fix_whitespace(target,
offset=offset,
replacement=' ')
if fixed == target:
return []
else:
self.source[line_index] = fixed
def fix_e301(self, result):
"""Add missing blank line."""
cr = '\n'
self.source[result['line'] - 1] = cr + self.source[result['line'] - 1]
def fix_e302(self, result):
"""Add missing 2 blank lines."""
add_linenum = 2 - int(result['info'].split()[-1])
cr = '\n' * add_linenum
self.source[result['line'] - 1] = cr + self.source[result['line'] - 1]
def fix_e303(self, result):
"""Remove extra blank lines."""
delete_linenum = int(result['info'].split('(')[1].split(')')[0]) - 2
delete_linenum = max(1, delete_linenum)
# We need to count because pep8 reports an offset line number if there
# are comments.
cnt = 0
line = result['line'] - 2
modified_lines = []
while cnt < delete_linenum and line >= 0:
if not self.source[line].strip():
self.source[line] = ''
modified_lines.append(1 + line) # Line indexed at 1
cnt += 1
line -= 1
return modified_lines
def fix_e304(self, result):
"""Remove blank line following function decorator."""
line = result['line'] - 2
if not self.source[line].strip():
self.source[line] = ''
def fix_e401(self, result):
"""Put imports on separate lines."""
line_index = result['line'] - 1
target = self.source[line_index]
offset = result['column'] - 1
if not target.lstrip().startswith('import'):
return []
indentation = re.split(pattern=r'\bimport\b',
string=target, maxsplit=1)[0]
fixed = (target[:offset].rstrip('\t ,') + '\n' +
indentation + 'import ' + target[offset:].lstrip('\t ,'))
self.source[line_index] = fixed
def fix_long_line_logically(self, result, logical):
"""Try to make lines fit within --max-line-length characters."""
if (
not logical or
len(logical[2]) == 1 or
self.source[result['line'] - 1].lstrip().startswith('#')
):
return self.fix_long_line_physically(result)
start_line_index = logical[0][0]
end_line_index = logical[1][0]
logical_lines = logical[2]
previous_line = get_item(self.source, start_line_index - 1, default='')
next_line = get_item(self.source, end_line_index + 1, default='')
single_line = join_logical_line(''.join(logical_lines))
try:
fixed = self.fix_long_line(
target=single_line,
previous_line=previous_line,
next_line=next_line,
original=''.join(logical_lines))
except (SyntaxError, tokenize.TokenError):
return self.fix_long_line_physically(result)
if fixed:
for line_index in range(start_line_index, end_line_index + 1):
self.source[line_index] = ''
self.source[start_line_index] = fixed
return range(start_line_index + 1, end_line_index + 1)
else:
return []
def fix_long_line_physically(self, result):
"""Try to make lines fit within --max-line-length characters."""
line_index = result['line'] - 1
target = self.source[line_index]
previous_line = get_item(self.source, line_index - 1, default='')
next_line = get_item(self.source, line_index + 1, default='')
try:
fixed = self.fix_long_line(
target=target,
previous_line=previous_line,
next_line=next_line,
original=target)
except (SyntaxError, tokenize.TokenError):
return []
if fixed:
self.source[line_index] = fixed
return [line_index + 1]
else:
return []
def fix_long_line(self, target, previous_line,
next_line, original):
cache_entry = (target, previous_line, next_line)
if cache_entry in self.long_line_ignore_cache:
return []
if target.lstrip().startswith('#'):
# Wrap commented lines.
return shorten_comment(
line=target,
max_line_length=self.options.max_line_length,
last_comment=not next_line.lstrip().startswith('#'))
fixed = get_fixed_long_line(
target=target,
previous_line=previous_line,
original=original,
indent_word=self.indent_word,
max_line_length=self.options.max_line_length,
aggressive=self.options.aggressive,
experimental=self.options.experimental,
verbose=self.options.verbose)
if fixed and not code_almost_equal(original, fixed):
return fixed
else:
self.long_line_ignore_cache.add(cache_entry)
return None
def fix_e502(self, result):
"""Remove extraneous escape of newline."""
line_index = result['line'] - 1
target = self.source[line_index]
self.source[line_index] = target.rstrip('\n\r \t\\') + '\n'
def fix_e701(self, result):
"""Put colon-separated compound statement on separate lines."""
line_index = result['line'] - 1
target = self.source[line_index]
c = result['column']
fixed_source = (target[:c] + '\n' +
_get_indentation(target) + self.indent_word +
target[c:].lstrip('\n\r \t\\'))
self.source[result['line'] - 1] = fixed_source
return [result['line'], result['line'] + 1]
def fix_e702(self, result, logical):
"""Put semicolon-separated compound statement on separate lines."""
if not logical:
return [] # pragma: no cover
logical_lines = logical[2]
line_index = result['line'] - 1
target = self.source[line_index]
if target.rstrip().endswith('\\'):
# Normalize '1; \\\n2' into '1; 2'.
self.source[line_index] = target.rstrip('\n \r\t\\')
self.source[line_index + 1] = self.source[line_index + 1].lstrip()
return [line_index + 1, line_index + 2]
if target.rstrip().endswith(';'):
self.source[line_index] = target.rstrip('\n \r\t;') + '\n'
return [line_index + 1]
offset = result['column'] - 1
first = target[:offset].rstrip(';').rstrip()
second = (_get_indentation(logical_lines[0]) +
target[offset:].lstrip(';').lstrip())
        # Find the inline comment, if any.
inline_comment = None
if '# ' == target[offset:].lstrip(';').lstrip()[:2]:
inline_comment = target[offset:].lstrip(';')
if inline_comment:
self.source[line_index] = first + inline_comment
else:
self.source[line_index] = first + '\n' + second
return [line_index + 1]
def fix_e711(self, result):
"""Fix comparison with None."""
line_index = result['line'] - 1
target = self.source[line_index]
offset = result['column'] - 1
right_offset = offset + 2
if right_offset >= len(target):
return []
left = target[:offset].rstrip()
center = target[offset:right_offset]
right = target[right_offset:].lstrip()
if not right.startswith('None'):
return []
if center.strip() == '==':
new_center = 'is'
elif center.strip() == '!=':
new_center = 'is not'
else:
return []
self.source[line_index] = ' '.join([left, new_center, right])
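    # Worked example (added for illustration): for the line 'if x == None:\n'
    # pep8 reports E711 at the column of the comparison operator, so offset
    # lands on '=='.  left becomes 'if x', center '==', right 'None:\n', and
    # the rewritten line is 'if x is None:\n'.  '!=' is rewritten to 'is not'
    # the same way.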
def fix_e712(self, result):
"""Fix comparison with boolean."""
line_index = result['line'] - 1
target = self.source[line_index]
offset = result['column'] - 1
# Handle very easy "not" special cases.
if re.match(r'^\s*if \w+ == False:$', target):
self.source[line_index] = re.sub(r'if (\w+) == False:',
r'if not \1:', target, count=1)
elif re.match(r'^\s*if \w+ != True:$', target):
self.source[line_index] = re.sub(r'if (\w+) != True:',
r'if not \1:', target, count=1)
else:
right_offset = offset + 2
if right_offset >= len(target):
return []
left = target[:offset].rstrip()
center = target[offset:right_offset]
right = target[right_offset:].lstrip()
# Handle simple cases only.
new_right = None
if center.strip() == '==':
if re.match(r'\bTrue\b', right):
new_right = re.sub(r'\bTrue\b *', '', right, count=1)
elif center.strip() == '!=':
if re.match(r'\bFalse\b', right):
new_right = re.sub(r'\bFalse\b *', '', right, count=1)
if new_right is None:
return []
if new_right[0].isalnum():
new_right = ' ' + new_right
self.source[line_index] = left + new_right
def fix_e713(self, result):
"""Fix non-membership check."""
line_index = result['line'] - 1
target = self.source[line_index]
# Handle very easy case only.
if re.match(r'^\s*if not \w+ in \w+:$', target):
self.source[line_index] = re.sub(r'if not (\w+) in (\w+):',
r'if \1 not in \2:',
target,
count=1)
def fix_w291(self, result):
"""Remove trailing whitespace."""
fixed_line = self.source[result['line'] - 1].rstrip()
self.source[result['line'] - 1] = fixed_line + '\n'
def get_fixed_long_line(target, previous_line, original,
indent_word=' ', max_line_length=79,
aggressive=False, experimental=False, verbose=False):
"""Break up long line and return result.
Do this by generating multiple reformatted candidates and then
ranking the candidates to heuristically select the best option.
"""
indent = _get_indentation(target)
source = target[len(indent):]
assert source.lstrip() == source
# Check for partial multiline.
tokens = list(generate_tokens(source))
candidates = shorten_line(
tokens, source, indent,
indent_word,
max_line_length,
aggressive=aggressive,
experimental=experimental,
previous_line=previous_line)
# Also sort alphabetically as a tie breaker (for determinism).
candidates = sorted(
sorted(set(candidates).union([target, original])),
key=lambda x: line_shortening_rank(x,
indent_word,
max_line_length,
experimental))
if verbose >= 4:
print(('-' * 79 + '\n').join([''] + candidates + ['']),
file=wrap_output(sys.stderr, 'utf-8'))
if candidates:
return candidates[0]
def join_logical_line(logical_line):
"""Return single line based on logical line input."""
indentation = _get_indentation(logical_line)
return indentation + untokenize_without_newlines(
generate_tokens(logical_line.lstrip())) + '\n'
def untokenize_without_newlines(tokens):
"""Return source code based on tokens."""
text = ''
last_row = 0
last_column = -1
for t in tokens:
token_string = t[1]
(start_row, start_column) = t[2]
(end_row, end_column) = t[3]
if start_row > last_row:
last_column = 0
if (
(start_column > last_column or token_string == '\n') and
not text.endswith(' ')
):
text += ' '
if token_string != '\n':
text += token_string
last_row = end_row
last_column = end_column
return text
def _find_logical(source_lines):
    # Collect the zero-based (row, column) positions where each logical line
    # starts and ends.
logical_start = []
logical_end = []
last_newline = True
parens = 0
for t in generate_tokens(''.join(source_lines)):
if t[0] in [tokenize.COMMENT, tokenize.DEDENT,
tokenize.INDENT, tokenize.NL,
tokenize.ENDMARKER]:
continue
if not parens and t[0] in [tokenize.NEWLINE, tokenize.SEMI]:
last_newline = True
logical_end.append((t[3][0] - 1, t[2][1]))
continue
if last_newline and not parens:
logical_start.append((t[2][0] - 1, t[2][1]))
last_newline = False
if t[0] == tokenize.OP:
if t[1] in '([{':
parens += 1
elif t[1] in '}])':
parens -= 1
return (logical_start, logical_end)
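# Worked example (added for illustration): for the source 'x = 1\ny = 2\n',
# _find_logical() returns ([(0, 0), (1, 0)], [(0, 5), (1, 5)]): zero-based
# (row, column) pairs marking where each logical line begins and ends.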
def _get_logical(source_lines, result, logical_start, logical_end):
"""Return the logical line corresponding to the result.
Assumes input is already E702-clean.
"""
row = result['line'] - 1
col = result['column'] - 1
ls = None
le = None
for i in range(0, len(logical_start), 1):
assert logical_end
x = logical_end[i]
if x[0] > row or (x[0] == row and x[1] > col):
le = x
ls = logical_start[i]
break
if ls is None:
return None
original = source_lines[ls[0]:le[0] + 1]
return ls, le, original
def get_item(items, index, default=None):
if 0 <= index < len(items):
return items[index]
else:
return default
def reindent(source, indent_size):
"""Reindent all lines."""
reindenter = Reindenter(source)
return reindenter.run(indent_size)
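# Worked example (added for illustration):
#
#     >>> reindent('if True:\n  x = 1\n', indent_size=4)
#     'if True:\n    x = 1\n'
#
# Reindenter (defined further below) tokenizes the source, records the indent
# level of each statement, and rewrites the leading whitespace accordingly.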
def code_almost_equal(a, b):
"""Return True if code is similar.
Ignore whitespace when comparing specific line.
"""
split_a = split_and_strip_non_empty_lines(a)
split_b = split_and_strip_non_empty_lines(b)
if len(split_a) != len(split_b):
return False
for index in range(len(split_a)):
if ''.join(split_a[index].split()) != ''.join(split_b[index].split()):
return False
return True
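# Worked example (added for illustration):
#
#     >>> code_almost_equal('x = [1,\n    2]', 'x = [1,\n  2]')
#     True
#
# Empty lines are dropped and per-line whitespace is ignored, so only a
# difference in the number of non-empty lines or in the non-whitespace
# characters makes this return False.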
def split_and_strip_non_empty_lines(text):
"""Return lines split by newline.
Ignore empty lines.
"""
return [line.strip() for line in text.splitlines() if line.strip()]
def fix_e265(source, aggressive=False): # pylint: disable=unused-argument
"""Format block comments."""
if '#' not in source:
# Optimization.
return source
ignored_line_numbers = multiline_string_lines(
source,
include_docstrings=True) | set(commented_out_code_lines(source))
fixed_lines = []
sio = io.StringIO(source)
for (line_number, line) in enumerate(sio.readlines(), start=1):
if (
line.lstrip().startswith('#') and
line_number not in ignored_line_numbers
):
indentation = _get_indentation(line)
line = line.lstrip()
# Normalize beginning if not a shebang.
if len(line) > 1:
pos = next((index for index, c in enumerate(line)
if c != '#'))
if (
# Leave multiple spaces like '# ' alone.
(line[:pos].count('#') > 1 or line[1].isalnum()) and
# Leave stylistic outlined blocks alone.
not line.rstrip().endswith('#')
):
line = '# ' + line.lstrip('# \t')
fixed_lines.append(indentation + line)
else:
fixed_lines.append(line)
return ''.join(fixed_lines)
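# Worked example (added for illustration):
#
#     >>> fix_e265('#comment\n')
#     '# comment\n'
#
# Lines inside multiline strings and comments that look like commented-out
# code are left untouched.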
def refactor(source, fixer_names, ignore=None, filename=''):
"""Return refactored code using lib2to3.
    Skip the refactoring if the ignore string appears in the refactored code
    but not in the original source.
"""
check_lib2to3()
from lib2to3 import pgen2
try:
new_text = refactor_with_2to3(source,
fixer_names=fixer_names,
filename=filename)
except (pgen2.parse.ParseError,
SyntaxError,
UnicodeDecodeError,
UnicodeEncodeError):
return source
if ignore:
if ignore in new_text and ignore not in source:
return source
return new_text
def code_to_2to3(select, ignore):
fixes = set()
for code, fix in CODE_TO_2TO3.items():
if code_match(code, select=select, ignore=ignore):
fixes |= set(fix)
return fixes
def fix_2to3(source,
aggressive=True, select=None, ignore=None, filename=''):
"""Fix various deprecated code (via lib2to3)."""
if not aggressive:
return source
select = select or []
ignore = ignore or []
return refactor(source,
code_to_2to3(select=select,
ignore=ignore),
filename=filename)
def fix_w602(source, aggressive=True):
"""Fix deprecated form of raising exception."""
if not aggressive:
return source
return refactor(source, ['raise'],
ignore='with_traceback')
def find_newline(source):
"""Return type of newline used in source.
Input is a list of lines.
"""
assert not isinstance(source, unicode)
counter = collections.defaultdict(int)
for line in source:
if line.endswith(CRLF):
counter[CRLF] += 1
elif line.endswith(CR):
counter[CR] += 1
elif line.endswith(LF):
counter[LF] += 1
return (sorted(counter, key=counter.get, reverse=True) or [LF])[0]
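# Worked example (added for illustration; CR, LF and CRLF are the newline
# constants defined earlier in this module):
#
#     >>> find_newline(['a\n', 'b\r\n', 'c\r\n'])
#     '\r\n'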
def _get_indentword(source):
"""Return indentation type."""
    indent_word = '    '  # Default in case source has no indentation
try:
for t in generate_tokens(source):
if t[0] == token.INDENT:
indent_word = t[1]
break
except (SyntaxError, tokenize.TokenError):
pass
return indent_word
def _get_indentation(line):
"""Return leading whitespace."""
if line.strip():
non_whitespace_index = len(line) - len(line.lstrip())
return line[:non_whitespace_index]
else:
return ''
def get_diff_text(old, new, filename):
"""Return text of unified diff between old and new."""
newline = '\n'
diff = difflib.unified_diff(
old, new,
'original/' + filename,
'fixed/' + filename,
lineterm=newline)
text = ''
for line in diff:
text += line
# Work around missing newline (http://bugs.python.org/issue2142).
if text and not line.endswith(newline):
text += newline + r'\ No newline at end of file' + newline
return text
def _priority_key(pep8_result):
"""Key for sorting PEP8 results.
Global fixes should be done first. This is important for things like
indentation.
"""
priority = [
        # Fix multiline colon-based statements before semicolon-based ones.
'e701',
# Break multiline statements early.
'e702',
# Things that make lines longer.
'e225', 'e231',
# Remove extraneous whitespace before breaking lines.
'e201',
# Shorten whitespace in comment before resorting to wrapping.
'e262'
]
middle_index = 10000
lowest_priority = [
# We need to shorten lines last since the logical fixer can get in a
# loop, which causes us to exit early.
'e501'
]
key = pep8_result['id'].lower()
try:
return priority.index(key)
except ValueError:
try:
return middle_index + lowest_priority.index(key) + 1
except ValueError:
return middle_index
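# Worked example (added for illustration): with the lists above,
# _priority_key({'id': 'E701'}) == 0 (fixed first),
# _priority_key({'id': 'E302'}) == 10000 (the default middle priority), and
# _priority_key({'id': 'E501'}) == 10001 (line shortening runs last).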
def shorten_line(tokens, source, indentation, indent_word, max_line_length,
aggressive=False, experimental=False, previous_line=''):
"""Separate line at OPERATOR.
Multiple candidates will be yielded.
"""
for candidate in _shorten_line(tokens=tokens,
source=source,
indentation=indentation,
indent_word=indent_word,
aggressive=aggressive,
previous_line=previous_line):
yield candidate
if aggressive:
for key_token_strings in SHORTEN_OPERATOR_GROUPS:
shortened = _shorten_line_at_tokens(
tokens=tokens,
source=source,
indentation=indentation,
indent_word=indent_word,
key_token_strings=key_token_strings,
aggressive=aggressive)
if shortened is not None and shortened != source:
yield shortened
if experimental:
for shortened in _shorten_line_at_tokens_new(
tokens=tokens,
source=source,
indentation=indentation,
max_line_length=max_line_length):
yield shortened
def _shorten_line(tokens, source, indentation, indent_word,
aggressive=False, previous_line=''):
"""Separate line at OPERATOR.
The input is expected to be free of newlines except for inside multiline
strings and at the end.
Multiple candidates will be yielded.
"""
for (token_type,
token_string,
start_offset,
end_offset) in token_offsets(tokens):
if (
token_type == tokenize.COMMENT and
not is_probably_part_of_multiline(previous_line) and
not is_probably_part_of_multiline(source) and
not source[start_offset + 1:].strip().lower().startswith(
('noqa', 'pragma:', 'pylint:'))
):
# Move inline comments to previous line.
first = source[:start_offset]
second = source[start_offset:]
yield (indentation + second.strip() + '\n' +
indentation + first.strip() + '\n')
elif token_type == token.OP and token_string != '=':
# Don't break on '=' after keyword as this violates PEP 8.
assert token_type != token.INDENT
first = source[:end_offset]
second_indent = indentation
if first.rstrip().endswith('('):
second_indent += indent_word
elif '(' in first:
second_indent += ' ' * (1 + first.find('('))
else:
second_indent += indent_word
second = (second_indent + source[end_offset:].lstrip())
if (
not second.strip() or
second.lstrip().startswith('#')
):
continue
# Do not begin a line with a comma
if second.lstrip().startswith(','):
continue
            # Do not end a line with a dot
if first.rstrip().endswith('.'):
continue
if token_string in '+-*/':
fixed = first + ' \\' + '\n' + second
else:
fixed = first + '\n' + second
# Only fix if syntax is okay.
if check_syntax(normalize_multiline(fixed)
if aggressive else fixed):
yield indentation + fixed
# A convenient way to handle tokens.
Token = collections.namedtuple('Token', ['token_type', 'token_string',
'spos', 'epos', 'line'])
class ReformattedLines(object):
"""The reflowed lines of atoms.
Each part of the line is represented as an "atom." They can be moved
around when need be to get the optimal formatting.
"""
###########################################################################
# Private Classes
class _Indent(object):
"""Represent an indentation in the atom stream."""
def __init__(self, indent_amt):
self._indent_amt = indent_amt
def emit(self):
return ' ' * self._indent_amt
@property
def size(self):
return self._indent_amt
class _Space(object):
"""Represent a space in the atom stream."""
def emit(self):
return ' '
@property
def size(self):
return 1
class _LineBreak(object):
"""Represent a line break in the atom stream."""
def emit(self):
return '\n'
@property
def size(self):
return 0
def __init__(self, max_line_length):
self._max_line_length = max_line_length
self._lines = []
self._bracket_depth = 0
self._prev_item = None
self._prev_prev_item = None
def __repr__(self):
return self.emit()
###########################################################################
# Public Methods
def add(self, obj, indent_amt, break_after_open_bracket):
if isinstance(obj, Atom):
self._add_item(obj, indent_amt)
return
self._add_container(obj, indent_amt, break_after_open_bracket)
def add_comment(self, item):
num_spaces = 2
if len(self._lines) > 1:
if isinstance(self._lines[-1], self._Space):
num_spaces -= 1
if len(self._lines) > 2:
if isinstance(self._lines[-2], self._Space):
num_spaces -= 1
while num_spaces > 0:
self._lines.append(self._Space())
num_spaces -= 1
self._lines.append(item)
def add_indent(self, indent_amt):
self._lines.append(self._Indent(indent_amt))
def add_line_break(self, indent):
self._lines.append(self._LineBreak())
self.add_indent(len(indent))
def add_line_break_at(self, index, indent_amt):
self._lines.insert(index, self._LineBreak())
self._lines.insert(index + 1, self._Indent(indent_amt))
def add_space_if_needed(self, curr_text, equal=False):
if (
not self._lines or isinstance(
self._lines[-1], (self._LineBreak, self._Indent, self._Space))
):
return
prev_text = unicode(self._prev_item)
prev_prev_text = (
unicode(self._prev_prev_item) if self._prev_prev_item else '')
if (
# The previous item was a keyword or identifier and the current
# item isn't an operator that doesn't require a space.
((self._prev_item.is_keyword or self._prev_item.is_string or
self._prev_item.is_name or self._prev_item.is_number) and
(curr_text[0] not in '([{.,:}])' or
(curr_text[0] == '=' and equal))) or
# Don't place spaces around a '.', unless it's in an 'import'
# statement.
((prev_prev_text != 'from' and prev_text[-1] != '.' and
curr_text != 'import') and
# Don't place a space before a colon.
curr_text[0] != ':' and
# Don't split up ending brackets by spaces.
((prev_text[-1] in '}])' and curr_text[0] not in '.,}])') or
# Put a space after a colon or comma.
prev_text[-1] in ':,' or
# Put space around '=' if asked to.
(equal and prev_text == '=') or
# Put spaces around non-unary arithmetic operators.
((self._prev_prev_item and
(prev_text not in '+-' and
(self._prev_prev_item.is_name or
self._prev_prev_item.is_number or
self._prev_prev_item.is_string)) and
prev_text in ('+', '-', '%', '*', '/', '//', '**', 'in')))))
):
self._lines.append(self._Space())
def previous_item(self):
"""Return the previous non-whitespace item."""
return self._prev_item
def fits_on_current_line(self, item_extent):
return self.current_size() + item_extent <= self._max_line_length
def current_size(self):
"""The size of the current line minus the indentation."""
size = 0
for item in reversed(self._lines):
size += item.size
if isinstance(item, self._LineBreak):
break
return size
def line_empty(self):
return (self._lines and
isinstance(self._lines[-1],
(self._LineBreak, self._Indent)))
def emit(self):
string = ''
for item in self._lines:
if isinstance(item, self._LineBreak):
string = string.rstrip()
string += item.emit()
return string.rstrip() + '\n'
###########################################################################
# Private Methods
def _add_item(self, item, indent_amt):
"""Add an item to the line.
Reflow the line to get the best formatting after the item is
inserted. The bracket depth indicates if the item is being
inserted inside of a container or not.
"""
if self._prev_item and self._prev_item.is_string and item.is_string:
# Place consecutive string literals on separate lines.
self._lines.append(self._LineBreak())
self._lines.append(self._Indent(indent_amt))
item_text = unicode(item)
if self._lines and self._bracket_depth:
# Adding the item into a container.
self._prevent_default_initializer_splitting(item, indent_amt)
if item_text in '.,)]}':
self._split_after_delimiter(item, indent_amt)
elif self._lines and not self.line_empty():
# Adding the item outside of a container.
if self.fits_on_current_line(len(item_text)):
self._enforce_space(item)
else:
# Line break for the new item.
self._lines.append(self._LineBreak())
self._lines.append(self._Indent(indent_amt))
self._lines.append(item)
self._prev_item, self._prev_prev_item = item, self._prev_item
if item_text in '([{':
self._bracket_depth += 1
elif item_text in '}])':
self._bracket_depth -= 1
assert self._bracket_depth >= 0
def _add_container(self, container, indent_amt, break_after_open_bracket):
actual_indent = indent_amt + 1
if (
unicode(self._prev_item) != '=' and
not self.line_empty() and
not self.fits_on_current_line(
container.size + self._bracket_depth + 2)
):
if unicode(container)[0] == '(' and self._prev_item.is_name:
# Don't split before the opening bracket of a call.
break_after_open_bracket = True
actual_indent = indent_amt + 4
elif (
break_after_open_bracket or
unicode(self._prev_item) not in '([{'
):
# If the container doesn't fit on the current line and the
# current line isn't empty, place the container on the next
# line.
self._lines.append(self._LineBreak())
self._lines.append(self._Indent(indent_amt))
break_after_open_bracket = False
else:
actual_indent = self.current_size() + 1
break_after_open_bracket = False
if isinstance(container, (ListComprehension, IfExpression)):
actual_indent = indent_amt
# Increase the continued indentation only if recursing on a
# container.
container.reflow(self, ' ' * actual_indent,
break_after_open_bracket=break_after_open_bracket)
def _prevent_default_initializer_splitting(self, item, indent_amt):
"""Prevent splitting between a default initializer.
When there is a default initializer, it's best to keep it all on
the same line. It's nicer and more readable, even if it goes
over the maximum allowable line length. This goes back along the
current line to determine if we have a default initializer, and,
if so, to remove extraneous whitespaces and add a line
break/indent before it if needed.
"""
if unicode(item) == '=':
# This is the assignment in the initializer. Just remove spaces for
# now.
self._delete_whitespace()
return
if (not self._prev_item or not self._prev_prev_item or
unicode(self._prev_item) != '='):
return
self._delete_whitespace()
prev_prev_index = self._lines.index(self._prev_prev_item)
if (
isinstance(self._lines[prev_prev_index - 1], self._Indent) or
self.fits_on_current_line(item.size + 1)
):
# The default initializer is already the only item on this line.
# Don't insert a newline here.
return
# Replace the space with a newline/indent combo.
if isinstance(self._lines[prev_prev_index - 1], self._Space):
del self._lines[prev_prev_index - 1]
self.add_line_break_at(self._lines.index(self._prev_prev_item),
indent_amt)
def _split_after_delimiter(self, item, indent_amt):
"""Split the line only after a delimiter."""
self._delete_whitespace()
if self.fits_on_current_line(item.size):
return
last_space = None
for item in reversed(self._lines):
if (
last_space and
(not isinstance(item, Atom) or not item.is_colon)
):
break
else:
last_space = None
if isinstance(item, self._Space):
last_space = item
if isinstance(item, (self._LineBreak, self._Indent)):
return
if not last_space:
return
self.add_line_break_at(self._lines.index(last_space), indent_amt)
def _enforce_space(self, item):
"""Enforce a space in certain situations.
There are cases where we will want a space where normally we
wouldn't put one. This just enforces the addition of a space.
"""
if isinstance(self._lines[-1],
(self._Space, self._LineBreak, self._Indent)):
return
if not self._prev_item:
return
item_text = unicode(item)
prev_text = unicode(self._prev_item)
# Prefer a space around a '.' in an import statement, and between the
# 'import' and '('.
if (
(item_text == '.' and prev_text == 'from') or
(item_text == 'import' and prev_text == '.') or
(item_text == '(' and prev_text == 'import')
):
self._lines.append(self._Space())
def _delete_whitespace(self):
"""Delete all whitespace from the end of the line."""
while isinstance(self._lines[-1], (self._Space, self._LineBreak,
self._Indent)):
del self._lines[-1]
class Atom(object):
"""The smallest unbreakable unit that can be reflowed."""
def __init__(self, atom):
self._atom = atom
def __repr__(self):
return self._atom.token_string
def __len__(self):
return self.size
def reflow(
self, reflowed_lines, continued_indent, extent,
break_after_open_bracket=False,
is_list_comp_or_if_expr=False,
next_is_dot=False
):
if self._atom.token_type == tokenize.COMMENT:
reflowed_lines.add_comment(self)
return
total_size = extent if extent else self.size
if self._atom.token_string not in ',:([{}])':
# Some atoms will need an extra 1-sized space token after them.
total_size += 1
prev_item = reflowed_lines.previous_item()
if (
not is_list_comp_or_if_expr and
not reflowed_lines.fits_on_current_line(total_size) and
not (next_is_dot and
reflowed_lines.fits_on_current_line(self.size + 1)) and
not reflowed_lines.line_empty() and
not self.is_colon and
not (prev_item and prev_item.is_name and
unicode(self) == '(')
):
# Start a new line if there is already something on the line and
# adding this atom would make it go over the max line length.
reflowed_lines.add_line_break(continued_indent)
else:
reflowed_lines.add_space_if_needed(unicode(self))
reflowed_lines.add(self, len(continued_indent),
break_after_open_bracket)
def emit(self):
return self.__repr__()
@property
def is_keyword(self):
return keyword.iskeyword(self._atom.token_string)
@property
def is_string(self):
return self._atom.token_type == tokenize.STRING
@property
def is_name(self):
return self._atom.token_type == tokenize.NAME
@property
def is_number(self):
return self._atom.token_type == tokenize.NUMBER
@property
def is_comma(self):
return self._atom.token_string == ','
@property
def is_colon(self):
return self._atom.token_string == ':'
@property
def size(self):
return len(self._atom.token_string)
class Container(object):
"""Base class for all container types."""
def __init__(self, items):
self._items = items
def __repr__(self):
string = ''
last_was_keyword = False
for item in self._items:
if item.is_comma:
string += ', '
elif item.is_colon:
string += ': '
else:
item_string = unicode(item)
if (
string and
(last_was_keyword or
(not string.endswith(tuple('([{,.:}]) ')) and
not item_string.startswith(tuple('([{,.:}])'))))
):
string += ' '
string += item_string
last_was_keyword = item.is_keyword
return string
def __iter__(self):
for element in self._items:
yield element
def __getitem__(self, idx):
return self._items[idx]
def reflow(self, reflowed_lines, continued_indent,
break_after_open_bracket=False):
last_was_container = False
for (index, item) in enumerate(self._items):
next_item = get_item(self._items, index + 1)
if isinstance(item, Atom):
is_list_comp_or_if_expr = (
isinstance(self, (ListComprehension, IfExpression)))
item.reflow(reflowed_lines, continued_indent,
self._get_extent(index),
is_list_comp_or_if_expr=is_list_comp_or_if_expr,
next_is_dot=(next_item and
unicode(next_item) == '.'))
if last_was_container and item.is_comma:
reflowed_lines.add_line_break(continued_indent)
last_was_container = False
else: # isinstance(item, Container)
reflowed_lines.add(item, len(continued_indent),
break_after_open_bracket)
last_was_container = not isinstance(item, (ListComprehension,
IfExpression))
if (
break_after_open_bracket and index == 0 and
# Prefer to keep empty containers together instead of
# separating them.
unicode(item) == self.open_bracket and
(not next_item or unicode(next_item) != self.close_bracket) and
(len(self._items) != 3 or not isinstance(next_item, Atom))
):
reflowed_lines.add_line_break(continued_indent)
break_after_open_bracket = False
else:
next_next_item = get_item(self._items, index + 2)
if (
unicode(item) not in ['.', '%', 'in'] and
next_item and not isinstance(next_item, Container) and
unicode(next_item) != ':' and
next_next_item and (not isinstance(next_next_item, Atom) or
unicode(next_item) == 'not') and
not reflowed_lines.line_empty() and
not reflowed_lines.fits_on_current_line(
self._get_extent(index + 1) + 2)
):
reflowed_lines.add_line_break(continued_indent)
def _get_extent(self, index):
"""The extent of the full element.
E.g., the length of a function call or keyword.
"""
extent = 0
prev_item = get_item(self._items, index - 1)
seen_dot = prev_item and unicode(prev_item) == '.'
while index < len(self._items):
item = get_item(self._items, index)
index += 1
if isinstance(item, (ListComprehension, IfExpression)):
break
if isinstance(item, Container):
if prev_item and prev_item.is_name:
if seen_dot:
extent += 1
else:
extent += item.size
prev_item = item
continue
elif (unicode(item) not in ['.', '=', ':', 'not'] and
not item.is_name and not item.is_string):
break
if unicode(item) == '.':
seen_dot = True
extent += item.size
prev_item = item
return extent
@property
def is_string(self):
return False
@property
def size(self):
return len(self.__repr__())
@property
def is_keyword(self):
return False
@property
def is_name(self):
return False
@property
def is_comma(self):
return False
@property
def is_colon(self):
return False
@property
def open_bracket(self):
return None
@property
def close_bracket(self):
return None
class Tuple(Container):
"""A high-level representation of a tuple."""
@property
def open_bracket(self):
return '('
@property
def close_bracket(self):
return ')'
class List(Container):
"""A high-level representation of a list."""
@property
def open_bracket(self):
return '['
@property
def close_bracket(self):
return ']'
class DictOrSet(Container):
"""A high-level representation of a dictionary or set."""
@property
def open_bracket(self):
return '{'
@property
def close_bracket(self):
return '}'
class ListComprehension(Container):
"""A high-level representation of a list comprehension."""
@property
def size(self):
length = 0
for item in self._items:
if isinstance(item, IfExpression):
break
length += item.size
return length
class IfExpression(Container):
"""A high-level representation of an if-expression."""
def _parse_container(tokens, index, for_or_if=None):
"""Parse a high-level container, such as a list, tuple, etc."""
# Store the opening bracket.
items = [Atom(Token(*tokens[index]))]
index += 1
num_tokens = len(tokens)
while index < num_tokens:
tok = Token(*tokens[index])
if tok.token_string in ',)]}':
# First check if we're at the end of a list comprehension or
# if-expression. Don't add the ending token as part of the list
# comprehension or if-expression, because they aren't part of those
# constructs.
if for_or_if == 'for':
return (ListComprehension(items), index - 1)
elif for_or_if == 'if':
return (IfExpression(items), index - 1)
# We've reached the end of a container.
items.append(Atom(tok))
            # Otherwise, decide which kind of container just ended.
if tok.token_string == ')':
# The end of a tuple.
return (Tuple(items), index)
elif tok.token_string == ']':
# The end of a list.
return (List(items), index)
elif tok.token_string == '}':
# The end of a dictionary or set.
return (DictOrSet(items), index)
elif tok.token_string in '([{':
# A sub-container is being defined.
(container, index) = _parse_container(tokens, index)
items.append(container)
elif tok.token_string == 'for':
(container, index) = _parse_container(tokens, index, 'for')
items.append(container)
elif tok.token_string == 'if':
(container, index) = _parse_container(tokens, index, 'if')
items.append(container)
else:
items.append(Atom(tok))
index += 1
return (None, None)
def _parse_tokens(tokens):
"""Parse the tokens.
This converts the tokens into a form where we can manipulate them
more easily.
"""
index = 0
parsed_tokens = []
num_tokens = len(tokens)
while index < num_tokens:
tok = Token(*tokens[index])
assert tok.token_type != token.INDENT
if tok.token_type == tokenize.NEWLINE:
# There's only one newline and it's at the end.
break
if tok.token_string in '([{':
(container, index) = _parse_container(tokens, index)
if not container:
return None
parsed_tokens.append(container)
else:
parsed_tokens.append(Atom(tok))
index += 1
return parsed_tokens
def _reflow_lines(parsed_tokens, indentation, max_line_length,
start_on_prefix_line):
"""Reflow the lines so that it looks nice."""
if unicode(parsed_tokens[0]) == 'def':
# A function definition gets indented a bit more.
continued_indent = indentation + ' ' * 2 * DEFAULT_INDENT_SIZE
else:
continued_indent = indentation + ' ' * DEFAULT_INDENT_SIZE
break_after_open_bracket = not start_on_prefix_line
lines = ReformattedLines(max_line_length)
lines.add_indent(len(indentation.lstrip('\r\n')))
if not start_on_prefix_line:
# If splitting after the opening bracket will cause the first element
# to be aligned weirdly, don't try it.
first_token = get_item(parsed_tokens, 0)
second_token = get_item(parsed_tokens, 1)
if (
first_token and second_token and
unicode(second_token)[0] == '(' and
len(indentation) + len(first_token) + 1 == len(continued_indent)
):
return None
for item in parsed_tokens:
lines.add_space_if_needed(unicode(item), equal=True)
save_continued_indent = continued_indent
if start_on_prefix_line and isinstance(item, Container):
start_on_prefix_line = False
continued_indent = ' ' * (lines.current_size() + 1)
item.reflow(lines, continued_indent, break_after_open_bracket)
continued_indent = save_continued_indent
return lines.emit()
def _shorten_line_at_tokens_new(tokens, source, indentation,
max_line_length):
"""Shorten the line taking its length into account.
The input is expected to be free of newlines except for inside
multiline strings and at the end.
"""
    # Yield the original source first so that it can compete with the
    # shortened candidates generated below.
yield indentation + source
parsed_tokens = _parse_tokens(tokens)
if parsed_tokens:
# Perform two reflows. The first one starts on the same line as the
# prefix. The second starts on the line after the prefix.
fixed = _reflow_lines(parsed_tokens, indentation, max_line_length,
start_on_prefix_line=True)
if fixed and check_syntax(normalize_multiline(fixed.lstrip())):
yield fixed
fixed = _reflow_lines(parsed_tokens, indentation, max_line_length,
start_on_prefix_line=False)
if fixed and check_syntax(normalize_multiline(fixed.lstrip())):
yield fixed
def _shorten_line_at_tokens(tokens, source, indentation, indent_word,
key_token_strings, aggressive):
"""Separate line by breaking at tokens in key_token_strings.
The input is expected to be free of newlines except for inside
multiline strings and at the end.
"""
offsets = []
for (index, _t) in enumerate(token_offsets(tokens)):
(token_type,
token_string,
start_offset,
end_offset) = _t
assert token_type != token.INDENT
if token_string in key_token_strings:
# Do not break in containers with zero or one items.
unwanted_next_token = {
'(': ')',
'[': ']',
'{': '}'}.get(token_string)
if unwanted_next_token:
if (
get_item(tokens,
index + 1,
default=[None, None])[1] == unwanted_next_token or
get_item(tokens,
index + 2,
default=[None, None])[1] == unwanted_next_token
):
continue
if (
index > 2 and token_string == '(' and
tokens[index - 1][1] in ',(%['
):
# Don't split after a tuple start, or before a tuple start if
# the tuple is in a list.
continue
if end_offset < len(source) - 1:
# Don't split right before newline.
offsets.append(end_offset)
else:
# Break at adjacent strings. These were probably meant to be on
# separate lines in the first place.
previous_token = get_item(tokens, index - 1)
if (
token_type == tokenize.STRING and
previous_token and previous_token[0] == tokenize.STRING
):
offsets.append(start_offset)
current_indent = None
fixed = None
for line in split_at_offsets(source, offsets):
if fixed:
fixed += '\n' + current_indent + line
for symbol in '([{':
if line.endswith(symbol):
current_indent += indent_word
else:
# First line.
fixed = line
assert not current_indent
current_indent = indent_word
assert fixed is not None
if check_syntax(normalize_multiline(fixed)
if aggressive > 1 else fixed):
return indentation + fixed
else:
return None
def token_offsets(tokens):
"""Yield tokens and offsets."""
end_offset = 0
previous_end_row = 0
previous_end_column = 0
for t in tokens:
token_type = t[0]
token_string = t[1]
(start_row, start_column) = t[2]
(end_row, end_column) = t[3]
# Account for the whitespace between tokens.
end_offset += start_column
if previous_end_row == start_row:
end_offset -= previous_end_column
# Record the start offset of the token.
start_offset = end_offset
# Account for the length of the token itself.
end_offset += len(token_string)
yield (token_type,
token_string,
start_offset,
end_offset)
previous_end_row = end_row
previous_end_column = end_column
def normalize_multiline(line):
"""Normalize multiline-related code that will cause syntax error.
This is for purposes of checking syntax.
"""
if line.startswith('def ') and line.rstrip().endswith(':'):
return line + ' pass'
elif line.startswith('return '):
return 'def _(): ' + line
elif line.startswith('@'):
return line + 'def _(): pass'
elif line.startswith('class '):
return line + ' pass'
elif line.startswith(('if ', 'elif ', 'for ', 'while ')):
return line + ' pass'
else:
return line
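# Worked examples (added for illustration):
#
#     >>> normalize_multiline('def foo(x):')
#     'def foo(x): pass'
#     >>> normalize_multiline('return x + y')
#     'def _(): return x + y'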
def fix_whitespace(line, offset, replacement):
"""Replace whitespace at offset and return fixed line."""
# Replace escaped newlines too
left = line[:offset].rstrip('\n\r \t\\')
right = line[offset:].lstrip('\n\r \t\\')
if right.startswith('#'):
return line
else:
return left + replacement + right
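# Worked example (added for illustration):
#
#     >>> fix_whitespace('x  = 1', offset=1, replacement=' ')
#     'x = 1'
#
# If the text to the right of the offset starts a comment, the line is
# returned unchanged.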
def _execute_pep8(pep8_options, source):
"""Execute pep8 via python method calls."""
class QuietReport(pep8.BaseReport):
"""Version of checker that does not print."""
def __init__(self, options):
super(QuietReport, self).__init__(options)
self.__full_error_results = []
def error(self, line_number, offset, text, _):
"""Collect errors."""
code = super(QuietReport, self).error(line_number, offset, text, _)
if code:
self.__full_error_results.append(
{'id': code,
'line': line_number,
'column': offset + 1,
'info': text})
def full_error_results(self):
"""Return error results in detail.
Results are in the form of a list of dictionaries. Each
dictionary contains 'id', 'line', 'column', and 'info'.
"""
return self.__full_error_results
checker = pep8.Checker('', lines=source,
reporter=QuietReport, **pep8_options)
checker.check_all()
return checker.report.full_error_results()
def _remove_leading_and_normalize(line):
return line.lstrip().rstrip(CR + LF) + '\n'
class Reindenter(object):
"""Reindents badly-indented code to uniformly use four-space indentation.
Released to the public domain, by Tim Peters, 03 October 2000.
"""
def __init__(self, input_text):
sio = io.StringIO(input_text)
source_lines = sio.readlines()
self.string_content_line_numbers = multiline_string_lines(input_text)
# File lines, rstripped & tab-expanded. Dummy at start is so
# that we can use tokenize's 1-based line numbering easily.
# Note that a line is all-blank iff it is a newline.
self.lines = []
for line_number, line in enumerate(source_lines, start=1):
# Do not modify if inside a multiline string.
if line_number in self.string_content_line_numbers:
self.lines.append(line)
else:
# Only expand leading tabs.
self.lines.append(_get_indentation(line).expandtabs() +
_remove_leading_and_normalize(line))
self.lines.insert(0, None)
self.index = 1 # index into self.lines of next line
self.input_text = input_text
def run(self, indent_size=DEFAULT_INDENT_SIZE):
"""Fix indentation and return modified line numbers.
Line numbers are indexed at 1.
"""
if indent_size < 1:
return self.input_text
try:
stats = _reindent_stats(tokenize.generate_tokens(self.getline))
except (SyntaxError, tokenize.TokenError):
return self.input_text
# Remove trailing empty lines.
lines = self.lines
while lines and lines[-1] == '\n':
lines.pop()
# Sentinel.
stats.append((len(lines), 0))
        # Map each count of leading spaces to the count we want.
have2want = {}
# Program after transformation.
after = []
# Copy over initial empty lines -- there's nothing to do until
# we see a line with *something* on it.
i = stats[0][0]
after.extend(lines[1:i])
for i in range(len(stats) - 1):
thisstmt, thislevel = stats[i]
nextstmt = stats[i + 1][0]
have = _leading_space_count(lines[thisstmt])
want = thislevel * indent_size
if want < 0:
# A comment line.
if have:
# An indented comment line. If we saw the same
# indentation before, reuse what it most recently
# mapped to.
want = have2want.get(have, -1)
if want < 0:
# Then it probably belongs to the next real stmt.
for j in range(i + 1, len(stats) - 1):
jline, jlevel = stats[j]
if jlevel >= 0:
if have == _leading_space_count(lines[jline]):
want = jlevel * indent_size
break
if want < 0: # Maybe it's a hanging
# comment like this one,
# in which case we should shift it like its base
# line got shifted.
for j in range(i - 1, -1, -1):
jline, jlevel = stats[j]
if jlevel >= 0:
want = (have + _leading_space_count(
after[jline - 1]) -
_leading_space_count(lines[jline]))
break
if want < 0:
# Still no luck -- leave it alone.
want = have
else:
want = 0
assert want >= 0
have2want[have] = want
diff = want - have
if diff == 0 or have == 0:
after.extend(lines[thisstmt:nextstmt])
else:
for line_number, line in enumerate(lines[thisstmt:nextstmt],
start=thisstmt):
if line_number in self.string_content_line_numbers:
after.append(line)
elif diff > 0:
if line == '\n':
after.append(line)
else:
after.append(' ' * diff + line)
else:
remove = min(_leading_space_count(line), -diff)
after.append(line[remove:])
return ''.join(after)
def getline(self):
"""Line-getter for tokenize."""
if self.index >= len(self.lines):
line = ''
else:
line = self.lines[self.index]
self.index += 1
return line
def _reindent_stats(tokens):
"""Return list of (lineno, indentlevel) pairs.
One for each stmt and comment line. indentlevel is -1 for comment lines, as
a signal that tokenize doesn't know what to do about them; indeed, they're
our headache!
"""
find_stmt = 1 # Next token begins a fresh stmt?
level = 0 # Current indent level.
stats = []
for t in tokens:
token_type = t[0]
sline = t[2][0]
line = t[4]
if token_type == tokenize.NEWLINE:
# A program statement, or ENDMARKER, will eventually follow,
# after some (possibly empty) run of tokens of the form
# (NL | COMMENT)* (INDENT | DEDENT+)?
find_stmt = 1
elif token_type == tokenize.INDENT:
find_stmt = 1
level += 1
elif token_type == tokenize.DEDENT:
find_stmt = 1
level -= 1
elif token_type == tokenize.COMMENT:
if find_stmt:
stats.append((sline, -1))
# But we're still looking for a new stmt, so leave
# find_stmt alone.
elif token_type == tokenize.NL:
pass
elif find_stmt:
# This is the first "real token" following a NEWLINE, so it
# must be the first token of the next program statement, or an
# ENDMARKER.
find_stmt = 0
if line: # Not endmarker.
stats.append((sline, level))
return stats
def _leading_space_count(line):
"""Return number of leading spaces in line."""
i = 0
while i < len(line) and line[i] == ' ':
i += 1
return i
def refactor_with_2to3(source_text, fixer_names, filename=''):
"""Use lib2to3 to refactor the source.
Return the refactored source code.
"""
check_lib2to3()
from lib2to3.refactor import RefactoringTool
fixers = ['lib2to3.fixes.fix_' + name for name in fixer_names]
tool = RefactoringTool(fixer_names=fixers, explicit=fixers)
from lib2to3.pgen2 import tokenize as lib2to3_tokenize
try:
# The name parameter is necessary particularly for the "import" fixer.
return unicode(tool.refactor_string(source_text, name=filename))
except lib2to3_tokenize.TokenError:
return source_text
def check_syntax(code):
"""Return True if syntax is okay."""
try:
return compile(code, '<string>', 'exec')
except (SyntaxError, TypeError, UnicodeDecodeError):
return False
def filter_results(source, results, aggressive):
"""Filter out spurious reports from pep8.
    If aggressive is enabled (non-zero), we allow possibly unsafe fixes
    (E711, E712).
"""
non_docstring_string_line_numbers = multiline_string_lines(
source, include_docstrings=False)
all_string_line_numbers = multiline_string_lines(
source, include_docstrings=True)
commented_out_code_line_numbers = commented_out_code_lines(source)
has_e901 = any(result['id'].lower() == 'e901' for result in results)
for r in results:
issue_id = r['id'].lower()
if r['line'] in non_docstring_string_line_numbers:
if issue_id.startswith(('e1', 'e501', 'w191')):
continue
if r['line'] in all_string_line_numbers:
if issue_id in ['e501']:
continue
# We must offset by 1 for lines that contain the trailing contents of
# multiline strings.
if not aggressive and (r['line'] + 1) in all_string_line_numbers:
            # Do not modify multiline strings in non-aggressive mode.
            # Removing trailing whitespace could break doctests.
if issue_id.startswith(('w29', 'w39')):
continue
if aggressive <= 0:
if issue_id.startswith(('e711', 'w6')):
continue
if aggressive <= 1:
if issue_id.startswith(('e712', 'e713')):
continue
if r['line'] in commented_out_code_line_numbers:
if issue_id.startswith(('e26', 'e501')):
continue
# Do not touch indentation if there is a token error caused by
# incomplete multi-line statement. Otherwise, we risk screwing up the
# indentation.
if has_e901:
if issue_id.startswith(('e1', 'e7')):
continue
yield r
def multiline_string_lines(source, include_docstrings=False):
"""Return line numbers that are within multiline strings.
The line numbers are indexed at 1.
  Docstrings are ignored unless include_docstrings is True.
"""
line_numbers = set()
previous_token_type = ''
try:
for t in generate_tokens(source):
token_type = t[0]
start_row = t[2][0]
end_row = t[3][0]
if token_type == tokenize.STRING and start_row != end_row:
if (
include_docstrings or
previous_token_type != tokenize.INDENT
):
# We increment by one since we want the contents of the
# string.
line_numbers |= set(range(1 + start_row, 1 + end_row))
previous_token_type = token_type
except (SyntaxError, tokenize.TokenError):
pass
return line_numbers
def commented_out_code_lines(source):
"""Return line numbers of comments that are likely code.
Commented-out code is bad practice, but modifying it just adds even more
clutter.
"""
line_numbers = []
try:
for t in generate_tokens(source):
token_type = t[0]
token_string = t[1]
start_row = t[2][0]
line = t[4]
# Ignore inline comments.
if not line.lstrip().startswith('#'):
continue
if token_type == tokenize.COMMENT:
stripped_line = token_string.lstrip('#').strip()
if (
' ' in stripped_line and
'#' not in stripped_line and
check_syntax(stripped_line)
):
line_numbers.append(start_row)
except (SyntaxError, tokenize.TokenError):
pass
return line_numbers
def shorten_comment(line, max_line_length, last_comment=False):
"""Return trimmed or split long comment line.
If there are no comments immediately following it, do a text wrap.
Doing this wrapping on all comments in general would lead to jagged
comment text.
"""
assert len(line) > max_line_length
line = line.rstrip()
# PEP 8 recommends 72 characters for comment text.
indentation = _get_indentation(line) + '# '
max_line_length = min(max_line_length,
len(indentation) + 72)
MIN_CHARACTER_REPEAT = 5
if (
len(line) - len(line.rstrip(line[-1])) >= MIN_CHARACTER_REPEAT and
not line[-1].isalnum()
):
# Trim comments that end with things like ---------
return line[:max_line_length] + '\n'
elif last_comment and re.match(r'\s*#+\s*\w+', line):
import textwrap
split_lines = textwrap.wrap(line.lstrip(' \t#'),
initial_indent=indentation,
subsequent_indent=indentation,
width=max_line_length,
break_long_words=False,
break_on_hyphens=False)
return '\n'.join(split_lines) + '\n'
else:
return line + '\n'
def normalize_line_endings(lines, newline):
"""Return fixed line endings.
All lines will be modified to use the most common line ending.
"""
return [line.rstrip('\n\r') + newline for line in lines]
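# Editor's note: a small worked example for normalize_line_endings (values
# assumed, not from the original source):
#   normalize_line_endings(['a\r\n', 'b\r', 'c\n'], '\n')  ->  ['a\n', 'b\n', 'c\n']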
def mutual_startswith(a, b):
return b.startswith(a) or a.startswith(b)
def code_match(code, select, ignore):
if ignore:
assert not isinstance(ignore, unicode)
for ignored_code in [c.strip() for c in ignore]:
if mutual_startswith(code.lower(), ignored_code.lower()):
return False
if select:
assert not isinstance(select, unicode)
for selected_code in [c.strip() for c in select]:
if mutual_startswith(code.lower(), selected_code.lower()):
return True
return False
return True
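# Editor's note: hypothetical examples showing how code_match combines
# mutual_startswith with --select/--ignore (not part of the original source):
#   code_match('E501', select=[], ignore=['E5'])  ->  False  (ignored by prefix)
#   code_match('E101', select=['E1'], ignore=[])  ->  True   (selected by prefix)
#   code_match('W291', select=['E1'], ignore=[])  ->  False  (not selected)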
def fix_code(source, options=None, encoding=None, apply_config=False):
"""Return fixed source code.
"encoding" will be used to decode "source" if it is a byte string.
"""
if not options:
options = parse_args([''], apply_config=apply_config)
if not isinstance(source, unicode):
source = source.decode(encoding or get_encoding())
sio = io.StringIO(source)
return fix_lines(sio.readlines(), options=options)
def fix_lines(source_lines, options, filename=''):
"""Return fixed source code."""
# Transform everything to line feed. Then change them back to original
# before returning fixed source code.
original_newline = find_newline(source_lines)
tmp_source = ''.join(normalize_line_endings(source_lines, '\n'))
# Keep a history to break out of cycles.
previous_hashes = set()
if options.line_range:
fixed_source = apply_local_fixes(tmp_source, options)
else:
# Apply global fixes only once (for efficiency).
fixed_source = apply_global_fixes(tmp_source,
options,
filename=filename)
passes = 0
long_line_ignore_cache = set()
while hash(fixed_source) not in previous_hashes:
if options.pep8_passes >= 0 and passes > options.pep8_passes:
break
passes += 1
previous_hashes.add(hash(fixed_source))
tmp_source = copy.copy(fixed_source)
fix = FixPEP8(
filename,
options,
contents=tmp_source,
long_line_ignore_cache=long_line_ignore_cache)
fixed_source = fix.fix()
sio = io.StringIO(fixed_source)
return ''.join(normalize_line_endings(sio.readlines(), original_newline))
def fix_file(filename, options=None, output=None, apply_config=False):
if not options:
options = parse_args([filename], apply_config=apply_config)
original_source = readlines_from_file(filename)
fixed_source = original_source
if options.in_place or output:
encoding = detect_encoding(filename)
if output:
output = LineEndingWrapper(wrap_output(output, encoding=encoding))
fixed_source = fix_lines(fixed_source, options, filename=filename)
if options.diff:
new = io.StringIO(fixed_source)
new = new.readlines()
diff = get_diff_text(original_source, new, filename)
if output:
output.write(diff)
output.flush()
else:
return diff
elif options.in_place:
fp = open_with_encoding(filename, encoding=encoding,
mode='w')
fp.write(fixed_source)
fp.close()
else:
if output:
output.write(fixed_source)
output.flush()
else:
return fixed_source
def global_fixes():
"""Yield multiple (code, function) tuples."""
for function in list(globals().values()):
if inspect.isfunction(function):
arguments = inspect.getargspec(function)[0]
if arguments[:1] != ['source']:
continue
code = extract_code_from_function(function)
if code:
yield (code, function)
def apply_global_fixes(source, options, where='global', filename=''):
"""Run global fixes on source code.
These are fixes that only need be done once (unlike those in
FixPEP8, which are dependent on pep8).
"""
if any(code_match(code, select=options.select, ignore=options.ignore)
for code in ['E101', 'E111']):
source = reindent(source,
indent_size=options.indent_size)
for (code, function) in global_fixes():
if code_match(code, select=options.select, ignore=options.ignore):
if options.verbose:
print('---> Applying {0} fix for {1}'.format(where,
code.upper()),
file=sys.stderr)
source = function(source,
aggressive=options.aggressive)
source = fix_2to3(source,
aggressive=options.aggressive,
select=options.select,
ignore=options.ignore,
filename=filename)
return source
def apply_local_fixes(source, options):
"""Ananologus to apply_global_fixes, but runs only those which makes sense
for the given line_range.
Do as much as we can without breaking code.
"""
def find_ge(a, x):
"""Find leftmost item greater than or equal to x."""
i = bisect.bisect_left(a, x)
if i != len(a):
return (i, a[i])
return (len(a) - 1, a[-1])
def find_le(a, x):
"""Find rightmost value less than or equal to x."""
i = bisect.bisect_right(a, x)
if i:
return (i - 1, a[i - 1])
return (0, a[0])
def local_fix(source, start_log, end_log,
start_lines, end_lines, indents, last_line):
"""apply_global_fixes to the source between start_log and end_log.
The subsource must be the correct syntax of a complete python program
(but all lines may share an indentation). The subsource's shared indent
is removed, fixes are applied and the indent prepended back. Taking
care to not reindent strings.
last_line is the strict cut off (options.line_range[1]), so that
lines after last_line are not modified.
"""
if end_log < start_log:
return source
ind = indents[start_log]
indent = _get_indentation(source[start_lines[start_log]])
sl = slice(start_lines[start_log], end_lines[end_log] + 1)
subsource = source[sl]
msl = multiline_string_lines(''.join(subsource),
include_docstrings=False)
# Remove indent from subsource.
if ind:
for line_no in start_lines[start_log:end_log + 1]:
pos = line_no - start_lines[start_log]
subsource[pos] = subsource[pos][ind:]
# Remove indent from comments.
for (i, line) in enumerate(subsource):
if i + 1 not in msl and re.match(r'\s*#', line):
if line.index('#') >= ind:
subsource[i] = line[ind:]
# Fix indentation of subsource.
fixed_subsource = apply_global_fixes(''.join(subsource),
options,
where='local')
fixed_subsource = fixed_subsource.splitlines(True)
# Add back indent for non multi-line strings lines.
msl = multiline_string_lines(''.join(fixed_subsource),
include_docstrings=False)
for (i, line) in enumerate(fixed_subsource):
if not i + 1 in msl:
fixed_subsource[i] = indent + line if line != '\n' else line
# We make a special case to look at the final line, if it's a multiline
# *and* the cut off is somewhere inside it, we take the fixed
# subset up until last_line, this assumes that the number of lines
# does not change in this multiline line.
changed_lines = len(fixed_subsource)
if (
start_lines[end_log] != end_lines[end_log] and
end_lines[end_log] > last_line
):
after_end = end_lines[end_log] - last_line
fixed_subsource = (fixed_subsource[:-after_end] +
source[sl][-after_end:])
changed_lines -= after_end
options.line_range[1] = (options.line_range[0] +
changed_lines - 1)
return (source[:start_lines[start_log]] +
fixed_subsource +
source[end_lines[end_log] + 1:])
def is_continued_stmt(line,
continued_stmts=frozenset(['else', 'elif',
'finally', 'except'])):
return re.split('[ :]', line.strip(), 1)[0] in continued_stmts
assert options.line_range
(start, end) = options.line_range
start -= 1
end -= 1
last_line = end # We shouldn't modify lines after this cut-off.
try:
logical = _find_logical(source)
except (SyntaxError, tokenize.TokenError):
return ''.join(source)
if not logical[0]:
# Just blank lines, this should imply that it will become '\n' ?
return apply_global_fixes(source, options)
(start_lines, indents) = zip(*logical[0])
(end_lines, _) = zip(*logical[1])
source = source.splitlines(True)
(start_log, start) = find_ge(start_lines, start)
(end_log, end) = find_le(start_lines, end)
# Look behind one line, if it's indented less than current indent
# then we can move to this previous line knowing that its
# indentation level will not be changed.
if (
start_log > 0 and
indents[start_log - 1] < indents[start_log] and
not is_continued_stmt(source[start_log - 1])
):
start_log -= 1
start = start_lines[start_log]
while start < end:
if is_continued_stmt(source[start]):
start_log += 1
start = start_lines[start_log]
continue
ind = indents[start_log]
for t in itertools.takewhile(lambda t: t[1][1] >= ind,
enumerate(logical[0][start_log:])):
(n_log, n) = start_log + t[0], t[1][0]
# Start shares indent up to n.
if n <= end:
source = local_fix(source, start_log, n_log,
start_lines, end_lines,
indents, last_line)
start_log = n_log if n == end else n_log + 1
start = start_lines[start_log]
continue
else:
# Look at the line after end and see if allows us to reindent.
(after_end_log, after_end) = find_ge(start_lines, end + 1)
if indents[after_end_log] > indents[start_log]:
(start_log, start) = find_ge(start_lines, start + 1)
continue
if (
indents[after_end_log] == indents[start_log] and
is_continued_stmt(source[after_end])
):
# Find n, the beginning of the last continued statement.
# Apply fix to previous block if there is one.
only_block = True
for n, n_ind in logical[0][start_log:end_log + 1][::-1]:
if n_ind == ind and not is_continued_stmt(source[n]):
n_log = start_lines.index(n)
source = local_fix(source, start_log, n_log - 1,
start_lines, end_lines,
indents, last_line)
start_log = n_log + 1
start = start_lines[start_log]
only_block = False
break
if only_block:
(end_log, end) = find_le(start_lines, end - 1)
continue
source = local_fix(source, start_log, end_log,
start_lines, end_lines,
indents, last_line)
break
return ''.join(source)
def extract_code_from_function(function):
"""Return code handled by function."""
if not function.__name__.startswith('fix_'):
return None
code = re.sub('^fix_', '', function.__name__)
if not code:
return None
try:
int(code[1:])
except ValueError:
return None
return code
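# Editor's note: assumed behaviour sketch for extract_code_from_function --
# a function named fix_e712 yields 'e712', a function named fix_long_line
# yields None ('ong_line' is not an integer), and names that do not start
# with 'fix_' yield None.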
def create_parser():
"""Return command-line parser."""
# Do import locally to be friendly to those who use autopep8 as a library
# and are supporting Python 2.6.
import argparse
parser = argparse.ArgumentParser(description=docstring_summary(__doc__),
prog='autopep8')
parser.add_argument('--version', action='version',
version='%(prog)s ' + __version__)
parser.add_argument('-v', '--verbose', action='count', dest='verbose',
default=0,
help='print verbose messages; '
'multiple -v result in more verbose messages')
parser.add_argument('-d', '--diff', action='store_true', dest='diff',
help='print the diff for the fixed source')
parser.add_argument('-i', '--in-place', action='store_true',
help='make changes to files in place')
parser.add_argument('--global-config', metavar='filename',
default=DEFAULT_CONFIG,
help='path to a global pep8 config file; if this file '
'does not exist then this is ignored '
'(default: {0})'.format(DEFAULT_CONFIG))
parser.add_argument('--ignore-local-config', action='store_true',
help="don't look for and apply local config files; "
'if not passed, defaults are updated with any '
"config files in the project's root directory")
parser.add_argument('-r', '--recursive', action='store_true',
help='run recursively over directories; '
'must be used with --in-place or --diff')
parser.add_argument('-j', '--jobs', type=int, metavar='n', default=1,
help='number of parallel jobs; '
'match CPU count if value is less than 1')
parser.add_argument('-p', '--pep8-passes', metavar='n',
default=-1, type=int,
help='maximum number of additional pep8 passes '
'(default: infinite)')
parser.add_argument('-a', '--aggressive', action='count', default=0,
help='enable non-whitespace changes; '
'multiple -a result in more aggressive changes')
parser.add_argument('--experimental', action='store_true',
help='enable experimental fixes')
parser.add_argument('--exclude', metavar='globs',
help='exclude file/directory names that match these '
'comma-separated globs')
parser.add_argument('--list-fixes', action='store_true',
help='list codes for fixes; '
'used by --ignore and --select')
parser.add_argument('--ignore', metavar='errors', default='',
help='do not fix these errors/warnings '
'(default: {0})'.format(DEFAULT_IGNORE))
parser.add_argument('--select', metavar='errors', default='',
help='fix only these errors/warnings (e.g. E4,W)')
parser.add_argument('--max-line-length', metavar='n', default=79, type=int,
help='set maximum allowed line length '
'(default: %(default)s)')
parser.add_argument('--range', metavar='line', dest='line_range',
default=None, type=int, nargs=2,
help='only fix errors found within this inclusive '
'range of line numbers (e.g. 1 99); '
'line numbers are indexed at 1')
parser.add_argument('--indent-size', default=DEFAULT_INDENT_SIZE,
type=int, metavar='n',
help='number of spaces per indent level '
'(default %(default)s)')
parser.add_argument('files', nargs='*',
help="files to format or '-' for standard in")
return parser
def parse_args(arguments, apply_config=False):
"""Parse command-line options."""
parser = create_parser()
args = parser.parse_args(arguments)
if not args.files and not args.list_fixes:
parser.error('incorrect number of arguments')
args.files = [decode_filename(name) for name in args.files]
if apply_config:
parser = read_config(args, parser)
args = parser.parse_args(arguments)
args.files = [decode_filename(name) for name in args.files]
if '-' in args.files:
if len(args.files) > 1:
parser.error('cannot mix stdin and regular files')
if args.diff:
parser.error('--diff cannot be used with standard input')
if args.in_place:
parser.error('--in-place cannot be used with standard input')
if args.recursive:
parser.error('--recursive cannot be used with standard input')
if len(args.files) > 1 and not (args.in_place or args.diff):
parser.error('autopep8 only takes one filename as argument '
'unless the "--in-place" or "--diff" args are '
'used')
if args.recursive and not (args.in_place or args.diff):
parser.error('--recursive must be used with --in-place or --diff')
if args.exclude and not args.recursive:
parser.error('--exclude is only relevant when used with --recursive')
if args.in_place and args.diff:
parser.error('--in-place and --diff are mutually exclusive')
if args.max_line_length <= 0:
parser.error('--max-line-length must be greater than 0')
if args.select:
args.select = _split_comma_separated(args.select)
if args.ignore:
args.ignore = _split_comma_separated(args.ignore)
elif not args.select:
if args.aggressive:
# Enable everything by default if aggressive.
args.select = ['E', 'W']
else:
args.ignore = _split_comma_separated(DEFAULT_IGNORE)
if args.exclude:
args.exclude = _split_comma_separated(args.exclude)
else:
args.exclude = []
if args.jobs < 1:
# Do not import multiprocessing globally in case it is not supported
# on the platform.
import multiprocessing
args.jobs = multiprocessing.cpu_count()
if args.jobs > 1 and not args.in_place:
parser.error('parallel jobs requires --in-place')
if args.line_range:
if args.line_range[0] <= 0:
parser.error('--range must be positive numbers')
if args.line_range[0] > args.line_range[1]:
parser.error('First value of --range should be less than or equal '
'to the second')
return args
def read_config(args, parser):
"""Read both user configuration and local configuration."""
try:
from configparser import ConfigParser as SafeConfigParser
from configparser import Error
except ImportError:
from ConfigParser import SafeConfigParser
from ConfigParser import Error
config = SafeConfigParser()
try:
config.read(args.global_config)
if not args.ignore_local_config:
parent = tail = args.files and os.path.abspath(
os.path.commonprefix(args.files))
while tail:
if config.read([os.path.join(parent, fn)
for fn in PROJECT_CONFIG]):
break
(parent, tail) = os.path.split(parent)
defaults = dict((k.lstrip('-').replace('-', '_'), v)
for k, v in config.items('pep8'))
parser.set_defaults(**defaults)
except Error:
# Ignore for now.
pass
return parser
def _split_comma_separated(string):
"""Return a set of strings."""
return set(filter(None, string.split(',')))
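# Editor's note: small worked example (values assumed, not from the source):
#   _split_comma_separated('E26,,W6')  ->  {'E26', 'W6'}  (empty items dropped)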
def decode_filename(filename):
"""Return Unicode filename."""
if isinstance(filename, unicode):
return filename
else:
return filename.decode(sys.getfilesystemencoding())
def supported_fixes():
"""Yield pep8 error codes that autopep8 fixes.
Each item we yield is a tuple of the code followed by its
description.
"""
yield ('E101', docstring_summary(reindent.__doc__))
instance = FixPEP8(filename=None, options=None, contents='')
for attribute in dir(instance):
code = re.match('fix_([ew][0-9][0-9][0-9])', attribute)
if code:
yield (
code.group(1).upper(),
re.sub(r'\s+', ' ',
docstring_summary(getattr(instance, attribute).__doc__))
)
for (code, function) in sorted(global_fixes()):
yield (code.upper() + (4 - len(code)) * ' ',
re.sub(r'\s+', ' ', docstring_summary(function.__doc__)))
for code in sorted(CODE_TO_2TO3):
yield (code.upper() + (4 - len(code)) * ' ',
re.sub(r'\s+', ' ', docstring_summary(fix_2to3.__doc__)))
def docstring_summary(docstring):
"""Return summary of docstring."""
return docstring.split('\n')[0]
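# Editor's note: docstring_summary simply keeps the first line, e.g.
#   docstring_summary('Fix indentation.\nLong explanation.')  ->  'Fix indentation.'
# (example values assumed, not from the original source).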
def line_shortening_rank(candidate, indent_word, max_line_length,
experimental=False):
"""Return rank of candidate.
This is for sorting candidates.
"""
if not candidate.strip():
return 0
rank = 0
lines = candidate.split('\n')
offset = 0
if (
not lines[0].lstrip().startswith('#') and
lines[0].rstrip()[-1] not in '([{'
):
for (opening, closing) in ('()', '[]', '{}'):
# Don't penalize empty containers that aren't split up. Things like
# this "foo(\n )" aren't particularly good.
opening_loc = lines[0].find(opening)
closing_loc = lines[0].find(closing)
if opening_loc >= 0:
if closing_loc < 0 or closing_loc != opening_loc + 1:
offset = max(offset, 1 + opening_loc)
current_longest = max(offset + len(x.strip()) for x in lines)
rank += 4 * max(0, current_longest - max_line_length)
rank += len(lines)
# Too much variation in line length is ugly.
rank += 2 * standard_deviation(len(line) for line in lines)
  bad_starting_symbol = {
      '(': ')',
      '[': ']',
      '{': '}'}.get(lines[0][-1])
  if len(lines) > 1:
    if (
      bad_starting_symbol and
      lines[1].lstrip().startswith(bad_starting_symbol)
):
rank += 20
for lineno, current_line in enumerate(lines):
current_line = current_line.strip()
if current_line.startswith('#'):
continue
for bad_start in ['.', '%', '+', '-', '/']:
if current_line.startswith(bad_start):
rank += 100
# Do not tolerate operators on their own line.
if current_line == bad_start:
rank += 1000
if current_line.endswith(('(', '[', '{', '.')):
# Avoid lonely opening. They result in longer lines.
if len(current_line) <= len(indent_word):
rank += 100
# Avoid the ugliness of ", (\n".
if (
current_line.endswith('(') and
current_line[:-1].rstrip().endswith(',')
):
rank += 100
# Also avoid the ugliness of "foo.\nbar"
if current_line.endswith('.'):
rank += 100
if has_arithmetic_operator(current_line):
rank += 100
if current_line.endswith(('%', '(', '[', '{')):
rank -= 20
# Try to break list comprehensions at the "for".
if current_line.startswith('for '):
rank -= 50
if current_line.endswith('\\'):
# If a line ends in \-newline, it may be part of a
# multiline string. In that case, we would like to know
# how long that line is without the \-newline. If it's
# longer than the maximum, or has comments, then we assume
# that the \-newline is an okay candidate and only
# penalize it a bit.
total_len = len(current_line)
lineno += 1
while lineno < len(lines):
total_len += len(lines[lineno])
if lines[lineno].lstrip().startswith('#'):
total_len = max_line_length
break
if not lines[lineno].endswith('\\'):
break
lineno += 1
if total_len < max_line_length:
rank += 10
else:
rank += 100 if experimental else 1
# Prefer breaking at commas rather than colon.
if ',' in current_line and current_line.endswith(':'):
rank += 10
rank += 10 * count_unbalanced_brackets(current_line)
return max(0, rank)
def standard_deviation(numbers):
"""Return standard devation."""
numbers = list(numbers)
if not numbers:
return 0
mean = sum(numbers) / len(numbers)
return (sum((n - mean) ** 2 for n in numbers) /
len(numbers)) ** .5
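# Editor's note: hand-checked example for standard_deviation (not from the
# original source):
#   standard_deviation([2, 4, 4, 4, 5, 5, 7, 9])  ->  2.0
#   standard_deviation([])                        ->  0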
def has_arithmetic_operator(line):
"""Return True if line contains any arithmetic operators."""
for operator in pep8.ARITHMETIC_OP:
if operator in line:
return True
return False
def count_unbalanced_brackets(line):
"""Return number of unmatched open/close brackets."""
count = 0
for opening, closing in ['()', '[]', '{}']:
count += abs(line.count(opening) - line.count(closing))
return count
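# Editor's note: assumed examples for count_unbalanced_brackets:
#   count_unbalanced_brackets('print(foo[0]')  ->  1   (one unmatched '(')
#   count_unbalanced_brackets('f(x[1])')       ->  0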
def split_at_offsets(line, offsets):
"""Split line at offsets.
Return list of strings.
"""
result = []
previous_offset = 0
current_offset = 0
for current_offset in sorted(offsets):
if current_offset < len(line) and previous_offset != current_offset:
result.append(line[previous_offset:current_offset].strip())
previous_offset = current_offset
result.append(line[current_offset:])
return result
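# Editor's note: a worked example for split_at_offsets (values assumed):
#   split_at_offsets('abcdef', [2, 4])  ->  ['ab', 'cd', 'ef']
# Note that every piece except the last is stripped of surrounding whitespace.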
class LineEndingWrapper(object):
r"""Replace line endings to work with sys.stdout.
It seems that sys.stdout expects only '\n' as the line ending, no matter
the platform. Otherwise, we get repeated line endings.
"""
def __init__(self, output):
self.__output = output
def write(self, s):
self.__output.write(s.replace('\r\n', '\n').replace('\r', '\n'))
def flush(self):
self.__output.flush()
def match_file(filename, exclude):
"""Return True if file is okay for modifying/recursing."""
base_name = os.path.basename(filename)
if base_name.startswith('.'):
return False
for pattern in exclude:
if fnmatch.fnmatch(base_name, pattern):
return False
if fnmatch.fnmatch(filename, pattern):
return False
if not os.path.isdir(filename) and not is_python_file(filename):
return False
return True
def find_files(filenames, recursive, exclude):
"""Yield filenames."""
while filenames:
name = filenames.pop(0)
if recursive and os.path.isdir(name):
for root, directories, children in os.walk(name):
filenames += [os.path.join(root, f) for f in children
if match_file(os.path.join(root, f),
exclude)]
directories[:] = [d for d in directories
if match_file(os.path.join(root, d),
exclude)]
else:
yield name
def _fix_file(parameters):
"""Helper function for optionally running fix_file() in parallel."""
if parameters[1].verbose:
print('[file:{0}]'.format(parameters[0]), file=sys.stderr)
try:
fix_file(*parameters)
except IOError as error:
print(unicode(error), file=sys.stderr)
def fix_multiple_files(filenames, options, output=None):
"""Fix list of files.
Optionally fix files recursively.
"""
filenames = find_files(filenames, options.recursive, options.exclude)
if options.jobs > 1:
import multiprocessing
pool = multiprocessing.Pool(options.jobs)
pool.map(_fix_file,
[(name, options) for name in filenames])
else:
for name in filenames:
_fix_file((name, options, output))
def is_python_file(filename):
"""Return True if filename is Python file."""
if filename.endswith('.py'):
return True
try:
with open_with_encoding(filename) as f:
first_line = f.readlines(1)[0]
except (IOError, IndexError):
return False
if not PYTHON_SHEBANG_REGEX.match(first_line):
return False
return True
def is_probably_part_of_multiline(line):
"""Return True if line is likely part of a multiline string.
When multiline strings are involved, pep8 reports the error as being
at the start of the multiline string, which doesn't work for us.
"""
return (
'"""' in line or
"'''" in line or
line.rstrip().endswith('\\')
)
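# Editor's note: illustrative calls (assumed, not from the original source):
#   is_probably_part_of_multiline('text = """first line')  ->  True
#   is_probably_part_of_multiline('total = a + \\')        ->  True
#   is_probably_part_of_multiline('total = a + b')         ->  False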
def wrap_output(output, encoding):
"""Return output with specified encoding."""
return codecs.getwriter(encoding)(output.buffer
if hasattr(output, 'buffer')
else output)
def get_encoding():
"""Return preferred encoding."""
return locale.getpreferredencoding() or sys.getdefaultencoding()
def main(apply_config=True):
"""Tool main."""
try:
# Exit on broken pipe.
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
except AttributeError: # pragma: no cover
# SIGPIPE is not available on Windows.
pass
try:
args = parse_args(sys.argv[1:], apply_config=apply_config)
if args.list_fixes:
for code, description in sorted(supported_fixes()):
print('{code} - {description}'.format(
code=code, description=description))
return 0
if args.files == ['-']:
assert not args.in_place
encoding = sys.stdin.encoding or get_encoding()
# LineEndingWrapper is unnecessary here due to the symmetry between
# standard in and standard out.
wrap_output(sys.stdout, encoding=encoding).write(
fix_code(sys.stdin.read(), args, encoding=encoding))
else:
if args.in_place or args.diff:
args.files = list(set(args.files))
else:
assert len(args.files) == 1
assert not args.recursive
fix_multiple_files(args.files, args, sys.stdout)
except KeyboardInterrupt:
return 1 # pragma: no cover
class CachedTokenizer(object):
"""A one-element cache around tokenize.generate_tokens().
Original code written by Ned Batchelder, in coverage.py.
"""
def __init__(self):
self.last_text = None
self.last_tokens = None
def generate_tokens(self, text):
"""A stand-in for tokenize.generate_tokens()."""
if text != self.last_text:
string_io = io.StringIO(text)
self.last_tokens = list(
tokenize.generate_tokens(string_io.readline)
)
self.last_text = text
return self.last_tokens
_cached_tokenizer = CachedTokenizer()
generate_tokens = _cached_tokenizer.generate_tokens
if __name__ == '__main__':
sys.exit(main()) | apache-2.0 | 5,816,400,508,170,562,000 | 32.206769 | 79 | 0.538248 | false |
zhangziang/django-allauth | allauth/socialaccount/app_settings.py | 61 | 2394 | class AppSettings(object):
def __init__(self, prefix):
self.prefix = prefix
def _setting(self, name, dflt):
from django.conf import settings
getter = getattr(settings,
'ALLAUTH_SETTING_GETTER',
lambda name, dflt: getattr(settings, name, dflt))
return getter(self.prefix + name, dflt)
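    # Editor's note (not part of the original source): with prefix
    # 'SOCIALACCOUNT_', self._setting('AUTO_SIGNUP', True) looks up
    # settings.SOCIALACCOUNT_AUTO_SIGNUP and falls back to True, unless a
    # custom ALLAUTH_SETTING_GETTER overrides the lookup.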
@property
def QUERY_EMAIL(self):
"""
Request e-mail address from 3rd party account provider?
E.g. using OpenID AX
"""
from allauth.account import app_settings as account_settings
return self._setting("QUERY_EMAIL",
account_settings.EMAIL_REQUIRED)
@property
def AUTO_SIGNUP(self):
"""
Attempt to bypass the signup form by using fields (e.g. username,
email) retrieved from the social account provider. If a conflict
arises due to a duplicate e-mail signup form will still kick in.
"""
return self._setting("AUTO_SIGNUP", True)
@property
def PROVIDERS(self):
"""
Provider specific settings
"""
return self._setting("PROVIDERS", {})
@property
def EMAIL_REQUIRED(self):
"""
The user is required to hand over an e-mail address when signing up
"""
from allauth.account import app_settings as account_settings
return self._setting("EMAIL_REQUIRED", account_settings.EMAIL_REQUIRED)
@property
def EMAIL_VERIFICATION(self):
"""
See e-mail verification method
"""
from allauth.account import app_settings as account_settings
return self._setting("EMAIL_VERIFICATION",
account_settings.EMAIL_VERIFICATION)
@property
def ADAPTER(self):
return self._setting('ADAPTER',
'allauth.socialaccount.adapter'
'.DefaultSocialAccountAdapter')
@property
def FORMS(self):
return self._setting('FORMS', {})
@property
def STORE_TOKENS(self):
return self._setting('STORE_TOKENS', True)
# Ugly? Guido recommends this himself ...
# http://mail.python.org/pipermail/python-ideas/2012-May/014969.html
import sys
app_settings = AppSettings('SOCIALACCOUNT_')
app_settings.__name__ = __name__
sys.modules[__name__] = app_settings
| mit | -5,549,860,447,565,796,000 | 30.5 | 79 | 0.597744 | false |
Guneet-Dhillon/mxnet | tests/python/unittest/test_symbol.py | 7 | 9547 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import copy
import os
import re
import mxnet as mx
import numpy as np
from common import models
from mxnet.test_utils import discard_stderr
import pickle as pkl
def test_symbol_basic():
mlist = []
mlist.append(models.mlp2())
for m in mlist:
m.list_arguments()
m.list_outputs()
def test_symbol_compose():
data = mx.symbol.Variable('data')
net1 = mx.symbol.FullyConnected(data=data, name='fc1', num_hidden=10)
net1 = mx.symbol.FullyConnected(data=net1, name='fc2', num_hidden=100)
    assert net1.list_arguments() == [
        'data', 'fc1_weight', 'fc1_bias', 'fc2_weight', 'fc2_bias']
net2 = mx.symbol.FullyConnected(name='fc3', num_hidden=10)
net2 = mx.symbol.Activation(data=net2, act_type='relu')
net2 = mx.symbol.FullyConnected(data=net2, name='fc4', num_hidden=20)
#print(net2.debug_str())
composed = net2(fc3_data=net1, name='composed')
#print(composed.debug_str())
multi_out = mx.symbol.Group([composed, net1])
assert len(multi_out.list_outputs()) == 2
def test_symbol_copy():
data = mx.symbol.Variable('data')
data_2 = copy.deepcopy(data)
data_3 = copy.copy(data)
assert data.tojson() == data_2.tojson()
assert data.tojson() == data_3.tojson()
def test_symbol_internal():
data = mx.symbol.Variable('data')
oldfc = mx.symbol.FullyConnected(data=data, name='fc1', num_hidden=10)
net1 = mx.symbol.FullyConnected(data=oldfc, name='fc2', num_hidden=100)
assert net1.list_arguments() == ['data', 'fc1_weight', 'fc1_bias', 'fc2_weight', 'fc2_bias']
internal = net1.get_internals()
fc1 = internal['fc1_output']
assert fc1.list_arguments() == oldfc.list_arguments()
def test_symbol_children():
data = mx.symbol.Variable('data')
oldfc = mx.symbol.FullyConnected(data=data, name='fc1', num_hidden=10)
net1 = mx.symbol.FullyConnected(data=oldfc, name='fc2', num_hidden=100)
assert net1.get_children().list_outputs() == ['fc1_output', 'fc2_weight', 'fc2_bias']
assert net1.get_children().get_children().list_outputs() == ['data', 'fc1_weight', 'fc1_bias']
assert net1.get_children()['fc2_weight'].list_arguments() == ['fc2_weight']
assert net1.get_children()['fc2_weight'].get_children() is None
data = mx.sym.Variable('data')
sliced = mx.sym.SliceChannel(data, num_outputs=3, name='slice')
concat = mx.sym.Concat(*list(sliced))
assert concat.get_children().list_outputs() == \
['slice_output0', 'slice_output1', 'slice_output2']
assert sliced.get_children().list_outputs() == ['data']
def test_symbol_pickle():
mlist = [models.mlp2(), models.conv()]
data = pkl.dumps(mlist)
mlist2 = pkl.loads(data)
for x, y in zip(mlist, mlist2):
assert x.tojson() == y.tojson()
def test_symbol_saveload():
sym = models.mlp2()
fname = 'tmp_sym.json'
sym.save(fname)
data2 = mx.symbol.load(fname)
# save because of order
assert sym.tojson() == data2.tojson()
os.remove(fname)
def test_symbol_infer_type():
data = mx.symbol.Variable('data')
f32data = mx.symbol.Cast(data=data, dtype='float32')
fc1 = mx.symbol.FullyConnected(data = f32data, name='fc1', num_hidden=128)
mlp = mx.symbol.SoftmaxOutput(data = fc1, name = 'softmax')
arg, out, aux = mlp.infer_type(data=np.float16)
assert arg == [np.float16, np.float32, np.float32, np.float32]
assert out == [np.float32]
assert aux == []
def test_symbol_infer_shape():
num_hidden = 128
num_dim = 64
num_sample = 10
data = mx.symbol.Variable('data')
prev = mx.symbol.Variable('prevstate')
x2h = mx.symbol.FullyConnected(data=data, name='x2h', num_hidden=num_hidden)
h2h = mx.symbol.FullyConnected(data=prev, name='h2h', num_hidden=num_hidden)
out = mx.symbol.Activation(data=mx.sym.elemwise_add(x2h, h2h), name='out', act_type='relu')
# shape inference will fail because information is not available for h2h
ret = out.infer_shape(data=(num_sample, num_dim))
assert ret == (None, None, None)
arg, out_shapes, aux_shapes = out.infer_shape_partial(data=(num_sample, num_dim))
arg_shapes = dict(zip(out.list_arguments(), arg))
assert arg_shapes['data'] == (num_sample, num_dim)
assert arg_shapes['x2h_weight'] == (num_hidden, num_dim)
assert arg_shapes['h2h_weight'] == ()
# now we can do full shape inference
state_shape = out_shapes[0]
arg, out_shapes, aux_shapes = out.infer_shape(data=(num_sample, num_dim), prevstate=state_shape)
arg_shapes = dict(zip(out.list_arguments(), arg))
assert arg_shapes['data'] == (num_sample, num_dim)
assert arg_shapes['x2h_weight'] == (num_hidden, num_dim)
assert arg_shapes['h2h_weight'] == (num_hidden, num_hidden)
def test_symbol_infer_shape_var():
"Test specifying shape information when constructing a variable"
shape = (2, 3)
a = mx.symbol.Variable('a', shape=shape)
b = mx.symbol.Variable('b')
c = mx.symbol.elemwise_add(a, b)
arg_shapes, out_shapes, aux_shapes = c.infer_shape()
assert arg_shapes[0] == shape
assert arg_shapes[1] == shape
assert out_shapes[0] == shape
overwrite_shape = (5, 6)
arg_shapes, out_shapes, aux_shapes = c.infer_shape(a=overwrite_shape)
assert arg_shapes[0] == overwrite_shape
assert arg_shapes[1] == overwrite_shape
assert out_shapes[0] == overwrite_shape
def check_symbol_consistency(sym1, sym2, ctx):
assert sym1.list_arguments() == sym2.list_arguments()
assert sym1.list_auxiliary_states() == sym2.list_auxiliary_states()
assert sym1.list_outputs() == sym2.list_outputs()
mx.test_utils.check_consistency([sym1, sym2], ctx_list=[ctx, ctx])
def test_load_000800():
with mx.AttrScope(ctx_group='stage1'):
data = mx.symbol.Variable('data', lr_mult=0.2)
weight = mx.sym.Variable(name='fc1_weight', lr_mult=1.2)
fc1 = mx.symbol.FullyConnected(data = data, weight=weight, name='fc1', num_hidden=128, wd_mult=0.3)
act1 = mx.symbol.Activation(data = fc1, name='relu1', act_type="relu")
set_stage1 = set(act1.list_arguments())
with mx.AttrScope(ctx_group='stage2'):
fc2 = mx.symbol.FullyConnected(data = act1, name = 'fc2', num_hidden = 64, lr_mult=0.01)
act2 = mx.symbol.Activation(data = fc2, name='relu2', act_type="relu")
fc3 = mx.symbol.FullyConnected(data = act2, name='fc3', num_hidden=10)
fc3 = mx.symbol.BatchNorm(fc3, name='batchnorm0')
sym1 = mx.symbol.SoftmaxOutput(data = fc3, name = 'softmax')
curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
sym2 = mx.sym.load(os.path.join(curr_path, 'save_000800.json'))
attr1 = sym1.attr_dict()
attr2 = sym2.attr_dict()
for k, v1 in attr1.items():
assert k in attr2, k
v2 = attr2[k]
for kk, vv1 in v1.items():
if kk.startswith('__') and kk.endswith('__'):
assert kk in v2 and v2[kk] == vv1, k + str(v1) + str(v2)
check_symbol_consistency(sym1, sym2,
{'ctx': mx.cpu(0), 'group2ctx': {'stage1' : mx.cpu(1), 'stage2' : mx.cpu(2)}, 'data': (1,200)})
def test_blockgrad():
a = mx.sym.Variable('a')
b = mx.sym.BlockGrad(2*a)
exe = b.simple_bind(ctx=mx.cpu(), a=(10,10))
def test_zero_prop():
data = mx.symbol.Variable('data')
for i in range(10):
data = data * data
exe = data.simple_bind(ctx=mx.cpu(), data=(10, 3, 256, 256))
big = int(re.search('Total (\d+) MB allocated', exe.debug_str()).group(1))
exe = data.simple_bind(ctx=mx.cpu(), data=(10, 3, 256, 256), grad_req='null')
small1 = int(re.search('Total (\d+) MB allocated', exe.debug_str()).group(1))
data = mx.sym.stop_gradient(data)
exe = data.simple_bind(ctx=mx.cpu(), data=(10, 3, 256, 256))
small2 = int(re.search('Total (\d+) MB allocated', exe.debug_str()).group(1))
assert big > small2
assert small1 == small2
def test_zero_prop2():
x = mx.sym.Variable('x')
idx = mx.sym.Variable('idx')
y = mx.sym.batch_take(x, idx)
z = mx.sym.stop_gradient(y)
exe = z.simple_bind(ctx=mx.cpu(), x=(10, 10), idx=(10,),
type_dict={'x': np.float32, 'idx': np.int32})
exe.forward()
exe.backward()
# The following bind() should throw an exception. We discard the expected stderr
# output for this operation only in order to keep the test logs clean.
with discard_stderr():
try:
y.simple_bind(ctx=mx.cpu(), x=(10, 10), idx=(10,),
type_dict={'x': np.float32, 'idx': np.int32})
except:
return
assert False
if __name__ == '__main__':
import nose
nose.runmodule()
| apache-2.0 | -4,149,492,475,961,347,600 | 37.035857 | 108 | 0.638839 | false |
kopchik/qtile | libqtile/widget/crashme.py | 10 | 2352 | # Copyright (c) 2012 Florian Mounier
# Copyright (c) 2012 roger
# Copyright (c) 2013 Tao Sauvage
# Copyright (c) 2013 Craig Barnes
# Copyright (c) 2014 Sean Vig
# Copyright (c) 2014 Adi Sieker
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# -*- coding: utf-8 -*-
from .. import bar
from . import base
class _CrashMe(base._TextBox):
"""
A developer widget to force a crash in qtile.
    Pressing the left mouse button causes a zero division error.
    Pressing the right mouse button causes a cairo draw error.
"""
orientations = base.ORIENTATION_HORIZONTAL
def __init__(self, width=bar.CALCULATED, **config):
"""
- width: A fixed width, or bar.CALCULATED to calculate the width
automatically (which is recommended).
"""
base._TextBox.__init__(self, "Crash me !", width, **config)
def _configure(self, qtile, bar):
base._Widget._configure(self, qtile, bar)
self.layout = self.drawer.textlayout(
self.text,
self.foreground,
self.font,
self.fontsize,
self.fontshadow,
markup=True
)
def button_press(self, x, y, button):
if button == 1:
1 / 0
elif button == 3:
self.text = '<span>\xC3GError'
self.bar.draw()
| mit | -2,854,365,693,650,093,600 | 36.935484 | 79 | 0.670493 | false |
pquentin/django | django/core/handlers/wsgi.py | 82 | 9759 | from __future__ import unicode_literals
import cgi
import codecs
import logging
import sys
import warnings
from io import BytesIO
from threading import Lock
from django import http
from django.conf import settings
from django.core import signals
from django.core.handlers import base
from django.core.urlresolvers import set_script_prefix
# For backwards compatibility -- lots of code uses this in the wild!
from django.http.response import REASON_PHRASES as STATUS_CODE_TEXT # NOQA
from django.utils import datastructures, six
from django.utils.deprecation import RemovedInDjango19Warning
from django.utils.encoding import force_str, force_text
from django.utils.functional import cached_property
logger = logging.getLogger('django.request')
# encode() and decode() expect the charset to be a native string.
ISO_8859_1, UTF_8 = str('iso-8859-1'), str('utf-8')
class LimitedStream(object):
'''
    LimitedStream wraps another stream in order to disallow reading from it
    past a specified number of bytes.
'''
def __init__(self, stream, limit, buf_size=64 * 1024 * 1024):
self.stream = stream
self.remaining = limit
self.buffer = b''
self.buf_size = buf_size
def _read_limited(self, size=None):
if size is None or size > self.remaining:
size = self.remaining
if size == 0:
return b''
result = self.stream.read(size)
self.remaining -= len(result)
return result
def read(self, size=None):
if size is None:
result = self.buffer + self._read_limited()
self.buffer = b''
elif size < len(self.buffer):
result = self.buffer[:size]
self.buffer = self.buffer[size:]
else: # size >= len(self.buffer)
result = self.buffer + self._read_limited(size - len(self.buffer))
self.buffer = b''
return result
def readline(self, size=None):
while b'\n' not in self.buffer and \
(size is None or len(self.buffer) < size):
if size:
# since size is not None here, len(self.buffer) < size
chunk = self._read_limited(size - len(self.buffer))
else:
chunk = self._read_limited()
if not chunk:
break
self.buffer += chunk
sio = BytesIO(self.buffer)
if size:
line = sio.readline(size)
else:
line = sio.readline()
self.buffer = sio.read()
return line
class WSGIRequest(http.HttpRequest):
def __init__(self, environ):
script_name = get_script_name(environ)
path_info = get_path_info(environ)
if not path_info:
# Sometimes PATH_INFO exists, but is empty (e.g. accessing
# the SCRIPT_NAME URL without a trailing slash). We really need to
# operate as if they'd requested '/'. Not amazingly nice to force
# the path like this, but should be harmless.
path_info = '/'
self.environ = environ
self.path_info = path_info
# be careful to only replace the first slash in the path because of
# http://test/something and http://test//something being different as
# stated in http://www.ietf.org/rfc/rfc2396.txt
self.path = '%s/%s' % (script_name.rstrip('/'),
path_info.replace('/', '', 1))
self.META = environ
self.META['PATH_INFO'] = path_info
self.META['SCRIPT_NAME'] = script_name
self.method = environ['REQUEST_METHOD'].upper()
_, content_params = cgi.parse_header(environ.get('CONTENT_TYPE', ''))
if 'charset' in content_params:
try:
codecs.lookup(content_params['charset'])
except LookupError:
pass
else:
self.encoding = content_params['charset']
self._post_parse_error = False
try:
content_length = int(environ.get('CONTENT_LENGTH'))
except (ValueError, TypeError):
content_length = 0
self._stream = LimitedStream(self.environ['wsgi.input'], content_length)
self._read_started = False
self.resolver_match = None
def _get_scheme(self):
return self.environ.get('wsgi.url_scheme')
def _get_request(self):
warnings.warn('`request.REQUEST` is deprecated, use `request.GET` or '
'`request.POST` instead.', RemovedInDjango19Warning, 2)
if not hasattr(self, '_request'):
self._request = datastructures.MergeDict(self.POST, self.GET)
return self._request
@cached_property
def GET(self):
# The WSGI spec says 'QUERY_STRING' may be absent.
raw_query_string = get_bytes_from_wsgi(self.environ, 'QUERY_STRING', '')
return http.QueryDict(raw_query_string, encoding=self._encoding)
def _get_post(self):
if not hasattr(self, '_post'):
self._load_post_and_files()
return self._post
def _set_post(self, post):
self._post = post
@cached_property
def COOKIES(self):
raw_cookie = get_str_from_wsgi(self.environ, 'HTTP_COOKIE', '')
return http.parse_cookie(raw_cookie)
def _get_files(self):
if not hasattr(self, '_files'):
self._load_post_and_files()
return self._files
POST = property(_get_post, _set_post)
FILES = property(_get_files)
REQUEST = property(_get_request)
class WSGIHandler(base.BaseHandler):
initLock = Lock()
request_class = WSGIRequest
def __call__(self, environ, start_response):
# Set up middleware if needed. We couldn't do this earlier, because
# settings weren't available.
if self._request_middleware is None:
with self.initLock:
try:
# Check that middleware is still uninitialized.
if self._request_middleware is None:
self.load_middleware()
except:
# Unload whatever middleware we got
self._request_middleware = None
raise
set_script_prefix(get_script_name(environ))
signals.request_started.send(sender=self.__class__, environ=environ)
try:
request = self.request_class(environ)
except UnicodeDecodeError:
logger.warning('Bad Request (UnicodeDecodeError)',
exc_info=sys.exc_info(),
extra={
'status_code': 400,
}
)
response = http.HttpResponseBadRequest()
else:
response = self.get_response(request)
response._handler_class = self.__class__
status = '%s %s' % (response.status_code, response.reason_phrase)
response_headers = [(str(k), str(v)) for k, v in response.items()]
for c in response.cookies.values():
response_headers.append((str('Set-Cookie'), str(c.output(header=''))))
start_response(force_str(status), response_headers)
if getattr(response, 'file_to_stream', None) is not None and environ.get('wsgi.file_wrapper'):
response = environ['wsgi.file_wrapper'](response.file_to_stream)
return response
def get_path_info(environ):
"""
Returns the HTTP request's PATH_INFO as a unicode string.
"""
path_info = get_bytes_from_wsgi(environ, 'PATH_INFO', '/')
return path_info.decode(UTF_8)
def get_script_name(environ):
"""
Returns the equivalent of the HTTP request's SCRIPT_NAME environment
variable. If Apache mod_rewrite has been used, returns what would have been
the script name prior to any rewriting (so it's the script name as seen
from the client's perspective), unless the FORCE_SCRIPT_NAME setting is
set (to anything).
"""
if settings.FORCE_SCRIPT_NAME is not None:
return force_text(settings.FORCE_SCRIPT_NAME)
# If Apache's mod_rewrite had a whack at the URL, Apache set either
# SCRIPT_URL or REDIRECT_URL to the full resource URL before applying any
# rewrites. Unfortunately not every Web server (lighttpd!) passes this
# information through all the time, so FORCE_SCRIPT_NAME, above, is still
# needed.
script_url = get_bytes_from_wsgi(environ, 'SCRIPT_URL', '')
if not script_url:
script_url = get_bytes_from_wsgi(environ, 'REDIRECT_URL', '')
if script_url:
path_info = get_bytes_from_wsgi(environ, 'PATH_INFO', '')
script_name = script_url[:-len(path_info)]
else:
script_name = get_bytes_from_wsgi(environ, 'SCRIPT_NAME', '')
return script_name.decode(UTF_8)
def get_bytes_from_wsgi(environ, key, default):
"""
Get a value from the WSGI environ dictionary as bytes.
key and default should be str objects. Under Python 2 they may also be
unicode objects provided they only contain ASCII characters.
"""
value = environ.get(str(key), str(default))
# Under Python 3, non-ASCII values in the WSGI environ are arbitrarily
# decoded with ISO-8859-1. This is wrong for Django websites where UTF-8
# is the default. Re-encode to recover the original bytestring.
return value.encode(ISO_8859_1) if six.PY3 else value
def get_str_from_wsgi(environ, key, default):
"""
Get a value from the WSGI environ dictionary as str.
key and default should be str objects. Under Python 2 they may also be
unicode objects provided they only contain ASCII characters.
"""
value = get_bytes_from_wsgi(environ, key, default)
return value.decode(UTF_8, errors='replace') if six.PY3 else value
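# Editor's note: a hedged usage sketch for the two helpers above (example
# environ values assumed, not from the original source):
#   environ = {'HTTP_COOKIE': 'sessionid=abc'}
#   get_bytes_from_wsgi(environ, 'HTTP_COOKIE', '')  ->  b'sessionid=abc' on Python 3
#   get_str_from_wsgi(environ, 'HTTP_COOKIE', '')    ->  'sessionid=abc'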
| bsd-3-clause | -4,120,595,982,493,286,400 | 36.106464 | 102 | 0.616559 | false |
otherness-space/myProject002 | my_project_002/lib/python2.7/site-packages/django/core/management/commands/diffsettings.py | 114 | 1264 | from django.core.management.base import NoArgsCommand
def module_to_dict(module, omittable=lambda k: k.startswith('_')):
"Converts a module namespace to a Python dictionary. Used by get_settings_diff."
return dict([(k, repr(v)) for k, v in module.__dict__.items() if not omittable(k)])
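# Editor's note (assumed example, not from the original source): for a module
# defining DEBUG = True and _cache = {}, module_to_dict returns
# {'DEBUG': 'True'} -- values are repr() strings and underscored names are
# omitted by the default "omittable" test.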
class Command(NoArgsCommand):
help = """Displays differences between the current settings.py and Django's
default settings. Settings that don't appear in the defaults are
followed by "###"."""
requires_model_validation = False
def handle_noargs(self, **options):
# Inspired by Postfix's "postconf -n".
from django.conf import settings, global_settings
# Because settings are imported lazily, we need to explicitly load them.
settings._setup()
user_settings = module_to_dict(settings._wrapped)
default_settings = module_to_dict(global_settings)
output = []
for key in sorted(user_settings.keys()):
if key not in default_settings:
output.append("%s = %s ###" % (key, user_settings[key]))
elif user_settings[key] != default_settings[key]:
output.append("%s = %s" % (key, user_settings[key]))
return '\n'.join(output)
| mit | 7,858,627,970,156,690,000 | 41.133333 | 87 | 0.643196 | false |
parthea/pydatalab | solutionbox/image_classification/mltoolbox/image/classification/task.py | 6 | 2644 | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Entry point for CloudML training.
CloudML training requires a tarball package and a python module to run. This file
provides such a "main" method and a list of args passed with the program.
"""
import argparse
import json
import logging
import os
import tensorflow as tf
from . import _model
from . import _trainer
from . import _util
def main(_):
parser = argparse.ArgumentParser()
parser.add_argument(
'--input_dir',
type=str,
help='The input dir path for training and evaluation data.')
parser.add_argument(
'--job-dir',
dest='job_dir',
type=str,
help='The GCS path to which checkpoints and other outputs should be saved.')
parser.add_argument(
'--max_steps',
type=int,)
parser.add_argument(
'--batch_size',
type=int,
help='Number of examples to be processed per mini-batch.')
parser.add_argument(
'--checkpoint',
type=str,
default=_util._DEFAULT_CHECKPOINT_GSURL,
help='Pretrained inception checkpoint path.')
args, _ = parser.parse_known_args()
labels = _util.get_labels(args.input_dir)
model = _model.Model(labels, 0.5, args.checkpoint)
env = json.loads(os.environ.get('TF_CONFIG', '{}'))
# Print the job data as provided by the service.
logging.info('Original job data: %s', env.get('job', {}))
task_data = env.get('task', None) or {'type': 'master', 'index': 0}
task = type('TaskSpec', (object,), task_data)
cluster_data = env.get('cluster', None)
cluster = tf.train.ClusterSpec(cluster_data) if cluster_data else None
if not cluster or not task or task.type == 'master' or task.type == 'worker':
_trainer.Trainer(args.input_dir, args.batch_size, args.max_steps,
args.job_dir, model, cluster, task).run_training()
elif task.type == 'ps':
server = _trainer.start_server(cluster, task)
server.join()
else:
raise ValueError('invalid task_type %s' % (task.type,))
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
tf.app.run()
| apache-2.0 | 7,548,480,017,629,927,000 | 31.641975 | 84 | 0.678139 | false |
pekkosk/hotbit | hotbit/io/__init__.py | 1 | 2274 | """
Input module. Contains functions to read element, Slater-Koster and repulsion
data.
"""
def read_element(filename, symbol, format=None):
"""
Read element data from files.
Parameters:
-----------
    fileobj: filename or file-object to read from
symbol: chemical symbol of the element
"""
if format is None:
format = filetype(filename)
if format == 'elm':
from hotbit.io.native import read_element_from_elm
return read_element_from_elm(filename, symbol)
if format == 'skf':
from hotbit.io.dftb import read_element_from_skf
return read_element_from_skf(filename, symbol)
raise RuntimeError('File format "'+format+'" not recognized!')
def read_HS(filename, symboli, symbolj, format=None):
"""
Read Slater-Koster tables from files.
Parameters:
-----------
    fileobj: filename or file-object to read from
symboli: chemical symbol of the first element
symbolj: chemical symbol of the second element
"""
if format is None:
format = filetype(filename)
if format == 'par':
from hotbit.io.native import read_HS_from_par
return read_HS_from_par(filename, symboli, symbolj)
if format == 'skf':
from hotbit.io.dftb import read_HS_from_skf
return read_HS_from_skf(filename, symboli, symbolj)
raise RuntimeError('File format "'+format+'" not recognized!')
def read_repulsion(filename, format=None):
"""
    Read repulsion data from files.
Parameters:
-----------
    fileobj: filename or file-object to read from
"""
if format is None:
format = filetype(filename)
if format == 'par':
from hotbit.io.native import read_repulsion_from_par
return read_repulsion_from_par(filename)
if format == 'skf':
from hotbit.io.dftb import read_repulsion_from_skf
return read_repulsion_from_skf(filename)
raise RuntimeError('File format "'+format+'" not recognized!')
def filetype(filename):
if filename.lower().endswith('.elm'):
return 'elm'
if filename.lower().endswith('.par'):
return 'par'
    if filename.lower().endswith('.skf') or filename.lower().endswith('.spl'):
return 'skf'
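# Editor's note: assumed examples for filetype (not from the original source):
#   filetype('C.elm')      ->  'elm'
#   filetype('Au_Au.par')  ->  'par'
#   filetype('Si-Si.skf')  ->  'skf'
# Any other extension falls through and returns None implicitly.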
| gpl-2.0 | -1,284,471,911,798,206,000 | 24.550562 | 77 | 0.635884 | false |
PatKayongo/patkayongo.github.io | node_modules/pygmentize-bundled/vendor/pygments/pygments/lexers/_openedgebuiltins.py | 370 | 40661 | # -*- coding: utf-8 -*-
"""
pygments.lexers._openedgebuiltins
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Builtin list for the OpenEdgeLexer.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
OPENEDGEKEYWORDS = [
'ABSOLUTE', 'ABS', 'ABSO', 'ABSOL', 'ABSOLU', 'ABSOLUT', 'ACCELERATOR',
'ACCUM', 'ACCUMULATE', 'ACCUM', 'ACCUMU', 'ACCUMUL', 'ACCUMULA',
'ACCUMULAT', 'ACTIVE-FORM', 'ACTIVE-WINDOW', 'ADD', 'ADD-BUFFER',
'ADD-CALC-COLUMN', 'ADD-COLUMNS-FROM', 'ADD-EVENTS-PROCEDURE',
'ADD-FIELDS-FROM', 'ADD-FIRST', 'ADD-INDEX-FIELD', 'ADD-LAST',
'ADD-LIKE-COLUMN', 'ADD-LIKE-FIELD', 'ADD-LIKE-INDEX', 'ADD-NEW-FIELD',
'ADD-NEW-INDEX', 'ADD-SCHEMA-LOCATION', 'ADD-SUPER-PROCEDURE', 'ADM-DATA',
'ADVISE', 'ALERT-BOX', 'ALIAS', 'ALL', 'ALLOW-COLUMN-SEARCHING',
'ALLOW-REPLICATION', 'ALTER', 'ALWAYS-ON-TOP', 'AMBIGUOUS', 'AMBIG',
'AMBIGU', 'AMBIGUO', 'AMBIGUOU', 'ANALYZE', 'ANALYZ', 'AND', 'ANSI-ONLY',
'ANY', 'ANYWHERE', 'APPEND', 'APPL-ALERT-BOXES', 'APPL-ALERT',
'APPL-ALERT-', 'APPL-ALERT-B', 'APPL-ALERT-BO', 'APPL-ALERT-BOX',
'APPL-ALERT-BOXE', 'APPL-CONTEXT-ID', 'APPLICATION', 'APPLY',
'APPSERVER-INFO', 'APPSERVER-PASSWORD', 'APPSERVER-USERID', 'ARRAY-MESSAGE',
'AS', 'ASC', 'ASCENDING', 'ASCE', 'ASCEN', 'ASCEND', 'ASCENDI', 'ASCENDIN',
'ASK-OVERWRITE', 'ASSEMBLY', 'ASSIGN', 'ASYNCHRONOUS',
'ASYNC-REQUEST-COUNT', 'ASYNC-REQUEST-HANDLE', 'AT', 'ATTACHED-PAIRLIST',
'ATTR-SPACE', 'ATTR', 'ATTRI', 'ATTRIB', 'ATTRIBU', 'ATTRIBUT',
'AUDIT-CONTROL', 'AUDIT-ENABLED', 'AUDIT-EVENT-CONTEXT', 'AUDIT-POLICY',
'AUTHENTICATION-FAILED', 'AUTHORIZATION', 'AUTO-COMPLETION', 'AUTO-COMP',
'AUTO-COMPL', 'AUTO-COMPLE', 'AUTO-COMPLET', 'AUTO-COMPLETI',
'AUTO-COMPLETIO', 'AUTO-ENDKEY', 'AUTO-END-KEY', 'AUTO-GO', 'AUTO-INDENT',
'AUTO-IND', 'AUTO-INDE', 'AUTO-INDEN', 'AUTOMATIC', 'AUTO-RESIZE',
'AUTO-RETURN', 'AUTO-RET', 'AUTO-RETU', 'AUTO-RETUR', 'AUTO-SYNCHRONIZE',
'AUTO-ZAP', 'AUTO-Z', 'AUTO-ZA', 'AVAILABLE', 'AVAIL', 'AVAILA', 'AVAILAB',
'AVAILABL', 'AVAILABLE-FORMATS', 'AVERAGE', 'AVE', 'AVER', 'AVERA',
'AVERAG', 'AVG', 'BACKGROUND', 'BACK', 'BACKG', 'BACKGR', 'BACKGRO',
'BACKGROU', 'BACKGROUN', 'BACKWARDS', 'BACKWARD', 'BASE64-DECODE',
'BASE64-ENCODE', 'BASE-ADE', 'BASE-KEY', 'BATCH-MODE', 'BATCH', 'BATCH-',
'BATCH-M', 'BATCH-MO', 'BATCH-MOD', 'BATCH-SIZE', 'BEFORE-HIDE', 'BEFORE-H',
'BEFORE-HI', 'BEFORE-HID', 'BEGIN-EVENT-GROUP', 'BEGINS', 'BELL', 'BETWEEN',
'BGCOLOR', 'BGC', 'BGCO', 'BGCOL', 'BGCOLO', 'BIG-ENDIAN', 'BINARY', 'BIND',
'BIND-WHERE', 'BLANK', 'BLOCK-ITERATION-DISPLAY', 'BORDER-BOTTOM-CHARS',
'BORDER-B', 'BORDER-BO', 'BORDER-BOT', 'BORDER-BOTT', 'BORDER-BOTTO',
'BORDER-BOTTOM-PIXELS', 'BORDER-BOTTOM-P', 'BORDER-BOTTOM-PI',
'BORDER-BOTTOM-PIX', 'BORDER-BOTTOM-PIXE', 'BORDER-BOTTOM-PIXEL',
'BORDER-LEFT-CHARS', 'BORDER-L', 'BORDER-LE', 'BORDER-LEF', 'BORDER-LEFT',
'BORDER-LEFT-', 'BORDER-LEFT-C', 'BORDER-LEFT-CH', 'BORDER-LEFT-CHA',
'BORDER-LEFT-CHAR', 'BORDER-LEFT-PIXELS', 'BORDER-LEFT-P', 'BORDER-LEFT-PI',
'BORDER-LEFT-PIX', 'BORDER-LEFT-PIXE', 'BORDER-LEFT-PIXEL',
'BORDER-RIGHT-CHARS', 'BORDER-R', 'BORDER-RI', 'BORDER-RIG', 'BORDER-RIGH',
'BORDER-RIGHT', 'BORDER-RIGHT-', 'BORDER-RIGHT-C', 'BORDER-RIGHT-CH',
'BORDER-RIGHT-CHA', 'BORDER-RIGHT-CHAR', 'BORDER-RIGHT-PIXELS',
'BORDER-RIGHT-P', 'BORDER-RIGHT-PI', 'BORDER-RIGHT-PIX',
'BORDER-RIGHT-PIXE', 'BORDER-RIGHT-PIXEL', 'BORDER-TOP-CHARS', 'BORDER-T',
'BORDER-TO', 'BORDER-TOP', 'BORDER-TOP-', 'BORDER-TOP-C', 'BORDER-TOP-CH',
'BORDER-TOP-CHA', 'BORDER-TOP-CHAR', 'BORDER-TOP-PIXELS', 'BORDER-TOP-P',
'BORDER-TOP-PI', 'BORDER-TOP-PIX', 'BORDER-TOP-PIXE', 'BORDER-TOP-PIXEL',
'BOX', 'BOX-SELECTABLE', 'BOX-SELECT', 'BOX-SELECTA', 'BOX-SELECTAB',
'BOX-SELECTABL', 'BREAK', 'BROWSE', 'BUFFER', 'BUFFER-CHARS',
'BUFFER-COMPARE', 'BUFFER-COPY', 'BUFFER-CREATE', 'BUFFER-DELETE',
'BUFFER-FIELD', 'BUFFER-HANDLE', 'BUFFER-LINES', 'BUFFER-NAME',
'BUFFER-RELEASE', 'BUFFER-VALUE', 'BUTTON', 'BUTTONS', 'BUTTON', 'BY',
'BY-POINTER', 'BY-VARIANT-POINTER', 'CACHE', 'CACHE-SIZE', 'CALL',
'CALL-NAME', 'CALL-TYPE', 'CANCEL-BREAK', 'CANCEL-BUTTON', 'CAN-CREATE',
'CAN-DELETE', 'CAN-DO', 'CAN-FIND', 'CAN-QUERY', 'CAN-READ', 'CAN-SET',
'CAN-WRITE', 'CAPS', 'CAREFUL-PAINT', 'CASE', 'CASE-SENSITIVE', 'CASE-SEN',
'CASE-SENS', 'CASE-SENSI', 'CASE-SENSIT', 'CASE-SENSITI', 'CASE-SENSITIV',
'CAST', 'CATCH', 'CDECL', 'CENTERED', 'CENTER', 'CENTERE', 'CHAINED',
'CHARACTER_LENGTH', 'CHARSET', 'CHECK', 'CHECKED', 'CHOOSE', 'CHR', 'CLASS',
'CLASS-TYPE', 'CLEAR', 'CLEAR-APPL-CONTEXT', 'CLEAR-LOG', 'CLEAR-SELECTION',
'CLEAR-SELECT', 'CLEAR-SELECTI', 'CLEAR-SELECTIO', 'CLEAR-SORT-ARROWS',
'CLEAR-SORT-ARROW', 'CLIENT-CONNECTION-ID', 'CLIENT-PRINCIPAL',
'CLIENT-TTY', 'CLIENT-TYPE', 'CLIENT-WORKSTATION', 'CLIPBOARD', 'CLOSE',
'CLOSE-LOG', 'CODE', 'CODEBASE-LOCATOR', 'CODEPAGE', 'CODEPAGE-CONVERT',
'COLLATE', 'COL-OF', 'COLON', 'COLON-ALIGNED', 'COLON-ALIGN',
'COLON-ALIGNE', 'COLOR', 'COLOR-TABLE', 'COLUMN', 'COL', 'COLU', 'COLUM',
'COLUMN-BGCOLOR', 'COLUMN-DCOLOR', 'COLUMN-FGCOLOR', 'COLUMN-FONT',
'COLUMN-LABEL', 'COLUMN-LAB', 'COLUMN-LABE', 'COLUMN-MOVABLE', 'COLUMN-OF',
'COLUMN-PFCOLOR', 'COLUMN-READ-ONLY', 'COLUMN-RESIZABLE', 'COLUMNS',
'COLUMN-SCROLLING', 'COMBO-BOX', 'COMMAND', 'COMPARES', 'COMPILE',
'COMPILER', 'COMPLETE', 'COM-SELF', 'CONFIG-NAME', 'CONNECT', 'CONNECTED',
'CONSTRUCTOR', 'CONTAINS', 'CONTENTS', 'CONTEXT', 'CONTEXT-HELP',
'CONTEXT-HELP-FILE', 'CONTEXT-HELP-ID', 'CONTEXT-POPUP', 'CONTROL',
'CONTROL-BOX', 'CONTROL-FRAME', 'CONVERT', 'CONVERT-3D-COLORS',
'CONVERT-TO-OFFSET', 'CONVERT-TO-OFFS', 'CONVERT-TO-OFFSE', 'COPY-DATASET',
'COPY-LOB', 'COPY-SAX-ATTRIBUTES', 'COPY-TEMP-TABLE', 'COUNT', 'COUNT-OF',
'CPCASE', 'CPCOLL', 'CPINTERNAL', 'CPLOG', 'CPPRINT', 'CPRCODEIN',
'CPRCODEOUT', 'CPSTREAM', 'CPTERM', 'CRC-VALUE', 'CREATE', 'CREATE-LIKE',
'CREATE-LIKE-SEQUENTIAL', 'CREATE-NODE-NAMESPACE',
'CREATE-RESULT-LIST-ENTRY', 'CREATE-TEST-FILE', 'CURRENT', 'CURRENT_DATE',
'CURRENT_DATE', 'CURRENT-CHANGED', 'CURRENT-COLUMN', 'CURRENT-ENVIRONMENT',
'CURRENT-ENV', 'CURRENT-ENVI', 'CURRENT-ENVIR', 'CURRENT-ENVIRO',
'CURRENT-ENVIRON', 'CURRENT-ENVIRONM', 'CURRENT-ENVIRONME',
'CURRENT-ENVIRONMEN', 'CURRENT-ITERATION', 'CURRENT-LANGUAGE',
'CURRENT-LANG', 'CURRENT-LANGU', 'CURRENT-LANGUA', 'CURRENT-LANGUAG',
'CURRENT-QUERY', 'CURRENT-RESULT-ROW', 'CURRENT-ROW-MODIFIED',
'CURRENT-VALUE', 'CURRENT-WINDOW', 'CURSOR', 'CURS', 'CURSO', 'CURSOR-CHAR',
'CURSOR-LINE', 'CURSOR-OFFSET', 'DATABASE', 'DATA-BIND',
'DATA-ENTRY-RETURN', 'DATA-ENTRY-RET', 'DATA-ENTRY-RETU',
'DATA-ENTRY-RETUR', 'DATA-RELATION', 'DATA-REL', 'DATA-RELA', 'DATA-RELAT',
'DATA-RELATI', 'DATA-RELATIO', 'DATASERVERS', 'DATASET', 'DATASET-HANDLE',
'DATA-SOURCE', 'DATA-SOURCE-COMPLETE-MAP', 'DATA-SOURCE-MODIFIED',
'DATA-SOURCE-ROWID', 'DATA-TYPE', 'DATA-T', 'DATA-TY', 'DATA-TYP',
'DATE-FORMAT', 'DATE-F', 'DATE-FO', 'DATE-FOR', 'DATE-FORM', 'DATE-FORMA',
'DAY', 'DBCODEPAGE', 'DBCOLLATION', 'DBNAME', 'DBPARAM', 'DB-REFERENCES',
'DBRESTRICTIONS', 'DBREST', 'DBRESTR', 'DBRESTRI', 'DBRESTRIC',
'DBRESTRICT', 'DBRESTRICTI', 'DBRESTRICTIO', 'DBRESTRICTION', 'DBTASKID',
'DBTYPE', 'DBVERSION', 'DBVERS', 'DBVERSI', 'DBVERSIO', 'DCOLOR', 'DDE',
'DDE-ERROR', 'DDE-ID', 'DDE-I', 'DDE-ITEM', 'DDE-NAME', 'DDE-TOPIC',
'DEBLANK', 'DEBUG', 'DEBU', 'DEBUG-ALERT', 'DEBUGGER', 'DEBUG-LIST',
'DECIMALS', 'DECLARE', 'DECLARE-NAMESPACE', 'DECRYPT', 'DEFAULT',
'DEFAULT-BUFFER-HANDLE', 'DEFAULT-BUTTON', 'DEFAUT-B', 'DEFAUT-BU',
'DEFAUT-BUT', 'DEFAUT-BUTT', 'DEFAUT-BUTTO', 'DEFAULT-COMMIT',
'DEFAULT-EXTENSION', 'DEFAULT-EX', 'DEFAULT-EXT', 'DEFAULT-EXTE',
'DEFAULT-EXTEN', 'DEFAULT-EXTENS', 'DEFAULT-EXTENSI', 'DEFAULT-EXTENSIO',
'DEFAULT-NOXLATE', 'DEFAULT-NOXL', 'DEFAULT-NOXLA', 'DEFAULT-NOXLAT',
'DEFAULT-VALUE', 'DEFAULT-WINDOW', 'DEFINED', 'DEFINE-USER-EVENT-MANAGER',
'DELETE', 'DEL', 'DELE', 'DELET', 'DELETE-CHARACTER', 'DELETE-CHAR',
'DELETE-CHARA', 'DELETE-CHARAC', 'DELETE-CHARACT', 'DELETE-CHARACTE',
'DELETE-CURRENT-ROW', 'DELETE-LINE', 'DELETE-RESULT-LIST-ENTRY',
'DELETE-SELECTED-ROW', 'DELETE-SELECTED-ROWS', 'DELIMITER', 'DESC',
'DESCENDING', 'DESC', 'DESCE', 'DESCEN', 'DESCEND', 'DESCENDI', 'DESCENDIN',
'DESELECT-FOCUSED-ROW', 'DESELECTION', 'DESELECT-ROWS',
'DESELECT-SELECTED-ROW', 'DESTRUCTOR', 'DIALOG-BOX', 'DICTIONARY', 'DICT',
'DICTI', 'DICTIO', 'DICTION', 'DICTIONA', 'DICTIONAR', 'DIR', 'DISABLE',
'DISABLE-AUTO-ZAP', 'DISABLED', 'DISABLE-DUMP-TRIGGERS',
'DISABLE-LOAD-TRIGGERS', 'DISCONNECT', 'DISCON', 'DISCONN', 'DISCONNE',
'DISCONNEC', 'DISP', 'DISPLAY', 'DISP', 'DISPL', 'DISPLA',
'DISPLAY-MESSAGE', 'DISPLAY-TYPE', 'DISPLAY-T', 'DISPLAY-TY', 'DISPLAY-TYP',
'DISTINCT', 'DO', 'DOMAIN-DESCRIPTION', 'DOMAIN-NAME', 'DOMAIN-TYPE', 'DOS',
'DOUBLE', 'DOWN', 'DRAG-ENABLED', 'DROP', 'DROP-DOWN', 'DROP-DOWN-LIST',
'DROP-FILE-NOTIFY', 'DROP-TARGET', 'DUMP', 'DYNAMIC', 'DYNAMIC-FUNCTION',
'EACH', 'ECHO', 'EDGE-CHARS', 'EDGE', 'EDGE-', 'EDGE-C', 'EDGE-CH',
'EDGE-CHA', 'EDGE-CHAR', 'EDGE-PIXELS', 'EDGE-P', 'EDGE-PI', 'EDGE-PIX',
'EDGE-PIXE', 'EDGE-PIXEL', 'EDIT-CAN-PASTE', 'EDIT-CAN-UNDO', 'EDIT-CLEAR',
'EDIT-COPY', 'EDIT-CUT', 'EDITING', 'EDITOR', 'EDIT-PASTE', 'EDIT-UNDO',
'ELSE', 'EMPTY', 'EMPTY-TEMP-TABLE', 'ENABLE', 'ENABLED-FIELDS', 'ENCODE',
'ENCRYPT', 'ENCRYPT-AUDIT-MAC-KEY', 'ENCRYPTION-SALT', 'END',
'END-DOCUMENT', 'END-ELEMENT', 'END-EVENT-GROUP', 'END-FILE-DROP', 'ENDKEY',
'END-KEY', 'END-MOVE', 'END-RESIZE', 'END-ROW-RESIZE', 'END-USER-PROMPT',
'ENTERED', 'ENTRY', 'EQ', 'ERROR', 'ERROR-COLUMN', 'ERROR-COL',
'ERROR-COLU', 'ERROR-COLUM', 'ERROR-ROW', 'ERROR-STACK-TRACE',
'ERROR-STATUS', 'ERROR-STAT', 'ERROR-STATU', 'ESCAPE', 'ETIME',
'EVENT-GROUP-ID', 'EVENT-PROCEDURE', 'EVENT-PROCEDURE-CONTEXT', 'EVENTS',
'EVENT', 'EVENT-TYPE', 'EVENT-T', 'EVENT-TY', 'EVENT-TYP', 'EXCEPT',
'EXCLUSIVE-ID', 'EXCLUSIVE-LOCK', 'EXCLUSIVE', 'EXCLUSIVE-', 'EXCLUSIVE-L',
'EXCLUSIVE-LO', 'EXCLUSIVE-LOC', 'EXCLUSIVE-WEB-USER', 'EXECUTE', 'EXISTS',
'EXP', 'EXPAND', 'EXPANDABLE', 'EXPLICIT', 'EXPORT', 'EXPORT-PRINCIPAL',
'EXTENDED', 'EXTENT', 'EXTERNAL', 'FALSE', 'FETCH', 'FETCH-SELECTED-ROW',
'FGCOLOR', 'FGC', 'FGCO', 'FGCOL', 'FGCOLO', 'FIELD', 'FIELDS', 'FIELD',
'FILE', 'FILE-CREATE-DATE', 'FILE-CREATE-TIME', 'FILE-INFORMATION',
'FILE-INFO', 'FILE-INFOR', 'FILE-INFORM', 'FILE-INFORMA', 'FILE-INFORMAT',
'FILE-INFORMATI', 'FILE-INFORMATIO', 'FILE-MOD-DATE', 'FILE-MOD-TIME',
'FILENAME', 'FILE-NAME', 'FILE-OFFSET', 'FILE-OFF', 'FILE-OFFS',
'FILE-OFFSE', 'FILE-SIZE', 'FILE-TYPE', 'FILL', 'FILLED', 'FILL-IN',
'FILTERS', 'FINAL', 'FINALLY', 'FIND', 'FIND-BY-ROWID',
'FIND-CASE-SENSITIVE', 'FIND-CURRENT', 'FINDER', 'FIND-FIRST',
'FIND-GLOBAL', 'FIND-LAST', 'FIND-NEXT-OCCURRENCE', 'FIND-PREV-OCCURRENCE',
'FIND-SELECT', 'FIND-UNIQUE', 'FIND-WRAP-AROUND', 'FIRST',
'FIRST-ASYNCH-REQUEST', 'FIRST-CHILD', 'FIRST-COLUMN', 'FIRST-FORM',
'FIRST-OBJECT', 'FIRST-OF', 'FIRST-PROCEDURE', 'FIRST-PROC', 'FIRST-PROCE',
'FIRST-PROCED', 'FIRST-PROCEDU', 'FIRST-PROCEDUR', 'FIRST-SERVER',
'FIRST-TAB-ITEM', 'FIRST-TAB-I', 'FIRST-TAB-IT', 'FIRST-TAB-ITE',
'FIT-LAST-COLUMN', 'FIXED-ONLY', 'FLAT-BUTTON', 'FLOAT', 'FOCUS',
'FOCUSED-ROW', 'FOCUSED-ROW-SELECTED', 'FONT', 'FONT-TABLE', 'FOR',
'FORCE-FILE', 'FOREGROUND', 'FORE', 'FOREG', 'FOREGR', 'FOREGRO',
'FOREGROU', 'FOREGROUN', 'FORM', 'FORMAT', 'FORM', 'FORMA', 'FORMATTED',
'FORMATTE', 'FORM-LONG-INPUT', 'FORWARD', 'FORWARDS', 'FORWARD', 'FRAGMENT',
'FRAGMEN', 'FRAME', 'FRAM', 'FRAME-COL', 'FRAME-DB', 'FRAME-DOWN',
'FRAME-FIELD', 'FRAME-FILE', 'FRAME-INDEX', 'FRAME-INDE', 'FRAME-LINE',
'FRAME-NAME', 'FRAME-ROW', 'FRAME-SPACING', 'FRAME-SPA', 'FRAME-SPAC',
'FRAME-SPACI', 'FRAME-SPACIN', 'FRAME-VALUE', 'FRAME-VAL', 'FRAME-VALU',
'FRAME-X', 'FRAME-Y', 'FREQUENCY', 'FROM', 'FROM-CHARS', 'FROM-C',
'FROM-CH', 'FROM-CHA', 'FROM-CHAR', 'FROM-CURRENT', 'FROM-CUR', 'FROM-CURR',
'FROM-CURRE', 'FROM-CURREN', 'FROM-PIXELS', 'FROM-P', 'FROM-PI', 'FROM-PIX',
'FROM-PIXE', 'FROM-PIXEL', 'FULL-HEIGHT-CHARS', 'FULL-HEIGHT',
'FULL-HEIGHT-', 'FULL-HEIGHT-C', 'FULL-HEIGHT-CH', 'FULL-HEIGHT-CHA',
'FULL-HEIGHT-CHAR', 'FULL-HEIGHT-PIXELS', 'FULL-HEIGHT-P', 'FULL-HEIGHT-PI',
'FULL-HEIGHT-PIX', 'FULL-HEIGHT-PIXE', 'FULL-HEIGHT-PIXEL', 'FULL-PATHNAME',
'FULL-PATHN', 'FULL-PATHNA', 'FULL-PATHNAM', 'FULL-WIDTH-CHARS',
'FULL-WIDTH', 'FULL-WIDTH-', 'FULL-WIDTH-C', 'FULL-WIDTH-CH',
'FULL-WIDTH-CHA', 'FULL-WIDTH-CHAR', 'FULL-WIDTH-PIXELS', 'FULL-WIDTH-P',
'FULL-WIDTH-PI', 'FULL-WIDTH-PIX', 'FULL-WIDTH-PIXE', 'FULL-WIDTH-PIXEL',
'FUNCTION', 'FUNCTION-CALL-TYPE', 'GATEWAYS', 'GATEWAY', 'GE',
'GENERATE-MD5', 'GENERATE-PBE-KEY', 'GENERATE-PBE-SALT',
'GENERATE-RANDOM-KEY', 'GENERATE-UUID', 'GET', 'GET-ATTR-CALL-TYPE',
'GET-ATTRIBUTE-NODE', 'GET-BINARY-DATA', 'GET-BLUE-VALUE', 'GET-BLUE',
'GET-BLUE-', 'GET-BLUE-V', 'GET-BLUE-VA', 'GET-BLUE-VAL', 'GET-BLUE-VALU',
'GET-BROWSE-COLUMN', 'GET-BUFFER-HANDLEGETBYTE', 'GET-BYTE',
'GET-CALLBACK-PROC-CONTEXT', 'GET-CALLBACK-PROC-NAME', 'GET-CGI-LIST',
'GET-CGI-LONG-VALUE', 'GET-CGI-VALUE', 'GET-CODEPAGES', 'GET-COLLATIONS',
'GET-CONFIG-VALUE', 'GET-CURRENT', 'GET-DOUBLE', 'GET-DROPPED-FILE',
'GET-DYNAMIC', 'GET-ERROR-COLUMN', 'GET-ERROR-ROW', 'GET-FILE',
'GET-FILE-NAME', 'GET-FILE-OFFSET', 'GET-FILE-OFFSE', 'GET-FIRST',
'GET-FLOAT', 'GET-GREEN-VALUE', 'GET-GREEN', 'GET-GREEN-', 'GET-GREEN-V',
'GET-GREEN-VA', 'GET-GREEN-VAL', 'GET-GREEN-VALU',
'GET-INDEX-BY-NAMESPACE-NAME', 'GET-INDEX-BY-QNAME', 'GET-INT64',
'GET-ITERATION', 'GET-KEY-VALUE', 'GET-KEY-VAL', 'GET-KEY-VALU', 'GET-LAST',
'GET-LOCALNAME-BY-INDEX', 'GET-LONG', 'GET-MESSAGE', 'GET-NEXT',
'GET-NUMBER', 'GET-POINTER-VALUE', 'GET-PREV', 'GET-PRINTERS',
'GET-PROPERTY', 'GET-QNAME-BY-INDEX', 'GET-RED-VALUE', 'GET-RED',
'GET-RED-', 'GET-RED-V', 'GET-RED-VA', 'GET-RED-VAL', 'GET-RED-VALU',
'GET-REPOSITIONED-ROW', 'GET-RGB-VALUE', 'GET-SELECTED-WIDGET',
'GET-SELECTED', 'GET-SELECTED-', 'GET-SELECTED-W', 'GET-SELECTED-WI',
'GET-SELECTED-WID', 'GET-SELECTED-WIDG', 'GET-SELECTED-WIDGE', 'GET-SHORT',
'GET-SIGNATURE', 'GET-SIZE', 'GET-STRING', 'GET-TAB-ITEM',
'GET-TEXT-HEIGHT-CHARS', 'GET-TEXT-HEIGHT', 'GET-TEXT-HEIGHT-',
'GET-TEXT-HEIGHT-C', 'GET-TEXT-HEIGHT-CH', 'GET-TEXT-HEIGHT-CHA',
'GET-TEXT-HEIGHT-CHAR', 'GET-TEXT-HEIGHT-PIXELS', 'GET-TEXT-HEIGHT-P',
'GET-TEXT-HEIGHT-PI', 'GET-TEXT-HEIGHT-PIX', 'GET-TEXT-HEIGHT-PIXE',
'GET-TEXT-HEIGHT-PIXEL', 'GET-TEXT-WIDTH-CHARS', 'GET-TEXT-WIDTH',
'GET-TEXT-WIDTH-', 'GET-TEXT-WIDTH-C', 'GET-TEXT-WIDTH-CH',
'GET-TEXT-WIDTH-CHA', 'GET-TEXT-WIDTH-CHAR', 'GET-TEXT-WIDTH-PIXELS',
'GET-TEXT-WIDTH-P', 'GET-TEXT-WIDTH-PI', 'GET-TEXT-WIDTH-PIX',
'GET-TEXT-WIDTH-PIXE', 'GET-TEXT-WIDTH-PIXEL', 'GET-TYPE-BY-INDEX',
'GET-TYPE-BY-NAMESPACE-NAME', 'GET-TYPE-BY-QNAME', 'GET-UNSIGNED-LONG',
'GET-UNSIGNED-SHORT', 'GET-URI-BY-INDEX', 'GET-VALUE-BY-INDEX',
'GET-VALUE-BY-NAMESPACE-NAME', 'GET-VALUE-BY-QNAME', 'GET-WAIT-STATE',
'GLOBAL', 'GO-ON', 'GO-PENDING', 'GO-PEND', 'GO-PENDI', 'GO-PENDIN',
'GRANT', 'GRAPHIC-EDGE', 'GRAPHIC-E', 'GRAPHIC-ED', 'GRAPHIC-EDG',
'GRID-FACTOR-HORIZONTAL', 'GRID-FACTOR-H', 'GRID-FACTOR-HO',
'GRID-FACTOR-HOR', 'GRID-FACTOR-HORI', 'GRID-FACTOR-HORIZ',
'GRID-FACTOR-HORIZO', 'GRID-FACTOR-HORIZON', 'GRID-FACTOR-HORIZONT',
'GRID-FACTOR-HORIZONTA', 'GRID-FACTOR-VERTICAL', 'GRID-FACTOR-V',
'GRID-FACTOR-VE', 'GRID-FACTOR-VER', 'GRID-FACTOR-VERT', 'GRID-FACTOR-VERT',
'GRID-FACTOR-VERTI', 'GRID-FACTOR-VERTIC', 'GRID-FACTOR-VERTICA',
'GRID-SNAP', 'GRID-UNIT-HEIGHT-CHARS', 'GRID-UNIT-HEIGHT',
'GRID-UNIT-HEIGHT-', 'GRID-UNIT-HEIGHT-C', 'GRID-UNIT-HEIGHT-CH',
'GRID-UNIT-HEIGHT-CHA', 'GRID-UNIT-HEIGHT-PIXELS', 'GRID-UNIT-HEIGHT-P',
'GRID-UNIT-HEIGHT-PI', 'GRID-UNIT-HEIGHT-PIX', 'GRID-UNIT-HEIGHT-PIXE',
'GRID-UNIT-HEIGHT-PIXEL', 'GRID-UNIT-WIDTH-CHARS', 'GRID-UNIT-WIDTH',
'GRID-UNIT-WIDTH-', 'GRID-UNIT-WIDTH-C', 'GRID-UNIT-WIDTH-CH',
'GRID-UNIT-WIDTH-CHA', 'GRID-UNIT-WIDTH-CHAR', 'GRID-UNIT-WIDTH-PIXELS',
'GRID-UNIT-WIDTH-P', 'GRID-UNIT-WIDTH-PI', 'GRID-UNIT-WIDTH-PIX',
'GRID-UNIT-WIDTH-PIXE', 'GRID-UNIT-WIDTH-PIXEL', 'GRID-VISIBLE', 'GROUP',
'GT', 'GUID', 'HANDLER', 'HAS-RECORDS', 'HAVING', 'HEADER', 'HEIGHT-CHARS',
'HEIGHT', 'HEIGHT-', 'HEIGHT-C', 'HEIGHT-CH', 'HEIGHT-CHA', 'HEIGHT-CHAR',
'HEIGHT-PIXELS', 'HEIGHT-P', 'HEIGHT-PI', 'HEIGHT-PIX', 'HEIGHT-PIXE',
'HEIGHT-PIXEL', 'HELP', 'HEX-DECODE', 'HEX-ENCODE', 'HIDDEN', 'HIDE',
'HORIZONTAL', 'HORI', 'HORIZ', 'HORIZO', 'HORIZON', 'HORIZONT', 'HORIZONTA',
'HOST-BYTE-ORDER', 'HTML-CHARSET', 'HTML-END-OF-LINE', 'HTML-END-OF-PAGE',
'HTML-FRAME-BEGIN', 'HTML-FRAME-END', 'HTML-HEADER-BEGIN',
'HTML-HEADER-END', 'HTML-TITLE-BEGIN', 'HTML-TITLE-END', 'HWND', 'ICON',
'IF', 'IMAGE', 'IMAGE-DOWN', 'IMAGE-INSENSITIVE', 'IMAGE-SIZE',
'IMAGE-SIZE-CHARS', 'IMAGE-SIZE-C', 'IMAGE-SIZE-CH', 'IMAGE-SIZE-CHA',
'IMAGE-SIZE-CHAR', 'IMAGE-SIZE-PIXELS', 'IMAGE-SIZE-P', 'IMAGE-SIZE-PI',
'IMAGE-SIZE-PIX', 'IMAGE-SIZE-PIXE', 'IMAGE-SIZE-PIXEL', 'IMAGE-UP',
'IMMEDIATE-DISPLAY', 'IMPLEMENTS', 'IMPORT', 'IMPORT-PRINCIPAL', 'IN',
'INCREMENT-EXCLUSIVE-ID', 'INDEX', 'INDEXED-REPOSITION', 'INDEX-HINT',
'INDEX-INFORMATION', 'INDICATOR', 'INFORMATION', 'INFO', 'INFOR', 'INFORM',
'INFORMA', 'INFORMAT', 'INFORMATI', 'INFORMATIO', 'IN-HANDLE',
'INHERIT-BGCOLOR', 'INHERIT-BGC', 'INHERIT-BGCO', 'INHERIT-BGCOL',
'INHERIT-BGCOLO', 'INHERIT-FGCOLOR', 'INHERIT-FGC', 'INHERIT-FGCO',
'INHERIT-FGCOL', 'INHERIT-FGCOLO', 'INHERITS', 'INITIAL', 'INIT', 'INITI',
'INITIA', 'INITIAL-DIR', 'INITIAL-FILTER', 'INITIALIZE-DOCUMENT-TYPE',
'INITIATE', 'INNER-CHARS', 'INNER-LINES', 'INPUT', 'INPUT-OUTPUT',
'INPUT-O', 'INPUT-OU', 'INPUT-OUT', 'INPUT-OUTP', 'INPUT-OUTPU',
'INPUT-VALUE', 'INSERT', 'INSERT-ATTRIBUTE', 'INSERT-BACKTAB', 'INSERT-B',
'INSERT-BA', 'INSERT-BAC', 'INSERT-BACK', 'INSERT-BACKT', 'INSERT-BACKTA',
'INSERT-FILE', 'INSERT-ROW', 'INSERT-STRING', 'INSERT-TAB', 'INSERT-T',
'INSERT-TA', 'INTERFACE', 'INTERNAL-ENTRIES', 'INTO', 'INVOKE', 'IS',
'IS-ATTR-SPACE', 'IS-ATTR', 'IS-ATTR-', 'IS-ATTR-S', 'IS-ATTR-SP',
'IS-ATTR-SPA', 'IS-ATTR-SPAC', 'IS-CLASS', 'IS-CLAS', 'IS-LEAD-BYTE',
'IS-ATTR', 'IS-OPEN', 'IS-PARAMETER-SET', 'IS-ROW-SELECTED', 'IS-SELECTED',
'ITEM', 'ITEMS-PER-ROW', 'JOIN', 'JOIN-BY-SQLDB', 'KBLABEL',
'KEEP-CONNECTION-OPEN', 'KEEP-FRAME-Z-ORDER', 'KEEP-FRAME-Z',
'KEEP-FRAME-Z-', 'KEEP-FRAME-Z-O', 'KEEP-FRAME-Z-OR', 'KEEP-FRAME-Z-ORD',
'KEEP-FRAME-Z-ORDE', 'KEEP-MESSAGES', 'KEEP-SECURITY-CACHE',
'KEEP-TAB-ORDER', 'KEY', 'KEYCODE', 'KEY-CODE', 'KEYFUNCTION', 'KEYFUNC',
'KEYFUNCT', 'KEYFUNCTI', 'KEYFUNCTIO', 'KEY-FUNCTION', 'KEY-FUNC',
'KEY-FUNCT', 'KEY-FUNCTI', 'KEY-FUNCTIO', 'KEYLABEL', 'KEY-LABEL', 'KEYS',
'KEYWORD', 'KEYWORD-ALL', 'LABEL', 'LABEL-BGCOLOR', 'LABEL-BGC',
'LABEL-BGCO', 'LABEL-BGCOL', 'LABEL-BGCOLO', 'LABEL-DCOLOR', 'LABEL-DC',
'LABEL-DCO', 'LABEL-DCOL', 'LABEL-DCOLO', 'LABEL-FGCOLOR', 'LABEL-FGC',
'LABEL-FGCO', 'LABEL-FGCOL', 'LABEL-FGCOLO', 'LABEL-FONT', 'LABEL-PFCOLOR',
'LABEL-PFC', 'LABEL-PFCO', 'LABEL-PFCOL', 'LABEL-PFCOLO', 'LABELS',
'LANDSCAPE', 'LANGUAGES', 'LANGUAGE', 'LARGE', 'LARGE-TO-SMALL', 'LAST',
'LAST-ASYNCH-REQUEST', 'LAST-BATCH', 'LAST-CHILD', 'LAST-EVENT',
'LAST-EVEN', 'LAST-FORM', 'LASTKEY', 'LAST-KEY', 'LAST-OBJECT', 'LAST-OF',
'LAST-PROCEDURE', 'LAST-PROCE', 'LAST-PROCED', 'LAST-PROCEDU',
'LAST-PROCEDUR', 'LAST-SERVER', 'LAST-TAB-ITEM', 'LAST-TAB-I',
'LAST-TAB-IT', 'LAST-TAB-ITE', 'LC', 'LDBNAME', 'LE', 'LEAVE',
'LEFT-ALIGNED', 'LEFT-ALIGN', 'LEFT-ALIGNE', 'LEFT-TRIM', 'LENGTH',
'LIBRARY', 'LIKE', 'LIKE-SEQUENTIAL', 'LINE', 'LINE-COUNTER', 'LINE-COUNT',
'LINE-COUNTE', 'LIST-EVENTS', 'LISTING', 'LISTI', 'LISTIN',
'LIST-ITEM-PAIRS', 'LIST-ITEMS', 'LIST-PROPERTY-NAMES', 'LIST-QUERY-ATTRS',
'LIST-SET-ATTRS', 'LIST-WIDGETS', 'LITERAL-QUESTION', 'LITTLE-ENDIAN',
'LOAD', 'LOAD-DOMAINS', 'LOAD-ICON', 'LOAD-IMAGE', 'LOAD-IMAGE-DOWN',
'LOAD-IMAGE-INSENSITIVE', 'LOAD-IMAGE-UP', 'LOAD-MOUSE-POINTER',
'LOAD-MOUSE-P', 'LOAD-MOUSE-PO', 'LOAD-MOUSE-POI', 'LOAD-MOUSE-POIN',
'LOAD-MOUSE-POINT', 'LOAD-MOUSE-POINTE', 'LOAD-PICTURE', 'LOAD-SMALL-ICON',
'LOCAL-NAME', 'LOCATOR-COLUMN-NUMBER', 'LOCATOR-LINE-NUMBER',
'LOCATOR-PUBLIC-ID', 'LOCATOR-SYSTEM-ID', 'LOCATOR-TYPE', 'LOCKED',
'LOCK-REGISTRATION', 'LOG', 'LOG-AUDIT-EVENT', 'LOGIN-EXPIRATION-TIMESTAMP',
'LOGIN-HOST', 'LOGIN-STATE', 'LOG-MANAGER', 'LOGOUT', 'LOOKAHEAD', 'LOOKUP',
'LT', 'MACHINE-CLASS', 'MANDATORY', 'MANUAL-HIGHLIGHT', 'MAP',
'MARGIN-EXTRA', 'MARGIN-HEIGHT-CHARS', 'MARGIN-HEIGHT', 'MARGIN-HEIGHT-',
'MARGIN-HEIGHT-C', 'MARGIN-HEIGHT-CH', 'MARGIN-HEIGHT-CHA',
'MARGIN-HEIGHT-CHAR', 'MARGIN-HEIGHT-PIXELS', 'MARGIN-HEIGHT-P',
'MARGIN-HEIGHT-PI', 'MARGIN-HEIGHT-PIX', 'MARGIN-HEIGHT-PIXE',
'MARGIN-HEIGHT-PIXEL', 'MARGIN-WIDTH-CHARS', 'MARGIN-WIDTH',
'MARGIN-WIDTH-', 'MARGIN-WIDTH-C', 'MARGIN-WIDTH-CH', 'MARGIN-WIDTH-CHA',
'MARGIN-WIDTH-CHAR', 'MARGIN-WIDTH-PIXELS', 'MARGIN-WIDTH-P',
'MARGIN-WIDTH-PI', 'MARGIN-WIDTH-PIX', 'MARGIN-WIDTH-PIXE',
'MARGIN-WIDTH-PIXEL', 'MARK-NEW', 'MARK-ROW-STATE', 'MATCHES', 'MAX',
'MAX-BUTTON', 'MAX-CHARS', 'MAX-DATA-GUESS', 'MAX-HEIGHT',
'MAX-HEIGHT-CHARS', 'MAX-HEIGHT-C', 'MAX-HEIGHT-CH', 'MAX-HEIGHT-CHA',
'MAX-HEIGHT-CHAR', 'MAX-HEIGHT-PIXELS', 'MAX-HEIGHT-P', 'MAX-HEIGHT-PI',
'MAX-HEIGHT-PIX', 'MAX-HEIGHT-PIXE', 'MAX-HEIGHT-PIXEL', 'MAXIMIZE',
'MAXIMUM', 'MAX', 'MAXI', 'MAXIM', 'MAXIMU', 'MAXIMUM-LEVEL', 'MAX-ROWS',
'MAX-SIZE', 'MAX-VALUE', 'MAX-VAL', 'MAX-VALU', 'MAX-WIDTH',
'MAX-WIDTH-CHARS', 'MAX-WIDTH', 'MAX-WIDTH-', 'MAX-WIDTH-C', 'MAX-WIDTH-CH',
'MAX-WIDTH-CHA', 'MAX-WIDTH-CHAR', 'MAX-WIDTH-PIXELS', 'MAX-WIDTH-P',
'MAX-WIDTH-PI', 'MAX-WIDTH-PIX', 'MAX-WIDTH-PIXE', 'MAX-WIDTH-PIXEL',
'MD5-DIGEST', 'MEMBER', 'MEMPTR-TO-NODE-VALUE', 'MENU', 'MENUBAR',
'MENU-BAR', 'MENU-ITEM', 'MENU-KEY', 'MENU-K', 'MENU-KE', 'MENU-MOUSE',
'MENU-M', 'MENU-MO', 'MENU-MOU', 'MENU-MOUS', 'MERGE-BY-FIELD', 'MESSAGE',
'MESSAGE-AREA', 'MESSAGE-AREA-FONT', 'MESSAGE-LINES', 'METHOD', 'MIN',
'MIN-BUTTON', 'MIN-COLUMN-WIDTH-CHARS', 'MIN-COLUMN-WIDTH-C',
'MIN-COLUMN-WIDTH-CH', 'MIN-COLUMN-WIDTH-CHA', 'MIN-COLUMN-WIDTH-CHAR',
'MIN-COLUMN-WIDTH-PIXELS', 'MIN-COLUMN-WIDTH-P', 'MIN-COLUMN-WIDTH-PI',
'MIN-COLUMN-WIDTH-PIX', 'MIN-COLUMN-WIDTH-PIXE', 'MIN-COLUMN-WIDTH-PIXEL',
'MIN-HEIGHT-CHARS', 'MIN-HEIGHT', 'MIN-HEIGHT-', 'MIN-HEIGHT-C',
'MIN-HEIGHT-CH', 'MIN-HEIGHT-CHA', 'MIN-HEIGHT-CHAR', 'MIN-HEIGHT-PIXELS',
'MIN-HEIGHT-P', 'MIN-HEIGHT-PI', 'MIN-HEIGHT-PIX', 'MIN-HEIGHT-PIXE',
'MIN-HEIGHT-PIXEL', 'MINIMUM', 'MIN', 'MINI', 'MINIM', 'MINIMU', 'MIN-SIZE',
'MIN-VALUE', 'MIN-VAL', 'MIN-VALU', 'MIN-WIDTH-CHARS', 'MIN-WIDTH',
'MIN-WIDTH-', 'MIN-WIDTH-C', 'MIN-WIDTH-CH', 'MIN-WIDTH-CHA',
'MIN-WIDTH-CHAR', 'MIN-WIDTH-PIXELS', 'MIN-WIDTH-P', 'MIN-WIDTH-PI',
'MIN-WIDTH-PIX', 'MIN-WIDTH-PIXE', 'MIN-WIDTH-PIXEL', 'MODIFIED', 'MODULO',
'MOD', 'MODU', 'MODUL', 'MONTH', 'MOUSE', 'MOUSE-POINTER', 'MOUSE-P',
'MOUSE-PO', 'MOUSE-POI', 'MOUSE-POIN', 'MOUSE-POINT', 'MOUSE-POINTE',
'MOVABLE', 'MOVE-AFTER-TAB-ITEM', 'MOVE-AFTER', 'MOVE-AFTER-',
'MOVE-AFTER-T', 'MOVE-AFTER-TA', 'MOVE-AFTER-TAB', 'MOVE-AFTER-TAB-',
'MOVE-AFTER-TAB-I', 'MOVE-AFTER-TAB-IT', 'MOVE-AFTER-TAB-ITE',
'MOVE-BEFORE-TAB-ITEM', 'MOVE-BEFOR', 'MOVE-BEFORE', 'MOVE-BEFORE-',
'MOVE-BEFORE-T', 'MOVE-BEFORE-TA', 'MOVE-BEFORE-TAB', 'MOVE-BEFORE-TAB-',
'MOVE-BEFORE-TAB-I', 'MOVE-BEFORE-TAB-IT', 'MOVE-BEFORE-TAB-ITE',
'MOVE-COLUMN', 'MOVE-COL', 'MOVE-COLU', 'MOVE-COLUM', 'MOVE-TO-BOTTOM',
'MOVE-TO-B', 'MOVE-TO-BO', 'MOVE-TO-BOT', 'MOVE-TO-BOTT', 'MOVE-TO-BOTTO',
'MOVE-TO-EOF', 'MOVE-TO-TOP', 'MOVE-TO-T', 'MOVE-TO-TO', 'MPE',
'MULTI-COMPILE', 'MULTIPLE', 'MULTIPLE-KEY', 'MULTITASKING-INTERVAL',
'MUST-EXIST', 'NAME', 'NAMESPACE-PREFIX', 'NAMESPACE-URI', 'NATIVE', 'NE',
'NEEDS-APPSERVER-PROMPT', 'NEEDS-PROMPT', 'NEW', 'NEW-INSTANCE', 'NEW-ROW',
'NEXT', 'NEXT-COLUMN', 'NEXT-PROMPT', 'NEXT-ROWID', 'NEXT-SIBLING',
'NEXT-TAB-ITEM', 'NEXT-TAB-I', 'NEXT-TAB-IT', 'NEXT-TAB-ITE', 'NEXT-VALUE',
'NO', 'NO-APPLY', 'NO-ARRAY-MESSAGE', 'NO-ASSIGN', 'NO-ATTR-LIST',
'NO-ATTR', 'NO-ATTR-', 'NO-ATTR-L', 'NO-ATTR-LI', 'NO-ATTR-LIS',
'NO-ATTR-SPACE', 'NO-ATTR', 'NO-ATTR-', 'NO-ATTR-S', 'NO-ATTR-SP',
'NO-ATTR-SPA', 'NO-ATTR-SPAC', 'NO-AUTO-VALIDATE', 'NO-BIND-WHERE',
'NO-BOX', 'NO-CONSOLE', 'NO-CONVERT', 'NO-CONVERT-3D-COLORS',
'NO-CURRENT-VALUE', 'NO-DEBUG', 'NODE-VALUE-TO-MEMPTR', 'NO-DRAG',
'NO-ECHO', 'NO-EMPTY-SPACE', 'NO-ERROR', 'NO-FILL', 'NO-F', 'NO-FI',
'NO-FIL', 'NO-FOCUS', 'NO-HELP', 'NO-HIDE', 'NO-INDEX-HINT',
'NO-INHERIT-BGCOLOR', 'NO-INHERIT-BGC', 'NO-INHERIT-BGCO', 'LABEL-BGCOL',
'LABEL-BGCOLO', 'NO-INHERIT-FGCOLOR', 'NO-INHERIT-FGC', 'NO-INHERIT-FGCO',
'NO-INHERIT-FGCOL', 'NO-INHERIT-FGCOLO', 'NO-JOIN-BY-SQLDB', 'NO-LABELS',
'NO-LABE', 'NO-LOBS', 'NO-LOCK', 'NO-LOOKAHEAD', 'NO-MAP', 'NO-MESSAGE',
'NO-MES', 'NO-MESS', 'NO-MESSA', 'NO-MESSAG', 'NONAMESPACE-SCHEMA-LOCATION',
'NONE', 'NO-PAUSE', 'NO-PREFETCH', 'NO-PREFE', 'NO-PREFET', 'NO-PREFETC',
'NORMALIZE', 'NO-ROW-MARKERS', 'NO-SCROLLBAR-VERTICAL',
'NO-SEPARATE-CONNECTION', 'NO-SEPARATORS', 'NOT', 'NO-TAB-STOP',
'NOT-ACTIVE', 'NO-UNDERLINE', 'NO-UND', 'NO-UNDE', 'NO-UNDER', 'NO-UNDERL',
'NO-UNDERLI', 'NO-UNDERLIN', 'NO-UNDO', 'NO-VALIDATE', 'NO-VAL', 'NO-VALI',
'NO-VALID', 'NO-VALIDA', 'NO-VALIDAT', 'NOW', 'NO-WAIT', 'NO-WORD-WRAP',
'NULL', 'NUM-ALIASES', 'NUM-ALI', 'NUM-ALIA', 'NUM-ALIAS', 'NUM-ALIASE',
'NUM-BUFFERS', 'NUM-BUTTONS', 'NUM-BUT', 'NUM-BUTT', 'NUM-BUTTO',
'NUM-BUTTON', 'NUM-COLUMNS', 'NUM-COL', 'NUM-COLU', 'NUM-COLUM',
'NUM-COLUMN', 'NUM-COPIES', 'NUM-DBS', 'NUM-DROPPED-FILES', 'NUM-ENTRIES',
'NUMERIC', 'NUMERIC-FORMAT', 'NUMERIC-F', 'NUMERIC-FO', 'NUMERIC-FOR',
'NUMERIC-FORM', 'NUMERIC-FORMA', 'NUM-FIELDS', 'NUM-FORMATS', 'NUM-ITEMS',
'NUM-ITERATIONS', 'NUM-LINES', 'NUM-LOCKED-COLUMNS', 'NUM-LOCKED-COL',
'NUM-LOCKED-COLU', 'NUM-LOCKED-COLUM', 'NUM-LOCKED-COLUMN', 'NUM-MESSAGES',
'NUM-PARAMETERS', 'NUM-REFERENCES', 'NUM-REPLACED', 'NUM-RESULTS',
'NUM-SELECTED-ROWS', 'NUM-SELECTED-WIDGETS', 'NUM-SELECTED',
'NUM-SELECTED-', 'NUM-SELECTED-W', 'NUM-SELECTED-WI', 'NUM-SELECTED-WID',
'NUM-SELECTED-WIDG', 'NUM-SELECTED-WIDGE', 'NUM-SELECTED-WIDGET',
'NUM-TABS', 'NUM-TO-RETAIN', 'NUM-VISIBLE-COLUMNS', 'OCTET-LENGTH', 'OF',
'OFF', 'OK', 'OK-CANCEL', 'OLD', 'ON', 'ON-FRAME-BORDER', 'ON-FRAME',
'ON-FRAME-', 'ON-FRAME-B', 'ON-FRAME-BO', 'ON-FRAME-BOR', 'ON-FRAME-BORD',
'ON-FRAME-BORDE', 'OPEN', 'OPSYS', 'OPTION', 'OR', 'ORDERED-JOIN',
'ORDINAL', 'OS-APPEND', 'OS-COMMAND', 'OS-COPY', 'OS-CREATE-DIR',
'OS-DELETE', 'OS-DIR', 'OS-DRIVES', 'OS-DRIVE', 'OS-ERROR', 'OS-GETENV',
'OS-RENAME', 'OTHERWISE', 'OUTPUT', 'OVERLAY', 'OVERRIDE', 'OWNER', 'PAGE',
'PAGE-BOTTOM', 'PAGE-BOT', 'PAGE-BOTT', 'PAGE-BOTTO', 'PAGED',
'PAGE-NUMBER', 'PAGE-NUM', 'PAGE-NUMB', 'PAGE-NUMBE', 'PAGE-SIZE',
'PAGE-TOP', 'PAGE-WIDTH', 'PAGE-WID', 'PAGE-WIDT', 'PARAMETER', 'PARAM',
'PARAME', 'PARAMET', 'PARAMETE', 'PARENT', 'PARSE-STATUS', 'PARTIAL-KEY',
'PASCAL', 'PASSWORD-FIELD', 'PATHNAME', 'PAUSE', 'PBE-HASH-ALGORITHM',
'PBE-HASH-ALG', 'PBE-HASH-ALGO', 'PBE-HASH-ALGOR', 'PBE-HASH-ALGORI',
'PBE-HASH-ALGORIT', 'PBE-HASH-ALGORITH', 'PBE-KEY-ROUNDS', 'PDBNAME',
'PERSISTENT', 'PERSIST', 'PERSISTE', 'PERSISTEN',
'PERSISTENT-CACHE-DISABLED', 'PFCOLOR', 'PFC', 'PFCO', 'PFCOL', 'PFCOLO',
'PIXELS', 'PIXELS-PER-COLUMN', 'PIXELS-PER-COL', 'PIXELS-PER-COLU',
'PIXELS-PER-COLUM', 'PIXELS-PER-ROW', 'POPUP-MENU', 'POPUP-M', 'POPUP-ME',
'POPUP-MEN', 'POPUP-ONLY', 'POPUP-O', 'POPUP-ON', 'POPUP-ONL', 'PORTRAIT',
'POSITION', 'PRECISION', 'PREFER-DATASET', 'PREPARED', 'PREPARE-STRING',
'PREPROCESS', 'PREPROC', 'PREPROCE', 'PREPROCES', 'PRESELECT', 'PRESEL',
'PRESELE', 'PRESELEC', 'PREV', 'PREV-COLUMN', 'PREV-SIBLING',
'PREV-TAB-ITEM', 'PREV-TAB-I', 'PREV-TAB-IT', 'PREV-TAB-ITE', 'PRIMARY',
'PRINTER', 'PRINTER-CONTROL-HANDLE', 'PRINTER-HDC', 'PRINTER-NAME',
'PRINTER-PORT', 'PRINTER-SETUP', 'PRIVATE', 'PRIVATE-DATA', 'PRIVATE-D',
'PRIVATE-DA', 'PRIVATE-DAT', 'PRIVILEGES', 'PROCEDURE', 'PROCE', 'PROCED',
'PROCEDU', 'PROCEDUR', 'PROCEDURE-CALL-TYPE', 'PROCESS', 'PROC-HANDLE',
'PROC-HA', 'PROC-HAN', 'PROC-HAND', 'PROC-HANDL', 'PROC-STATUS', 'PROC-ST',
'PROC-STA', 'PROC-STAT', 'PROC-STATU', 'proc-text', 'proc-text-buffe',
'PROFILER', 'PROGRAM-NAME', 'PROGRESS', 'PROGRESS-SOURCE', 'PROGRESS-S',
'PROGRESS-SO', 'PROGRESS-SOU', 'PROGRESS-SOUR', 'PROGRESS-SOURC', 'PROMPT',
'PROMPT-FOR', 'PROMPT-F', 'PROMPT-FO', 'PROMSGS', 'PROPATH', 'PROPERTY',
'PROTECTED', 'PROVERSION', 'PROVERS', 'PROVERSI', 'PROVERSIO', 'PROXY',
'PROXY-PASSWORD', 'PROXY-USERID', 'PUBLIC', 'PUBLIC-ID', 'PUBLISH',
'PUBLISHED-EVENTS', 'PUT', 'PUTBYTE', 'PUT-BYTE', 'PUT-DOUBLE', 'PUT-FLOAT',
'PUT-INT64', 'PUT-KEY-VALUE', 'PUT-KEY-VAL', 'PUT-KEY-VALU', 'PUT-LONG',
'PUT-SHORT', 'PUT-STRING', 'PUT-UNSIGNED-LONG', 'QUERY', 'QUERY-CLOSE',
'QUERY-OFF-END', 'QUERY-OPEN', 'QUERY-PREPARE', 'QUERY-TUNING', 'QUESTION',
'QUIT', 'QUOTER', 'RADIO-BUTTONS', 'RADIO-SET', 'RANDOM', 'RAW-TRANSFER',
'RCODE-INFORMATION', 'RCODE-INFO', 'RCODE-INFOR', 'RCODE-INFORM',
'RCODE-INFORMA', 'RCODE-INFORMAT', 'RCODE-INFORMATI', 'RCODE-INFORMATIO',
'READ-AVAILABLE', 'READ-EXACT-NUM', 'READ-FILE', 'READKEY', 'READ-ONLY',
'READ-XML', 'READ-XMLSCHEMA', 'REAL', 'RECORD-LENGTH', 'RECTANGLE', 'RECT',
'RECTA', 'RECTAN', 'RECTANG', 'RECTANGL', 'RECURSIVE', 'REFERENCE-ONLY',
'REFRESH', 'REFRESHABLE', 'REFRESH-AUDIT-POLICY', 'REGISTER-DOMAIN',
'RELEASE', 'REMOTE', 'REMOVE-EVENTS-PROCEDURE', 'REMOVE-SUPER-PROCEDURE',
'REPEAT', 'REPLACE', 'REPLACE-SELECTION-TEXT', 'REPOSITION',
'REPOSITION-BACKWARD', 'REPOSITION-FORWARD', 'REPOSITION-MODE',
'REPOSITION-TO-ROW', 'REPOSITION-TO-ROWID', 'REQUEST', 'RESET', 'RESIZABLE',
'RESIZA', 'RESIZAB', 'RESIZABL', 'RESIZE', 'RESTART-ROW', 'RESTART-ROWID',
'RETAIN', 'RETAIN-SHAPE', 'RETRY', 'RETRY-CANCEL', 'RETURN',
'RETURN-INSERTED', 'RETURN-INS', 'RETURN-INSE', 'RETURN-INSER',
'RETURN-INSERT', 'RETURN-INSERTE', 'RETURNS', 'RETURN-TO-START-DIR',
'RETURN-TO-START-DI', 'RETURN-VALUE', 'RETURN-VAL', 'RETURN-VALU',
'RETURN-VALUE-DATA-TYPE', 'REVERSE-FROM', 'REVERT', 'REVOKE', 'RGB-VALUE',
'RIGHT-ALIGNED', 'RETURN-ALIGN', 'RETURN-ALIGNE', 'RIGHT-TRIM', 'R-INDEX',
'ROLES', 'ROUND', 'ROUTINE-LEVEL', 'ROW', 'ROW-HEIGHT-CHARS', 'HEIGHT',
'ROW-HEIGHT-PIXELS', 'HEIGHT-P', 'ROW-MARKERS', 'ROW-OF', 'ROW-RESIZABLE',
'RULE', 'RUN', 'RUN-PROCEDURE', 'SAVE', 'SAVE-AS', 'SAVE-FILE',
'SAX-COMPLETE', 'SAX-COMPLE', 'SAX-COMPLET', 'SAX-PARSE', 'SAX-PARSE-FIRST',
'SAX-PARSE-NEXT', 'SAX-PARSER-ERROR', 'SAX-RUNNING', 'SAX-UNINITIALIZED',
'SAX-WRITE-BEGIN', 'SAX-WRITE-COMPLETE', 'SAX-WRITE-CONTENT',
'SAX-WRITE-ELEMENT', 'SAX-WRITE-ERROR', 'SAX-WRITE-IDLE', 'SAX-WRITER',
'SAX-WRITE-TAG', 'SCHEMA', 'SCHEMA-LOCATION', 'SCHEMA-MARSHAL',
'SCHEMA-PATH', 'SCREEN', 'SCREEN-IO', 'SCREEN-LINES', 'SCREEN-VALUE',
'SCREEN-VAL', 'SCREEN-VALU', 'SCROLL', 'SCROLLABLE', 'SCROLLBAR-HORIZONTAL',
'SCROLLBAR-H', 'SCROLLBAR-HO', 'SCROLLBAR-HOR', 'SCROLLBAR-HORI',
'SCROLLBAR-HORIZ', 'SCROLLBAR-HORIZO', 'SCROLLBAR-HORIZON',
'SCROLLBAR-HORIZONT', 'SCROLLBAR-HORIZONTA', 'SCROLL-BARS',
'SCROLLBAR-VERTICAL', 'SCROLLBAR-V', 'SCROLLBAR-VE', 'SCROLLBAR-VER',
'SCROLLBAR-VERT', 'SCROLLBAR-VERTI', 'SCROLLBAR-VERTIC',
'SCROLLBAR-VERTICA', 'SCROLL-DELTA', 'SCROLLED-ROW-POSITION',
'SCROLLED-ROW-POS', 'SCROLLED-ROW-POSI', 'SCROLLED-ROW-POSIT',
'SCROLLED-ROW-POSITI', 'SCROLLED-ROW-POSITIO', 'SCROLLING', 'SCROLL-OFFSET',
'SCROLL-TO-CURRENT-ROW', 'SCROLL-TO-ITEM', 'SCROLL-TO-I', 'SCROLL-TO-IT',
'SCROLL-TO-ITE', 'SCROLL-TO-SELECTED-ROW', 'SDBNAME', 'SEAL',
'SEAL-TIMESTAMP', 'SEARCH', 'SEARCH-SELF', 'SEARCH-TARGET', 'SECTION',
'SECURITY-POLICY', 'SEEK', 'SELECT', 'SELECTABLE', 'SELECT-ALL', 'SELECTED',
'SELECT-FOCUSED-ROW', 'SELECTION', 'SELECTION-END', 'SELECTION-LIST',
'SELECTION-START', 'SELECTION-TEXT', 'SELECT-NEXT-ROW', 'SELECT-PREV-ROW',
'SELECT-ROW', 'SELF', 'SEND', 'send-sql-statement', 'send-sql', 'SENSITIVE',
'SEPARATE-CONNECTION', 'SEPARATOR-FGCOLOR', 'SEPARATORS', 'SERVER',
'SERVER-CONNECTION-BOUND', 'SERVER-CONNECTION-BOUND-REQUEST',
'SERVER-CONNECTION-CONTEXT', 'SERVER-CONNECTION-ID',
'SERVER-OPERATING-MODE', 'SESSION', 'SESSION-ID', 'SET', 'SET-APPL-CONTEXT',
'SET-ATTR-CALL-TYPE', 'SET-ATTRIBUTE-NODE', 'SET-BLUE-VALUE', 'SET-BLUE',
'SET-BLUE-', 'SET-BLUE-V', 'SET-BLUE-VA', 'SET-BLUE-VAL', 'SET-BLUE-VALU',
'SET-BREAK', 'SET-BUFFERS', 'SET-CALLBACK', 'SET-CLIENT', 'SET-COMMIT',
'SET-CONTENTS', 'SET-CURRENT-VALUE', 'SET-DB-CLIENT', 'SET-DYNAMIC',
'SET-EVENT-MANAGER-OPTION', 'SET-GREEN-VALUE', 'SET-GREEN', 'SET-GREEN-',
'SET-GREEN-V', 'SET-GREEN-VA', 'SET-GREEN-VAL', 'SET-GREEN-VALU',
'SET-INPUT-SOURCE', 'SET-OPTION', 'SET-OUTPUT-DESTINATION', 'SET-PARAMETER',
'SET-POINTER-VALUE', 'SET-PROPERTY', 'SET-RED-VALUE', 'SET-RED', 'SET-RED-',
'SET-RED-V', 'SET-RED-VA', 'SET-RED-VAL', 'SET-RED-VALU',
'SET-REPOSITIONED-ROW', 'SET-RGB-VALUE', 'SET-ROLLBACK', 'SET-SELECTION',
'SET-SIZE', 'SET-SORT-ARROW', 'SETUSERID', 'SETUSER', 'SETUSERI',
'SET-WAIT-STATE', 'SHA1-DIGEST', 'SHARED', 'SHARE-LOCK', 'SHARE', 'SHARE-',
'SHARE-L', 'SHARE-LO', 'SHARE-LOC', 'SHOW-IN-TASKBAR', 'SHOW-STATS',
'SHOW-STAT', 'SIDE-LABEL-HANDLE', 'SIDE-LABEL-H', 'SIDE-LABEL-HA',
'SIDE-LABEL-HAN', 'SIDE-LABEL-HAND', 'SIDE-LABEL-HANDL', 'SIDE-LABELS',
'SIDE-LAB', 'SIDE-LABE', 'SIDE-LABEL', 'SILENT', 'SIMPLE', 'SINGLE', 'SIZE',
'SIZE-CHARS', 'SIZE-C', 'SIZE-CH', 'SIZE-CHA', 'SIZE-CHAR', 'SIZE-PIXELS',
'SIZE-P', 'SIZE-PI', 'SIZE-PIX', 'SIZE-PIXE', 'SIZE-PIXEL', 'SKIP',
'SKIP-DELETED-RECORD', 'SLIDER', 'SMALL-ICON', 'SMALLINT', 'SMALL-TITLE',
'SOME', 'SORT', 'SORT-ASCENDING', 'SORT-NUMBER', 'SOURCE',
'SOURCE-PROCEDURE', 'SPACE', 'SQL', 'SQRT', 'SSL-SERVER-NAME', 'STANDALONE',
'START', 'START-DOCUMENT', 'START-ELEMENT', 'START-MOVE', 'START-RESIZE',
'START-ROW-RESIZE', 'STATE-DETAIL', 'STATIC', 'STATUS', 'STATUS-AREA',
'STATUS-AREA-FONT', 'STDCALL', 'STOP', 'STOP-PARSING', 'STOPPED', 'STOPPE',
'STORED-PROCEDURE', 'STORED-PROC', 'STORED-PROCE', 'STORED-PROCED',
'STORED-PROCEDU', 'STORED-PROCEDUR', 'STREAM', 'STREAM-HANDLE', 'STREAM-IO',
'STRETCH-TO-FIT', 'STRICT', 'STRING', 'STRING-VALUE', 'STRING-XREF',
'SUB-AVERAGE', 'SUB-AVE', 'SUB-AVER', 'SUB-AVERA', 'SUB-AVERAG',
'SUB-COUNT', 'SUB-MAXIMUM', 'SUM-MAX', 'SUM-MAXI', 'SUM-MAXIM',
'SUM-MAXIMU', 'SUB-MENU', 'SUBSUB-', 'MINIMUM', 'SUB-MIN', 'SUBSCRIBE',
'SUBSTITUTE', 'SUBST', 'SUBSTI', 'SUBSTIT', 'SUBSTITU', 'SUBSTITUT',
'SUBSTRING', 'SUBSTR', 'SUBSTRI', 'SUBSTRIN', 'SUB-TOTAL', 'SUBTYPE', 'SUM',
'SUPER', 'SUPER-PROCEDURES', 'SUPPRESS-NAMESPACE-PROCESSING',
'SUPPRESS-WARNINGS', 'SUPPRESS-W', 'SUPPRESS-WA', 'SUPPRESS-WAR',
'SUPPRESS-WARN', 'SUPPRESS-WARNI', 'SUPPRESS-WARNIN', 'SUPPRESS-WARNING',
'SYMMETRIC-ENCRYPTION-ALGORITHM', 'SYMMETRIC-ENCRYPTION-IV',
'SYMMETRIC-ENCRYPTION-KEY', 'SYMMETRIC-SUPPORT', 'SYSTEM-ALERT-BOXES',
'SYSTEM-ALERT', 'SYSTEM-ALERT-', 'SYSTEM-ALERT-B', 'SYSTEM-ALERT-BO',
'SYSTEM-ALERT-BOX', 'SYSTEM-ALERT-BOXE', 'SYSTEM-DIALOG', 'SYSTEM-HELP',
'SYSTEM-ID', 'TABLE', 'TABLE-HANDLE', 'TABLE-NUMBER', 'TAB-POSITION',
'TAB-STOP', 'TARGET', 'TARGET-PROCEDURE', 'TEMP-DIRECTORY', 'TEMP-DIR',
'TEMP-DIRE', 'TEMP-DIREC', 'TEMP-DIRECT', 'TEMP-DIRECTO', 'TEMP-DIRECTOR',
'TEMP-TABLE', 'TEMP-TABLE-PREPARE', 'TERM', 'TERMINAL', 'TERM', 'TERMI',
'TERMIN', 'TERMINA', 'TERMINATE', 'TEXT', 'TEXT-CURSOR', 'TEXT-SEG-GROW',
'TEXT-SELECTED', 'THEN', 'THIS-OBJECT', 'THIS-PROCEDURE', 'THREE-D',
'THROW', 'THROUGH', 'THRU', 'TIC-MARKS', 'TIME', 'TIME-SOURCE', 'TITLE',
'TITLE-BGCOLOR', 'TITLE-BGC', 'TITLE-BGCO', 'TITLE-BGCOL', 'TITLE-BGCOLO',
'TITLE-DCOLOR', 'TITLE-DC', 'TITLE-DCO', 'TITLE-DCOL', 'TITLE-DCOLO',
'TITLE-FGCOLOR', 'TITLE-FGC', 'TITLE-FGCO', 'TITLE-FGCOL', 'TITLE-FGCOLO',
'TITLE-FONT', 'TITLE-FO', 'TITLE-FON', 'TO', 'TODAY', 'TOGGLE-BOX',
'TOOLTIP', 'TOOLTIPS', 'TOPIC', 'TOP-NAV-QUERY', 'TOP-ONLY', 'TO-ROWID',
'TOTAL', 'TRAILING', 'TRANS', 'TRANSACTION', 'TRANSACTION-MODE',
'TRANS-INIT-PROCEDURE', 'TRANSPARENT', 'TRIGGER', 'TRIGGERS', 'TRIM',
'TRUE', 'TRUNCATE', 'TRUNC', 'TRUNCA', 'TRUNCAT', 'TYPE', 'TYPE-OF',
'UNBOX', 'UNBUFFERED', 'UNBUFF', 'UNBUFFE', 'UNBUFFER', 'UNBUFFERE',
'UNDERLINE', 'UNDERL', 'UNDERLI', 'UNDERLIN', 'UNDO', 'UNFORMATTED',
'UNFORM', 'UNFORMA', 'UNFORMAT', 'UNFORMATT', 'UNFORMATTE', 'UNION',
'UNIQUE', 'UNIQUE-ID', 'UNIQUE-MATCH', 'UNIX', 'UNLESS-HIDDEN', 'UNLOAD',
'UNSIGNED-LONG', 'UNSUBSCRIBE', 'UP', 'UPDATE', 'UPDATE-ATTRIBUTE', 'URL',
'URL-DECODE', 'URL-ENCODE', 'URL-PASSWORD', 'URL-USERID', 'USE',
'USE-DICT-EXPS', 'USE-FILENAME', 'USE-INDEX', 'USER', 'USE-REVVIDEO',
'USERID', 'USER-ID', 'USE-TEXT', 'USE-UNDERLINE', 'USE-WIDGET-POOL',
'USING', 'V6DISPLAY', 'V6FRAME', 'VALIDATE', 'VALIDATE-EXPRESSION',
'VALIDATE-MESSAGE', 'VALIDATE-SEAL', 'VALIDATION-ENABLED', 'VALID-EVENT',
'VALID-HANDLE', 'VALID-OBJECT', 'VALUE', 'VALUE-CHANGED', 'VALUES',
'VARIABLE', 'VAR', 'VARI', 'VARIA', 'VARIAB', 'VARIABL', 'VERBOSE',
'VERSION', 'VERTICAL', 'VERT', 'VERTI', 'VERTIC', 'VERTICA', 'VIEW',
'VIEW-AS', 'VIEW-FIRST-COLUMN-ON-REOPEN', 'VIRTUAL-HEIGHT-CHARS',
'VIRTUAL-HEIGHT', 'VIRTUAL-HEIGHT-', 'VIRTUAL-HEIGHT-C',
'VIRTUAL-HEIGHT-CH', 'VIRTUAL-HEIGHT-CHA', 'VIRTUAL-HEIGHT-CHAR',
'VIRTUAL-HEIGHT-PIXELS', 'VIRTUAL-HEIGHT-P', 'VIRTUAL-HEIGHT-PI',
'VIRTUAL-HEIGHT-PIX', 'VIRTUAL-HEIGHT-PIXE', 'VIRTUAL-HEIGHT-PIXEL',
'VIRTUAL-WIDTH-CHARS', 'VIRTUAL-WIDTH', 'VIRTUAL-WIDTH-', 'VIRTUAL-WIDTH-C',
'VIRTUAL-WIDTH-CH', 'VIRTUAL-WIDTH-CHA', 'VIRTUAL-WIDTH-CHAR',
'VIRTUAL-WIDTH-PIXELS', 'VIRTUAL-WIDTH-P', 'VIRTUAL-WIDTH-PI',
'VIRTUAL-WIDTH-PIX', 'VIRTUAL-WIDTH-PIXE', 'VIRTUAL-WIDTH-PIXEL', 'VISIBLE',
'VOID', 'WAIT', 'WAIT-FOR', 'WARNING', 'WEB-CONTEXT', 'WEEKDAY', 'WHEN',
'WHERE', 'WHILE', 'WIDGET', 'WIDGET-ENTER', 'WIDGET-E', 'WIDGET-EN',
'WIDGET-ENT', 'WIDGET-ENTE', 'WIDGET-ID', 'WIDGET-LEAVE', 'WIDGET-L',
'WIDGET-LE', 'WIDGET-LEA', 'WIDGET-LEAV', 'WIDGET-POOL', 'WIDTH',
'WIDTH-CHARS', 'WIDTH', 'WIDTH-', 'WIDTH-C', 'WIDTH-CH', 'WIDTH-CHA',
'WIDTH-CHAR', 'WIDTH-PIXELS', 'WIDTH-P', 'WIDTH-PI', 'WIDTH-PIX',
'WIDTH-PIXE', 'WIDTH-PIXEL', 'WINDOW', 'WINDOW-MAXIMIZED', 'WINDOW-MAXIM',
'WINDOW-MAXIMI', 'WINDOW-MAXIMIZ', 'WINDOW-MAXIMIZE', 'WINDOW-MINIMIZED',
'WINDOW-MINIM', 'WINDOW-MINIMI', 'WINDOW-MINIMIZ', 'WINDOW-MINIMIZE',
'WINDOW-NAME', 'WINDOW-NORMAL', 'WINDOW-STATE', 'WINDOW-STA', 'WINDOW-STAT',
'WINDOW-SYSTEM', 'WITH', 'WORD-INDEX', 'WORD-WRAP',
'WORK-AREA-HEIGHT-PIXELS', 'WORK-AREA-WIDTH-PIXELS', 'WORK-AREA-X',
'WORK-AREA-Y', 'WORKFILE', 'WORK-TABLE', 'WORK-TAB', 'WORK-TABL', 'WRITE',
'WRITE-CDATA', 'WRITE-CHARACTERS', 'WRITE-COMMENT', 'WRITE-DATA-ELEMENT',
'WRITE-EMPTY-ELEMENT', 'WRITE-ENTITY-REF', 'WRITE-EXTERNAL-DTD',
'WRITE-FRAGMENT', 'WRITE-MESSAGE', 'WRITE-PROCESSING-INSTRUCTION',
'WRITE-STATUS', 'WRITE-XML', 'WRITE-XMLSCHEMA', 'X', 'XCODE',
'XML-DATA-TYPE', 'XML-NODE-TYPE', 'XML-SCHEMA-PATH',
'XML-SUPPRESS-NAMESPACE-PROCESSING', 'X-OF', 'XREF', 'XREF-XML', 'Y',
'YEAR', 'YEAR-OFFSET', 'YES', 'YES-NO', 'YES-NO-CANCEL', 'Y-OF'
]
| mit | -207,581,071,209,386,300 | 71.350534 | 80 | 0.626104 | false |
ktan2020/legacy-automation | win/Lib/site-packages/requests/packages/charade/latin1prober.py | 50 | 5387 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .charsetprober import CharSetProber
from .constants import eNotMe
from .compat import wrap_ord
FREQ_CAT_NUM = 4
UDF = 0 # undefined
OTH = 1 # other
ASC = 2 # ascii capital letter
ASS = 3 # ascii small letter
ACV = 4 # accent capital vowel
ACO = 5 # accent capital other
ASV = 6 # accent small vowel
ASO = 7 # accent small other
CLASS_NUM = 8 # total classes
Latin1_CharToClass = (
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 00 - 07
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 08 - 0F
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 10 - 17
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 18 - 1F
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 20 - 27
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 28 - 2F
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 30 - 37
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 38 - 3F
OTH, ASC, ASC, ASC, ASC, ASC, ASC, ASC, # 40 - 47
ASC, ASC, ASC, ASC, ASC, ASC, ASC, ASC, # 48 - 4F
ASC, ASC, ASC, ASC, ASC, ASC, ASC, ASC, # 50 - 57
ASC, ASC, ASC, OTH, OTH, OTH, OTH, OTH, # 58 - 5F
OTH, ASS, ASS, ASS, ASS, ASS, ASS, ASS, # 60 - 67
ASS, ASS, ASS, ASS, ASS, ASS, ASS, ASS, # 68 - 6F
ASS, ASS, ASS, ASS, ASS, ASS, ASS, ASS, # 70 - 77
ASS, ASS, ASS, OTH, OTH, OTH, OTH, OTH, # 78 - 7F
OTH, UDF, OTH, ASO, OTH, OTH, OTH, OTH, # 80 - 87
OTH, OTH, ACO, OTH, ACO, UDF, ACO, UDF, # 88 - 8F
UDF, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 90 - 97
OTH, OTH, ASO, OTH, ASO, UDF, ASO, ACO, # 98 - 9F
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # A0 - A7
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # A8 - AF
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # B0 - B7
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # B8 - BF
ACV, ACV, ACV, ACV, ACV, ACV, ACO, ACO, # C0 - C7
ACV, ACV, ACV, ACV, ACV, ACV, ACV, ACV, # C8 - CF
ACO, ACO, ACV, ACV, ACV, ACV, ACV, OTH, # D0 - D7
ACV, ACV, ACV, ACV, ACV, ACO, ACO, ACO, # D8 - DF
ASV, ASV, ASV, ASV, ASV, ASV, ASO, ASO, # E0 - E7
ASV, ASV, ASV, ASV, ASV, ASV, ASV, ASV, # E8 - EF
ASO, ASO, ASV, ASV, ASV, ASV, ASV, OTH, # F0 - F7
ASV, ASV, ASV, ASV, ASV, ASO, ASO, ASO, # F8 - FF
)
# 0 : illegal
# 1 : very unlikely
# 2 : normal
# 3 : very likely
Latin1ClassModel = (
# UDF OTH ASC ASS ACV ACO ASV ASO
0, 0, 0, 0, 0, 0, 0, 0, # UDF
0, 3, 3, 3, 3, 3, 3, 3, # OTH
0, 3, 3, 3, 3, 3, 3, 3, # ASC
0, 3, 3, 3, 1, 1, 3, 3, # ASS
0, 3, 3, 3, 1, 2, 1, 2, # ACV
0, 3, 3, 3, 3, 3, 3, 3, # ACO
0, 3, 1, 3, 1, 1, 1, 3, # ASV
0, 3, 1, 3, 1, 1, 3, 3, # ASO
)
class Latin1Prober(CharSetProber):
def __init__(self):
CharSetProber.__init__(self)
self.reset()
def reset(self):
self._mLastCharClass = OTH
self._mFreqCounter = [0] * FREQ_CAT_NUM
CharSetProber.reset(self)
def get_charset_name(self):
return "windows-1252"
def feed(self, aBuf):
aBuf = self.filter_with_english_letters(aBuf)
for c in aBuf:
charClass = Latin1_CharToClass[wrap_ord(c)]
freq = Latin1ClassModel[(self._mLastCharClass * CLASS_NUM)
+ charClass]
if freq == 0:
self._mState = eNotMe
break
self._mFreqCounter[freq] += 1
self._mLastCharClass = charClass
return self.get_state()
def get_confidence(self):
if self.get_state() == eNotMe:
return 0.01
total = sum(self._mFreqCounter)
if total < 0.01:
confidence = 0.0
else:
confidence = ((float(self._mFreqCounter[3]) / total)
- (self._mFreqCounter[1] * 20.0 / total))
if confidence < 0.0:
confidence = 0.0
# lower the confidence of latin1 so that other more accurate
# detector can take priority.
confidence = confidence * 0.5
return confidence
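# Illustrative usage sketch (not part of the upstream module). The prober is
# normally driven by charade's UniversalDetector, but fed directly it looks
# roughly like this, assuming `data` holds the raw bytes to classify:
#
#     prober = Latin1Prober()
#     prober.feed(data)
#     print(prober.get_charset_name(), prober.get_confidence())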
| mit | -4,621,126,835,872,517,000 | 36.755396 | 70 | 0.54483 | false |
pschmitt/home-assistant | homeassistant/components/water_heater/reproduce_state.py | 16 | 3681 | """Reproduce a Water heater state."""
import asyncio
import logging
from typing import Any, Dict, Iterable, Optional
from homeassistant.const import (
ATTR_ENTITY_ID,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
STATE_OFF,
STATE_ON,
)
from homeassistant.core import Context, State
from homeassistant.helpers.typing import HomeAssistantType
from . import (
ATTR_AWAY_MODE,
ATTR_OPERATION_MODE,
ATTR_TEMPERATURE,
DOMAIN,
SERVICE_SET_AWAY_MODE,
SERVICE_SET_OPERATION_MODE,
SERVICE_SET_TEMPERATURE,
STATE_ECO,
STATE_ELECTRIC,
STATE_GAS,
STATE_HEAT_PUMP,
STATE_HIGH_DEMAND,
STATE_PERFORMANCE,
)
_LOGGER = logging.getLogger(__name__)
VALID_STATES = {
STATE_ECO,
STATE_ELECTRIC,
STATE_GAS,
STATE_HEAT_PUMP,
STATE_HIGH_DEMAND,
STATE_OFF,
STATE_ON,
STATE_PERFORMANCE,
}
async def _async_reproduce_state(
hass: HomeAssistantType,
state: State,
*,
context: Optional[Context] = None,
reproduce_options: Optional[Dict[str, Any]] = None,
) -> None:
"""Reproduce a single state."""
cur_state = hass.states.get(state.entity_id)
if cur_state is None:
_LOGGER.warning("Unable to find entity %s", state.entity_id)
return
if state.state not in VALID_STATES:
_LOGGER.warning(
"Invalid state specified for %s: %s", state.entity_id, state.state
)
return
# Return if we are already at the right state.
if (
cur_state.state == state.state
and cur_state.attributes.get(ATTR_TEMPERATURE)
== state.attributes.get(ATTR_TEMPERATURE)
and cur_state.attributes.get(ATTR_AWAY_MODE)
== state.attributes.get(ATTR_AWAY_MODE)
):
return
service_data = {ATTR_ENTITY_ID: state.entity_id}
if state.state != cur_state.state:
if state.state == STATE_ON:
service = SERVICE_TURN_ON
elif state.state == STATE_OFF:
service = SERVICE_TURN_OFF
else:
service = SERVICE_SET_OPERATION_MODE
service_data[ATTR_OPERATION_MODE] = state.state
await hass.services.async_call(
DOMAIN, service, service_data, context=context, blocking=True
)
if (
state.attributes.get(ATTR_TEMPERATURE)
!= cur_state.attributes.get(ATTR_TEMPERATURE)
and state.attributes.get(ATTR_TEMPERATURE) is not None
):
await hass.services.async_call(
DOMAIN,
SERVICE_SET_TEMPERATURE,
{
ATTR_ENTITY_ID: state.entity_id,
ATTR_TEMPERATURE: state.attributes.get(ATTR_TEMPERATURE),
},
context=context,
blocking=True,
)
if (
state.attributes.get(ATTR_AWAY_MODE) != cur_state.attributes.get(ATTR_AWAY_MODE)
and state.attributes.get(ATTR_AWAY_MODE) is not None
):
await hass.services.async_call(
DOMAIN,
SERVICE_SET_AWAY_MODE,
{
ATTR_ENTITY_ID: state.entity_id,
ATTR_AWAY_MODE: state.attributes.get(ATTR_AWAY_MODE),
},
context=context,
blocking=True,
)
async def async_reproduce_states(
hass: HomeAssistantType,
states: Iterable[State],
*,
context: Optional[Context] = None,
reproduce_options: Optional[Dict[str, Any]] = None,
) -> None:
"""Reproduce Water heater states."""
await asyncio.gather(
*(
_async_reproduce_state(
hass, state, context=context, reproduce_options=reproduce_options
)
for state in states
)
)
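# Illustrative call pattern (an assumption added for clarity, not part of this
# component): scene/state restoration builds State objects and passes them to
# async_reproduce_states, e.g.
#
#     await async_reproduce_states(
#         hass,
#         [State("water_heater.boiler", STATE_ECO, {ATTR_TEMPERATURE: 55})],
#     )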
| apache-2.0 | -6,793,566,889,694,532,000 | 25.673913 | 88 | 0.601195 | false |
wolfram74/numerical_methods_iserles_notes | venv/lib/python2.7/site-packages/numpy/testing/noseclasses.py | 76 | 14350 | # These classes implement a doctest runner plugin for nose, a "known failure"
# error class, and a customized TestProgram for NumPy.
# Because this module imports nose directly, it should not
# be used except by nosetester.py to avoid a general NumPy
# dependency on nose.
from __future__ import division, absolute_import, print_function
import os
import doctest
import nose
from nose.plugins import doctests as npd
from nose.plugins.errorclass import ErrorClass, ErrorClassPlugin
from nose.plugins.base import Plugin
from nose.util import src
import numpy
from .nosetester import get_package_name
import inspect
# Some of the classes in this module begin with 'Numpy' to clearly distinguish
# them from the plethora of very similar names from nose/unittest/doctest
#-----------------------------------------------------------------------------
# Modified version of the one in the stdlib, that fixes a python bug (doctests
# not found in extension modules, http://bugs.python.org/issue3158)
class NumpyDocTestFinder(doctest.DocTestFinder):
def _from_module(self, module, object):
"""
Return true if the given object is defined in the given
module.
"""
if module is None:
#print '_fm C1' # dbg
return True
elif inspect.isfunction(object):
#print '_fm C2' # dbg
return module.__dict__ is object.__globals__
elif inspect.isbuiltin(object):
#print '_fm C2-1' # dbg
return module.__name__ == object.__module__
elif inspect.isclass(object):
#print '_fm C3' # dbg
return module.__name__ == object.__module__
elif inspect.ismethod(object):
# This one may be a bug in cython that fails to correctly set the
# __module__ attribute of methods, but since the same error is easy
# to make by extension code writers, having this safety in place
# isn't such a bad idea
#print '_fm C3-1' # dbg
return module.__name__ == object.__self__.__class__.__module__
elif inspect.getmodule(object) is not None:
#print '_fm C4' # dbg
#print 'C4 mod',module,'obj',object # dbg
return module is inspect.getmodule(object)
elif hasattr(object, '__module__'):
#print '_fm C5' # dbg
return module.__name__ == object.__module__
elif isinstance(object, property):
#print '_fm C6' # dbg
return True # [XX] no way not be sure.
else:
raise ValueError("object must be a class or function")
def _find(self, tests, obj, name, module, source_lines, globs, seen):
"""
Find tests for the given object and any contained objects, and
add them to `tests`.
"""
doctest.DocTestFinder._find(self, tests, obj, name, module,
source_lines, globs, seen)
# Below we re-run pieces of the above method with manual modifications,
# because the original code is buggy and fails to correctly identify
# doctests in extension modules.
# Local shorthands
from inspect import isroutine, isclass, ismodule, isfunction, \
ismethod
# Look for tests in a module's contained objects.
if ismodule(obj) and self._recurse:
for valname, val in obj.__dict__.items():
valname1 = '%s.%s' % (name, valname)
if ( (isroutine(val) or isclass(val))
and self._from_module(module, val) ):
self._find(tests, val, valname1, module, source_lines,
globs, seen)
# Look for tests in a class's contained objects.
if isclass(obj) and self._recurse:
#print 'RECURSE into class:',obj # dbg
for valname, val in obj.__dict__.items():
#valname1 = '%s.%s' % (name, valname) # dbg
#print 'N',name,'VN:',valname,'val:',str(val)[:77] # dbg
# Special handling for staticmethod/classmethod.
if isinstance(val, staticmethod):
val = getattr(obj, valname)
if isinstance(val, classmethod):
val = getattr(obj, valname).__func__
# Recurse to methods, properties, and nested classes.
if ((isfunction(val) or isclass(val) or
ismethod(val) or isinstance(val, property)) and
self._from_module(module, val)):
valname = '%s.%s' % (name, valname)
self._find(tests, val, valname, module, source_lines,
globs, seen)
# second-chance checker; if the default comparison doesn't
# pass, then see if the expected output string contains flags that
# tell us to ignore the output
class NumpyOutputChecker(doctest.OutputChecker):
def check_output(self, want, got, optionflags):
ret = doctest.OutputChecker.check_output(self, want, got,
optionflags)
if not ret:
if "#random" in want:
return True
# it would be useful to normalize endianness so that
# bigendian machines don't fail all the tests (and there are
# actually some bigendian examples in the doctests). Let's try
# making them all little endian
got = got.replace("'>", "'<")
want= want.replace("'>", "'<")
# try to normalize out 32 and 64 bit default int sizes
for sz in [4, 8]:
got = got.replace("'<i%d'"%sz, "int")
want= want.replace("'<i%d'"%sz, "int")
ret = doctest.OutputChecker.check_output(self, want,
got, optionflags)
return ret
# Subclass nose.plugins.doctests.DocTestCase to work around a bug in
# its constructor that blocks non-default arguments from being passed
# down into doctest.DocTestCase
class NumpyDocTestCase(npd.DocTestCase):
def __init__(self, test, optionflags=0, setUp=None, tearDown=None,
checker=None, obj=None, result_var='_'):
self._result_var = result_var
self._nose_obj = obj
doctest.DocTestCase.__init__(self, test,
optionflags=optionflags,
setUp=setUp, tearDown=tearDown,
checker=checker)
print_state = numpy.get_printoptions()
class NumpyDoctest(npd.Doctest):
name = 'numpydoctest' # call nosetests with --with-numpydoctest
score = 1000 # load late, after doctest builtin
# always use whitespace and ellipsis options for doctests
doctest_optflags = doctest.NORMALIZE_WHITESPACE | doctest.ELLIPSIS
# files that should be ignored for doctests
doctest_ignore = ['generate_numpy_api.py',
'setup.py']
# Custom classes; class variables to allow subclassing
doctest_case_class = NumpyDocTestCase
out_check_class = NumpyOutputChecker
test_finder_class = NumpyDocTestFinder
# Don't use the standard doctest option handler; hard-code the option values
def options(self, parser, env=os.environ):
Plugin.options(self, parser, env)
# Test doctests in 'test' files / directories. Standard plugin default
# is False
self.doctest_tests = True
# Variable name; if defined, doctest results stored in this variable in
# the top-level namespace. None is the standard default
self.doctest_result_var = None
def configure(self, options, config):
# parent method sets enabled flag from command line --with-numpydoctest
Plugin.configure(self, options, config)
self.finder = self.test_finder_class()
self.parser = doctest.DocTestParser()
if self.enabled:
# Pull standard doctest out of plugin list; there's no reason to run
# both. In practice the Unplugger plugin above would cover us when
# run from a standard numpy.test() call; this is just in case
# someone wants to run our plugin outside the numpy.test() machinery
config.plugins.plugins = [p for p in config.plugins.plugins
if p.name != 'doctest']
def set_test_context(self, test):
""" Configure `test` object to set test context
We set the numpy / scipy standard doctest namespace
Parameters
----------
test : test object
with ``globs`` dictionary defining namespace
Returns
-------
None
Notes
-----
`test` object modified in place
"""
# set the namespace for tests
pkg_name = get_package_name(os.path.dirname(test.filename))
# Each doctest should execute in an environment equivalent to
# starting Python and executing "import numpy as np", and,
# for SciPy packages, an additional import of the local
# package (so that scipy.linalg.basic.py's doctests have an
# implicit "from scipy import linalg" as well.
#
# Note: __file__ allows the doctest in NoseTester to run
# without producing an error
test.globs = {'__builtins__':__builtins__,
'__file__':'__main__',
'__name__':'__main__',
'np':numpy}
# add appropriate scipy import for SciPy tests
if 'scipy' in pkg_name:
p = pkg_name.split('.')
p2 = p[-1]
test.globs[p2] = __import__(pkg_name, test.globs, {}, [p2])
# Override test loading to customize test context (with set_test_context
# method), set standard docstring options, and install our own test output
# checker
def loadTestsFromModule(self, module):
if not self.matches(module.__name__):
npd.log.debug("Doctest doesn't want module %s", module)
return
try:
tests = self.finder.find(module)
except AttributeError:
# nose allows module.__test__ = False; doctest does not and
# throws AttributeError
return
if not tests:
return
tests.sort()
module_file = src(module.__file__)
for test in tests:
if not test.examples:
continue
if not test.filename:
test.filename = module_file
# Set test namespace; test altered in place
self.set_test_context(test)
yield self.doctest_case_class(test,
optionflags=self.doctest_optflags,
checker=self.out_check_class(),
result_var=self.doctest_result_var)
# Add an afterContext method to nose.plugins.doctests.Doctest in order
# to restore print options to the original state after each doctest
def afterContext(self):
numpy.set_printoptions(**print_state)
# Ignore NumPy-specific build files that shouldn't be searched for tests
def wantFile(self, file):
bn = os.path.basename(file)
if bn in self.doctest_ignore:
return False
return npd.Doctest.wantFile(self, file)
class Unplugger(object):
""" Nose plugin to remove named plugin late in loading
By default it removes the "doctest" plugin.
"""
name = 'unplugger'
enabled = True # always enabled
score = 4000 # load late in order to be after builtins
def __init__(self, to_unplug='doctest'):
self.to_unplug = to_unplug
def options(self, parser, env):
pass
def configure(self, options, config):
# Pull named plugin out of plugins list
config.plugins.plugins = [p for p in config.plugins.plugins
if p.name != self.to_unplug]
class KnownFailureTest(Exception):
'''Raise this exception to mark a test as a known failing test.'''
pass
class KnownFailure(ErrorClassPlugin):
'''Plugin that installs a KNOWNFAIL error class for the
    KnownFailureTest exception. When KnownFailureTest is raised,
the exception will be logged in the knownfail attribute of the
result, 'K' or 'KNOWNFAIL' (verbose) will be output, and the
exception will not be counted as an error or failure.'''
enabled = True
knownfail = ErrorClass(KnownFailureTest,
label='KNOWNFAIL',
isfailure=False)
def options(self, parser, env=os.environ):
env_opt = 'NOSE_WITHOUT_KNOWNFAIL'
parser.add_option('--no-knownfail', action='store_true',
dest='noKnownFail', default=env.get(env_opt, False),
help='Disable special handling of KnownFailureTest '
'exceptions')
def configure(self, options, conf):
if not self.can_configure:
return
self.conf = conf
disable = getattr(options, 'noKnownFail', False)
if disable:
self.enabled = False
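# Illustrative example (not in the original file): a test opts into this plugin
# by raising KnownFailureTest, which is then reported as 'K'/'KNOWNFAIL'
# instead of being counted as an error or failure:
#
#     def test_known_broken():
#         raise KnownFailureTest("documented upstream bug")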
# Class allows us to save the results of the tests in runTests - see runTests
# method docstring for details
class NumpyTestProgram(nose.core.TestProgram):
def runTests(self):
"""Run Tests. Returns true on success, false on failure, and
sets self.success to the same value.
        Nose currently discards the test result object, but we need to return
        it to the user, so we override TestProgram.runTests to retain the
        result.
"""
if self.testRunner is None:
self.testRunner = nose.core.TextTestRunner(stream=self.config.stream,
verbosity=self.config.verbosity,
config=self.config)
plug_runner = self.config.plugins.prepareTestRunner(self.testRunner)
if plug_runner is not None:
self.testRunner = plug_runner
self.result = self.testRunner.run(self.test)
self.success = self.result.wasSuccessful()
return self.success
| mit | 4,407,569,344,774,178,000 | 39.651558 | 87 | 0.588153 | false |
darius/mccarthy-to-bryant | problems.py | 2 | 1351 | """
Use BDDs to solve SAT problems from DIMACS files.
TODO: try the tableau method too
"""
import bddsat
import dimacs
import sat
# Some problems from http://toughsat.appspot.com/
filenames = ['problems/trivial.dimacs',
'problems/factoring6.dimacs',
'problems/factoring2.dimacs',
'problems/subsetsum_random.dimacs',
]
def main():
for filename in filenames:
print(filename)
_, problem = dimacs.load(filename)
solution = bddsat.solve(problem)
print(solution)
if solution is not None:
assert sat.is_satisfied(problem, solution)
        print('')
## main()
#. problems/trivial.dimacs
#. {1: 1, 2: 1}
#.
#. problems/factoring6.dimacs
#. {1: 0, 2: 1, 3: 1, 4: 1, 5: 0, 6: 1, 7: 0, 8: 1, 9: 0, 10: 1, 11: 1, 12: 0, 13: 1, 14: 0}
#.
#. problems/factoring2.dimacs
#. {1: 0, 2: 1, 3: 0, 4: 1, 5: 1, 6: 0, 7: 0, 8: 1, 9: 0, 10: 1, 11: 0, 12: 0, 13: 1, 14: 0, 15: 0, 16: 0, 17: 0, 18: 1, 19: 0, 20: 1, 21: 0, 22: 0, 23: 0, 24: 1, 25: 0, 26: 0, 27: 0, 28: 0, 29: 0}
#.
#. problems/subsetsum_random.dimacs
#. {1: 0, 2: 0, 3: 1, 4: 1, 5: 0, 6: 0, 7: 1, 8: 1, 9: 1, 10: 0, 11: 0, 12: 0, 13: 0, 14: 0, 15: 1, 16: 1, 17: 0, 18: 0, 19: 0, 20: 0, 21: 0, 22: 0, 23: 0, 24: 0, 25: 1, 26: 1, 27: 0, 28: 0, 29: 0, 30: 0}
#.
if __name__ == '__main__':
main()
| gpl-3.0 | -7,355,265,207,366,471,000 | 31.166667 | 204 | 0.533679 | false |
Volcanoscar/omim | 3party/protobuf/python/google/protobuf/message.py | 78 | 10275 | # Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# https://developers.google.com/protocol-buffers/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# TODO(robinson): We should just make these methods all "pure-virtual" and move
# all implementation out, into reflection.py for now.
"""Contains an abstract base class for protocol messages."""
__author__ = '[email protected] (Will Robinson)'
class Error(Exception): pass
class DecodeError(Error): pass
class EncodeError(Error): pass
class Message(object):
"""Abstract base class for protocol messages.
Protocol message classes are almost always generated by the protocol
compiler. These generated types subclass Message and implement the methods
shown below.
TODO(robinson): Link to an HTML document here.
TODO(robinson): Document that instances of this class will also
have an Extensions attribute with __getitem__ and __setitem__.
Again, not sure how to best convey this.
TODO(robinson): Document that the class must also have a static
RegisterExtension(extension_field) method.
Not sure how to best express at this point.
"""
# TODO(robinson): Document these fields and methods.
__slots__ = []
DESCRIPTOR = None
def __deepcopy__(self, memo=None):
clone = type(self)()
clone.MergeFrom(self)
return clone
def __eq__(self, other_msg):
"""Recursively compares two messages by value and structure."""
raise NotImplementedError
def __ne__(self, other_msg):
# Can't just say self != other_msg, since that would infinitely recurse. :)
return not self == other_msg
def __hash__(self):
raise TypeError('unhashable object')
def __str__(self):
"""Outputs a human-readable representation of the message."""
raise NotImplementedError
def __unicode__(self):
"""Outputs a human-readable representation of the message."""
raise NotImplementedError
def MergeFrom(self, other_msg):
"""Merges the contents of the specified message into current message.
This method merges the contents of the specified message into the current
message. Singular fields that are set in the specified message overwrite
the corresponding fields in the current message. Repeated fields are
appended. Singular sub-messages and groups are recursively merged.
Args:
other_msg: Message to merge into the current message.
"""
raise NotImplementedError
def CopyFrom(self, other_msg):
"""Copies the content of the specified message into the current message.
The method clears the current message and then merges the specified
message using MergeFrom.
Args:
other_msg: Message to copy into the current one.
"""
if self is other_msg:
return
self.Clear()
self.MergeFrom(other_msg)
def Clear(self):
"""Clears all data that was set in the message."""
raise NotImplementedError
def SetInParent(self):
"""Mark this as present in the parent.
This normally happens automatically when you assign a field of a
sub-message, but sometimes you want to make the sub-message
present while keeping it empty. If you find yourself using this,
you may want to reconsider your design."""
raise NotImplementedError
def IsInitialized(self):
"""Checks if the message is initialized.
Returns:
The method returns True if the message is initialized (i.e. all of its
required fields are set).
"""
raise NotImplementedError
# TODO(robinson): MergeFromString() should probably return None and be
# implemented in terms of a helper that returns the # of bytes read. Our
# deserialization routines would use the helper when recursively
# deserializing, but the end user would almost always just want the no-return
# MergeFromString().
def MergeFromString(self, serialized):
"""Merges serialized protocol buffer data into this message.
When we find a field in |serialized| that is already present
in this message:
- If it's a "repeated" field, we append to the end of our list.
- Else, if it's a scalar, we overwrite our field.
- Else, (it's a nonrepeated composite), we recursively merge
into the existing composite.
TODO(robinson): Document handling of unknown fields.
Args:
serialized: Any object that allows us to call buffer(serialized)
to access a string of bytes using the buffer interface.
TODO(robinson): When we switch to a helper, this will return None.
Returns:
The number of bytes read from |serialized|.
For non-group messages, this will always be len(serialized),
but for messages which are actually groups, this will
generally be less than len(serialized), since we must
stop when we reach an END_GROUP tag. Note that if
we *do* stop because of an END_GROUP tag, the number
of bytes returned does not include the bytes
for the END_GROUP tag information.
"""
raise NotImplementedError
def ParseFromString(self, serialized):
"""Parse serialized protocol buffer data into this message.
Like MergeFromString(), except we clear the object first and
do not return the value that MergeFromString returns.
"""
self.Clear()
self.MergeFromString(serialized)
def SerializeToString(self):
"""Serializes the protocol message to a binary string.
Returns:
A binary string representation of the message if all of the required
fields in the message are set (i.e. the message is initialized).
Raises:
message.EncodeError if the message isn't initialized.
"""
raise NotImplementedError
def SerializePartialToString(self):
"""Serializes the protocol message to a binary string.
This method is similar to SerializeToString but doesn't check if the
message is initialized.
Returns:
A string representation of the partial message.
"""
raise NotImplementedError
# TODO(robinson): Decide whether we like these better
# than auto-generated has_foo() and clear_foo() methods
# on the instances themselves. This way is less consistent
# with C++, but it makes reflection-type access easier and
# reduces the number of magically autogenerated things.
#
# TODO(robinson): Be sure to document (and test) exactly
# which field names are accepted here. Are we case-sensitive?
# What do we do with fields that share names with Python keywords
# like 'lambda' and 'yield'?
#
# nnorwitz says:
# """
# Typically (in python), an underscore is appended to names that are
# keywords. So they would become lambda_ or yield_.
# """
def ListFields(self):
"""Returns a list of (FieldDescriptor, value) tuples for all
fields in the message which are not empty. A singular field is non-empty
if HasField() would return true, and a repeated field is non-empty if
it contains at least one element. The fields are ordered by field
number"""
raise NotImplementedError
def HasField(self, field_name):
"""Checks if a certain field is set for the message. Note if the
field_name is not defined in the message descriptor, ValueError will be
raised."""
raise NotImplementedError
def ClearField(self, field_name):
raise NotImplementedError
def HasExtension(self, extension_handle):
raise NotImplementedError
def ClearExtension(self, extension_handle):
raise NotImplementedError
def ByteSize(self):
"""Returns the serialized size of this message.
Recursively calls ByteSize() on all contained messages.
"""
raise NotImplementedError
def _SetListener(self, message_listener):
"""Internal method used by the protocol message implementation.
Clients should not call this directly.
Sets a listener that this message will call on certain state transitions.
The purpose of this method is to register back-edges from children to
parents at runtime, for the purpose of setting "has" bits and
byte-size-dirty bits in the parent and ancestor objects whenever a child or
descendant object is modified.
If the client wants to disconnect this Message from the object tree, she
explicitly sets callback to None.
If message_listener is None, unregisters any existing listener. Otherwise,
message_listener must implement the MessageListener interface in
internal/message_listener.py, and we discard any listener registered
via a previous _SetListener() call.
"""
raise NotImplementedError
def __getstate__(self):
"""Support the pickle protocol."""
return dict(serialized=self.SerializePartialToString())
def __setstate__(self, state):
"""Support the pickle protocol."""
self.__init__()
self.ParseFromString(state['serialized'])
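# Illustrative round-trip sketch (added for clarity; `MyMessage` is a
# hypothetical class produced by the protocol compiler, not something defined
# in this module). Generated subclasses implement the abstract methods above:
#
#     msg = MyMessage()
#     msg.some_field = 42           # fields come from the .proto definition
#     data = msg.SerializeToString()
#
#     parsed = MyMessage()
#     parsed.ParseFromString(data)  # Clear() followed by MergeFromString()
#     assert parsed == msg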
| apache-2.0 | 1,216,193,845,680,204,800 | 35.179577 | 79 | 0.726034 | false |
Darthkpo/xtt | openpyxl/xml/tests/test_incremental_xmlfile.py | 1 | 11428 | from __future__ import absolute_import
"""
Tests for the incremental XML serialisation API.
From lxml
"""
from io import BytesIO
import unittest
import tempfile, os, sys
from .common_imports import etree, HelperTestCase, skipIf
from .. import xmlfile as etree
import pytest
from openpyxl.tests.helper import compare_xml
import xml.etree.ElementTree
# _parse_file needs parse routine - take it from ElementTree
etree.parse = xml.etree.ElementTree.parse
class _XmlFileTestCaseBase(HelperTestCase):
_file = None # to be set by specific subtypes below
def setUp(self):
self._file = BytesIO()
def test_element(self):
with etree.xmlfile(self._file) as xf:
with xf.element('test'):
pass
self.assertXml('<test></test>')
def test_element_write_text(self):
with etree.xmlfile(self._file) as xf:
with xf.element('test'):
xf.write('toast')
self.assertXml('<test>toast</test>')
def test_element_nested(self):
with etree.xmlfile(self._file) as xf:
with xf.element('test'):
with xf.element('toast'):
with xf.element('taste'):
xf.write('conTent')
self.assertXml('<test><toast><taste>conTent</taste></toast></test>')
def test_element_nested_with_text(self):
with etree.xmlfile(self._file) as xf:
with xf.element('test'):
xf.write('con')
with xf.element('toast'):
xf.write('tent')
with xf.element('taste'):
xf.write('inside')
xf.write('tnet')
xf.write('noc')
self.assertXml('<test>con<toast>tent<taste>inside</taste>'
'tnet</toast>noc</test>')
def test_write_Element(self):
with etree.xmlfile(self._file) as xf:
xf.write(etree.Element('test'))
self.assertXml('<test/>')
def test_write_Element_repeatedly(self):
element = etree.Element('test')
with etree.xmlfile(self._file) as xf:
with xf.element('test'):
for i in range(100):
xf.write(element)
tree = self._parse_file()
self.assertTrue(tree is not None)
self.assertEqual(100, len(tree.getroot()))
self.assertEqual(set(['test']), set(el.tag for el in tree.getroot()))
def test_namespace_nsmap(self):
with etree.xmlfile(self._file) as xf:
with xf.element('{nsURI}test', nsmap={'x': 'nsURI'}):
pass
self.assertXml('<x:test xmlns:x="nsURI"></x:test>')
def test_namespace_nested_nsmap(self):
with etree.xmlfile(self._file) as xf:
with xf.element('test', nsmap={'x': 'nsURI'}):
with xf.element('{nsURI}toast'):
pass
self.assertXml('<test xmlns:x="nsURI"><x:toast></x:toast></test>')
def test_anonymous_namespace(self):
with etree.xmlfile(self._file) as xf:
with xf.element('{nsURI}test'):
pass
self.assertXml('<ns0:test xmlns:ns0="nsURI"></ns0:test>')
def test_namespace_nested_anonymous(self):
with etree.xmlfile(self._file) as xf:
with xf.element('test'):
with xf.element('{nsURI}toast'):
pass
self.assertXml('<test><ns0:toast xmlns:ns0="nsURI"></ns0:toast></test>')
def test_default_namespace(self):
with etree.xmlfile(self._file) as xf:
with xf.element('{nsURI}test', nsmap={None: 'nsURI'}):
pass
self.assertXml('<test xmlns="nsURI"></test>')
def test_nested_default_namespace(self):
with etree.xmlfile(self._file) as xf:
with xf.element('{nsURI}test', nsmap={None: 'nsURI'}):
with xf.element('{nsURI}toast'):
pass
self.assertXml('<test xmlns="nsURI"><toast></toast></test>')
@pytest.mark.xfail
def test_pi(self):
with etree.xmlfile(self._file) as xf:
xf.write(etree.ProcessingInstruction('pypi'))
with xf.element('test'):
pass
self.assertXml('<?pypi ?><test></test>')
@pytest.mark.xfail
def test_comment(self):
with etree.xmlfile(self._file) as xf:
xf.write(etree.Comment('a comment'))
with xf.element('test'):
pass
self.assertXml('<!--a comment--><test></test>')
def test_attribute(self):
with etree.xmlfile(self._file) as xf:
with xf.element('test', attrib={'k': 'v'}):
pass
self.assertXml('<test k="v"></test>')
def test_escaping(self):
with etree.xmlfile(self._file) as xf:
with xf.element('test'):
xf.write('Comments: <!-- text -->\n')
xf.write('Entities: &')
self.assertXml(
'<test>Comments: <!-- text -->\nEntities: &amp;</test>')
@pytest.mark.xfail
def test_encoding(self):
with etree.xmlfile(self._file, encoding='utf16') as xf:
with xf.element('test'):
xf.write('toast')
self.assertXml('<test>toast</test>', encoding='utf16')
@pytest.mark.xfail
def test_buffering(self):
with etree.xmlfile(self._file, buffered=False) as xf:
with xf.element('test'):
self.assertXml("<test>")
xf.write('toast')
self.assertXml("<test>toast")
with xf.element('taste'):
self.assertXml("<test>toast<taste>")
xf.write('some', etree.Element("more"), "toast")
self.assertXml("<test>toast<taste>some<more/>toast")
self.assertXml("<test>toast<taste>some<more/>toast</taste>")
xf.write('end')
self.assertXml("<test>toast<taste>some<more/>toast</taste>end")
self.assertXml("<test>toast<taste>some<more/>toast</taste>end</test>")
self.assertXml("<test>toast<taste>some<more/>toast</taste>end</test>")
@pytest.mark.xfail
def test_flush(self):
with etree.xmlfile(self._file, buffered=True) as xf:
with xf.element('test'):
self.assertXml("")
xf.write('toast')
self.assertXml("")
with xf.element('taste'):
self.assertXml("")
xf.flush()
self.assertXml("<test>toast<taste>")
self.assertXml("<test>toast<taste>")
self.assertXml("<test>toast<taste>")
self.assertXml("<test>toast<taste></taste></test>")
def test_failure_preceding_text(self):
try:
with etree.xmlfile(self._file) as xf:
xf.write('toast')
except etree.LxmlSyntaxError:
self.assertTrue(True)
else:
self.assertTrue(False)
def test_failure_trailing_text(self):
with etree.xmlfile(self._file) as xf:
with xf.element('test'):
pass
try:
xf.write('toast')
except etree.LxmlSyntaxError:
self.assertTrue(True)
else:
self.assertTrue(False)
def test_failure_trailing_Element(self):
with etree.xmlfile(self._file) as xf:
with xf.element('test'):
pass
try:
xf.write(etree.Element('test'))
except etree.LxmlSyntaxError:
self.assertTrue(True)
else:
self.assertTrue(False)
@pytest.mark.xfail
def test_closing_out_of_order_in_error_case(self):
cm_exit = None
try:
with etree.xmlfile(self._file) as xf:
x = xf.element('test')
cm_exit = x.__exit__
x.__enter__()
raise ValueError('123')
except ValueError:
self.assertTrue(cm_exit)
try:
cm_exit(ValueError, ValueError("huhu"), None)
except etree.LxmlSyntaxError:
self.assertTrue(True)
else:
self.assertTrue(False)
else:
self.assertTrue(False)
def _read_file(self):
pos = self._file.tell()
self._file.seek(0)
try:
return self._file.read()
finally:
self._file.seek(pos)
def _parse_file(self):
pos = self._file.tell()
self._file.seek(0)
try:
return etree.parse(self._file)
finally:
self._file.seek(pos)
def tearDown(self):
if self._file is not None:
self._file.close()
def assertXml(self, expected, encoding='utf8'):
diff = compare_xml(self._read_file().decode(encoding), expected)
assert diff is None, diff
class BytesIOXmlFileTestCase(_XmlFileTestCaseBase):
def setUp(self):
self._file = BytesIO()
def test_filelike_close(self):
with etree.xmlfile(self._file, close=True) as xf:
with xf.element('test'):
pass
self.assertRaises(ValueError, self._file.getvalue)
class TempXmlFileTestCase(_XmlFileTestCaseBase):
def setUp(self):
self._file = tempfile.TemporaryFile()
class TempPathXmlFileTestCase(_XmlFileTestCaseBase):
def setUp(self):
self._tmpfile = tempfile.NamedTemporaryFile(delete=False)
self._file = self._tmpfile.name
def tearDown(self):
try:
self._tmpfile.close()
finally:
if os.path.exists(self._tmpfile.name):
os.unlink(self._tmpfile.name)
def _read_file(self):
self._tmpfile.seek(0)
return self._tmpfile.read()
def _parse_file(self):
self._tmpfile.seek(0)
return etree.parse(self._tmpfile)
@skipIf(True, "temp file behaviour is too platform specific here")
def test_buffering(self):
pass
@skipIf(True, "temp file behaviour is too platform specific here")
def test_flush(self):
pass
class SimpleFileLikeXmlFileTestCase(_XmlFileTestCaseBase):
class SimpleFileLike(object):
def __init__(self, target):
self._target = target
self.write = target.write
self.tell = target.tell
self.seek = target.seek
self.closed = False
def close(self):
assert not self.closed
self.closed = True
self._target.close()
def setUp(self):
self._target = BytesIO()
self._file = self.SimpleFileLike(self._target)
def _read_file(self):
return self._target.getvalue()
def _parse_file(self):
pos = self._file.tell()
self._target.seek(0)
try:
return etree.parse(self._target)
finally:
self._target.seek(pos)
def test_filelike_not_closing(self):
with etree.xmlfile(self._file) as xf:
with xf.element('test'):
pass
self.assertFalse(self._file.closed)
def test_filelike_close(self):
with etree.xmlfile(self._file, close=True) as xf:
with xf.element('test'):
pass
self.assertTrue(self._file.closed)
self._file = None # prevent closing in tearDown()
| mit | 7,292,895,480,039,333,000 | 31.558405 | 82 | 0.547077 | false |
fujunwei/chromium-crosswalk | build/android/pylib/instrumentation/test_package.py | 71 | 1334 | # Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Class representing instrumentation test apk and jar."""
import os
from pylib.instrumentation import test_jar
from pylib.utils import apk_helper
class TestPackage(test_jar.TestJar):
def __init__(self, apk_path, jar_path, test_support_apk_path):
test_jar.TestJar.__init__(self, jar_path)
if not os.path.exists(apk_path):
raise Exception('%s not found, please build it' % apk_path)
self._apk_path = apk_path
self._apk_name = os.path.splitext(os.path.basename(apk_path))[0]
self._package_name = apk_helper.GetPackageName(self._apk_path)
self._test_support_apk_path = test_support_apk_path
def GetApkPath(self):
"""Returns the absolute path to the APK."""
return self._apk_path
def GetApkName(self):
"""Returns the name of the apk without the suffix."""
return self._apk_name
def GetPackageName(self):
"""Returns the package name of this APK."""
return self._package_name
# Override.
def Install(self, device):
device.Install(self.GetApkPath())
if (self._test_support_apk_path and
os.path.exists(self._test_support_apk_path)):
device.Install(self._test_support_apk_path)
| bsd-3-clause | 6,378,936,343,346,833,000 | 30.761905 | 72 | 0.694903 | false |
ghedsouza/django | tests/forms_tests/field_tests/test_base.py | 131 | 1455 | from django.forms import ChoiceField, Field, Form, Select
from django.test import SimpleTestCase
class BasicFieldsTests(SimpleTestCase):
def test_field_sets_widget_is_required(self):
self.assertTrue(Field(required=True).widget.is_required)
self.assertFalse(Field(required=False).widget.is_required)
def test_cooperative_multiple_inheritance(self):
class A:
def __init__(self):
self.class_a_var = True
super().__init__()
class ComplexField(Field, A):
def __init__(self):
super().__init__()
f = ComplexField()
self.assertTrue(f.class_a_var)
def test_field_deepcopies_widget_instance(self):
class CustomChoiceField(ChoiceField):
widget = Select(attrs={'class': 'my-custom-class'})
class TestForm(Form):
field1 = CustomChoiceField(choices=[])
field2 = CustomChoiceField(choices=[])
f = TestForm()
f.fields['field1'].choices = [('1', '1')]
f.fields['field2'].choices = [('2', '2')]
self.assertEqual(f.fields['field1'].widget.choices, [('1', '1')])
self.assertEqual(f.fields['field2'].widget.choices, [('2', '2')])
class DisabledFieldTests(SimpleTestCase):
def test_disabled_field_has_changed_always_false(self):
disabled_field = Field(disabled=True)
self.assertFalse(disabled_field.has_changed('x', 'y'))
| bsd-3-clause | -8,400,373,920,952,449,000 | 33.642857 | 73 | 0.610309 | false |
clione/django-kanban | src/core/userena/tests/commands.py | 7 | 4703 | from django.test import TestCase
from django.core.management import call_command
from django.contrib.auth.models import Permission
from django.contrib.contenttypes.models import ContentType
from userena.models import UserenaSignup
from userena.managers import ASSIGNED_PERMISSIONS
from userena import settings as userena_settings
from userena.utils import get_profile_model, get_user_model
from guardian.shortcuts import remove_perm
from guardian.models import UserObjectPermission
import datetime
User = get_user_model()
class CleanExpiredTests(TestCase):
user_info = {'username': 'alice',
'password': 'swordfish',
'email': '[email protected]'}
def test_clean_expired(self):
"""
Test if ``clean_expired`` deletes all users which ``activation_key``
is expired.
"""
# Create an account which is expired.
user = UserenaSignup.objects.create_user(**self.user_info)
user.date_joined -= datetime.timedelta(days=userena_settings.USERENA_ACTIVATION_DAYS + 1)
user.save()
# There should be one account now
User.objects.get(username=self.user_info['username'])
# Clean it.
call_command('clean_expired')
self.failUnlessEqual(User.objects.filter(username=self.user_info['username']).count(), 0)
class CheckPermissionTests(TestCase):
user_info = {'username': 'alice',
'password': 'swordfish',
'email': '[email protected]'}
def test_check_permissions(self):
# Create a new account.
user = UserenaSignup.objects.create_user(**self.user_info)
user.save()
# Remove all permissions
UserObjectPermission.objects.filter(user=user).delete()
self.failUnlessEqual(UserObjectPermission.objects.filter(user=user).count(),
0)
# Check it
call_command('check_permissions')
# User should have all permissions again
user_permissions = UserObjectPermission.objects.filter(user=user).values_list('permission__codename', flat=True)
required_permissions = [u'change_user', u'delete_user', u'change_profile', u'view_profile']
for perm in required_permissions:
if perm not in user_permissions:
self.fail()
# Check it again should do nothing
call_command('check_permissions', test=True)
def test_incomplete_permissions(self):
# Delete the neccesary permissions
profile_model_obj = get_profile_model()
content_type_profile = ContentType.objects.get_for_model(profile_model_obj)
content_type_user = ContentType.objects.get_for_model(User)
for model, perms in ASSIGNED_PERMISSIONS.items():
if model == "profile":
content_type = content_type_profile
else: content_type = content_type_user
for perm in perms:
Permission.objects.get(name=perm[1],
content_type=content_type).delete()
# Check if they are they are back
for model, perms in ASSIGNED_PERMISSIONS.items():
if model == "profile":
content_type = content_type_profile
else: content_type = content_type_user
for perm in perms:
try:
perm = Permission.objects.get(name=perm[1],
content_type=content_type)
except Permission.DoesNotExist: pass
else: self.fail("Found %s: " % perm)
# Repair them
call_command('check_permissions', test=True)
# Check if they are they are back
for model, perms in ASSIGNED_PERMISSIONS.items():
if model == "profile":
content_type = content_type_profile
else: content_type = content_type_user
for perm in perms:
try:
perm = Permission.objects.get(name=perm[1],
content_type=content_type)
except Permission.DoesNotExist:
self.fail()
def test_no_profile(self):
""" Check for warning when there is no profile """
# TODO: Dirty! Currently we check for the warning by getting a 100%
# test coverage, meaning that it dit output some warning.
user = UserenaSignup.objects.create_user(**self.user_info)
# remove the profile of this user
get_profile_model().objects.get(user=user).delete()
# run the command to check for the warning.
call_command('check_permissions', test=True)
| mit | 350,332,701,186,179,650 | 37.54918 | 120 | 0.612162 | false |
flh/odoo | addons/hr_holidays/report/__init__.py | 442 | 1129 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import holidays_summary_report
import available_holidays
import hr_holidays_report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 2,111,375,308,332,560,400 | 42.423077 | 78 | 0.629761 | false |
splav/servo | components/script/dom/bindings/codegen/parser/tests/test_interface_identifier_conflicts_across_members.py | 276 | 1371 | def WebIDLTest(parser, harness):
threw = False
try:
parser.parse("""
interface IdentifierConflictAcrossMembers1 {
const byte thing1 = 1;
readonly attribute long thing1;
};
""")
results = parser.finish()
except:
threw = True
harness.ok(threw, "Should have thrown.")
threw = False
try:
parser.parse("""
interface IdentifierConflictAcrossMembers2 {
readonly attribute long thing1;
const byte thing1 = 1;
};
""")
results = parser.finish()
except:
threw = True
harness.ok(threw, "Should have thrown.")
threw = False
try:
parser.parse("""
interface IdentifierConflictAcrossMembers3 {
getter boolean thing1(DOMString name);
readonly attribute long thing1;
};
""")
results = parser.finish()
except:
threw = True
harness.ok(threw, "Should have thrown.")
threw = False
try:
parser.parse("""
interface IdentifierConflictAcrossMembers1 {
const byte thing1 = 1;
long thing1();
};
""")
results = parser.finish()
except:
threw = True
harness.ok(threw, "Should have thrown.")
| mpl-2.0 | 3,129,112,015,806,216,000 | 21.85 | 56 | 0.520058 | false |
looker/sentry | src/sentry/api/endpoints/broadcast_details.py | 2 | 3477 | from __future__ import absolute_import
import logging
from django.db import IntegrityError, transaction
from django.db.models import Q
from django.utils import timezone
from rest_framework.permissions import IsAuthenticated
from sentry.api.base import Endpoint
from sentry.api.exceptions import ResourceDoesNotExist
from sentry.api.serializers import serialize, AdminBroadcastSerializer, BroadcastSerializer
from sentry.api.validators import AdminBroadcastValidator, BroadcastValidator
from sentry.auth.superuser import is_active_superuser
from sentry.models import Broadcast, BroadcastSeen
logger = logging.getLogger('sentry')
class BroadcastDetailsEndpoint(Endpoint):
permission_classes = (IsAuthenticated, )
def _get_broadcast(self, request, broadcast_id):
if is_active_superuser(request) and request.access.has_permission('broadcasts.admin'):
queryset = Broadcast.objects.all()
else:
queryset = Broadcast.objects.filter(
Q(date_expires__isnull=True) | Q(date_expires__gt=timezone.now()),
is_active=True,
)
try:
return queryset.get(id=broadcast_id)
except Broadcast.DoesNotExist:
raise ResourceDoesNotExist
def _get_validator(self, request):
if is_active_superuser(request):
return AdminBroadcastValidator
return BroadcastValidator
def _serialize_response(self, request, broadcast):
if is_active_superuser(request):
serializer_cls = AdminBroadcastSerializer
else:
serializer_cls = BroadcastSerializer
return self.respond(serialize(broadcast, request.user, serializer=serializer_cls()))
def get(self, request, broadcast_id):
broadcast = self._get_broadcast(request, broadcast_id)
return self._serialize_response(request, broadcast)
def put(self, request, broadcast_id):
broadcast = self._get_broadcast(request, broadcast_id)
validator = self._get_validator(request)(data=request.DATA, partial=True)
if not validator.is_valid():
return self.respond(validator.errors, status=400)
result = validator.object
update_kwargs = {}
if result.get('title'):
update_kwargs['title'] = result['title']
if result.get('message'):
update_kwargs['message'] = result['message']
if result.get('link'):
update_kwargs['link'] = result['link']
if result.get('isActive') is not None:
update_kwargs['is_active'] = result['isActive']
if result.get('dateExpires', -1) != -1:
update_kwargs['date_expires'] = result['dateExpires']
if update_kwargs:
with transaction.atomic():
broadcast.update(**update_kwargs)
logger.info('broadcasts.update', extra={
'ip_address': request.META['REMOTE_ADDR'],
'user_id': request.user.id,
'broadcast_id': broadcast.id,
'data': update_kwargs,
})
if result.get('hasSeen'):
try:
with transaction.atomic():
BroadcastSeen.objects.create(
broadcast=broadcast,
user=request.user,
)
except IntegrityError:
pass
return self._serialize_response(request, broadcast)
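    # Illustrative request body for this endpoint (shape inferred from the
    # validator fields handled above; path and values are hypothetical):
    #
    #   PUT .../broadcasts/{broadcast_id}/
    #   {
    #     "title": "Scheduled maintenance",
    #     "message": "The service will be read-only on Saturday.",
    #     "link": "https://status.example.com",
    #     "isActive": true,
    #     "dateExpires": null,
    #     "hasSeen": true
    #   }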
| bsd-3-clause | -7,549,675,293,973,359,000 | 36.793478 | 94 | 0.626977 | false |
mssurajkaiga/empathy | tools/glib-gtypes-generator.py | 12 | 12756 | #!/usr/bin/python
# Generate GLib GInterfaces from the Telepathy specification.
# The master copy of this program is in the telepathy-glib repository -
# please make any changes there.
#
# Copyright (C) 2006, 2007 Collabora Limited
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import sys
import xml.dom.minidom
from libtpcodegen import file_set_contents, u
from libglibcodegen import escape_as_identifier, \
get_docstring, \
NS_TP, \
Signature, \
type_to_gtype, \
xml_escape
def types_to_gtypes(types):
return [type_to_gtype(t)[1] for t in types]
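# Small illustrative note (not in the original script): type_to_gtype comes
# from libglibcodegen and maps D-Bus signature characters to GLib type macros,
# so, assuming the usual mappings, something like
#
#     types_to_gtypes(['s', 'u'])
#
# would yield ['G_TYPE_STRING', 'G_TYPE_UINT'], which is how the mapping and
# struct generators below pick the GType for each member.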
class GTypesGenerator(object):
def __init__(self, dom, output, mixed_case_prefix):
self.dom = dom
self.Prefix = mixed_case_prefix
self.PREFIX_ = self.Prefix.upper() + '_'
self.prefix_ = self.Prefix.lower() + '_'
self.header = []
self.body = []
self.docs = []
self.output = output
for f in (self.header, self.body, self.docs):
f.append('/* Auto-generated, do not edit.\n *\n'
' * This file may be distributed under the same terms\n'
' * as the specification from which it was generated.\n'
' */\n\n')
# keys are e.g. 'sv', values are the key escaped
self.need_mappings = {}
# keys are the contents of the struct (e.g. 'sssu'), values are the
# key escaped
self.need_structs = {}
# keys are the contents of the struct (e.g. 'sssu'), values are the
# key escaped
self.need_struct_arrays = {}
# keys are the contents of the array (unlike need_struct_arrays!),
# values are the key escaped
self.need_other_arrays = {}
def h(self, code):
self.header.append(code)
def c(self, code):
self.body.append(code)
def d(self, code):
self.docs.append(code)
def do_mapping_header(self, mapping):
members = mapping.getElementsByTagNameNS(NS_TP, 'member')
assert len(members) == 2
impl_sig = ''.join([elt.getAttribute('type')
for elt in members])
esc_impl_sig = escape_as_identifier(impl_sig)
name = (self.PREFIX_ + 'HASH_TYPE_' +
mapping.getAttribute('name').upper())
impl = self.prefix_ + 'type_dbus_hash_' + esc_impl_sig
docstring = get_docstring(mapping) or '(Undocumented)'
self.d('/**\n * %s:\n *\n' % name.strip())
self.d(' * %s\n' % xml_escape(docstring))
self.d(' *\n')
self.d(' * This macro expands to a call to a function\n')
self.d(' * that returns the #GType of a #GHashTable\n')
self.d(' * appropriate for representing a D-Bus\n')
self.d(' * dictionary of signature\n')
self.d(' * <literal>a{%s}</literal>.\n' % impl_sig)
self.d(' *\n')
key, value = members
self.d(' * Keys (D-Bus type <literal>%s</literal>,\n'
% key.getAttribute('type'))
tp_type = key.getAttributeNS(NS_TP, 'type')
if tp_type:
self.d(' * type <literal>%s</literal>,\n' % tp_type)
self.d(' * named <literal>%s</literal>):\n'
% key.getAttribute('name'))
docstring = get_docstring(key) or '(Undocumented)'
self.d(' * %s\n' % xml_escape(docstring))
self.d(' *\n')
self.d(' * Values (D-Bus type <literal>%s</literal>,\n'
% value.getAttribute('type'))
tp_type = value.getAttributeNS(NS_TP, 'type')
if tp_type:
self.d(' * type <literal>%s</literal>,\n' % tp_type)
self.d(' * named <literal>%s</literal>):\n'
% value.getAttribute('name'))
docstring = get_docstring(value) or '(Undocumented)'
self.d(' * %s\n' % xml_escape(docstring))
self.d(' *\n')
self.d(' */\n')
self.h('#define %s (%s ())\n\n' % (name, impl))
self.need_mappings[impl_sig] = esc_impl_sig
array_name = mapping.getAttribute('array-name')
if array_name:
gtype_name = self.PREFIX_ + 'ARRAY_TYPE_' + array_name.upper()
contents_sig = 'a{' + impl_sig + '}'
esc_contents_sig = escape_as_identifier(contents_sig)
impl = self.prefix_ + 'type_dbus_array_of_' + esc_contents_sig
self.d('/**\n * %s:\n\n' % gtype_name)
self.d(' * Expands to a call to a function\n')
self.d(' * that returns the #GType of a #GPtrArray\n')
self.d(' * of #%s.\n' % name)
self.d(' */\n\n')
self.h('#define %s (%s ())\n\n' % (gtype_name, impl))
self.need_other_arrays[contents_sig] = esc_contents_sig
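    # Hedged example of the generated output (names are hypothetical): for a
    # prefix of "Tp" and a <tp:mapping name="String_Variant_Map"> whose two
    # members have D-Bus types 's' and 'v', the header side would gain roughly
    #
    #   #define TP_HASH_TYPE_STRING_VARIANT_MAP (tp_type_dbus_hash_sv ())
    #
    # with the matching tp_type_dbus_hash_sv () implementation emitted later
    # from self.need_mappings.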
def do_struct_header(self, struct):
members = struct.getElementsByTagNameNS(NS_TP, 'member')
impl_sig = ''.join([elt.getAttribute('type') for elt in members])
esc_impl_sig = escape_as_identifier(impl_sig)
name = (self.PREFIX_ + 'STRUCT_TYPE_' +
struct.getAttribute('name').upper())
impl = self.prefix_ + 'type_dbus_struct_' + esc_impl_sig
docstring = struct.getElementsByTagNameNS(NS_TP, 'docstring')
if docstring:
docstring = docstring[0].toprettyxml()
if docstring.startswith('<tp:docstring>'):
docstring = docstring[14:]
if docstring.endswith('</tp:docstring>\n'):
docstring = docstring[:-16]
if docstring.strip() in ('<tp:docstring/>', ''):
docstring = '(Undocumented)'
else:
docstring = '(Undocumented)'
self.d('/**\n * %s:\n\n' % name)
self.d(' * %s\n' % xml_escape(docstring))
self.d(' *\n')
self.d(' * This macro expands to a call to a function\n')
self.d(' * that returns the #GType of a #GValueArray\n')
self.d(' * appropriate for representing a D-Bus struct\n')
self.d(' * with signature <literal>(%s)</literal>.\n'
% impl_sig)
self.d(' *\n')
for i, member in enumerate(members):
self.d(' * Member %d (D-Bus type '
'<literal>%s</literal>,\n'
% (i, member.getAttribute('type')))
tp_type = member.getAttributeNS(NS_TP, 'type')
if tp_type:
self.d(' * type <literal>%s</literal>,\n' % tp_type)
self.d(' * named <literal>%s</literal>):\n'
% member.getAttribute('name'))
docstring = get_docstring(member) or '(Undocumented)'
self.d(' * %s\n' % xml_escape(docstring))
self.d(' *\n')
self.d(' */\n\n')
self.h('#define %s (%s ())\n\n' % (name, impl))
array_name = struct.getAttribute('array-name')
if array_name != '':
array_name = (self.PREFIX_ + 'ARRAY_TYPE_' + array_name.upper())
impl = self.prefix_ + 'type_dbus_array_' + esc_impl_sig
self.d('/**\n * %s:\n\n' % array_name)
self.d(' * Expands to a call to a function\n')
self.d(' * that returns the #GType of a #GPtrArray\n')
self.d(' * of #%s.\n' % name)
self.d(' */\n\n')
self.h('#define %s (%s ())\n\n' % (array_name, impl))
self.need_struct_arrays[impl_sig] = esc_impl_sig
self.need_structs[impl_sig] = esc_impl_sig
def __call__(self):
mappings = self.dom.getElementsByTagNameNS(NS_TP, 'mapping')
structs = self.dom.getElementsByTagNameNS(NS_TP, 'struct')
for mapping in mappings:
self.do_mapping_header(mapping)
for sig in self.need_mappings:
self.h('GType %stype_dbus_hash_%s (void);\n\n' %
(self.prefix_, self.need_mappings[sig]))
self.c('GType\n%stype_dbus_hash_%s (void)\n{\n' %
(self.prefix_, self.need_mappings[sig]))
self.c(' static GType t = 0;\n\n')
self.c(' if (G_UNLIKELY (t == 0))\n')
# FIXME: translate sig into two GTypes
items = tuple(Signature(sig))
gtypes = types_to_gtypes(items)
self.c(' t = dbus_g_type_get_map ("GHashTable", '
'%s, %s);\n' % (gtypes[0], gtypes[1]))
self.c(' return t;\n')
self.c('}\n\n')
for struct in structs:
self.do_struct_header(struct)
for sig in self.need_structs:
self.h('GType %stype_dbus_struct_%s (void);\n\n' %
(self.prefix_, self.need_structs[sig]))
self.c('GType\n%stype_dbus_struct_%s (void)\n{\n' %
(self.prefix_, self.need_structs[sig]))
self.c(' static GType t = 0;\n\n')
self.c(' if (G_UNLIKELY (t == 0))\n')
self.c(' t = dbus_g_type_get_struct ("GValueArray",\n')
items = tuple(Signature(sig))
gtypes = types_to_gtypes(items)
for gtype in gtypes:
self.c(' %s,\n' % gtype)
self.c(' G_TYPE_INVALID);\n')
self.c(' return t;\n')
self.c('}\n\n')
for sig in self.need_struct_arrays:
self.h('GType %stype_dbus_array_%s (void);\n\n' %
(self.prefix_, self.need_struct_arrays[sig]))
self.c('GType\n%stype_dbus_array_%s (void)\n{\n' %
(self.prefix_, self.need_struct_arrays[sig]))
self.c(' static GType t = 0;\n\n')
self.c(' if (G_UNLIKELY (t == 0))\n')
self.c(' t = dbus_g_type_get_collection ("GPtrArray", '
'%stype_dbus_struct_%s ());\n' %
(self.prefix_, self.need_struct_arrays[sig]))
self.c(' return t;\n')
self.c('}\n\n')
for sig in self.need_other_arrays:
self.h('GType %stype_dbus_array_of_%s (void);\n\n' %
(self.prefix_, self.need_other_arrays[sig]))
self.c('GType\n%stype_dbus_array_of_%s (void)\n{\n' %
(self.prefix_, self.need_other_arrays[sig]))
self.c(' static GType t = 0;\n\n')
self.c(' if (G_UNLIKELY (t == 0))\n')
if sig[:2] == 'a{' and sig[-1:] == '}':
# array of mappings
self.c(' t = dbus_g_type_get_collection ('
'"GPtrArray", '
'%stype_dbus_hash_%s ());\n' %
(self.prefix_, escape_as_identifier(sig[2:-1])))
elif sig[:2] == 'a(' and sig[-1:] == ')':
# array of arrays of struct
self.c(' t = dbus_g_type_get_collection ('
'"GPtrArray", '
'%stype_dbus_array_%s ());\n' %
(self.prefix_, escape_as_identifier(sig[2:-1])))
elif sig[:1] == 'a':
# array of arrays of non-struct
self.c(' t = dbus_g_type_get_collection ('
'"GPtrArray", '
'%stype_dbus_array_of_%s ());\n' %
(self.prefix_, escape_as_identifier(sig[1:])))
else:
raise AssertionError("array of '%s' not supported" % sig)
self.c(' return t;\n')
self.c('}\n\n')
file_set_contents(self.output + '.h', u('').join(self.header).encode('utf-8'))
file_set_contents(self.output + '-body.h', u('').join(self.body).encode('utf-8'))
file_set_contents(self.output + '-gtk-doc.h', u('').join(self.docs).encode('utf-8'))
if __name__ == '__main__':
argv = sys.argv[1:]
dom = xml.dom.minidom.parse(argv[0])
GTypesGenerator(dom, argv[1], argv[2])()
| gpl-2.0 | -8,197,877,046,561,582,000 | 40.960526 | 92 | 0.508545 | false |
readevalprint/mezzanine | mezzanine/utils/docs.py | 16 | 12232 | """
Utils called from project_root/docs/conf.py when Sphinx
documentation is generated.
"""
from __future__ import division, print_function, unicode_literals
from future.builtins import map, open, str
from collections import OrderedDict
from datetime import datetime
import os.path
from shutil import copyfile, move
from string import ascii_letters as letters
from socket import gethostname
from warnings import warn
from django.template.defaultfilters import urlize
from django.utils.encoding import force_text
from django.utils.functional import Promise
from mezzanine import __version__
from mezzanine.conf import registry
from mezzanine.utils.importing import import_dotted_path, path_for_import
def deep_force_unicode(value):
"""
Recursively call force_text on value.
"""
if isinstance(value, (list, tuple, set)):
value = type(value)(map(deep_force_unicode, value))
elif isinstance(value, dict):
value = type(value)(map(deep_force_unicode, value.items()))
elif isinstance(value, Promise):
value = force_text(value)
return value
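# Small illustrative example (added for clarity): lazy translation objects
# (Django Promise instances) are resolved recursively, so, roughly,
#
#     deep_force_unicode({"title": ugettext_lazy("Home"), "tags": ("a", "b")})
#
# returns a plain dict of text strings, {"title": "Home", "tags": ("a", "b")},
# assuming ugettext_lazy is the usual lazy-translation helper.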
def build_settings_docs(docs_path, prefix=None):
"""
Converts names, descriptions and defaults for settings in
``mezzanine.conf.registry`` into RST format for use in docs,
optionally filtered by setting names with the given prefix.
"""
# String to use instead of setting value for dynamic defaults
dynamic = "[dynamic]"
lines = [".. THIS DOCUMENT IS AUTO GENERATED VIA conf.py"]
for name in sorted(registry.keys()):
if prefix and not name.startswith(prefix):
continue
setting = registry[name]
settings_name = "``%s``" % name
settings_label = ".. _%s:" % name
setting_default = setting["default"]
if isinstance(setting_default, str):
if gethostname() in setting_default or (
setting_default.startswith("/") and
os.path.exists(setting_default)):
setting_default = dynamic
if setting_default != dynamic:
setting_default = repr(deep_force_unicode(setting_default))
lines.extend(["", settings_label])
lines.extend(["", settings_name, "-" * len(settings_name)])
lines.extend(["",
urlize(setting["description"] or "").replace(
"<a href=\"", "`").replace(
"\" rel=\"nofollow\">", " <").replace(
"</a>", ">`_")])
if setting["choices"]:
choices = ", ".join(["%s: ``%s``" % (str(v), force_text(k))
for k, v in setting["choices"]])
lines.extend(["", "Choices: %s" % choices, ""])
lines.extend(["", "Default: ``%s``" % setting_default])
with open(os.path.join(docs_path, "settings.rst"), "w") as f:
f.write("\n".join(lines).replace("u'", "'").replace("yo'",
"you'").replace("'", "'"))
def build_deploy_docs(docs_path):
try:
from fabric.main import load_fabfile
except ImportError:
warn("Couldn't build fabfile.rst, fabric not installed")
return
project_template_path = path_for_import("mezzanine.project_template")
commands = load_fabfile(os.path.join(project_template_path, "fabfile"))[1]
lines = []
for name in sorted(commands.keys()):
doc = commands[name].__doc__.strip().split("\n")[0]
lines.append(" * ``fab %s`` - %s" % (name, doc))
with open(os.path.join(docs_path, "fabfile.rst"), "w") as f:
f.write("\n".join(lines))
# Python complains if this is inside build_changelog which uses exec.
_changeset_date = lambda cs: datetime.fromtimestamp(cs.date()[0])
def build_changelog(docs_path, package_name="mezzanine"):
"""
Converts Mercurial commits into a changelog in RST format.
"""
project_path = os.path.join(docs_path, "..")
version_file = os.path.join(package_name, "__init__.py")
version_var = "__version__"
changelog_filename = "CHANGELOG"
changelog_file = os.path.join(project_path, changelog_filename)
versions = OrderedDict()
repo = None
ignore = ("AUTHORS", "formatting", "typo", "pep8", "pep 8",
"whitespace", "README", "trans", "print debug",
"debugging", "tabs", "style", "sites", "ignore",
"tweak", "cleanup", "minor", "for changeset",
".com``", "oops", "syntax")
hotfixes = {
"40cbc47b8d8a": "1.0.9",
"a25749986abc": "1.0.10",
}
# Load the repo.
try:
from mercurial import ui, hg, error
from mercurial.commands import tag
except ImportError:
pass
else:
try:
ui = ui.ui()
repo = hg.repository(ui, project_path)
except error.RepoError:
return
if repo is None:
return
# Go through each changeset and assign it to the versions dict.
changesets = [repo.changectx(changeset) for changeset in repo.changelog]
for cs in sorted(changesets, reverse=True, key=_changeset_date):
# Check if the file with the version number is in this changeset
# and if it is, pull it out and assign it as a variable.
files = cs.files()
new_version = False
# Commit message cleanup hacks.
description = cs.description().decode("utf-8")
description = description.rstrip(".").replace("\n", ". ")
while " " in description:
description = description.replace(" ", " ")
description = description.replace(". . ", ". ").replace("...", ",")
while ".." in description:
description = description.replace("..", ".")
description = description.replace(":.", ":").replace("n'. t", "n't")
words = description.split()
# Format var names in commit.
for i, word in enumerate(words):
if (set("._") & set(word[:-1]) and set(letters) & set(word) and
"`" not in word and not word[0].isdigit()):
last = ""
if word[-1] in ",.":
last, word = word[-1], word[:-1]
words[i] = "``%s``%s" % (word, last)
description = " ".join(words)
if version_file in files:
for line in cs[version_file].data().split("\n"):
if line.startswith(version_var):
exec(line)
if locals()[version_var] == "0.1.0":
locals()[version_var] = "1.0.0"
break
versions[locals()[version_var]] = {
"changes": [],
"date": _changeset_date(cs).strftime("%b %d, %Y")
}
new_version = len(files) == 1
# Tag new versions.
hotfix = hotfixes.get(cs.hex()[:12])
if hotfix or new_version:
if hotfix:
version_tag = hotfix
else:
try:
version_tag = locals()[version_var]
except KeyError:
version_tag = None
if version_tag and version_tag not in cs.tags():
try:
tag(ui, repo, version_tag, rev=cs.hex())
print("Tagging version %s" % version_tag)
except:
pass
# Ignore changesets that are merges, bumped the version, closed
# a branch, regenerated the changelog itself, contain an ignore
# word, or contain too few words to be meaningful.
merge = len(cs.parents()) > 1
branch_closed = len(files) == 0
changelog_update = changelog_filename in files
ignored = [w for w in ignore if w.lower() in description.lower()]
too_few_words = len(description.split()) <= 3
if (merge or new_version or branch_closed or changelog_update or
ignored or too_few_words):
continue
# Ensure we have a current version and if so, add this changeset's
# description to it.
version = None
try:
version = locals()[version_var]
except KeyError:
if not hotfix:
continue
user = cs.user().decode("utf-8").split("<")[0].strip()
entry = "%s - %s" % (description, user)
if hotfix or entry not in versions[version]["changes"]:
if hotfix:
versions[hotfix] = {
"changes": [entry],
"date": _changeset_date(cs).strftime("%b %d, %Y"),
}
else:
versions[version]["changes"].insert(0, entry)
# Write out the changelog.
with open(changelog_file, "w") as f:
for version, version_info in versions.items():
header = "Version %s (%s)" % (version, version_info["date"])
f.write("%s\n" % header)
f.write("%s\n" % ("-" * len(header)))
f.write("\n")
if version_info["changes"]:
for change in version_info["changes"]:
f.write(" * %s\n" % change)
else:
f.write(" * No changes listed.\n")
f.write("\n")
def build_modelgraph(docs_path, package_name="mezzanine"):
"""
Creates a diagram of all the models for mezzanine and the given
    package name, generates a smaller version and adds it to the
docs directory for use in model-graph.rst
"""
to_path = os.path.join(docs_path, "img", "graph.png")
build_path = os.path.join(docs_path, "build", "_images")
resized_path = os.path.join(os.path.dirname(to_path), "graph-small.png")
settings = import_dotted_path(package_name +
".project_template.project_name.settings")
apps = [a.rsplit(".")[1] for a in settings.INSTALLED_APPS
if a.startswith("mezzanine.") or a.startswith(package_name + ".")]
try:
from django_extensions.management.commands import graph_models
except ImportError:
warn("Couldn't build model_graph, django_extensions not installed")
else:
options = {"inheritance": True, "outputfile": "graph.png",
"layout": "dot"}
try:
graph_models.Command().execute(*apps, **options)
except Exception as e:
warn("Couldn't build model_graph, graph_models failed on: %s" % e)
else:
try:
move("graph.png", to_path)
except OSError as e:
warn("Couldn't build model_graph, move failed on: %s" % e)
# docs/img/graph.png should exist in the repo - move it to the build path.
try:
if not os.path.exists(build_path):
os.makedirs(build_path)
copyfile(to_path, os.path.join(build_path, "graph.png"))
except OSError as e:
warn("Couldn't build model_graph, copy to build failed on: %s" % e)
try:
from PIL import Image
        image = Image.open(to_path)
        # Resize to 800px wide, preserving aspect ratio (assigning to
        # image.width/height does not resize the image).
        small = image.resize((800, image.size[1] * 800 // image.size[0]),
                             Image.ANTIALIAS)
        small.save(resized_path, "PNG", quality=100)
except Exception as e:
warn("Couldn't build model_graph, resize failed on: %s" % e)
return
# Copy the dashboard screenshot to the build dir too. This doesn't
# really belong anywhere, so we do it here since this is the only
# spot we deal with doc images.
d = "dashboard.png"
copyfile(os.path.join(docs_path, "img", d), os.path.join(build_path, d))
def build_requirements(docs_path, package_name="mezzanine"):
"""
Updates the requirements file with Mezzanine's version number.
"""
mezz_string = "Mezzanine=="
project_path = os.path.join(docs_path, "..")
requirements_file = os.path.join(project_path, package_name,
"project_template", "requirements.txt")
with open(requirements_file, "r") as f:
requirements = f.readlines()
with open(requirements_file, "w") as f:
f.write("Mezzanine==%s\n" % __version__)
for requirement in requirements:
if requirement.strip() and not requirement.startswith(mezz_string):
f.write(requirement)
| bsd-2-clause | -5,292,996,621,614,961,000 | 39.503311 | 79 | 0.56671 | false |
geimer/easybuild-easyconfigs | test/easyconfigs/suite.py | 8 | 2616 | #!/usr/bin/python
##
# Copyright 2012-2013 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://vscentrum.be/nl/en),
# the Hercules foundation (http://www.herculesstichting.be/in_English)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
This script is a collection of all the testcases for easybuild-easyconfigs.
Usage: "python -m easybuild.easyconfigs.test.suite.py" or "./easybuild/easyconfigs/test/suite.py"
@author: Toon Willems (Ghent University)
@author: Kenneth Hoste (Ghent University)
"""
import glob
import os
import shutil
import sys
import tempfile
import unittest
from vsc import fancylogger
import easybuild.tools.build_log # initialize EasyBuild logging, so we disable it
import test.easyconfigs.easyconfigs as e
# disable all logging to significantly speed up tests
fancylogger.disableDefaultHandlers()
fancylogger.setLogLevelError()
os.environ['EASYBUILD_TMP_LOGDIR'] = tempfile.mkdtemp(prefix='easyconfigs_test_')
# call suite() for each module and then run them all
SUITE = unittest.TestSuite([x.suite() for x in [e]])
# uses XMLTestRunner if possible, so we can output an XML file that can be supplied to Jenkins
xml_msg = ""
try:
import xmlrunner # requires unittest-xml-reporting package
xml_dir = 'test-reports'
res = xmlrunner.XMLTestRunner(output=xml_dir, verbosity=1).run(SUITE)
xml_msg = ", XML output of tests available in %s directory" % xml_dir
except ImportError, err:
sys.stderr.write("WARNING: xmlrunner module not available, falling back to using unittest...\n\n")
res = unittest.TextTestRunner().run(SUITE)
shutil.rmtree(os.environ['EASYBUILD_TMP_LOGDIR'])
del os.environ['EASYBUILD_TMP_LOGDIR']
if not res.wasSuccessful():
sys.stderr.write("ERROR: Not all tests were successful.\n")
sys.exit(2)
| gpl-2.0 | 266,304,011,502,791,780 | 36.913043 | 102 | 0.754969 | false |
sdrdl/sdipylib | sdipylib/geo.py | 1 | 1843 | """Support functions for geographic operations"""
def aspect(df):
"""Return the aspect ratio of a Geopandas dataset"""
tb = df.total_bounds
return abs((tb[0] - tb[2]) / (tb[1] - tb[3]))
def scale(df, x):
"""Given an x dimension, return the x and y dimensions to maintain the dataframe aspect ratio"""
return (x, x / aspect(df))
def aspect_fig_size(df, width, subplots='111', **kwargs):
"""
Create a matplotlib figure and axis with a given X width and a height
to keep the boundary box aspect ratio.
:param df: Geopandas GeoDataFrame, from which to calculate the aspect ratio
:param width: X dimension, in inches, of the plot
:param subplots: A Matplotlib subplots string
:param kwargs: Other arguments for plt.figure
:return:
"""
import matplotlib.pylab as plt
fig = plt.figure(figsize = scale(df, width), **kwargs)
ax = fig.add_subplot(subplots)
return fig, ax
def total_centroid(df):
return list(reversed(df.geometry.unary_union.centroid.coords[0]))
def folium_map(df, data_column, tiles='Stamen Toner', fill_color='RdYlGn', zoom_start=12, **kwargs):
import folium
mapa = folium.Map(location=total_centroid(df),
tiles=tiles, zoom_start=zoom_start)
if not df.crs:
df.crs = {'init' :'epsg:4326'}
#threshold_scale = np.linspace(_['non_min_r'].min(),
# _['non_min_r'].max(), 6, dtype=float).tolist()
choro_args = dict(
fill_color=fill_color,
fill_opacity=.6,
line_weight=.7
)
mapa.choropleth(geo_data=df.reset_index(),
data=df.reset_index(),
key_on='feature.properties.geoid',
columns=['geoid',data_column],
**choro_args
)
return mapa
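# Minimal usage sketch (added for illustration; the shapefile path and column
# names are hypothetical, and geopandas/matplotlib/folium must be installed):
#
#     import geopandas as gpd
#     df = gpd.read_file("tracts.shp")
#     fig, ax = aspect_fig_size(df, 8)      # 8 inches wide, height keeps aspect
#     df.plot(ax=ax)
#     m = folium_map(df, "median_income")   # assumes a 'geoid' index/column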
| bsd-2-clause | 241,327,706,808,510,800 | 28.253968 | 100 | 0.601194 | false |
samueldotj/TeeRISC-Simulator | util/stats/barchart.py | 90 | 12472 | # Copyright (c) 2005-2006 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Nathan Binkert
# Lisa Hsu
import matplotlib, pylab
from matplotlib.font_manager import FontProperties
from matplotlib.numerix import array, arange, reshape, shape, transpose, zeros
from matplotlib.numerix import Float
from matplotlib.ticker import NullLocator
matplotlib.interactive(False)
from chart import ChartOptions
class BarChart(ChartOptions):
def __init__(self, default=None, **kwargs):
super(BarChart, self).__init__(default, **kwargs)
self.inputdata = None
self.chartdata = None
self.inputerr = None
self.charterr = None
def gen_colors(self, count):
cmap = matplotlib.cm.get_cmap(self.colormap)
if count == 1:
return cmap([ 0.5 ])
if count < 5:
return cmap(arange(5) / float(4))[:count]
return cmap(arange(count) / float(count - 1))
    # The input data format does not match the data format that the
    # graph function takes, because the input format is chosen to be
    # intuitive. The conversion from input data format to chart data
    # format depends on the dimensionality of the input data. Check
    # here for the dimensionality and correctness of the input data.
def set_data(self, data):
if data is None:
self.inputdata = None
self.chartdata = None
return
data = array(data)
dim = len(shape(data))
if dim not in (1, 2, 3):
raise AttributeError, "Input data must be a 1, 2, or 3d matrix"
self.inputdata = data
# If the input data is a 1d matrix, then it describes a
# standard bar chart.
if dim == 1:
self.chartdata = array([[data]])
# If the input data is a 2d matrix, then it describes a bar
# chart with groups. The matrix being an array of groups of
# bars.
if dim == 2:
self.chartdata = transpose([data], axes=(2,0,1))
# If the input data is a 3d matrix, then it describes an array
# of groups of bars with each bar being an array of stacked
# values.
if dim == 3:
self.chartdata = transpose(data, axes=(1,2,0))
def get_data(self):
return self.inputdata
data = property(get_data, set_data)
def set_err(self, err):
if err is None:
self.inputerr = None
self.charterr = None
return
err = array(err)
dim = len(shape(err))
if dim not in (1, 2, 3):
raise AttributeError, "Input err must be a 1, 2, or 3d matrix"
self.inputerr = err
if dim == 1:
self.charterr = array([[err]])
if dim == 2:
self.charterr = transpose([err], axes=(2,0,1))
if dim == 3:
self.charterr = transpose(err, axes=(1,2,0))
def get_err(self):
return self.inputerr
err = property(get_err, set_err)
# Graph the chart data.
# Input is a 3d matrix that describes a plot that has multiple
# groups, multiple bars in each group, and multiple values stacked
# in each bar. The underlying bar() function expects a sequence of
# bars in the same stack location and same group location, so the
# organization of the matrix is that the inner most sequence
# represents one of these bar groups, then those are grouped
# together to make one full stack of bars in each group, and then
# the outer most layer describes the groups. Here is an example
# data set and how it gets plotted as a result.
#
# e.g. data = [[[10,11,12], [13,14,15], [16,17,18], [19,20,21]],
# [[22,23,24], [25,26,27], [28,29,30], [31,32,33]]]
#
# will plot like this:
#
# 19 31 20 32 21 33
# 16 28 17 29 18 30
# 13 25 14 26 15 27
# 10 22 11 23 12 24
#
    # Because this arrangement is rather counterintuitive, the rearrange
    # function takes various matrices and arranges them to fit this
    # profile.
#
# This code deals with one of the dimensions in the matrix being
# one wide.
#
def graph(self):
if self.chartdata is None:
raise AttributeError, "Data not set for bar chart!"
dim = len(shape(self.inputdata))
cshape = shape(self.chartdata)
if self.charterr is not None and shape(self.charterr) != cshape:
raise AttributeError, 'Dimensions of error and data do not match'
if dim == 1:
colors = self.gen_colors(cshape[2])
colors = [ [ colors ] * cshape[1] ] * cshape[0]
if dim == 2:
colors = self.gen_colors(cshape[0])
colors = [ [ [ c ] * cshape[2] ] * cshape[1] for c in colors ]
if dim == 3:
colors = self.gen_colors(cshape[1])
colors = [ [ [ c ] * cshape[2] for c in colors ] ] * cshape[0]
colors = array(colors)
self.figure = pylab.figure(figsize=self.chart_size)
outer_axes = None
inner_axes = None
if self.xsubticks is not None:
color = self.figure.get_facecolor()
self.metaaxes = self.figure.add_axes(self.figure_size,
axisbg=color, frameon=False)
for tick in self.metaaxes.xaxis.majorTicks:
tick.tick1On = False
tick.tick2On = False
self.metaaxes.set_yticklabels([])
self.metaaxes.set_yticks([])
size = [0] * 4
size[0] = self.figure_size[0]
size[1] = self.figure_size[1] + .12
size[2] = self.figure_size[2]
size[3] = self.figure_size[3] - .12
self.axes = self.figure.add_axes(size)
outer_axes = self.metaaxes
inner_axes = self.axes
else:
self.axes = self.figure.add_axes(self.figure_size)
outer_axes = self.axes
inner_axes = self.axes
bars_in_group = len(self.chartdata)
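        # With N bars per group, each bar is 1/(N+1) of a group's unit width,
        # which leaves a one-bar-wide gap between neighbouring groups.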
width = 1.0 / ( bars_in_group + 1)
center = width / 2
bars = []
for i,stackdata in enumerate(self.chartdata):
bottom = array([0.0] * len(stackdata[0]), Float)
stack = []
for j,bardata in enumerate(stackdata):
bardata = array(bardata)
ind = arange(len(bardata)) + i * width + center
yerr = None
if self.charterr is not None:
yerr = self.charterr[i][j]
bar = self.axes.bar(ind, bardata, width, bottom=bottom,
color=colors[i][j], yerr=yerr)
if self.xsubticks is not None:
self.metaaxes.bar(ind, [0] * len(bardata), width)
stack.append(bar)
bottom += bardata
bars.append(stack)
if self.xlabel is not None:
outer_axes.set_xlabel(self.xlabel)
if self.ylabel is not None:
inner_axes.set_ylabel(self.ylabel)
if self.yticks is not None:
ymin, ymax = self.axes.get_ylim()
nticks = float(len(self.yticks))
ticks = arange(nticks) / (nticks - 1) * (ymax - ymin) + ymin
inner_axes.set_yticks(ticks)
inner_axes.set_yticklabels(self.yticks)
elif self.ylim is not None:
inner_axes.set_ylim(self.ylim)
if self.xticks is not None:
outer_axes.set_xticks(arange(cshape[2]) + .5)
outer_axes.set_xticklabels(self.xticks)
if self.xsubticks is not None:
numticks = (cshape[0] + 1) * cshape[2]
inner_axes.set_xticks(arange(numticks) * width + 2 * center)
xsubticks = list(self.xsubticks) + [ '' ]
inner_axes.set_xticklabels(xsubticks * cshape[2], fontsize=7,
rotation=30)
if self.legend is not None:
if dim == 1:
lbars = bars[0][0]
if dim == 2:
lbars = [ bars[i][0][0] for i in xrange(len(bars))]
if dim == 3:
number = len(bars[0])
lbars = [ bars[0][number - j - 1][0] for j in xrange(number)]
if self.fig_legend:
self.figure.legend(lbars, self.legend, self.legend_loc,
prop=FontProperties(size=self.legend_size))
else:
self.axes.legend(lbars, self.legend, self.legend_loc,
prop=FontProperties(size=self.legend_size))
if self.title is not None:
self.axes.set_title(self.title)
def savefig(self, name):
self.figure.savefig(name)
def savecsv(self, name):
f = file(name, 'w')
data = array(self.inputdata)
dim = len(data.shape)
if dim == 1:
#if self.xlabel:
# f.write(', '.join(list(self.xlabel)) + '\n')
f.write(', '.join([ '%f' % val for val in data]) + '\n')
if dim == 2:
#if self.xlabel:
# f.write(', '.join([''] + list(self.xlabel)) + '\n')
for i,row in enumerate(data):
ylabel = []
#if self.ylabel:
# ylabel = [ self.ylabel[i] ]
f.write(', '.join(ylabel + [ '%f' % v for v in row]) + '\n')
if dim == 3:
f.write("don't do 3D csv files\n")
pass
f.close()
if __name__ == '__main__':
from random import randrange
import random, sys
dim = 3
number = 5
args = sys.argv[1:]
if len(args) > 3:
sys.exit("invalid number of arguments")
elif len(args) > 0:
myshape = [ int(x) for x in args ]
else:
myshape = [ 3, 4, 8 ]
# generate a data matrix of the given shape
size = reduce(lambda x,y: x*y, myshape)
#data = [ random.randrange(size - i) + 10 for i in xrange(size) ]
data = [ float(i)/100.0 for i in xrange(size) ]
data = reshape(data, myshape)
# setup some test bar charts
if True:
chart1 = BarChart()
chart1.data = data
chart1.xlabel = 'Benchmark'
chart1.ylabel = 'Bandwidth (GBps)'
chart1.legend = [ 'x%d' % x for x in xrange(myshape[-1]) ]
chart1.xticks = [ 'xtick%d' % x for x in xrange(myshape[0]) ]
chart1.title = 'this is the title'
if len(myshape) > 2:
chart1.xsubticks = [ '%d' % x for x in xrange(myshape[1]) ]
chart1.graph()
chart1.savefig('/tmp/test1.png')
chart1.savefig('/tmp/test1.ps')
chart1.savefig('/tmp/test1.eps')
chart1.savecsv('/tmp/test1.csv')
if False:
chart2 = BarChart()
chart2.data = data
chart2.colormap = 'gray'
chart2.graph()
chart2.savefig('/tmp/test2.png')
chart2.savefig('/tmp/test2.ps')
# pylab.show()
| bsd-3-clause | 30,879,693,202,814,600 | 35.57478 | 78 | 0.572563 | false |
yjxtogo/horizon | openstack_dashboard/dashboards/project/stacks/urls.py | 56 | 1765 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.conf.urls import patterns
from django.conf.urls import url
from openstack_dashboard.dashboards.project.stacks import views
urlpatterns = patterns(
'',
url(r'^$', views.IndexView.as_view(), name='index'),
url(r'^select_template$',
views.SelectTemplateView.as_view(),
name='select_template'),
url(r'^launch$', views.CreateStackView.as_view(), name='launch'),
url(r'^preview_template$',
views.PreviewTemplateView.as_view(), name='preview_template'),
url(r'^preview$', views.PreviewStackView.as_view(), name='preview'),
url(r'^preview_details$',
views.PreviewStackDetailsView.as_view(), name='preview_details'),
url(r'^stack/(?P<stack_id>[^/]+)/$',
views.DetailView.as_view(), name='detail'),
url(r'^(?P<stack_id>[^/]+)/change_template$',
views.ChangeTemplateView.as_view(), name='change_template'),
url(r'^(?P<stack_id>[^/]+)/edit_stack$',
views.EditStackView.as_view(), name='edit_stack'),
url(r'^stack/(?P<stack_id>[^/]+)/(?P<resource_name>[^/]+)/$',
views.ResourceView.as_view(), name='resource'),
url(r'^get_d3_data/(?P<stack_id>[^/]+)/$',
views.JSONView.as_view(), name='d3_data'),
)
| apache-2.0 | 4,274,024,333,208,426,500 | 43.125 | 75 | 0.665722 | false |
splav/servo | tests/wpt/web-platform-tests/tools/wptrunner/wptrunner/tests/browsers/test_sauce.py | 11 | 6268 | import sys
import mock
import pytest
from os.path import join, dirname
sys.path.insert(0, join(dirname(__file__), "..", "..", ".."))
sauce = pytest.importorskip("wptrunner.browsers.sauce")
from wptserve.config import ConfigBuilder
def test_sauceconnect_success():
with mock.patch.object(sauce.SauceConnect, "upload_prerun_exec"),\
mock.patch.object(sauce.subprocess, "Popen") as Popen,\
mock.patch.object(sauce.os.path, "exists") as exists:
# Act as if it's still running
Popen.return_value.poll.return_value = None
Popen.return_value.returncode = None
# Act as if file created
exists.return_value = True
sauce_connect = sauce.SauceConnect(
sauce_user="aaa",
sauce_key="bbb",
sauce_tunnel_id="ccc",
sauce_connect_binary="ddd",
sauce_connect_args=[])
with ConfigBuilder(browser_host="example.net") as env_config:
sauce_connect(None, env_config)
with sauce_connect:
pass
@pytest.mark.parametrize("readyfile,returncode", [
(True, 0),
(True, 1),
(True, 2),
(False, 0),
(False, 1),
(False, 2),
])
def test_sauceconnect_failure_exit(readyfile, returncode):
with mock.patch.object(sauce.SauceConnect, "upload_prerun_exec"),\
mock.patch.object(sauce.subprocess, "Popen") as Popen,\
mock.patch.object(sauce.os.path, "exists") as exists,\
mock.patch.object(sauce.time, "sleep") as sleep:
Popen.return_value.poll.return_value = returncode
Popen.return_value.returncode = returncode
exists.return_value = readyfile
sauce_connect = sauce.SauceConnect(
sauce_user="aaa",
sauce_key="bbb",
sauce_tunnel_id="ccc",
sauce_connect_binary="ddd",
sauce_connect_args=[])
with ConfigBuilder(browser_host="example.net") as env_config:
sauce_connect(None, env_config)
with pytest.raises(sauce.SauceException):
with sauce_connect:
pass
# Given we appear to exit immediately with these mocks, sleep shouldn't be called
sleep.assert_not_called()
def test_sauceconnect_cleanup():
"""Ensure that execution pauses when the process is closed while exiting
    the context manager. This allows Sauce Connect to close any active
tunnels."""
with mock.patch.object(sauce.SauceConnect, "upload_prerun_exec"),\
mock.patch.object(sauce.subprocess, "Popen") as Popen,\
mock.patch.object(sauce.os.path, "exists") as exists,\
mock.patch.object(sauce.time, "sleep") as sleep:
Popen.return_value.poll.return_value = True
Popen.return_value.returncode = None
exists.return_value = True
sauce_connect = sauce.SauceConnect(
sauce_user="aaa",
sauce_key="bbb",
sauce_tunnel_id="ccc",
sauce_connect_binary="ddd",
sauce_connect_args=[])
with ConfigBuilder(browser_host="example.net") as env_config:
sauce_connect(None, env_config)
with sauce_connect:
Popen.return_value.poll.return_value = None
sleep.assert_not_called()
sleep.assert_called()
def test_sauceconnect_failure_never_ready():
with mock.patch.object(sauce.SauceConnect, "upload_prerun_exec"),\
mock.patch.object(sauce.subprocess, "Popen") as Popen,\
mock.patch.object(sauce.os.path, "exists") as exists,\
mock.patch.object(sauce.time, "sleep") as sleep:
Popen.return_value.poll.return_value = None
Popen.return_value.returncode = None
exists.return_value = False
sauce_connect = sauce.SauceConnect(
sauce_user="aaa",
sauce_key="bbb",
sauce_tunnel_id="ccc",
sauce_connect_binary="ddd",
sauce_connect_args=[])
with ConfigBuilder(browser_host="example.net") as env_config:
sauce_connect(None, env_config)
with pytest.raises(sauce.SauceException):
with sauce_connect:
pass
# We should sleep while waiting for it to create the readyfile
sleep.assert_called()
# Check we actually kill it after termination fails
Popen.return_value.terminate.assert_called()
Popen.return_value.kill.assert_called()
def test_sauceconnect_tunnel_domains():
with mock.patch.object(sauce.SauceConnect, "upload_prerun_exec"),\
mock.patch.object(sauce.subprocess, "Popen") as Popen,\
mock.patch.object(sauce.os.path, "exists") as exists:
Popen.return_value.poll.return_value = None
Popen.return_value.returncode = None
exists.return_value = True
sauce_connect = sauce.SauceConnect(
sauce_user="aaa",
sauce_key="bbb",
sauce_tunnel_id="ccc",
sauce_connect_binary="ddd",
sauce_connect_args=[])
with ConfigBuilder(browser_host="example.net",
alternate_hosts={"alt": "example.org"},
subdomains={"a", "b"},
not_subdomains={"x", "y"}) as env_config:
sauce_connect(None, env_config)
with sauce_connect:
Popen.assert_called_once()
args, kwargs = Popen.call_args
cmd = args[0]
assert "--tunnel-domains" in cmd
i = cmd.index("--tunnel-domains")
rest = cmd[i+1:]
assert len(rest) >= 1
if len(rest) > 1:
assert rest[1].startswith("-"), "--tunnel-domains takes a comma separated list (not a space separated list)"
assert set(rest[0].split(",")) == {'example.net',
'a.example.net',
'b.example.net',
'example.org',
'a.example.org',
'b.example.org'}
| mpl-2.0 | -4,200,923,071,959,524,000 | 37.219512 | 128 | 0.564295 | false |
HPENetworking/HPEIMCUtils | PythonUtilities/Gather_IMC_Data/Gather_System_Device_Categories/gather_system_device_categories.py | 1 | 1524 |
#!/usr/bin/env python3
# author: @netmanchris
"""
Copyright 2016 Hewlett Packard Enterprise Development LP.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
""" This file will take the GET the contents of the HPE IMC Network Assets module and dump them into a CSV file called
all_assets.csv where each line of the CSV file represents one physical or logical asset as discovered by the HPE IMC
platform.
This library uses the pyhpeimc python wrapper around the IMC RESTful API to automatically push the new performance tasks
with minimal effort on the part of the user."""
import csv
from pyhpeimc.auth import *
from pyhpeimc.plat.device import *
auth = IMCAuth("http://", "10.101.0.203", "8080", "admin", "admin")
all_device_categories = get_system_category(auth.creds, auth.url)
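# get_system_category() is expected to return a list of dicts; the keys of the
# first entry are used below as the CSV header row.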
keys = all_device_categories[0].keys()
with open ('all_device_categories.csv', 'w') as file:
dict_writer = csv.DictWriter(file, keys)
dict_writer.writeheader()
dict_writer.writerows(all_device_categories)
| apache-2.0 | 1,096,317,630,269,541,600 | 29.48 | 121 | 0.744751 | false |
mhostetter/gnuradio | gr-audio/examples/python/noise.py | 58 | 1968 | #!/usr/bin/env python
#
# Copyright 2007 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr
from gnuradio import audio
from gnuradio import digital
from gnuradio.eng_option import eng_option
from optparse import OptionParser
class my_top_block(gr.top_block):
def __init__(self):
gr.top_block.__init__(self)
parser = OptionParser(option_class=eng_option)
parser.add_option("-O", "--audio-output", type="string", default="",
help="pcm output device name. E.g., hw:0,0 or /dev/dsp")
parser.add_option("-r", "--sample-rate", type="eng_float", default=48000,
help="set sample rate to RATE (48000)")
(options, args) = parser.parse_args ()
if len(args) != 0:
parser.print_help()
raise SystemExit, 1
sample_rate = int(options.sample_rate)
ampl = 0.1
src = digital.glfsr_source_b(32) # Pseudorandom noise source
b2f = digital.chunks_to_symbols_bf([ampl, -ampl], 1)
dst = audio.sink(sample_rate, options.audio_output)
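        # Flowgraph: GLFSR pseudorandom bit source -> map bits {0,1} to audio
        # samples {+ampl, -ampl} -> sound card sink.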
self.connect(src, b2f, dst)
if __name__ == '__main__':
try:
my_top_block().run()
except KeyboardInterrupt:
pass
| gpl-3.0 | 2,115,864,993,229,418,200 | 34.142857 | 83 | 0.655488 | false |
trezorg/django | tests/regressiontests/select_related_regress/models.py | 92 | 2390 | from django.db import models
class Building(models.Model):
name = models.CharField(max_length=10)
def __unicode__(self):
return u"Building: %s" % self.name
class Device(models.Model):
building = models.ForeignKey('Building')
name = models.CharField(max_length=10)
def __unicode__(self):
return u"device '%s' in building %s" % (self.name, self.building)
class Port(models.Model):
device = models.ForeignKey('Device')
port_number = models.CharField(max_length=10)
def __unicode__(self):
return u"%s/%s" % (self.device.name, self.port_number)
class Connection(models.Model):
start = models.ForeignKey(Port, related_name='connection_start',
unique=True)
end = models.ForeignKey(Port, related_name='connection_end', unique=True)
def __unicode__(self):
return u"%s to %s" % (self.start, self.end)
# Another non-tree hierarchy that exercises code paths similar to the above
# example, but in a slightly different configuration.
class TUser(models.Model):
name = models.CharField(max_length=200)
class Person(models.Model):
user = models.ForeignKey(TUser, unique=True)
class Organizer(models.Model):
person = models.ForeignKey(Person)
class Student(models.Model):
person = models.ForeignKey(Person)
class Class(models.Model):
org = models.ForeignKey(Organizer)
class Enrollment(models.Model):
std = models.ForeignKey(Student)
cls = models.ForeignKey(Class)
# Models for testing bug #8036.
class Country(models.Model):
name = models.CharField(max_length=50)
class State(models.Model):
name = models.CharField(max_length=50)
country = models.ForeignKey(Country)
class ClientStatus(models.Model):
name = models.CharField(max_length=50)
class Client(models.Model):
name = models.CharField(max_length=50)
state = models.ForeignKey(State, null=True)
status = models.ForeignKey(ClientStatus)
class SpecialClient(Client):
value = models.IntegerField()
# Some model inheritance exercises
class Parent(models.Model):
name = models.CharField(max_length=10)
def __unicode__(self):
return self.name
class Child(Parent):
value = models.IntegerField()
class Item(models.Model):
name = models.CharField(max_length=10)
child = models.ForeignKey(Child, null=True)
def __unicode__(self):
return self.name
| bsd-3-clause | 2,913,020,079,177,469,400 | 26.790698 | 77 | 0.696234 | false |
synaptek/libchromiumcontent | tools/generate_filenames_gypi.py | 3 | 2342 | #!/usr/bin/env python
import glob
import os
import sys
TARGET_PLATFORM = {
'cygwin': 'win32',
'darwin': 'darwin',
'linux2': 'linux',
'win32': 'win32',
}[sys.platform]
SHARED_LIBRARY_SUFFIX = {
'darwin': 'dylib',
'linux': 'so',
'win32': 'dll',
}[TARGET_PLATFORM]
STATIC_LIBRARY_SUFFIX = {
'darwin': 'a',
'linux': 'a',
'win32': 'lib',
}[TARGET_PLATFORM]
EXCLUDE_SHARED_LIBRARIES = {
'darwin': [
],
'linux': [
],
'win32': [
'd3dcompiler_47.dll',
'libEGL.dll',
'libGLESv2.dll',
],
}[TARGET_PLATFORM]
EXCLUDE_STATIC_LIBRARIES = {
'darwin': [
'libv8_nosnapshot.a',
],
'linux': [
'libprotobuf_full_do_not_use.a',
'libgenperf_libs.a',
'libv8_nosnapshot.a',
],
'win32': [
'libEGL.dll.lib',
'libGLESv2.dll.lib',
],
}[TARGET_PLATFORM]
GYPI_TEMPLATE = """\
{
'variables': {
'libchromiumcontent_root_dir': %(src)s,
'libchromiumcontent_shared_libraries': %(shared_libraries)s,
'libchromiumcontent_shared_v8_libraries': %(shared_v8_libraries)s,
'libchromiumcontent_static_libraries': %(static_libraries)s,
'libchromiumcontent_static_v8_libraries': %(static_v8_libraries)s,
},
}
"""
def main(target_file, shared_src, static_src):
(shared_libraries, shared_v8_libraries) = searh_files(
shared_src, SHARED_LIBRARY_SUFFIX, EXCLUDE_SHARED_LIBRARIES)
(static_libraries, static_v8_libraries) = searh_files(
static_src, STATIC_LIBRARY_SUFFIX, EXCLUDE_STATIC_LIBRARIES)
content = GYPI_TEMPLATE % {
'src': repr(os.path.abspath(os.path.dirname(target_file))),
'shared_libraries': shared_libraries,
'shared_v8_libraries': shared_v8_libraries,
'static_libraries': static_libraries,
'static_v8_libraries': static_v8_libraries,
}
with open(target_file, 'wb+') as f:
f.write(content)
def searh_files(src, suffix, exclude):
files = glob.glob(os.path.join(src, '*.' + suffix))
files = [f for f in files if os.path.basename(f) not in exclude]
return ([os.path.abspath(f) for f in files if not is_v8_library(f)],
[os.path.abspath(f) for f in files if is_v8_library(f)])
def is_v8_library(p):
return (os.path.basename(p).startswith(('v8', 'libv8')) or
os.path.basename(p).startswith(('icu', 'libicu')))
if __name__ == '__main__':
sys.exit(main(sys.argv[1], sys.argv[2], sys.argv[3]))
| mit | -3,437,688,668,875,663,400 | 23.914894 | 70 | 0.636208 | false |
boryszef/moltools-python | tests/test_trajectory_xyz.py | 1 | 9849 | import unittest
import tempfile
import random
import os
import stat
import numpy
import mdarray as mt
# Return random string of spaces and tabs
def rndb():
blanks = [' ', '\t']
l = [ random.choice(blanks) for i in range(random.randint(0, 5)) ]
random.shuffle(l)
return "".join(l)
atomicMasses = {
'H':(1,1.008), 'C':(6,12.011), 'N':(7,14.007), 'O':(8,15.999),
'P':(15,30.973762), 'S':(16,32.06), 'Cl':(17,35.45), 'Na':(11,22.98976928),
'Cu':(29,63.546), 'Fe':(26,55.845) }
characters = "".join([chr(x) for x in range(ord('A'), ord('z')+1)])
class TestTrajectoryXYZ(unittest.TestCase):
def setUp(self):
# Create tempdir for XYZ files
self.tmpDir = tempfile.mkdtemp()
# Keep for comparison
self.data = []
# Write ten, very messy xyz files
self.nFiles = 10
for i in range(self.nFiles):
structure = {}
nAtoms = random.randint(1, 20)
structure['nAtoms'] = nAtoms
nFrames = random.randint(1, 10)
structure['nFrames'] = nFrames
xyz = open("%s/%d.xyz" % (self.tmpDir, i), "w")
asym = list(atomicMasses.keys())
symbols = [random.choice(asym) for x in range(nAtoms)]
structure['symbols'] = symbols[:]
structure['coordinates'] = []
structure['comments'] = []
for f in range(nFrames):
xyz.write("%s%d%s\n" % (rndb(), nAtoms, rndb()))
comLen = random.randint(0, 100)
comment = "".join([random.choice(characters) for x in range(comLen)])
structure['comments'].append(comment[:])
xyz.write("%s\n" % comment)
crd = numpy.random.uniform(-100, 100, (nAtoms, 3))
structure['coordinates'].append(crd.copy())
for a in range(nAtoms):
xyz.write("%s%s%s %.6f%s %.6f%s %.6f%s\n" % (rndb(),
symbols[a], rndb(), crd[a,0], rndb(), crd[a,1], rndb(),
crd[a,2], rndb()))
xyz.close()
self.data.append(structure)
# Use various units
nAtoms = 10
units = { "angs" : 1, "bohr" : 0.529177209, "nm" : 10 }
self.units_crd = numpy.random.uniform(-10, 10, (nAtoms, 3))
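        # The same reference geometry is written out in each unit system; dividing
        # by the conversion factor means that reading the file back with units=u
        # should reproduce self.units_crd in Angstroms (checked in test_Units).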
for u,f in units.items():
xyz = open("%s/%s.xyz" % (self.tmpDir, u), "w")
xyz.write("%d\n\n" % nAtoms)
for a in range(nAtoms):
xyz.write("He %.7f %.7f %.7f\n" % tuple(self.units_crd[a,:]/f))
xyz.close()
# Write file with extra data
nAtoms = 10
crd = numpy.random.uniform(-10, 10, (nAtoms, 4))
self.extra_data = crd[:,3]
xyz = open("%s/extra.xyz" % self.tmpDir, "w")
xyz.write("%d\n\n" % nAtoms)
for a in range(nAtoms):
xyz.write("Ar %.3f %.3f %.3f %.6f\n" % tuple(crd[a,:]))
xyz.close()
def tearDown(self):
# Remove files
for f in os.listdir(self.tmpDir):
if not f.startswith("."): os.remove(self.tmpDir+"/"+f)
# Remove directory
os.rmdir(self.tmpDir)
def test_initResult(self):
for i in range(self.nFiles):
absolute = "%s/%d.xyz" % (self.tmpDir, i)
traj = mt.Trajectory(absolute)
self.assertEqual(traj.fileName, absolute)
self.assertEqual(traj.nAtoms, self.data[i]['nAtoms'])
symbols = self.data[i]['symbols']
for a in range(self.data[i]['nAtoms']):
self.assertEqual(traj.symbols[a], symbols[a])
m = atomicMasses[symbols[a]]
self.assertAlmostEqual(traj.masses[a], m[1])
self.assertEqual(traj.aNumbers[a], m[0])
def test_readXYZ(self):
for i in range(self.nFiles):
absolute = "%s/%d.xyz" % (self.tmpDir, i)
traj = mt.Trajectory(absolute)
frameNo = 0
frame = traj.read()
while frame:
diff = frame['coordinates'] - self.data[i]['coordinates'][frameNo]
maxDiff = numpy.max(numpy.abs(diff))
self.assertTrue(maxDiff <= 1e-6)
comment = frame['comment']
self.assertEqual(comment, self.data[i]['comments'][frameNo])
frameNo += 1
frame = traj.read()
self.assertEqual(frameNo, len(self.data[i]['coordinates']))
def test_Units(self):
for u in ["angs", "bohr", "nm"]:
absolute = "%s/%s.xyz" % (self.tmpDir, u)
traj = mt.Trajectory(absolute, units=u)
frame = traj.read()
diff = frame['coordinates'] - self.units_crd
maxDiff = numpy.max(numpy.abs(diff))
self.assertTrue(maxDiff <= 1e-6)
def test_initExceptions(self):
self.assertRaises(TypeError, mt.Trajectory)
self.assertRaises(FileNotFoundError, mt.Trajectory, 'nonexistent.xyz')
denied = self.tmpDir+"/denied.xyz"
open(denied, "w").close()
os.chmod(denied, 0)
self.assertRaises(PermissionError, mt.Trajectory, denied)
os.chmod(denied, stat.S_IRWXU)
xyz = self.tmpDir+"/angs.xyz"
# Wrong format
self.assertRaises(ValueError, mt.Trajectory, xyz, format='fmt')
# Wrong mode
self.assertRaises(ValueError, mt.Trajectory, xyz, 'mode')
self.assertRaises(ValueError, mt.Trajectory, xyz, mode='mode')
# Wrong units
self.assertRaises(ValueError, mt.Trajectory, xyz, units='units')
# Symbols in 'read' mode
self.assertRaises(ValueError, mt.Trajectory, xyz, 'r', [])
self.assertRaises(ValueError, mt.Trajectory, xyz, 'r', symbols=[])
# 'append' mode but no symbols
self.assertRaises(ValueError, mt.Trajectory, xyz, 'a')
self.assertRaises(ValueError, mt.Trajectory, xyz, mode='a')
empty = self.tmpDir+"/empty"
open(empty, "w").close()
# Empty file and no format given
self.assertRaises(OSError, mt.Trajectory, empty)
nonempty = self.tmpDir+"/nonempty"
f = open(nonempty, "w")
f.write("x\n")
f.close()
# Guessing format fails
self.assertRaises(RuntimeError, mt.Trajectory, nonempty)
self.assertRaises(RuntimeError, mt.Trajectory, nonempty, 'a', [])
# Writing to an existing file
self.assertRaises(FileExistsError, mt.Trajectory, nonempty, 'w', [], format='XYZ')
# Wrong format
self.assertRaises(ValueError, mt.Trajectory, nonempty, 'a', [], format='fmt')
# Wrong units
self.assertRaises(ValueError, mt.Trajectory, nonempty, 'a', [], format='XYZ', units='units')
def test_WriteXYZ(self):
sym = ['C']
comment = 'blah'
crd = [1.1111111111,2,3]
for dtype in [ numpy.float16, numpy.float32,
numpy.float64, numpy.float128 ]:
x = numpy.array([crd], dtype=dtype)
xyz = self.tmpDir+"/out.xyz"
traj = mt.Trajectory(xyz, 'w', sym, format='XYZ', units='angs')
traj.write(x, comment=comment)
del traj
traj = mt.Trajectory(xyz)
self.assertEqual(traj.nAtoms, 1)
self.assertEqual(traj.symbols, sym)
frame = traj.read()
self.assertEqual(frame['comment'], comment)
diff = crd - frame['coordinates'][0,:]
maxDiff = numpy.max(numpy.abs(diff))
# 1e-8 is the precision of write_frame_to_xyz due to the format % 12.8f
epsilon = max(1e-8, numpy.finfo(dtype).resolution)
self.assertTrue(maxDiff <= epsilon)
# Try (and fail) to read more frames
frame = traj.read()
self.assertIsNone(frame)
os.remove(xyz)
def test_writeExceptions(self):
sym = ['C']
comment = 'blah'
crd = [1,2,3]
xyz = self.tmpDir+"/out.xyz"
# No symbols given
self.assertRaises(ValueError, mt.Trajectory, xyz, 'w', format='XYZ')
self.assertRaises(ValueError, mt.Trajectory, xyz, mode='w', format='XYZ')
traj = mt.Trajectory(xyz, 'w', sym, format='XYZ')
# Wrong shape of the matrix
x = numpy.array([], dtype=numpy.float)
self.assertRaises(RuntimeError, traj.write, x, comment=comment)
x = numpy.array([[[]]], dtype=numpy.float)
self.assertRaises(RuntimeError, traj.write, x, comment=comment)
# Wrong dimensions of the matrix
x = numpy.array([crd, crd], dtype=numpy.float)
self.assertRaises(RuntimeError, traj.write, x, comment=comment)
# Try to read in write mode
self.assertRaises(RuntimeError, traj.read)
del traj
def test_writeMultiple(self):
nFrames = 10
sym = ['C', 'O']
comment = 'blah'
crd = [[1,2,3], [4,5,6]]
x = numpy.array(crd, dtype=numpy.float)
xyz = self.tmpDir+"/out.xyz"
traj = mt.Trajectory(xyz, 'w', sym, format='XYZ', units='angs')
for i in range(nFrames):
traj.write(x, comment=str(i))
del traj
traj = mt.Trajectory(xyz)
count = 0
frame = traj.read()
while frame:
self.assertEqual(frame['comment'], str(count))
count += 1
frame = traj.read()
self.assertEqual(traj.lastFrame, nFrames-1)
def test_readExtraData(self):
traj = mt.Trajectory(self.tmpDir+"/extra.xyz")
frame = traj.read()
self.assertTrue('extra' in frame)
diff = frame['extra'] - self.extra_data
maxDiff = numpy.max(numpy.abs(diff))
epsilon = max(1e-6, numpy.finfo(numpy.float).resolution)
self.assertTrue(maxDiff <= epsilon)
| gpl-3.0 | 5,579,460,720,449,596,000 | 35.343173 | 100 | 0.547365 | false |
inveniosoftware/invenio-upgrader | invenio_upgrader/upgrades/invenio_upgrader_2015_09_23_legacy_removal.py | 3 | 5314 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Remove legacy upgrade recipes."""
import warnings
from invenio_db import db
from sqlalchemy.sql import text
from invenio_upgrader import UpgradeBase, op
class LegacyRemoval(UpgradeBase):
"""Remove legacy upgrade recipes."""
_depends_on = []
legacy_upgrades = [
'invenio_2012_10_29_idxINDEX_new_indexer_column',
'invenio_2012_10_31_WebAuthorProfile_bibformat_dependency_update',
'invenio_2012_10_31_tablesorter_location',
'invenio_2012_11_01_lower_user_email',
'invenio_2012_11_04_circulation_and_linkback_updates',
'invenio_2012_11_07_xtrjob_last_recid',
'invenio_2012_11_15_bibdocfile_model',
'invenio_2012_11_15_hstRECORD_marcxml_longblob',
'invenio_2012_11_21_aiduserinputlog_userid_check',
'invenio_2012_11_27_new_selfcite_tables',
'invenio_2012_12_05_oaiHARVEST_arguments_blob',
'invenio_2012_12_06_new_citation_dict_table',
'invenio_2012_12_11_new_citation_errors_table',
'invenio_2013_01_08_new_goto_table',
'invenio_2013_01_12_bibrec_master_format',
'invenio_2013_02_01_oaiREPOSITORY_last_updated',
'invenio_2013_02_06_new_collectionboxname_table',
'invenio_2013_03_07_crcILLREQUEST_overdue_letter',
'invenio_2013_03_18_aidPERSONIDDATA_last_updated',
'invenio_2013_03_18_bibauthorid_search_engine_tables',
'invenio_2013_03_18_wapCACHE_object_value_longblob',
'invenio_2013_03_20_idxINDEX_synonym_kb',
'invenio_2013_03_20_new_self_citation_dict_table',
'invenio_2013_03_21_idxINDEX_stopwords',
'invenio_2013_03_25_idxINDEX_html_markup',
'invenio_2013_03_26_new_citation_log_table',
'invenio_2013_03_28_bibindex_bibrank_type_index',
'invenio_2013_03_28_idxINDEX_tokenizer',
'invenio_2013_03_29_idxINDEX_stopwords_update',
'invenio_2013_04_11_bibformat_2nd_pass',
'invenio_2013_04_30_new_plotextractor_websubmit_function',
'invenio_2013_06_11_rnkDOWNLOADS_file_format',
'invenio_2013_06_20_new_bibcheck_rules_table',
'invenio_2013_06_24_new_bibsched_status_table',
'invenio_2013_08_20_bibauthority_updates',
'invenio_2013_08_22_hstRECORD_affected_fields',
'invenio_2013_08_22_new_index_itemcount',
'invenio_2013_09_02_new_bibARXIVPDF',
'invenio_2013_09_10_new_param_websubmit_function',
'invenio_2013_09_13_new_bibEDITCACHE',
'invenio_2013_09_16_aidPERSONIDDATA_datablob',
'invenio_2013_09_25_virtual_indexes',
'invenio_2013_09_26_webauthorlist',
'invenio_2013_09_30_indexer_interface',
'invenio_2013_10_11_bibHOLDINGPEN_longblob',
'invenio_2013_10_18_crcLIBRARY_type',
'invenio_2013_10_18_new_index_filetype',
'invenio_2013_10_25_delete_recjson_cache',
'invenio_2013_10_25_new_param_websubmit_function',
'invenio_2013_11_12_new_param_websubmit_function',
'invenio_2013_12_04_seqSTORE_larger_value',
'invenio_2013_12_05_new_index_doi',
'invenio_2014_01_22_queue_table_virtual_index',
'invenio_2014_01_22_redis_sessions',
'invenio_2014_01_24_seqSTORE_larger_value',
'invenio_2014_03_13_new_index_filename',
'invenio_2014_06_02_oaiHARVEST_arguments_cfg_namechange',
'invenio_2014_08_12_format_code_varchar20',
'invenio_2014_08_13_tag_recjsonvalue',
'invenio_2014_08_31_next_collection_tree',
'invenio_2014_09_09_tag_recjsonvalue_not_null',
'invenio_2014_11_04_format_recjson',
'invenio_2015_01_13_hide_holdings',
'invenio_2015_03_03_tag_value',
'invenio_2015_04_15_collection_names_with_slashes',
'invenio_release_1_1_0',
'invenio_release_1_2_0',
'submit_2015_03_03_fix_models',
'submit_2015_04_30_fix_models_sbmFORMATEXTENSION_sbmGFILERESULT',
'invenio_2015_07_14_innodb',
]
def do_upgrade(self):
"""Implement your upgrades here."""
sql = text('delete from upgrade where upgrade = :upgrade')
for upgrade in self.legacy_upgrades:
db.engine.execute(sql, upgrade=upgrade)
def pre_upgrade(self):
"""Run pre-upgrade checks (optional)."""
sql = text('select 1 from upgrade where upgrade = :upgrade')
for upgrade in self.legacy_upgrades:
if not db.engine.execute(sql, upgrade=upgrade).fetchall():
warnings.warn("Upgrade '{}' was not applied.".format(upgrade))
| gpl-2.0 | 895,683,516,293,590,400 | 43.655462 | 78 | 0.668988 | false |
Naeka/vosae-app | www/core/views.py | 1 | 4175 | # -*- coding:Utf-8 -*-
from django.shortcuts import render_to_response, redirect
from django.template import RequestContext, loader
from django.views.generic import TemplateView
from django.utils.formats import get_format
from django.conf import settings
from django.http import (
HttpResponseRedirect,
HttpResponseForbidden,
HttpResponseNotFound,
HttpResponseServerError,
Http404
)
import urllib2
from core.models import (
Tenant,
VosaeUser,
VosaeFile
)
def error_403(request):
t = loader.get_template('core/errors/403.html')
return HttpResponseForbidden(t.render(RequestContext(request)))
def error_404(request):
t = loader.get_template('core/errors/404.html')
return HttpResponseNotFound(t.render(RequestContext(request)))
def error_500(request):
t = loader.get_template('core/errors/500.html')
return HttpResponseServerError(t.render(RequestContext(request)))
def root(request):
if not request.user.is_authenticated():
return redirect('signin')
return HttpResponseRedirect(settings.WEB_ENDPOINT)
def download_file(request, tenant_slug, file_id, public_token=None, stream=False):
try:
request.tenant = Tenant.objects.get(slug=tenant_slug)
if not request.user.is_anonymous():
request.user.groups.get(name=tenant_slug)
request.vosae_user = VosaeUser.objects.get(tenant=request.tenant, email=request.user.email)
else:
# If anonymous user, public_token is required
assert public_token is not None
except:
raise Http404()
if request.vosae_user and not request.vosae_user.has_perm("see_vosaefile"):
return HttpResponseForbidden()
try:
kwargs = {'tenant': request.tenant, 'id': file_id}
if public_token:
kwargs.update(public_token=public_token)
vosae_file = VosaeFile.objects.get(**kwargs)
if not public_token:
# Since public token is provided, file is assumed as publicly accessible
# and authorization checks are useless.
for perm in vosae_file.permissions:
if not request.vosae_user.has_perm(perm):
return HttpResponseForbidden()
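        # Redirect the client to a short-lived (20 second) signed S3 URL; when
        # streaming, the response headers make S3 serve the file inline under
        # its original filename.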
response_headers = {'response-content-disposition': 'inline; filename="{0}"'.format(str(urllib2.quote(vosae_file.name.encode('utf8'))))}
s3url = vosae_file.file.key.generate_url(20, response_headers=response_headers if stream else None)
return HttpResponseRedirect(s3url)
except:
raise Http404()
def spec(request):
# During specs tests we use local static files (even during Travis builds)
original_settings_staticfiles_storage = settings.STATICFILES_STORAGE
settings.STATICFILES_STORAGE = 'django.contrib.staticfiles.storage.StaticFilesStorage'
# Force the auth user email during specs
request.user.email = "[email protected]"
formats = {
# Defaults
"DECIMAL_SEPARATOR": get_format("DECIMAL_SEPARATOR"),
"THOUSAND_SEPARATOR": get_format("THOUSAND_SEPARATOR"),
"NUMBER_GROUPING": get_format("NUMBER_GROUPING"),
# Custom
"MON_DECIMAL_POINT": get_format("MON_DECIMAL_POINT"),
"MON_THOUSANDS_SEP": get_format("MON_THOUSANDS_SEP"),
"N_CS_PRECEDES": get_format("N_CS_PRECEDES"),
"P_CS_PRECEDES": get_format("P_CS_PRECEDES"),
"N_SEP_BY_SPACE": get_format("N_SEP_BY_SPACE"),
"P_SEP_BY_SPACE": get_format("P_SEP_BY_SPACE"),
"N_SIGN_POSN": get_format("N_SIGN_POSN"),
"P_SIGN_POSN": get_format("P_SIGN_POSN"),
"NEGATIVE_SIGN": get_format("NEGATIVE_SIGN"),
"POSITIVE_SIGN": get_format("POSITIVE_SIGN"),
}
response = render_to_response("core/spec.html", {'formats': formats}, context_instance=RequestContext(request))
settings.STATICFILES_STORAGE = original_settings_staticfiles_storage
return response
class TextTemplateView(TemplateView):
def render_to_response(self, context, **response_kwargs):
response_kwargs['content_type'] = 'text/plain'
return super(TemplateView, self).render_to_response(context, **response_kwargs)
| agpl-3.0 | 1,459,856,867,453,657,900 | 34.381356 | 144 | 0.679042 | false |
olituks/sentinella | frontend/library/web2py/applications/admin/languages/pt-br.py | 17 | 18517 | # -*- coding: utf-8 -*-
{
'!langcode!': 'pt-br',
'!langname!': 'Português Brasileiro',
'"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN': '"update" é uma expressão opcional como "campo1=\'novo_valor\'". Não é permitido atualizar ou apagar resultados de um JOIN',
'%s %%{row} deleted': '%s registros apagados',
'%s %%{row} updated': '%s registros atualizados',
'%Y-%m-%d': '%d/%m/%Y',
'%Y-%m-%d %H:%M:%S': '%d/%m/%Y %H:%M:%S',
'(requires internet access, experimental)': '(requer acesso à internet, experimental)',
'(something like "it-it")': '(algo como "it-it")',
'@markmin\x01An error occured, please [[reload %s]] the page': 'An error occured, please [[reload %s]] the page',
'@markmin\x01Searching: **%s** %%{file}': 'Buscando: **%s** arquivos',
'A new version of web2py is available': 'Está disponível uma nova versão do web2py',
'A new version of web2py is available: %s': 'Está disponível uma nova versão do web2py: %s',
'About': 'sobre',
'About application': 'Sobre a aplicação',
'additional code for your application': 'código adicional para sua aplicação',
'Additional code for your application': 'Código adicional para sua aplicação',
'admin disabled because no admin password': ' admin desabilitado por falta de senha definida',
'admin disabled because not supported on google app engine': 'admin desabilitado porque não é suportado no GAE',
'admin disabled because unable to access password file': 'admin desabilitado porque não foi possível ler o arquivo de senha',
'Admin is disabled because insecure channel': 'Admin desabilitado pois o canal não é seguro',
'Admin is disabled because unsecure channel': 'Admin desabilitado pois o canal não é seguro',
'Admin language': 'Idioma do Admin',
'administrative interface': 'interface administrativa',
'Administrator Password:': 'Senha de administrador:',
'and rename it (required):': 'e renomeie (requerido):',
'and rename it:': ' e renomeie:',
'appadmin': 'appadmin',
'appadmin is disabled because insecure channel': 'admin desabilitado porque o canal não é seguro',
'application "%s" uninstalled': 'aplicação "%s" desinstalada',
'application compiled': 'aplicação compilada',
'application is compiled and cannot be designed': 'A aplicação está compilada e não pode ser modificada',
'Application name:': 'Nome da aplicação:',
'Are you sure you want to delete file "%s"?': 'Tem certeza que deseja apagar o arquivo "%s"?',
'Are you sure you want to delete plugin "%s"?': 'Tem certeza que deseja apagar o plugin "%s"?',
'Are you sure you want to delete this object?': 'Tem certeza que deseja apagar esse objeto?',
'Are you sure you want to uninstall application "%s"': 'Tem certeza que deseja desinstalar a aplicação "%s"?',
'Are you sure you want to uninstall application "%s"?': 'Tem certeza que deseja desinstalar a aplicação "%s"?',
'Are you sure you want to upgrade web2py now?': 'Tem certeza que deseja atualizar o web2py agora?',
'arguments': 'argumentos',
'ATTENTION: Login requires a secure (HTTPS) connection or running on localhost.': 'ATENÇÃO: o login requer uma conexão segura (HTTPS) ou executar de localhost.',
'ATTENTION: TESTING IS NOT THREAD SAFE SO DO NOT PERFORM MULTIPLE TESTS CONCURRENTLY.': 'ATENÇÃO OS TESTES NÃO SÃO THREAD SAFE, NÃO EFETUE MÚLTIPLOS TESTES AO MESMO TEMPO.',
'ATTENTION: you cannot edit the running application!': 'ATENÇÃO: Não pode modificar a aplicação em execução!',
'Available databases and tables': 'Bancos de dados e tabelas disponíveis',
'back': 'voltar',
'Basics': 'Informações básicas',
'Begin': 'Iniciar',
'browse': 'navegar',
'cache': 'cache',
'cache, errors and sessions cleaned': 'cache, erros e sessões eliminadas',
'can be a git repo': 'pode ser um repositório git',
'Cannot be empty': 'Não pode ser vazio',
'Cannot compile: there are errors in your app. Debug it, correct errors and try again.': 'Não é possível compilar: Existem erros em sua aplicação. Depure, corrija os errros e tente novamente',
'Cannot compile: there are errors in your app:': 'Não é possível compilar: Existem erros em sua aplicação',
'cannot create file': 'Não é possível criar o arquivo',
'cannot upload file "%(filename)s"': 'não é possível fazer upload do arquivo "%(filename)s"',
'Change admin password': 'mudar senha de administrador',
'Change Password': 'Mudar Senha',
'check all': 'marcar todos',
'Check for upgrades': 'Verificar se existem atualizações',
'Check to delete': 'Marque para apagar',
'Checking for upgrades...': 'Buscando atualizações...',
'Clean': 'Limpo',
'click here for online examples': 'clique para ver exemplos online',
'click here for the administrative interface': 'Clique aqui para acessar a interface administrativa',
'Click row to expand traceback': 'Clique na linha para expandir o traceback',
'click to check for upgrades': 'clique aqui para verificar se existem atualizações',
'click to open': 'clique para abrir',
'Client IP': 'IP do cliente',
'code': 'código',
'collapse/expand all': 'fechar/abrir todos',
'commit (mercurial)': 'commit (mercurial)',
'Compile': 'Compilar',
'compiled application removed': 'a aplicação compilada foi removida',
'Controllers': 'Controladores',
'controllers': 'controladores',
'Count': 'Contagem',
'Create': 'Criar',
'create file with filename:': 'criar um arquivo com o nome:',
'Create new application using the Wizard': 'Criar nova aplicação utilizando o Assistente',
'create new application:': 'nome da nova aplicação:',
'Create new simple application': 'Crie uma nova aplicação',
'created by': 'criado por',
'crontab': 'crontab',
'Current request': 'Requisição atual',
'Current response': 'Resposta atual',
'Current session': 'Sessão atual',
'currently running': 'Executando',
'currently saved or': 'Atualmente salvo ou',
'customize me!': 'Modifique-me!',
'data uploaded': 'Dados enviados',
'database': 'banco de dados',
'database %s select': 'Select no banco de dados %s',
'database administration': 'administração do banco de dados',
'Date and Time': 'Data e Hora',
'db': 'db',
'Debug': 'Debug',
'defines tables': 'define as tabelas',
'Delete': 'Apague',
'delete': 'apagar',
'delete all checked': 'apagar marcados',
'delete plugin': 'apagar plugin',
'Delete:': 'Apague:',
'Deploy': 'Publicar',
'Deploy on Google App Engine': 'Publicar no Google App Engine',
'Deploy to OpenShift': 'Publicar no OpenShift',
'Description': 'Descrição',
'design': 'projeto',
'DESIGN': 'Projeto',
'Design for': 'Projeto de',
'Detailed traceback description': 'Descrição detalhada do traceback',
'direction: ltr': 'direção: ltr',
'Disable': 'Desabilitar',
'done!': 'feito!',
'Download .w2p': 'Download .w2p',
'download layouts': 'download de layouts',
'download plugins': 'download de plugins',
'E-mail': 'E-mail',
'EDIT': 'EDITAR',
'Edit': 'Editar',
'edit all': 'editar todos',
'Edit application': 'Editar aplicação',
'edit controller': 'editar controlador',
'Edit current record': 'Editar o registro atual',
'Edit Profile': 'Editar Perfil',
'edit views:': 'editar visões:',
'Editing file': 'Editando arquivo',
'Editing file "%s"': 'Editando arquivo "%s"',
'Editing Language file': 'Editando arquivo de idioma',
'Enterprise Web Framework': 'Framework Web Corporativo',
'Error': 'Erro',
'Error logs for "%(app)s"': 'Logs de erro para "%(app)s"',
'Error snapshot': 'Momento do Erro',
'Error ticket': 'Tiquete de Erro',
'Errors': 'Erros',
'Exception instance attributes': 'Atributos de instância da Exception',
'export as csv file': 'exportar como arquivo CSV',
'exposes': 'expõe',
'extends': 'estende',
'failed to reload module': 'Falha ao recarregar o módulo',
'failed to reload module because:': 'falha ao recarregar o módulo porque:',
'File': 'Arquivo',
'file "%(filename)s" created': 'arquivo "%(filename)s" criado',
'file "%(filename)s" deleted': 'arquivo "%(filename)s" apagado',
'file "%(filename)s" uploaded': 'arquivo "%(filename)s" enviado',
'file "%(filename)s" was not deleted': 'arquivo "%(filename)s" não foi apagado',
'file "%s" of %s restored': 'arquivo "%s" de %s restaurado',
'file changed on disk': 'arquivo modificado no disco',
'file does not exist': 'arquivo não existe',
'file saved on %(time)s': 'arquivo salvo em %(time)s',
'file saved on %s': 'arquivo salvo em %s',
'filter': 'filtro',
'First name': 'Nome',
'Frames': 'Frames',
'Functions with no doctests will result in [passed] tests.': 'Funções sem doctests resultarão em testes [aceitos].',
'Generate': 'Gerar',
'go!': 'vai!',
'Group ID': 'ID do Grupo',
'Hello World': 'Olá Mundo',
'Help': 'Ajuda',
'htmledit': 'htmledit',
'If the report above contains a ticket number it indicates a failure in executing the controller, before any attempt to execute the doctests. This is usually due to an indentation error or an error outside function code.\nA green title indicates that all tests (if defined) passed. In this case test results are not shown.': 'Se o relatório acima contém um número de ticket, isso indica uma falha no controlador em execução, antes de tentar executar os doctests. Isto acontece geralmente por erro de identação ou um erro fora do código da função.\nO título em verde indica que os testes (se definidos) passaram. Neste caso o resultado dos testes não são mostrados.',
'Import/Export': 'Importar/Exportar',
'includes': 'inclui',
'insert new': 'inserir novo',
'insert new %s': 'inserir novo %s',
'inspect attributes': 'inspeciona atributos',
'Install': 'instalar',
'Installed applications': 'Aplicações instaladas',
'internal error': 'erro interno',
'Internal State': 'Estado Interno',
'Invalid action': 'Ação inválida',
'Invalid email': 'E-mail inválido',
'invalid password': 'senha inválida',
'Invalid Query': 'Consulta inválida',
'invalid request': 'solicitação inválida',
'invalid ticket': 'ticket inválido',
'language file "%(filename)s" created/updated': 'arquivo de idioma "%(filename)s" criado/atualizado',
'Language files (static strings) updated': 'Arquivos de idioma (textos estáticos) atualizados',
'languages': 'idiomas',
'Languages': 'Idiomas',
'languages updated': 'idiomas atualizados',
'Last name': 'Sobrenome',
'Last saved on:': 'Salvo pela última vez em:',
'License for': 'Licença para',
'loading...': 'carregando...',
'locals': 'locals',
'Login': 'Entrar',
'login': 'início de sessão',
'Login to the Administrative Interface': 'Entrar na interface adminitrativa',
'Logout': 'finalizar sessão',
'Lost Password': 'Perdi a senha',
'Manage': 'Gerenciar',
'manage': 'gerenciar',
'merge': 'juntar',
'Models': 'Modelos',
'models': 'modelos',
'Modules': 'Módulos',
'modules': 'módulos',
'Name': 'Nome',
'new application "%s" created': 'nova aplicação "%s" criada',
'New Application Wizard': 'Assistente para novas aplicações ',
'New application wizard': 'Assistente para novas aplicações',
'new plugin installed': 'novo plugin instalado',
'New Record': 'Novo registro',
'new record inserted': 'novo registro inserido',
'New simple application': 'Nova aplicação básica',
'next 100 rows': 'próximos 100 registros',
'NO': 'NÃO',
'No databases in this application': 'Não existem bancos de dados nesta aplicação',
'no match': 'não encontrado',
'no package selected': 'nenhum pacote selecionado',
'Or Get from URL:': 'Ou baixa da URL:',
'or import from csv file': 'ou importar de um arquivo CSV',
'or provide app url:': 'ou forneça a url de uma aplicação:',
'or provide application url:': 'ou forneça a url de uma aplicação:',
'Origin': 'Origem',
'Original/Translation': 'Original/Tradução',
'Overwrite installed app': 'Sobrescrever aplicação instalada',
'Pack all': 'Criar pacote',
'Pack compiled': 'Criar pacote compilado',
'Pack custom': 'Customizar pacote',
'pack plugin': 'empacotar plugin',
'PAM authenticated user, cannot change password here': 'usuário autenticado por PAM não pode alterar a senha aqui',
'Password': 'Senha',
'password changed': 'senha alterada',
'Peeking at file': 'Visualizando arquivo',
'plugin "%(plugin)s" deleted': 'plugin "%(plugin)s" apagado',
'Plugin "%s" in application': 'Plugin "%s" na aplicação',
'plugins': 'plugins',
'Plugins': 'Plugins',
'Powered by': 'Este site utiliza',
'previous 100 rows': '100 registros anteriores',
'Query:': 'Consulta:',
'record': 'registro',
'record does not exist': 'o registro não existe',
'record id': 'id do registro',
'Record ID': 'ID do Registro',
'Register': 'Registrar-se',
'Registration key': 'Chave de registro',
'Reload routes': 'Recarregar routes',
'Remove compiled': 'Eliminar compilados',
'request': 'request',
'Resolve Conflict file': 'Arquivo de resolução de conflito',
'response': 'response',
'restart': 'reiniciar',
'restore': 'restaurar',
'revert': 'reverter',
'Role': 'Papel',
'Rows in table': 'Registros na tabela',
'Rows selected': 'Registros selecionados',
'Running on %s': 'Rodando em %s',
'save': 'salvar',
'Saved file hash:': 'Hash do arquivo salvo:',
'Select Files to Package': 'Selecione arquivos para empacotar',
'selected': 'selecionado(s)',
'session': 'session',
'session expired': 'sessão expirada',
'shell': 'Terminal',
'Site': 'Site',
'skip to generate': 'pular para a gerar a aplicação',
'some files could not be removed': 'alguns arquivos não puderam ser removidos',
'Start a new app': 'Inicie uma nova aplicação',
'Start wizard': 'Iniciar assistente',
'state': 'estado',
'static': 'estáticos',
'Static files': 'Arquivos estáticos',
'Step': 'Passo',
'Submit': 'Enviar',
'submit': 'enviar',
'Sure you want to delete this object?': 'Tem certeza que deseja apagar este objeto?',
'table': 'tabela',
'Table name': 'Nome da tabela',
'test': 'testar',
'Testing application': 'Testando a aplicação',
'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1==db.table2.field2" results in a SQL JOIN.': 'A "consulta" é uma condição como "db.tabela.campo1==\'valor\'". Algo como "db.tabela1.campo1==db.tabela2.campo2" resulta em um JOIN SQL.',
'the application logic, each URL path is mapped in one exposed function in the controller': 'A lógica da aplicação, cada URL é mapeada para uma função exposta pelo controlador',
'The application logic, each URL path is mapped in one exposed function in the controller': 'A lógica da aplicação, cada URL é mapeada para uma função exposta pelo controlador',
'the data representation, define database tables and sets': 'A representação dos dados, define tabelas do banco de dados e conjuntos',
'The data representation, define database tables and sets': 'A representação dos dados, define tabelas do banco de dados e conjuntos',
'The presentations layer, views are also known as templates': 'A camada de apresentação, as visões também são chamadas de templates',
'the presentations layer, views are also known as templates': 'A camada de apresentação, as visões também são chamadas de templates',
'There are no controllers': 'Não existem controladores',
'There are no models': 'Não existem modelos',
'There are no modules': 'Não existem módulos',
'There are no plugins': 'Não existem plugins',
'There are no static files': 'Não existem arquicos estáticos',
'There are no translators, only default language is supported': 'Não há tradutores, somente a linguagem padrão é suportada',
'There are no views': 'Não existem visões',
'These files are served without processing, your images go here': 'Estes arquivos são servidos sem processamento, suas imagens ficam aqui',
'these files are served without processing, your images go here': 'Estes arquivos são servidos sem processamento, suas imagens ficam aqui',
'This is the %(filename)s template': 'Este é o template %(filename)s',
'Ticket': 'Ticket',
'Ticket ID': 'Número do Ticket',
'Timestamp': 'Momento de geração',
'TM': 'MR',
'to previous version.': 'para a versão anterior.',
'To create a plugin, name a file/folder plugin_[name]': 'Para criar um plugin, nomeie um arquivo/pasta como plugin_[nome]',
'Traceback': 'Traceback',
'translation strings for the application': 'textos traduzidos para a aplicação',
'Translation strings for the application': 'textos traduzidos para a aplicação',
'try': 'tente',
'try something like': 'tente algo como',
'Try the mobile interface': 'Experimente a interface para smartphones e tablets',
'Unable to check for upgrades': 'Não é possível checar as atualizações',
'unable to create application "%s"': 'não é possível criar a aplicação "%s"',
'unable to delete file "%(filename)s"': 'não é possível criar o arquivo "%(filename)s"',
'unable to delete file plugin "%(plugin)s"': 'não é possível criar o plugin "%(plugin)s"',
'Unable to download': 'Não é possível efetuar o download',
'Unable to download app': 'Não é possível baixar a aplicação',
'Unable to download app because:': 'Não é possível baixar a aplicação porque:',
'Unable to download because': 'Não é possível baixar porque',
'unable to parse csv file': 'não é possível analisar o arquivo CSV',
'unable to uninstall "%s"': 'não é possível desinstalar "%s"',
'unable to upgrade because "%s"': 'não é possível atualizar porque "%s"',
'uncheck all': 'desmarcar todos',
'Uninstall': 'Desinstalar',
'update': 'alterar',
'update all languages': 'alterar todos os idiomas',
'Update:': 'Alterar:',
'upgrade now to %s': 'Atualize agora para %s',
'upgrade web2py now': 'atualize o web2py agora',
'upload': 'upload',
'Upload a package:': 'Faça upload de um pacote:',
'Upload and install packed application': 'Faça upload e instale uma aplicação empacotada',
'upload application:': 'Fazer upload de uma aplicação:',
'Upload existing application': 'Faça upload de uma aplicação existente',
'upload file:': 'Enviar arquivo:',
'upload plugin file:': 'Enviar arquivo de plugin:',
'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.': 'Use (...)&(...) para AND, (...)|(...) para OR, e ~(...) para NOT, para criar consultas mais complexas.',
'Use an url:': 'Use uma url:',
'User ID': 'ID do Usuário',
'variables': 'variáveis',
'Version': 'Versão',
'versioning': 'versionamento',
'Versioning': 'Versionamento',
'view': 'visão',
'Views': 'Visões',
'views': 'visões',
'Web Framework': 'Web Framework',
'web2py is up to date': 'web2py está atualizado',
'web2py Recent Tweets': 'Tweets Recentes de @web2py',
'web2py upgraded; please restart it': 'web2py atualizado; favor reiniciar',
'Welcome to web2py': 'Bem-vindo ao web2py',
'YES': 'SIM',
}
| lgpl-2.1 | 372,883,085,279,948,800 | 51.327586 | 666 | 0.719001 | false |
PyEatingContest/crispy-node | node_modules/npm/node_modules/node-gyp/gyp/pylib/gyp/generator/xcode.py | 426 | 56534 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import filecmp
import gyp.common
import gyp.xcodeproj_file
import gyp.xcode_ninja
import errno
import os
import sys
import posixpath
import re
import shutil
import subprocess
import tempfile
# Project files generated by this module will use _intermediate_var as a
# custom Xcode setting whose value is a DerivedSources-like directory that's
# project-specific and configuration-specific. The normal choice,
# DERIVED_FILE_DIR, is target-specific, which is thought to be too restrictive
# as it is likely that multiple targets within a single project file will want
# to access the same set of generated files. The other option,
# PROJECT_DERIVED_FILE_DIR, is unsuitable because while it is project-specific,
# it is not configuration-specific. INTERMEDIATE_DIR is defined as
# $(PROJECT_DERIVED_FILE_DIR)/$(CONFIGURATION).
_intermediate_var = 'INTERMEDIATE_DIR'
# SHARED_INTERMEDIATE_DIR is the same, except that it is shared among all
# targets that share the same BUILT_PRODUCTS_DIR.
_shared_intermediate_var = 'SHARED_INTERMEDIATE_DIR'
_library_search_paths_var = 'LIBRARY_SEARCH_PATHS'
generator_default_variables = {
'EXECUTABLE_PREFIX': '',
'EXECUTABLE_SUFFIX': '',
'STATIC_LIB_PREFIX': 'lib',
'SHARED_LIB_PREFIX': 'lib',
'STATIC_LIB_SUFFIX': '.a',
'SHARED_LIB_SUFFIX': '.dylib',
# INTERMEDIATE_DIR is a place for targets to build up intermediate products.
# It is specific to each build environment. It is only guaranteed to exist
# and be constant within the context of a project, corresponding to a single
# input file. Some build environments may allow their intermediate directory
# to be shared on a wider scale, but this is not guaranteed.
'INTERMEDIATE_DIR': '$(%s)' % _intermediate_var,
'OS': 'mac',
'PRODUCT_DIR': '$(BUILT_PRODUCTS_DIR)',
'LIB_DIR': '$(BUILT_PRODUCTS_DIR)',
'RULE_INPUT_ROOT': '$(INPUT_FILE_BASE)',
'RULE_INPUT_EXT': '$(INPUT_FILE_SUFFIX)',
'RULE_INPUT_NAME': '$(INPUT_FILE_NAME)',
'RULE_INPUT_PATH': '$(INPUT_FILE_PATH)',
'RULE_INPUT_DIRNAME': '$(INPUT_FILE_DIRNAME)',
'SHARED_INTERMEDIATE_DIR': '$(%s)' % _shared_intermediate_var,
'CONFIGURATION_NAME': '$(CONFIGURATION)',
}
# The Xcode-specific sections that hold paths.
generator_additional_path_sections = [
'mac_bundle_resources',
'mac_framework_headers',
'mac_framework_private_headers',
# 'mac_framework_dirs', input already handles _dirs endings.
]
# The Xcode-specific keys that exist on targets and aren't moved down to
# configurations.
generator_additional_non_configuration_keys = [
'ios_app_extension',
'ios_watch_app',
'ios_watchkit_extension',
'mac_bundle',
'mac_bundle_resources',
'mac_framework_headers',
'mac_framework_private_headers',
'mac_xctest_bundle',
'xcode_create_dependents_test_runner',
]
# We want to let any rules apply to files that are resources also.
generator_extra_sources_for_rules = [
'mac_bundle_resources',
'mac_framework_headers',
'mac_framework_private_headers',
]
# Xcode's standard set of library directories, which don't need to be duplicated
# in LIBRARY_SEARCH_PATHS. This list is not exhaustive, but that's okay.
xcode_standard_library_dirs = frozenset([
'$(SDKROOT)/usr/lib',
'$(SDKROOT)/usr/local/lib',
])
def CreateXCConfigurationList(configuration_names):
xccl = gyp.xcodeproj_file.XCConfigurationList({'buildConfigurations': []})
if len(configuration_names) == 0:
configuration_names = ['Default']
for configuration_name in configuration_names:
xcbc = gyp.xcodeproj_file.XCBuildConfiguration({
'name': configuration_name})
xccl.AppendProperty('buildConfigurations', xcbc)
xccl.SetProperty('defaultConfigurationName', configuration_names[0])
return xccl
class XcodeProject(object):
def __init__(self, gyp_path, path, build_file_dict):
self.gyp_path = gyp_path
self.path = path
self.project = gyp.xcodeproj_file.PBXProject(path=path)
projectDirPath = gyp.common.RelativePath(
os.path.dirname(os.path.abspath(self.gyp_path)),
os.path.dirname(path) or '.')
self.project.SetProperty('projectDirPath', projectDirPath)
self.project_file = \
gyp.xcodeproj_file.XCProjectFile({'rootObject': self.project})
self.build_file_dict = build_file_dict
# TODO(mark): add destructor that cleans up self.path if created_dir is
# True and things didn't complete successfully. Or do something even
# better with "try"?
self.created_dir = False
try:
os.makedirs(self.path)
self.created_dir = True
except OSError, e:
if e.errno != errno.EEXIST:
raise
def Finalize1(self, xcode_targets, serialize_all_tests):
# Collect a list of all of the build configuration names used by the
# various targets in the file. It is very heavily advised to keep each
# target in an entire project (even across multiple project files) using
# the same set of configuration names.
configurations = []
for xct in self.project.GetProperty('targets'):
xccl = xct.GetProperty('buildConfigurationList')
xcbcs = xccl.GetProperty('buildConfigurations')
for xcbc in xcbcs:
name = xcbc.GetProperty('name')
if name not in configurations:
configurations.append(name)
# Replace the XCConfigurationList attached to the PBXProject object with
# a new one specifying all of the configuration names used by the various
# targets.
try:
xccl = CreateXCConfigurationList(configurations)
self.project.SetProperty('buildConfigurationList', xccl)
except:
sys.stderr.write("Problem with gyp file %s\n" % self.gyp_path)
raise
# The need for this setting is explained above where _intermediate_var is
# defined. The comments below about wanting to avoid project-wide build
# settings apply here too, but this needs to be set on a project-wide basis
# so that files relative to the _intermediate_var setting can be displayed
# properly in the Xcode UI.
#
# Note that for configuration-relative files such as anything relative to
# _intermediate_var, for the purposes of UI tree view display, Xcode will
# only resolve the configuration name once, when the project file is
# opened. If the active build configuration is changed, the project file
# must be closed and reopened if it is desired for the tree view to update.
# This is filed as Apple radar 6588391.
xccl.SetBuildSetting(_intermediate_var,
'$(PROJECT_DERIVED_FILE_DIR)/$(CONFIGURATION)')
xccl.SetBuildSetting(_shared_intermediate_var,
'$(SYMROOT)/DerivedSources/$(CONFIGURATION)')
# Set user-specified project-wide build settings and config files. This
# is intended to be used very sparingly. Really, almost everything should
# go into target-specific build settings sections. The project-wide
# settings are only intended to be used in cases where Xcode attempts to
# resolve variable references in a project context as opposed to a target
# context, such as when resolving sourceTree references while building up
# the tree view for UI display.
# Any values set globally are applied to all configurations, then any
# per-configuration values are applied.
for xck, xcv in self.build_file_dict.get('xcode_settings', {}).iteritems():
xccl.SetBuildSetting(xck, xcv)
if 'xcode_config_file' in self.build_file_dict:
config_ref = self.project.AddOrGetFileInRootGroup(
self.build_file_dict['xcode_config_file'])
xccl.SetBaseConfiguration(config_ref)
build_file_configurations = self.build_file_dict.get('configurations', {})
if build_file_configurations:
for config_name in configurations:
build_file_configuration_named = \
build_file_configurations.get(config_name, {})
if build_file_configuration_named:
xcc = xccl.ConfigurationNamed(config_name)
for xck, xcv in build_file_configuration_named.get('xcode_settings',
{}).iteritems():
xcc.SetBuildSetting(xck, xcv)
if 'xcode_config_file' in build_file_configuration_named:
config_ref = self.project.AddOrGetFileInRootGroup(
build_file_configurations[config_name]['xcode_config_file'])
xcc.SetBaseConfiguration(config_ref)
# Sort the targets based on how they appeared in the input.
# TODO(mark): Like a lot of other things here, this assumes internal
# knowledge of PBXProject - in this case, of its "targets" property.
# ordinary_targets are ordinary targets that are already in the project
# file. run_test_targets are the targets that run unittests and should be
# used for the Run All Tests target. support_targets are the action/rule
# targets used by GYP file targets, just kept for the assert check.
ordinary_targets = []
run_test_targets = []
support_targets = []
# targets is full list of targets in the project.
targets = []
# does it define its own "all"?
has_custom_all = False
# targets_for_all is the list of ordinary_targets that should be listed
# in this project's "All" target. It includes each non_runtest_target
# that does not have suppress_wildcard set.
targets_for_all = []
for target in self.build_file_dict['targets']:
target_name = target['target_name']
toolset = target['toolset']
qualified_target = gyp.common.QualifiedTarget(self.gyp_path, target_name,
toolset)
xcode_target = xcode_targets[qualified_target]
# Make sure that the target being added to the sorted list is already in
# the unsorted list.
assert xcode_target in self.project._properties['targets']
targets.append(xcode_target)
ordinary_targets.append(xcode_target)
if xcode_target.support_target:
support_targets.append(xcode_target.support_target)
targets.append(xcode_target.support_target)
if not int(target.get('suppress_wildcard', False)):
targets_for_all.append(xcode_target)
if target_name.lower() == 'all':
has_custom_all = True
# If this target has a 'run_as' attribute, add its target to the
# targets, and add it to the test targets.
if target.get('run_as'):
# Make a target to run something. It should have one
# dependency, the parent xcode target.
xccl = CreateXCConfigurationList(configurations)
run_target = gyp.xcodeproj_file.PBXAggregateTarget({
'name': 'Run ' + target_name,
'productName': xcode_target.GetProperty('productName'),
'buildConfigurationList': xccl,
},
parent=self.project)
run_target.AddDependency(xcode_target)
command = target['run_as']
script = ''
if command.get('working_directory'):
script = script + 'cd "%s"\n' % \
gyp.xcodeproj_file.ConvertVariablesToShellSyntax(
command.get('working_directory'))
if command.get('environment'):
script = script + "\n".join(
['export %s="%s"' %
(key, gyp.xcodeproj_file.ConvertVariablesToShellSyntax(val))
for (key, val) in command.get('environment').iteritems()]) + "\n"
# Some tests end up using sockets, files on disk, etc. and can get
# confused if more than one test runs at a time. The generator
# flag 'xcode_serialize_all_test_runs' controls the forcing of all
# tests serially. It defaults to True. To get serial runs this
# little bit of python does the same as the linux flock utility to
# make sure only one runs at a time.
command_prefix = ''
if serialize_all_tests:
command_prefix = \
"""python -c "import fcntl, subprocess, sys
file = open('$TMPDIR/GYP_serialize_test_runs', 'a')
fcntl.flock(file.fileno(), fcntl.LOCK_EX)
sys.exit(subprocess.call(sys.argv[1:]))" """
# If we were unable to exec for some reason, we want to exit
# with an error, and fixup variable references to be shell
# syntax instead of xcode syntax.
script = script + 'exec ' + command_prefix + '%s\nexit 1\n' % \
gyp.xcodeproj_file.ConvertVariablesToShellSyntax(
gyp.common.EncodePOSIXShellList(command.get('action')))
ssbp = gyp.xcodeproj_file.PBXShellScriptBuildPhase({
'shellScript': script,
'showEnvVarsInLog': 0,
})
run_target.AppendProperty('buildPhases', ssbp)
# Add the run target to the project file.
targets.append(run_target)
run_test_targets.append(run_target)
xcode_target.test_runner = run_target
# Make sure that the list of targets being replaced is the same length as
# the one replacing it, but allow for the added test runner targets.
assert len(self.project._properties['targets']) == \
len(ordinary_targets) + len(support_targets)
self.project._properties['targets'] = targets
# Get rid of unnecessary levels of depth in groups like the Source group.
self.project.RootGroupsTakeOverOnlyChildren(True)
# Sort the groups nicely. Do this after sorting the targets, because the
# Products group is sorted based on the order of the targets.
self.project.SortGroups()
# Create an "All" target if there's more than one target in this project
# file and the project didn't define its own "All" target. Put a generated
# "All" target first so that people opening up the project for the first
# time will build everything by default.
if len(targets_for_all) > 1 and not has_custom_all:
xccl = CreateXCConfigurationList(configurations)
all_target = gyp.xcodeproj_file.PBXAggregateTarget(
{
'buildConfigurationList': xccl,
'name': 'All',
},
parent=self.project)
for target in targets_for_all:
all_target.AddDependency(target)
# TODO(mark): This is evil because it relies on internal knowledge of
# PBXProject._properties. It's important to get the "All" target first,
# though.
self.project._properties['targets'].insert(0, all_target)
# The same, but for run_test_targets.
if len(run_test_targets) > 1:
xccl = CreateXCConfigurationList(configurations)
run_all_tests_target = gyp.xcodeproj_file.PBXAggregateTarget(
{
'buildConfigurationList': xccl,
'name': 'Run All Tests',
},
parent=self.project)
for run_test_target in run_test_targets:
run_all_tests_target.AddDependency(run_test_target)
# Insert after the "All" target, which must exist if there is more than
# one run_test_target.
self.project._properties['targets'].insert(1, run_all_tests_target)
def Finalize2(self, xcode_targets, xcode_target_to_target_dict):
# Finalize2 needs to happen in a separate step because the process of
# updating references to other projects depends on the ordering of targets
# within remote project files. Finalize1 is responsible for sorting duty,
# and once all project files are sorted, Finalize2 can come in and update
# these references.
# To support making a "test runner" target that will run all the tests
# that are direct dependents of any given target, we look for
# xcode_create_dependents_test_runner being set on an Aggregate target,
# and generate a second target that will run the tests runners found under
# the marked target.
for bf_tgt in self.build_file_dict['targets']:
if int(bf_tgt.get('xcode_create_dependents_test_runner', 0)):
tgt_name = bf_tgt['target_name']
toolset = bf_tgt['toolset']
qualified_target = gyp.common.QualifiedTarget(self.gyp_path,
tgt_name, toolset)
xcode_target = xcode_targets[qualified_target]
if isinstance(xcode_target, gyp.xcodeproj_file.PBXAggregateTarget):
# Collect all the run test targets.
all_run_tests = []
pbxtds = xcode_target.GetProperty('dependencies')
for pbxtd in pbxtds:
pbxcip = pbxtd.GetProperty('targetProxy')
dependency_xct = pbxcip.GetProperty('remoteGlobalIDString')
if hasattr(dependency_xct, 'test_runner'):
all_run_tests.append(dependency_xct.test_runner)
# Directly depend on all the runners as they depend on the target
# that builds them.
if len(all_run_tests) > 0:
run_all_target = gyp.xcodeproj_file.PBXAggregateTarget({
'name': 'Run %s Tests' % tgt_name,
'productName': tgt_name,
},
parent=self.project)
for run_test_target in all_run_tests:
run_all_target.AddDependency(run_test_target)
# Insert the test runner after the related target.
idx = self.project._properties['targets'].index(xcode_target)
self.project._properties['targets'].insert(idx + 1, run_all_target)
# Update all references to other projects, to make sure that the lists of
# remote products are complete. Otherwise, Xcode will fill them in when
# it opens the project file, which will result in unnecessary diffs.
# TODO(mark): This is evil because it relies on internal knowledge of
# PBXProject._other_pbxprojects.
for other_pbxproject in self.project._other_pbxprojects.keys():
self.project.AddOrGetProjectReference(other_pbxproject)
self.project.SortRemoteProductReferences()
# Give everything an ID.
self.project_file.ComputeIDs()
# Make sure that no two objects in the project file have the same ID. If
# multiple objects wind up with the same ID, upon loading the file, Xcode
# will only recognize one object (the last one in the file?) and the
# results are unpredictable.
self.project_file.EnsureNoIDCollisions()
def Write(self):
# Write the project file to a temporary location first. Xcode watches for
# changes to the project file and presents a UI sheet offering to reload
# the project when it does change. However, in some cases, especially when
# multiple projects are open or when Xcode is busy, things don't work so
# seamlessly. Sometimes, Xcode is able to detect that a project file has
# changed but can't unload it because something else is referencing it.
# To mitigate this problem, and to avoid even having Xcode present the UI
# sheet when an open project is rewritten for inconsequential changes, the
# project file is written to a temporary file in the xcodeproj directory
# first. The new temporary file is then compared to the existing project
# file, if any. If they differ, the new file replaces the old; otherwise,
# the new project file is simply deleted. Xcode properly detects a file
# being renamed over an open project file as a change and so it remains
# able to present the "project file changed" sheet under this system.
# Writing to a temporary file first also avoids the possible problem of
# Xcode rereading an incomplete project file.
(output_fd, new_pbxproj_path) = \
tempfile.mkstemp(suffix='.tmp', prefix='project.pbxproj.gyp.',
dir=self.path)
try:
output_file = os.fdopen(output_fd, 'wb')
self.project_file.Print(output_file)
output_file.close()
pbxproj_path = os.path.join(self.path, 'project.pbxproj')
same = False
try:
same = filecmp.cmp(pbxproj_path, new_pbxproj_path, False)
except OSError, e:
if e.errno != errno.ENOENT:
raise
if same:
# The new file is identical to the old one, just get rid of the new
# one.
os.unlink(new_pbxproj_path)
else:
# The new file is different from the old one, or there is no old one.
# Rename the new file to the permanent name.
#
# tempfile.mkstemp uses an overly restrictive mode, resulting in a
# file that can only be read by the owner, regardless of the umask.
# There's no reason to not respect the umask here, which means that
# an extra hoop is required to fetch it and reset the new file's mode.
#
# No way to get the umask without setting a new one? Set a safe one
# and then set it back to the old value.
umask = os.umask(077)
os.umask(umask)
os.chmod(new_pbxproj_path, 0666 & ~umask)
os.rename(new_pbxproj_path, pbxproj_path)
except Exception:
# Don't leave turds behind. In fact, if this code was responsible for
# creating the xcodeproj directory, get rid of that too.
os.unlink(new_pbxproj_path)
if self.created_dir:
shutil.rmtree(self.path, True)
raise
def AddSourceToTarget(source, type, pbxp, xct):
# TODO(mark): Perhaps source_extensions and library_extensions can be made a
# little bit fancier.
source_extensions = ['c', 'cc', 'cpp', 'cxx', 'm', 'mm', 's', 'swift']
# .o is conceptually more of a "source" than a "library," but Xcode thinks
# of "sources" as things to compile and "libraries" (or "frameworks") as
# things to link with. Adding an object file to an Xcode target's frameworks
# phase works properly.
library_extensions = ['a', 'dylib', 'framework', 'o']
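# For illustration (hypothetical file names), assuming the target's type is
# not 'none': "foo.cc" is added to the Sources phase, "libbar.dylib" and
# "baz.o" are added to the Frameworks phase, and anything else, e.g. "foo.h",
# is only added to the project's root group below.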
basename = posixpath.basename(source)
(root, ext) = posixpath.splitext(basename)
if ext:
ext = ext[1:].lower()
if ext in source_extensions and type != 'none':
xct.SourcesPhase().AddFile(source)
elif ext in library_extensions and type != 'none':
xct.FrameworksPhase().AddFile(source)
else:
# Files that aren't added to a sources or frameworks build phase can still
# go into the project file, just not as part of a build phase.
pbxp.AddOrGetFileInRootGroup(source)
def AddResourceToTarget(resource, pbxp, xct):
# TODO(mark): Combine with AddSourceToTarget above? Or just inline this call
# where it's used.
xct.ResourcesPhase().AddFile(resource)
def AddHeaderToTarget(header, pbxp, xct, is_public):
# TODO(mark): Combine with AddSourceToTarget above? Or just inline this call
# where it's used.
settings = '{ATTRIBUTES = (%s, ); }' % ('Private', 'Public')[is_public]
xct.HeadersPhase().AddFile(header, settings)
_xcode_variable_re = re.compile(r'(\$\((.*?)\))')
def ExpandXcodeVariables(string, expansions):
"""Expands Xcode-style $(VARIABLES) in string per the expansions dict.
In some rare cases, it is appropriate to expand Xcode variables when a
project file is generated. For any substring $(VAR) in string, if VAR is a
key in the expansions dict, $(VAR) will be replaced with expansions[VAR].
Any $(VAR) substring in string for which VAR is not a key in the expansions
dict will remain in the returned string.
"""
matches = _xcode_variable_re.findall(string)
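# A hypothetical illustration of the behavior described in the docstring:
#   ExpandXcodeVariables('$(INPUT_FILE_BASE).cc', {'INPUT_FILE_BASE': 'foo'})
#   returns 'foo.cc', while ExpandXcodeVariables('$(UNKNOWN).cc', {}) returns
#   '$(UNKNOWN).cc' because unknown variables are left untouched.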
if matches is None:
return string
matches.reverse()
for match in matches:
(to_replace, variable) = match
if variable not in expansions:
continue
replacement = expansions[variable]
string = re.sub(re.escape(to_replace), replacement, string)
return string
_xcode_define_re = re.compile(r'([\\\"\' ])')
def EscapeXcodeDefine(s):
"""We must escape the defines that we give to Xcode so that it knows not to
split on spaces and to respect backslash and quote literals. However, we
must not quote the define, or Xcode will incorrectly interpret variables,
especially $(inherited)."""
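# A hypothetical illustration: EscapeXcodeDefine('FOO="a b"') returns
# 'FOO=\"a\ b\"' (backslashes, quotes and spaces are each prefixed with a
# backslash), while EscapeXcodeDefine('$(inherited)') is returned unchanged.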
return re.sub(_xcode_define_re, r'\\\1', s)
def PerformBuild(data, configurations, params):
options = params['options']
for build_file, build_file_dict in data.iteritems():
(build_file_root, build_file_ext) = os.path.splitext(build_file)
if build_file_ext != '.gyp':
continue
xcodeproj_path = build_file_root + options.suffix + '.xcodeproj'
if options.generator_output:
xcodeproj_path = os.path.join(options.generator_output, xcodeproj_path)
for config in configurations:
arguments = ['xcodebuild', '-project', xcodeproj_path]
arguments += ['-configuration', config]
print "Building [%s]: %s" % (config, arguments)
subprocess.check_call(arguments)
def GenerateOutput(target_list, target_dicts, data, params):
# Optionally configure each spec to use ninja as the external builder.
ninja_wrapper = params.get('flavor') == 'ninja'
if ninja_wrapper:
(target_list, target_dicts, data) = \
gyp.xcode_ninja.CreateWrapper(target_list, target_dicts, data, params)
options = params['options']
generator_flags = params.get('generator_flags', {})
parallel_builds = generator_flags.get('xcode_parallel_builds', True)
serialize_all_tests = \
generator_flags.get('xcode_serialize_all_test_runs', True)
skip_excluded_files = \
not generator_flags.get('xcode_list_excluded_files', True)
xcode_projects = {}
for build_file, build_file_dict in data.iteritems():
(build_file_root, build_file_ext) = os.path.splitext(build_file)
if build_file_ext != '.gyp':
continue
xcodeproj_path = build_file_root + options.suffix + '.xcodeproj'
if options.generator_output:
xcodeproj_path = os.path.join(options.generator_output, xcodeproj_path)
xcp = XcodeProject(build_file, xcodeproj_path, build_file_dict)
xcode_projects[build_file] = xcp
pbxp = xcp.project
if parallel_builds:
pbxp.SetProperty('attributes',
{'BuildIndependentTargetsInParallel': 'YES'})
# Add gyp/gypi files to project
if not generator_flags.get('standalone'):
main_group = pbxp.GetProperty('mainGroup')
build_group = gyp.xcodeproj_file.PBXGroup({'name': 'Build'})
main_group.AppendChild(build_group)
for included_file in build_file_dict['included_files']:
build_group.AddOrGetFileByPath(included_file, False)
xcode_targets = {}
xcode_target_to_target_dict = {}
for qualified_target in target_list:
[build_file, target_name, toolset] = \
gyp.common.ParseQualifiedTarget(qualified_target)
spec = target_dicts[qualified_target]
if spec['toolset'] != 'target':
raise Exception(
'Multiple toolsets not supported in xcode build (target %s)' %
qualified_target)
configuration_names = [spec['default_configuration']]
for configuration_name in sorted(spec['configurations'].keys()):
if configuration_name not in configuration_names:
configuration_names.append(configuration_name)
xcp = xcode_projects[build_file]
pbxp = xcp.project
# Set up the configurations for the target according to the list of names
# supplied.
xccl = CreateXCConfigurationList(configuration_names)
# Create an XCTarget subclass object for the target. The type with
# "+bundle" appended will be used if the target has "mac_bundle" set.
# loadable_modules not in a mac_bundle are mapped to
# com.googlecode.gyp.xcode.bundle, a pseudo-type that xcode.py interprets
# to create a single-file mh_bundle.
_types = {
'executable': 'com.apple.product-type.tool',
'loadable_module': 'com.googlecode.gyp.xcode.bundle',
'shared_library': 'com.apple.product-type.library.dynamic',
'static_library': 'com.apple.product-type.library.static',
'executable+bundle': 'com.apple.product-type.application',
'loadable_module+bundle': 'com.apple.product-type.bundle',
'loadable_module+xctest': 'com.apple.product-type.bundle.unit-test',
'shared_library+bundle': 'com.apple.product-type.framework',
'executable+extension+bundle': 'com.apple.product-type.app-extension',
'executable+watch+extension+bundle':
'com.apple.product-type.watchkit-extension',
'executable+watch+bundle': 'com.apple.product-type.application.watchapp',
}
target_properties = {
'buildConfigurationList': xccl,
'name': target_name,
}
type = spec['type']
is_xctest = int(spec.get('mac_xctest_bundle', 0))
is_bundle = int(spec.get('mac_bundle', 0)) or is_xctest
is_app_extension = int(spec.get('ios_app_extension', 0))
is_watchkit_extension = int(spec.get('ios_watchkit_extension', 0))
is_watch_app = int(spec.get('ios_watch_app', 0))
if type != 'none':
type_bundle_key = type
if is_xctest:
type_bundle_key += '+xctest'
assert type == 'loadable_module', (
'mac_xctest_bundle targets must have type loadable_module '
'(target %s)' % target_name)
elif is_app_extension:
assert is_bundle, ('ios_app_extension flag requires mac_bundle '
'(target %s)' % target_name)
type_bundle_key += '+extension+bundle'
elif is_watchkit_extension:
assert is_bundle, ('ios_watchkit_extension flag requires mac_bundle '
'(target %s)' % target_name)
type_bundle_key += '+watch+extension+bundle'
elif is_watch_app:
assert is_bundle, ('ios_watch_app flag requires mac_bundle '
'(target %s)' % target_name)
type_bundle_key += '+watch+bundle'
elif is_bundle:
type_bundle_key += '+bundle'
xctarget_type = gyp.xcodeproj_file.PBXNativeTarget
try:
target_properties['productType'] = _types[type_bundle_key]
except KeyError, e:
gyp.common.ExceptionAppend(e, "-- unknown product type while "
"writing target %s" % target_name)
raise
else:
xctarget_type = gyp.xcodeproj_file.PBXAggregateTarget
assert not is_bundle, (
'mac_bundle targets cannot have type none (target "%s")' %
target_name)
assert not is_xctest, (
'mac_xctest_bundle targets cannot have type none (target "%s")' %
target_name)
target_product_name = spec.get('product_name')
if target_product_name is not None:
target_properties['productName'] = target_product_name
xct = xctarget_type(target_properties, parent=pbxp,
force_outdir=spec.get('product_dir'),
force_prefix=spec.get('product_prefix'),
force_extension=spec.get('product_extension'))
pbxp.AppendProperty('targets', xct)
xcode_targets[qualified_target] = xct
xcode_target_to_target_dict[xct] = spec
spec_actions = spec.get('actions', [])
spec_rules = spec.get('rules', [])
# Xcode has some "issues" with checking dependencies for the "Compile
# sources" step with any source files/headers generated by actions/rules.
# To work around this, if a target is building anything directly (not
# type "none"), then a second target is used to run the GYP actions/rules
# and is made a dependency of this target. This way the work is done
# before the dependency checks for what should be recompiled.
support_xct = None
# The Xcode "issues" don't affect xcode-ninja builds, since the dependency
# logic all happens in ninja. Don't bother creating the extra targets in
# that case.
if type != 'none' and (spec_actions or spec_rules) and not ninja_wrapper:
support_xccl = CreateXCConfigurationList(configuration_names)
support_target_suffix = generator_flags.get(
'support_target_suffix', ' Support')
support_target_properties = {
'buildConfigurationList': support_xccl,
'name': target_name + support_target_suffix,
}
if target_product_name:
support_target_properties['productName'] = \
target_product_name + ' Support'
support_xct = \
gyp.xcodeproj_file.PBXAggregateTarget(support_target_properties,
parent=pbxp)
pbxp.AppendProperty('targets', support_xct)
xct.AddDependency(support_xct)
# Hang the support target off the main target so it can be tested/found
# by the generator during Finalize.
xct.support_target = support_xct
prebuild_index = 0
# Add custom shell script phases for "actions" sections.
for action in spec_actions:
# There's no need to write anything into the script to ensure that the
# output directories already exist, because Xcode will look at the
# declared outputs and automatically ensure that they exist for us.
# Do we have a message to print when this action runs?
message = action.get('message')
if message:
message = 'echo note: ' + gyp.common.EncodePOSIXShellArgument(message)
else:
message = ''
# Turn the list into a string that can be passed to a shell.
action_string = gyp.common.EncodePOSIXShellList(action['action'])
# Convert Xcode-type variable references to sh-compatible environment
# variable references.
message_sh = gyp.xcodeproj_file.ConvertVariablesToShellSyntax(message)
action_string_sh = gyp.xcodeproj_file.ConvertVariablesToShellSyntax(
action_string)
script = ''
# Include the optional message
if message_sh:
script += message_sh + '\n'
# Be sure the script runs in exec, and that if exec fails, the script
# exits signalling an error.
script += 'exec ' + action_string_sh + '\nexit 1\n'
ssbp = gyp.xcodeproj_file.PBXShellScriptBuildPhase({
'inputPaths': action['inputs'],
'name': 'Action "' + action['action_name'] + '"',
'outputPaths': action['outputs'],
'shellScript': script,
'showEnvVarsInLog': 0,
})
if support_xct:
support_xct.AppendProperty('buildPhases', ssbp)
else:
# TODO(mark): this assumes too much knowledge of the internals of
# xcodeproj_file; some of these smarts should move into xcodeproj_file
# itself.
xct._properties['buildPhases'].insert(prebuild_index, ssbp)
prebuild_index = prebuild_index + 1
# TODO(mark): Should verify that at most one of these is specified.
if int(action.get('process_outputs_as_sources', False)):
for output in action['outputs']:
AddSourceToTarget(output, type, pbxp, xct)
if int(action.get('process_outputs_as_mac_bundle_resources', False)):
for output in action['outputs']:
AddResourceToTarget(output, pbxp, xct)
# tgt_mac_bundle_resources holds the list of bundle resources so
# the rule processing can check against it.
if is_bundle:
tgt_mac_bundle_resources = spec.get('mac_bundle_resources', [])
else:
tgt_mac_bundle_resources = []
# Add custom shell script phases driving "make" for "rules" sections.
#
# Xcode's built-in rule support is almost powerful enough to use directly,
# but there are a few significant deficiencies that render it unusable.
# There are workarounds for some of its inadequacies, but in aggregate,
# the workarounds added complexity to the generator, and some workarounds
# actually require input files to be crafted more carefully than I'd like.
# Consequently, until Xcode rules are made more capable, "rules" input
# sections will be handled in Xcode output by shell script build phases
# performed prior to the compilation phase.
#
# The following problems with Xcode rules were found. The numbers are
# Apple radar IDs. I hope that these shortcomings are addressed, I really
# liked having the rules handled directly in Xcode during the period that
# I was prototyping this.
#
# 6588600 Xcode compiles custom script rule outputs too soon, compilation
# fails. This occurs when rule outputs from distinct inputs are
# interdependent. The only workaround is to put rules and their
# inputs in a separate target from the one that compiles the rule
# outputs. This requires input file cooperation and it means that
# process_outputs_as_sources is unusable.
# 6584932 Need to declare that custom rule outputs should be excluded from
# compilation. A possible workaround is to lie to Xcode about a
# rule's output, giving it a dummy file it doesn't know how to
# compile. The rule action script would need to touch the dummy.
# 6584839 I need a way to declare additional inputs to a custom rule.
# A possible workaround is a shell script phase prior to
# compilation that touches a rule's primary input files if any
# would-be additional inputs are newer than the output. Modifying
# the source tree - even just modification times - feels dirty.
# 6564240 Xcode "custom script" build rules always dump all environment
# variables. This is a low-priority problem and is not a
# show-stopper.
rules_by_ext = {}
for rule in spec_rules:
rules_by_ext[rule['extension']] = rule
# First, some definitions:
#
# A "rule source" is a file that was listed in a target's "sources"
# list and will have a rule applied to it on the basis of matching the
# rule's "extensions" attribute. Rule sources are direct inputs to
# rules.
#
# Rule definitions may specify additional inputs in their "inputs"
# attribute. These additional inputs are used for dependency tracking
# purposes.
#
# A "concrete output" is a rule output with input-dependent variables
# resolved. For example, given a rule with:
# 'extension': 'ext', 'outputs': ['$(INPUT_FILE_BASE).cc'],
# if the target's "sources" list contained "one.ext" and "two.ext",
# the "concrete output" for rule input "two.ext" would be "two.cc". If
# a rule specifies multiple outputs, each input file that the rule is
# applied to will have the same number of concrete outputs.
#
# If any concrete outputs are outdated or missing relative to their
# corresponding rule_source or to any specified additional input, the
# rule action must be performed to generate the concrete outputs.
# concrete_outputs_by_rule_source will have an item at the same index
# as the rule['rule_sources'] that it corresponds to. Each item is a
# list of all of the concrete outputs for the rule_source.
concrete_outputs_by_rule_source = []
# concrete_outputs_all is a flat list of all concrete outputs that this
# rule is able to produce, given the known set of input files
# (rule_sources) that apply to it.
concrete_outputs_all = []
# messages & actions are keyed by the same indices as rule['rule_sources']
# and concrete_outputs_by_rule_source. They contain the message and
# action to perform after resolving input-dependent variables. The
# message is optional, in which case None is stored for each rule source.
messages = []
actions = []
for rule_source in rule.get('rule_sources', []):
rule_source_dirname, rule_source_basename = \
posixpath.split(rule_source)
(rule_source_root, rule_source_ext) = \
posixpath.splitext(rule_source_basename)
# These are the same variable names that Xcode uses for its own native
# rule support. Because Xcode's rule engine is not being used, they
# need to be expanded as they are written to the makefile.
rule_input_dict = {
'INPUT_FILE_BASE': rule_source_root,
'INPUT_FILE_SUFFIX': rule_source_ext,
'INPUT_FILE_NAME': rule_source_basename,
'INPUT_FILE_PATH': rule_source,
'INPUT_FILE_DIRNAME': rule_source_dirname,
}
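# As a hypothetical example, for rule_source 'dir/one.ext' this expands to
# INPUT_FILE_BASE='one', INPUT_FILE_SUFFIX='.ext', INPUT_FILE_NAME='one.ext',
# INPUT_FILE_PATH='dir/one.ext' and INPUT_FILE_DIRNAME='dir'.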
concrete_outputs_for_this_rule_source = []
for output in rule.get('outputs', []):
# Fortunately, Xcode and make both use $(VAR) format for their
# variables, so the expansion is the only transformation necessary.
# Any remaining $(VAR)-type variables in the string can be given
# directly to make, which will pick up the correct settings from
# what Xcode puts into the environment.
concrete_output = ExpandXcodeVariables(output, rule_input_dict)
concrete_outputs_for_this_rule_source.append(concrete_output)
# Add all concrete outputs to the project.
pbxp.AddOrGetFileInRootGroup(concrete_output)
concrete_outputs_by_rule_source.append( \
concrete_outputs_for_this_rule_source)
concrete_outputs_all.extend(concrete_outputs_for_this_rule_source)
# TODO(mark): Should verify that at most one of these is specified.
if int(rule.get('process_outputs_as_sources', False)):
for output in concrete_outputs_for_this_rule_source:
AddSourceToTarget(output, type, pbxp, xct)
# If the file came from the mac_bundle_resources list or if the rule
# is marked to process outputs as bundle resource, do so.
was_mac_bundle_resource = rule_source in tgt_mac_bundle_resources
if was_mac_bundle_resource or \
int(rule.get('process_outputs_as_mac_bundle_resources', False)):
for output in concrete_outputs_for_this_rule_source:
AddResourceToTarget(output, pbxp, xct)
# Do we have a message to print when this rule runs?
message = rule.get('message')
if message:
message = gyp.common.EncodePOSIXShellArgument(message)
message = ExpandXcodeVariables(message, rule_input_dict)
messages.append(message)
# Turn the list into a string that can be passed to a shell.
action_string = gyp.common.EncodePOSIXShellList(rule['action'])
action = ExpandXcodeVariables(action_string, rule_input_dict)
actions.append(action)
if len(concrete_outputs_all) > 0:
# TODO(mark): There's a possibility for collision here. Consider
# target "t" rule "A_r" and target "t_A" rule "r".
makefile_name = '%s.make' % re.sub(
'[^a-zA-Z0-9_]', '_' , '%s_%s' % (target_name, rule['rule_name']))
makefile_path = os.path.join(xcode_projects[build_file].path,
makefile_name)
# TODO(mark): try/close? Write to a temporary file and swap it only
# if it's got changes?
makefile = open(makefile_path, 'wb')
# make will build the first target in the makefile by default. By
# convention, it's called "all". List all (or at least one)
# concrete output for each rule source as a prerequisite of the "all"
# target.
makefile.write('all: \\\n')
for concrete_output_index in \
xrange(0, len(concrete_outputs_by_rule_source)):
# Only list the first (index [0]) concrete output of each input
# in the "all" target. Otherwise, a parallel make (-j > 1) would
# attempt to process each input multiple times simultaneously.
# Otherwise, "all" could just contain the entire list of
# concrete_outputs_all.
concrete_output = \
concrete_outputs_by_rule_source[concrete_output_index][0]
if concrete_output_index == len(concrete_outputs_by_rule_source) - 1:
eol = ''
else:
eol = ' \\'
makefile.write(' %s%s\n' % (concrete_output, eol))
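# As a hypothetical illustration, for rule sources one.ext and two.ext with
# outputs ['$(INPUT_FILE_BASE).cc'], the loop above emits:
#   all: \
#       one.cc \
#       two.cc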
for (rule_source, concrete_outputs, message, action) in \
zip(rule['rule_sources'], concrete_outputs_by_rule_source,
messages, actions):
makefile.write('\n')
# Add a rule that declares it can build each concrete output of a
# rule source. Collect the names of the directories that are
# required.
concrete_output_dirs = []
for concrete_output_index in xrange(0, len(concrete_outputs)):
concrete_output = concrete_outputs[concrete_output_index]
if concrete_output_index == 0:
bol = ''
else:
bol = ' '
makefile.write('%s%s \\\n' % (bol, concrete_output))
concrete_output_dir = posixpath.dirname(concrete_output)
if (concrete_output_dir and
concrete_output_dir not in concrete_output_dirs):
concrete_output_dirs.append(concrete_output_dir)
makefile.write(' : \\\n')
# The prerequisites for this rule are the rule source itself and
# the set of additional rule inputs, if any.
prerequisites = [rule_source]
prerequisites.extend(rule.get('inputs', []))
for prerequisite_index in xrange(0, len(prerequisites)):
prerequisite = prerequisites[prerequisite_index]
if prerequisite_index == len(prerequisites) - 1:
eol = ''
else:
eol = ' \\'
makefile.write(' %s%s\n' % (prerequisite, eol))
# Make sure that output directories exist before executing the rule
# action.
if len(concrete_output_dirs) > 0:
makefile.write('\t@mkdir -p "%s"\n' %
'" "'.join(concrete_output_dirs))
# The rule message and action have already had the necessary variable
# substitutions performed.
if message:
# Mark it with note: so Xcode picks it up in build output.
makefile.write('\t@echo note: %s\n' % message)
makefile.write('\t%s\n' % action)
makefile.close()
# It might be nice to ensure that needed output directories exist
# here rather than in each target in the Makefile, but that wouldn't
# work if there ever was a concrete output that had an input-dependent
# variable anywhere other than in the leaf position.
# Don't declare any inputPaths or outputPaths. If they're present,
# Xcode will provide a slight optimization by only running the script
# phase if any output is missing or outdated relative to any input.
# Unfortunately, it will also assume that all outputs are touched by
# the script, and if the outputs serve as files in a compilation
# phase, they will be unconditionally rebuilt. Since make might not
# rebuild everything that could be declared here as an output, this
# extra compilation activity is unnecessary. With inputPaths and
# outputPaths not supplied, make will always be called, but it knows
# enough to not do anything when everything is up-to-date.
# To help speed things up, pass -j COUNT to make so it does some work
# in parallel. Don't use ncpus because Xcode will build ncpus targets
# in parallel and if each target happens to have a rules step, there
# would be ncpus^2 things going. With a machine that has 2 quad-core
# Xeons, a build can quickly run out of processes based on
# scheduling/other tasks, and randomly failing builds are no good.
script = \
"""JOB_COUNT="$(/usr/sbin/sysctl -n hw.ncpu)"
if [ "${JOB_COUNT}" -gt 4 ]; then
JOB_COUNT=4
fi
exec xcrun make -f "${PROJECT_FILE_PATH}/%s" -j "${JOB_COUNT}"
exit 1
""" % makefile_name
ssbp = gyp.xcodeproj_file.PBXShellScriptBuildPhase({
'name': 'Rule "' + rule['rule_name'] + '"',
'shellScript': script,
'showEnvVarsInLog': 0,
})
if support_xct:
support_xct.AppendProperty('buildPhases', ssbp)
else:
# TODO(mark): this assumes too much knowledge of the internals of
# xcodeproj_file; some of these smarts should move into xcodeproj_file
# itself.
xct._properties['buildPhases'].insert(prebuild_index, ssbp)
prebuild_index = prebuild_index + 1
# Extra rule inputs also go into the project file. Concrete outputs were
# already added when they were computed.
groups = ['inputs', 'inputs_excluded']
if skip_excluded_files:
groups = [x for x in groups if not x.endswith('_excluded')]
for group in groups:
for item in rule.get(group, []):
pbxp.AddOrGetFileInRootGroup(item)
# Add "sources".
for source in spec.get('sources', []):
(source_root, source_extension) = posixpath.splitext(source)
if source_extension[1:] not in rules_by_ext:
# AddSourceToTarget will add the file to a root group if it's not
# already there.
AddSourceToTarget(source, type, pbxp, xct)
else:
pbxp.AddOrGetFileInRootGroup(source)
# Add "mac_bundle_resources" and "mac_framework_private_headers" if
# it's a bundle of any type.
if is_bundle:
for resource in tgt_mac_bundle_resources:
(resource_root, resource_extension) = posixpath.splitext(resource)
if resource_extension[1:] not in rules_by_ext:
AddResourceToTarget(resource, pbxp, xct)
else:
pbxp.AddOrGetFileInRootGroup(resource)
for header in spec.get('mac_framework_private_headers', []):
AddHeaderToTarget(header, pbxp, xct, False)
# Add "mac_framework_headers". These can be valid for both frameworks
# and static libraries.
if is_bundle or type == 'static_library':
for header in spec.get('mac_framework_headers', []):
AddHeaderToTarget(header, pbxp, xct, True)
# Add "copies".
pbxcp_dict = {}
for copy_group in spec.get('copies', []):
dest = copy_group['destination']
if dest[0] not in ('/', '$'):
# Relative paths are relative to $(SRCROOT).
dest = '$(SRCROOT)/' + dest
code_sign = int(copy_group.get('xcode_code_sign', 0))
settings = (None, '{ATTRIBUTES = (CodeSignOnCopy, ); }')[code_sign]
# Coalesce multiple "copies" sections in the same target with the same
# "destination" property into the same PBXCopyFilesBuildPhase, otherwise
# they'll wind up with ID collisions.
pbxcp = pbxcp_dict.get(dest, None)
if pbxcp is None:
pbxcp = gyp.xcodeproj_file.PBXCopyFilesBuildPhase({
'name': 'Copy to ' + copy_group['destination']
},
parent=xct)
pbxcp.SetDestination(dest)
# TODO(mark): The usual comment about this knowing too much about
# gyp.xcodeproj_file internals applies.
xct._properties['buildPhases'].insert(prebuild_index, pbxcp)
pbxcp_dict[dest] = pbxcp
for file in copy_group['files']:
pbxcp.AddFile(file, settings)
# Excluded files can also go into the project file.
if not skip_excluded_files:
for key in ['sources', 'mac_bundle_resources', 'mac_framework_headers',
'mac_framework_private_headers']:
excluded_key = key + '_excluded'
for item in spec.get(excluded_key, []):
pbxp.AddOrGetFileInRootGroup(item)
# So can "inputs" and "outputs" sections of "actions" groups.
groups = ['inputs', 'inputs_excluded', 'outputs', 'outputs_excluded']
if skip_excluded_files:
groups = [x for x in groups if not x.endswith('_excluded')]
for action in spec.get('actions', []):
for group in groups:
for item in action.get(group, []):
# Exclude anything in BUILT_PRODUCTS_DIR. They're products, not
# sources.
if not item.startswith('$(BUILT_PRODUCTS_DIR)/'):
pbxp.AddOrGetFileInRootGroup(item)
for postbuild in spec.get('postbuilds', []):
action_string_sh = gyp.common.EncodePOSIXShellList(postbuild['action'])
script = 'exec ' + action_string_sh + '\nexit 1\n'
# Make the postbuild step depend on the output of ld or ar from this
# target. Apparently putting the script step after the link step isn't
# sufficient to ensure proper ordering in all cases. With an input
# declared but no outputs, the script step should run every time, as
# desired.
ssbp = gyp.xcodeproj_file.PBXShellScriptBuildPhase({
'inputPaths': ['$(BUILT_PRODUCTS_DIR)/$(EXECUTABLE_PATH)'],
'name': 'Postbuild "' + postbuild['postbuild_name'] + '"',
'shellScript': script,
'showEnvVarsInLog': 0,
})
xct.AppendProperty('buildPhases', ssbp)
# Add dependencies before libraries, because adding a dependency may imply
# adding a library. It's preferable to keep dependencies listed first
# during a link phase so that they can override symbols that would
# otherwise be provided by libraries, which will usually include system
# libraries. On some systems, ld is finicky and even requires the
# libraries to be ordered in such a way that unresolved symbols in
# earlier-listed libraries may only be resolved by later-listed libraries.
# The Mac linker doesn't work that way, but other platforms do, and so
# their linker invocations need to be constructed in this way. There's
# no compelling reason for Xcode's linker invocations to differ.
if 'dependencies' in spec:
for dependency in spec['dependencies']:
xct.AddDependency(xcode_targets[dependency])
# The support project also gets the dependencies (in case they are
# needed for the actions/rules to work).
if support_xct:
support_xct.AddDependency(xcode_targets[dependency])
if 'libraries' in spec:
for library in spec['libraries']:
xct.FrameworksPhase().AddFile(library)
# Add the library's directory to LIBRARY_SEARCH_PATHS if necessary.
# I wish Xcode handled this automatically.
library_dir = posixpath.dirname(library)
if library_dir not in xcode_standard_library_dirs and (
not xct.HasBuildSetting(_library_search_paths_var) or
library_dir not in xct.GetBuildSetting(_library_search_paths_var)):
xct.AppendBuildSetting(_library_search_paths_var, library_dir)
for configuration_name in configuration_names:
configuration = spec['configurations'][configuration_name]
xcbc = xct.ConfigurationNamed(configuration_name)
for include_dir in configuration.get('mac_framework_dirs', []):
xcbc.AppendBuildSetting('FRAMEWORK_SEARCH_PATHS', include_dir)
for include_dir in configuration.get('include_dirs', []):
xcbc.AppendBuildSetting('HEADER_SEARCH_PATHS', include_dir)
for library_dir in configuration.get('library_dirs', []):
if library_dir not in xcode_standard_library_dirs and (
not xcbc.HasBuildSetting(_library_search_paths_var) or
library_dir not in xcbc.GetBuildSetting(_library_search_paths_var)):
xcbc.AppendBuildSetting(_library_search_paths_var, library_dir)
if 'defines' in configuration:
for define in configuration['defines']:
set_define = EscapeXcodeDefine(define)
xcbc.AppendBuildSetting('GCC_PREPROCESSOR_DEFINITIONS', set_define)
if 'xcode_settings' in configuration:
for xck, xcv in configuration['xcode_settings'].iteritems():
xcbc.SetBuildSetting(xck, xcv)
if 'xcode_config_file' in configuration:
config_ref = pbxp.AddOrGetFileInRootGroup(
configuration['xcode_config_file'])
xcbc.SetBaseConfiguration(config_ref)
build_files = []
for build_file, build_file_dict in data.iteritems():
if build_file.endswith('.gyp'):
build_files.append(build_file)
for build_file in build_files:
xcode_projects[build_file].Finalize1(xcode_targets, serialize_all_tests)
for build_file in build_files:
xcode_projects[build_file].Finalize2(xcode_targets,
xcode_target_to_target_dict)
for build_file in build_files:
xcode_projects[build_file].Write()
| mit | 692,755,188,285,611,800 | 43.939587 | 80 | 0.656561 | false |
teonlamont/mne-python | mne/realtime/tests/test_stim_client_server.py | 2 | 2527 | import threading
import time
import pytest
from mne.realtime import StimServer, StimClient
from mne.externals.six.moves import queue
from mne.utils import requires_good_network, run_tests_if_main
_server = None
_have_put_in_trigger = False
_max_wait = 10.
@requires_good_network
def test_connection():
"""Test TCP/IP connection for StimServer <-> StimClient."""
global _server, _have_put_in_trigger
# have to start a thread to simulate the effect of two
# different computers since stim_server.start() is designed to
# be a blocking method
# use separate queues because timing matters
trig_queue1 = queue.Queue()
trig_queue2 = queue.Queue()
# start a thread to emulate 1st client
thread1 = threading.Thread(target=_connect_client, args=(trig_queue1,))
thread1.daemon = True
# start another thread to emulate 2nd client
thread2 = threading.Thread(target=_connect_client, args=(trig_queue2,))
thread2.daemon = True
thread1.start()
thread2.start()
with StimServer(port=4218, n_clients=2) as stim_server:
_server = stim_server
stim_server.start(timeout=10.0) # don't allow test to hang
# Add the trigger to the queue for both clients
stim_server.add_trigger(20)
_have_put_in_trigger = True # monkey patch
# the assert_equal must be in the test_connection() method
# Hence communication between threads is necessary
trig1 = trig_queue1.get(timeout=_max_wait)
trig2 = trig_queue2.get(timeout=_max_wait)
assert trig1 == 20
# test if both clients receive the same trigger
assert trig1 == trig2
# test timeout for stim_server
with StimServer(port=4218) as stim_server:
pytest.raises(StopIteration, stim_server.start, 0.1)
def _connect_client(trig_queue):
"""Instantiate the StimClient."""
# just wait till the main thread reaches stim_server.start()
t0 = time.time()
while (time.time() - t0 < _max_wait and
(_server is None or not _server._running)):
time.sleep(0.01)
assert _server is not None and _server._running
# instantiate StimClient
stim_client = StimClient('localhost', port=4218)
# wait for script to reach stim_server.add_trigger()
t0 = time.time()
while (time.time() - t0 < _max_wait and not _have_put_in_trigger):
time.sleep(0.01)
assert _have_put_in_trigger
trig_queue.put(stim_client.get_trigger())
stim_client.close()
run_tests_if_main()
| bsd-3-clause | -9,078,651,910,061,496,000 | 29.817073 | 75 | 0.672339 | false |