repo_name (string, 5–100 chars) | ref (string, 12–67 chars) | path (string, 4–244 chars) | copies (string, 1–8 chars) | content (string, 0–1.05M chars, ⌀ = null) |
---|---|---|---|---|
nvoron23/hue | refs/heads/master | desktop/core/ext-py/Paste-1.7.2/tests/urlparser_data/python/sub/__init__.py | 9480 | #
|
m0tive/cug | refs/heads/master | scons/scons-local-1.3.0/SCons/Tool/sunf90.py | 5 | """SCons.Tool.sunf90
Tool-specific initialization for sunf90, the Sun Studio F90 compiler.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/sunf90.py 4720 2010/03/24 03:14:11 jars"
import SCons.Util
from FortranCommon import add_all_to_env
compilers = ['sunf90', 'f90']
def generate(env):
"""Add Builders and construction variables for sun f90 compiler to an
Environment."""
add_all_to_env(env)
fcomp = env.Detect(compilers) or 'f90'
env['FORTRAN'] = fcomp
env['F90'] = fcomp
env['SHFORTRAN'] = '$FORTRAN'
env['SHF90'] = '$F90'
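# -KPIC is the Sun Studio compilers' flag for position-independent code, required for shared objects.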
env['SHFORTRANFLAGS'] = SCons.Util.CLVar('$FORTRANFLAGS -KPIC')
env['SHF90FLAGS'] = SCons.Util.CLVar('$F90FLAGS -KPIC')
def exists(env):
return env.Detect(compilers)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
Jonnymcc/ansible | refs/heads/devel | test/units/executor/test_task_result.py | 104 | # (c) 2016, James Cammarata <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.compat.tests import unittest
from ansible.compat.tests.mock import patch, MagicMock
from ansible.executor.task_result import TaskResult
class TestTaskResult(unittest.TestCase):
def test_task_result_basic(self):
mock_host = MagicMock()
mock_task = MagicMock()
# test loading a result with a dict
tr = TaskResult(mock_host, mock_task, dict())
# test loading a result with a JSON string
with patch('ansible.parsing.dataloader.DataLoader.load') as p:
tr = TaskResult(mock_host, mock_task, '{}')
def test_task_result_is_changed(self):
mock_host = MagicMock()
mock_task = MagicMock()
# test with no changed in result
tr = TaskResult(mock_host, mock_task, dict())
self.assertFalse(tr.is_changed())
# test with changed in the result
tr = TaskResult(mock_host, mock_task, dict(changed=True))
self.assertTrue(tr.is_changed())
# test with multiple results but none changed
mock_task.loop = 'foo'
tr = TaskResult(mock_host, mock_task, dict(results=[dict(foo='bar'), dict(bam='baz'), True]))
self.assertFalse(tr.is_changed())
# test with multiple results and one changed
mock_task.loop = 'foo'
tr = TaskResult(mock_host, mock_task, dict(results=[dict(changed=False), dict(changed=True), dict(some_key=False)]))
self.assertTrue(tr.is_changed())
def test_task_result_is_skipped(self):
mock_host = MagicMock()
mock_task = MagicMock()
# test with no skipped in result
tr = TaskResult(mock_host, mock_task, dict())
self.assertFalse(tr.is_skipped())
# test with skipped in the result
tr = TaskResult(mock_host, mock_task, dict(skipped=True))
self.assertTrue(tr.is_skipped())
# test with multiple results but none skipped
mock_task.loop = 'foo'
tr = TaskResult(mock_host, mock_task, dict(results=[dict(foo='bar'), dict(bam='baz'), True]))
self.assertFalse(tr.is_skipped())
# test with multiple results and only one skipped (is_skipped() returns True only when every result is skipped)
mock_task.loop = 'foo'
tr = TaskResult(mock_host, mock_task, dict(results=[dict(skipped=False), dict(skipped=True), dict(some_key=False)]))
self.assertFalse(tr.is_skipped())
# test with multiple results and all skipped
mock_task.loop = 'foo'
tr = TaskResult(mock_host, mock_task, dict(results=[dict(skipped=True), dict(skipped=True), dict(skipped=True)]))
self.assertTrue(tr.is_skipped())
# test with multiple squashed results (list of strings)
# first with the main result having skipped=False
mock_task.loop = 'foo'
tr = TaskResult(mock_host, mock_task, dict(results=["a", "b", "c"], skipped=False))
self.assertFalse(tr.is_skipped())
# then with the main result having skipped=True
tr = TaskResult(mock_host, mock_task, dict(results=["a", "b", "c"], skipped=True))
self.assertTrue(tr.is_skipped())
def test_task_result_is_unreachable(self):
mock_host = MagicMock()
mock_task = MagicMock()
# test with no unreachable in result
tr = TaskResult(mock_host, mock_task, dict())
self.assertFalse(tr.is_unreachable())
# test with unreachable in the result
tr = TaskResult(mock_host, mock_task, dict(unreachable=True))
self.assertTrue(tr.is_unreachable())
# test with multiple results but none unreachable
mock_task.loop = 'foo'
tr = TaskResult(mock_host, mock_task, dict(results=[dict(foo='bar'), dict(bam='baz'), True]))
self.assertFalse(tr.is_unreachable())
# test with multiple results and one unreachable
mock_task.loop = 'foo'
tr = TaskResult(mock_host, mock_task, dict(results=[dict(unreachable=False), dict(unreachable=True), dict(some_key=False)]))
self.assertTrue(tr.is_unreachable())
def test_task_result_is_failed(self):
mock_host = MagicMock()
mock_task = MagicMock()
# test with no failed in result
tr = TaskResult(mock_host, mock_task, dict())
self.assertFalse(tr.is_failed())
# test failed result with rc values
tr = TaskResult(mock_host, mock_task, dict(rc=0))
self.assertFalse(tr.is_failed())
tr = TaskResult(mock_host, mock_task, dict(rc=1))
self.assertTrue(tr.is_failed())
# test with failed in result
tr = TaskResult(mock_host, mock_task, dict(failed=True))
self.assertTrue(tr.is_failed())
# test with failed_when in result
tr = TaskResult(mock_host, mock_task, dict(failed_when_result=True))
self.assertTrue(tr.is_failed())
|
olemis/brython | refs/heads/master | www/src/Lib/test/test_filecmp.py | 38 |
import os, filecmp, shutil, tempfile
import unittest
from test import support
class FileCompareTestCase(unittest.TestCase):
def setUp(self):
self.name = support.TESTFN
self.name_same = support.TESTFN + '-same'
self.name_diff = support.TESTFN + '-diff'
data = 'Contents of file go here.\n'
for name in [self.name, self.name_same, self.name_diff]:
output = open(name, 'w')
output.write(data)
output.close()
output = open(self.name_diff, 'a+')
output.write('An extra line.\n')
output.close()
self.dir = tempfile.gettempdir()
def tearDown(self):
os.unlink(self.name)
os.unlink(self.name_same)
os.unlink(self.name_diff)
def test_matching(self):
self.assertTrue(filecmp.cmp(self.name, self.name_same),
"Comparing file to itself fails")
self.assertTrue(filecmp.cmp(self.name, self.name_same, shallow=False),
"Comparing file to itself fails")
self.assertTrue(filecmp.cmp(self.name, self.name, shallow=False),
"Comparing file to identical file fails")
self.assertTrue(filecmp.cmp(self.name, self.name),
"Comparing file to identical file fails")
def test_different(self):
self.assertFalse(filecmp.cmp(self.name, self.name_diff),
"Mismatched files compare as equal")
self.assertFalse(filecmp.cmp(self.name, self.dir),
"File and directory compare as equal")
class DirCompareTestCase(unittest.TestCase):
def setUp(self):
tmpdir = tempfile.gettempdir()
self.dir = os.path.join(tmpdir, 'dir')
self.dir_same = os.path.join(tmpdir, 'dir-same')
self.dir_diff = os.path.join(tmpdir, 'dir-diff')
self.caseinsensitive = os.path.normcase('A') == os.path.normcase('a')
data = 'Contents of file go here.\n'
for dir in [self.dir, self.dir_same, self.dir_diff]:
shutil.rmtree(dir, True)
os.mkdir(dir)
if self.caseinsensitive and dir is self.dir_same:
fn = 'FiLe' # Verify case-insensitive comparison
else:
fn = 'file'
output = open(os.path.join(dir, fn), 'w')
output.write(data)
output.close()
output = open(os.path.join(self.dir_diff, 'file2'), 'w')
output.write('An extra file.\n')
output.close()
def tearDown(self):
shutil.rmtree(self.dir)
shutil.rmtree(self.dir_same)
shutil.rmtree(self.dir_diff)
def test_cmpfiles(self):
self.assertTrue(filecmp.cmpfiles(self.dir, self.dir, ['file']) ==
(['file'], [], []),
"Comparing directory to itself fails")
self.assertTrue(filecmp.cmpfiles(self.dir, self.dir_same, ['file']) ==
(['file'], [], []),
"Comparing directory to same fails")
# Try it with shallow=False
self.assertTrue(filecmp.cmpfiles(self.dir, self.dir, ['file'],
shallow=False) ==
(['file'], [], []),
"Comparing directory to itself fails")
self.assertTrue(filecmp.cmpfiles(self.dir, self.dir_same, ['file'],
shallow=False) ==
(['file'], [], []),
"Comparing directory to same fails")
# Add different file2
output = open(os.path.join(self.dir, 'file2'), 'w')
output.write('Different contents.\n')
output.close()
self.assertFalse(filecmp.cmpfiles(self.dir, self.dir_same,
['file', 'file2']) ==
(['file'], ['file2'], []),
"Comparing mismatched directories fails")
def test_dircmp(self):
# Check attributes for comparison of two identical directories
left_dir, right_dir = self.dir, self.dir_same
d = filecmp.dircmp(left_dir, right_dir)
self.assertEqual(d.left, left_dir)
self.assertEqual(d.right, right_dir)
if self.caseinsensitive:
self.assertEqual([d.left_list, d.right_list],[['file'], ['FiLe']])
else:
self.assertEqual([d.left_list, d.right_list],[['file'], ['file']])
self.assertEqual(d.common, ['file'])
self.assertTrue(d.left_only == d.right_only == [])
self.assertEqual(d.same_files, ['file'])
self.assertEqual(d.diff_files, [])
# Check attributes for comparison of two different directories
left_dir, right_dir = self.dir, self.dir_diff
d = filecmp.dircmp(left_dir, right_dir)
self.assertEqual(d.left, left_dir)
self.assertEqual(d.right, right_dir)
self.assertEqual(d.left_list, ['file'])
self.assertTrue(d.right_list == ['file', 'file2'])
self.assertEqual(d.common, ['file'])
self.assertEqual(d.left_only, [])
self.assertEqual(d.right_only, ['file2'])
self.assertEqual(d.same_files, ['file'])
self.assertEqual(d.diff_files, [])
# Add different file2
output = open(os.path.join(self.dir, 'file2'), 'w')
output.write('Different contents.\n')
output.close()
d = filecmp.dircmp(self.dir, self.dir_diff)
self.assertEqual(d.same_files, ['file'])
self.assertEqual(d.diff_files, ['file2'])
def test_main():
support.run_unittest(FileCompareTestCase, DirCompareTestCase)
if __name__ == "__main__":
test_main()
|
luiseduardohdbackup/elasticsearch | refs/heads/master | dev-tools/build_release.py | 27 | # Licensed to Elasticsearch under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on
# an 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import re
import tempfile
import shutil
import os
import datetime
import json
import time
import sys
import argparse
import hmac
import urllib
import fnmatch
import socket
import urllib.request
import subprocess
from functools import partial
from http.client import HTTPConnection
from http.client import HTTPSConnection
"""
This tool builds a release from a given elasticsearch branch.
In order to execute it go to the top level directory and run:
$ python3 dev-tools/build_release.py --branch 0.90 --publish --remote origin
By default this script runs in 'dry' mode which essentially simulates a release. If the
'--publish' option is set the actual release is done. The script takes over almost all
steps necessary for a release. From a high level point of view it does the following things:
- run prerequisite checks, i.e. check for Java 1.7 being present or S3 credentials available as env variables
- detect the version to release from the specified branch (--branch) or the current branch
- creates a release branch & updates pom.xml and Version.java to point to a release version rather than a snapshot
- builds the artifacts and runs smoke-tests on the build zip & tar.gz files
- commits the new version and merges the release branch into the source branch
- creates a tag and pushes the commit to the specified origin (--remote)
- publishes the releases to Sonatype and S3
Once it's done it will print all the remaining steps.
Prerequisites:
- Python 3k for script execution
- Boto for S3 Upload ($ apt-get install python-boto)
- RPM for RPM building ($ apt-get install rpm)
- S3 keys exported via ENV variables (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY)
- GPG data exported via ENV variables (GPG_KEY_ID, GPG_PASSPHRASE, optionally GPG_KEYRING)
- S3 target repository via ENV variables (S3_BUCKET_SYNC_TO, optionally S3_BUCKET_SYNC_FROM)
"""
env = os.environ
PLUGINS = [('license', 'elasticsearch/license/latest'),
('bigdesk', 'lukas-vlcek/bigdesk'),
('paramedic', 'karmi/elasticsearch-paramedic'),
('segmentspy', 'polyfractal/elasticsearch-segmentspy'),
('inquisitor', 'polyfractal/elasticsearch-inquisitor'),
('head', 'mobz/elasticsearch-head')]
LOG = env.get('ES_RELEASE_LOG', '/tmp/elasticsearch_release.log')
# console colors
COLOR_OK = '\033[92m'
COLOR_END = '\033[0m'
COLOR_FAIL = '\033[91m'
def log(msg):
log_plain('\n%s' % msg)
def log_plain(msg):
f = open(LOG, mode='ab')
f.write(msg.encode('utf-8'))
f.close()
def run(command, quiet=False):
log('%s: RUN: %s\n' % (datetime.datetime.now(), command))
if os.system('%s >> %s 2>&1' % (command, LOG)):
msg = ' FAILED: %s [see log %s]' % (command, LOG)
if not quiet:
print(msg)
raise RuntimeError(msg)
try:
JAVA_HOME = env['JAVA_HOME']
except KeyError:
raise RuntimeError("""
Please set JAVA_HOME in the env before running release tool
On OSX use: export JAVA_HOME=`/usr/libexec/java_home -v '1.7*'`""")
try:
JAVA_HOME = env['JAVA7_HOME']
except KeyError:
pass #no JAVA7_HOME - we rely on JAVA_HOME
try:
# make sure mvn3 is used if mvn3 is available
# some systems use maven 2 as default
subprocess.check_output('mvn3 --version', shell=True, stderr=subprocess.STDOUT)
MVN = 'mvn3'
except subprocess.CalledProcessError:
MVN = 'mvn'
def java_exe():
path = JAVA_HOME
return 'export JAVA_HOME="%s" PATH="%s/bin:$PATH" JAVACMD="%s/bin/java"' % (path, path, path)
def verify_java_version(version):
s = os.popen('%s; java -version 2>&1' % java_exe()).read()
if ' version "%s.' % version not in s:
raise RuntimeError('got wrong version for java %s:\n%s' % (version, s))
# Verifies the java version. We guarantee that we run with Java 1.7
# If 1.7 is not available fail the build!
def verify_mvn_java_version(version, mvn):
s = os.popen('%s; %s --version 2>&1' % (java_exe(), mvn)).read()
if 'Java version: %s' % version not in s:
raise RuntimeError('got wrong java version for %s %s:\n%s' % (mvn, version, s))
# Returns the hash of the current git HEAD revision
def get_head_hash():
return os.popen(' git rev-parse --verify HEAD 2>&1').read().strip()
# Returns the hash of the given tag revision
def get_tag_hash(tag):
return os.popen('git show-ref --tags %s --hash 2>&1' % (tag)).read().strip()
# Returns the name of the current branch
def get_current_branch():
return os.popen('git rev-parse --abbrev-ref HEAD 2>&1').read().strip()
# Utility that returns the name of the release branch for a given version
def release_branch(version):
return 'release_branch_%s' % version
# runs git fetch on the given remote
def fetch(remote):
run('git fetch %s' % remote)
# Creates a new release branch from the given source branch
# and rebases the source branch from the remote before creating
# the release branch. Note: This fails if the source branch
# doesn't exist on the provided remote.
def create_release_branch(remote, src_branch, release):
run('git checkout %s' % src_branch)
run('git pull --rebase %s %s' % (remote, src_branch))
run('git checkout -b %s' % (release_branch(release)))
# Reads the given file and applies the
# callback to it. If the callback changed
# a line the given file is replaced with
# the modified input.
def process_file(file_path, line_callback):
fh, abs_path = tempfile.mkstemp()
modified = False
with open(abs_path,'w', encoding='utf-8') as new_file:
with open(file_path, encoding='utf-8') as old_file:
for line in old_file:
new_line = line_callback(line)
modified = modified or (new_line != line)
new_file.write(new_line)
os.close(fh)
if modified:
#Remove original file
os.remove(file_path)
#Move new file
shutil.move(abs_path, file_path)
return True
else:
# nothing to do - just remove the tmp file
os.remove(abs_path)
return False
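# Hypothetical usage (file name is illustrative only): upper-case every line of a file in place
#   process_file('NOTES.txt', lambda line: line.upper())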
# Walks the given directory path (defaults to 'docs')
# and replaces all 'coming[$version]' tags with
# 'added[$version]'. This method only accesses asciidoc files.
def update_reference_docs(release_version, path='docs'):
pattern = 'coming[%s' % (release_version)
replacement = 'added[%s' % (release_version)
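# e.g. 'coming[1.5.0]' becomes 'added[1.5.0]' (version shown for illustration)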
pending_files = []
def callback(line):
return line.replace(pattern, replacement)
for root, _, file_names in os.walk(path):
for file_name in fnmatch.filter(file_names, '*.asciidoc'):
full_path = os.path.join(root, file_name)
if process_file(full_path, callback):
pending_files.append(os.path.join(root, file_name))
return pending_files
# Moves the pom.xml file from a snapshot to a release
def remove_maven_snapshot(pom, release):
pattern = '<version>%s-SNAPSHOT</version>' % (release)
replacement = '<version>%s</version>' % (release)
def callback(line):
return line.replace(pattern, replacement)
process_file(pom, callback)
# Moves the Version.java file from a snapshot to a release
def remove_version_snapshot(version_file, release):
# 1.0.0.Beta1 -> 1_0_0_Beta1
release = release.replace('.', '_')
pattern = 'new Version(V_%s_ID, true' % (release)
replacement = 'new Version(V_%s_ID, false' % (release)
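# e.g. 'new Version(V_1_0_0_Beta1_ID, true' -> 'new Version(V_1_0_0_Beta1_ID, false'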
def callback(line):
return line.replace(pattern, replacement)
process_file(version_file, callback)
# Stages the given files for the next git commit
def add_pending_files(*files):
for file in files:
run('git add %s' % (file))
# Executes a git commit with 'release [version]' as the commit message
def commit_release(release):
run('git commit -m "release [%s]"' % release)
def commit_feature_flags(release):
run('git commit -m "Update Documentation Feature Flags [%s]"' % release)
def tag_release(release):
run('git tag -a v%s -m "Tag release version %s"' % (release, release))
def run_mvn(*cmd):
for c in cmd:
run('%s; %s %s' % (java_exe(), MVN, c))
def build_release(release_version, run_tests=False, dry_run=True, cpus=1, bwc_version=None):
target = 'deploy'
if dry_run:
target = 'package'
if run_tests:
run_mvn('clean',
'test -Dtests.jvms=%s -Des.node.mode=local' % (cpus),
'test -Dtests.jvms=%s -Des.node.mode=network' % (cpus))
if bwc_version:
print('Running Backwards compatibility tests against version [%s]' % (bwc_version))
run_mvn('clean', 'test -Dtests.filter=@backwards -Dtests.bwc.version=%s -Dtests.bwc=true -Dtests.jvms=1' % bwc_version)
run_mvn('clean test-compile -Dforbidden.test.signatures="org.apache.lucene.util.LuceneTestCase\$AwaitsFix @ Please fix all bugs before release"')
# don't sign the RPM, so older distros will be able to use the uploaded RPM package
gpg_args = '-Dgpg.key="%s" -Dgpg.passphrase="%s" -Ddeb.sign=true -Drpm.sign=false' % (env.get('GPG_KEY_ID'), env.get('GPG_PASSPHRASE'))
if env.get('GPG_KEYRING'):
gpg_args += ' -Dgpg.keyring="%s"' % env.get('GPG_KEYRING')
run_mvn('clean %s -DskipTests %s' % (target, gpg_args))
success = False
try:
# create additional signed RPM for the repositories
run_mvn('-f distribution/rpm/pom.xml package -DskipTests -Dsign.rpm=true -Drpm.outputDirectory=target/releases/signed/ %s' % (gpg_args))
rpm = os.path.join('target/releases/signed', 'elasticsearch-%s.rpm' % release_version)
if os.path.isfile(rpm):
log('Signed RPM [%s] contains: ' % rpm)
run('rpm -pqli %s' % rpm)
success = True
finally:
if not success:
print("""
RPM building failed. Make sure the "rpm" tools are installed.
Use one of the following commands to install:
$ brew install rpm # on OSX
$ apt-get install rpm # on Ubuntu et al.
""")
# Uses the github API to fetch open tickets for the given release version
# if it finds any tickets open for that version it will throw an exception
def ensure_no_open_tickets(version):
version = "v%s" % version
conn = HTTPSConnection('api.github.com')
try:
log('Checking for open tickets on Github for version %s' % version)
log('Querying Github for open issues')
conn.request('GET', '/repos/elastic/elasticsearch/issues?state=open&labels=%s' % version, headers= {'User-Agent' : 'Elasticsearch version checker'})
res = conn.getresponse()
if res.status == 200:
issues = json.loads(res.read().decode("utf-8"))
if issues:
urls = []
for issue in issues:
urls.append(issue['html_url'])
raise RuntimeError('Found open issues for release version %s:\n%s' % (version, '\n'.join(urls)))
else:
log("No open issues found for version %s" % version)
else:
raise RuntimeError('Failed to fetch issue list from Github for release version %s' % version)
except socket.error as e:
log("Failed to fetch issue list from Github for release version %s' % version - Exception: [%s]" % (version, e))
#that is ok it might not be there yet
finally:
conn.close()
def wait_for_node_startup(host='127.0.0.1', port=9200, timeout=15):
for _ in range(timeout):
conn = HTTPConnection(host, port, timeout)
try:
log('Waiting 1 second for node to become available')
time.sleep(1)
log('Check if node is available')
conn.request('GET', '')
res = conn.getresponse()
if res.status == 200:
return True
except socket.error as e:
log("Failed while waiting for node - Exception: [%s]" % e)
#that is ok it might not be there yet
finally:
conn.close()
return False
# Ensures we are using a true Lucene release, not a snapshot build:
def verify_lucene_version():
s = open('pom.xml', encoding='utf-8').read()
if 'download.elastic.co/lucenesnapshots' in s:
raise RuntimeError('pom.xml contains download.elastic.co/lucenesnapshots repository: remove that before releasing')
m = re.search(r'<lucene.version>(.*?)</lucene.version>', s)
if m is None:
raise RuntimeError('unable to locate lucene.version in pom.xml')
lucene_version = m.group(1)
m = re.search(r'<lucene.maven.version>(.*?)</lucene.maven.version>', s)
if m is None:
raise RuntimeError('unable to locate lucene.maven.version in pom.xml')
lucene_maven_version = m.group(1)
if lucene_version != lucene_maven_version:
raise RuntimeError('pom.xml is still using a snapshot release of lucene (%s): cutover to a real lucene release before releasing' % lucene_maven_version)
# Checks the pom.xml for the release version.
# This method fails if the pom file has no SNAPSHOT version set, i.e.
# if the version is already a release version we fail.
# Returns the next version string, e.g. 0.90.7
def find_release_version(src_branch):
run('git checkout %s' % src_branch)
with open('pom.xml', encoding='utf-8') as file:
for line in file:
match = re.search(r'<version>(.+)-SNAPSHOT</version>', line)
if match:
return match.group(1)
raise RuntimeError('Could not find release version in branch %s' % src_branch)
def artifact_names(release):
artifacts = []
artifacts.append(os.path.join('distribution/zip/target/releases', 'elasticsearch-%s.zip' % (release)))
artifacts.append(os.path.join('distribution/tar/target/releases', 'elasticsearch-%s.tar.gz' % (release)))
artifacts.append(os.path.join('distribution/deb/target/releases', 'elasticsearch-%s.deb' % (release)))
artifacts.append(os.path.join('distribution/rpm/target/releases', 'elasticsearch-%s.rpm' % (release)))
return artifacts
def get_artifacts(release):
common_artifacts = artifact_names(release)
for f in common_artifacts:
if not os.path.isfile(f):
raise RuntimeError('Could not find required artifact at %s' % f)
return common_artifacts
# Sample URL:
# http://download.elasticsearch.org/elasticsearch/release/org/elasticsearch/distribution/elasticsearch-rpm/2.0.0-beta1-SNAPSHOT/elasticsearch-rpm-2.0.0-beta1-SNAPSHOT.rpm
def download_and_verify(release, files, plugins=None, base_url='https://download.elastic.co/elasticsearch/release/org/elasticsearch/distribution'):
print('Downloading and verifying release %s from %s' % (release, base_url))
tmp_dir = tempfile.mkdtemp()
try:
downloaded_files = []
for file in files:
name = os.path.basename(file)
if name.endswith('tar.gz'):
url = '%s/tar/elasticsearch/%s/%s' % (base_url, release, name)
elif name.endswith('zip'):
url = '%s/zip/elasticsearch/%s/%s' % (base_url, release, name)
elif name.endswith('rpm'):
url = '%s/rpm/elasticsearch/%s/%s' % (base_url, release, name)
elif name.endswith('deb'):
url = '%s/deb/elasticsearch/%s/%s' % (base_url, release, name)
abs_file_path = os.path.join(tmp_dir, name)
print(' Downloading %s' % (url))
downloaded_files.append(abs_file_path)
urllib.request.urlretrieve(url, abs_file_path)
url = ''.join([url, '.sha1'])
checksum_file = os.path.join(tmp_dir, ''.join([abs_file_path, '.sha1']))
urllib.request.urlretrieve(url, checksum_file)
print(' Verifying checksum %s' % (checksum_file))
run('cd %s && sha1sum -c %s' % (tmp_dir, os.path.basename(checksum_file)))
smoke_test_release(release, downloaded_files, get_tag_hash('v%s' % release), plugins)
print(' SUCCESS')
finally:
shutil.rmtree(tmp_dir)
def smoke_test_release(release, files, expected_hash, plugins):
for release_file in files:
if not os.path.isfile(release_file):
raise RuntimeError('Smoketest failed missing file %s' % (release_file))
tmp_dir = tempfile.mkdtemp()
if release_file.endswith('tar.gz'):
run('tar -xzf %s -C %s' % (release_file, tmp_dir))
elif release_file.endswith('zip'):
run('unzip %s -d %s' % (release_file, tmp_dir))
else:
log('Skip SmokeTest for [%s]' % release_file)
continue # nothing to do here
es_run_path = os.path.join(tmp_dir, 'elasticsearch-%s' % (release), 'bin/elasticsearch')
print(' Smoke testing package [%s]' % release_file)
es_plugin_path = os.path.join(tmp_dir, 'elasticsearch-%s' % (release),'bin/plugin')
plugin_names = {}
for name, plugin in plugins:
print(' Install plugin [%s] from [%s]' % (name, plugin))
run('%s; %s install %s' % (java_exe(), es_plugin_path, plugin))
plugin_names[name] = True
background = '-d'
print(' Starting elasticsearch daemon from [%s]' % os.path.join(tmp_dir, 'elasticsearch-%s' % release))
run('%s; %s -Des.node.name=smoke_tester -Des.cluster.name=prepare_release -Des.discovery.zen.ping.multicast.enabled=false -Des.script.inline=on -Des.script.indexed=on %s'
% (java_exe(), es_run_path, background))
conn = HTTPConnection('127.0.0.1', 9200, 20)
wait_for_node_startup()
try:
try:
conn.request('GET', '')
res = conn.getresponse()
if res.status == 200:
version = json.loads(res.read().decode("utf-8"))['version']
if release != version['number']:
raise RuntimeError('Expected version [%s] but was [%s]' % (release, version['number']))
if version['build_snapshot']:
raise RuntimeError('Expected non snapshot version')
if version['build_hash'].strip() != expected_hash:
raise RuntimeError('HEAD hash does not match expected [%s] but got [%s]' % (expected_hash, version['build_hash']))
print(' Running REST Spec tests against package [%s]' % release_file)
run_mvn('test -Dtests.cluster=%s -Dtests.jvms=1 -Dtests.class=*.*RestTests' % ("127.0.0.1:9300"))
print(' Verify if plugins are listed in _nodes')
conn.request('GET', '/_nodes?plugin=true&pretty=true')
res = conn.getresponse()
if res.status == 200:
nodes = json.loads(res.read().decode("utf-8"))['nodes']
for _, node in nodes.items():
node_plugins = node['plugins']
for node_plugin in node_plugins:
if not plugin_names.get(node_plugin['name'], False):
raise RuntimeError('Unexpected plugin %s' % node_plugin['name'])
del plugin_names[node_plugin['name']]
if plugin_names:
raise RuntimeError('Plugins not loaded %s' % list(plugin_names.keys()))
else:
raise RuntimeError('Expected HTTP 200 but got %s' % res.status)
else:
raise RuntimeError('Expected HTTP 200 but got %s' % res.status)
finally:
conn.request('POST', '/_cluster/nodes/_local/_shutdown')
time.sleep(1) # give the node some time to shut down
if conn.getresponse().status != 200:
raise RuntimeError('Expected HTTP 200 but got %s on node shutdown' % res.status)
finally:
conn.close()
shutil.rmtree(tmp_dir)
def merge_tag_push(remote, src_branch, release_version, dry_run):
run('git checkout %s' % src_branch)
run('git merge %s' % release_branch(release_version))
run('git tag v%s' % release_version)
if not dry_run:
run('git push %s %s' % (remote, src_branch)) # push the commit
run('git push %s v%s' % (remote, release_version)) # push the tag
else:
print(' dryrun [True] -- skipping push to remote %s' % remote)
def publish_repositories(version, dry_run=True):
if dry_run:
print('Skipping package repository update')
else:
print('Triggering repository update for version %s - calling dev-tools/build_repositories.sh %s' % (version, version))
# the version passed in is the source branch name (1.5/1.6/2.0/etc.), which is what build_repositories.sh expects
run('dev-tools/build_repositories.sh %s' % version)
def print_sonatype_notice():
settings = os.path.join(os.path.expanduser('~'), '.m2/settings.xml')
if os.path.isfile(settings):
with open(settings, encoding='utf-8') as settings_file:
for line in settings_file:
if line.strip() == '<id>sonatype-nexus-snapshots</id>':
# moving out - we found the indicator no need to print the warning
return
print("""
NOTE: No sonatype settings detected, make sure you have configured
your sonatype credentials in '~/.m2/settings.xml':
<settings>
...
<servers>
<server>
<id>sonatype-nexus-snapshots</id>
<username>your-jira-id</username>
<password>your-jira-pwd</password>
</server>
<server>
<id>sonatype-nexus-staging</id>
<username>your-jira-id</username>
<password>your-jira-pwd</password>
</server>
</servers>
...
</settings>
""")
def check_command_exists(name, cmd):
try:
subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError:
raise RuntimeError('Could not run command %s - please make sure it is installed' % (name))
VERSION_FILE = 'src/main/java/org/elasticsearch/Version.java'
POM_FILE = 'pom.xml'
# finds the highest available bwc version to test against
def find_bwc_version(release_version, bwc_dir='backwards'):
log(' Lookup bwc version in directory [%s]' % bwc_dir)
bwc_version = None
if os.path.exists(bwc_dir) and os.path.isdir(bwc_dir):
max_version = [int(x) for x in release_version.split('.')]
for dir in os.listdir(bwc_dir):
if os.path.isdir(os.path.join(bwc_dir, dir)) and dir.startswith('elasticsearch-'):
version = [int(x) for x in dir[len('elasticsearch-'):].split('.')]
if version < max_version: # bwc tests only against smaller versions
if (not bwc_version) or version > [int(x) for x in bwc_version.split('.')]:
bwc_version = dir[len('elasticsearch-'):]
log(' Using bwc version [%s]' % bwc_version)
else:
log(' bwc directory [%s] does not exists or is not a directory - skipping' % bwc_dir)
return bwc_version
def ensure_checkout_is_clean(branchName):
# Make sure no local mods:
s = subprocess.check_output('git diff --shortstat', shell=True)
if len(s) > 0:
raise RuntimeError('git diff --shortstat is non-empty: got:\n%s' % s)
# Make sure no untracked files:
s = subprocess.check_output('git status', shell=True).decode('utf-8', errors='replace')
if 'Untracked files:' in s:
raise RuntimeError('git status shows untracked files: got:\n%s' % s)
# Make sure we are on the right branch (NOTE: a bit weak, since we default to current branch):
if 'On branch %s' % branchName not in s:
raise RuntimeError('git status does not show branch %s: got:\n%s' % (branchName, s))
# Make sure we have all changes from origin:
if 'is behind' in s:
raise RuntimeError('git status shows not all changes pulled from origin; try running "git pull origin %s": got:\n%s' % (branchName, s))
# Make sure we no local unpushed changes (this is supposed to be a clean area):
if 'is ahead' in s:
raise RuntimeError('git status shows local commits; try running "git fetch origin", "git checkout %s", "git reset --hard origin/%s": got:\n%s' % (branchName, branchName, s))
# Checks all source files for //NORELEASE comments
def check_norelease(path='src'):
pattern = re.compile(r'\bnorelease\b', re.IGNORECASE)
for root, _, file_names in os.walk(path):
for file_name in fnmatch.filter(file_names, '*.java'):
full_path = os.path.join(root, file_name)
line_number = 0
with open(full_path, 'r', encoding='utf-8') as current_file:
for line in current_file:
line_number = line_number + 1
if pattern.search(line):
raise RuntimeError('Found //norelease comment in %s line %s' % (full_path, line_number))
def run_and_print(text, run_function):
try:
print(text, end='')
run_function()
print(COLOR_OK + 'OK' + COLOR_END)
return True
except RuntimeError:
print(COLOR_FAIL + 'NOT OK' + COLOR_END)
return False
def check_env_var(text, env_var):
try:
print(text, end='')
env[env_var]
print(COLOR_OK + 'OK' + COLOR_END)
return True
except KeyError:
print(COLOR_FAIL + 'NOT OK' + COLOR_END)
return False
def check_environment_and_commandline_tools(check_only):
checks = list()
checks.append(check_env_var('Checking for AWS env configuration AWS_SECRET_ACCESS_KEY_ID... ', 'AWS_SECRET_ACCESS_KEY'))
checks.append(check_env_var('Checking for AWS env configuration AWS_ACCESS_KEY_ID... ', 'AWS_ACCESS_KEY_ID'))
checks.append(check_env_var('Checking for SONATYPE env configuration SONATYPE_USERNAME... ', 'SONATYPE_USERNAME'))
checks.append(check_env_var('Checking for SONATYPE env configuration SONATYPE_PASSWORD... ', 'SONATYPE_PASSWORD'))
checks.append(check_env_var('Checking for GPG env configuration GPG_KEY_ID... ', 'GPG_KEY_ID'))
checks.append(check_env_var('Checking for GPG env configuration GPG_PASSPHRASE... ', 'GPG_PASSPHRASE'))
checks.append(check_env_var('Checking for S3 repo upload env configuration S3_BUCKET_SYNC_TO... ', 'S3_BUCKET_SYNC_TO'))
checks.append(check_env_var('Checking for git env configuration GIT_AUTHOR_NAME... ', 'GIT_AUTHOR_NAME'))
checks.append(check_env_var('Checking for git env configuration GIT_AUTHOR_EMAIL... ', 'GIT_AUTHOR_EMAIL'))
checks.append(run_and_print('Checking command: rpm... ', partial(check_command_exists, 'rpm', 'rpm --version')))
checks.append(run_and_print('Checking command: dpkg... ', partial(check_command_exists, 'dpkg', 'dpkg --version')))
checks.append(run_and_print('Checking command: gpg... ', partial(check_command_exists, 'gpg', 'gpg --version')))
checks.append(run_and_print('Checking command: expect... ', partial(check_command_exists, 'expect', 'expect -v')))
checks.append(run_and_print('Checking command: createrepo... ', partial(check_command_exists, 'createrepo', 'createrepo --version')))
checks.append(run_and_print('Checking command: s3cmd... ', partial(check_command_exists, 's3cmd', 's3cmd --version')))
checks.append(run_and_print('Checking command: apt-ftparchive... ', partial(check_command_exists, 'apt-ftparchive', 'apt-ftparchive --version')))
# boto, check error code being returned
location = os.path.dirname(os.path.realpath(__file__))
command = 'python %s/upload-s3.py -h' % (location)
checks.append(run_and_print('Testing boto python dependency... ', partial(check_command_exists, 'python-boto', command)))
checks.append(run_and_print('Checking java version... ', partial(verify_java_version, '1.7')))
checks.append(run_and_print('Checking java mvn version... ', partial(verify_mvn_java_version, '1.7', MVN)))
if check_only:
sys.exit(0)
if False in checks:
print("Exiting due to failing checks")
sys.exit(1)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Builds and publishes an Elasticsearch Release')
parser.add_argument('--branch', '-b', metavar='RELEASE_BRANCH', default=get_current_branch(),
help='The branch to release from. Defaults to the current branch.')
parser.add_argument('--cpus', '-c', metavar='1', default=1,
help='The number of cpus to use for running the test. Default is [1]')
parser.add_argument('--skiptests', '-t', dest='tests', action='store_false',
help='Skips tests before release. Tests are run by default.')
parser.set_defaults(tests=True)
parser.add_argument('--remote', '-r', metavar='origin', default='origin',
help='The remote to push the release commit and tag to. Default is [origin]')
parser.add_argument('--publish', '-d', dest='dryrun', action='store_false',
help='Publishes the release. Disabled by default.')
parser.add_argument('--smoke', '-s', dest='smoke', default='',
help='Smoke tests the given release')
parser.add_argument('--bwc', '-w', dest='bwc', metavar='backwards', default='backwards',
help='Backwards compatibility version path to use to run compatibility tests against')
parser.add_argument('--check-only', dest='check_only', action='store_true',
help='Checks and reports for all requirements and then exits')
parser.set_defaults(dryrun=True)
parser.set_defaults(smoke=None)
parser.set_defaults(check_only=False)
args = parser.parse_args()
bwc_path = args.bwc
src_branch = args.branch
remote = args.remote
run_tests = args.tests
dry_run = args.dryrun
cpus = args.cpus
build = not args.smoke
smoke_test_version = args.smoke
check_environment_and_commandline_tools(args.check_only)
# we print a notice if we cannot find the relevant info in the ~/.m2/settings.xml
print_sonatype_notice()
# building the release requires Java 1.7
verify_java_version('1.7')
verify_mvn_java_version('1.7', MVN)
if os.path.exists(LOG):
raise RuntimeError('please remove old release log %s first' % LOG)
if not dry_run:
print('WARNING: dryrun is set to "false" - this will push and publish the release')
input('Press Enter to continue...')
print(''.join(['-' for _ in range(80)]))
print('Preparing Release from branch [%s] running tests: [%s] dryrun: [%s]' % (src_branch, run_tests, dry_run))
print(' JAVA_HOME is [%s]' % JAVA_HOME)
print(' Running with maven command: [%s] ' % (MVN))
if build:
check_norelease(path='src')
ensure_checkout_is_clean(src_branch)
verify_lucene_version()
release_version = find_release_version(src_branch)
ensure_no_open_tickets(release_version)
if not dry_run:
smoke_test_version = release_version
head_hash = get_head_hash()
run_mvn('clean') # clean the env!
print(' Release version: [%s]' % release_version)
create_release_branch(remote, src_branch, release_version)
print(' Created release branch [%s]' % (release_branch(release_version)))
success = False
try:
pending_files = [POM_FILE, VERSION_FILE]
remove_maven_snapshot(POM_FILE, release_version)
remove_version_snapshot(VERSION_FILE, release_version)
print(' Done removing snapshot version')
add_pending_files(*pending_files) # expects var args use * to expand
commit_release(release_version)
pending_files = update_reference_docs(release_version)
version_head_hash = None
# split commits for docs and version to enable easy cherry-picking
if pending_files:
add_pending_files(*pending_files) # expects var args use * to expand
commit_feature_flags(release_version)
version_head_hash = get_head_hash()
print(' Committed release version [%s]' % release_version)
print(''.join(['-' for _ in range(80)]))
print('Building Release candidate')
input('Press Enter to continue...')
if not dry_run:
print(' Running maven builds now and publish to Sonatype and S3 - run-tests [%s]' % run_tests)
else:
print(' Running maven builds now run-tests [%s]' % run_tests)
build_release(release_version, run_tests=run_tests, dry_run=dry_run, cpus=cpus, bwc_version=find_bwc_version(release_version, bwc_path))
artifacts = get_artifacts(release_version)
smoke_test_release(release_version, artifacts, get_head_hash(), PLUGINS)
print(''.join(['-' for _ in range(80)]))
print('Finish Release -- dry_run: %s' % dry_run)
input('Press Enter to continue...')
print(' merge release branch, tag and push to %s %s -- dry_run: %s' % (remote, src_branch, dry_run))
merge_tag_push(remote, src_branch, release_version, dry_run)
print(' Updating package repositories -- dry_run: %s' % dry_run)
publish_repositories(src_branch, dry_run=dry_run)
cherry_pick_command = '.'
if version_head_hash:
cherry_pick_command = ' and cherry-pick the documentation changes: \'git cherry-pick %s\' to the development branch' % (version_head_hash)
pending_msg = """
Release successful. Pending steps:
* create a new vX.Y.Z label on github for the next release, with label color #dddddd (https://github.com/elastic/elasticsearch/labels)
* publish the maven artifacts on Sonatype: https://oss.sonatype.org/index.html
- here is a guide: http://central.sonatype.org/pages/releasing-the-deployment.html
* check if the release is there https://oss.sonatype.org/content/repositories/releases/org/elasticsearch/elasticsearch/%(version)s
* announce the release on the website / blog post
* tweet about the release
* announce the release in the google group/mailinglist
* Move the current branch to a SNAPSHOT version for the next point release%(cherry_pick)s
"""
print(pending_msg % { 'version' : release_version, 'cherry_pick' : cherry_pick_command} )
success = True
finally:
if not success:
run('git reset --hard HEAD')
run('git checkout %s' % src_branch)
elif dry_run:
run('git reset --hard %s' % head_hash)
run('git tag -d v%s' % release_version)
# we delete this one anyways
run('git branch -D %s' % (release_branch(release_version)))
else:
print("Skipping build - smoketest only against version %s" % smoke_test_version)
run_mvn('clean') # clean the env!
if smoke_test_version:
fetch(remote)
download_and_verify(smoke_test_version, artifact_names(smoke_test_version), plugins=PLUGINS)
|
maaku/scenariotest | refs/heads/master | xunit/__init__.py | 1 | # -*- coding: utf-8 -*-
# Copyright © 2013 by its contributors. See AUTHORS for details.
# Distributed under the MIT/X11 software license, see the accompanying
# file LICENSE or http://www.opensource.org/licenses/mit-license.php.
import unittest2 as unittest
from scenariotest import *
## Unit of code to test ##
def is_numeric(value_in):
try:
float(value_in)
return True
except Exception:
return False
## Test Case ##
class TestIsNumeric(unittest.TestCase):
__metaclass__ = ScenarioMeta
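# ScenarioMeta generates one test method per dict in 'scenarios' below, invoking __test__ with that dict's keys as keyword arguments.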
class is_numeric_basic(ScenarioTest):
scenarios = [
dict(val="1", expected=True),
dict(val="-1", expected=True),
dict(val=unicode("123" * 3), expected=True),
dict(val="Bad String", expected=False),
dict(val="Speaks Volumes", expected=False)
]
scenarios += [(dict(val=unicode(x), expected=True),
"check_unicode_%s" % x) for x in range(-2, 3)]
def __test__(self, val, expected):
actual = is_numeric(val)
if expected:
self.assertTrue(actual)
else:
self.assertFalse(actual)
|
narekgharibyan/keyvi | refs/heads/master | pykeyvi/tests/completion/multiword_completion_test.py | 3 | # -*- coding: utf-8 -*-
# Usage: py.test tests
import pykeyvi
import sys
import os
root = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(root, "../"))
from test_tools import tmp_dictionary
MULTIWORD_QUERY_SEPARATOR = '\x1b'
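# ESC ('\x1b') separates the space-tokenized lookup key from the display string in each Add() call below.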
def test_mw_completion():
c = pykeyvi.CompletionDictionaryCompiler({"memory_limit_mb":"10"})
c.Add("mozilla firefox" + '\x1b' + "mozilla firefox", 80)
c.Add("mozilla footprint" + '\x1b' + "mozilla footprint", 30)
c.Add("mozilla fans" + '\x1b' + "mozilla fans", 43)
c.Add("mozilla firebird" + '\x1b' + "mozilla firebird", 12)
c.Add("internet microsoft explorer" + '\x1b' + "microsoft internet explorer", 21)
c.Add("google chrome" + '\x1b' + "google chrome", 54)
c.Add("netscape navigator" + '\x1b' + "netscape navigator", 10)
with tmp_dictionary(c, 'mw_completion.kv') as d:
mw = pykeyvi.MultiWordCompletion(d)
matches = sorted([(match.GetAttribute('weight'), match.GetMatchedString())
for match in mw.GetCompletions("mozilla f")], reverse=True)
assert len(matches) == 4
assert matches[0][1] == b'mozilla firefox'
assert matches[1][1] == b'mozilla fans'
assert matches[2][1] == b'mozilla footprint'
assert matches[3][1] == b'mozilla firebird'
def test_overlong_completion():
c = pykeyvi.CompletionDictionaryCompiler({"memory_limit_mb":"10"})
c.Add("html disable" + MULTIWORD_QUERY_SEPARATOR + "html disable", 30075)
c.Add("html disabled" + MULTIWORD_QUERY_SEPARATOR + "html disabled", 29650)
c.Add("html display=main&referer=3c6120640656e466f726e26616d703b726566657265723d336336313230363436313734363132643631366136313738336432373636363136633733363532373230363837323635363633643237326636363732363136643635326536613733373032373230373436393734366336353364323735333734363137323734373336353639373436353237336535333734363137323734373336353639373436353363326636313365323032363637373433623230336336313230363436313734363132643631366136313738336432373636363136633733363532373230363837323635363632303364323732663733363537323736366336353734326636363666373236353665336636663730363536653436366637323635366535343732363536353364333132363631366437303362363436393733373036633631373933643664363136393665323636313664373033623734363137323637363537343639363433643330323636313664373033623734363137323637363537343734373937303635336433303236363136643730336236333664363433643338333032373230373436393734366336353364323737613735373232363735373536643663336236333662323037613735373232303436366637323635366532363735373536643663336236323635373237333639363336383734323733653436366637323635366532363735373536643663336236323635373237333639363336383734336332663631336532303230323636373734336232303463363536383732363736313665363737333636366637323635366526616d703b616a61783d3126616d703b6d6f62696c653d3026616d703b706167653d3026616d703b6f70656e466f72656e547265653d3127203e204c65687267616e6773666f72656e3c2f613e20&openforentree=1&targetid=130&targettype=1&cmd=6&page=null&fromhistory=1"
+ MULTIWORD_QUERY_SEPARATOR +
"html display=main&referer=3c6120640656e466f726e26616d703b726566657265723d336336313230363436313734363132643631366136313738336432373636363136633733363532373230363837323635363633643237326636363732363136643635326536613733373032373230373436393734366336353364323735333734363137323734373336353639373436353237336535333734363137323734373336353639373436353363326636313365323032363637373433623230336336313230363436313734363132643631366136313738336432373636363136633733363532373230363837323635363632303364323732663733363537323736366336353734326636363666373236353665336636663730363536653436366637323635366535343732363536353364333132363631366437303362363436393733373036633631373933643664363136393665323636313664373033623734363137323637363537343639363433643330323636313664373033623734363137323637363537343734373937303635336433303236363136643730336236333664363433643338333032373230373436393734366336353364323737613735373232363735373536643663336236333662323037613735373232303436366637323635366532363735373536643663336236323635373237333639363336383734323733653436366637323635366532363735373536643663336236323635373237333639363336383734336332663631336532303230323636373734336232303463363536383732363736313665363737333636366637323635366526616d703b616a61783d3126616d703b6d6f62696c653d3026616d703b706167653d3026616d703b6f70656e466f72656e547265653d3127203e204c65687267616e6773666f72656e3c2f613e20&openforentree=1&targetid=130&targettype=1&cmd=6&page=null&fromhistory=1"
, 23732)
with tmp_dictionary(c, 'mw_overlong_completion.kv') as d:
mw = pykeyvi.MultiWordCompletion(d)
matches = sorted([(match.GetAttribute('weight'), match.GetMatchedString())
for match in mw.GetCompletions("html dis")], reverse=True)
assert len(matches) == 3
assert matches[0][1] == b'html disable'
assert matches[1][1] == b'html disabled'
assert matches[2][1] == b"html display=main&referer=3c6120640656e466f726e26616d703b726566657265723d336336313230363436313734363132643631366136313738336432373636363136633733363532373230363837323635363633643237326636363732363136643635326536613733373032373230373436393734366336353364323735333734363137323734373336353639373436353237336535333734363137323734373336353639373436353363326636313365323032363637373433623230336336313230363436313734363132643631366136313738336432373636363136633733363532373230363837323635363632303364323732663733363537323736366336353734326636363666373236353665336636663730363536653436366637323635366535343732363536353364333132363631366437303362363436393733373036633631373933643664363136393665323636313664373033623734363137323637363537343639363433643330323636313664373033623734363137323637363537343734373937303635336433303236363136643730336236333664363433643338333032373230373436393734366336353364323737613735373232363735373536643663336236333662323037613735373232303436366637323635366532363735373536643663336236323635373237333639363336383734323733653436366637323635366532363735373536643663336236323635373237333639363336383734336332663631336532303230323636373734336232303463363536383732363736313665363737333636366637323635366526616d703b616a61783d3126616d703b6d6f62696c653d3026616d703b706167653d3026616d703b6f70656e466f72656e547265653d3127203e204c65687267616e6773666f72656e3c2f613e20&openforentree=1&targetid=130&targettype=1&cmd=6&page=null&fromhistory=1"
def test_exact_match_without_completion():
c = pykeyvi.CompletionDictionaryCompiler({"memory_limit_mb": "10"})
c.Add("mr" + '\x1b' + "mr", 80)
c.Add("mozilla firefox" + '\x1b' + "mozilla firefox", 80)
c.Add("maa" + '\x1b' + "maa", 80)
with tmp_dictionary(c, 'test_exact_match_without_completion.kv') as d:
mw = pykeyvi.MultiWordCompletion(d)
for m in mw.GetCompletions("mr "):
assert m.GetMatchedString() == b'mr'
|
tommyip/zulip | refs/heads/master | zilencer/management/__init__.py | 12133432 | |
ivanlyon/exercises | refs/heads/master | test/test_hexdec.py | 1 | import unittest
from general import hexdec
###############################################################################
class ValidInput(unittest.TestCase):
pair = ( (1,'1'),
(2,'2'),
(3,'3'),
(4,'4'),
(5,'5'),
(6,'6'),
(7,'7'),
(8,'8'),
(9,'9'),
(10,'A'),
(11,'B'),
(12,'C'),
(13,'D'),
(14,'E'),
(15,'F'),
(16,'10'),
(30,'1E'),
(127,'7F'),
(255,'FF'),
(500,'1F4'),
(1024,'400'),
(5100,'13EC'),
(65535,'FFFF'),
(65536,'10000') )
def testToHex(self):
for d, h in self.pair:
self.assertEqual(hexdec.toDec(h), str(d))
self.assertEqual(hexdec.toHex(str(d)), h)
###############################################################################
class InvalidInput(unittest.TestCase):
def testToHexNegative(self):
'''toHex should fail when the input < 0'''
self.assertRaises(hexdec.NotIntegerError, hexdec.toHex, str(-1))
def testToHexNonInteger(self):
'''toHex should fail with non-integer input'''
self.assertRaises(hexdec.NotIntegerError, hexdec.toHex, str(0.5))
def testToDecNegative(self):
'''toDec should fail when the input < 0'''
self.assertRaises(hexdec.NotHexadecimalError, hexdec.toDec, str(-1))
def testToDecNonInteger(self):
'''toDec should fail with non-integer input'''
self.assertRaises(hexdec.NotHexadecimalError, hexdec.toDec, str(0.5))
###############################################################################
if __name__ == '__main__':
unittest.main()
|
tempbottle/Firefly | refs/heads/master | gfirefly/management/commands/reloadmodule.py | 16 | #coding:utf8
'''
Created on 2013-8-12
@author: lan (www.9miao.com)
'''
import urllib,sys
def execute(*args):
"""
"""
if not args:
masterport = 9998
else:
masterport = int(args[0])
url = "http://localhost:%s/reloadmodule"%masterport
try:
response = urllib.urlopen(url)
except:
response = None
if response:
sys.stdout.write("reload module success \n")
else:
sys.stdout.write("reload module failed \n") |
RicardoJohann/um | refs/heads/master | erpnext/hr/doctype/appraisal/appraisal.py | 35 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import flt, getdate
from frappe import _
from frappe.model.mapper import get_mapped_doc
from frappe.model.document import Document
from erpnext.hr.utils import set_employee_name
class Appraisal(Document):
def validate(self):
if not self.status:
self.status = "Draft"
set_employee_name(self)
self.validate_dates()
self.validate_existing_appraisal()
self.calculate_total()
def get_employee_name(self):
self.employee_name = frappe.db.get_value("Employee", self.employee, "employee_name")
return self.employee_name
def validate_dates(self):
if getdate(self.start_date) > getdate(self.end_date):
frappe.throw(_("End Date can not be less than Start Date"))
def validate_existing_appraisal(self):
chk = frappe.db.sql("""select name from `tabAppraisal` where employee=%s
and (status='Submitted' or status='Completed')
and ((start_date>=%s and start_date<=%s)
or (end_date>=%s and end_date<=%s))""",
(self.employee,self.start_date,self.end_date,self.start_date,self.end_date))
if chk:
frappe.throw(_("Appraisal {0} created for Employee {1} in the given date range").format(chk[0][0], self.employee_name))
def calculate_total(self):
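# Weighted total: each goal contributes score * per_weightage / 100, and the weightages must sum to 100 (validated below).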
total, total_w = 0, 0
for d in self.get('appraisal_details'):
if d.score:
d.score_earned = flt(d.score) * flt(d.per_weightage) / 100
total = total + d.score_earned
total_w += flt(d.per_weightage)
if int(total_w) != 100:
frappe.throw(_("Total weightage assigned should be 100%. It is {0}").format(str(total_w) + "%"))
if frappe.db.get_value("Employee", self.employee, "user_id") != \
frappe.session.user and total == 0:
frappe.throw(_("Total cannot be zero"))
self.total_score = total
def on_submit(self):
frappe.db.set(self, 'status', 'Submitted')
def on_cancel(self):
frappe.db.set(self, 'status', 'Cancelled')
@frappe.whitelist()
def fetch_appraisal_template(source_name, target_doc=None):
target_doc = get_mapped_doc("Appraisal Template", source_name, {
"Appraisal Template": {
"doctype": "Appraisal",
},
"Appraisal Template Goal": {
"doctype": "Appraisal Goal",
}
}, target_doc)
return target_doc
|
SiriusBizniss/evetowerthing | refs/heads/master | chardet/chardetect.py | 1785 | #!/usr/bin/env python
"""
Script which takes one or more file paths and reports on their detected
encodings
Example::
% chardetect somefile someotherfile
somefile: windows-1252 with confidence 0.5
someotherfile: ascii with confidence 1.0
If no paths are provided, it takes its input from stdin.
"""
from __future__ import absolute_import, print_function, unicode_literals
import argparse
import sys
from io import open
from chardet import __version__
from chardet.universaldetector import UniversalDetector
def description_of(lines, name='stdin'):
"""
Return a string describing the probable encoding of a file or
list of strings.
:param lines: The lines to get the encoding of.
:type lines: Iterable of bytes
:param name: Name of file or collection of lines
:type name: str
"""
u = UniversalDetector()
for line in lines:
u.feed(line)
u.close()
result = u.result
if result['encoding']:
return '{0}: {1} with confidence {2}'.format(name, result['encoding'],
result['confidence'])
else:
return '{0}: no result'.format(name)
def main(argv=None):
'''
Handles command line arguments and gets things started.
:param argv: List of arguments, as if specified on the command-line.
If None, ``sys.argv[1:]`` is used instead.
:type argv: list of str
'''
# Get command line arguments
parser = argparse.ArgumentParser(
description="Takes one or more file paths and reports their detected \
encodings",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
conflict_handler='resolve')
parser.add_argument('input',
help='File whose encoding we would like to determine.',
type=argparse.FileType('rb'), nargs='*',
default=[sys.stdin])
parser.add_argument('--version', action='version',
version='%(prog)s {0}'.format(__version__))
args = parser.parse_args(argv)
for f in args.input:
if f.isatty():
print("You are running chardetect interactively. Press " +
"CTRL-D twice at the start of a blank line to signal the " +
"end of your input. If you want help, run chardetect " +
"--help\n", file=sys.stderr)
print(description_of(f, f.name))
if __name__ == '__main__':
main()
|
yangacer/majordomo | refs/heads/master | majordomo/client.py | 1 | import zmq
import functools
from errno import ETIME, EPROTO
from mdp import constants as consts
from mdp import MDPError
from stream import mdp_stream
from utils import verbose
class Client(object):
socket = None
def __init__(self, zmq_ctx, broker):
self.zmq_ctx = zmq_ctx
self.broker = broker
self.poller = zmq.Poller()
self.reconnect()
def reconnect(self):
self.close()
self.socket = self.zmq_ctx.socket(zmq.REQ)
self.socket.connect(self.broker)
self.poller.register(self.socket, zmq.POLLIN)
def request(self, service, msg, timeout=3.5, retry=0):
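        """
        Synchronously request `service` with payload `msg`.
        Each attempt blocks for up to `timeout` seconds; on timeout the
        socket is reconnected and the request is retried up to `retry`
        additional times before OSError(ETIME) is raised.
        """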
assert retry >= 0
req = [consts.client, service]
if isinstance(msg, list):
req.extend(msg)
else:
req.append(msg)
for i in xrange(0, retry + 1):
self.socket.send_multipart(req)
verbose(req)
items = self.poller.poll(timeout * 1000)
if items:
msg = self.socket.recv_multipart()
verbose(msg)
if len(msg) < 3:
raise MDPError('Partial reply')
protocol, rep_service = msg[0:2]
if protocol != consts.client or rep_service != service:
raise MDPError('Mismatched reply')
del msg[:2]
return msg
else:
self.reconnect()
raise OSError(ETIME, 'Request timeout')
def discovery(self, service):
return self.request('mmi.service', service)
def close(self):
if self.socket:
self.poller.unregister(self.socket)
self.socket.close()
pass
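# Illustrative sketch (not part of the original module): a minimal
# synchronous round trip. The broker endpoint and service name below are
# assumptions, not part of the majordomo API.
def _example_sync_request():
    ctx = zmq.Context.instance()
    client = Client(ctx, 'tcp://localhost:5555')
    try:
        # One retry on timeout; raises OSError(ETIME) if the broker is down.
        return client.request('echo', 'hello', timeout=2.0, retry=1)
    finally:
        client.close()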
class AsyncClient(object):
stream = None
def __init__(self, zmq_ctx, broker):
self.zmq_ctx = zmq_ctx
self.broker = broker
socket = self.zmq_ctx.socket(zmq.DEALER)
self.stream = mdp_stream(socket)
self.stream.connect(self.broker)
def request(self, service, msg, callback, timeout=3.5, retry=0):
"""
Request a service asynchronously.
        Request `service` with payload `msg`. `callback` will be invoked
        asynchronously when a response is received, when the response is
        malformed, or when a timeout occurs.
        Signature of `callback` is `callback(error_code, msg)`.
        If error_code is None, the response is passed as `msg`. Otherwise,
        error_code is an instance of OSError.
"""
assert retry >= 0
req = [b'', consts.client, service]
if isinstance(msg, list):
req.extend(msg)
else:
req.append(msg)
# Bind parameters
on_timeout = functools.partial(self.on_timeout, callback)
deadline = self.stream.io_loop.time() + timeout
timeout_hdl = self.stream.io_loop.add_timeout(deadline, on_timeout)
on_recv = functools.partial(self.on_recv, callback, service,
timeout_hdl)
self.stream.on_recv(on_recv)
self.stream.send_multipart(req)
verbose(req)
pass
def on_recv(self, callback, service, timeout_hdl, msg):
verbose(msg)
self.stream.io_loop.remove_timeout(timeout_hdl)
ec = None
try:
if len(msg) < 4:
raise MDPError('Partial reply')
_, protocol, rep_service = msg[0:3]
if protocol != consts.client or rep_service != service:
raise MDPError('Mismatched reply')
del msg[:3]
except MDPError as e:
ec = OSError(EPROTO, e.message)
callback(ec, msg)
pass
def on_timeout(self, callback):
self.stream.stop_on_recv()
callback(OSError(ETIME, 'Request timeout'), None)
pass
def close(self):
self.stream.close()
pass
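# Illustrative sketch (not part of the original module): a callback-style
# request with AsyncClient. Endpoint and service name are assumptions, and
# a running zmq IOLoop is required for the callback to fire.
def _example_async_request(zmq_ctx):
    results = []
    def on_reply(error, msg):
        # error is None on success, or an OSError (ETIME/EPROTO) on failure
        results.append((error, msg))
    client = AsyncClient(zmq_ctx, 'tcp://localhost:5555')
    client.request('echo', 'hello', on_reply, timeout=2.0)
    return client, results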
|
geometalab/osmaxx | refs/heads/develop | osmaxx/contrib/auth/frontend_permissions.py | 2 | from django.contrib.auth.decorators import user_passes_test
from django.core.urlresolvers import reverse_lazy
from django.utils.decorators import method_decorator
from rest_framework import permissions
from osmaxx.profile.models import Profile
def _may_user_access_this_excerpt(user, excerpt):
return excerpt.is_public or excerpt.owner == user
def _may_user_access_this_export(user, export):
return export.extraction_order.orderer == user
def _user_has_validated_email(user):
try:
profile = Profile.objects.get(associated_user=user)
except Profile.DoesNotExist:
return False
return profile.has_validated_email()
def validated_email_required(function=None):
"""
Decorator for views that checks that the user has set a validated email,
redirecting to the profile page if necessary.
"""
profile_url = reverse_lazy('profile:edit_view')
actual_decorator = user_passes_test(
_user_has_validated_email,
login_url=profile_url
)
if function:
return actual_decorator(function)
return actual_decorator
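# Illustrative sketch (not part of the original module): protecting a plain
# function-based view with the decorator above. The view itself is a
# hypothetical placeholder.
@validated_email_required
def _example_protected_view(request):
    from django.http import HttpResponse  # local import, sketch only
    return HttpResponse('only visible with a validated email address')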
class EmailRequiredMixin(object):
"""
Frontend Access Check Mixin for Class Based Views.
"""
@method_decorator(validated_email_required)
def dispatch(self, *args, **kwargs):
return super().dispatch(*args, **kwargs)
class AuthenticatedAndAccessPermission(permissions.BasePermission):
"""
Allows access only to authenticated users with confirmed email address.
"""
def has_permission(self, request, view):
return request.user.is_authenticated and _user_has_validated_email(request.user)
class HasBBoxAccessPermission(permissions.BasePermission):
message = 'Accessing this bounding box is not allowed.'
def has_object_permission(self, request, view, obj):
return _may_user_access_this_excerpt(request.user, obj.excerpt)
class HasExcerptAccessPermission(permissions.BasePermission):
message = 'Accessing this excerpt is not allowed.'
def has_object_permission(self, request, view, obj):
return _may_user_access_this_excerpt(request.user, obj)
class HasExportAccessPermission(permissions.BasePermission):
message = 'Accessing this export is not allowed.'
def has_object_permission(self, request, view, obj):
return _may_user_access_this_export(request.user, obj)
|
mvesper/invenio | refs/heads/master | modules/webalert/lib/alert_engine_config.py | 16 | # This file is part of Invenio.
# Copyright (C) 2006, 2007, 2008, 2010, 2011 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Invenio Alert Engine config parameters."""
__revision__ = \
"$Id$"
# are we debugging?
# 0 = production, nothing on the console, email sent
# 1 = messages on the console, email sent
# 2 = messages on the console, no email sent
# 3 = many messages on the console, no email sent
# 4 = many messages on the console, email sent to CFG_SITE_ADMIN_EMAIL
CFG_WEBALERT_DEBUG_LEVEL = 0
|
taknevski/tensorflow-xsmm | refs/heads/master | tensorflow/python/kernel_tests/sparse_xent_op_test.py | 103 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for SparseSoftmaxCrossEntropyWithLogits op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import time
import numpy as np
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops as ops_lib
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import variables
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import app
from tensorflow.python.platform import test
class SparseXentTest(test.TestCase):
def _npXent(self, features, labels):
features = np.reshape(features, [-1, features.shape[-1]])
labels = np.reshape(labels, [-1])
batch_dim = 0
class_dim = 1
batch_size = features.shape[batch_dim]
e = np.exp(features - np.reshape(
np.amax(
features, axis=class_dim), [batch_size, 1]))
probs = e / np.reshape(np.sum(e, axis=class_dim), [batch_size, 1])
labels_mat = np.zeros_like(probs).astype(probs.dtype)
labels_mat[np.arange(batch_size), labels] = 1.0
bp = (probs - labels_mat)
l = -np.sum(labels_mat * np.log(probs + 1.0e-20), axis=1)
return l, bp
def _testXent(self, np_features, np_labels):
np_loss, np_backprop = self._npXent(np_features, np_labels)
with self.test_session(use_gpu=True) as sess:
loss, backprop = gen_nn_ops._sparse_softmax_cross_entropy_with_logits(
np_features, np_labels)
tf_loss, tf_backprop = sess.run([loss, backprop])
self.assertAllCloseAccordingToType(np_loss, tf_loss)
self.assertAllCloseAccordingToType(np_backprop, tf_backprop)
def testSingleClass(self):
for label_dtype in np.int32, np.int64:
with self.test_session(use_gpu=True) as sess:
loss, backprop = gen_nn_ops._sparse_softmax_cross_entropy_with_logits(
np.array([[1.], [-1.], [0.]]).astype(np.float32),
np.array([0, 0, 0]).astype(label_dtype))
tf_loss, tf_backprop = sess.run([loss, backprop])
self.assertAllClose([0.0, 0.0, 0.0], tf_loss)
self.assertAllClose([[0.0], [0.0], [0.0]], tf_backprop)
def testInvalidLabel(self):
features = [[1., 1., 1., 1.], [1., 1., 1., 1.], [1., 2., 3., 4.],
[1., 2., 3., 4.]]
labels = [4, 3, 0, -1]
if test.is_built_with_cuda() and test.is_gpu_available():
with self.test_session(use_gpu=True) as sess:
loss, backprop = (gen_nn_ops._sparse_softmax_cross_entropy_with_logits(
features, labels))
tf_loss, tf_backprop = sess.run([loss, backprop])
self.assertAllClose(
[[np.nan] * 4, [0.25, 0.25, 0.25, -0.75],
[-0.968, 0.087, 0.237, 0.6439], [np.nan] * 4],
tf_backprop,
rtol=1e-3,
atol=1e-3)
self.assertAllClose(
[np.nan, 1.3862, 3.4420, np.nan], tf_loss, rtol=1e-3, atol=1e-3)
with self.test_session(use_gpu=False) as sess:
loss, backprop = (gen_nn_ops._sparse_softmax_cross_entropy_with_logits(
features, labels))
with self.assertRaisesOpError("Received a label value of"):
sess.run([loss, backprop])
def testNpXent(self):
# We create 2 batches of logits for testing.
# batch 0 is the boring uniform distribution: 1, 1, 1, 1, with target 3.
# batch 1 has a bit of difference: 1, 2, 3, 4, with target 0.
features = [[1., 1., 1., 1.], [1., 2., 3., 4.]]
labels = [3, 0]
# For batch 0, we expect the uniform distribution: 0.25, 0.25, 0.25, 0.25
# With a hard target 3, the backprop is [0.25, 0.25, 0.25, -0.75]
# The loss for this batch is -log(0.25) = 1.386
#
# For batch 1, we have:
# exp(0) = 1
# exp(1) = 2.718
# exp(2) = 7.389
# exp(3) = 20.085
# SUM = 31.192
# So we have as probabilities:
# exp(0) / SUM = 0.032
# exp(1) / SUM = 0.087
# exp(2) / SUM = 0.237
# exp(3) / SUM = 0.644
    # With a hard target 0, the backprop is [0.032 - 1.0 = -0.968, 0.087, 0.237, 0.644]
# The loss for this batch is [1.0 * -log(0.25), 1.0 * -log(0.032)]
# = [1.3862, 3.4420]
np_loss, np_backprop = self._npXent(np.array(features), np.array(labels))
self.assertAllClose(
np.array([[0.25, 0.25, 0.25, -0.75], [-0.968, 0.087, 0.237, 0.6439]]),
np_backprop,
rtol=1.e-3,
atol=1.e-3)
self.assertAllClose(
np.array([1.3862, 3.4420]), np_loss, rtol=1.e-3, atol=1.e-3)
def testShapeMismatch(self):
with self.test_session(use_gpu=True):
with self.assertRaisesRegexp(ValueError, ".*Rank mismatch:*"):
nn_ops.sparse_softmax_cross_entropy_with_logits(
labels=[[0, 2]], logits=[[0., 1.], [2., 3.], [2., 3.]])
def testScalar(self):
with self.test_session(use_gpu=True):
with self.assertRaisesRegexp(ValueError, ".*Logits cannot be scalars*"):
nn_ops.sparse_softmax_cross_entropy_with_logits(
labels=constant_op.constant(0), logits=constant_op.constant(1.0))
def testLabelsPlaceholderScalar(self):
with self.test_session(use_gpu=True):
labels = array_ops.placeholder(np.int32)
y = nn_ops.sparse_softmax_cross_entropy_with_logits(
labels=labels, logits=[[7.]])
with self.assertRaisesOpError("labels must be 1-D"):
y.eval(feed_dict={labels: 0})
def testVector(self):
with self.test_session(use_gpu=True):
loss = nn_ops.sparse_softmax_cross_entropy_with_logits(
labels=constant_op.constant(0), logits=constant_op.constant([1.0]))
self.assertAllClose(0.0, loss.eval())
def testFloat(self):
for label_dtype in np.int32, np.int64:
self._testXent(
np.array([[1., 1., 1., 1.], [1., 2., 3., 4.]]).astype(np.float32),
np.array([3, 0]).astype(label_dtype))
def testDouble(self):
for label_dtype in np.int32, np.int64:
self._testXent(
np.array([[1., 1., 1., 1.], [1., 2., 3., 4.]]).astype(np.float64),
np.array([0, 3]).astype(label_dtype))
def testHalf(self):
for label_dtype in np.int32, np.int64:
self._testXent(
np.array([[1., 1., 1., 1.], [1., 2., 3., 4.]]).astype(np.float16),
np.array([3, 0]).astype(label_dtype))
def testEmpty(self):
self._testXent(np.zeros((0, 3)), np.zeros((0,), dtype=np.int32))
def testGradient(self):
with self.test_session(use_gpu=True):
l = constant_op.constant([3, 0, 1], name="l")
f = constant_op.constant(
[0.1, 0.2, 0.3, 0.4, 0.1, 0.4, 0.9, 1.6, 0.1, 0.8, 2.7, 6.4],
shape=[3, 4],
dtype=dtypes.float64,
name="f")
x = nn_ops.sparse_softmax_cross_entropy_with_logits(
labels=l, logits=f, name="xent")
err = gradient_checker.compute_gradient_error(f, [3, 4], x, [3])
print("cross entropy gradient err = ", err)
self.assertLess(err, 5e-8)
def testSecondGradient(self):
images_placeholder = array_ops.placeholder(dtypes.float32, shape=(3, 2))
labels_placeholder = array_ops.placeholder(dtypes.int32, shape=(3))
weights = variables.Variable(random_ops.truncated_normal([2], stddev=1.0))
weights_with_zeros = array_ops.stack([array_ops.zeros([2]), weights],
axis=1)
logits = math_ops.matmul(images_placeholder, weights_with_zeros)
cross_entropy = nn_ops.sparse_softmax_cross_entropy_with_logits(
labels=labels_placeholder, logits=logits)
loss = math_ops.reduce_mean(cross_entropy)
    # Taking the second gradient should fail, since it is not
    # yet supported.
with self.assertRaisesRegexp(LookupError,
"explicitly disabled"):
_ = gradients_impl.hessians(loss, [weights])
def _testHighDim(self, features, labels):
np_loss, np_backprop = self._npXent(np.array(features), np.array(labels))
# manually reshape loss
np_loss = np.reshape(np_loss, np.array(labels).shape)
with self.test_session(use_gpu=True) as sess:
loss = nn_ops.sparse_softmax_cross_entropy_with_logits(
labels=labels, logits=features)
backprop = loss.op.inputs[0].op.outputs[1]
tf_loss, tf_backprop = sess.run([loss, backprop])
self.assertAllCloseAccordingToType(np_loss, tf_loss)
self.assertAllCloseAccordingToType(np_backprop, tf_backprop)
def testHighDim(self):
features = [[[1., 1., 1., 1.]], [[1., 2., 3., 4.]]]
labels = [[3], [0]]
self._testHighDim(features, labels)
def testHighDim2(self):
features = [[[1., 1., 1., 1.], [2., 2., 2., 2.]],
[[1., 2., 3., 4.], [5., 6., 7., 8.]]]
labels = [[3, 2], [0, 3]]
self._testHighDim(features, labels)
def testScalarHandling(self):
with self.test_session(use_gpu=False) as sess:
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
".*labels must be 1-D.*"):
labels = array_ops.placeholder(dtypes.int32, shape=[None, 1])
logits = array_ops.placeholder(dtypes.float32, shape=[None, 3])
ce = nn_ops.sparse_softmax_cross_entropy_with_logits(
labels=array_ops.squeeze(labels), logits=logits)
labels_v2 = np.zeros((1, 1), dtype=np.int32)
logits_v2 = np.random.randn(1, 3)
sess.run([ce], feed_dict={labels: labels_v2, logits: logits_v2})
def _sparse_vs_dense_xent_benchmark_dense(labels, logits):
labels = array_ops.identity(labels)
logits = array_ops.identity(logits)
with ops_lib.device("/cpu:0"): # Sparse-to-dense must be on CPU
batch_size = array_ops.shape(logits)[0]
num_entries = array_ops.shape(logits)[1]
length = batch_size * num_entries
labels += num_entries * math_ops.range(batch_size)
target = sparse_ops.sparse_to_dense(labels,
array_ops.stack([length]), 1.0, 0.0)
target = array_ops.reshape(target, array_ops.stack([-1, num_entries]))
crossent = nn_ops.softmax_cross_entropy_with_logits(
labels=target, logits=logits, name="SequenceLoss/CrossEntropy")
crossent_sum = math_ops.reduce_sum(crossent)
grads = gradients_impl.gradients([crossent_sum], [logits])[0]
return (crossent_sum, grads)
def _sparse_vs_dense_xent_benchmark_sparse(labels, logits):
# Using sparse_softmax_cross_entropy_with_logits
labels = labels.astype(np.int64)
labels = array_ops.identity(labels)
logits = array_ops.identity(logits)
  crossent = nn_ops.sparse_softmax_cross_entropy_with_logits(
      labels=labels, logits=logits, name="SequenceLoss/CrossEntropy")
crossent_sum = math_ops.reduce_sum(crossent)
grads = gradients_impl.gradients([crossent_sum], [logits])[0]
return (crossent_sum, grads)
def sparse_vs_dense_xent_benchmark(batch_size, num_entries, use_gpu):
config = config_pb2.ConfigProto()
config.allow_soft_placement = True
config.gpu_options.per_process_gpu_memory_fraction = 0.3
labels = np.random.randint(num_entries, size=batch_size).astype(np.int32)
logits = np.random.randn(batch_size, num_entries).astype(np.float32)
def _timer(sess, ops):
    # Warm up
for _ in range(20):
sess.run(ops)
# Timing run
start = time.time()
for _ in range(20):
sess.run(ops)
end = time.time()
return (end - start) / 20.0 # Average runtime per iteration
# Using sparse_to_dense and softmax_cross_entropy_with_logits
with session.Session(config=config) as sess:
if not use_gpu:
with ops_lib.device("/cpu:0"):
ops = _sparse_vs_dense_xent_benchmark_dense(labels, logits)
else:
ops = _sparse_vs_dense_xent_benchmark_dense(labels, logits)
delta_dense = _timer(sess, ops)
# Using sparse_softmax_cross_entropy_with_logits
with session.Session(config=config) as sess:
if not use_gpu:
with ops_lib.device("/cpu:0"):
ops = _sparse_vs_dense_xent_benchmark_sparse(labels, logits)
else:
ops = _sparse_vs_dense_xent_benchmark_sparse(labels, logits)
delta_sparse = _timer(sess, ops)
print("%d \t %d \t %s \t %f \t %f \t %f" % (batch_size, num_entries, use_gpu,
delta_dense, delta_sparse,
delta_sparse / delta_dense))
def main(_):
print("Sparse Xent vs. SparseToDense + Xent")
print("batch \t depth \t gpu \t dt(dense) \t dt(sparse) "
"\t dt(sparse)/dt(dense)")
for use_gpu in (False, True):
for batch_size in (32, 64, 128):
for num_entries in (100, 1000, 10000):
sparse_vs_dense_xent_benchmark(batch_size, num_entries, use_gpu)
sparse_vs_dense_xent_benchmark(32, 100000, use_gpu)
sparse_vs_dense_xent_benchmark(8, 1000000, use_gpu)
if __name__ == "__main__":
if "--benchmarks" in sys.argv:
sys.argv.remove("--benchmarks")
app.run()
else:
test.main()
|
shivam1111/odoo | refs/heads/8.0 | openerp/http.py | 77 | # -*- coding: utf-8 -*-
#----------------------------------------------------------
# OpenERP HTTP layer
#----------------------------------------------------------
import ast
import collections
import contextlib
import datetime
import errno
import functools
import getpass
import inspect
import logging
import mimetypes
import os
import pprint
import random
import re
import sys
import tempfile
import threading
import time
import traceback
import urlparse
import warnings
from zlib import adler32
import babel.core
import psycopg2
import simplejson
import werkzeug.contrib.sessions
import werkzeug.datastructures
import werkzeug.exceptions
import werkzeug.local
import werkzeug.routing
import werkzeug.wrappers
import werkzeug.wsgi
from werkzeug.wsgi import wrap_file
try:
import psutil
except ImportError:
psutil = None
import openerp
from openerp import SUPERUSER_ID
from openerp.service.server import memory_info
from openerp.service import security, model as service_model
from openerp.tools.func import lazy_property
from openerp.tools import ustr
_logger = logging.getLogger(__name__)
rpc_request = logging.getLogger(__name__ + '.rpc.request')
rpc_response = logging.getLogger(__name__ + '.rpc.response')
# 1 week cache for statics as advised by Google Page Speed
STATIC_CACHE = 60 * 60 * 24 * 7
#----------------------------------------------------------
# RequestHandler
#----------------------------------------------------------
# Thread local global request object
_request_stack = werkzeug.local.LocalStack()
request = _request_stack()
"""
A global proxy that always redirect to the current request object.
"""
def replace_request_password(args):
# password is always 3rd argument in a request, we replace it in RPC logs
# so it's easier to forward logs for diagnostics/debugging purposes...
if len(args) > 2:
args = list(args)
args[2] = '*'
return tuple(args)
# don't trigger debugger for those exceptions, they carry user-facing warnings
# and indications, they're not necessarily indicative of anything being
# *broken*
NO_POSTMORTEM = (openerp.osv.orm.except_orm,
openerp.exceptions.AccessError,
openerp.exceptions.AccessDenied,
openerp.exceptions.Warning,
openerp.exceptions.RedirectWarning)
def dispatch_rpc(service_name, method, params):
""" Handle a RPC call.
This is pure Python code, the actual marshalling (from/to XML-RPC) is done
in a upper layer.
"""
try:
rpc_request_flag = rpc_request.isEnabledFor(logging.DEBUG)
rpc_response_flag = rpc_response.isEnabledFor(logging.DEBUG)
if rpc_request_flag or rpc_response_flag:
start_time = time.time()
start_rss, start_vms = 0, 0
if psutil:
start_rss, start_vms = memory_info(psutil.Process(os.getpid()))
if rpc_request and rpc_response_flag:
openerp.netsvc.log(rpc_request, logging.DEBUG, '%s.%s' % (service_name, method), replace_request_password(params))
threading.current_thread().uid = None
threading.current_thread().dbname = None
if service_name == 'common':
dispatch = openerp.service.common.dispatch
elif service_name == 'db':
dispatch = openerp.service.db.dispatch
elif service_name == 'object':
dispatch = openerp.service.model.dispatch
elif service_name == 'report':
dispatch = openerp.service.report.dispatch
else:
dispatch = openerp.service.wsgi_server.rpc_handlers.get(service_name)
result = dispatch(method, params)
if rpc_request_flag or rpc_response_flag:
end_time = time.time()
end_rss, end_vms = 0, 0
if psutil:
end_rss, end_vms = memory_info(psutil.Process(os.getpid()))
logline = '%s.%s time:%.3fs mem: %sk -> %sk (diff: %sk)' % (service_name, method, end_time - start_time, start_vms / 1024, end_vms / 1024, (end_vms - start_vms)/1024)
if rpc_response_flag:
openerp.netsvc.log(rpc_response, logging.DEBUG, logline, result)
else:
openerp.netsvc.log(rpc_request, logging.DEBUG, logline, replace_request_password(params), depth=1)
return result
except NO_POSTMORTEM:
raise
except openerp.exceptions.DeferredException, e:
_logger.exception(openerp.tools.exception_to_unicode(e))
openerp.tools.debugger.post_mortem(openerp.tools.config, e.traceback)
raise
except Exception, e:
_logger.exception(openerp.tools.exception_to_unicode(e))
openerp.tools.debugger.post_mortem(openerp.tools.config, sys.exc_info())
raise
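# Illustrative sketch (not part of the original module): calling the
# standard "common" service through dispatch_rpc. The method and argument
# list shown are assumptions.
def _example_dispatch_rpc():
    return dispatch_rpc('common', 'version', [])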
def local_redirect(path, query=None, keep_hash=False, forward_debug=True, code=303):
url = path
if not query:
query = {}
if forward_debug and request and request.debug:
query['debug'] = None
if query:
url += '?' + werkzeug.url_encode(query)
if keep_hash:
return redirect_with_hash(url, code)
else:
return werkzeug.utils.redirect(url, code)
def redirect_with_hash(url, code=303):
# Most IE and Safari versions decided not to preserve location.hash upon
# redirect. And even if IE10 pretends to support it, it still fails
# inexplicably in case of multiple redirects (and we do have some).
# See extensive test page at http://greenbytes.de/tech/tc/httpredirects/
if request.httprequest.user_agent.browser in ('firefox',):
return werkzeug.utils.redirect(url, code)
return "<html><head><script>window.location = '%s' + location.hash;</script></head></html>" % url
class WebRequest(object):
""" Parent class for all Odoo Web request types, mostly deals with
initialization and setup of the request object (the dispatching itself has
to be handled by the subclasses)
:param httprequest: a wrapped werkzeug Request object
:type httprequest: :class:`werkzeug.wrappers.BaseRequest`
.. attribute:: httprequest
the original :class:`werkzeug.wrappers.Request` object provided to the
request
.. attribute:: params
:class:`~collections.Mapping` of request parameters, not generally
useful as they're provided directly to the handler method as keyword
arguments
"""
def __init__(self, httprequest):
self.httprequest = httprequest
self.httpresponse = None
self.httpsession = httprequest.session
self.disable_db = False
self.uid = None
self.endpoint = None
self.auth_method = None
self._cr = None
# prevents transaction commit, use when you catch an exception during handling
self._failed = None
# set db/uid trackers - they're cleaned up at the WSGI
# dispatching phase in openerp.service.wsgi_server.application
if self.db:
threading.current_thread().dbname = self.db
if self.session.uid:
threading.current_thread().uid = self.session.uid
@lazy_property
def env(self):
"""
        The :class:`~openerp.api.Environment` bound to the current request.
        Raises a :class:`RuntimeError` if the current request is not bound
        to a database.
        """
        if not self.db:
            raise RuntimeError('request not bound to a database')
return openerp.api.Environment(self.cr, self.uid, self.context)
@lazy_property
def context(self):
"""
:class:`~collections.Mapping` of context values for the current
request
"""
return dict(self.session.context)
@lazy_property
def lang(self):
self.session._fix_lang(self.context)
return self.context["lang"]
@lazy_property
def session(self):
"""
a :class:`OpenERPSession` holding the HTTP session data for the
current http session
"""
return self.httprequest.session
@property
def cr(self):
"""
:class:`~openerp.sql_db.Cursor` initialized for the current method
call.
Accessing the cursor when the current request uses the ``none``
authentication will raise an exception.
"""
# can not be a lazy_property because manual rollback in _call_function
# if already set (?)
if not self.db:
            raise RuntimeError('request not bound to a database')
if not self._cr:
self._cr = self.registry.cursor()
return self._cr
def __enter__(self):
_request_stack.push(self)
return self
def __exit__(self, exc_type, exc_value, traceback):
_request_stack.pop()
if self._cr:
if exc_type is None and not self._failed:
self._cr.commit()
self._cr.close()
# just to be sure no one tries to re-use the request
self.disable_db = True
self.uid = None
def set_handler(self, endpoint, arguments, auth):
# is this needed ?
arguments = dict((k, v) for k, v in arguments.iteritems()
if not k.startswith("_ignored_"))
endpoint.arguments = arguments
self.endpoint = endpoint
self.auth_method = auth
def _handle_exception(self, exception):
"""Called within an except block to allow converting exceptions
        to arbitrary responses. Anything returned (except None) will
be used as response."""
self._failed = exception # prevent tx commit
if not isinstance(exception, NO_POSTMORTEM) \
and not isinstance(exception, werkzeug.exceptions.HTTPException):
openerp.tools.debugger.post_mortem(
openerp.tools.config, sys.exc_info())
raise
def _call_function(self, *args, **kwargs):
request = self
if self.endpoint.routing['type'] != self._request_type:
msg = "%s, %s: Function declared as capable of handling request of type '%s' but called with a request of type '%s'"
params = (self.endpoint.original, self.httprequest.path, self.endpoint.routing['type'], self._request_type)
_logger.error(msg, *params)
raise werkzeug.exceptions.BadRequest(msg % params)
kwargs.update(self.endpoint.arguments)
# Backward for 7.0
if self.endpoint.first_arg_is_req:
args = (request,) + args
        # Correct exception handling and concurrency retry
@service_model.check
def checked_call(___dbname, *a, **kw):
            # The decorator can call us more than once if there is a database error. In this
            # case, the request cursor is unusable. Roll back the transaction to create a new one.
if self._cr:
self._cr.rollback()
return self.endpoint(*a, **kw)
if self.db:
return checked_call(self.db, *args, **kwargs)
return self.endpoint(*args, **kwargs)
@property
def debug(self):
""" Indicates whether the current request is in "debug" mode
"""
return 'debug' in self.httprequest.args
@contextlib.contextmanager
def registry_cr(self):
warnings.warn('please use request.registry and request.cr directly', DeprecationWarning)
yield (self.registry, self.cr)
@lazy_property
def session_id(self):
"""
opaque identifier for the :class:`OpenERPSession` instance of
the current request
.. deprecated:: 8.0
Use the ``sid`` attribute on :attr:`.session`
"""
return self.session.sid
@property
def registry(self):
"""
The registry to the database linked to this request. Can be ``None``
if the current request uses the ``none`` authentication.
.. deprecated:: 8.0
use :attr:`.env`
"""
return openerp.modules.registry.RegistryManager.get(self.db) if self.db else None
@property
def db(self):
"""
The database linked to this request. Can be ``None``
if the current request uses the ``none`` authentication.
"""
return self.session.db if not self.disable_db else None
@lazy_property
def httpsession(self):
""" HTTP session data
.. deprecated:: 8.0
Use :attr:`.session` instead.
"""
return self.session
def route(route=None, **kw):
"""
Decorator marking the decorated method as being a handler for
requests. The method must be part of a subclass of ``Controller``.
:param route: string or array. The route part that will determine which
http requests will match the decorated method. Can be a
single string or an array of strings. See werkzeug's routing
documentation for the format of route expression (
http://werkzeug.pocoo.org/docs/routing/ ).
:param type: The type of request, can be ``'http'`` or ``'json'``.
    :param auth: The type of authentication method, can be one of the following:
* ``user``: The user must be authenticated and the current request
will perform using the rights of the user.
* ``admin``: The user may not be authenticated and the current request
will perform using the admin user.
* ``none``: The method is always active, even if there is no
database. Mainly used by the framework and authentication
                   modules. The request code will not have any facilities to access
the database nor have any configuration indicating the current
database nor the current user.
:param methods: A sequence of http methods this route applies to. If not
specified, all methods are allowed.
:param cors: The Access-Control-Allow-Origin cors directive value.
"""
routing = kw.copy()
    assert 'type' not in routing or routing['type'] in ("http", "json")
def decorator(f):
if route:
if isinstance(route, list):
routes = route
else:
routes = [route]
routing['routes'] = routes
@functools.wraps(f)
def response_wrap(*args, **kw):
response = f(*args, **kw)
if isinstance(response, Response) or f.routing_type == 'json':
return response
if isinstance(response, basestring):
return Response(response)
if isinstance(response, werkzeug.exceptions.HTTPException):
response = response.get_response(request.httprequest.environ)
if isinstance(response, werkzeug.wrappers.BaseResponse):
response = Response.force_type(response)
response.set_default()
return response
_logger.warn("<function %s.%s> returns an invalid response type for an http request" % (f.__module__, f.__name__))
return response
response_wrap.routing = routing
response_wrap.original_func = f
return response_wrap
return decorator
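# Illustrative sketch (not part of the original module): a minimal
# controller method using the @route decorator. The path and response body
# are hypothetical (Controller itself is defined further down):
#
#     class Greeting(Controller):
#         @route('/greeting', type='http', auth='user')
#         def greeting(self, **kw):
#             return "<h1>Hello</h1>"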
class JsonRequest(WebRequest):
""" Request handler for `JSON-RPC 2
<http://www.jsonrpc.org/specification>`_ over HTTP
* ``method`` is ignored
* ``params`` must be a JSON object (not an array) and is passed as keyword
arguments to the handler method
* the handler method's result is returned as JSON-RPC ``result`` and
wrapped in the `JSON-RPC Response
<http://www.jsonrpc.org/specification#response_object>`_
    Successful request::
--> {"jsonrpc": "2.0",
"method": "call",
"params": {"context": {},
"arg1": "val1" },
"id": null}
<-- {"jsonrpc": "2.0",
"result": { "res1": "val1" },
"id": null}
    Request producing an error::
--> {"jsonrpc": "2.0",
"method": "call",
"params": {"context": {},
"arg1": "val1" },
"id": null}
<-- {"jsonrpc": "2.0",
"error": {"code": 1,
"message": "End user error message.",
"data": {"code": "codestring",
"debug": "traceback" } },
"id": null}
"""
_request_type = "json"
def __init__(self, *args):
super(JsonRequest, self).__init__(*args)
self.jsonp_handler = None
args = self.httprequest.args
jsonp = args.get('jsonp')
self.jsonp = jsonp
request = None
request_id = args.get('id')
if jsonp and self.httprequest.method == 'POST':
            # jsonp two-step flow, step 1 (POST): save the call
def handler():
self.session['jsonp_request_%s' % (request_id,)] = self.httprequest.form['r']
self.session.modified = True
headers=[('Content-Type', 'text/plain; charset=utf-8')]
r = werkzeug.wrappers.Response(request_id, headers=headers)
return r
self.jsonp_handler = handler
return
elif jsonp and args.get('r'):
# jsonp method GET
request = args.get('r')
elif jsonp and request_id:
            # jsonp two-step flow, step 2 (GET): run the saved call and return the result
request = self.session.pop('jsonp_request_%s' % (request_id,), '{}')
else:
# regular jsonrpc2
request = self.httprequest.stream.read()
# Read POST content or POST Form Data named "request"
try:
self.jsonrequest = simplejson.loads(request)
except simplejson.JSONDecodeError:
msg = 'Invalid JSON data: %r' % (request,)
_logger.error('%s: %s', self.httprequest.path, msg)
raise werkzeug.exceptions.BadRequest(msg)
self.params = dict(self.jsonrequest.get("params", {}))
self.context = self.params.pop('context', dict(self.session.context))
def _json_response(self, result=None, error=None):
response = {
'jsonrpc': '2.0',
'id': self.jsonrequest.get('id')
}
if error is not None:
response['error'] = error
if result is not None:
response['result'] = result
if self.jsonp:
            # If we use jsonp, that means we are called from another host.
            # Some browsers (IE and Safari) do not allow third-party cookies,
            # so we then need to manage http sessions manually.
response['session_id'] = self.session_id
mime = 'application/javascript'
body = "%s(%s);" % (self.jsonp, simplejson.dumps(response),)
else:
mime = 'application/json'
body = simplejson.dumps(response)
return Response(
body, headers=[('Content-Type', mime),
('Content-Length', len(body))])
def _handle_exception(self, exception):
"""Called within an except block to allow converting exceptions
to arbitrary responses. Anything returned (except None) will
be used as response."""
try:
return super(JsonRequest, self)._handle_exception(exception)
except Exception:
if not isinstance(exception, (openerp.exceptions.Warning, SessionExpiredException)):
_logger.exception("Exception during JSON request handling.")
error = {
'code': 200,
'message': "Odoo Server Error",
'data': serialize_exception(exception)
}
if isinstance(exception, AuthenticationError):
error['code'] = 100
error['message'] = "Odoo Session Invalid"
if isinstance(exception, SessionExpiredException):
error['code'] = 100
error['message'] = "Odoo Session Expired"
return self._json_response(error=error)
def dispatch(self):
if self.jsonp_handler:
return self.jsonp_handler()
try:
rpc_request_flag = rpc_request.isEnabledFor(logging.DEBUG)
rpc_response_flag = rpc_response.isEnabledFor(logging.DEBUG)
if rpc_request_flag or rpc_response_flag:
endpoint = self.endpoint.method.__name__
model = self.params.get('model')
method = self.params.get('method')
args = self.params.get('args', [])
start_time = time.time()
_, start_vms = 0, 0
if psutil:
_, start_vms = memory_info(psutil.Process(os.getpid()))
if rpc_request and rpc_response_flag:
rpc_request.debug('%s: %s %s, %s',
endpoint, model, method, pprint.pformat(args))
result = self._call_function(**self.params)
if rpc_request_flag or rpc_response_flag:
end_time = time.time()
_, end_vms = 0, 0
if psutil:
_, end_vms = memory_info(psutil.Process(os.getpid()))
logline = '%s: %s %s: time:%.3fs mem: %sk -> %sk (diff: %sk)' % (
endpoint, model, method, end_time - start_time, start_vms / 1024, end_vms / 1024, (end_vms - start_vms)/1024)
if rpc_response_flag:
rpc_response.debug('%s, %s', logline, pprint.pformat(result))
else:
rpc_request.debug(logline)
return self._json_response(result)
except Exception, e:
return self._handle_exception(e)
def serialize_exception(e):
tmp = {
"name": type(e).__module__ + "." + type(e).__name__ if type(e).__module__ else type(e).__name__,
"debug": traceback.format_exc(),
"message": ustr(e),
"arguments": to_jsonable(e.args),
}
if isinstance(e, openerp.osv.osv.except_osv):
tmp["exception_type"] = "except_osv"
elif isinstance(e, openerp.exceptions.Warning):
tmp["exception_type"] = "warning"
elif isinstance(e, openerp.exceptions.AccessError):
tmp["exception_type"] = "access_error"
elif isinstance(e, openerp.exceptions.AccessDenied):
tmp["exception_type"] = "access_denied"
return tmp
def to_jsonable(o):
    if isinstance(o, (str, unicode, int, long, bool, float)) or o is None:
        return o
    if isinstance(o, (list, tuple)):
return [to_jsonable(x) for x in o]
if isinstance(o, dict):
tmp = {}
for k, v in o.items():
tmp[u"%s" % k] = to_jsonable(v)
return tmp
return ustr(o)
def jsonrequest(f):
"""
.. deprecated:: 8.0
Use the :func:`~openerp.http.route` decorator instead.
"""
base = f.__name__.lstrip('/')
if f.__name__ == "index":
base = ""
return route([base, base + "/<path:_ignored_path>"], type="json", auth="user", combine=True)(f)
class HttpRequest(WebRequest):
""" Handler for the ``http`` request type.
matched routing parameters, query string parameters, form_ parameters
and files are passed to the handler method as keyword arguments.
In case of name conflict, routing parameters have priority.
The handler method's result can be:
* a falsy value, in which case the HTTP response will be an
`HTTP 204`_ (No Content)
* a werkzeug Response object, which is returned as-is
* a ``str`` or ``unicode``, will be wrapped in a Response object and
interpreted as HTML
.. _form: http://www.w3.org/TR/html401/interact/forms.html#h-17.13.4.2
.. _HTTP 204: http://tools.ietf.org/html/rfc7231#section-6.3.5
"""
_request_type = "http"
def __init__(self, *args):
super(HttpRequest, self).__init__(*args)
params = self.httprequest.args.to_dict()
params.update(self.httprequest.form.to_dict())
params.update(self.httprequest.files.to_dict())
params.pop('session_id', None)
self.params = params
def _handle_exception(self, exception):
"""Called within an except block to allow converting exceptions
        to arbitrary responses. Anything returned (except None) will
be used as response."""
try:
return super(HttpRequest, self)._handle_exception(exception)
except SessionExpiredException:
if not request.params.get('noredirect'):
query = werkzeug.urls.url_encode({
'redirect': request.httprequest.url,
})
return werkzeug.utils.redirect('/web/login?%s' % query)
except werkzeug.exceptions.HTTPException, e:
return e
def dispatch(self):
if request.httprequest.method == 'OPTIONS' and request.endpoint and request.endpoint.routing.get('cors'):
headers = {
'Access-Control-Max-Age': 60 * 60 * 24,
'Access-Control-Allow-Headers': 'Origin, X-Requested-With, Content-Type, Accept'
}
return Response(status=200, headers=headers)
r = self._call_function(**self.params)
if not r:
r = Response(status=204) # no content
return r
def make_response(self, data, headers=None, cookies=None):
""" Helper for non-HTML responses, or HTML responses with custom
response headers or cookies.
        While handlers can just return the HTML markup of a page they want to
        send as a string, if non-HTML data is returned they need to create a
        complete response object, or the returned data will not be correctly
        interpreted by the clients.
:param basestring data: response body
:param headers: HTTP headers to set on the response
:type headers: ``[(name, value)]``
:param collections.Mapping cookies: cookies to set on the client
"""
response = Response(data, headers=headers)
if cookies:
for k, v in cookies.iteritems():
response.set_cookie(k, v)
return response
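    # Illustrative sketch (not part of the original module): returning CSV
    # from a handler with explicit headers and a cookie. Payload, filename
    # and cookie are hypothetical:
    #
    #     return request.make_response(
    #         csv_data,
    #         headers=[('Content-Type', 'text/csv'),
    #                  ('Content-Disposition', 'attachment; filename="export.csv"')],
    #         cookies={'last_export': 'demo'})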
def render(self, template, qcontext=None, lazy=True, **kw):
""" Lazy render of a QWeb template.
        The actual rendering of the given template will occur at the end of
the dispatching. Meanwhile, the template and/or qcontext can be
altered or even replaced by a static response.
:param basestring template: template to render
:param dict qcontext: Rendering context to use
:param bool lazy: whether the template rendering should be deferred
until the last possible moment
:param kw: forwarded to werkzeug's Response object
"""
response = Response(template=template, qcontext=qcontext, **kw)
if not lazy:
return response.render()
return response
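    # Illustrative sketch (not part of the original module): lazily
    # rendering a QWeb template from a handler. The template name and
    # rendering context are hypothetical:
    #
    #     return request.render('my_module.my_template', {'records': records})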
def not_found(self, description=None):
""" Shortcut for a `HTTP 404
<http://tools.ietf.org/html/rfc7231#section-6.5.4>`_ (Not Found)
response
"""
return werkzeug.exceptions.NotFound(description)
def httprequest(f):
"""
.. deprecated:: 8.0
Use the :func:`~openerp.http.route` decorator instead.
"""
base = f.__name__.lstrip('/')
if f.__name__ == "index":
base = ""
return route([base, base + "/<path:_ignored_path>"], type="http", auth="user", combine=True)(f)
#----------------------------------------------------------
# Controller and route registration
#----------------------------------------------------------
addons_module = {}
addons_manifest = {}
controllers_per_module = collections.defaultdict(list)
class ControllerType(type):
def __init__(cls, name, bases, attrs):
super(ControllerType, cls).__init__(name, bases, attrs)
# flag old-style methods with req as first argument
for k, v in attrs.items():
if inspect.isfunction(v) and hasattr(v, 'original_func'):
# Set routing type on original functions
routing_type = v.routing.get('type')
parent = [claz for claz in bases if isinstance(claz, ControllerType) and hasattr(claz, k)]
parent_routing_type = getattr(parent[0], k).original_func.routing_type if parent else routing_type or 'http'
if routing_type is not None and routing_type is not parent_routing_type:
routing_type = parent_routing_type
_logger.warn("Subclass re-defines <function %s.%s.%s> with different type than original."
" Will use original type: %r" % (cls.__module__, cls.__name__, k, parent_routing_type))
v.original_func.routing_type = routing_type or parent_routing_type
spec = inspect.getargspec(v.original_func)
first_arg = spec.args[1] if len(spec.args) >= 2 else None
if first_arg in ["req", "request"]:
v._first_arg_is_req = True
# store the controller in the controllers list
name_class = ("%s.%s" % (cls.__module__, cls.__name__), cls)
class_path = name_class[0].split(".")
if not class_path[:2] == ["openerp", "addons"]:
module = ""
else:
# we want to know all modules that have controllers
module = class_path[2]
# but we only store controllers directly inheriting from Controller
if not "Controller" in globals() or not Controller in bases:
return
controllers_per_module[module].append(name_class)
class Controller(object):
__metaclass__ = ControllerType
class EndPoint(object):
def __init__(self, method, routing):
self.method = method
self.original = getattr(method, 'original_func', method)
self.routing = routing
self.arguments = {}
@property
def first_arg_is_req(self):
# Backward for 7.0
return getattr(self.method, '_first_arg_is_req', False)
def __call__(self, *args, **kw):
return self.method(*args, **kw)
def routing_map(modules, nodb_only, converters=None):
routing_map = werkzeug.routing.Map(strict_slashes=False, converters=converters)
def get_subclasses(klass):
def valid(c):
return c.__module__.startswith('openerp.addons.') and c.__module__.split(".")[2] in modules
subclasses = klass.__subclasses__()
result = []
for subclass in subclasses:
if valid(subclass):
result.extend(get_subclasses(subclass))
if not result and valid(klass):
result = [klass]
return result
uniq = lambda it: collections.OrderedDict((id(x), x) for x in it).values()
for module in modules:
if module not in controllers_per_module:
continue
for _, cls in controllers_per_module[module]:
subclasses = uniq(c for c in get_subclasses(cls) if c is not cls)
if subclasses:
name = "%s (extended by %s)" % (cls.__name__, ', '.join(sub.__name__ for sub in subclasses))
cls = type(name, tuple(reversed(subclasses)), {})
o = cls()
members = inspect.getmembers(o, inspect.ismethod)
for _, mv in members:
if hasattr(mv, 'routing'):
routing = dict(type='http', auth='user', methods=None, routes=None)
methods_done = list()
# update routing attributes from subclasses(auth, methods...)
for claz in reversed(mv.im_class.mro()):
fn = getattr(claz, mv.func_name, None)
if fn and hasattr(fn, 'routing') and fn not in methods_done:
methods_done.append(fn)
routing.update(fn.routing)
if not nodb_only or routing['auth'] == "none":
                        assert routing['routes'], "Method %r has no route defined" % mv
endpoint = EndPoint(mv, routing)
for url in routing['routes']:
if routing.get("combine", False):
# deprecated v7 declaration
url = o._cp_path.rstrip('/') + '/' + url.lstrip('/')
if url.endswith("/") and len(url) > 1:
url = url[: -1]
xtra_keys = 'defaults subdomain build_only strict_slashes redirect_to alias host'.split()
kw = {k: routing[k] for k in xtra_keys if k in routing}
routing_map.add(werkzeug.routing.Rule(url, endpoint=endpoint, methods=routing['methods'], **kw))
return routing_map
#----------------------------------------------------------
# HTTP Sessions
#----------------------------------------------------------
class AuthenticationError(Exception):
pass
class SessionExpiredException(Exception):
pass
class Service(object):
"""
.. deprecated:: 8.0
Use :func:`dispatch_rpc` instead.
"""
def __init__(self, session, service_name):
self.session = session
self.service_name = service_name
def __getattr__(self, method):
def proxy_method(*args):
result = dispatch_rpc(self.service_name, method, args)
return result
return proxy_method
class Model(object):
"""
.. deprecated:: 8.0
Use the registry and cursor in :data:`request` instead.
"""
def __init__(self, session, model):
self.session = session
self.model = model
self.proxy = self.session.proxy('object')
def __getattr__(self, method):
self.session.assert_valid()
def proxy(*args, **kw):
# Can't provide any retro-compatibility for this case, so we check it and raise an Exception
# to tell the programmer to adapt his code
if not request.db or not request.uid or self.session.db != request.db \
or self.session.uid != request.uid:
raise Exception("Trying to use Model with badly configured database or user.")
if method.startswith('_'):
raise Exception("Access denied")
mod = request.registry[self.model]
meth = getattr(mod, method)
# make sure to instantiate an environment
cr = request.env.cr
result = meth(cr, request.uid, *args, **kw)
# reorder read
if method == "read":
if isinstance(result, list) and len(result) > 0 and "id" in result[0]:
index = {}
for r in result:
index[r['id']] = r
result = [index[x] for x in args[0] if x in index]
return result
return proxy
class OpenERPSession(werkzeug.contrib.sessions.Session):
def __init__(self, *args, **kwargs):
self.inited = False
self.modified = False
self.rotate = False
super(OpenERPSession, self).__init__(*args, **kwargs)
self.inited = True
self._default_values()
self.modified = False
def __getattr__(self, attr):
return self.get(attr, None)
def __setattr__(self, k, v):
if getattr(self, "inited", False):
try:
object.__getattribute__(self, k)
except:
return self.__setitem__(k, v)
object.__setattr__(self, k, v)
def authenticate(self, db, login=None, password=None, uid=None):
"""
Authenticate the current user with the given db, login and
password. If successful, store the authentication parameters in the
current session and request.
:param uid: If not None, that user id will be used instead the login
to authenticate the user.
"""
if uid is None:
wsgienv = request.httprequest.environ
env = dict(
base_location=request.httprequest.url_root.rstrip('/'),
HTTP_HOST=wsgienv['HTTP_HOST'],
REMOTE_ADDR=wsgienv['REMOTE_ADDR'],
)
uid = dispatch_rpc('common', 'authenticate', [db, login, password, env])
else:
security.check(db, uid, password)
self.db = db
self.uid = uid
self.login = login
self.password = password
request.uid = uid
request.disable_db = False
if uid: self.get_context()
return uid
def check_security(self):
"""
Check the current authentication parameters to know if those are still
valid. This method should be called at each request. If the
authentication fails, a :exc:`SessionExpiredException` is raised.
"""
if not self.db or not self.uid:
raise SessionExpiredException("Session expired")
security.check(self.db, self.uid, self.password)
def logout(self, keep_db=False):
for k in self.keys():
if not (keep_db and k == 'db'):
del self[k]
self._default_values()
self.rotate = True
def _default_values(self):
self.setdefault("db", None)
self.setdefault("uid", None)
self.setdefault("login", None)
self.setdefault("password", None)
self.setdefault("context", {})
def get_context(self):
"""
Re-initializes the current user's session context (based on his
preferences) by calling res.users.get_context() with the old context.
:returns: the new context
"""
assert self.uid, "The user needs to be logged-in to initialize his context"
self.context = request.registry.get('res.users').context_get(request.cr, request.uid) or {}
self.context['uid'] = self.uid
self._fix_lang(self.context)
return self.context
def _fix_lang(self, context):
""" OpenERP provides languages which may not make sense and/or may not
be understood by the web client's libraries.
Fix those here.
:param dict context: context to fix
"""
lang = context.get('lang')
# inane OpenERP locale
if lang == 'ar_AR':
lang = 'ar'
# lang to lang_REGION (datejs only handles lang_REGION, no bare langs)
if lang in babel.core.LOCALE_ALIASES:
lang = babel.core.LOCALE_ALIASES[lang]
context['lang'] = lang or 'en_US'
# Deprecated to be removed in 9
"""
Damn properties for retro-compatibility. All of that is deprecated,
all of that.
"""
@property
def _db(self):
return self.db
@_db.setter
def _db(self, value):
self.db = value
@property
def _uid(self):
return self.uid
@_uid.setter
def _uid(self, value):
self.uid = value
@property
def _login(self):
return self.login
@_login.setter
def _login(self, value):
self.login = value
@property
def _password(self):
return self.password
@_password.setter
def _password(self, value):
self.password = value
def send(self, service_name, method, *args):
"""
.. deprecated:: 8.0
Use :func:`dispatch_rpc` instead.
"""
return dispatch_rpc(service_name, method, args)
def proxy(self, service):
"""
.. deprecated:: 8.0
Use :func:`dispatch_rpc` instead.
"""
return Service(self, service)
def assert_valid(self, force=False):
"""
.. deprecated:: 8.0
Use :meth:`check_security` instead.
Ensures this session is valid (logged into the openerp server)
"""
if self.uid and not force:
return
# TODO use authenticate instead of login
self.uid = self.proxy("common").login(self.db, self.login, self.password)
if not self.uid:
raise AuthenticationError("Authentication failure")
def ensure_valid(self):
"""
.. deprecated:: 8.0
Use :meth:`check_security` instead.
"""
if self.uid:
try:
self.assert_valid(True)
except Exception:
self.uid = None
def execute(self, model, func, *l, **d):
"""
.. deprecated:: 8.0
Use the registry and cursor in :data:`request` instead.
"""
model = self.model(model)
r = getattr(model, func)(*l, **d)
return r
def exec_workflow(self, model, id, signal):
"""
.. deprecated:: 8.0
Use the registry and cursor in :data:`request` instead.
"""
self.assert_valid()
r = self.proxy('object').exec_workflow(self.db, self.uid, self.password, model, signal, id)
return r
def model(self, model):
"""
.. deprecated:: 8.0
Use the registry and cursor in :data:`request` instead.
Get an RPC proxy for the object ``model``, bound to this session.
:param model: an OpenERP model name
:type model: str
:rtype: a model object
"""
if not self.db:
raise SessionExpiredException("Session expired")
return Model(self, model)
def save_action(self, action):
"""
        This method stores an action object in the session and returns an integer
identifying that action. The method get_action() can be used to get
back the action.
:param the_action: The action to save in the session.
:type the_action: anything
:return: A key identifying the saved action.
:rtype: integer
"""
saved_actions = self.setdefault('saved_actions', {"next": 1, "actions": {}})
# we don't allow more than 10 stored actions
if len(saved_actions["actions"]) >= 10:
del saved_actions["actions"][min(saved_actions["actions"])]
key = saved_actions["next"]
saved_actions["actions"][key] = action
saved_actions["next"] = key + 1
self.modified = True
return key
def get_action(self, key):
"""
        Gets back a previously saved action. This method can return None if the action
        was saved too long ago (this case should be handled gracefully).
:param key: The key given by save_action()
:type key: integer
:return: The saved action or None.
:rtype: anything
"""
saved_actions = self.get('saved_actions', {})
return saved_actions.get("actions", {}).get(key)
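# Illustrative sketch (not part of the original module): round-tripping an
# action through the session helpers above. The action dict is a
# hypothetical window action.
def _example_saved_action(session):
    key = session.save_action({'type': 'ir.actions.act_window',
                               'res_model': 'res.partner'})
    return session.get_action(key)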
def session_gc(session_store):
if random.random() < 0.001:
# we keep session one week
last_week = time.time() - 60*60*24*7
for fname in os.listdir(session_store.path):
path = os.path.join(session_store.path, fname)
try:
if os.path.getmtime(path) < last_week:
os.unlink(path)
except OSError:
pass
#----------------------------------------------------------
# WSGI Layer
#----------------------------------------------------------
# Add potentially missing (older ubuntu) font mime types
mimetypes.add_type('application/font-woff', '.woff')
mimetypes.add_type('application/vnd.ms-fontobject', '.eot')
mimetypes.add_type('application/x-font-ttf', '.ttf')
class Response(werkzeug.wrappers.Response):
""" Response object passed through controller route chain.
In addition to the :class:`werkzeug.wrappers.Response` parameters, this
class's constructor can take the following additional parameters
for QWeb Lazy Rendering.
:param basestring template: template to render
:param dict qcontext: Rendering context to use
:param int uid: User id to use for the ir.ui.view render call,
``None`` to use the request's user (the default)
these attributes are available as parameters on the Response object and
can be altered at any time before rendering
Also exposes all the attributes and methods of
:class:`werkzeug.wrappers.Response`.
"""
default_mimetype = 'text/html'
def __init__(self, *args, **kw):
template = kw.pop('template', None)
qcontext = kw.pop('qcontext', None)
uid = kw.pop('uid', None)
super(Response, self).__init__(*args, **kw)
self.set_default(template, qcontext, uid)
def set_default(self, template=None, qcontext=None, uid=None):
self.template = template
self.qcontext = qcontext or dict()
self.uid = uid
# Support for Cross-Origin Resource Sharing
if request.endpoint and 'cors' in request.endpoint.routing:
self.headers.set('Access-Control-Allow-Origin', request.endpoint.routing['cors'])
methods = 'GET, POST'
if request.endpoint.routing['type'] == 'json':
methods = 'POST'
elif request.endpoint.routing.get('methods'):
methods = ', '.join(request.endpoint.routing['methods'])
self.headers.set('Access-Control-Allow-Methods', methods)
@property
def is_qweb(self):
return self.template is not None
def render(self):
""" Renders the Response's template, returns the result
"""
view_obj = request.registry["ir.ui.view"]
uid = self.uid or request.uid or openerp.SUPERUSER_ID
return view_obj.render(
request.cr, uid, self.template, self.qcontext,
context=request.context)
def flatten(self):
""" Forces the rendering of the response's template, sets the result
as response body and unsets :attr:`.template`
"""
self.response.append(self.render())
self.template = None
class DisableCacheMiddleware(object):
def __init__(self, app):
self.app = app
def __call__(self, environ, start_response):
def start_wrapped(status, headers):
referer = environ.get('HTTP_REFERER', '')
parsed = urlparse.urlparse(referer)
debug = parsed.query.count('debug') >= 1
new_headers = []
unwanted_keys = ['Last-Modified']
if debug:
new_headers = [('Cache-Control', 'no-cache')]
unwanted_keys += ['Expires', 'Etag', 'Cache-Control']
for k, v in headers:
if k not in unwanted_keys:
new_headers.append((k, v))
start_response(status, new_headers)
return self.app(environ, start_wrapped)
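# DisableCacheMiddleware strips the Last-Modified header from every response
# and, when the referer carries a debug flag, also Expires/Etag/Cache-Control,
# so assets are re-fetched instead of served from the browser cache.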
class Root(object):
"""Root WSGI application for the OpenERP Web Client.
"""
def __init__(self):
self._loaded = False
@lazy_property
def session_store(self):
# Setup http sessions
path = openerp.tools.config.session_dir
_logger.debug('HTTP sessions stored in: %s', path)
return werkzeug.contrib.sessions.FilesystemSessionStore(path, session_class=OpenERPSession)
@lazy_property
def nodb_routing_map(self):
_logger.info("Generating nondb routing")
return routing_map([''] + openerp.conf.server_wide_modules, True)
def __call__(self, environ, start_response):
""" Handle a WSGI request
"""
if not self._loaded:
self._loaded = True
self.load_addons()
return self.dispatch(environ, start_response)
def load_addons(self):
""" Load all addons from addons path containing static files and
controllers and configure them. """
# TODO should we move this to ir.http so that only configured modules are served ?
statics = {}
for addons_path in openerp.modules.module.ad_paths:
for module in sorted(os.listdir(str(addons_path))):
if module not in addons_module:
manifest_path = os.path.join(addons_path, module, '__openerp__.py')
path_static = os.path.join(addons_path, module, 'static')
if os.path.isfile(manifest_path) and os.path.isdir(path_static):
manifest = ast.literal_eval(open(manifest_path).read())
manifest['addons_path'] = addons_path
_logger.debug("Loading %s", module)
if 'openerp.addons' in sys.modules:
m = __import__('openerp.addons.' + module)
else:
m = None
addons_module[module] = m
addons_manifest[module] = manifest
statics['/%s/static' % module] = path_static
if statics:
_logger.info("HTTP Configuring static files")
app = werkzeug.wsgi.SharedDataMiddleware(self.dispatch, statics, cache_timeout=STATIC_CACHE)
self.dispatch = DisableCacheMiddleware(app)
def setup_session(self, httprequest):
# recover or create session
session_gc(self.session_store)
sid = httprequest.args.get('session_id')
explicit_session = True
if not sid:
sid = httprequest.headers.get("X-Openerp-Session-Id")
if not sid:
sid = httprequest.cookies.get('session_id')
explicit_session = False
if sid is None:
httprequest.session = self.session_store.new()
else:
httprequest.session = self.session_store.get(sid)
return explicit_session
def setup_db(self, httprequest):
db = httprequest.session.db
# Check if session.db is legit
if db:
if db not in db_filter([db], httprequest=httprequest):
_logger.warn("Logged into database '%s', but dbfilter "
"rejects it; logging session out.", db)
httprequest.session.logout()
db = None
if not db:
httprequest.session.db = db_monodb(httprequest)
def setup_lang(self, httprequest):
if not "lang" in httprequest.session.context:
lang = httprequest.accept_languages.best or "en_US"
lang = babel.core.LOCALE_ALIASES.get(lang, lang).replace('-', '_')
httprequest.session.context["lang"] = lang
def get_request(self, httprequest):
# deduce type of request
if httprequest.args.get('jsonp'):
return JsonRequest(httprequest)
if httprequest.mimetype in ("application/json", "application/json-rpc"):
return JsonRequest(httprequest)
else:
return HttpRequest(httprequest)
def get_response(self, httprequest, result, explicit_session):
if isinstance(result, Response) and result.is_qweb:
try:
result.flatten()
            except Exception, e:
if request.db:
result = request.registry['ir.http']._handle_exception(e)
else:
raise
if isinstance(result, basestring):
response = Response(result, mimetype='text/html')
else:
response = result
if httprequest.session.should_save:
if httprequest.session.rotate:
self.session_store.delete(httprequest.session)
httprequest.session.sid = self.session_store.generate_key()
httprequest.session.modified = True
self.session_store.save(httprequest.session)
# We must not set the cookie if the session id was specified using a http header or a GET parameter.
        # There are two reasons for this:
# - When using one of those two means we consider that we are overriding the cookie, which means creating a new
# session on top of an already existing session and we don't want to create a mess with the 'normal' session
# (the one using the cookie). That is a special feature of the Session Javascript class.
# - It could allow session fixation attacks.
if not explicit_session and hasattr(response, 'set_cookie'):
response.set_cookie('session_id', httprequest.session.sid, max_age=90 * 24 * 60 * 60)
return response
def dispatch(self, environ, start_response):
"""
Performs the actual WSGI dispatching for the application.
"""
try:
httprequest = werkzeug.wrappers.Request(environ)
httprequest.app = self
explicit_session = self.setup_session(httprequest)
self.setup_db(httprequest)
self.setup_lang(httprequest)
request = self.get_request(httprequest)
def _dispatch_nodb():
try:
func, arguments = self.nodb_routing_map.bind_to_environ(request.httprequest.environ).match()
except werkzeug.exceptions.HTTPException, e:
return request._handle_exception(e)
request.set_handler(func, arguments, "none")
result = request.dispatch()
return result
with request:
db = request.session.db
if db:
openerp.modules.registry.RegistryManager.check_registry_signaling(db)
try:
with openerp.tools.mute_logger('openerp.sql_db'):
ir_http = request.registry['ir.http']
except (AttributeError, psycopg2.OperationalError):
# psycopg2 error or attribute error while constructing
# the registry. That means the database probably does
                        # not exist anymore or the code doesn't match the db.
# Log the user out and fall back to nodb
request.session.logout()
result = _dispatch_nodb()
else:
result = ir_http._dispatch()
openerp.modules.registry.RegistryManager.signal_caches_change(db)
else:
result = _dispatch_nodb()
response = self.get_response(httprequest, result, explicit_session)
return response(environ, start_response)
except werkzeug.exceptions.HTTPException, e:
return e(environ, start_response)
def get_db_router(self, db):
if not db:
return self.nodb_routing_map
return request.registry['ir.http'].routing_map()
def db_list(force=False, httprequest=None):
dbs = dispatch_rpc("db", "list", [force])
return db_filter(dbs, httprequest=httprequest)
def db_filter(dbs, httprequest=None):
httprequest = httprequest or request.httprequest
h = httprequest.environ.get('HTTP_HOST', '').split(':')[0]
d, _, r = h.partition('.')
if d == "www" and r:
d = r.partition('.')[0]
r = openerp.tools.config['dbfilter'].replace('%h', h).replace('%d', d)
dbs = [i for i in dbs if re.match(r, i)]
return dbs
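# Illustrative dbfilter expansion (hostname is an assumption): for a request
# to "www.example.com", h is "www.example.com" and d is "example", so a
# dbfilter of "%d_.*" keeps only databases matching "example_.*".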
def db_monodb(httprequest=None):
"""
Magic function to find the current database.
Implementation details:
* Magic
* More magic
Returns ``None`` if the magic is not magic enough.
"""
httprequest = httprequest or request.httprequest
dbs = db_list(True, httprequest)
# try the db already in the session
db_session = httprequest.session.db
if db_session in dbs:
return db_session
# if there is only one possible db, we take that one
if len(dbs) == 1:
return dbs[0]
return None
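# In practice the "magic" above is: return the session's db if dbfilter still
# allows it, else the single database left after filtering, else None.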
def send_file(filepath_or_fp, mimetype=None, as_attachment=False, filename=None, mtime=None,
add_etags=True, cache_timeout=STATIC_CACHE, conditional=True):
"""This is a modified version of Flask's send_file()
Sends the contents of a file to the client. This will use the
most efficient method available and configured. By default it will
try to use the WSGI server's file_wrapper support.
By default it will try to guess the mimetype for you, but you can
also explicitly provide one. For extra security you probably want
    to send certain files as attachment (HTML for instance). The mimetype
    guessing requires a `filename` to be provided.
Please never pass filenames to this function from user sources without
checking them first.
:param filepath_or_fp: the filename of the file to send.
Alternatively a file object might be provided
                           in which case `X-Sendfile` might not work and
                           the code falls back to the traditional method. Make sure
that the file pointer is positioned at the start
of data to send before calling :func:`send_file`.
:param mimetype: the mimetype of the file if provided, otherwise
auto detection happens.
:param as_attachment: set to `True` if you want to send this file with
a ``Content-Disposition: attachment`` header.
    :param filename: the filename for the attachment if it differs from the file's filename or
                     if using a file object without a 'name' attribute (e.g. ETags with StringIO).
    :param mtime: last modification time to use for conditional responses.
:param add_etags: set to `False` to disable attaching of etags.
:param conditional: set to `False` to disable conditional responses.
:param cache_timeout: the timeout in seconds for the headers.
"""
if isinstance(filepath_or_fp, (str, unicode)):
if not filename:
filename = os.path.basename(filepath_or_fp)
file = open(filepath_or_fp, 'rb')
if not mtime:
mtime = os.path.getmtime(filepath_or_fp)
else:
file = filepath_or_fp
if not filename:
filename = getattr(file, 'name', None)
file.seek(0, 2)
size = file.tell()
file.seek(0)
if mimetype is None and filename:
mimetype = mimetypes.guess_type(filename)[0]
if mimetype is None:
mimetype = 'application/octet-stream'
headers = werkzeug.datastructures.Headers()
if as_attachment:
if filename is None:
raise TypeError('filename unavailable, required for sending as attachment')
headers.add('Content-Disposition', 'attachment', filename=filename)
headers['Content-Length'] = size
data = wrap_file(request.httprequest.environ, file)
rv = Response(data, mimetype=mimetype, headers=headers,
direct_passthrough=True)
if isinstance(mtime, str):
try:
server_format = openerp.tools.misc.DEFAULT_SERVER_DATETIME_FORMAT
mtime = datetime.datetime.strptime(mtime.split('.')[0], server_format)
except Exception:
mtime = None
if mtime is not None:
rv.last_modified = mtime
rv.cache_control.public = True
if cache_timeout:
rv.cache_control.max_age = cache_timeout
rv.expires = int(time.time() + cache_timeout)
if add_etags and filename and mtime:
rv.set_etag('odoo-%s-%s-%s' % (
mtime,
size,
adler32(
filename.encode('utf-8') if isinstance(filename, unicode)
else filename
) & 0xffffffff
))
if conditional:
rv = rv.make_conditional(request.httprequest)
# make sure we don't send x-sendfile for servers that
# ignore the 304 status code for x-sendfile.
if rv.status_code == 304:
rv.headers.pop('x-sendfile', None)
return rv
#----------------------------------------------------------
# RPC controller
#----------------------------------------------------------
class CommonController(Controller):
@route('/jsonrpc', type='json', auth="none")
def jsonrpc(self, service, method, args):
""" Method used by client APIs to contact OpenERP. """
return dispatch_rpc(service, method, args)
@route('/gen_session_id', type='json', auth="none")
def gen_session_id(self):
nsession = root.session_store.new()
return nsession.sid
# register main wsgi handler
root = Root()
openerp.service.wsgi_server.register_wsgi_handler(root)
# vim:et:ts=4:sw=4:
|
zoeyangyy/event-extraction | refs/heads/master | tf_test/tf_rnn.py | 1 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# @Time    : 2017/10/1 4:15 PM
# @Author : Zoe
# @File : tf_rnn.py
# @Description : https://www.bilibili.com/video/av10118932/#page=28
import tensorflow as tf
import numpy as np
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
lr = 0.001
training_iters = 100000
batch_size = 128
n_inputs = 28
n_steps = 28
n_hidden_unis = 128  # number of hidden units
n_classes = 10
x = tf.placeholder(tf.float32,[None, n_steps,n_inputs])
y = tf.placeholder(tf.float32,[None, n_classes])
weights = {
# (28,128)
'in': tf.Variable(tf.random_normal([n_inputs,n_hidden_unis])),
# (128,10)
'out': tf.Variable(tf.random_normal([n_hidden_unis,n_classes]))
}
biases = {
'in': tf.Variable(tf.constant(0.1, shape=[n_hidden_unis])),
'out': tf.Variable(tf.constant(0.1, shape=[n_classes]))
}
def RNN(X, weights, biases):
# hidden layer for input to cell
# x = (128 batch, 28 step, 28 input) => (128*28, 28 input)
X = tf.reshape(X, [-1, n_inputs])
# => (128*28, 128 hidden) => (128 batch, 28 steps, 128 hidden)
X_in = tf.matmul(X, weights['in'])+biases['in']
X_in = tf.reshape(X_in, [-1, n_steps, n_hidden_unis])
# cell
lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(n_hidden_unis, forget_bias=1.0, state_is_tuple=True)
_init_state = lstm_cell.zero_state(batch_size, dtype=tf.float32)
outputs, states = tf.nn.dynamic_rnn(lstm_cell, X_in, initial_state=_init_state, time_major=False)
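    # With state_is_tuple=True, states is an LSTMStateTuple (c, h); states[1]
    # below is therefore the final hidden state h, shape (batch, n_hidden_unis).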
# hidden layer for output as the final results
results = tf.matmul(states[1], weights['out']) + biases['out']
return outputs, states, results
# outputs::::::
# (128, 28, 128)
# states::::::
# (128, 128)
# results::::::
# (128, 10)
outputs, states, pred = RNN(x, weights, biases)
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))
train_op = tf.train.AdamOptimizer(lr).minimize(cost)
correct_pred = tf.equal(tf.argmax(pred,1),tf.argmax(y,1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
init = tf.global_variables_initializer()
with tf.Session() as sess:
sess.run(init)
step = 0
# while step * batch_size < training_iters:
batch_xs, batch_ys = mnist.train.next_batch(batch_size)
print("batch_xs::::::\n",batch_xs.shape, batch_xs)
print("batch_ys::::::\n",batch_ys.shape, batch_ys)
batch_xs = batch_xs.reshape([batch_size, n_steps, n_inputs])
sess.run([train_op],feed_dict={
x: batch_xs,
y: batch_ys,
})
a = sess.run(outputs,feed_dict={
x: batch_xs,
y: batch_ys,
})
print("outputs::::::\n",a.shape)
b = sess.run(states,feed_dict={
x: batch_xs,
y: batch_ys,
})
print("states::::::\n",b[1].shape)
c = sess.run(pred,feed_dict={
x: batch_xs,
y: batch_ys,
})
print("results::::::\n",c.shape, c[0])
# outputs::::::
# (128, 28, 128)
# states::::::
# (128, 128)
# results::::::
# (128, 10)
if step % 20 == 0:
print(sess.run(accuracy, feed_dict={
x: batch_xs,
y: batch_ys
}))
step += 1
|
slswee/google_python_exercise | refs/heads/master | basic/solution/list2.py | 207 | #!/usr/bin/python -tt
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
# Additional basic list exercises
# D. Given a list of numbers, return a list where
# all adjacent == elements have been reduced to a single element,
# so [1, 2, 2, 3] returns [1, 2, 3]. You may create a new list or
# modify the passed in list.
def remove_adjacent(nums):
# +++your code here+++
# LAB(begin solution)
result = []
for num in nums:
if len(result) == 0 or num != result[-1]:
result.append(num)
return result
# LAB(replace solution)
# return
# LAB(end solution)
# E. Given two lists sorted in increasing order, create and return a merged
# list of all the elements in sorted order. You may modify the passed in lists.
# Ideally, the solution should work in "linear" time, making a single
# pass of both lists.
def linear_merge(list1, list2):
# +++your code here+++
# LAB(begin solution)
result = []
# Look at the two lists so long as both are non-empty.
# Take whichever element [0] is smaller.
while len(list1) and len(list2):
if list1[0] < list2[0]:
result.append(list1.pop(0))
else:
result.append(list2.pop(0))
# Now tack on what's left
result.extend(list1)
result.extend(list2)
return result
# LAB(replace solution)
# return
# LAB(end solution)
# Note: the solution above is kind of cute, but unfortunately list.pop(0)
# is not constant time with the standard python list implementation, so
# the above is not strictly linear time.
# An alternate approach uses pop(-1) to remove the endmost elements
# from each list, building a solution list which is backwards.
# Then use reversed() to put the result back in the correct order. That
# solution works in linear time, but is more ugly.
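# A runnable sketch of that alternate approach (illustrative only; the helper
# name is made up and is not part of the original exercise):
def linear_merge_backwards(list1, list2):
  # Repeatedly pop the larger endmost element, building the result backwards.
  result = []
  while len(list1) and len(list2):
    if list1[-1] > list2[-1]:
      result.append(list1.pop(-1))
    else:
      result.append(list2.pop(-1))
  # Whatever remains is already sorted and smaller than everything taken.
  result.extend(reversed(list1))
  result.extend(reversed(list2))
  return list(reversed(result))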
# Simple provided test() function used in main() to print
# what each function returns vs. what it's supposed to return.
def test(got, expected):
if got == expected:
prefix = ' OK '
else:
prefix = ' X '
print '%s got: %s expected: %s' % (prefix, repr(got), repr(expected))
# Calls the above functions with interesting inputs.
def main():
print 'remove_adjacent'
test(remove_adjacent([1, 2, 2, 3]), [1, 2, 3])
test(remove_adjacent([2, 2, 3, 3, 3]), [2, 3])
test(remove_adjacent([]), [])
print
print 'linear_merge'
test(linear_merge(['aa', 'xx', 'zz'], ['bb', 'cc']),
['aa', 'bb', 'cc', 'xx', 'zz'])
test(linear_merge(['aa', 'xx'], ['bb', 'cc', 'zz']),
['aa', 'bb', 'cc', 'xx', 'zz'])
test(linear_merge(['aa', 'aa'], ['aa', 'bb', 'bb']),
['aa', 'aa', 'aa', 'bb', 'bb'])
if __name__ == '__main__':
main()
|
simonwydooghe/ansible | refs/heads/devel | test/units/modules/network/check_point/test_cp_mgmt_dns_domain.py | 19 | # Ansible module to manage CheckPoint Firewall (c) 2019
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import pytest
from units.modules.utils import set_module_args, exit_json, fail_json, AnsibleExitJson
from ansible.module_utils import basic
from ansible.modules.network.check_point import cp_mgmt_dns_domain
OBJECT = {
"name": ".www.example.com",
"is_sub_domain": False
}
CREATE_PAYLOAD = {
"name": ".www.example.com",
"is_sub_domain": False
}
UPDATE_PAYLOAD = {
"name": ".www.example.com",
"is_sub_domain": True
}
OBJECT_AFTER_UPDATE = UPDATE_PAYLOAD
DELETE_PAYLOAD = {
"name": ".www.example.com",
"state": "absent"
}
function_path = 'ansible.modules.network.check_point.cp_mgmt_dns_domain.api_call'
api_call_object = 'dns-domain'
class TestCheckpointDnsDomain(object):
module = cp_mgmt_dns_domain
@pytest.fixture(autouse=True)
def module_mock(self, mocker):
return mocker.patch.multiple(basic.AnsibleModule, exit_json=exit_json, fail_json=fail_json)
@pytest.fixture
def connection_mock(self, mocker):
connection_class_mock = mocker.patch('ansible.module_utils.network.checkpoint.checkpoint.Connection')
return connection_class_mock.return_value
def test_create(self, mocker, connection_mock):
mock_function = mocker.patch(function_path)
mock_function.return_value = {'changed': True, api_call_object: OBJECT}
result = self._run_module(CREATE_PAYLOAD)
assert result['changed']
assert OBJECT.items() == result[api_call_object].items()
def test_create_idempotent(self, mocker, connection_mock):
mock_function = mocker.patch(function_path)
mock_function.return_value = {'changed': False, api_call_object: OBJECT}
result = self._run_module(CREATE_PAYLOAD)
assert not result['changed']
def test_update(self, mocker, connection_mock):
mock_function = mocker.patch(function_path)
mock_function.return_value = {'changed': True, api_call_object: OBJECT_AFTER_UPDATE}
result = self._run_module(UPDATE_PAYLOAD)
assert result['changed']
assert OBJECT_AFTER_UPDATE.items() == result[api_call_object].items()
def test_update_idempotent(self, mocker, connection_mock):
mock_function = mocker.patch(function_path)
mock_function.return_value = {'changed': False, api_call_object: OBJECT_AFTER_UPDATE}
result = self._run_module(UPDATE_PAYLOAD)
assert not result['changed']
def test_delete(self, mocker, connection_mock):
mock_function = mocker.patch(function_path)
mock_function.return_value = {'changed': True}
result = self._run_module(DELETE_PAYLOAD)
assert result['changed']
def test_delete_idempotent(self, mocker, connection_mock):
mock_function = mocker.patch(function_path)
mock_function.return_value = {'changed': False}
result = self._run_module(DELETE_PAYLOAD)
assert not result['changed']
def _run_module(self, module_args):
set_module_args(module_args)
with pytest.raises(AnsibleExitJson) as ex:
self.module.main()
return ex.value.args[0]
|
yongshengwang/hue | refs/heads/master | desktop/core/ext-py/boto-2.38.0/boto/ec2/image.py | 92 | # Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2010, Eucalyptus Systems, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from boto.ec2.ec2object import EC2Object, TaggedEC2Object
from boto.ec2.blockdevicemapping import BlockDeviceMapping
class ProductCodes(list):
def startElement(self, name, attrs, connection):
pass
def endElement(self, name, value, connection):
if name == 'productCode':
self.append(value)
class BillingProducts(list):
def startElement(self, name, attrs, connection):
pass
def endElement(self, name, value, connection):
if name == 'billingProduct':
self.append(value)
class Image(TaggedEC2Object):
"""
Represents an EC2 Image
"""
def __init__(self, connection=None):
super(Image, self).__init__(connection)
self.id = None
self.location = None
self.state = None
self.ownerId = None # for backwards compatibility
self.owner_id = None
self.owner_alias = None
self.is_public = False
self.architecture = None
self.platform = None
self.type = None
self.kernel_id = None
self.ramdisk_id = None
self.name = None
self.description = None
self.product_codes = ProductCodes()
self.billing_products = BillingProducts()
self.block_device_mapping = None
self.root_device_type = None
self.root_device_name = None
self.virtualization_type = None
self.hypervisor = None
self.instance_lifecycle = None
self.sriov_net_support = None
def __repr__(self):
return 'Image:%s' % self.id
def startElement(self, name, attrs, connection):
retval = super(Image, self).startElement(name, attrs, connection)
if retval is not None:
return retval
if name == 'blockDeviceMapping':
self.block_device_mapping = BlockDeviceMapping()
return self.block_device_mapping
elif name == 'productCodes':
return self.product_codes
elif name == 'billingProducts':
return self.billing_products
else:
return None
def endElement(self, name, value, connection):
if name == 'imageId':
self.id = value
elif name == 'imageLocation':
self.location = value
elif name == 'imageState':
self.state = value
elif name == 'imageOwnerId':
self.ownerId = value # for backwards compatibility
self.owner_id = value
elif name == 'isPublic':
if value == 'false':
self.is_public = False
elif value == 'true':
self.is_public = True
else:
raise Exception(
'Unexpected value of isPublic %s for image %s' % (
value,
self.id
)
)
elif name == 'architecture':
self.architecture = value
elif name == 'imageType':
self.type = value
elif name == 'kernelId':
self.kernel_id = value
elif name == 'ramdiskId':
self.ramdisk_id = value
elif name == 'imageOwnerAlias':
self.owner_alias = value
elif name == 'platform':
self.platform = value
elif name == 'name':
self.name = value
elif name == 'description':
self.description = value
elif name == 'rootDeviceType':
self.root_device_type = value
elif name == 'rootDeviceName':
self.root_device_name = value
elif name == 'virtualizationType':
self.virtualization_type = value
elif name == 'hypervisor':
self.hypervisor = value
elif name == 'instanceLifecycle':
self.instance_lifecycle = value
elif name == 'sriovNetSupport':
self.sriov_net_support = value
else:
setattr(self, name, value)
def _update(self, updated):
self.__dict__.update(updated.__dict__)
def update(self, validate=False, dry_run=False):
"""
Update the image's state information by making a call to fetch
the current image attributes from the service.
:type validate: bool
:param validate: By default, if EC2 returns no data about the
image the update method returns quietly. If
the validate param is True, however, it will
raise a ValueError exception if no data is
returned from EC2.
"""
rs = self.connection.get_all_images([self.id], dry_run=dry_run)
if len(rs) > 0:
img = rs[0]
if img.id == self.id:
self._update(img)
elif validate:
raise ValueError('%s is not a valid Image ID' % self.id)
return self.state
def run(self, min_count=1, max_count=1, key_name=None,
security_groups=None, user_data=None,
addressing_type=None, instance_type='m1.small', placement=None,
kernel_id=None, ramdisk_id=None,
monitoring_enabled=False, subnet_id=None,
block_device_map=None,
disable_api_termination=False,
instance_initiated_shutdown_behavior=None,
private_ip_address=None,
placement_group=None, security_group_ids=None,
additional_info=None, instance_profile_name=None,
instance_profile_arn=None, tenancy=None, dry_run=False):
"""
Runs this instance.
:type min_count: int
:param min_count: The minimum number of instances to start
:type max_count: int
:param max_count: The maximum number of instances to start
:type key_name: string
:param key_name: The name of the key pair with which to
launch instances.
:type security_groups: list of strings
:param security_groups: The names of the security groups with which to
associate instances.
:type user_data: string
:param user_data: The Base64-encoded MIME user data to be made
available to the instance(s) in this reservation.
:type instance_type: string
:param instance_type: The type of instance to run:
* t1.micro
* m1.small
* m1.medium
* m1.large
* m1.xlarge
* m3.medium
* m3.large
* m3.xlarge
* m3.2xlarge
* c1.medium
* c1.xlarge
* m2.xlarge
* m2.2xlarge
* m2.4xlarge
* cr1.8xlarge
* hi1.4xlarge
* hs1.8xlarge
* cc1.4xlarge
* cg1.4xlarge
* cc2.8xlarge
* g2.2xlarge
* c3.large
* c3.xlarge
* c3.2xlarge
* c3.4xlarge
* c3.8xlarge
* i2.xlarge
* i2.2xlarge
* i2.4xlarge
* i2.8xlarge
* t2.micro
* t2.small
* t2.medium
:type placement: string
:param placement: The Availability Zone to launch the instance into.
:type kernel_id: string
:param kernel_id: The ID of the kernel with which to launch the
instances.
:type ramdisk_id: string
:param ramdisk_id: The ID of the RAM disk with which to launch the
instances.
:type monitoring_enabled: bool
:param monitoring_enabled: Enable CloudWatch monitoring on
the instance.
:type subnet_id: string
:param subnet_id: The subnet ID within which to launch the instances
for VPC.
:type private_ip_address: string
:param private_ip_address: If you're using VPC, you can
optionally use this parameter to assign the instance a
specific available IP address from the subnet (e.g.,
10.0.0.25).
:type block_device_map: :class:`boto.ec2.blockdevicemapping.BlockDeviceMapping`
:param block_device_map: A BlockDeviceMapping data structure
describing the EBS volumes associated with the Image.
:type disable_api_termination: bool
:param disable_api_termination: If True, the instances will be locked
and will not be able to be terminated via the API.
:type instance_initiated_shutdown_behavior: string
:param instance_initiated_shutdown_behavior: Specifies whether the
instance stops or terminates on instance-initiated shutdown.
Valid values are:
* stop
* terminate
:type placement_group: string
:param placement_group: If specified, this is the name of the placement
group in which the instance(s) will be launched.
:type additional_info: string
:param additional_info: Specifies additional information to make
available to the instance(s).
:type security_group_ids: list of strings
:param security_group_ids: The ID of the VPC security groups with
which to associate instances.
:type instance_profile_name: string
:param instance_profile_name: The name of
the IAM Instance Profile (IIP) to associate with the instances.
:type instance_profile_arn: string
:param instance_profile_arn: The Amazon resource name (ARN) of
the IAM Instance Profile (IIP) to associate with the instances.
:type tenancy: string
:param tenancy: The tenancy of the instance you want to
launch. An instance with a tenancy of 'dedicated' runs on
single-tenant hardware and can only be launched into a
VPC. Valid values are:"default" or "dedicated".
NOTE: To use dedicated tenancy you MUST specify a VPC
subnet-ID as well.
:rtype: Reservation
:return: The :class:`boto.ec2.instance.Reservation` associated with
the request for machines
"""
return self.connection.run_instances(self.id, min_count, max_count,
key_name, security_groups,
user_data, addressing_type,
instance_type, placement,
kernel_id, ramdisk_id,
monitoring_enabled, subnet_id,
block_device_map, disable_api_termination,
instance_initiated_shutdown_behavior,
private_ip_address, placement_group,
security_group_ids=security_group_ids,
additional_info=additional_info,
instance_profile_name=instance_profile_name,
instance_profile_arn=instance_profile_arn,
tenancy=tenancy, dry_run=dry_run)
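    # Hedged usage sketch (the connection setup and AMI id are assumptions):
    #     image = conn.get_all_images(['ami-12345678'])[0]
    #     reservation = image.run(key_name='mykey', instance_type='m1.small')
    #     instance = reservation.instances[0]
    # run() returns the Reservation whose .instances holds the new instances.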
def deregister(self, delete_snapshot=False, dry_run=False):
return self.connection.deregister_image(
self.id,
delete_snapshot,
dry_run=dry_run
)
def get_launch_permissions(self, dry_run=False):
img_attrs = self.connection.get_image_attribute(
self.id,
'launchPermission',
dry_run=dry_run
)
return img_attrs.attrs
def set_launch_permissions(self, user_ids=None, group_names=None,
dry_run=False):
return self.connection.modify_image_attribute(self.id,
'launchPermission',
'add',
user_ids,
group_names,
dry_run=dry_run)
def remove_launch_permissions(self, user_ids=None, group_names=None,
dry_run=False):
return self.connection.modify_image_attribute(self.id,
'launchPermission',
'remove',
user_ids,
group_names,
dry_run=dry_run)
def reset_launch_attributes(self, dry_run=False):
return self.connection.reset_image_attribute(
self.id,
'launchPermission',
dry_run=dry_run
)
def get_kernel(self, dry_run=False):
img_attrs = self.connection.get_image_attribute(
self.id,
'kernel',
dry_run=dry_run
)
return img_attrs.kernel
def get_ramdisk(self, dry_run=False):
img_attrs = self.connection.get_image_attribute(
self.id,
'ramdisk',
dry_run=dry_run
)
return img_attrs.ramdisk
class ImageAttribute(object):
def __init__(self, parent=None):
self.name = None
self.kernel = None
self.ramdisk = None
self.attrs = {}
def startElement(self, name, attrs, connection):
if name == 'blockDeviceMapping':
self.attrs['block_device_mapping'] = BlockDeviceMapping()
return self.attrs['block_device_mapping']
else:
return None
def endElement(self, name, value, connection):
if name == 'launchPermission':
self.name = 'launch_permission'
elif name == 'group':
if 'groups' in self.attrs:
self.attrs['groups'].append(value)
else:
self.attrs['groups'] = [value]
elif name == 'userId':
if 'user_ids' in self.attrs:
self.attrs['user_ids'].append(value)
else:
self.attrs['user_ids'] = [value]
elif name == 'productCode':
if 'product_codes' in self.attrs:
self.attrs['product_codes'].append(value)
else:
self.attrs['product_codes'] = [value]
elif name == 'imageId':
self.image_id = value
elif name == 'kernel':
self.kernel = value
elif name == 'ramdisk':
self.ramdisk = value
else:
setattr(self, name, value)
class CopyImage(object):
def __init__(self, parent=None):
self._parent = parent
self.image_id = None
def startElement(self, name, attrs, connection):
pass
def endElement(self, name, value, connection):
if name == 'imageId':
self.image_id = value
|
yasoob/PythonRSSReader | refs/heads/master | venv/lib/python2.7/dist-packages/twisted/test/stdio_test_producer.py | 40 | # -*- test-case-name: twisted.test.test_stdio.StandardInputOutputTestCase.test_producer -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Main program for the child process run by
L{twisted.test.test_stdio.StandardInputOutputTestCase.test_producer} to test
that process transports implement IProducer properly.
"""
import sys, _preamble
from twisted.internet import stdio, protocol
from twisted.python import log, reflect
class ProducerChild(protocol.Protocol):
_paused = False
buf = ''
def connectionLost(self, reason):
log.msg("*****OVER*****")
reactor.callLater(1, reactor.stop)
# reactor.stop()
def dataReceived(self, bytes):
self.buf += bytes
if self._paused:
log.startLogging(sys.stderr)
log.msg("dataReceived while transport paused!")
self.transport.loseConnection()
else:
self.transport.write(bytes)
if self.buf.endswith('\n0\n'):
self.transport.loseConnection()
else:
self.pause()
def pause(self):
self._paused = True
self.transport.pauseProducing()
reactor.callLater(0.01, self.unpause)
def unpause(self):
self._paused = False
self.transport.resumeProducing()
if __name__ == '__main__':
reflect.namedAny(sys.argv[1]).install()
from twisted.internet import reactor
stdio.StandardIO(ProducerChild())
reactor.run()
|
phalax4/CarnotKE | refs/heads/master | jyhton/lib-python/2.7/markupbase.py | 173 | """Shared support for scanning document type declarations in HTML and XHTML.
This module is used as a foundation for the HTMLParser and sgmllib
modules (indirectly, for htmllib as well). It has no documented
public API and should not be used directly.
"""
import re
_declname_match = re.compile(r'[a-zA-Z][-_.a-zA-Z0-9]*\s*').match
_declstringlit_match = re.compile(r'(\'[^\']*\'|"[^"]*")\s*').match
_commentclose = re.compile(r'--\s*>')
_markedsectionclose = re.compile(r']\s*]\s*>')
# An analysis of the MS-Word extensions is available at
# http://www.planetpublish.com/xmlarena/xap/Thursday/WordtoXML.pdf
_msmarkedsectionclose = re.compile(r']\s*>')
del re
class ParserBase:
"""Parser base class which provides some common support methods used
by the SGML/HTML and XHTML parsers."""
def __init__(self):
if self.__class__ is ParserBase:
raise RuntimeError(
"markupbase.ParserBase must be subclassed")
def error(self, message):
raise NotImplementedError(
"subclasses of ParserBase must override error()")
def reset(self):
self.lineno = 1
self.offset = 0
def getpos(self):
"""Return current line number and offset."""
return self.lineno, self.offset
# Internal -- update line number and offset. This should be
# called for each piece of data exactly once, in order -- in other
# words the concatenation of all the input strings to this
# function should be exactly the entire input.
def updatepos(self, i, j):
if i >= j:
return j
rawdata = self.rawdata
nlines = rawdata.count("\n", i, j)
if nlines:
self.lineno = self.lineno + nlines
pos = rawdata.rindex("\n", i, j) # Should not fail
self.offset = j-(pos+1)
else:
self.offset = self.offset + j-i
return j
_decl_otherchars = ''
# Internal -- parse declaration (for use by subclasses).
def parse_declaration(self, i):
# This is some sort of declaration; in "HTML as
# deployed," this should only be the document type
# declaration ("<!DOCTYPE html...>").
# ISO 8879:1986, however, has more complex
# declaration syntax for elements in <!...>, including:
# --comment--
# [marked section]
# name in the following list: ENTITY, DOCTYPE, ELEMENT,
# ATTLIST, NOTATION, SHORTREF, USEMAP,
# LINKTYPE, LINK, IDLINK, USELINK, SYSTEM
rawdata = self.rawdata
j = i + 2
assert rawdata[i:j] == "<!", "unexpected call to parse_declaration"
if rawdata[j:j+1] == ">":
# the empty comment <!>
return j + 1
if rawdata[j:j+1] in ("-", ""):
# Start of comment followed by buffer boundary,
# or just a buffer boundary.
return -1
# A simple, practical version could look like: ((name|stringlit) S*) + '>'
n = len(rawdata)
if rawdata[j:j+2] == '--': #comment
# Locate --.*-- as the body of the comment
return self.parse_comment(i)
elif rawdata[j] == '[': #marked section
# Locate [statusWord [...arbitrary SGML...]] as the body of the marked section
# Where statusWord is one of TEMP, CDATA, IGNORE, INCLUDE, RCDATA
# Note that this is extended by Microsoft Office "Save as Web" function
# to include [if...] and [endif].
return self.parse_marked_section(i)
else: #all other declaration elements
decltype, j = self._scan_name(j, i)
if j < 0:
return j
if decltype == "doctype":
self._decl_otherchars = ''
while j < n:
c = rawdata[j]
if c == ">":
# end of declaration syntax
data = rawdata[i+2:j]
if decltype == "doctype":
self.handle_decl(data)
else:
# According to the HTML5 specs sections "8.2.4.44 Bogus
# comment state" and "8.2.4.45 Markup declaration open
# state", a comment token should be emitted.
# Calling unknown_decl provides more flexibility though.
self.unknown_decl(data)
return j + 1
if c in "\"'":
m = _declstringlit_match(rawdata, j)
if not m:
return -1 # incomplete
j = m.end()
elif c in "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ":
name, j = self._scan_name(j, i)
elif c in self._decl_otherchars:
j = j + 1
elif c == "[":
# this could be handled in a separate doctype parser
if decltype == "doctype":
j = self._parse_doctype_subset(j + 1, i)
elif decltype in ("attlist", "linktype", "link", "element"):
# must tolerate []'d groups in a content model in an element declaration
# also in data attribute specifications of attlist declaration
# also link type declaration subsets in linktype declarations
# also link attribute specification lists in link declarations
self.error("unsupported '[' char in %s declaration" % decltype)
else:
self.error("unexpected '[' char in declaration")
else:
self.error(
"unexpected %r char in declaration" % rawdata[j])
if j < 0:
return j
return -1 # incomplete
# Internal -- parse a marked section
# Override this to handle MS-word extension syntax <![if word]>content<![endif]>
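    # e.g. a standard section such as <![CDATA[ ... ]]> or <![IGNORE[ ... ]]>,
    # and the MS Office forms <![if ...]> ... <![endif]>.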
def parse_marked_section(self, i, report=1):
rawdata= self.rawdata
assert rawdata[i:i+3] == '<![', "unexpected call to parse_marked_section()"
sectName, j = self._scan_name( i+3, i )
if j < 0:
return j
if sectName in ("temp", "cdata", "ignore", "include", "rcdata"):
# look for standard ]]> ending
match= _markedsectionclose.search(rawdata, i+3)
elif sectName in ("if", "else", "endif"):
# look for MS Office ]> ending
match= _msmarkedsectionclose.search(rawdata, i+3)
else:
self.error('unknown status keyword %r in marked section' % rawdata[i+3:j])
if not match:
return -1
if report:
j = match.start(0)
self.unknown_decl(rawdata[i+3: j])
return match.end(0)
# Internal -- parse comment, return length or -1 if not terminated
def parse_comment(self, i, report=1):
rawdata = self.rawdata
if rawdata[i:i+4] != '<!--':
self.error('unexpected call to parse_comment()')
match = _commentclose.search(rawdata, i+4)
if not match:
return -1
if report:
j = match.start(0)
self.handle_comment(rawdata[i+4: j])
return match.end(0)
# Internal -- scan past the internal subset in a <!DOCTYPE declaration,
# returning the index just past any whitespace following the trailing ']'.
def _parse_doctype_subset(self, i, declstartpos):
rawdata = self.rawdata
n = len(rawdata)
j = i
while j < n:
c = rawdata[j]
if c == "<":
s = rawdata[j:j+2]
if s == "<":
# end of buffer; incomplete
return -1
if s != "<!":
self.updatepos(declstartpos, j + 1)
self.error("unexpected char in internal subset (in %r)" % s)
if (j + 2) == n:
# end of buffer; incomplete
return -1
if (j + 4) > n:
# end of buffer; incomplete
return -1
if rawdata[j:j+4] == "<!--":
j = self.parse_comment(j, report=0)
if j < 0:
return j
continue
name, j = self._scan_name(j + 2, declstartpos)
if j == -1:
return -1
if name not in ("attlist", "element", "entity", "notation"):
self.updatepos(declstartpos, j + 2)
self.error(
"unknown declaration %r in internal subset" % name)
# handle the individual names
meth = getattr(self, "_parse_doctype_" + name)
j = meth(j, declstartpos)
if j < 0:
return j
elif c == "%":
# parameter entity reference
if (j + 1) == n:
# end of buffer; incomplete
return -1
s, j = self._scan_name(j + 1, declstartpos)
if j < 0:
return j
if rawdata[j] == ";":
j = j + 1
elif c == "]":
j = j + 1
while j < n and rawdata[j].isspace():
j = j + 1
if j < n:
if rawdata[j] == ">":
return j
self.updatepos(declstartpos, j)
self.error("unexpected char after internal subset")
else:
return -1
elif c.isspace():
j = j + 1
else:
self.updatepos(declstartpos, j)
self.error("unexpected char %r in internal subset" % c)
# end of buffer reached
return -1
# Internal -- scan past <!ELEMENT declarations
def _parse_doctype_element(self, i, declstartpos):
name, j = self._scan_name(i, declstartpos)
if j == -1:
return -1
# style content model; just skip until '>'
rawdata = self.rawdata
if '>' in rawdata[j:]:
return rawdata.find(">", j) + 1
return -1
# Internal -- scan past <!ATTLIST declarations
def _parse_doctype_attlist(self, i, declstartpos):
rawdata = self.rawdata
name, j = self._scan_name(i, declstartpos)
c = rawdata[j:j+1]
if c == "":
return -1
if c == ">":
return j + 1
while 1:
# scan a series of attribute descriptions; simplified:
# name type [value] [#constraint]
name, j = self._scan_name(j, declstartpos)
if j < 0:
return j
c = rawdata[j:j+1]
if c == "":
return -1
if c == "(":
# an enumerated type; look for ')'
if ")" in rawdata[j:]:
j = rawdata.find(")", j) + 1
else:
return -1
while rawdata[j:j+1].isspace():
j = j + 1
if not rawdata[j:]:
# end of buffer, incomplete
return -1
else:
name, j = self._scan_name(j, declstartpos)
c = rawdata[j:j+1]
if not c:
return -1
if c in "'\"":
m = _declstringlit_match(rawdata, j)
if m:
j = m.end()
else:
return -1
c = rawdata[j:j+1]
if not c:
return -1
if c == "#":
if rawdata[j:] == "#":
# end of buffer
return -1
name, j = self._scan_name(j + 1, declstartpos)
if j < 0:
return j
c = rawdata[j:j+1]
if not c:
return -1
if c == '>':
# all done
return j + 1
# Internal -- scan past <!NOTATION declarations
def _parse_doctype_notation(self, i, declstartpos):
name, j = self._scan_name(i, declstartpos)
if j < 0:
return j
rawdata = self.rawdata
while 1:
c = rawdata[j:j+1]
if not c:
# end of buffer; incomplete
return -1
if c == '>':
return j + 1
if c in "'\"":
m = _declstringlit_match(rawdata, j)
if not m:
return -1
j = m.end()
else:
name, j = self._scan_name(j, declstartpos)
if j < 0:
return j
# Internal -- scan past <!ENTITY declarations
def _parse_doctype_entity(self, i, declstartpos):
rawdata = self.rawdata
if rawdata[i:i+1] == "%":
j = i + 1
while 1:
c = rawdata[j:j+1]
if not c:
return -1
if c.isspace():
j = j + 1
else:
break
else:
j = i
name, j = self._scan_name(j, declstartpos)
if j < 0:
return j
while 1:
c = self.rawdata[j:j+1]
if not c:
return -1
if c in "'\"":
m = _declstringlit_match(rawdata, j)
if m:
j = m.end()
else:
return -1 # incomplete
elif c == ">":
return j + 1
else:
name, j = self._scan_name(j, declstartpos)
if j < 0:
return j
# Internal -- scan a name token and the new position and the token, or
# return -1 if we've reached the end of the buffer.
def _scan_name(self, i, declstartpos):
rawdata = self.rawdata
n = len(rawdata)
if i == n:
return None, -1
m = _declname_match(rawdata, i)
if m:
s = m.group()
name = s.strip()
if (i + len(s)) == n:
return None, -1 # end of buffer
return name.lower(), m.end()
else:
self.updatepos(declstartpos, i)
self.error("expected name token at %r"
% rawdata[declstartpos:declstartpos+20])
# To be overridden -- handlers for unknown objects
def unknown_decl(self, data):
pass
|
maljac/odoomrp-wip | refs/heads/8.0 | stock_lock_lot/__init__.py | 240 | # -*- coding: utf-8 -*-
##############################################################################
# For copyright and license notices, see __openerp__.py file in root directory
##############################################################################
from . import models
from . import wizard
|
exercism/xpython | refs/heads/master | exercises/rectangles/rectangles_test.py | 1 | import unittest
from rectangles import rectangles
# Tests adapted from `problem-specifications//canonical-data.json`
class RectanglesTest(unittest.TestCase):
def test_no_rows(self):
self.assertEqual(rectangles([]), 0)
def test_no_columns(self):
self.assertEqual(rectangles([""]), 0)
def test_no_rectangles(self):
self.assertEqual(rectangles([" "]), 0)
def test_one_rectangle(self):
self.assertEqual(rectangles(["+-+", "| |", "+-+"]), 1)
def test_two_rectangles_without_shared_parts(self):
self.assertEqual(rectangles([" +-+", " | |", "+-+-+", "| | ", "+-+ "]), 2)
def test_five_rectangles_with_shared_parts(self):
self.assertEqual(rectangles([" +-+", " | |", "+-+-+", "| | |", "+-+-+"]), 5)
def test_rectangle_of_height_1_is_counted(self):
self.assertEqual(rectangles(["+--+", "+--+"]), 1)
def test_rectangle_of_width_1_is_counted(self):
self.assertEqual(rectangles(["++", "||", "++"]), 1)
def test_1x1_square_is_counted(self):
self.assertEqual(rectangles(["++", "++"]), 1)
def test_only_complete_rectangles_are_counted(self):
self.assertEqual(rectangles([" +-+", " |", "+-+-+", "| | -", "+-+-+"]), 1)
def test_rectangles_can_be_of_different_sizes(self):
self.assertEqual(
rectangles(
[
"+------+----+",
"| | |",
"+---+--+ |",
"| | |",
"+---+-------+",
]
),
3,
)
def test_corner_is_required_for_a_rectangle_to_be_complete(self):
self.assertEqual(
rectangles(
[
"+------+----+",
"| | |",
"+------+ |",
"| | |",
"+---+-------+",
]
),
2,
)
def test_large_input_with_many_rectangles(self):
self.assertEqual(
rectangles(
[
"+---+--+----+",
"| +--+----+",
"+---+--+ |",
"| +--+----+",
"+---+--+--+-+",
"+---+--+--+-+",
"+------+ | |",
" +-+",
]
),
60,
)
if __name__ == "__main__":
unittest.main()
|
LiveZenLK/CeygateERP | refs/heads/master | addons/mrp/wizard/__init__.py | 47 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import mrp_product_produce
import change_production_qty
import stock_move
|
brianjgeiger/osf.io | refs/heads/develop | scripts/migration/migrate_share_registration_data.py | 17 | # TODO: Consider rewriting as management command
import logging
import sys
import django
from django.db import transaction
django.setup()
from osf.models import Registration
from scripts import utils as script_utils
from website import settings
from website.app import init_app
from website.project.tasks import update_node_share
logger = logging.getLogger(__name__)
def migrate(dry_run):
assert settings.SHARE_URL, 'SHARE_URL must be set to migrate.'
assert settings.SHARE_API_TOKEN, 'SHARE_API_TOKEN must be set to migrate.'
registrations = Registration.objects.filter(is_deleted=False, is_public=True)
registrations_count = registrations.count()
count = 0
logger.info('Preparing to migrate {} registrations.'.format(registrations_count))
for registration in registrations.iterator():
count += 1
logger.info('{}/{} - {}'.format(count, registrations_count, registration._id))
if not dry_run:
update_node_share(registration)
logger.info('Registration {} was sent to SHARE.'.format(registration._id))
def main():
dry_run = '--dry' in sys.argv
if not dry_run:
script_utils.add_file_logger(logger, __file__)
init_app(set_backends=True, routes=False)
with transaction.atomic():
migrate(dry_run)
if __name__ == '__main__':
main()
|
markodolancic/selenium | refs/heads/master | py/selenium/webdriver/blackberry/__init__.py | 2454 | # Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
|
garyjyao1/ansible | refs/heads/devel | lib/ansible/modules/extras/database/vertica/vertica_configuration.py | 148 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = """
---
module: vertica_configuration
version_added: '2.0'
short_description: Updates Vertica configuration parameters.
description:
- Updates Vertica configuration parameters.
options:
name:
description:
- Name of the parameter to update.
required: true
value:
description:
- Value of the parameter to be set.
required: true
db:
description:
- Name of the Vertica database.
required: false
default: null
cluster:
description:
- Name of the Vertica cluster.
required: false
default: localhost
port:
description:
- Vertica cluster port to connect to.
required: false
default: 5433
login_user:
description:
- The username used to authenticate with.
required: false
default: dbadmin
login_password:
description:
- The password used to authenticate with.
required: false
default: null
notes:
- The default authentication assumes that you are either logging in as or sudo'ing
to the C(dbadmin) account on the host.
- This module uses C(pyodbc), a Python ODBC database adapter. You must ensure
that C(unixODBC) and C(pyodbc) is installed on the host and properly configured.
- Configuring C(unixODBC) for Vertica requires C(Driver = /opt/vertica/lib64/libverticaodbc.so)
to be added to the C(Vertica) section of either C(/etc/odbcinst.ini) or C($HOME/.odbcinst.ini)
and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16)
to be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini).
requirements: [ 'unixODBC', 'pyodbc' ]
author: "Dariusz Owczarek (@dareko)"
"""
EXAMPLES = """
- name: updating load_balance_policy
vertica_configuration: name=failovertostandbyafter value='8 hours'
"""
try:
import pyodbc
except ImportError:
pyodbc_found = False
else:
pyodbc_found = True
class NotSupportedError(Exception):
pass
class CannotDropError(Exception):
pass
# module specific functions
def get_configuration_facts(cursor, parameter_name=''):
facts = {}
cursor.execute("""
select c.parameter_name, c.current_value, c.default_value
from configuration_parameters c
where c.node_name = 'ALL'
and (? = '' or c.parameter_name ilike ?)
""", parameter_name, parameter_name)
while True:
rows = cursor.fetchmany(100)
if not rows:
break
for row in rows:
facts[row.parameter_name.lower()] = {
'parameter_name': row.parameter_name,
'current_value': row.current_value,
'default_value': row.default_value}
return facts
def check(configuration_facts, parameter_name, current_value):
parameter_key = parameter_name.lower()
if current_value and current_value.lower() != configuration_facts[parameter_key]['current_value'].lower():
return False
return True
def present(configuration_facts, cursor, parameter_name, current_value):
parameter_key = parameter_name.lower()
changed = False
if current_value and current_value.lower() != configuration_facts[parameter_key]['current_value'].lower():
cursor.execute("select set_config_parameter('{0}', '{1}')".format(parameter_name, current_value))
changed = True
if changed:
configuration_facts.update(get_configuration_facts(cursor, parameter_name))
return changed
# module logic
def main():
module = AnsibleModule(
argument_spec=dict(
parameter=dict(required=True, aliases=['name']),
value=dict(default=None),
db=dict(default=None),
cluster=dict(default='localhost'),
port=dict(default='5433'),
login_user=dict(default='dbadmin'),
login_password=dict(default=None),
), supports_check_mode = True)
if not pyodbc_found:
module.fail_json(msg="The python pyodbc module is required.")
parameter_name = module.params['parameter']
current_value = module.params['value']
db = ''
if module.params['db']:
db = module.params['db']
changed = False
try:
dsn = (
"Driver=Vertica;"
"Server={0};"
"Port={1};"
"Database={2};"
"User={3};"
"Password={4};"
"ConnectionLoadBalance={5}"
).format(module.params['cluster'], module.params['port'], db,
module.params['login_user'], module.params['login_password'], 'true')
db_conn = pyodbc.connect(dsn, autocommit=True)
cursor = db_conn.cursor()
except Exception, e:
module.fail_json(msg="Unable to connect to database: {0}.".format(e))
try:
configuration_facts = get_configuration_facts(cursor)
if module.check_mode:
changed = not check(configuration_facts, parameter_name, current_value)
else:
try:
changed = present(configuration_facts, cursor, parameter_name, current_value)
except pyodbc.Error, e:
module.fail_json(msg=str(e))
except NotSupportedError, e:
module.fail_json(msg=str(e), ansible_facts={'vertica_configuration': configuration_facts})
except CannotDropError, e:
module.fail_json(msg=str(e), ansible_facts={'vertica_configuration': configuration_facts})
except SystemExit:
# avoid catching this on python 2.4
raise
except Exception, e:
module.fail_json(msg=e)
module.exit_json(changed=changed, parameter=parameter_name, ansible_facts={'vertica_configuration': configuration_facts})
# import ansible utilities
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
|
mjs/juju | refs/heads/master | releasetests/build_package.py | 1 | #!/usr/bin/python
"""Script for building source and binary debian packages."""
from __future__ import print_function
from argparse import ArgumentParser
from collections import namedtuple
import os
import re
import shutil
import subprocess
import sys
__metaclass__ = type
DEBS_NOT_FOUND = 3
# This constant defines the location of the base source package branch.
DEFAULT_SPB = 'lp:~juju-qa/juju-release-tools/packaging-juju-core-default'
DEFAULT_SPB2 = 'lp:~juju-qa/juju-release-tools/packaging-juju-core2-default'
# This constant defines the status of the series supported by CI and Releases.
SUPPORTED_RELEASES = """\
12.04 precise HISTORIC
12.10 quantal HISTORIC
13.10 saucy HISTORIC
14.04 trusty LTS
14.10 utopic HISTORIC
15.04 vivid HISTORIC
15.10 wily HISTORIC
16.04 xenial LTS
16.10 yakkety SUPPORTED
17.04 zesty DEVEL
"""
SourceFile = namedtuple('SourceFile', ['sha256', 'size', 'name', 'path'])
CREATE_LXC_TEMPLATE = """\
set -eu
sudo lxc-create -t ubuntu-cloud -n {container} -- -r {series} -a {arch}
sudo mkdir /var/lib/lxc/{container}/rootfs/workspace
echo "lxc.mount.entry = {build_dir} workspace none bind 0 0" |
sudo tee -a /var/lib/lxc/{container}/config
"""
BUILD_DEB_TEMPLATE = """\
sudo lxc-attach -n {container} -- bash <<"EOT"
set -eu
echo "\nInstalling common build deps.\n"
cd workspace
# Wait for Cloud-init to complete to indicate the machine is in a ready
# state with network to do work,
while ! tail -1 /var/log/cloud-init-output.log | \
egrep -q -i 'Cloud-init .* finished'; do
echo "Waiting for Cloud-init to finish."
sleep 5
done
set +e
# The cloud-init breaks arm64, ppc64el. s390x /etc/apt/sources.list.
if [[ $(dpkg --print-architecture) =~ ^(arm64|ppc64el|s390x)$ ]]; then
sed -i \
-e 's,archive.ubuntu.com/ubuntu,ports.ubuntu.com/ubuntu-ports,' \
/etc/apt/sources.list
fi
# Adding the ppa directly to sources.list without the archive key
# requires apt to be run with --force-yes
echo "{ppa}" >> /etc/apt/sources.list
export DEBIAN_FRONTEND=noninteractive
apt-get update
apt-get install -y --force-yes build-essential devscripts equivs
EOT
sudo lxc-attach -n {container} -- bash <<"EOT"
set -eux
echo "\nInstalling build deps from dsc.\n"
cd workspace
export DEBIAN_FRONTEND=noninteractive
mk-build-deps -i --tool 'apt-get --yes --force-yes' *.dsc
EOT
sudo lxc-attach -n {container} -- bash <<"EOT"
set -eux
echo "\nBuilding the packages.\n"
cd workspace
rm *build-deps*.deb || true
dpkg-source -x *.dsc
cd $(basename *.orig.tar.gz .orig.tar.gz | tr _ -)
dpkg-buildpackage -us -uc
EOT
"""
CREATE_SPB_TEMPLATE = """\
set -eux
bzr branch {branch} {spb}
cd {spb}
bzr import-upstream {version} {tarfile_path}
bzr merge . -r upstream-{version}
bzr commit -m "Merged upstream-{version}."
"""
BUILD_SOURCE_TEMPLATE = """\
set -eux
bzr branch {spb} {source}
cd {source}
dch --newversion {ubuntu_version} -D {series} --force-distribution "{message}"
debcommit
bzr bd -S -- -us -uc
"""
DEBSIGN_TEMPLATE = 'debsign -p {gpgcmd} *.changes'
UBUNTU_VERSION_TEMPLATE_EPOCH = \
'{epoch}:{version}-0ubuntu1~{release}.{upatch}~juju1'
DAILY_VERSION_TEMPLATE_EPOCH = \
'{epoch}:{version}-{date}+{build}+{revid}~{release}'
UBUNTU_VERSION_TEMPLATE = '{version}-0ubuntu1~{release}.{upatch}~juju1'
DAILY_VERSION_TEMPLATE = '{version}-{date}+{build}+{revid}~{release}'
VERSION_PATTERN = re.compile(r'(\d+)\.(\d+)\.(\d+)')
Series = namedtuple('Series', ['version', 'name', 'status'])
class _JujuSeries:
LIVING_STATUSES = ('DEVEL', 'SUPPORTED', 'LTS')
def __init__(self):
self.all = {}
for line in SUPPORTED_RELEASES.splitlines():
series = Series(*line.split())
self.all[series.name] = series
def get_devel_version(self):
for series in self.all.values():
if series.status == 'DEVEL':
return series.version
else:
raise AssertionError(
"SUPPORTED_RELEASES is missing the DEVEL series")
def get_living_names(self):
return sorted(s.name for s in self.all.values()
if s.status in self.LIVING_STATUSES)
def get_name(self, version):
for series in self.all.values():
if series.version == version:
return series.name
else:
raise KeyError("'%s' is not a known series" % version)
def get_name_from_package_version(self, package_version):
"""Return the series name associated with the package version.
The series is matched to the series version commonly embedded in
backported package versions. Official juju package versions always
contain the series version to indicate the tool-chain used to build.
Ubuntu devel packages do not have series versions, they cannot be
matched to a series. As Ubuntu packages are not built with ideal rules,
they are not suitable for building agents.
"""
for series in self.all.values():
if series.version in package_version:
return series.name
return None
def get_version(self, name):
return self.all[name].version
juju_series = _JujuSeries()
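# Illustrative lookups (editor's note; values derived from the table above):
#   juju_series.get_name('16.04')  -> 'xenial'
#   juju_series.get_name_from_package_version(
#       '1.25.5-0ubuntu1~16.04.1~juju1')  -> 'xenial'
# Package versions without an embedded series version return None.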
def parse_dsc(dsc_path, verbose=False):
"""Return the source files need to build a binary package."""
there = os.path.dirname(dsc_path)
dsc_name = os.path.basename(dsc_path)
dcs_source_file = SourceFile(None, None, dsc_name, dsc_path)
files = [dcs_source_file]
with open(dsc_path) as f:
content = f.read()
found = False
for line in content.splitlines():
if found and line.startswith(' '):
data = line.split()
data.append(os.path.join(there, data[2]))
files.append(SourceFile(*data))
if verbose:
print("Found %s" % files[-1].name)
elif found:
# All files were found.
break
if not found and line.startswith('Checksums-Sha256:'):
found = True
return files
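# Hedged sketch of the dsc layout parse_dsc expects (checksums and sizes
# invented for illustration):
#   Checksums-Sha256:
#    3f8a... 1234 juju-core_2.0.0.orig.tar.gz
#    9b2c... 2048 juju-core_2.0.0-0ubuntu1.debian.tar.xz
# Each indented line under Checksums-Sha256 becomes a
# SourceFile(sha256, size, name, path).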
def setup_local(location, series, arch, source_files, verbose=False):
"""Create a directory to build binaries in.
    The directory has the source files required to build binaries.
"""
build_dir = os.path.abspath(
os.path.join(location, 'juju-build-{}-{}'.format(series, arch)))
if verbose:
print('Creating %s' % build_dir)
os.makedirs(build_dir)
for sf in source_files:
dest_path = os.path.join(build_dir, sf.name)
if verbose:
print('Copying %s to %s' % (sf.name, build_dir))
shutil.copyfile(sf.path, dest_path)
return build_dir
def setup_lxc(series, arch, build_dir, verbose=False):
"""Create an LXC container to build binaries.
The local build_dir with the source files is bound to the container.
"""
container = '{}-{}'.format(series, arch)
lxc_script = CREATE_LXC_TEMPLATE.format(
container=container, series=series, arch=arch, build_dir=build_dir)
if verbose:
print('Creating %s container' % container)
output = subprocess.check_output([lxc_script], shell=True)
if verbose:
print(output)
return container
def build_in_lxc(container, build_dir, ppa=None, verbose=False):
"""Build the binaries from the source files in the container."""
returncode = 1
if ppa:
path = ppa.split(':')[1]
series = container.split('-')[0]
ppa = 'deb http://ppa.launchpad.net/{}/ubuntu {} main'.format(
path, series)
else:
ppa = '# No PPA added.'
# The work in the container runs as a different user. Care is needed to
# ensure permissions and ownership are correct before and after the build.
os.chmod(build_dir, 0o777)
subprocess.check_call(['sudo', 'lxc-start', '-d', '-n', container])
try:
build_script = BUILD_DEB_TEMPLATE.format(container=container, ppa=ppa)
returncode = subprocess.call([build_script], shell=True)
finally:
subprocess.check_call(['sudo', 'lxc-stop', '-n', container])
user = os.environ.get('USER', 'jenkins')
subprocess.check_call(['sudo', 'chown', '-R', user, build_dir])
os.chmod(build_dir, 0o775)
return returncode
def teardown_lxc(container, verbose=False):
"""Destroy the lxc container."""
if verbose:
print('Deleting the lxc container %s' % container)
subprocess.check_call(['sudo', 'lxc-destroy', '-n', container])
def move_debs(build_dir, location, verbose=False):
"""Move the debs from the build_dir to the location dir."""
found = False
files = [f for f in os.listdir(build_dir) if f.endswith('.deb')]
for file_name in files:
file_path = os.path.join(build_dir, file_name)
dest_path = os.path.join(location, file_name)
if verbose:
print("Found %s" % file_name)
shutil.move(file_path, dest_path)
found = True
return found
def build_binary(dsc_path, location, series, arch, ppa=None, verbose=False):
"""Build binary debs from a dsc file."""
# If location is remote, setup remote location and run.
source_files = parse_dsc(dsc_path, verbose=verbose)
build_dir = setup_local(
location, series, arch, source_files, verbose=verbose)
container = setup_lxc(series, arch, build_dir, verbose=verbose)
try:
build_in_lxc(container, build_dir, ppa=ppa, verbose=verbose)
finally:
teardown_lxc(container, verbose=False)
found = move_debs(build_dir, location, verbose=verbose)
if not found:
return DEBS_NOT_FOUND
return 0
def create_source_package_branch(build_dir, version, tarfile, branch):
"""Create a new source package branch with the imported release tarfile.
The new source package can be pushed to a permanent location if it will
be used as the base for future packages.
:param build_dir: The root directory to create the new branch in.
:param version: The upstream version, which is not always the version
in the tarfile name.
:param tarfile: The path to the tarfile to import.
:param branch: The base source package branch to fork and import into.
:return: The path to the source package branch.
"""
spb = os.path.join(build_dir, 'spb')
tarfile_path = os.path.join(build_dir, tarfile)
script = CREATE_SPB_TEMPLATE.format(
branch=branch, spb=spb, tarfile_path=tarfile_path, version=version)
subprocess.check_call([script], shell=True, cwd=build_dir)
return spb
def make_ubuntu_version(series, version, upatch=1,
date=None, build=None, revid=None, epoch=None):
"""Return an Ubuntu package version.
:param series: The series codename.
:param version: The upstream version.
    :param upatch: The package patch number for cases where the packaging
        rules are updated and the package rebuilt.
:param date: The date of the build.
:param build: The build number in CI.
:param revid: The revid hash of the source.
:param epoch: The epoch to pass in version name
:return: An Ubuntu version string.
"""
release = juju_series.get_version(series)
# if daily params are set, we make daily build
if all([date, build, revid]):
if epoch:
return DAILY_VERSION_TEMPLATE_EPOCH.format(
epoch=epoch, version=version, release=release, upatch=upatch,
date=date, build=build, revid=revid)
return DAILY_VERSION_TEMPLATE.format(
version=version, release=release, upatch=upatch,
date=date, build=build, revid=revid)
else:
if epoch:
return UBUNTU_VERSION_TEMPLATE_EPOCH.format(
epoch=epoch, version=version, release=release, upatch=upatch)
return UBUNTU_VERSION_TEMPLATE.format(
version=version, release=release, upatch=upatch)
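# Illustrative results (editor's note; derived from the templates above):
#   make_ubuntu_version('xenial', '2.0.0')
#       -> '2.0.0-0ubuntu1~16.04.1~juju1'
#   make_ubuntu_version('xenial', '2.0.0', date='20160901', build='1234',
#                       revid='d0dfdb6')
#       -> '2.0.0-20160901+1234+d0dfdb6~16.04'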
def make_changelog_message(version, bugs=None):
"""Return a changelog message for the version.
:param version: The upstream version.
:param bugs: A list of Lp bug numbers or None. They will be formatted
for the changelog.
:return: a changelog message.
"""
match = VERSION_PATTERN.match(version)
if match is None:
message = 'New upstream devel release.'
elif match.group(3) == '0':
message = 'New upstream stable release.'
else:
message = 'New upstream stable point release.'
if bugs:
fixes = ', '.join(['LP #%s' % b for b in bugs])
message = '%s (%s)' % (message, fixes)
return message
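# Illustrative results (editor's note; follows VERSION_PATTERN above):
#   make_changelog_message('2.0.0')
#       -> 'New upstream stable release.'
#   make_changelog_message('2.0.1', bugs=['11111'])
#       -> 'New upstream stable point release. (LP #11111)'
#   make_changelog_message('2.0-beta1')
#       -> 'New upstream devel release.'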
def make_deb_shell_env(debemail, debfullname):
"""Return a replacement environ suitable for DEB building.
:param debemail: The email address to attribute the changelog entry to.
:param debfullname: The name to attribute the changelog entry to.
:return: A modified copy of os.environ that can be passed to subprocesses.
"""
env = dict(os.environ)
env['DEBEMAIL'] = debemail
env['DEBFULLNAME'] = debfullname
return env
def sign_source_package(source_dir, gpgcmd, debemail, debfullname):
"""Sign a source package.
The debemail and debfullname must match the identity used in the changelog
and the changes file.
:param source_dir: The source package directory.
:param gpgcmd: The path to a gpg signing command to sign with.
:param debemail: The email address to attribute the changelog entry to.
:param debfullname: The name to attribute the changelog entry to.
"""
env = make_deb_shell_env(debemail, debfullname)
script = DEBSIGN_TEMPLATE.format(gpgcmd=gpgcmd)
subprocess.check_call([script], shell=True, cwd=source_dir, env=env)
def create_source_package(source_dir, spb, series, version,
upatch='1', bugs=None, gpgcmd=None, debemail=None,
debfullname=None, verbose=False,
date=None, build=None, revid=None, epoch=None):
"""Create a series source package from a source package branch.
The new source package can be used to create series source packages.
:param source_dir: The source package directory.
:param spb: The path (or location) of the source package branch to create
the new source package with.
:param series: The series codename.
:param version: The upstream version.
    :param upatch: The package patch number for cases where the packaging
        rules are updated and the package rebuilt.
:param bugs: A list of Lp bug numbers for the changelog or None.
:param gpgcmd: The path to a gpg signing command to sign with.
Source packages will be signed when gpgcmd is not None.
:param debemail: The email address to attribute the changelog entry to.
:param debfullname: The name to attribute the changelog entry to.
:param verbose: Increase the information about the work performed.
:param date: The date of the build.
:param build: The build number in CI.
:param revid: The revid hash of the source.
:param epoch: The epoch to pass in version name
"""
ubuntu_version = make_ubuntu_version(series, version, upatch,
date, build, revid, epoch)
message = make_changelog_message(version, bugs=bugs)
source = os.path.join(source_dir, 'source')
env = make_deb_shell_env(debemail, debfullname)
script = BUILD_SOURCE_TEMPLATE.format(
spb=spb, source=source, series=series, ubuntu_version=ubuntu_version,
message=message)
subprocess.check_call([script], shell=True, cwd=source_dir, env=env)
if gpgcmd:
sign_source_package(source_dir, gpgcmd, debemail, debfullname)
def build_source(tarfile_path, location, series, bugs,
debemail=None, debfullname=None, gpgcmd=None,
branch=None, upatch=1, verbose=False,
date=None, build=None, revid=None, epoch=None):
"""Build one or more series source packages from a new release tarfile.
    The packages are unsigned by default; if the path to a gpgcmd is
    provided, the dsc file will be signed.
    :param tarfile_path: The path to the upstream tarfile to import.
:param location: The path to the directory to build packages in.
:param series: The series codename or list of series codenames.
:param bugs: A list of Lp bug numbers the release fixes.
:param gpgcmd: The path to a gpg signing command to sign with.
Source packages will be signed when gpgcmd is not None.
:param debemail: The email address to attribute the changelog entry to.
:param debfullname: The name to attribute the changelog entry to.
:param branch: The path (or location) of the source package branch to
create the new source package with.
    :param upatch: The package patch number for cases where the packaging
        rules are updated and the package rebuilt.
    :param verbose: Increase the verbosity of output.
:param date: The date of the build.
:param build: The build number in CI.
:param revid: The revid hash of the source.
:param epoch: The epoch to pass in version name
:return: the exit code (which is 0 or else an exception was raised).
"""
if not isinstance(series, list):
series = [series]
tarfile_name = os.path.basename(tarfile_path)
version = tarfile_name.split('_')[-1].replace('.tar.gz', '')
if all([date, build, revid]):
daily_version = '{}~{}~{}~{}'.format(version, date, build, revid)
daily_tarfile_name = tarfile_name.replace(version, daily_version)
tarfile_dir = os.path.dirname(tarfile_path)
daily_tarfile_path = os.path.join(tarfile_dir, daily_tarfile_name)
        os.rename(tarfile_path, daily_tarfile_path)
tarfile_path = daily_tarfile_path
tarfile_name = daily_tarfile_name
version = daily_version
files = [SourceFile(None, None, tarfile_name, tarfile_path)]
spb_dir = setup_local(
location, 'any', 'all', files, verbose=verbose)
spb = create_source_package_branch(spb_dir, version, tarfile_name, branch)
for a_series in series:
build_dir = setup_local(location, a_series, 'all', [], verbose=verbose)
create_source_package(
build_dir, spb, a_series, version,
upatch=upatch, bugs=bugs, gpgcmd=gpgcmd,
debemail=debemail, debfullname=debfullname, verbose=verbose,
date=date, build=build, revid=revid, epoch=epoch)
return 0
def print_series_info(package_version=None):
exitcode = 1
if package_version:
version = juju_series.get_name_from_package_version(package_version)
if version:
print(version)
return 0
return exitcode
def main(argv):
"""Execute the commands from the command line."""
exitcode = 0
args = get_args(argv)
if args.command == 'source':
exitcode = build_source(
args.tar_file, args.location, args.series, args.bugs,
debemail=args.debemail, debfullname=args.debfullname,
gpgcmd=args.gpgcmd, branch=args.branch, upatch=args.upatch,
verbose=args.verbose, date=args.date, build=args.build,
revid=args.revid, epoch=args.epoch)
elif args.command == 'binary':
exitcode = build_binary(
args.dsc, args.location, args.series, args.arch,
ppa=args.ppa, verbose=args.verbose)
elif args.command == 'print':
exitcode = print_series_info(
package_version=args.series_name_from_package_version)
return exitcode
def get_args(argv=None):
"""Return the arguments for this program."""
    parser = ArgumentParser(description="Build debian packages.")
parser.add_argument(
"-v", "--verbose", action="store_true", default=False,
help="Increase the verbosity of the output")
subparsers = parser.add_subparsers(help='sub-command help', dest="command")
src_parser = subparsers.add_parser('source', help='Build source packages')
src_parser.add_argument(
'--debemail', default=os.environ.get("DEBEMAIL"),
help="Your email address; Environment: DEBEMAIL.")
src_parser.add_argument(
'--debfullname', default=os.environ.get("DEBFULLNAME"),
help="Your full name; Environment: DEBFULLNAME.")
src_parser.add_argument(
'--epoch', default=None, help="The epoch for package version")
src_parser.add_argument(
'--gpgcmd', default=None,
help="Path to a gpg signing command to make signed packages.")
src_parser.add_argument(
'--branch', help="The base/previous source package branch.")
src_parser.add_argument(
'--upatch', default='1', help="The Ubuntu patch number.")
src_parser.add_argument('tar_file', help="The release tar file.")
src_parser.add_argument("location", help="The location to build in.")
src_parser.add_argument(
'series', help="The destination Ubuntu release or LIVING for all.")
src_parser.add_argument(
'--date', default=None, help="A datestamp to apply to the build")
src_parser.add_argument(
'--build', default=None, help="The build number from CI")
src_parser.add_argument(
'--revid', default=None, help="The short hash for revid")
src_parser.add_argument(
'bugs', nargs='*', help="Bugs this version will fix in the release.")
bin_parser = subparsers.add_parser('binary', help='Build a binary package')
bin_parser.add_argument(
'--ppa', default=None, help="The PPA that provides package deps.")
bin_parser.add_argument("dsc", help="The dsc file to build")
bin_parser.add_argument("location", help="The location to build in.")
bin_parser.add_argument("series", help="The series to build in.")
bin_parser.add_argument("arch", help="The dpkg architecture to build in.")
print_parser = subparsers.add_parser('print', help='Print series info')
print_parser.add_argument(
'--series-name-from-package-version',
help="Print the series name associated with the package version.")
args = parser.parse_args(argv[1:])
if getattr(args, 'series', None) and args.series == 'LIVING':
args.series = juju_series.get_living_names()
if args.command == 'source' and args.branch is None:
tarfile_name = os.path.basename(args.tar_file)
version = tarfile_name.split('_')[-1].replace('.tar.gz', '')
if version.startswith('2.'):
args.branch = DEFAULT_SPB2
else:
args.branch = DEFAULT_SPB
return args
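# Example invocations (editor's sketch; file names are hypothetical):
#   build_package.py source juju-core_2.0.0.tar.gz ./work LIVING 11111
#   build_package.py binary ./work/juju-core_2.0.0-0ubuntu1~16.04.1~juju1.dsc \
#       ./work xenial amd64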
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
DailyActie/Surrogate-Model | refs/heads/master | 01-codes/numpy-master/numpy/ma/core.py | 1 | """
numpy.ma : a package to handle missing or invalid values.
This package was initially written for numarray by Paul F. Dubois
at Lawrence Livermore National Laboratory.
In 2006, the package was completely rewritten by Pierre Gerard-Marchant
(University of Georgia) to make the MaskedArray class a subclass of ndarray,
and to improve support of structured arrays.
Copyright 1999, 2000, 2001 Regents of the University of California.
Released for unlimited redistribution.
* Adapted for numpy_core 2005 by Travis Oliphant and (mainly) Paul Dubois.
* Subclassing of the base `ndarray` 2006 by Pierre Gerard-Marchant
(pgmdevlist_AT_gmail_DOT_com)
* Improvements suggested by Reggie Dugard (reggie_AT_merfinllc_DOT_com)
.. moduleauthor:: Pierre Gerard-Marchant
"""
# pylint: disable-msg=E1002
from __future__ import division, absolute_import, print_function
import sys
import warnings
from functools import reduce
if sys.version_info[0] >= 3:
import builtins
else:
import __builtin__ as builtins
import numpy as np
import numpy.core.umath as umath
import numpy.core.numerictypes as ntypes
from numpy import ndarray, amax, amin, iscomplexobj, bool_, _NoValue
from numpy import array as narray
from numpy.lib.function_base import angle
from numpy.compat import (
getargspec, formatargspec, long, basestring, unicode, bytes, sixu
)
from numpy import expand_dims as n_expand_dims
if sys.version_info[0] >= 3:
import pickle
else:
import cPickle as pickle
__all__ = [
'MAError', 'MaskError', 'MaskType', 'MaskedArray', 'abs', 'absolute',
'add', 'all', 'allclose', 'allequal', 'alltrue', 'amax', 'amin',
'angle', 'anom', 'anomalies', 'any', 'append', 'arange', 'arccos',
'arccosh', 'arcsin', 'arcsinh', 'arctan', 'arctan2', 'arctanh',
'argmax', 'argmin', 'argsort', 'around', 'array', 'asanyarray',
'asarray', 'bitwise_and', 'bitwise_or', 'bitwise_xor', 'bool_', 'ceil',
'choose', 'clip', 'common_fill_value', 'compress', 'compressed',
'concatenate', 'conjugate', 'convolve', 'copy', 'correlate', 'cos', 'cosh',
'count', 'cumprod', 'cumsum', 'default_fill_value', 'diag', 'diagonal',
'diff', 'divide', 'dump', 'dumps', 'empty', 'empty_like', 'equal', 'exp',
'expand_dims', 'fabs', 'filled', 'fix_invalid', 'flatten_mask',
'flatten_structured_array', 'floor', 'floor_divide', 'fmod',
'frombuffer', 'fromflex', 'fromfunction', 'getdata', 'getmask',
'getmaskarray', 'greater', 'greater_equal', 'harden_mask', 'hypot',
'identity', 'ids', 'indices', 'inner', 'innerproduct', 'isMA',
'isMaskedArray', 'is_mask', 'is_masked', 'isarray', 'left_shift',
'less', 'less_equal', 'load', 'loads', 'log', 'log10', 'log2',
'logical_and', 'logical_not', 'logical_or', 'logical_xor', 'make_mask',
'make_mask_descr', 'make_mask_none', 'mask_or', 'masked',
'masked_array', 'masked_equal', 'masked_greater',
'masked_greater_equal', 'masked_inside', 'masked_invalid',
'masked_less', 'masked_less_equal', 'masked_not_equal',
'masked_object', 'masked_outside', 'masked_print_option',
'masked_singleton', 'masked_values', 'masked_where', 'max', 'maximum',
'maximum_fill_value', 'mean', 'min', 'minimum', 'minimum_fill_value',
'mod', 'multiply', 'mvoid', 'ndim', 'negative', 'nomask', 'nonzero',
'not_equal', 'ones', 'outer', 'outerproduct', 'power', 'prod',
'product', 'ptp', 'put', 'putmask', 'rank', 'ravel', 'remainder',
'repeat', 'reshape', 'resize', 'right_shift', 'round', 'round_',
'set_fill_value', 'shape', 'sin', 'sinh', 'size', 'soften_mask',
'sometrue', 'sort', 'sqrt', 'squeeze', 'std', 'subtract', 'sum',
'swapaxes', 'take', 'tan', 'tanh', 'trace', 'transpose', 'true_divide',
'var', 'where', 'zeros',
]
MaskType = np.bool_
nomask = MaskType(0)
class MaskedArrayFutureWarning(FutureWarning):
pass
def doc_note(initialdoc, note):
"""
Adds a Notes section to an existing docstring.
"""
if initialdoc is None:
return
if note is None:
return initialdoc
newdoc = """
%s
Notes
-----
%s
"""
return newdoc % (initialdoc, note)
def get_object_signature(obj):
"""
Get the signature from obj
"""
try:
sig = formatargspec(*getargspec(obj))
except TypeError:
sig = ''
return sig
###############################################################################
# Exceptions #
###############################################################################
class MAError(Exception):
"""
Class for masked array related errors.
"""
pass
class MaskError(MAError):
"""
Class for mask related errors.
"""
pass
###############################################################################
# Filling options #
###############################################################################
# b: boolean - c: complex - f: floats - i: integer - O: object - S: string
default_filler = {'b': True,
'c': 1.e20 + 0.0j,
'f': 1.e20,
'i': 999999,
'O': '?',
'S': b'N/A',
'u': 999999,
'V': '???',
'U': sixu('N/A')
}
# Add datetime64 and timedelta64 types
for v in ["Y", "M", "W", "D", "h", "m", "s", "ms", "us", "ns", "ps",
"fs", "as"]:
default_filler["M8[" + v + "]"] = np.datetime64("NaT", v)
default_filler["m8[" + v + "]"] = np.timedelta64("NaT", v)
max_filler = ntypes._minvals
max_filler.update([(k, -np.inf) for k in [np.float32, np.float64]])
min_filler = ntypes._maxvals
min_filler.update([(k, +np.inf) for k in [np.float32, np.float64]])
if 'float128' in ntypes.typeDict:
max_filler.update([(np.float128, -np.inf)])
min_filler.update([(np.float128, +np.inf)])
def default_fill_value(obj):
"""
Return the default fill value for the argument object.
The default filling value depends on the datatype of the input
array or the type of the input scalar:
======== ========
datatype default
======== ========
bool True
int 999999
float 1.e20
complex 1.e20+0j
object '?'
string 'N/A'
======== ========
Parameters
----------
obj : ndarray, dtype or scalar
The array data-type or scalar for which the default fill value
is returned.
Returns
-------
fill_value : scalar
The default fill value.
Examples
--------
>>> np.ma.default_fill_value(1)
999999
>>> np.ma.default_fill_value(np.array([1.1, 2., np.pi]))
1e+20
>>> np.ma.default_fill_value(np.dtype(complex))
(1e+20+0j)
"""
if hasattr(obj, 'dtype'):
defval = _check_fill_value(None, obj.dtype)
elif isinstance(obj, np.dtype):
if obj.subdtype:
defval = default_filler.get(obj.subdtype[0].kind, '?')
elif obj.kind in 'Mm':
defval = default_filler.get(obj.str[1:], '?')
else:
defval = default_filler.get(obj.kind, '?')
elif isinstance(obj, float):
defval = default_filler['f']
elif isinstance(obj, int) or isinstance(obj, long):
defval = default_filler['i']
elif isinstance(obj, bytes):
defval = default_filler['S']
elif isinstance(obj, unicode):
defval = default_filler['U']
elif isinstance(obj, complex):
defval = default_filler['c']
else:
defval = default_filler['O']
return defval
def _recursive_extremum_fill_value(ndtype, extremum):
names = ndtype.names
if names:
deflist = []
for name in names:
fval = _recursive_extremum_fill_value(ndtype[name], extremum)
deflist.append(fval)
return tuple(deflist)
return extremum[ndtype]
def minimum_fill_value(obj):
"""
Return the maximum value that can be represented by the dtype of an object.
This function is useful for calculating a fill value suitable for
taking the minimum of an array with a given dtype.
Parameters
----------
obj : ndarray or dtype
        An object that can be queried for its numeric type.
Returns
-------
val : scalar
The maximum representable value.
Raises
------
TypeError
If `obj` isn't a suitable numeric type.
See Also
--------
maximum_fill_value : The inverse function.
set_fill_value : Set the filling value of a masked array.
MaskedArray.fill_value : Return current fill value.
Examples
--------
>>> import numpy.ma as ma
>>> a = np.int8()
>>> ma.minimum_fill_value(a)
127
>>> a = np.int32()
>>> ma.minimum_fill_value(a)
2147483647
An array of numeric data can also be passed.
>>> a = np.array([1, 2, 3], dtype=np.int8)
>>> ma.minimum_fill_value(a)
127
>>> a = np.array([1, 2, 3], dtype=np.float32)
>>> ma.minimum_fill_value(a)
inf
"""
errmsg = "Unsuitable type for calculating minimum."
if hasattr(obj, 'dtype'):
return _recursive_extremum_fill_value(obj.dtype, min_filler)
elif isinstance(obj, float):
return min_filler[ntypes.typeDict['float_']]
elif isinstance(obj, int):
return min_filler[ntypes.typeDict['int_']]
elif isinstance(obj, long):
return min_filler[ntypes.typeDict['uint']]
elif isinstance(obj, np.dtype):
return min_filler[obj]
else:
raise TypeError(errmsg)
def maximum_fill_value(obj):
"""
Return the minimum value that can be represented by the dtype of an object.
This function is useful for calculating a fill value suitable for
taking the maximum of an array with a given dtype.
Parameters
----------
obj : {ndarray, dtype}
        An object that can be queried for its numeric type.
Returns
-------
val : scalar
The minimum representable value.
Raises
------
TypeError
If `obj` isn't a suitable numeric type.
See Also
--------
minimum_fill_value : The inverse function.
set_fill_value : Set the filling value of a masked array.
MaskedArray.fill_value : Return current fill value.
Examples
--------
>>> import numpy.ma as ma
>>> a = np.int8()
>>> ma.maximum_fill_value(a)
-128
>>> a = np.int32()
>>> ma.maximum_fill_value(a)
-2147483648
An array of numeric data can also be passed.
>>> a = np.array([1, 2, 3], dtype=np.int8)
>>> ma.maximum_fill_value(a)
-128
>>> a = np.array([1, 2, 3], dtype=np.float32)
>>> ma.maximum_fill_value(a)
-inf
"""
errmsg = "Unsuitable type for calculating maximum."
if hasattr(obj, 'dtype'):
return _recursive_extremum_fill_value(obj.dtype, max_filler)
elif isinstance(obj, float):
return max_filler[ntypes.typeDict['float_']]
elif isinstance(obj, int):
return max_filler[ntypes.typeDict['int_']]
elif isinstance(obj, long):
return max_filler[ntypes.typeDict['uint']]
elif isinstance(obj, np.dtype):
return max_filler[obj]
else:
raise TypeError(errmsg)
def _recursive_set_default_fill_value(dt):
"""
Create the default fill value for a structured dtype.
Parameters
----------
dt: dtype
The structured dtype for which to create the fill value.
Returns
-------
val: tuple
A tuple of values corresponding to the default structured fill value.
"""
deflist = []
for name in dt.names:
currenttype = dt[name]
if currenttype.subdtype:
currenttype = currenttype.subdtype[0]
if currenttype.names:
deflist.append(
tuple(_recursive_set_default_fill_value(currenttype)))
else:
deflist.append(default_fill_value(currenttype))
return tuple(deflist)
def _recursive_set_fill_value(fillvalue, dt):
"""
Create a fill value for a structured dtype.
Parameters
----------
fillvalue: scalar or array_like
Scalar or array representing the fill value. If it is of shorter
length than the number of fields in dt, it will be resized.
dt: dtype
The structured dtype for which to create the fill value.
Returns
-------
val: tuple
A tuple of values corresponding to the structured fill value.
"""
fillvalue = np.resize(fillvalue, len(dt.names))
output_value = []
for (fval, name) in zip(fillvalue, dt.names):
cdtype = dt[name]
if cdtype.subdtype:
cdtype = cdtype.subdtype[0]
if cdtype.names:
output_value.append(tuple(_recursive_set_fill_value(fval, cdtype)))
else:
output_value.append(np.array(fval, dtype=cdtype).item())
return tuple(output_value)
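# Illustrative (editor's note): a scalar fill value is resized across all
# fields, e.g.
#   _recursive_set_fill_value(-999, np.dtype([('a', int), ('b', float)]))
#   -> (-999, -999.0)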
def _check_fill_value(fill_value, ndtype):
"""
Private function validating the given `fill_value` for the given dtype.
If fill_value is None, it is set to the default corresponding to the dtype
if this latter is standard (no fields). If the datatype is flexible (named
fields), fill_value is set to a tuple whose elements are the default fill
values corresponding to each field.
If fill_value is not None, its value is forced to the given dtype.
"""
ndtype = np.dtype(ndtype)
fields = ndtype.fields
if fill_value is None:
if fields:
fill_value = np.array(_recursive_set_default_fill_value(ndtype),
dtype=ndtype)
else:
fill_value = default_fill_value(ndtype)
elif fields:
fdtype = [(_[0], _[1]) for _ in ndtype.descr]
if isinstance(fill_value, (ndarray, np.void)):
try:
fill_value = np.array(fill_value, copy=False, dtype=fdtype)
except ValueError:
err_msg = "Unable to transform %s to dtype %s"
raise ValueError(err_msg % (fill_value, fdtype))
else:
fill_value = np.asarray(fill_value, dtype=object)
fill_value = np.array(_recursive_set_fill_value(fill_value, ndtype),
dtype=ndtype)
else:
if isinstance(fill_value, basestring) and (ndtype.char not in 'OSVU'):
err_msg = "Cannot set fill value of string with array of dtype %s"
raise TypeError(err_msg % ndtype)
else:
# In case we want to convert 1e20 to int.
try:
fill_value = np.array(fill_value, copy=False, dtype=ndtype)
except OverflowError:
# Raise TypeError instead of OverflowError. OverflowError
# is seldom used, and the real problem here is that the
# passed fill_value is not compatible with the ndtype.
err_msg = "Fill value %s overflows dtype %s"
raise TypeError(err_msg % (fill_value, ndtype))
return np.array(fill_value)
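# Illustrative (editor's note): with fill_value=None and a structured dtype
# such as [('a', float), ('b', int)], the defaults are assembled field by
# field, so the result holds (1e+20, 999999).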
def set_fill_value(a, fill_value):
"""
Set the filling value of a, if a is a masked array.
This function changes the fill value of the masked array `a` in place.
If `a` is not a masked array, the function returns silently, without
doing anything.
Parameters
----------
a : array_like
Input array.
fill_value : dtype
Filling value. A consistency test is performed to make sure
the value is compatible with the dtype of `a`.
Returns
-------
None
Nothing returned by this function.
See Also
--------
maximum_fill_value : Return the default fill value for a dtype.
MaskedArray.fill_value : Return current fill value.
MaskedArray.set_fill_value : Equivalent method.
Examples
--------
>>> import numpy.ma as ma
>>> a = np.arange(5)
>>> a
array([0, 1, 2, 3, 4])
>>> a = ma.masked_where(a < 3, a)
>>> a
masked_array(data = [-- -- -- 3 4],
mask = [ True True True False False],
fill_value=999999)
>>> ma.set_fill_value(a, -999)
>>> a
masked_array(data = [-- -- -- 3 4],
mask = [ True True True False False],
fill_value=-999)
Nothing happens if `a` is not a masked array.
>>> a = range(5)
>>> a
[0, 1, 2, 3, 4]
>>> ma.set_fill_value(a, 100)
>>> a
[0, 1, 2, 3, 4]
>>> a = np.arange(5)
>>> a
array([0, 1, 2, 3, 4])
>>> ma.set_fill_value(a, 100)
>>> a
array([0, 1, 2, 3, 4])
"""
if isinstance(a, MaskedArray):
a.set_fill_value(fill_value)
return
def get_fill_value(a):
"""
Return the filling value of a, if any. Otherwise, returns the
default filling value for that type.
"""
if isinstance(a, MaskedArray):
result = a.fill_value
else:
result = default_fill_value(a)
return result
def common_fill_value(a, b):
"""
Return the common filling value of two masked arrays, if any.
If ``a.fill_value == b.fill_value``, return the fill value,
otherwise return None.
Parameters
----------
a, b : MaskedArray
The masked arrays for which to compare fill values.
Returns
-------
fill_value : scalar or None
The common fill value, or None.
Examples
--------
>>> x = np.ma.array([0, 1.], fill_value=3)
>>> y = np.ma.array([0, 1.], fill_value=3)
>>> np.ma.common_fill_value(x, y)
3.0
"""
t1 = get_fill_value(a)
t2 = get_fill_value(b)
if t1 == t2:
return t1
return None
def filled(a, fill_value=None):
"""
Return input as an array with masked data replaced by a fill value.
If `a` is not a `MaskedArray`, `a` itself is returned.
If `a` is a `MaskedArray` and `fill_value` is None, `fill_value` is set to
``a.fill_value``.
Parameters
----------
a : MaskedArray or array_like
An input object.
fill_value : scalar, optional
Filling value. Default is None.
Returns
-------
a : ndarray
The filled array.
See Also
--------
compressed
Examples
--------
>>> x = np.ma.array(np.arange(9).reshape(3, 3), mask=[[1, 0, 0],
... [1, 0, 0],
... [0, 0, 0]])
>>> x.filled()
array([[999999, 1, 2],
[999999, 4, 5],
[ 6, 7, 8]])
"""
if hasattr(a, 'filled'):
return a.filled(fill_value)
elif isinstance(a, ndarray):
# Should we check for contiguity ? and a.flags['CONTIGUOUS']:
return a
elif isinstance(a, dict):
return np.array(a, 'O')
else:
return np.array(a)
def get_masked_subclass(*arrays):
"""
Return the youngest subclass of MaskedArray from a list of (masked) arrays.
In case of siblings, the first listed takes over.
"""
if len(arrays) == 1:
arr = arrays[0]
if isinstance(arr, MaskedArray):
rcls = type(arr)
else:
rcls = MaskedArray
else:
arrcls = [type(a) for a in arrays]
rcls = arrcls[0]
if not issubclass(rcls, MaskedArray):
rcls = MaskedArray
for cls in arrcls[1:]:
if issubclass(cls, rcls):
rcls = cls
# Don't return MaskedConstant as result: revert to MaskedArray
if rcls.__name__ == 'MaskedConstant':
return MaskedArray
return rcls
def getdata(a, subok=True):
"""
Return the data of a masked array as an ndarray.
Return the data of `a` (if any) as an ndarray if `a` is a ``MaskedArray``,
else return `a` as a ndarray or subclass (depending on `subok`) if not.
Parameters
----------
a : array_like
Input ``MaskedArray``, alternatively a ndarray or a subclass thereof.
subok : bool
Whether to force the output to be a `pure` ndarray (False) or to
return a subclass of ndarray if appropriate (True, default).
See Also
--------
getmask : Return the mask of a masked array, or nomask.
getmaskarray : Return the mask of a masked array, or full array of False.
Examples
--------
>>> import numpy.ma as ma
>>> a = ma.masked_equal([[1,2],[3,4]], 2)
>>> a
masked_array(data =
[[1 --]
[3 4]],
mask =
[[False True]
[False False]],
fill_value=999999)
>>> ma.getdata(a)
array([[1, 2],
[3, 4]])
Equivalently use the ``MaskedArray`` `data` attribute.
>>> a.data
array([[1, 2],
[3, 4]])
"""
try:
data = a._data
except AttributeError:
data = np.array(a, copy=False, subok=subok)
if not subok:
return data.view(ndarray)
return data
get_data = getdata
def fix_invalid(a, mask=nomask, copy=True, fill_value=None):
"""
Return input with invalid data masked and replaced by a fill value.
Invalid data means values of `nan`, `inf`, etc.
Parameters
----------
a : array_like
Input array, a (subclass of) ndarray.
mask : sequence, optional
Mask. Must be convertible to an array of booleans with the same
shape as `data`. True indicates a masked (i.e. invalid) data.
copy : bool, optional
Whether to use a copy of `a` (True) or to fix `a` in place (False).
Default is True.
fill_value : scalar, optional
Value used for fixing invalid data. Default is None, in which case
the ``a.fill_value`` is used.
Returns
-------
b : MaskedArray
The input array with invalid entries fixed.
Notes
-----
A copy is performed by default.
Examples
--------
>>> x = np.ma.array([1., -1, np.nan, np.inf], mask=[1] + [0]*3)
>>> x
masked_array(data = [-- -1.0 nan inf],
mask = [ True False False False],
fill_value = 1e+20)
>>> np.ma.fix_invalid(x)
masked_array(data = [-- -1.0 -- --],
mask = [ True False True True],
fill_value = 1e+20)
>>> fixed = np.ma.fix_invalid(x)
>>> fixed.data
array([ 1.00000000e+00, -1.00000000e+00, 1.00000000e+20,
1.00000000e+20])
>>> x.data
array([ 1., -1., NaN, Inf])
"""
a = masked_array(a, copy=copy, mask=mask, subok=True)
invalid = np.logical_not(np.isfinite(a._data))
if not invalid.any():
return a
a._mask |= invalid
if fill_value is None:
fill_value = a.fill_value
a._data[invalid] = fill_value
return a
###############################################################################
# Ufuncs #
###############################################################################
ufunc_domain = {}
ufunc_fills = {}
class _DomainCheckInterval:
"""
    Define a valid interval, so that:
``domain_check_interval(a,b)(x) == True`` where
``x < a`` or ``x > b``.
"""
def __init__(self, a, b):
"domain_check_interval(a,b)(x) = true where x < a or y > b"
if (a > b):
(a, b) = (b, a)
self.a = a
self.b = b
def __call__(self, x):
"Execute the call behavior."
# nans at masked positions cause RuntimeWarnings, even though
# they are masked. To avoid this we suppress warnings.
with np.errstate(invalid='ignore'):
return umath.logical_or(umath.greater(x, self.b),
umath.less(x, self.a))
class _DomainTan:
"""
Define a valid interval for the `tan` function, so that:
``domain_tan(eps) = True`` where ``abs(cos(x)) < eps``
"""
def __init__(self, eps):
"domain_tan(eps) = true where abs(cos(x)) < eps)"
self.eps = eps
def __call__(self, x):
"Executes the call behavior."
with np.errstate(invalid='ignore'):
return umath.less(umath.absolute(umath.cos(x)), self.eps)
class _DomainSafeDivide:
"""
Define a domain for safe division.
"""
def __init__(self, tolerance=None):
self.tolerance = tolerance
def __call__(self, a, b):
# Delay the selection of the tolerance to here in order to reduce numpy
# import times. The calculation of these parameters is a substantial
# component of numpy's import time.
if self.tolerance is None:
self.tolerance = np.finfo(float).tiny
# don't call ma ufuncs from __array_wrap__ which would fail for scalars
a, b = np.asarray(a), np.asarray(b)
with np.errstate(invalid='ignore'):
return umath.absolute(a) * self.tolerance >= umath.absolute(b)
class _DomainGreater:
"""
DomainGreater(v)(x) is True where x <= v.
"""
def __init__(self, critical_value):
"DomainGreater(v)(x) = true where x <= v"
self.critical_value = critical_value
def __call__(self, x):
"Executes the call behavior."
with np.errstate(invalid='ignore'):
return umath.less_equal(x, self.critical_value)
class _DomainGreaterEqual:
"""
DomainGreaterEqual(v)(x) is True where x < v.
"""
def __init__(self, critical_value):
"DomainGreaterEqual(v)(x) = true where x < v"
self.critical_value = critical_value
def __call__(self, x):
"Executes the call behavior."
with np.errstate(invalid='ignore'):
return umath.less(x, self.critical_value)
class _MaskedUnaryOperation:
"""
Defines masked version of unary operations, where invalid values are
pre-masked.
Parameters
----------
mufunc : callable
The function for which to define a masked version. Made available
as ``_MaskedUnaryOperation.f``.
fill : scalar, optional
Filling value, default is 0.
domain : class instance
Domain for the function. Should be one of the ``_Domain*``
classes. Default is None.
"""
def __init__(self, mufunc, fill=0, domain=None):
self.f = mufunc
self.fill = fill
self.domain = domain
self.__doc__ = getattr(mufunc, "__doc__", str(mufunc))
self.__name__ = getattr(mufunc, "__name__", str(mufunc))
ufunc_domain[mufunc] = domain
ufunc_fills[mufunc] = fill
def __call__(self, a, *args, **kwargs):
"""
Execute the call behavior.
"""
d = getdata(a)
# Deal with domain
if self.domain is not None:
# Case 1.1. : Domained function
# nans at masked positions cause RuntimeWarnings, even though
# they are masked. To avoid this we suppress warnings.
with np.errstate(divide='ignore', invalid='ignore'):
result = self.f(d, *args, **kwargs)
# Make a mask
m = ~umath.isfinite(result)
m |= self.domain(d)
m |= getmask(a)
else:
# Case 1.2. : Function without a domain
# Get the result and the mask
with np.errstate(divide='ignore', invalid='ignore'):
result = self.f(d, *args, **kwargs)
m = getmask(a)
if not result.ndim:
            # Case 2.1. : The result is scalar
if m:
return masked
return result
if m is not nomask:
# Case 2.2. The result is an array
            # We need to fill the invalid data back with the input. In C we
            # would just skip the element and keep the original, but we have
            # to do it this way in Python. The copy may fail when result has
            # a lower dtype than the inputs (as in equal).
try:
np.copyto(result, d, where=m)
except TypeError:
pass
            # Transform to a (subclass of) MaskedArray.
masked_result = result.view(get_masked_subclass(a))
masked_result._mask = m
masked_result._update_from(a)
return masked_result
def __str__(self):
return "Masked version of %s. [Invalid values are masked]" % str(self.f)
class _MaskedBinaryOperation:
"""
Define masked version of binary operations, where invalid
values are pre-masked.
Parameters
----------
mbfunc : function
The function for which to define a masked version. Made available
as ``_MaskedBinaryOperation.f``.
domain : class instance
Default domain for the function. Should be one of the ``_Domain*``
classes. Default is None.
fillx : scalar, optional
Filling value for the first argument, default is 0.
filly : scalar, optional
Filling value for the second argument, default is 0.
"""
def __init__(self, mbfunc, fillx=0, filly=0):
"""
        mbfunc(fillx, filly) must be defined.
        mbfunc(x, filly) = x for all x to enable reduce.
"""
self.f = mbfunc
self.fillx = fillx
self.filly = filly
self.__doc__ = getattr(mbfunc, "__doc__", str(mbfunc))
self.__name__ = getattr(mbfunc, "__name__", str(mbfunc))
ufunc_domain[mbfunc] = None
ufunc_fills[mbfunc] = (fillx, filly)
def __call__(self, a, b, *args, **kwargs):
"""
Execute the call behavior.
"""
# Get the data, as ndarray
(da, db) = (getdata(a), getdata(b))
# Get the result
        with np.errstate(divide='ignore', invalid='ignore'):
result = self.f(da, db, *args, **kwargs)
# Get the mask for the result
(ma, mb) = (getmask(a), getmask(b))
if ma is nomask:
if mb is nomask:
m = nomask
else:
m = umath.logical_or(getmaskarray(a), mb)
elif mb is nomask:
m = umath.logical_or(ma, getmaskarray(b))
else:
m = umath.logical_or(ma, mb)
# Case 1. : scalar
if not result.ndim:
if m:
return masked
return result
# Case 2. : array
# Revert result to da where masked
if m is not nomask and m.any():
# any errors, just abort; impossible to guarantee masked values
try:
np.copyto(result, da, casting='unsafe', where=m)
except:
pass
# Transforms to a (subclass of) MaskedArray
masked_result = result.view(get_masked_subclass(a, b))
masked_result._mask = m
if isinstance(a, MaskedArray):
masked_result._update_from(a)
elif isinstance(b, MaskedArray):
masked_result._update_from(b)
return masked_result
def reduce(self, target, axis=0, dtype=None):
"""
Reduce `target` along the given `axis`.
"""
tclass = get_masked_subclass(target)
m = getmask(target)
t = filled(target, self.filly)
if t.shape == ():
t = t.reshape(1)
if m is not nomask:
m = make_mask(m, copy=1)
m.shape = (1,)
if m is nomask:
tr = self.f.reduce(t, axis)
mr = nomask
else:
tr = self.f.reduce(t, axis, dtype=dtype or t.dtype)
mr = umath.logical_and.reduce(m, axis)
if not tr.shape:
if mr:
return masked
else:
return tr
masked_tr = tr.view(tclass)
masked_tr._mask = mr
return masked_tr
def outer(self, a, b):
"""
Return the function applied to the outer product of a and b.
"""
(da, db) = (getdata(a), getdata(b))
d = self.f.outer(da, db)
ma = getmask(a)
mb = getmask(b)
if ma is nomask and mb is nomask:
m = nomask
else:
ma = getmaskarray(a)
mb = getmaskarray(b)
m = umath.logical_or.outer(ma, mb)
if (not m.ndim) and m:
return masked
if m is not nomask:
np.copyto(d, da, where=m)
if not d.shape:
return d
masked_d = d.view(get_masked_subclass(a, b))
masked_d._mask = m
return masked_d
def accumulate(self, target, axis=0):
"""Accumulate `target` along `axis` after filling with y fill
value.
"""
tclass = get_masked_subclass(target)
t = filled(target, self.filly)
result = self.f.accumulate(t, axis)
masked_result = result.view(tclass)
return masked_result
def __str__(self):
return "Masked version of " + str(self.f)
class _DomainedBinaryOperation:
"""
Define binary operations that have a domain, like divide.
They have no reduce, outer or accumulate.
Parameters
----------
mbfunc : function
The function for which to define a masked version. Made available
as ``_DomainedBinaryOperation.f``.
domain : class instance
Default domain for the function. Should be one of the ``_Domain*``
classes.
fillx : scalar, optional
Filling value for the first argument, default is 0.
filly : scalar, optional
Filling value for the second argument, default is 0.
"""
def __init__(self, dbfunc, domain, fillx=0, filly=0):
"""abfunc(fillx, filly) must be defined.
abfunc(x, filly) = x for all x to enable reduce.
"""
self.f = dbfunc
self.domain = domain
self.fillx = fillx
self.filly = filly
self.__doc__ = getattr(dbfunc, "__doc__", str(dbfunc))
self.__name__ = getattr(dbfunc, "__name__", str(dbfunc))
ufunc_domain[dbfunc] = domain
ufunc_fills[dbfunc] = (fillx, filly)
def __call__(self, a, b, *args, **kwargs):
"Execute the call behavior."
# Get the data
(da, db) = (getdata(a), getdata(b))
# Get the result
with np.errstate(divide='ignore', invalid='ignore'):
result = self.f(da, db, *args, **kwargs)
# Get the mask as a combination of the source masks and invalid
m = ~umath.isfinite(result)
m |= getmask(a)
m |= getmask(b)
# Apply the domain
domain = ufunc_domain.get(self.f, None)
if domain is not None:
m |= domain(da, db)
# Take care of the scalar case first
if (not m.ndim):
if m:
return masked
else:
return result
# When the mask is True, put back da if possible
# any errors, just abort; impossible to guarantee masked values
try:
np.copyto(result, 0, casting='unsafe', where=m)
# avoid using "*" since this may be overlaid
masked_da = umath.multiply(m, da)
# only add back if it can be cast safely
if np.can_cast(masked_da.dtype, result.dtype, casting='safe'):
result += masked_da
except:
pass
# Transforms to a (subclass of) MaskedArray
masked_result = result.view(get_masked_subclass(a, b))
masked_result._mask = m
if isinstance(a, MaskedArray):
masked_result._update_from(a)
elif isinstance(b, MaskedArray):
masked_result._update_from(b)
return masked_result
def __str__(self):
return "Masked version of " + str(self.f)
# Unary ufuncs
exp = _MaskedUnaryOperation(umath.exp)
conjugate = _MaskedUnaryOperation(umath.conjugate)
sin = _MaskedUnaryOperation(umath.sin)
cos = _MaskedUnaryOperation(umath.cos)
tan = _MaskedUnaryOperation(umath.tan)
arctan = _MaskedUnaryOperation(umath.arctan)
arcsinh = _MaskedUnaryOperation(umath.arcsinh)
sinh = _MaskedUnaryOperation(umath.sinh)
cosh = _MaskedUnaryOperation(umath.cosh)
tanh = _MaskedUnaryOperation(umath.tanh)
abs = absolute = _MaskedUnaryOperation(umath.absolute)
angle = _MaskedUnaryOperation(angle) # from numpy.lib.function_base
fabs = _MaskedUnaryOperation(umath.fabs)
negative = _MaskedUnaryOperation(umath.negative)
floor = _MaskedUnaryOperation(umath.floor)
ceil = _MaskedUnaryOperation(umath.ceil)
around = _MaskedUnaryOperation(np.round_)
logical_not = _MaskedUnaryOperation(umath.logical_not)
# Domained unary ufuncs
sqrt = _MaskedUnaryOperation(umath.sqrt, 0.0,
_DomainGreaterEqual(0.0))
log = _MaskedUnaryOperation(umath.log, 1.0,
_DomainGreater(0.0))
log2 = _MaskedUnaryOperation(umath.log2, 1.0,
_DomainGreater(0.0))
log10 = _MaskedUnaryOperation(umath.log10, 1.0,
_DomainGreater(0.0))
tan = _MaskedUnaryOperation(umath.tan, 0.0,
_DomainTan(1e-35))
arcsin = _MaskedUnaryOperation(umath.arcsin, 0.0,
_DomainCheckInterval(-1.0, 1.0))
arccos = _MaskedUnaryOperation(umath.arccos, 0.0,
_DomainCheckInterval(-1.0, 1.0))
arccosh = _MaskedUnaryOperation(umath.arccosh, 1.0,
_DomainGreaterEqual(1.0))
arctanh = _MaskedUnaryOperation(umath.arctanh, 0.0,
_DomainCheckInterval(-1.0 + 1e-15, 1.0 - 1e-15))
# Binary ufuncs
add = _MaskedBinaryOperation(umath.add)
subtract = _MaskedBinaryOperation(umath.subtract)
multiply = _MaskedBinaryOperation(umath.multiply, 1, 1)
arctan2 = _MaskedBinaryOperation(umath.arctan2, 0.0, 1.0)
equal = _MaskedBinaryOperation(umath.equal)
equal.reduce = None
not_equal = _MaskedBinaryOperation(umath.not_equal)
not_equal.reduce = None
less_equal = _MaskedBinaryOperation(umath.less_equal)
less_equal.reduce = None
greater_equal = _MaskedBinaryOperation(umath.greater_equal)
greater_equal.reduce = None
less = _MaskedBinaryOperation(umath.less)
less.reduce = None
greater = _MaskedBinaryOperation(umath.greater)
greater.reduce = None
logical_and = _MaskedBinaryOperation(umath.logical_and)
alltrue = _MaskedBinaryOperation(umath.logical_and, 1, 1).reduce
logical_or = _MaskedBinaryOperation(umath.logical_or)
sometrue = logical_or.reduce
logical_xor = _MaskedBinaryOperation(umath.logical_xor)
bitwise_and = _MaskedBinaryOperation(umath.bitwise_and)
bitwise_or = _MaskedBinaryOperation(umath.bitwise_or)
bitwise_xor = _MaskedBinaryOperation(umath.bitwise_xor)
hypot = _MaskedBinaryOperation(umath.hypot)
# Domained binary ufuncs
divide = _DomainedBinaryOperation(umath.divide, _DomainSafeDivide(), 0, 1)
true_divide = _DomainedBinaryOperation(umath.true_divide,
_DomainSafeDivide(), 0, 1)
floor_divide = _DomainedBinaryOperation(umath.floor_divide,
_DomainSafeDivide(), 0, 1)
remainder = _DomainedBinaryOperation(umath.remainder,
_DomainSafeDivide(), 0, 1)
fmod = _DomainedBinaryOperation(umath.fmod, _DomainSafeDivide(), 0, 1)
mod = _DomainedBinaryOperation(umath.mod, _DomainSafeDivide(), 0, 1)
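# Editor's note (illustrative): _DomainSafeDivide flags positions where the
# denominator is too small relative to the numerator, so division by zero is
# masked rather than warned about, e.g.
#   divide(masked_array([1., 2.]), [0., 1.]) -> [--, 2.0]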
###############################################################################
# Mask creation functions #
###############################################################################
def _recursive_make_descr(datatype, newtype=bool_):
"Private function allowing recursion in make_descr."
# Do we have some name fields ?
if datatype.names:
descr = []
for name in datatype.names:
field = datatype.fields[name]
if len(field) == 3:
# Prepend the title to the name
name = (field[-1], name)
descr.append((name, _recursive_make_descr(field[0], newtype)))
return descr
# Is this some kind of composite a la (np.float,2)
elif datatype.subdtype:
mdescr = list(datatype.subdtype)
mdescr[0] = _recursive_make_descr(datatype.subdtype[0], newtype)
return tuple(mdescr)
else:
return newtype
def make_mask_descr(ndtype):
"""
Construct a dtype description list from a given dtype.
    Returns a new dtype object, with the type of all fields in `ndtype`
    converted to a boolean type. Field names are not altered.
Parameters
----------
ndtype : dtype
The dtype to convert.
Returns
-------
result : dtype
A dtype that looks like `ndtype`, the type of all fields is boolean.
Examples
--------
>>> import numpy.ma as ma
>>> dtype = np.dtype({'names':['foo', 'bar'],
    ...                   'formats':[np.float32, np.int]})
>>> dtype
dtype([('foo', '<f4'), ('bar', '<i4')])
>>> ma.make_mask_descr(dtype)
dtype([('foo', '|b1'), ('bar', '|b1')])
>>> ma.make_mask_descr(np.float32)
<type 'numpy.bool_'>
"""
# Make sure we do have a dtype
if not isinstance(ndtype, np.dtype):
ndtype = np.dtype(ndtype)
return np.dtype(_recursive_make_descr(ndtype, np.bool))
def getmask(a):
"""
Return the mask of a masked array, or nomask.
Return the mask of `a` as an ndarray if `a` is a `MaskedArray` and the
mask is not `nomask`, else return `nomask`. To guarantee a full array
of booleans of the same shape as a, use `getmaskarray`.
Parameters
----------
a : array_like
Input `MaskedArray` for which the mask is required.
See Also
--------
getdata : Return the data of a masked array as an ndarray.
getmaskarray : Return the mask of a masked array, or full array of False.
Examples
--------
>>> import numpy.ma as ma
>>> a = ma.masked_equal([[1,2],[3,4]], 2)
>>> a
masked_array(data =
[[1 --]
[3 4]],
mask =
[[False True]
[False False]],
fill_value=999999)
>>> ma.getmask(a)
array([[False, True],
[False, False]], dtype=bool)
Equivalently use the `MaskedArray` `mask` attribute.
>>> a.mask
array([[False, True],
[False, False]], dtype=bool)
Result when mask == `nomask`
>>> b = ma.masked_array([[1,2],[3,4]])
>>> b
masked_array(data =
[[1 2]
[3 4]],
mask =
False,
fill_value=999999)
>>> ma.nomask
False
>>> ma.getmask(b) == ma.nomask
True
>>> b.mask == ma.nomask
True
"""
return getattr(a, '_mask', nomask)
get_mask = getmask
def getmaskarray(arr):
"""
Return the mask of a masked array, or full boolean array of False.
Return the mask of `arr` as an ndarray if `arr` is a `MaskedArray` and
the mask is not `nomask`, else return a full boolean array of False of
the same shape as `arr`.
Parameters
----------
arr : array_like
Input `MaskedArray` for which the mask is required.
See Also
--------
getmask : Return the mask of a masked array, or nomask.
getdata : Return the data of a masked array as an ndarray.
Examples
--------
>>> import numpy.ma as ma
>>> a = ma.masked_equal([[1,2],[3,4]], 2)
>>> a
masked_array(data =
[[1 --]
[3 4]],
mask =
[[False True]
[False False]],
fill_value=999999)
>>> ma.getmaskarray(a)
array([[False, True],
[False, False]], dtype=bool)
Result when mask == ``nomask``
>>> b = ma.masked_array([[1,2],[3,4]])
>>> b
masked_array(data =
[[1 2]
[3 4]],
mask =
False,
fill_value=999999)
    >>> ma.getmaskarray(b)
array([[False, False],
[False, False]], dtype=bool)
"""
mask = getmask(arr)
if mask is nomask:
mask = make_mask_none(np.shape(arr), getattr(arr, 'dtype', None))
return mask
def is_mask(m):
"""
Return True if m is a valid, standard mask.
This function does not check the contents of the input, only that the
type is MaskType. In particular, this function returns False if the
mask has a flexible dtype.
Parameters
----------
m : array_like
Array to test.
Returns
-------
result : bool
True if `m.dtype.type` is MaskType, False otherwise.
See Also
--------
isMaskedArray : Test whether input is an instance of MaskedArray.
Examples
--------
>>> import numpy.ma as ma
>>> m = ma.masked_equal([0, 1, 0, 2, 3], 0)
>>> m
masked_array(data = [-- 1 -- 2 3],
mask = [ True False True False False],
fill_value=999999)
>>> ma.is_mask(m)
False
>>> ma.is_mask(m.mask)
True
Input must be an ndarray (or have similar attributes)
for it to be considered a valid mask.
>>> m = [False, True, False]
>>> ma.is_mask(m)
False
>>> m = np.array([False, True, False])
>>> m
array([False, True, False], dtype=bool)
>>> ma.is_mask(m)
True
Arrays with complex dtypes don't return True.
>>> dtype = np.dtype({'names':['monty', 'pithon'],
    ...                   'formats':[np.bool, np.bool]})
>>> dtype
dtype([('monty', '|b1'), ('pithon', '|b1')])
>>> m = np.array([(True, False), (False, True), (True, False)],
    ...              dtype=dtype)
>>> m
array([(True, False), (False, True), (True, False)],
dtype=[('monty', '|b1'), ('pithon', '|b1')])
>>> ma.is_mask(m)
False
"""
try:
return m.dtype.type is MaskType
except AttributeError:
return False
def make_mask(m, copy=False, shrink=True, dtype=MaskType):
"""
Create a boolean mask from an array.
Return `m` as a boolean mask, creating a copy if necessary or requested.
The function can accept any sequence that is convertible to integers,
    or ``nomask``. The contents need not be 0s and 1s: values of 0 are
    interpreted as False, everything else as True.
Parameters
----------
m : array_like
Potential mask.
copy : bool, optional
Whether to return a copy of `m` (True) or `m` itself (False).
shrink : bool, optional
Whether to shrink `m` to ``nomask`` if all its values are False.
dtype : dtype, optional
Data-type of the output mask. By default, the output mask has a
dtype of MaskType (bool). If the dtype is flexible, each field has
a boolean dtype. This is ignored when `m` is ``nomask``, in which
case ``nomask`` is always returned.
Returns
-------
result : ndarray
A boolean mask derived from `m`.
Examples
--------
>>> import numpy.ma as ma
>>> m = [True, False, True, True]
>>> ma.make_mask(m)
array([ True, False, True, True], dtype=bool)
>>> m = [1, 0, 1, 1]
>>> ma.make_mask(m)
array([ True, False, True, True], dtype=bool)
>>> m = [1, 0, 2, -3]
>>> ma.make_mask(m)
array([ True, False, True, True], dtype=bool)
Effect of the `shrink` parameter.
>>> m = np.zeros(4)
>>> m
array([ 0., 0., 0., 0.])
>>> ma.make_mask(m)
False
>>> ma.make_mask(m, shrink=False)
array([False, False, False, False], dtype=bool)
Using a flexible `dtype`.
>>> m = [1, 0, 1, 1]
>>> n = [0, 1, 0, 0]
>>> arr = []
>>> for man, mouse in zip(m, n):
... arr.append((man, mouse))
>>> arr
[(1, 0), (0, 1), (1, 0), (1, 0)]
>>> dtype = np.dtype({'names':['man', 'mouse'],
    ...                   'formats':[np.int, np.int]})
>>> arr = np.array(arr, dtype=dtype)
>>> arr
array([(1, 0), (0, 1), (1, 0), (1, 0)],
dtype=[('man', '<i4'), ('mouse', '<i4')])
>>> ma.make_mask(arr, dtype=dtype)
array([(True, False), (False, True), (True, False), (True, False)],
dtype=[('man', '|b1'), ('mouse', '|b1')])
"""
if m is nomask:
return nomask
elif isinstance(m, ndarray):
# We won't return after this point to make sure we can shrink the mask
# Fill the mask in case there are missing data
m = filled(m, True)
# Make sure the input dtype is valid
dtype = make_mask_descr(dtype)
if m.dtype == dtype:
if copy:
result = m.copy()
else:
result = m
else:
result = np.array(m, dtype=dtype, copy=copy)
else:
result = np.array(filled(m, True), dtype=MaskType)
    # Down with the masks! Shrink to nomask if all values are False.
if shrink and (not result.dtype.names) and (not result.any()):
return nomask
else:
return result
def make_mask_none(newshape, dtype=None):
"""
Return a boolean mask of the given shape, filled with False.
This function returns a boolean ndarray with all entries False, that can
be used in common mask manipulations. If a structured dtype is specified,
the type of each field is converted to a boolean type.
Parameters
----------
newshape : tuple
A tuple indicating the shape of the mask.
dtype : {None, dtype}, optional
If None, use a MaskType instance. Otherwise, use a new datatype with
the same fields as `dtype`, converted to boolean types.
Returns
-------
result : ndarray
An ndarray of appropriate shape and dtype, filled with False.
See Also
--------
make_mask : Create a boolean mask from an array.
make_mask_descr : Construct a dtype description list from a given dtype.
Examples
--------
>>> import numpy.ma as ma
>>> ma.make_mask_none((3,))
array([False, False, False], dtype=bool)
Defining a more complex dtype.
>>> dtype = np.dtype({'names':['foo', 'bar'],
'formats':[np.float32, np.int]})
>>> dtype
dtype([('foo', '<f4'), ('bar', '<i4')])
>>> ma.make_mask_none((3,), dtype=dtype)
array([(False, False), (False, False), (False, False)],
dtype=[('foo', '|b1'), ('bar', '|b1')])
"""
if dtype is None:
result = np.zeros(newshape, dtype=MaskType)
else:
result = np.zeros(newshape, dtype=make_mask_descr(dtype))
return result
def mask_or(m1, m2, copy=False, shrink=True):
"""
Combine two masks with the ``logical_or`` operator.
The result may be a view on `m1` or `m2` if the other is `nomask`
(i.e. False).
Parameters
----------
m1, m2 : array_like
Input masks.
copy : bool, optional
If copy is False and one of the inputs is `nomask`, return a view
of the other input mask. Defaults to False.
shrink : bool, optional
Whether to shrink the output to `nomask` if all its values are
False. Defaults to True.
Returns
-------
mask : output mask
The result masks values that are masked in either `m1` or `m2`.
Raises
------
ValueError
If `m1` and `m2` have different flexible dtypes.
Examples
--------
>>> m1 = np.ma.make_mask([0, 1, 1, 0])
>>> m2 = np.ma.make_mask([1, 0, 0, 0])
>>> np.ma.mask_or(m1, m2)
array([ True, True, True, False], dtype=bool)
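When one input is `nomask`, a view of the other mask may be returned
(a rough sketch; exact repr formatting can vary between versions):
>>> np.ma.mask_or(np.ma.nomask, m1)
array([False,  True,  True, False], dtype=bool)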
"""
def _recursive_mask_or(m1, m2, newmask):
names = m1.dtype.names
for name in names:
current1 = m1[name]
if current1.dtype.names:
_recursive_mask_or(current1, m2[name], newmask[name])
else:
umath.logical_or(current1, m2[name], newmask[name])
return
if (m1 is nomask) or (m1 is False):
dtype = getattr(m2, 'dtype', MaskType)
return make_mask(m2, copy=copy, shrink=shrink, dtype=dtype)
if (m2 is nomask) or (m2 is False):
dtype = getattr(m1, 'dtype', MaskType)
return make_mask(m1, copy=copy, shrink=shrink, dtype=dtype)
if m1 is m2 and is_mask(m1):
return m1
(dtype1, dtype2) = (getattr(m1, 'dtype', None), getattr(m2, 'dtype', None))
if (dtype1 != dtype2):
raise ValueError("Incompatible dtypes '%s'<>'%s'" % (dtype1, dtype2))
if dtype1.names:
newmask = np.empty_like(m1)
_recursive_mask_or(m1, m2, newmask)
return newmask
return make_mask(umath.logical_or(m1, m2), copy=copy, shrink=shrink)
def flatten_mask(mask):
"""
Returns a completely flattened version of the mask, where nested fields
are collapsed.
Parameters
----------
mask : array_like
Input array, which will be interpreted as booleans.
Returns
-------
flattened_mask : ndarray of bools
The flattened input.
Examples
--------
>>> mask = np.array([0, 0, 1], dtype=np.bool)
>>> flatten_mask(mask)
array([False, False, True], dtype=bool)
>>> mask = np.array([(0, 0), (0, 1)], dtype=[('a', bool), ('b', bool)])
>>> flatten_mask(mask)
array([False, False, False, True], dtype=bool)
>>> mdtype = [('a', bool), ('b', [('ba', bool), ('bb', bool)])]
>>> mask = np.array([(0, (0, 0)), (0, (0, 1))], dtype=mdtype)
>>> flatten_mask(mask)
array([False, False, False, False, False, True], dtype=bool)
"""
def _flatmask(mask):
"Flatten the mask and returns a (maybe nested) sequence of booleans."
mnames = mask.dtype.names
if mnames:
return [flatten_mask(mask[name]) for name in mnames]
else:
return mask
def _flatsequence(sequence):
"Generates a flattened version of the sequence."
try:
for element in sequence:
if hasattr(element, '__iter__'):
for f in _flatsequence(element):
yield f
else:
yield element
except TypeError:
yield sequence
mask = np.asarray(mask)
flattened = _flatsequence(_flatmask(mask))
return np.array([_ for _ in flattened], dtype=bool)
def _check_mask_axis(mask, axis, keepdims=np._NoValue):
"Check whether there are masked values along the given axis"
kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims}
if mask is not nomask:
return mask.all(axis=axis, **kwargs)
return nomask
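# A rough usage sketch for the private helper above: along the given axis,
# the result is True only where *every* entry is masked, e.g.
#
# >>> m = np.array([[True, False], [True, True]])
# >>> _check_mask_axis(m, 0)
# array([ True, False], dtype=bool)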
###############################################################################
# Masking functions #
###############################################################################
def masked_where(condition, a, copy=True):
"""
Mask an array where a condition is met.
Return `a` as an array masked where `condition` is True.
Any masked values of `a` or `condition` are also masked in the output.
Parameters
----------
condition : array_like
Masking condition. When `condition` tests floating point values for
equality, consider using ``masked_values`` instead.
a : array_like
Array to mask.
copy : bool
If True (default), make a copy of `a` in the result. If False, modify
`a` in place and return a view.
Returns
-------
result : MaskedArray
The result of masking `a` where `condition` is True.
See Also
--------
masked_values : Mask using floating point equality.
masked_equal : Mask where equal to a given value.
masked_not_equal : Mask where `not` equal to a given value.
masked_less_equal : Mask where less than or equal to a given value.
masked_greater_equal : Mask where greater than or equal to a given value.
masked_less : Mask where less than a given value.
masked_greater : Mask where greater than a given value.
masked_inside : Mask inside a given interval.
masked_outside : Mask outside a given interval.
masked_invalid : Mask invalid values (NaNs or infs).
Examples
--------
>>> import numpy.ma as ma
>>> a = np.arange(4)
>>> a
array([0, 1, 2, 3])
>>> ma.masked_where(a <= 2, a)
masked_array(data = [-- -- -- 3],
mask = [ True True True False],
fill_value=999999)
Mask array `b` conditional on `a`.
>>> b = ['a', 'b', 'c', 'd']
>>> ma.masked_where(a == 2, b)
masked_array(data = [a b -- d],
mask = [False False True False],
fill_value=N/A)
Effect of the `copy` argument.
>>> c = ma.masked_where(a <= 2, a)
>>> c
masked_array(data = [-- -- -- 3],
mask = [ True True True False],
fill_value=999999)
>>> c[0] = 99
>>> c
masked_array(data = [99 -- -- 3],
mask = [False True True False],
fill_value=999999)
>>> a
array([0, 1, 2, 3])
>>> c = ma.masked_where(a <= 2, a, copy=False)
>>> c[0] = 99
>>> c
masked_array(data = [99 -- -- 3],
mask = [False True True False],
fill_value=999999)
>>> a
array([99, 1, 2, 3])
When `condition` or `a` contain masked values.
>>> a = np.arange(4)
>>> a = ma.masked_where(a == 2, a)
>>> a
masked_array(data = [0 1 -- 3],
mask = [False False True False],
fill_value=999999)
>>> b = np.arange(4)
>>> b = ma.masked_where(b == 0, b)
>>> b
masked_array(data = [-- 1 2 3],
mask = [ True False False False],
fill_value=999999)
>>> ma.masked_where(a == 3, b)
masked_array(data = [-- 1 -- --],
mask = [ True False True True],
fill_value=999999)
"""
# Make sure that condition is a valid standard-type mask.
cond = make_mask(condition)
a = np.array(a, copy=copy, subok=True)
(cshape, ashape) = (cond.shape, a.shape)
if cshape and cshape != ashape:
raise IndexError("Inconsistant shape between the condition and the input"
" (got %s and %s)" % (cshape, ashape))
if hasattr(a, '_mask'):
cond = mask_or(cond, a._mask)
cls = type(a)
else:
cls = MaskedArray
result = a.view(cls)
# Assign to *.mask so that structured masks are handled correctly.
result.mask = cond
return result
def masked_greater(x, value, copy=True):
"""
Mask an array where greater than a given value.
This function is a shortcut to ``masked_where``, with
`condition` = (x > value).
See Also
--------
masked_where : Mask where a condition is met.
Examples
--------
>>> import numpy.ma as ma
>>> a = np.arange(4)
>>> a
array([0, 1, 2, 3])
>>> ma.masked_greater(a, 2)
masked_array(data = [0 1 2 --],
mask = [False False False True],
fill_value=999999)
"""
return masked_where(greater(x, value), x, copy=copy)
def masked_greater_equal(x, value, copy=True):
"""
Mask an array where greater than or equal to a given value.
This function is a shortcut to ``masked_where``, with
`condition` = (x >= value).
See Also
--------
masked_where : Mask where a condition is met.
Examples
--------
>>> import numpy.ma as ma
>>> a = np.arange(4)
>>> a
array([0, 1, 2, 3])
>>> ma.masked_greater_equal(a, 2)
masked_array(data = [0 1 -- --],
mask = [False False True True],
fill_value=999999)
"""
return masked_where(greater_equal(x, value), x, copy=copy)
def masked_less(x, value, copy=True):
"""
Mask an array where less than a given value.
This function is a shortcut to ``masked_where``, with
`condition` = (x < value).
See Also
--------
masked_where : Mask where a condition is met.
Examples
--------
>>> import numpy.ma as ma
>>> a = np.arange(4)
>>> a
array([0, 1, 2, 3])
>>> ma.masked_less(a, 2)
masked_array(data = [-- -- 2 3],
mask = [ True True False False],
fill_value=999999)
"""
return masked_where(less(x, value), x, copy=copy)
def masked_less_equal(x, value, copy=True):
"""
Mask an array where less than or equal to a given value.
This function is a shortcut to ``masked_where``, with
`condition` = (x <= value).
See Also
--------
masked_where : Mask where a condition is met.
Examples
--------
>>> import numpy.ma as ma
>>> a = np.arange(4)
>>> a
array([0, 1, 2, 3])
>>> ma.masked_less_equal(a, 2)
masked_array(data = [-- -- -- 3],
mask = [ True True True False],
fill_value=999999)
"""
return masked_where(less_equal(x, value), x, copy=copy)
def masked_not_equal(x, value, copy=True):
"""
Mask an array where `not` equal to a given value.
This function is a shortcut to ``masked_where``, with
`condition` = (x != value).
See Also
--------
masked_where : Mask where a condition is met.
Examples
--------
>>> import numpy.ma as ma
>>> a = np.arange(4)
>>> a
array([0, 1, 2, 3])
>>> ma.masked_not_equal(a, 2)
masked_array(data = [-- -- 2 --],
mask = [ True True False True],
fill_value=999999)
"""
return masked_where(not_equal(x, value), x, copy=copy)
def masked_equal(x, value, copy=True):
"""
Mask an array where equal to a given value.
This function is a shortcut to ``masked_where``, with
`condition` = (x == value). For floating point arrays,
consider using ``masked_values(x, value)``.
See Also
--------
masked_where : Mask where a condition is met.
masked_values : Mask using floating point equality.
Examples
--------
>>> import numpy.ma as ma
>>> a = np.arange(4)
>>> a
array([0, 1, 2, 3])
>>> ma.masked_equal(a, 2)
masked_array(data = [0 1 -- 3],
mask = [False False True False],
fill_value=999999)
"""
output = masked_where(equal(x, value), x, copy=copy)
output.fill_value = value
return output
def masked_inside(x, v1, v2, copy=True):
"""
Mask an array inside a given interval.
Shortcut to ``masked_where``, where `condition` is True for `x` inside
the interval [v1,v2] (v1 <= x <= v2). The boundaries `v1` and `v2`
can be given in either order.
See Also
--------
masked_where : Mask where a condition is met.
Notes
-----
The array `x` is prefilled with its filling value.
Examples
--------
>>> import numpy.ma as ma
>>> x = [0.31, 1.2, 0.01, 0.2, -0.4, -1.1]
>>> ma.masked_inside(x, -0.3, 0.3)
masked_array(data = [0.31 1.2 -- -- -0.4 -1.1],
mask = [False False True True False False],
fill_value=1e+20)
The order of `v1` and `v2` doesn't matter.
>>> ma.masked_inside(x, 0.3, -0.3)
masked_array(data = [0.31 1.2 -- -- -0.4 -1.1],
mask = [False False True True False False],
fill_value=1e+20)
"""
if v2 < v1:
(v1, v2) = (v2, v1)
xf = filled(x)
condition = (xf >= v1) & (xf <= v2)
return masked_where(condition, x, copy=copy)
def masked_outside(x, v1, v2, copy=True):
"""
Mask an array outside a given interval.
Shortcut to ``masked_where``, where `condition` is True for `x` outside
the interval [v1,v2] (x < v1)|(x > v2).
The boundaries `v1` and `v2` can be given in either order.
See Also
--------
masked_where : Mask where a condition is met.
Notes
-----
The array `x` is prefilled with its filling value.
Examples
--------
>>> import numpy.ma as ma
>>> x = [0.31, 1.2, 0.01, 0.2, -0.4, -1.1]
>>> ma.masked_outside(x, -0.3, 0.3)
masked_array(data = [-- -- 0.01 0.2 -- --],
mask = [ True True False False True True],
fill_value=1e+20)
The order of `v1` and `v2` doesn't matter.
>>> ma.masked_outside(x, 0.3, -0.3)
masked_array(data = [-- -- 0.01 0.2 -- --],
mask = [ True True False False True True],
fill_value=1e+20)
"""
if v2 < v1:
(v1, v2) = (v2, v1)
xf = filled(x)
condition = (xf < v1) | (xf > v2)
return masked_where(condition, x, copy=copy)
def masked_object(x, value, copy=True, shrink=True):
"""
Mask the array `x` where the data are exactly equal to value.
This function is similar to `masked_values`, but only suitable
for object arrays: for floating point, use `masked_values` instead.
Parameters
----------
x : array_like
Array to mask
value : object
Comparison value
copy : {True, False}, optional
Whether to return a copy of `x`.
shrink : {True, False}, optional
Whether to collapse a mask full of False to nomask
Returns
-------
result : MaskedArray
The result of masking `x` where equal to `value`.
See Also
--------
masked_where : Mask where a condition is met.
masked_equal : Mask where equal to a given value (integers).
masked_values : Mask using floating point equality.
Examples
--------
>>> import numpy.ma as ma
>>> food = np.array(['green_eggs', 'ham'], dtype=object)
>>> # don't eat spoiled food
>>> eat = ma.masked_object(food, 'green_eggs')
>>> print(eat)
[-- ham]
>>> # plain ol' ham is boring
>>> fresh_food = np.array(['cheese', 'ham', 'pineapple'], dtype=object)
>>> eat = ma.masked_object(fresh_food, 'green_eggs')
>>> print(eat)
[cheese ham pineapple]
Note that `mask` is set to ``nomask`` if possible.
>>> eat
masked_array(data = [cheese ham pineapple],
mask = False,
fill_value=?)
"""
if isMaskedArray(x):
condition = umath.equal(x._data, value)
mask = x._mask
else:
condition = umath.equal(np.asarray(x), value)
mask = nomask
mask = mask_or(mask, make_mask(condition, shrink=shrink))
return masked_array(x, mask=mask, copy=copy, fill_value=value)
def masked_values(x, value, rtol=1e-5, atol=1e-8, copy=True, shrink=True):
"""
Mask using floating point equality.
Return a MaskedArray, masked where the data in array `x` are approximately
equal to `value`, i.e. where the following condition is True
``abs(x - value) <= atol + rtol * abs(value)``
The fill_value is set to `value` and the mask is set to ``nomask`` if
possible. For integers, consider using ``masked_equal``.
Parameters
----------
x : array_like
Array to mask.
value : float
Masking value.
rtol : float, optional
Relative tolerance parameter (default 1e-5).
atol : float, optional
Absolute tolerance parameter (default 1e-8).
copy : bool, optional
Whether to return a copy of `x`.
shrink : bool, optional
Whether to collapse a mask full of False to ``nomask``.
Returns
-------
result : MaskedArray
The result of masking `x` where approximately equal to `value`.
See Also
--------
masked_where : Mask where a condition is met.
masked_equal : Mask where equal to a given value (integers).
Examples
--------
>>> import numpy.ma as ma
>>> x = np.array([1, 1.1, 2, 1.1, 3])
>>> ma.masked_values(x, 1.1)
masked_array(data = [1.0 -- 2.0 -- 3.0],
mask = [False True False True False],
fill_value=1.1)
Note that `mask` is set to ``nomask`` if possible.
>>> ma.masked_values(x, 1.5)
masked_array(data = [ 1. 1.1 2. 1.1 3. ],
mask = False,
fill_value=1.5)
For integers, the fill value will be different in general to the
result of ``masked_equal``.
>>> x = np.arange(5)
>>> x
array([0, 1, 2, 3, 4])
>>> ma.masked_values(x, 2)
masked_array(data = [0 1 -- 3 4],
mask = [False False True False False],
fill_value=2)
>>> ma.masked_equal(x, 2)
masked_array(data = [0 1 -- 3 4],
mask = [False False True False False],
fill_value=999999)
"""
mabs = umath.absolute
xnew = filled(x, value)
if issubclass(xnew.dtype.type, np.floating):
condition = umath.less_equal(
mabs(xnew - value), atol + rtol * mabs(value))
mask = getattr(x, '_mask', nomask)
else:
condition = umath.equal(xnew, value)
mask = nomask
mask = mask_or(mask, make_mask(condition, shrink=shrink), shrink=shrink)
return masked_array(xnew, mask=mask, copy=copy, fill_value=value)
def masked_invalid(a, copy=True):
"""
Mask an array where invalid values occur (NaNs or infs).
This function is a shortcut to ``masked_where``, with
`condition` = ~(np.isfinite(a)). Any pre-existing mask is conserved.
Only applies to arrays with a dtype where NaNs or infs make sense
(i.e. floating point types), but accepts any array_like object.
See Also
--------
masked_where : Mask where a condition is met.
Examples
--------
>>> import numpy.ma as ma
>>> a = np.arange(5, dtype=np.float)
>>> a[2] = np.NaN
>>> a[3] = np.PINF
>>> a
array([ 0., 1., NaN, Inf, 4.])
>>> ma.masked_invalid(a)
masked_array(data = [0.0 1.0 -- -- 4.0],
mask = [False False True True False],
fill_value=1e+20)
"""
a = np.array(a, copy=copy, subok=True)
mask = getattr(a, '_mask', None)
if mask is not None:
condition = ~(np.isfinite(getdata(a)))
if mask is not nomask:
condition |= mask
cls = type(a)
else:
condition = ~(np.isfinite(a))
cls = MaskedArray
result = a.view(cls)
result._mask = condition
return result
###############################################################################
# Printing options #
###############################################################################
class _MaskedPrintOption:
"""
Handle the string used to represent missing data in a masked array.
"""
def __init__(self, display):
"""
Create the masked_print_option object.
"""
self._display = display
self._enabled = True
def display(self):
"""
Display the string to print for masked values.
"""
return self._display
def set_display(self, s):
"""
Set the string to print for masked values.
"""
self._display = s
def enabled(self):
"""
Is the use of the display value enabled?
"""
return self._enabled
def enable(self, shrink=1):
"""
Enable (or disable) the display value, according to `shrink`.
"""
self._enabled = shrink
def __str__(self):
return str(self._display)
__repr__ = __str__
# if you single index into a masked location you get this object.
masked_print_option = _MaskedPrintOption('--')
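# A small usage sketch: the string printed for masked entries can be changed
# through this module-level instance (output shown schematically):
#
# >>> masked_print_option.set_display('N/A')
# >>> print(np.ma.array([1, 2], mask=[0, 1]))
# [1 N/A]
# >>> masked_print_option.set_display('--')   # restore the default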
def _recursive_printoption(result, mask, printopt):
"""
Puts `printopt` into `result` wherever `mask` is True.
Private function allowing for recursion.
"""
names = result.dtype.names
for name in names:
(curdata, curmask) = (result[name], mask[name])
if curdata.dtype.names:
_recursive_printoption(curdata, curmask, printopt)
else:
np.copyto(curdata, printopt, where=curmask)
return
_print_templates = dict(long_std="""\
masked_%(name)s(data =
%(data)s,
%(nlen)s mask =
%(mask)s,
%(nlen)s fill_value = %(fill)s)
""",
short_std="""\
masked_%(name)s(data = %(data)s,
%(nlen)s mask = %(mask)s,
%(nlen)s fill_value = %(fill)s)
""",
long_flx="""\
masked_%(name)s(data =
%(data)s,
%(nlen)s mask =
%(mask)s,
%(nlen)s fill_value = %(fill)s,
%(nlen)s dtype = %(dtype)s)
""",
short_flx="""\
masked_%(name)s(data = %(data)s,
%(nlen)s mask = %(mask)s,
%(nlen)s fill_value = %(fill)s,
%(nlen)s dtype = %(dtype)s)
""")
###############################################################################
# MaskedArray class #
###############################################################################
def _recursive_filled(a, mask, fill_value):
"""
Recursively fill `a` with `fill_value`.
"""
names = a.dtype.names
for name in names:
current = a[name]
if current.dtype.names:
_recursive_filled(current, mask[name], fill_value[name])
else:
np.copyto(current, fill_value[name], where=mask[name])
def flatten_structured_array(a):
"""
Flatten a structured array.
The data type of the output is chosen such that it can represent all of the
(nested) fields.
Parameters
----------
a : structured array
Returns
-------
output : masked array or ndarray
A flattened masked array if the input is a masked array, otherwise a
standard ndarray.
Examples
--------
>>> ndtype = [('a', int), ('b', float)]
>>> a = np.array([(1, 1), (2, 2)], dtype=ndtype)
>>> flatten_structured_array(a)
array([[1., 1.],
[2., 2.]])
"""
def flatten_sequence(iterable):
"""
Flattens a compound of nested iterables.
"""
for elm in iter(iterable):
if hasattr(elm, '__iter__'):
for f in flatten_sequence(elm):
yield f
else:
yield elm
a = np.asanyarray(a)
inishape = a.shape
a = a.ravel()
if isinstance(a, MaskedArray):
out = np.array([tuple(flatten_sequence(d.item())) for d in a._data])
out = out.view(MaskedArray)
out._mask = np.array([tuple(flatten_sequence(d.item()))
for d in getmaskarray(a)])
else:
out = np.array([tuple(flatten_sequence(d.item())) for d in a])
if len(inishape) > 1:
newshape = list(out.shape)
newshape[0] = inishape
out.shape = tuple(flatten_sequence(newshape))
return out
def _arraymethod(funcname, onmask=True):
"""
Return a class method wrapper around a basic array method.
Creates a class method which returns a masked array, where the new
``_data`` array is the output of the corresponding basic method called
on the original ``_data``.
If `onmask` is True, the new mask is the output of the method called
on the initial mask. Otherwise, the new mask is just a reference
to the initial mask.
Parameters
----------
funcname : str
Name of the function to apply on data.
onmask : bool
Whether the mask must also be processed (True) or left
alone (False). Default is True.
Returns
-------
method : instancemethod
Class method wrapper of the specified basic array method.
"""
def wrapped_method(self, *args, **params):
result = getattr(self._data, funcname)(*args, **params)
result = result.view(type(self))
result._update_from(self)
mask = self._mask
if result.ndim:
if not onmask:
result.__setmask__(mask)
elif mask is not nomask:
result.__setmask__(getattr(mask, funcname)(*args, **params))
else:
if mask.ndim and (not mask.dtype.names and mask.all()):
return masked
return result
methdoc = getattr(ndarray, funcname, None) or getattr(np, funcname, None)
if methdoc is not None:
wrapped_method.__doc__ = methdoc.__doc__
wrapped_method.__name__ = funcname
return wrapped_method
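# A minimal sketch of how _arraymethod is meant to be used, mirroring the
# class-level assignments made elsewhere in this module:
#
#     copy = _arraymethod('copy')
#     transpose = _arraymethod('transpose')
#
# so that e.g. ``marr.transpose()`` transposes the data and, because `onmask`
# defaults to True, the mask as well.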
class MaskedIterator(object):
"""
Flat iterator object to iterate over masked arrays.
A `MaskedIterator` iterator is returned by ``x.flat`` for any masked array
`x`. It allows iterating over the array as if it were a 1-D array,
either in a for-loop or by calling its `next` method.
Iteration is done in C-contiguous style, with the last index varying the
fastest. The iterator can also be indexed using basic slicing or
advanced indexing.
See Also
--------
MaskedArray.flat : Return a flat iterator over an array.
MaskedArray.flatten : Returns a flattened copy of an array.
Notes
-----
`MaskedIterator` is not exported by the `ma` module. Instead of
instantiating a `MaskedIterator` directly, use `MaskedArray.flat`.
Examples
--------
>>> x = np.ma.array(np.arange(6).reshape(2, 3))
>>> fl = x.flat
>>> type(fl)
<class 'numpy.ma.core.MaskedIterator'>
>>> for item in fl:
... print(item)
...
0
1
2
3
4
5
Extracting more than a single element by indexing the `MaskedIterator`
returns a masked array:
>>> fl[2:4]
masked_array(data = [2 3],
mask = False,
fill_value = 999999)
"""
def __init__(self, ma):
self.ma = ma
self.dataiter = ma._data.flat
if ma._mask is nomask:
self.maskiter = None
else:
self.maskiter = ma._mask.flat
def __iter__(self):
return self
def __getitem__(self, indx):
result = self.dataiter.__getitem__(indx).view(type(self.ma))
if self.maskiter is not None:
_mask = self.maskiter.__getitem__(indx)
if isinstance(_mask, ndarray):
# set shape to match that of data; this is needed for matrices
_mask.shape = result.shape
result._mask = _mask
elif isinstance(_mask, np.void):
return mvoid(result, mask=_mask, hardmask=self.ma._hardmask)
elif _mask: # Just a scalar, masked
return masked
return result
# This won't work if ravel makes a copy
def __setitem__(self, index, value):
self.dataiter[index] = getdata(value)
if self.maskiter is not None:
self.maskiter[index] = getmaskarray(value)
def __next__(self):
"""
Return the next value, or raise StopIteration.
Examples
--------
>>> x = np.ma.array([3, 2], mask=[0, 1])
>>> fl = x.flat
>>> fl.next()
3
>>> fl.next()
masked_array(data = --,
mask = True,
fill_value = 1e+20)
>>> fl.next()
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/ralf/python/numpy/numpy/ma/core.py", line 2243, in next
d = self.dataiter.next()
StopIteration
"""
d = next(self.dataiter)
if self.maskiter is not None:
m = next(self.maskiter)
if isinstance(m, np.void):
return mvoid(d, mask=m, hardmask=self.ma._hardmask)
elif m: # Just a scalar, masked
return masked
return d
next = __next__
class MaskedArray(ndarray):
"""
An array class with possibly masked values.
A True value in the mask excludes the corresponding element from any
computation.
Construction::
x = MaskedArray(data, mask=nomask, dtype=None, copy=False, subok=True,
ndmin=0, fill_value=None, keep_mask=True, hard_mask=None,
shrink=True, order=None)
Parameters
----------
data : array_like
Input data.
mask : sequence, optional
Mask. Must be convertible to an array of booleans with the same
shape as `data`. True indicates a masked (i.e. invalid) entry.
dtype : dtype, optional
Data type of the output.
If `dtype` is None, the type of the data argument (``data.dtype``)
is used. If `dtype` is not None and different from ``data.dtype``,
a copy is performed.
copy : bool, optional
Whether to copy the input data (True), or to use a reference instead.
Default is False.
subok : bool, optional
Whether to return a subclass of `MaskedArray` if possible (True) or a
plain `MaskedArray`. Default is True.
ndmin : int, optional
Minimum number of dimensions. Default is 0.
fill_value : scalar, optional
Value used to fill in the masked values when necessary.
If None, a default based on the data-type is used.
keep_mask : bool, optional
Whether to combine `mask` with the mask of the input data, if any
(True), or to use only `mask` for the output (False). Default is True.
hard_mask : bool, optional
Whether to use a hard mask or not. With a hard mask, masked values
cannot be unmasked. Default is False.
shrink : bool, optional
Whether to force compression of an empty mask. Default is True.
order : {'C', 'F', 'A'}, optional
Specify the order of the array. If order is 'C', then the array
will be in C-contiguous order (last-index varies the fastest).
If order is 'F', then the returned array will be in
Fortran-contiguous order (first-index varies the fastest).
If order is 'A' (default), then the returned array may be
in any order (either C-, Fortran-contiguous, or even discontiguous),
unless a copy is required, in which case it will be C-contiguous.
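Examples
--------
A minimal construction sketch (repr formatting may vary between versions):
>>> x = MaskedArray([1, 2, 3], mask=[0, 1, 0])
>>> print(x)
[1 -- 3]
>>> x.fill_value = 0
>>> x.filled()
array([1, 0, 3])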
"""
__array_priority__ = 15
_defaultmask = nomask
_defaulthardmask = False
_baseclass = ndarray
# Maximum number of elements per axis used when printing an array. The
# 1d case is handled separately because we need more values in this case.
_print_width = 100
_print_width_1d = 1500
def __new__(cls, data=None, mask=nomask, dtype=None, copy=False,
subok=True, ndmin=0, fill_value=None, keep_mask=True,
hard_mask=None, shrink=True, order=None, **options):
"""
Create a new masked array from scratch.
Notes
-----
A masked array can also be created by taking a .view(MaskedArray).
"""
# Process data.
_data = np.array(data, dtype=dtype, copy=copy,
order=order, subok=True, ndmin=ndmin)
_baseclass = getattr(data, '_baseclass', type(_data))
# Check that we're not erasing the mask.
if isinstance(data, MaskedArray) and (data.shape != _data.shape):
copy = True
# Careful, cls might not always be MaskedArray.
if not isinstance(data, cls) or not subok:
_data = ndarray.view(_data, cls)
else:
_data = ndarray.view(_data, type(data))
# Backwards compatibility w/ numpy.core.ma.
if hasattr(data, '_mask') and not isinstance(data, ndarray):
_data._mask = data._mask
# FIXME _sharedmask is never used.
_sharedmask = True
# Process mask.
# Named fields of the dtype, if any (an empty tuple otherwise)
names_ = _data.dtype.names or ()
# Type of the mask
if names_:
mdtype = make_mask_descr(_data.dtype)
else:
mdtype = MaskType
if mask is nomask:
# Case 1. : no mask in input.
# Erase the current mask ?
if not keep_mask:
# With a reduced version
if shrink:
_data._mask = nomask
# With full version
else:
_data._mask = np.zeros(_data.shape, dtype=mdtype)
# Check whether we missed something
elif isinstance(data, (tuple, list)):
try:
# If data is a sequence of masked arrays
mask = np.array([getmaskarray(m) for m in data],
dtype=mdtype)
except ValueError:
# If data is nested
mask = nomask
# Force shrinking of the mask if needed (and possible)
if (mdtype == MaskType) and mask.any():
_data._mask = mask
_data._sharedmask = False
else:
if copy:
_data._mask = _data._mask.copy()
_data._sharedmask = False
# Reset the shape of the original mask
if getmask(data) is not nomask:
data._mask.shape = data.shape
else:
_data._sharedmask = True
else:
# Case 2. : With a mask in input.
# If mask is boolean, create an array of True or False
if mask is True and mdtype == MaskType:
mask = np.ones(_data.shape, dtype=mdtype)
elif mask is False and mdtype == MaskType:
mask = np.zeros(_data.shape, dtype=mdtype)
else:
# Read the mask with the current mdtype
try:
mask = np.array(mask, copy=copy, dtype=mdtype)
# Or assume it's a sequence of bool/int
except TypeError:
mask = np.array([tuple([m] * len(mdtype)) for m in mask],
dtype=mdtype)
# Make sure the mask and the data have the same shape
if mask.shape != _data.shape:
(nd, nm) = (_data.size, mask.size)
if nm == 1:
mask = np.resize(mask, _data.shape)
elif nm == nd:
mask = np.reshape(mask, _data.shape)
else:
msg = "Mask and data not compatible: data size is %i, " + \
"mask size is %i."
raise MaskError(msg % (nd, nm))
copy = True
# Set the mask to the new value
if _data._mask is nomask:
_data._mask = mask
_data._sharedmask = not copy
else:
if not keep_mask:
_data._mask = mask
_data._sharedmask = not copy
else:
if names_:
def _recursive_or(a, b):
"do a|=b on each field of a, recursively"
for name in a.dtype.names:
(af, bf) = (a[name], b[name])
if af.dtype.names:
_recursive_or(af, bf)
else:
af |= bf
return
_recursive_or(_data._mask, mask)
else:
_data._mask = np.logical_or(mask, _data._mask)
_data._sharedmask = False
# Update fill_value.
if fill_value is None:
fill_value = getattr(data, '_fill_value', None)
# But don't run the check unless we have something to check.
if fill_value is not None:
_data._fill_value = _check_fill_value(fill_value, _data.dtype)
# Process extra options ..
if hard_mask is None:
_data._hardmask = getattr(data, '_hardmask', False)
else:
_data._hardmask = hard_mask
_data._baseclass = _baseclass
return _data
def _update_from(self, obj):
"""
Copies some attributes of obj to self.
"""
if obj is not None and isinstance(obj, ndarray):
_baseclass = type(obj)
else:
_baseclass = ndarray
# We need to copy the _basedict to avoid backward propagation
_optinfo = {}
_optinfo.update(getattr(obj, '_optinfo', {}))
_optinfo.update(getattr(obj, '_basedict', {}))
if not isinstance(obj, MaskedArray):
_optinfo.update(getattr(obj, '__dict__', {}))
_dict = dict(_fill_value=getattr(obj, '_fill_value', None),
_hardmask=getattr(obj, '_hardmask', False),
_sharedmask=getattr(obj, '_sharedmask', False),
_isfield=getattr(obj, '_isfield', False),
_baseclass=getattr(obj, '_baseclass', _baseclass),
_optinfo=_optinfo,
_basedict=_optinfo)
self.__dict__.update(_dict)
self.__dict__.update(_optinfo)
return
def __array_finalize__(self, obj):
"""
Finalizes the masked array.
"""
# Get main attributes.
self._update_from(obj)
# We have to decide how to initialize self.mask, based on
# obj.mask. This is very difficult. There might be some
# correspondence between the elements in the array we are being
# created from (= obj) and us. Or there might not. This method can
# be called in all kinds of places for all kinds of reasons -- could
# be empty_like, could be slicing, could be a ufunc, could be a view.
# The numpy subclassing interface simply doesn't give us any way
# to know, which means that at best this method will be based on
# guesswork and heuristics. To make things worse, there isn't even any
# clear consensus about what the desired behavior is. For instance,
# most users think that np.empty_like(marr) -- which goes via this
# method -- should return a masked array with an empty mask (see
# gh-3404 and linked discussions), but others disagree, and they have
# existing code which depends on empty_like returning an array that
# matches the input mask.
#
# Historically our algorithm was: if the template object mask had the
# same *number of elements* as us, then we used *its mask object
# itself* as our mask, so that writes to us would also write to the
# original array. This is horribly broken in multiple ways.
#
# Now what we do instead is, if the template object mask has the same
# number of elements as us, and we do not have the same base pointer
# as the template object (b/c views like arr[...] should keep the same
# mask), then we make a copy of the template object mask and use
# that. This is also horribly broken but somewhat less so. Maybe.
if isinstance(obj, ndarray):
# XX: This looks like a bug -- shouldn't it check self.dtype
# instead?
if obj.dtype.names:
_mask = getattr(obj, '_mask',
make_mask_none(obj.shape, obj.dtype))
else:
_mask = getattr(obj, '_mask', nomask)
# If self and obj point to exactly the same data, then probably
# self is a simple view of obj (e.g., self = obj[...]), so they
# should share the same mask. (This isn't 100% reliable, e.g. self
# could be the first row of obj, or have strange strides, but as a
# heuristic it's not bad.) In all other cases, we make a copy of
# the mask, so that future modifications to 'self' do not end up
# side-effecting 'obj' as well.
if (obj.__array_interface__["data"][0]
!= self.__array_interface__["data"][0]):
_mask = _mask.copy()
else:
_mask = nomask
self._mask = _mask
# Finalize the mask
if self._mask is not nomask:
try:
self._mask.shape = self.shape
except ValueError:
self._mask = nomask
except (TypeError, AttributeError):
# When _mask.shape is not writable (because it's a void)
pass
# Finalize the fill_value for structured arrays
if self.dtype.names:
if self._fill_value is None:
self._fill_value = _check_fill_value(None, self.dtype)
return
def __array_wrap__(self, obj, context=None):
"""
Special hook for ufuncs.
Wraps the numpy array and sets the mask according to context.
"""
result = obj.view(type(self))
result._update_from(self)
if context is not None:
result._mask = result._mask.copy()
(func, args, _) = context
m = reduce(mask_or, [getmaskarray(arg) for arg in args])
# Get the domain mask
domain = ufunc_domain.get(func, None)
if domain is not None:
# Take the domain, and make sure it's a ndarray
if len(args) > 2:
with np.errstate(divide='ignore', invalid='ignore'):
d = filled(reduce(domain, args), True)
else:
with np.errstate(divide='ignore', invalid='ignore'):
d = filled(domain(*args), True)
if d.any():
# Fill the result where the domain is wrong
try:
# Binary domain: take the last value
fill_value = ufunc_fills[func][-1]
except TypeError:
# Unary domain: just use this one
fill_value = ufunc_fills[func]
except KeyError:
# Domain not recognized, use fill_value instead
fill_value = self.fill_value
result = result.copy()
np.copyto(result, fill_value, where=d)
# Update the mask
if m is nomask:
m = d
else:
# Don't modify inplace, we risk back-propagation
m = (m | d)
# Make sure the mask has the proper size
if result.shape == () and m:
return masked
else:
result._mask = m
result._sharedmask = False
return result
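# Illustrative sketch of the domain handling above: for a ufunc with a
# registered domain (np.log is one such case), out-of-domain entries are
# masked instead of producing invalid values:
#
# >>> x = np.ma.array([-1., 1., np.e])
# >>> print(np.log(x))
# [-- 0.0 1.0]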
def view(self, dtype=None, type=None, fill_value=None):
"""
Return a view of the MaskedArray data
Parameters
----------
dtype : data-type or ndarray sub-class, optional
Data-type descriptor of the returned view, e.g., float32 or int16.
The default, None, results in the view having the same data-type
as `a`. As with ``ndarray.view``, dtype can also be specified as
an ndarray sub-class, which then specifies the type of the
returned object (this is equivalent to setting the ``type``
parameter).
type : Python type, optional
Type of the returned view, e.g., ndarray or matrix. Again, the
default None results in type preservation.
Notes
-----
``a.view()`` is used in two different ways:
``a.view(some_dtype)`` or ``a.view(dtype=some_dtype)`` constructs a view
of the array's memory with a different data-type. This can cause a
reinterpretation of the bytes of memory.
``a.view(ndarray_subclass)`` or ``a.view(type=ndarray_subclass)`` just
returns an instance of `ndarray_subclass` that looks at the same array
(same shape, dtype, etc.) This does not cause a reinterpretation of the
memory.
If `fill_value` is not specified, but `dtype` is specified (and is not
an ndarray sub-class), the `fill_value` of the MaskedArray will be
reset. If neither `fill_value` nor `dtype` are specified (or if
`dtype` is an ndarray sub-class), then the fill value is preserved.
Finally, if `fill_value` is specified, but `dtype` is not, the fill
value is set to the specified value.
For ``a.view(some_dtype)``, if ``some_dtype`` has a different number of
bytes per entry than the previous dtype (for example, converting a
regular array to a structured array), then the behavior of the view
cannot be predicted just from the superficial appearance of ``a`` (shown
by ``print(a)``). It also depends on exactly how ``a`` is stored in
memory. Therefore if ``a`` is C-ordered versus fortran-ordered, versus
defined as a slice or transpose, etc., the view may give different
results.
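Examples
--------
A small illustrative sketch (exact output formatting may differ):
>>> x = np.ma.array([1., 2., 3.], mask=[0, 1, 0], fill_value=-1.)
>>> x.view().fill_value        # no dtype given: fill_value preserved
-1.0
>>> x.view(dtype=np.float32).fill_value   # dtype given: reset to default
1e+20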
"""
if dtype is None:
if type is None:
output = ndarray.view(self)
else:
output = ndarray.view(self, type)
elif type is None:
try:
if issubclass(dtype, ndarray):
output = ndarray.view(self, dtype)
dtype = None
else:
output = ndarray.view(self, dtype)
except TypeError:
output = ndarray.view(self, dtype)
else:
output = ndarray.view(self, dtype, type)
# also make the mask be a view (so attr changes to the view's
# mask do not affect the original object's mask)
# (especially important to avoid affecting np.masked singleton)
if (getattr(output, '_mask', nomask) is not nomask):
output._mask = output._mask.view()
# Make sure to reset the _fill_value if needed
if getattr(output, '_fill_value', None) is not None:
if fill_value is None:
if dtype is None:
pass # leave _fill_value as is
else:
output._fill_value = None
else:
output.fill_value = fill_value
return output
view.__doc__ = ndarray.view.__doc__
def astype(self, newtype):
"""
Returns a copy of the MaskedArray cast to given newtype.
Returns
-------
output : MaskedArray
A copy of self cast to input newtype.
The returned record shape matches self.shape.
Examples
--------
>>> x = np.ma.array([[1,2,3.1],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4)
>>> print(x)
[[1.0 -- 3.1]
[-- 5.0 --]
[7.0 -- 9.0]]
>>> print(x.astype(int32))
[[1 -- 3]
[-- 5 --]
[7 -- 9]]
"""
newtype = np.dtype(newtype)
output = self._data.astype(newtype).view(type(self))
output._update_from(self)
names = output.dtype.names
if names is None:
output._mask = self._mask.astype(bool)
else:
if self._mask is nomask:
output._mask = nomask
else:
output._mask = self._mask.astype([(n, bool) for n in names])
# Don't check _fill_value if it's None, that'll speed things up
if self._fill_value is not None:
output._fill_value = _check_fill_value(self._fill_value, newtype)
return output
def __getitem__(self, indx):
"""
x.__getitem__(y) <==> x[y]
Return the item described by i, as a masked array.
"""
dout = self.data[indx]
# We could directly use ndarray.__getitem__ on self.
# But then we would have to modify __array_finalize__ to prevent the
# mask of being reshaped if it hasn't been set up properly yet
# So it's easier to stick to the current version
_mask = self._mask
# Did we extract a single item?
if not getattr(dout, 'ndim', False):
# A record
if isinstance(dout, np.void):
mask = _mask[indx]
# We should always re-cast to mvoid, otherwise users can
# change masks on rows that already have masked values, but not
# on rows that have no masked values, which is inconsistent.
dout = mvoid(dout, mask=mask, hardmask=self._hardmask)
# Just a scalar
elif _mask is not nomask and _mask[indx]:
return masked
elif self.dtype.type is np.object_ and self.dtype is not dout.dtype:
# self contains an object array of arrays (yes, that happens).
# If masked, turn into a MaskedArray, with everything masked.
if _mask is not nomask and _mask[indx]:
return MaskedArray(dout, mask=True)
else:
# Force dout to MA
dout = dout.view(type(self))
# Inherit attributes from self
dout._update_from(self)
# Check the fill_value
if isinstance(indx, basestring):
if self._fill_value is not None:
dout._fill_value = self._fill_value[indx]
# If we're indexing a multidimensional field in a
# structured array (such as dtype("(2,)i2,(2,)i1")),
# dimensionality goes up (M[field].ndim == M.ndim +
# len(M.dtype[field].shape)). That's fine for
# M[field] but problematic for M[field].fill_value
# which should have shape () to avoid breaking several
# methods. There is no great way out, so set to
# first element. See issue #6723.
if dout._fill_value.ndim > 0:
if not (dout._fill_value ==
dout._fill_value.flat[0]).all():
warnings.warn(
"Upon accessing multidimensional field "
"{indx:s}, need to keep dimensionality "
"of fill_value at 0. Discarding "
"heterogeneous fill_value and setting "
"all to {fv!s}.".format(indx=indx,
fv=dout._fill_value[0]),
stacklevel=2)
dout._fill_value = dout._fill_value.flat[0]
dout._isfield = True
# Update the mask if needed
if _mask is not nomask:
dout._mask = _mask[indx]
# set shape to match that of data; this is needed for matrices
dout._mask.shape = dout.shape
dout._sharedmask = True
# Note: Don't try to check for m.any(), that'll take too long
return dout
def __setitem__(self, indx, value):
"""
x.__setitem__(i, y) <==> x[i]=y
Set item described by index. If value is masked, masks those
locations.
"""
if self is masked:
raise MaskError('Cannot alter the masked element.')
_data = self._data
_mask = self._mask
if isinstance(indx, basestring):
_data[indx] = value
if _mask is nomask:
self._mask = _mask = make_mask_none(self.shape, self.dtype)
_mask[indx] = getmask(value)
return
_dtype = _data.dtype
nbfields = len(_dtype.names or ())
if value is masked:
# The mask wasn't set: create a full version.
if _mask is nomask:
_mask = self._mask = make_mask_none(self.shape, _dtype)
# Now, set the mask to its value.
if nbfields:
_mask[indx] = tuple([True] * nbfields)
else:
_mask[indx] = True
if not self._isfield:
self._sharedmask = False
return
# Get the _data part of the new value
dval = value
# Get the _mask part of the new value
mval = getattr(value, '_mask', nomask)
if nbfields and mval is nomask:
mval = tuple([False] * nbfields)
if _mask is nomask:
# Set the data, then the mask
_data[indx] = dval
if mval is not nomask:
_mask = self._mask = make_mask_none(self.shape, _dtype)
_mask[indx] = mval
elif not self._hardmask:
# Unshare the mask if necessary to avoid propagation
# We want to remove the unshare logic from this place in the
# future. Note that _sharedmask has lots of false positives.
if not self._isfield:
notthree = getattr(sys, 'getrefcount', False) and (sys.getrefcount(_mask) != 3)
if self._sharedmask and not (
# If no one else holds a reference (we have two
# references (_mask and self._mask) -- add one for
# getrefcount) and the array owns its own data
# copying the mask should do nothing.
(not notthree) and _mask.flags.owndata):
# 2016.01.15 -- v1.11.0
warnings.warn(
"setting an item on a masked array which has a shared "
"mask will not copy the mask and also change the "
"original mask array in the future.\n"
"Check the NumPy 1.11 release notes for more "
"information.",
MaskedArrayFutureWarning, stacklevel=2)
self.unshare_mask()
_mask = self._mask
# Set the data, then the mask
_data[indx] = dval
_mask[indx] = mval
elif hasattr(indx, 'dtype') and (indx.dtype == MaskType):
indx = indx * umath.logical_not(_mask)
_data[indx] = dval
else:
if nbfields:
err_msg = "Flexible 'hard' masks are not yet supported."
raise NotImplementedError(err_msg)
mindx = mask_or(_mask[indx], mval, copy=True)
dindx = self._data[indx]
if dindx.size > 1:
np.copyto(dindx, dval, where=~mindx)
elif mindx is nomask:
dindx = dval
_data[indx] = dindx
_mask[indx] = mindx
return
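# Illustrative sketch: assigning the `masked` constant through __setitem__
# masks the targeted entries in place:
#
# >>> x = np.ma.array([1, 2, 3])
# >>> x[1] = np.ma.masked
# >>> print(x)
# [1 -- 3]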
def __setattr__(self, attr, value):
super(MaskedArray, self).__setattr__(attr, value)
if attr == 'dtype' and self._mask is not nomask:
self._mask = self._mask.view(make_mask_descr(value), ndarray)
# Try to reset the shape of the mask (if we don't have a void)
# This raises a ValueError if the dtype change won't work
try:
self._mask.shape = self.shape
except (AttributeError, TypeError):
pass
def __getslice__(self, i, j):
"""
x.__getslice__(i, j) <==> x[i:j]
Return the slice described by (i, j). The use of negative indices
is not supported.
"""
return self.__getitem__(slice(i, j))
def __setslice__(self, i, j, value):
"""
x.__setslice__(i, j, value) <==> x[i:j]=value
Set the slice (i,j) of a to value. If value is masked, mask those
locations.
"""
self.__setitem__(slice(i, j), value)
def __setmask__(self, mask, copy=False):
"""
Set the mask.
"""
idtype = self.dtype
current_mask = self._mask
if mask is masked:
mask = True
if (current_mask is nomask):
# Make sure the mask is set
# Just don't do anything if there's nothing to do.
if mask is nomask:
return
current_mask = self._mask = make_mask_none(self.shape, idtype)
if idtype.names is None:
# No named fields.
# Hardmask: don't unmask the data
if self._hardmask:
current_mask |= mask
# Softmask: set everything to False
# If it's obviously a compatible scalar, use a quick update
# method.
elif isinstance(mask, (int, float, np.bool_, np.number)):
current_mask[...] = mask
# Otherwise fall back to the slower, general purpose way.
else:
current_mask.flat = mask
else:
# Named fields: the mask has a structured dtype.
mdtype = current_mask.dtype
mask = np.array(mask, copy=False)
# Mask is a singleton
if not mask.ndim:
# It's a boolean : make a record
if mask.dtype.kind == 'b':
mask = np.array(tuple([mask.item()] * len(mdtype)),
dtype=mdtype)
# It's a record: make sure the dtype is correct
else:
mask = mask.astype(mdtype)
# Mask is a sequence
else:
# Make sure the new mask is a ndarray with the proper dtype
try:
mask = np.array(mask, copy=copy, dtype=mdtype)
# Or assume it's a sequence of bool/int
except TypeError:
mask = np.array([tuple([m] * len(mdtype)) for m in mask],
dtype=mdtype)
# Hardmask: don't unmask the data
if self._hardmask:
for n in idtype.names:
current_mask[n] |= mask[n]
# Softmask: set everything to False
# If it's obviously a compatible scalar, use a quick update
# method.
elif isinstance(mask, (int, float, np.bool_, np.number)):
current_mask[...] = mask
# Otherwise fall back to the slower, general purpose way.
else:
current_mask.flat = mask
# Reshape if needed
if current_mask.shape:
current_mask.shape = self.shape
return
_set_mask = __setmask__
def _get_mask(self):
"""Return the current mask.
"""
# We could try to force a reshape, but that wouldn't work in some
# cases.
return self._mask
mask = property(fget=_get_mask, fset=__setmask__, doc="Mask")
def _get_recordmask(self):
"""
Return the mask of the records.
A record is masked when all the fields are masked.
"""
_mask = self._mask.view(ndarray)
if _mask.dtype.names is None:
return _mask
return np.all(flatten_structured_array(_mask), axis=-1)
def _set_recordmask(self):
"""
Return the mask of the records.
A record is masked when all the fields are masked.
"""
raise NotImplementedError("Coming soon: setting the mask per records!")
recordmask = property(fget=_get_recordmask)
def harden_mask(self):
"""
Force the mask to hard.
Whether the mask of a masked array is hard or soft is determined by
its `hardmask` property. `harden_mask` sets `hardmask` to True.
See Also
--------
hardmask
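Examples
--------
A rough behavioural sketch:
>>> x = np.ma.array([1, 2, 3], mask=[0, 0, 1])
>>> x = x.harden_mask()      # returns self
>>> x[2] = 10                # ignored: the hard mask protects the entry
>>> print(x)
[1 2 --]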
"""
self._hardmask = True
return self
def soften_mask(self):
"""
Force the mask to soft.
Whether the mask of a masked array is hard or soft is determined by
its `hardmask` property. `soften_mask` sets `hardmask` to False.
See Also
--------
hardmask
"""
self._hardmask = False
return self
hardmask = property(fget=lambda self: self._hardmask,
doc="Hardness of the mask")
def unshare_mask(self):
"""
Copy the mask and set the sharedmask flag to False.
Whether the mask is shared between masked arrays can be seen from
the `sharedmask` property. `unshare_mask` ensures the mask is not shared.
A copy of the mask is only made if it was shared.
See Also
--------
sharedmask
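Examples
--------
A rough sketch of the effect on the `sharedmask` flag:
>>> base = np.ma.array([1, 2, 3], mask=[0, 1, 0])
>>> view = base[:]
>>> view.sharedmask
True
>>> view = view.unshare_mask()
>>> view.sharedmask
False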
"""
if self._sharedmask:
self._mask = self._mask.copy()
self._sharedmask = False
return self
sharedmask = property(fget=lambda self: self._sharedmask,
doc="Share status of the mask (read-only).")
def shrink_mask(self):
"""
Reduce a mask to nomask when possible.
Parameters
----------
None
Returns
-------
None
Examples
--------
>>> x = np.ma.array([[1,2 ], [3, 4]], mask=[0]*4)
>>> x.mask
array([[False, False],
[False, False]], dtype=bool)
>>> x.shrink_mask()
>>> x.mask
False
"""
m = self._mask
if m.ndim and not m.any():
self._mask = nomask
return self
baseclass = property(fget=lambda self: self._baseclass,
doc="Class of the underlying data (read-only).")
def _get_data(self):
"""Return the current data, as a view of the original
underlying data.
"""
return ndarray.view(self, self._baseclass)
_data = property(fget=_get_data)
data = property(fget=_get_data)
def _get_flat(self):
"Return a flat iterator."
return MaskedIterator(self)
def _set_flat(self, value):
"Set a flattened version of self to value."
y = self.ravel()
y[:] = value
flat = property(fget=_get_flat, fset=_set_flat,
doc="Flat version of the array.")
def get_fill_value(self):
"""
Return the filling value of the masked array.
Returns
-------
fill_value : scalar
The filling value.
Examples
--------
>>> for dt in [np.int32, np.int64, np.float64, np.complex128]:
... np.ma.array([0, 1], dtype=dt).get_fill_value()
...
999999
999999
1e+20
(1e+20+0j)
>>> x = np.ma.array([0, 1.], fill_value=-np.inf)
>>> x.get_fill_value()
-inf
"""
if self._fill_value is None:
self._fill_value = _check_fill_value(None, self.dtype)
# Temporary workaround to account for the fact that str and bytes
# scalars cannot be indexed with (), whereas all other numpy
# scalars can. See issues #7259 and #7267.
# The if-block can be removed after #7267 has been fixed.
if isinstance(self._fill_value, ndarray):
return self._fill_value[()]
return self._fill_value
def set_fill_value(self, value=None):
"""
Set the filling value of the masked array.
Parameters
----------
value : scalar, optional
The new filling value. Default is None, in which case a default
based on the data type is used.
See Also
--------
ma.set_fill_value : Equivalent function.
Examples
--------
>>> x = np.ma.array([0, 1.], fill_value=-np.inf)
>>> x.fill_value
-inf
>>> x.set_fill_value(np.pi)
>>> x.fill_value
3.1415926535897931
Reset to default:
>>> x.set_fill_value()
>>> x.fill_value
1e+20
"""
target = _check_fill_value(value, self.dtype)
_fill_value = self._fill_value
if _fill_value is None:
# Create the attribute if it was undefined
self._fill_value = target
else:
# Don't overwrite the attribute, just fill it (for propagation)
_fill_value[()] = target
fill_value = property(fget=get_fill_value, fset=set_fill_value,
doc="Filling value.")
def filled(self, fill_value=None):
"""
Return a copy of self, with masked values filled with a given value.
**However**, if there are no masked values to fill, self will be
returned instead as an ndarray.
Parameters
----------
fill_value : scalar, optional
The value to use for invalid entries (None by default).
If None, the `fill_value` attribute of the array is used instead.
Returns
-------
filled_array : ndarray
A copy of ``self`` with invalid entries replaced by *fill_value*
(be it the function argument or the attribute of ``self``), or
``self`` itself as an ndarray if there are no invalid entries to
be replaced.
Notes
-----
The result is **not** a MaskedArray!
Examples
--------
>>> x = np.ma.array([1,2,3,4,5], mask=[0,0,1,0,1], fill_value=-999)
>>> x.filled()
array([1, 2, -999, 4, -999])
>>> type(x.filled())
<type 'numpy.ndarray'>
Subclassing is preserved. This means that if the data part of the masked
array is a matrix, `filled` returns a matrix:
>>> x = np.ma.array(np.matrix([[1, 2], [3, 4]]), mask=[[0, 1], [1, 0]])
>>> x.filled()
matrix([[ 1, 999999],
[999999, 4]])
"""
m = self._mask
if m is nomask:
return self._data
if fill_value is None:
fill_value = self.fill_value
else:
fill_value = _check_fill_value(fill_value, self.dtype)
if self is masked_singleton:
return np.asanyarray(fill_value)
if m.dtype.names:
result = self._data.copy('K')
_recursive_filled(result, self._mask, fill_value)
elif not m.any():
return self._data
else:
result = self._data.copy('K')
try:
np.copyto(result, fill_value, where=m)
except (TypeError, AttributeError):
fill_value = narray(fill_value, dtype=object)
d = result.astype(object)
result = np.choose(m, (d, fill_value))
except IndexError:
# ok, if scalar
if self._data.shape:
raise
elif m:
result = np.array(fill_value, dtype=self.dtype)
else:
result = self._data
return result
def compressed(self):
"""
Return all the non-masked data as a 1-D array.
Returns
-------
data : ndarray
A new `ndarray` holding the non-masked data is returned.
Notes
-----
The result is **not** a MaskedArray!
Examples
--------
>>> x = np.ma.array(np.arange(5), mask=[0]*2 + [1]*3)
>>> x.compressed()
array([0, 1])
>>> type(x.compressed())
<type 'numpy.ndarray'>
"""
data = ndarray.ravel(self._data)
if self._mask is not nomask:
data = data.compress(np.logical_not(ndarray.ravel(self._mask)))
return data
def compress(self, condition, axis=None, out=None):
"""
Return `a` where condition is ``True``.
If condition is a `MaskedArray`, missing values are considered
as ``False``.
Parameters
----------
condition : array_like
Boolean 1-d array selecting which entries to return. If len(condition)
is less than the size of `a` along the axis, then the output is
truncated to the length of the condition array.
axis : {None, int}, optional
Axis along which the operation must be performed.
out : {None, ndarray}, optional
Alternative output array in which to place the result. It must have
the same shape as the expected output but the type will be cast if
necessary.
Returns
-------
result : MaskedArray
A :class:`MaskedArray` object.
Notes
-----
Please note the difference with :meth:`compressed` !
The output of :meth:`compress` has a mask, the output of
:meth:`compressed` does not.
Examples
--------
>>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4)
>>> print(x)
[[1 -- 3]
[-- 5 --]
[7 -- 9]]
>>> x.compress([1, 0, 1])
masked_array(data = [1 3],
mask = [False False],
fill_value=999999)
>>> x.compress([1, 0, 1], axis=1)
masked_array(data =
[[1 3]
[-- --]
[7 9]],
mask =
[[False False]
[ True True]
[False False]],
fill_value=999999)
"""
# Get the basic components
(_data, _mask) = (self._data, self._mask)
# Force the condition to a regular ndarray and forget the missing
# values.
condition = np.array(condition, copy=False, subok=False)
_new = _data.compress(condition, axis=axis, out=out).view(type(self))
_new._update_from(self)
if _mask is not nomask:
_new._mask = _mask.compress(condition, axis=axis)
return _new
def __str__(self):
"""
String representation.
"""
if masked_print_option.enabled():
f = masked_print_option
if self is masked:
return str(f)
m = self._mask
if m is nomask:
res = self._data
else:
if m.shape == () and m.itemsize == len(m.dtype):
if m.dtype.names:
m = m.view((bool, len(m.dtype)))
if m.any():
return str(tuple((f if _m else _d) for _d, _m in
zip(self._data.tolist(), m)))
else:
return str(self._data)
elif m:
return str(f)
else:
return str(self._data)
# convert to object array to make filled work
names = self.dtype.names
if names is None:
data = self._data
mask = m
# For big arrays, to avoid a costly conversion to the
# object dtype, extract the corners before the conversion.
print_width = (self._print_width if self.ndim > 1
else self._print_width_1d)
for axis in range(self.ndim):
if data.shape[axis] > print_width:
ind = print_width // 2
arr = np.split(data, (ind, -ind), axis=axis)
data = np.concatenate((arr[0], arr[2]), axis=axis)
arr = np.split(mask, (ind, -ind), axis=axis)
mask = np.concatenate((arr[0], arr[2]), axis=axis)
res = data.astype("O")
res.view(ndarray)[mask] = f
else:
rdtype = _recursive_make_descr(self.dtype, "O")
res = self._data.astype(rdtype)
_recursive_printoption(res, m, f)
else:
res = self.filled(self.fill_value)
return str(res)
def __repr__(self):
"""
Literal string representation.
"""
n = len(self.shape)
if self._baseclass is np.ndarray:
name = 'array'
else:
name = self._baseclass.__name__
parameters = dict(name=name, nlen=" " * len(name),
data=str(self), mask=str(self._mask),
fill=str(self.fill_value), dtype=str(self.dtype))
if self.dtype.names:
if n <= 1:
return _print_templates['short_flx'] % parameters
return _print_templates['long_flx'] % parameters
elif n <= 1:
return _print_templates['short_std'] % parameters
return _print_templates['long_std'] % parameters
def _delegate_binop(self, other):
# This emulates the logic in
# multiarray/number.c:PyArray_GenericBinaryFunction
if (not isinstance(other, np.ndarray)
and not hasattr(other, "__numpy_ufunc__")):
other_priority = getattr(other, "__array_priority__", -1000000)
if self.__array_priority__ < other_priority:
return True
return False
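    # Illustrative sketch (hypothetical class, not part of this module): an
    # object advertising a higher ``__array_priority__`` makes the masked
    # array return NotImplemented, so Python falls back to the reflected op:
    #
    #     >>> class Prio(object):
    #     ...     __array_priority__ = 1e9
    #     ...     def __radd__(self, other):
    #     ...         return "handled by Prio"
    #     >>> np.ma.array([1, 2]) + Prio()
    #     'handled by Prio'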
def __eq__(self, other):
"""
Check whether other equals self elementwise.
"""
if self is masked:
return masked
omask = getattr(other, '_mask', nomask)
if omask is nomask:
check = self.filled(0).__eq__(other)
try:
check = check.view(type(self))
check._mask = self._mask
except AttributeError:
# Dang, we have a bool instead of an array: return the bool
return check
else:
odata = filled(other, 0)
check = self.filled(0).__eq__(odata).view(type(self))
if self._mask is nomask:
check._mask = omask
else:
mask = mask_or(self._mask, omask)
if mask.dtype.names:
if mask.size > 1:
axis = 1
else:
axis = None
try:
mask = mask.view((bool_, len(self.dtype))).all(axis)
except ValueError:
mask = np.all([[f[n].all() for n in mask.dtype.names]
for f in mask], axis=axis)
check._mask = mask
return check
def __ne__(self, other):
"""
Check whether other doesn't equal self elementwise
"""
if self is masked:
return masked
omask = getattr(other, '_mask', nomask)
if omask is nomask:
check = self.filled(0).__ne__(other)
try:
check = check.view(type(self))
check._mask = self._mask
except AttributeError:
# In case check is a boolean (or a numpy.bool)
return check
else:
odata = filled(other, 0)
check = self.filled(0).__ne__(odata).view(type(self))
if self._mask is nomask:
check._mask = omask
else:
mask = mask_or(self._mask, omask)
if mask.dtype.names:
if mask.size > 1:
axis = 1
else:
axis = None
try:
mask = mask.view((bool_, len(self.dtype))).all(axis)
except ValueError:
mask = np.all([[f[n].all() for n in mask.dtype.names]
for f in mask], axis=axis)
check._mask = mask
return check
def __add__(self, other):
"""
Add self to other, and return a new masked array.
"""
if self._delegate_binop(other):
return NotImplemented
return add(self, other)
def __radd__(self, other):
"""
Add other to self, and return a new masked array.
"""
# In analogy with __rsub__ and __rdiv__, use original order:
# we get here from `other + self`.
return add(other, self)
def __sub__(self, other):
"""
Subtract other from self, and return a new masked array.
"""
if self._delegate_binop(other):
return NotImplemented
return subtract(self, other)
def __rsub__(self, other):
"""
Subtract self from other, and return a new masked array.
"""
return subtract(other, self)
def __mul__(self, other):
"Multiply self by other, and return a new masked array."
if self._delegate_binop(other):
return NotImplemented
return multiply(self, other)
def __rmul__(self, other):
"""
Multiply other by self, and return a new masked array.
"""
# In analogy with __rsub__ and __rdiv__, use original order:
# we get here from `other * self`.
return multiply(other, self)
def __div__(self, other):
"""
Divide other into self, and return a new masked array.
"""
if self._delegate_binop(other):
return NotImplemented
return divide(self, other)
def __truediv__(self, other):
"""
Divide other into self, and return a new masked array.
"""
if self._delegate_binop(other):
return NotImplemented
return true_divide(self, other)
def __rtruediv__(self, other):
"""
Divide self into other, and return a new masked array.
"""
return true_divide(other, self)
def __floordiv__(self, other):
"""
Divide other into self, and return a new masked array.
"""
if self._delegate_binop(other):
return NotImplemented
return floor_divide(self, other)
def __rfloordiv__(self, other):
"""
Divide self into other, and return a new masked array.
"""
return floor_divide(other, self)
def __pow__(self, other):
"""
Raise self to the power other, masking the potential NaNs/Infs
"""
if self._delegate_binop(other):
return NotImplemented
return power(self, other)
def __rpow__(self, other):
"""
Raise other to the power self, masking the potential NaNs/Infs
"""
return power(other, self)
def __iadd__(self, other):
"""
Add other to self in-place.
"""
m = getmask(other)
if self._mask is nomask:
if m is not nomask and m.any():
self._mask = make_mask_none(self.shape, self.dtype)
self._mask += m
else:
if m is not nomask:
self._mask += m
self._data.__iadd__(np.where(self._mask, self.dtype.type(0),
getdata(other)))
return self
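    # For instance (an illustrative sketch): in-place addition merges the mask
    # of ``other`` into ``self`` and leaves the data at masked slots untouched:
    #
    #     >>> a = np.ma.array([1, 2, 3], mask=[0, 0, 1])
    #     >>> a += np.ma.array([10, 10, 10], mask=[0, 1, 0])
    #     >>> print(a)
    #     [11 -- --]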
def __isub__(self, other):
"""
Subtract other from self in-place.
"""
m = getmask(other)
if self._mask is nomask:
if m is not nomask and m.any():
self._mask = make_mask_none(self.shape, self.dtype)
self._mask += m
elif m is not nomask:
self._mask += m
self._data.__isub__(np.where(self._mask, self.dtype.type(0),
getdata(other)))
return self
def __imul__(self, other):
"""
Multiply self by other in-place.
"""
m = getmask(other)
if self._mask is nomask:
if m is not nomask and m.any():
self._mask = make_mask_none(self.shape, self.dtype)
self._mask += m
elif m is not nomask:
self._mask += m
self._data.__imul__(np.where(self._mask, self.dtype.type(1),
getdata(other)))
return self
def __idiv__(self, other):
"""
Divide self by other in-place.
"""
other_data = getdata(other)
dom_mask = _DomainSafeDivide().__call__(self._data, other_data)
other_mask = getmask(other)
new_mask = mask_or(other_mask, dom_mask)
# The following 3 lines control the domain filling
if dom_mask.any():
(_, fval) = ufunc_fills[np.divide]
other_data = np.where(dom_mask, fval, other_data)
self._mask |= new_mask
self._data.__idiv__(np.where(self._mask, self.dtype.type(1),
other_data))
return self
def __ifloordiv__(self, other):
"""
Floor divide self by other in-place.
"""
other_data = getdata(other)
dom_mask = _DomainSafeDivide().__call__(self._data, other_data)
other_mask = getmask(other)
new_mask = mask_or(other_mask, dom_mask)
# The following 3 lines control the domain filling
if dom_mask.any():
(_, fval) = ufunc_fills[np.floor_divide]
other_data = np.where(dom_mask, fval, other_data)
self._mask |= new_mask
self._data.__ifloordiv__(np.where(self._mask, self.dtype.type(1),
other_data))
return self
def __itruediv__(self, other):
"""
True divide self by other in-place.
"""
other_data = getdata(other)
dom_mask = _DomainSafeDivide().__call__(self._data, other_data)
other_mask = getmask(other)
new_mask = mask_or(other_mask, dom_mask)
# The following 3 lines control the domain filling
if dom_mask.any():
(_, fval) = ufunc_fills[np.true_divide]
other_data = np.where(dom_mask, fval, other_data)
self._mask |= new_mask
self._data.__itruediv__(np.where(self._mask, self.dtype.type(1),
other_data))
return self
def __ipow__(self, other):
"""
Raise self to the power other, in place.
"""
other_data = getdata(other)
other_mask = getmask(other)
with np.errstate(divide='ignore', invalid='ignore'):
self._data.__ipow__(np.where(self._mask, self.dtype.type(1),
other_data))
invalid = np.logical_not(np.isfinite(self._data))
if invalid.any():
if self._mask is not nomask:
self._mask |= invalid
else:
self._mask = invalid
np.copyto(self._data, self.fill_value, where=invalid)
new_mask = mask_or(other_mask, invalid)
self._mask = mask_or(self._mask, new_mask)
return self
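    # For instance (an illustrative sketch): invalid results such as
    # ``(-1.0) ** 0.5`` are masked rather than left as NaN:
    #
    #     >>> a = np.ma.array([-1., 4.])
    #     >>> a **= 0.5
    #     >>> print(a)
    #     [-- 2.0]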
def __float__(self):
"""
Convert to float.
"""
if self.size > 1:
raise TypeError("Only length-1 arrays can be converted "
"to Python scalars")
elif self._mask:
warnings.warn("Warning: converting a masked element to nan.", stacklevel=2)
return np.nan
return float(self.item())
def __int__(self):
"""
Convert to int.
"""
if self.size > 1:
raise TypeError("Only length-1 arrays can be converted "
"to Python scalars")
elif self._mask:
raise MaskError('Cannot convert masked element to a Python int.')
return int(self.item())
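    # For instance (an illustrative sketch): a length-1 unmasked array converts
    # normally, while a masked element warns (float) or raises (int) as
    # documented above:
    #
    #     >>> int(np.ma.array([3], mask=[False]))
    #     3
    #     >>> float(np.ma.array([1], mask=[True]))   # warns, then returns nan
    #     nan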
def get_imag(self):
"""
Return the imaginary part of the masked array.
The returned array is a view on the imaginary part of the `MaskedArray`
whose `get_imag` method is called.
Parameters
----------
None
Returns
-------
result : MaskedArray
The imaginary part of the masked array.
See Also
--------
get_real, real, imag
Examples
--------
>>> x = np.ma.array([1+1.j, -2j, 3.45+1.6j], mask=[False, True, False])
>>> x.get_imag()
masked_array(data = [1.0 -- 1.6],
mask = [False True False],
fill_value = 1e+20)
"""
result = self._data.imag.view(type(self))
result.__setmask__(self._mask)
return result
imag = property(fget=get_imag, doc="Imaginary part.")
def get_real(self):
"""
Return the real part of the masked array.
The returned array is a view on the real part of the `MaskedArray`
whose `get_real` method is called.
Parameters
----------
None
Returns
-------
result : MaskedArray
The real part of the masked array.
See Also
--------
get_imag, real, imag
Examples
--------
>>> x = np.ma.array([1+1.j, -2j, 3.45+1.6j], mask=[False, True, False])
>>> x.get_real()
masked_array(data = [1.0 -- 3.45],
mask = [False True False],
fill_value = 1e+20)
"""
result = self._data.real.view(type(self))
result.__setmask__(self._mask)
return result
real = property(fget=get_real, doc="Real part")
def count(self, axis=None, keepdims=np._NoValue):
"""
Count the non-masked elements of the array along the given axis.
Parameters
----------
axis : None or int or tuple of ints, optional
Axis or axes along which the count is performed.
The default (`axis` = `None`) performs the count over all
the dimensions of the input array. `axis` may be negative, in
which case it counts from the last to the first axis.
.. versionadded:: 1.10.0
If this is a tuple of ints, the count is performed on multiple
axes, instead of a single axis or all the axes as before.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the array.
Returns
-------
result : ndarray or scalar
An array with the same shape as the input array, with the specified
axis removed. If the array is a 0-d array, or if `axis` is None, a
scalar is returned.
See Also
--------
count_masked : Count masked elements in array or along a given axis.
Examples
--------
>>> import numpy.ma as ma
>>> a = ma.arange(6).reshape((2, 3))
>>> a[1, :] = ma.masked
>>> a
masked_array(data =
[[0 1 2]
[-- -- --]],
mask =
[[False False False]
[ True True True]],
fill_value = 999999)
>>> a.count()
3
When the `axis` keyword is specified an array of appropriate size is
returned.
>>> a.count(axis=0)
array([1, 1, 1])
>>> a.count(axis=1)
array([3, 0])
"""
kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims}
m = self._mask
# special case for matrices (we assume no other subclasses modify
# their dimensions)
if isinstance(self.data, np.matrix):
if m is nomask:
m = np.zeros(self.shape, dtype=np.bool_)
m = m.view(type(self.data))
if m is nomask:
# compare to _count_reduce_items in _methods.py
            if self.shape == ():
if axis not in (None, 0):
raise ValueError("'axis' entry is out of bounds")
return 1
elif axis is None:
if kwargs.get('keepdims', False):
return np.array(self.size, dtype=np.intp, ndmin=self.ndim)
return self.size
axes = axis if isinstance(axis, tuple) else (axis,)
axes = tuple(a if a >= 0 else self.ndim + a for a in axes)
if len(axes) != len(set(axes)):
raise ValueError("duplicate value in 'axis'")
if builtins.any(a < 0 or a >= self.ndim for a in axes):
raise ValueError("'axis' entry is out of bounds")
items = 1
for ax in axes:
items *= self.shape[ax]
if kwargs.get('keepdims', False):
out_dims = list(self.shape)
for a in axes:
out_dims[a] = 1
else:
out_dims = [d for n, d in enumerate(self.shape)
if n not in axes]
# make sure to return a 0-d array if axis is supplied
return np.full(out_dims, items, dtype=np.intp)
# take care of the masked singleton
if self is masked:
return 0
return (~m).sum(axis=axis, dtype=np.intp, **kwargs)
flatten = _arraymethod('flatten')
def ravel(self, order='C'):
"""
Returns a 1D version of self, as a view.
Parameters
----------
order : {'C', 'F', 'A', 'K'}, optional
The elements of `a` are read using this index order. 'C' means to
index the elements in C-like order, with the last axis index
changing fastest, back to the first axis index changing slowest.
'F' means to index the elements in Fortran-like index order, with
the first index changing fastest, and the last index changing
slowest. Note that the 'C' and 'F' options take no account of the
memory layout of the underlying array, and only refer to the order
of axis indexing. 'A' means to read the elements in Fortran-like
        index order if `a` is Fortran *contiguous* in memory, C-like order
otherwise. 'K' means to read the elements in the order they occur
in memory, except for reversing the data when strides are negative.
By default, 'C' index order is used.
Returns
-------
MaskedArray
Output view is of shape ``(self.size,)`` (or
``(np.ma.product(self.shape),)``).
Examples
--------
>>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4)
>>> print(x)
[[1 -- 3]
[-- 5 --]
[7 -- 9]]
>>> print(x.ravel())
[1 -- 3 -- 5 -- 7 -- 9]
"""
r = ndarray.ravel(self._data, order=order).view(type(self))
r._update_from(self)
if self._mask is not nomask:
r._mask = ndarray.ravel(self._mask, order=order).reshape(r.shape)
else:
r._mask = nomask
return r
repeat = _arraymethod('repeat')
def reshape(self, *s, **kwargs):
"""
Give a new shape to the array without changing its data.
Returns a masked array containing the same data, but with a new shape.
The result is a view on the original array; if this is not possible, a
ValueError is raised.
Parameters
----------
shape : int or tuple of ints
The new shape should be compatible with the original shape. If an
integer is supplied, then the result will be a 1-D array of that
length.
order : {'C', 'F'}, optional
Determines whether the array data should be viewed as in C
(row-major) or FORTRAN (column-major) order.
Returns
-------
reshaped_array : array
A new view on the array.
See Also
--------
reshape : Equivalent function in the masked array module.
numpy.ndarray.reshape : Equivalent method on ndarray object.
numpy.reshape : Equivalent function in the NumPy module.
Notes
-----
        The reshaping operation cannot guarantee that a copy will not be made.
        To modify the shape in place, use ``a.shape = s``.
Examples
--------
>>> x = np.ma.array([[1,2],[3,4]], mask=[1,0,0,1])
>>> print(x)
[[-- 2]
[3 --]]
>>> x = x.reshape((4,1))
>>> print(x)
[[--]
[2]
[3]
[--]]
"""
kwargs.update(order=kwargs.get('order', 'C'))
result = self._data.reshape(*s, **kwargs).view(type(self))
result._update_from(self)
mask = self._mask
if mask is not nomask:
result._mask = mask.reshape(*s, **kwargs)
return result
def resize(self, newshape, refcheck=True, order=False):
"""
.. warning::
This method does nothing, except raise a ValueError exception. A
masked array does not own its data and therefore cannot safely be
resized in place. Use the `numpy.ma.resize` function instead.
This method is difficult to implement safely and may be deprecated in
future releases of NumPy.
"""
# Note : the 'order' keyword looks broken, let's just drop it
errmsg = "A masked array does not own its data " \
"and therefore cannot be resized.\n" \
"Use the numpy.ma.resize function instead."
raise ValueError(errmsg)
def put(self, indices, values, mode='raise'):
"""
Set storage-indexed locations to corresponding values.
Sets self._data.flat[n] = values[n] for each n in indices.
If `values` is shorter than `indices` then it will repeat.
If `values` has some masked values, the initial mask is updated
        accordingly; otherwise the corresponding values are unmasked.
Parameters
----------
indices : 1-D array_like
Target indices, interpreted as integers.
values : array_like
Values to place in self._data copy at target indices.
mode : {'raise', 'wrap', 'clip'}, optional
Specifies how out-of-bounds indices will behave.
'raise' : raise an error.
'wrap' : wrap around.
'clip' : clip to the range.
Notes
-----
`values` can be a scalar or length 1 array.
Examples
--------
>>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4)
>>> print(x)
[[1 -- 3]
[-- 5 --]
[7 -- 9]]
>>> x.put([0,4,8],[10,20,30])
>>> print(x)
[[10 -- 3]
[-- 20 --]
[7 -- 30]]
>>> x.put(4,999)
>>> print(x)
[[10 -- 3]
[-- 999 --]
[7 -- 30]]
"""
# Hard mask: Get rid of the values/indices that fall on masked data
if self._hardmask and self._mask is not nomask:
mask = self._mask[indices]
indices = narray(indices, copy=False)
values = narray(values, copy=False, subok=True)
values.resize(indices.shape)
indices = indices[~mask]
values = values[~mask]
self._data.put(indices, values, mode=mode)
        # short circuit if neither self nor values are masked
if self._mask is nomask and getmask(values) is nomask:
return
m = getmaskarray(self).copy()
if getmask(values) is nomask:
m.put(indices, False, mode=mode)
else:
m.put(indices, values._mask, mode=mode)
m = make_mask(m, copy=False, shrink=True)
self._mask = m
return
def ids(self):
"""
Return the addresses of the data and mask areas.
Parameters
----------
None
Examples
--------
>>> x = np.ma.array([1, 2, 3], mask=[0, 1, 1])
>>> x.ids()
(166670640, 166659832)
If the array has no mask, the address of `nomask` is returned. This address
is typically not close to the data in memory:
>>> x = np.ma.array([1, 2, 3])
>>> x.ids()
(166691080, 3083169284L)
"""
if self._mask is nomask:
return (self.ctypes.data, id(nomask))
return (self.ctypes.data, self._mask.ctypes.data)
def iscontiguous(self):
"""
Return a boolean indicating whether the data is contiguous.
Parameters
----------
None
Examples
--------
>>> x = np.ma.array([1, 2, 3])
>>> x.iscontiguous()
True
`iscontiguous` returns one of the flags of the masked array:
>>> x.flags
C_CONTIGUOUS : True
F_CONTIGUOUS : True
OWNDATA : False
WRITEABLE : True
ALIGNED : True
UPDATEIFCOPY : False
"""
return self.flags['CONTIGUOUS']
def all(self, axis=None, out=None, keepdims=np._NoValue):
"""
Returns True if all elements evaluate to True.
The output array is masked where all the values along the given axis
        are masked: if the output would have been a scalar and all the
        values are masked, then the output is `masked`.
Refer to `numpy.all` for full documentation.
See Also
--------
ndarray.all : corresponding function for ndarrays
numpy.all : equivalent function
Examples
--------
>>> np.ma.array([1,2,3]).all()
True
>>> a = np.ma.array([1,2,3], mask=True)
>>> (a.all() is np.ma.masked)
True
"""
kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims}
mask = _check_mask_axis(self._mask, axis, **kwargs)
if out is None:
d = self.filled(True).all(axis=axis, **kwargs).view(type(self))
if d.ndim:
d.__setmask__(mask)
elif mask:
return masked
return d
self.filled(True).all(axis=axis, out=out, **kwargs)
if isinstance(out, MaskedArray):
if out.ndim or mask:
out.__setmask__(mask)
return out
def any(self, axis=None, out=None, keepdims=np._NoValue):
"""
Returns True if any of the elements of `a` evaluate to True.
Masked values are considered as False during computation.
Refer to `numpy.any` for full documentation.
See Also
--------
ndarray.any : corresponding function for ndarrays
numpy.any : equivalent function
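        Examples
        --------
        A minimal sketch; masked entries count as False, and a fully masked
        result is the `masked` constant:
        >>> a = np.ma.array([0, 0, 1], mask=[0, 0, 1])
        >>> print(a.any())
        False
        >>> np.ma.array([1, 2], mask=True).any() is np.ma.masked
        True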
"""
kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims}
mask = _check_mask_axis(self._mask, axis, **kwargs)
if out is None:
d = self.filled(False).any(axis=axis, **kwargs).view(type(self))
if d.ndim:
d.__setmask__(mask)
elif mask:
d = masked
return d
self.filled(False).any(axis=axis, out=out, **kwargs)
if isinstance(out, MaskedArray):
if out.ndim or mask:
out.__setmask__(mask)
return out
def nonzero(self):
"""
Return the indices of unmasked elements that are not zero.
Returns a tuple of arrays, one for each dimension, containing the
indices of the non-zero elements in that dimension. The corresponding
non-zero values can be obtained with::
a[a.nonzero()]
To group the indices by element, rather than dimension, use
instead::
np.transpose(a.nonzero())
The result of this is always a 2d array, with a row for each non-zero
element.
Parameters
----------
None
Returns
-------
tuple_of_arrays : tuple
Indices of elements that are non-zero.
See Also
--------
numpy.nonzero :
Function operating on ndarrays.
flatnonzero :
Return indices that are non-zero in the flattened version of the input
array.
ndarray.nonzero :
Equivalent ndarray method.
count_nonzero :
Counts the number of non-zero elements in the input array.
Examples
--------
>>> import numpy.ma as ma
>>> x = ma.array(np.eye(3))
>>> x
masked_array(data =
[[ 1. 0. 0.]
[ 0. 1. 0.]
[ 0. 0. 1.]],
mask =
False,
fill_value=1e+20)
>>> x.nonzero()
(array([0, 1, 2]), array([0, 1, 2]))
Masked elements are ignored.
>>> x[1, 1] = ma.masked
>>> x
masked_array(data =
[[1.0 0.0 0.0]
[0.0 -- 0.0]
[0.0 0.0 1.0]],
mask =
[[False False False]
[False True False]
[False False False]],
fill_value=1e+20)
>>> x.nonzero()
(array([0, 2]), array([0, 2]))
Indices can also be grouped by element.
>>> np.transpose(x.nonzero())
array([[0, 0],
[2, 2]])
        A common use for ``nonzero`` is to find the indices of an array where
        a condition is True. Given an array `a`, the condition ``a > 3`` is a
        boolean array and, since False is interpreted as 0, ``ma.nonzero(a > 3)``
        yields the indices of `a` where the condition is true.
>>> a = ma.array([[1,2,3],[4,5,6],[7,8,9]])
>>> a > 3
masked_array(data =
[[False False False]
[ True True True]
[ True True True]],
mask =
False,
fill_value=999999)
>>> ma.nonzero(a > 3)
(array([1, 1, 1, 2, 2, 2]), array([0, 1, 2, 0, 1, 2]))
The ``nonzero`` method of the condition array can also be called.
>>> (a > 3).nonzero()
(array([1, 1, 1, 2, 2, 2]), array([0, 1, 2, 0, 1, 2]))
"""
return narray(self.filled(0), copy=False).nonzero()
def trace(self, offset=0, axis1=0, axis2=1, dtype=None, out=None):
"""
(this docstring should be overwritten)
"""
# !!!: implement out + test!
m = self._mask
if m is nomask:
result = super(MaskedArray, self).trace(offset=offset, axis1=axis1,
axis2=axis2, out=out)
return result.astype(dtype)
else:
D = self.diagonal(offset=offset, axis1=axis1, axis2=axis2)
return D.astype(dtype).filled(0).sum(axis=None, out=out)
trace.__doc__ = ndarray.trace.__doc__
def dot(self, b, out=None, strict=False):
"""
a.dot(b, out=None)
Masked dot product of two arrays. Note that `out` and `strict` are
located in different positions than in `ma.dot`. In order to
maintain compatibility with the functional version, it is
recommended that the optional arguments be treated as keyword only.
At some point that may be mandatory.
.. versionadded:: 1.10.0
Parameters
----------
b : masked_array_like
            Input array.
out : masked_array, optional
Output argument. This must have the exact kind that would be
returned if it was not used. In particular, it must have the
right type, must be C-contiguous, and its dtype must be the
dtype that would be returned for `ma.dot(a,b)`. This is a
performance feature. Therefore, if these conditions are not
met, an exception is raised, instead of attempting to be
flexible.
strict : bool, optional
Whether masked data are propagated (True) or set to 0 (False)
for the computation. Default is False. Propagating the mask
means that if a masked value appears in a row or column, the
whole row or column is considered masked.
.. versionadded:: 1.10.2
See Also
--------
numpy.ma.dot : equivalent function
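        Examples
        --------
        A minimal sketch (with the default ``strict=False``, masked entries
        contribute 0 to the product):
        >>> a = np.ma.array([1., 2.], mask=[0, 1])
        >>> print(a.dot(a))
        1.0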
"""
return dot(self, b, out=out, strict=strict)
def sum(self, axis=None, dtype=None, out=None, keepdims=np._NoValue):
"""
Return the sum of the array elements over the given axis.
Masked elements are set to 0 internally.
Refer to `numpy.sum` for full documentation.
See Also
--------
ndarray.sum : corresponding function for ndarrays
numpy.sum : equivalent function
Examples
--------
>>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4)
>>> print(x)
[[1 -- 3]
[-- 5 --]
[7 -- 9]]
>>> print(x.sum())
25
>>> print(x.sum(axis=1))
[4 5 16]
>>> print(x.sum(axis=0))
[8 5 12]
>>> print(type(x.sum(axis=0, dtype=np.int64)[0]))
<type 'numpy.int64'>
"""
kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims}
_mask = self._mask
newmask = _check_mask_axis(_mask, axis, **kwargs)
# No explicit output
if out is None:
result = self.filled(0).sum(axis, dtype=dtype, **kwargs)
rndim = getattr(result, 'ndim', 0)
if rndim:
result = result.view(type(self))
result.__setmask__(newmask)
elif newmask:
result = masked
return result
# Explicit output
result = self.filled(0).sum(axis, dtype=dtype, out=out, **kwargs)
if isinstance(out, MaskedArray):
outmask = getattr(out, '_mask', nomask)
if (outmask is nomask):
outmask = out._mask = make_mask_none(out.shape)
outmask.flat = newmask
return out
def cumsum(self, axis=None, dtype=None, out=None):
"""
Return the cumulative sum of the array elements over the given axis.
Masked values are set to 0 internally during the computation.
However, their position is saved, and the result will be masked at
the same locations.
Refer to `numpy.cumsum` for full documentation.
Notes
-----
        The mask is lost if `out` is not a valid :class:`MaskedArray`!
Arithmetic is modular when using integer types, and no error is
raised on overflow.
See Also
--------
ndarray.cumsum : corresponding function for ndarrays
numpy.cumsum : equivalent function
Examples
--------
>>> marr = np.ma.array(np.arange(10), mask=[0,0,0,1,1,1,0,0,0,0])
>>> print(marr.cumsum())
[0 1 3 -- -- -- 9 16 24 33]
"""
result = self.filled(0).cumsum(axis=axis, dtype=dtype, out=out)
if out is not None:
if isinstance(out, MaskedArray):
out.__setmask__(self.mask)
return out
result = result.view(type(self))
result.__setmask__(self._mask)
return result
def prod(self, axis=None, dtype=None, out=None, keepdims=np._NoValue):
"""
Return the product of the array elements over the given axis.
Masked elements are set to 1 internally for computation.
Refer to `numpy.prod` for full documentation.
Notes
-----
Arithmetic is modular when using integer types, and no error is raised
on overflow.
See Also
--------
ndarray.prod : corresponding function for ndarrays
numpy.prod : equivalent function
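        Examples
        --------
        A minimal sketch; masked entries are replaced by 1 internally, so they
        do not affect the product:
        >>> x = np.ma.array([1, 2, 3], mask=[0, 1, 0])
        >>> print(x.prod())
        3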
"""
kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims}
_mask = self._mask
newmask = _check_mask_axis(_mask, axis, **kwargs)
# No explicit output
if out is None:
result = self.filled(1).prod(axis, dtype=dtype, **kwargs)
rndim = getattr(result, 'ndim', 0)
if rndim:
result = result.view(type(self))
result.__setmask__(newmask)
elif newmask:
result = masked
return result
# Explicit output
result = self.filled(1).prod(axis, dtype=dtype, out=out, **kwargs)
if isinstance(out, MaskedArray):
outmask = getattr(out, '_mask', nomask)
if (outmask is nomask):
outmask = out._mask = make_mask_none(out.shape)
outmask.flat = newmask
return out
product = prod
def cumprod(self, axis=None, dtype=None, out=None):
"""
Return the cumulative product of the array elements over the given axis.
Masked values are set to 1 internally during the computation.
However, their position is saved, and the result will be masked at
the same locations.
Refer to `numpy.cumprod` for full documentation.
Notes
-----
        The mask is lost if `out` is not a valid MaskedArray!
Arithmetic is modular when using integer types, and no error is
raised on overflow.
See Also
--------
ndarray.cumprod : corresponding function for ndarrays
numpy.cumprod : equivalent function
"""
result = self.filled(1).cumprod(axis=axis, dtype=dtype, out=out)
if out is not None:
if isinstance(out, MaskedArray):
out.__setmask__(self._mask)
return out
result = result.view(type(self))
result.__setmask__(self._mask)
return result
def mean(self, axis=None, dtype=None, out=None, keepdims=np._NoValue):
"""
Returns the average of the array elements along given axis.
Masked entries are ignored, and result elements which are not
finite will be masked.
Refer to `numpy.mean` for full documentation.
See Also
--------
ndarray.mean : corresponding function for ndarrays
numpy.mean : Equivalent function
numpy.ma.average: Weighted average.
Examples
--------
>>> a = np.ma.array([1,2,3], mask=[False, False, True])
>>> a
masked_array(data = [1 2 --],
mask = [False False True],
fill_value = 999999)
>>> a.mean()
1.5
"""
kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims}
if self._mask is nomask:
result = super(MaskedArray, self).mean(axis=axis,
dtype=dtype, **kwargs)[()]
else:
dsum = self.sum(axis=axis, dtype=dtype, **kwargs)
cnt = self.count(axis=axis, **kwargs)
if cnt.shape == () and (cnt == 0):
result = masked
else:
result = dsum * 1. / cnt
if out is not None:
out.flat = result
if isinstance(out, MaskedArray):
outmask = getattr(out, '_mask', nomask)
if (outmask is nomask):
outmask = out._mask = make_mask_none(out.shape)
outmask.flat = getattr(result, '_mask', nomask)
return out
return result
def anom(self, axis=None, dtype=None):
"""
Compute the anomalies (deviations from the arithmetic mean)
along the given axis.
Returns an array of anomalies, with the same shape as the input and
where the arithmetic mean is computed along the given axis.
Parameters
----------
axis : int, optional
Axis over which the anomalies are taken.
The default is to use the mean of the flattened array as reference.
dtype : dtype, optional
            Type to use in computing the mean. For arrays of integer type
the default is float32; for arrays of float types it is the same as
the array type.
See Also
--------
mean : Compute the mean of the array.
Examples
--------
>>> a = np.ma.array([1,2,3])
>>> a.anom()
masked_array(data = [-1. 0. 1.],
mask = False,
fill_value = 1e+20)
"""
m = self.mean(axis, dtype)
if m is masked:
return m
if not axis:
return (self - m)
else:
return (self - expand_dims(m, axis))
def var(self, axis=None, dtype=None, out=None, ddof=0,
keepdims=np._NoValue):
"""
Returns the variance of the array elements along given axis.
Masked entries are ignored, and result elements which are not
finite will be masked.
Refer to `numpy.var` for full documentation.
See Also
--------
ndarray.var : corresponding function for ndarrays
numpy.var : Equivalent function
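        Examples
        --------
        A minimal sketch; only the two unmasked values enter the computation:
        >>> a = np.ma.array([1., 2., 3., 4.], mask=[0, 0, 1, 1])
        >>> print(a.var())
        0.25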
"""
kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims}
# Easy case: nomask, business as usual
if self._mask is nomask:
ret = super(MaskedArray, self).var(axis=axis, dtype=dtype, out=out,
ddof=ddof, **kwargs)[()]
if out is not None:
if isinstance(out, MaskedArray):
out.__setmask__(nomask)
return out
return ret
# Some data are masked, yay!
cnt = self.count(axis=axis, **kwargs) - ddof
danom = self - self.mean(axis, dtype, keepdims=True)
if iscomplexobj(self):
danom = umath.absolute(danom) ** 2
else:
danom *= danom
dvar = divide(danom.sum(axis, **kwargs), cnt).view(type(self))
# Apply the mask if it's not a scalar
if dvar.ndim:
dvar._mask = mask_or(self._mask.all(axis, **kwargs), (cnt <= 0))
dvar._update_from(self)
elif getattr(dvar, '_mask', False):
# Make sure that masked is returned when the scalar is masked.
dvar = masked
if out is not None:
if isinstance(out, MaskedArray):
out.flat = 0
out.__setmask__(True)
elif out.dtype.kind in 'biu':
errmsg = "Masked data information would be lost in one or " \
"more location."
raise MaskError(errmsg)
else:
out.flat = np.nan
return out
        # In case we have an explicit output
if out is not None:
# Set the data
out.flat = dvar
# Set the mask if needed
if isinstance(out, MaskedArray):
out.__setmask__(dvar.mask)
return out
return dvar
var.__doc__ = np.var.__doc__
def std(self, axis=None, dtype=None, out=None, ddof=0,
keepdims=np._NoValue):
"""
Returns the standard deviation of the array elements along given axis.
Masked entries are ignored.
Refer to `numpy.std` for full documentation.
See Also
--------
ndarray.std : corresponding function for ndarrays
numpy.std : Equivalent function
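        Examples
        --------
        A minimal sketch, continuing the `var` example (std is sqrt(var)):
        >>> a = np.ma.array([1., 2., 3., 4.], mask=[0, 0, 1, 1])
        >>> print(a.std())
        0.5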
"""
kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims}
dvar = self.var(axis, dtype, out, ddof, **kwargs)
if dvar is not masked:
if out is not None:
np.power(out, 0.5, out=out, casting='unsafe')
return out
dvar = sqrt(dvar)
return dvar
def round(self, decimals=0, out=None):
"""
Return each element rounded to the given number of decimals.
Refer to `numpy.around` for full documentation.
See Also
--------
ndarray.around : corresponding function for ndarrays
numpy.around : equivalent function
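        Examples
        --------
        A minimal sketch; masked entries stay masked:
        >>> x = np.ma.array([1.35, 2.5, 1.5], mask=[0, 0, 1])
        >>> print(x.round())
        [1.0 2.0 --]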
"""
result = self._data.round(decimals=decimals, out=out).view(type(self))
if result.ndim > 0:
result._mask = self._mask
result._update_from(self)
elif self._mask:
# Return masked when the scalar is masked
result = masked
# No explicit output: we're done
if out is None:
return result
if isinstance(out, MaskedArray):
out.__setmask__(self._mask)
return out
def argsort(self, axis=None, kind='quicksort', order=None, fill_value=None):
"""
Return an ndarray of indices that sort the array along the
specified axis. Masked values are filled beforehand to
`fill_value`.
Parameters
----------
axis : int, optional
            Axis along which to sort. The default is None, in which case
            the flattened array is used.
fill_value : var, optional
Value used to fill the array before sorting.
            The default is given by ``default_fill_value(self)``.
kind : {'quicksort', 'mergesort', 'heapsort'}, optional
Sorting algorithm.
order : list, optional
When `a` is an array with fields defined, this argument specifies
which fields to compare first, second, etc. Not all fields need be
specified.
Returns
-------
index_array : ndarray, int
Array of indices that sort `a` along the specified axis.
In other words, ``a[index_array]`` yields a sorted `a`.
See Also
--------
sort : Describes sorting algorithms used.
lexsort : Indirect stable sort with multiple keys.
ndarray.sort : Inplace sort.
Notes
-----
See `sort` for notes on the different sorting algorithms.
Examples
--------
>>> a = np.ma.array([3,2,1], mask=[False, False, True])
>>> a
masked_array(data = [3 2 --],
mask = [False False True],
fill_value = 999999)
>>> a.argsort()
array([1, 0, 2])
"""
if fill_value is None:
fill_value = default_fill_value(self)
d = self.filled(fill_value).view(ndarray)
return d.argsort(axis=axis, kind=kind, order=order)
def argmin(self, axis=None, fill_value=None, out=None):
"""
Return array of indices to the minimum values along the given axis.
Parameters
----------
axis : {None, integer}
If None, the index is into the flattened array, otherwise along
the specified axis
fill_value : {var}, optional
Value used to fill in the masked values. If None, the output of
minimum_fill_value(self._data) is used instead.
out : {None, array}, optional
Array into which the result can be placed. Its type is preserved
and it must be of the right shape to hold the output.
Returns
-------
ndarray or scalar
If multi-dimension input, returns a new ndarray of indices to the
minimum values along the given axis. Otherwise, returns a scalar
of index to the minimum values along the given axis.
Examples
--------
        >>> x = np.ma.array(np.arange(4), mask=[1,1,0,0])
>>> x.shape = (2,2)
>>> print(x)
[[-- --]
[2 3]]
>>> print(x.argmin(axis=0, fill_value=-1))
[0 0]
>>> print(x.argmin(axis=0, fill_value=9))
[1 1]
"""
if fill_value is None:
fill_value = minimum_fill_value(self)
d = self.filled(fill_value).view(ndarray)
return d.argmin(axis, out=out)
def argmax(self, axis=None, fill_value=None, out=None):
"""
Returns array of indices of the maximum values along the given axis.
Masked values are treated as if they had the value fill_value.
Parameters
----------
axis : {None, integer}
If None, the index is into the flattened array, otherwise along
the specified axis
fill_value : {var}, optional
Value used to fill in the masked values. If None, the output of
maximum_fill_value(self._data) is used instead.
out : {None, array}, optional
Array into which the result can be placed. Its type is preserved
and it must be of the right shape to hold the output.
Returns
-------
index_array : {integer_array}
Examples
--------
>>> a = np.arange(6).reshape(2,3)
>>> a.argmax()
5
>>> a.argmax(0)
array([1, 1, 1])
>>> a.argmax(1)
array([2, 2])
"""
if fill_value is None:
fill_value = maximum_fill_value(self._data)
d = self.filled(fill_value).view(ndarray)
return d.argmax(axis, out=out)
def sort(self, axis=-1, kind='quicksort', order=None,
endwith=True, fill_value=None):
"""
        Sort the array in place.
Parameters
----------
axis : int, optional
Axis along which to sort. If None, the array is flattened before
sorting. The default is -1, which sorts along the last axis.
kind : {'quicksort', 'mergesort', 'heapsort'}, optional
Sorting algorithm. Default is 'quicksort'.
order : list, optional
When `a` is a structured array, this argument specifies which fields
to compare first, second, and so on. This list does not need to
include all of the fields.
endwith : {True, False}, optional
            Whether missing values (if any) should be forced to the upper
            indices (at the end of the array, True) or the lower indices (at
            the beginning, False).
            When the array contains unmasked values equal to the largest (or,
            if False, smallest) representable value of the datatype, the
            ordering of these values relative to the masked values is
            undefined. To force the masked values to the end (or beginning)
            in that case, sort the mask explicitly as well.
fill_value : {var}, optional
Value used internally for the masked values.
If ``fill_value`` is not None, it supersedes ``endwith``.
Returns
-------
        None
            The array is sorted in place.
See Also
--------
ndarray.sort : Method to sort an array in-place.
argsort : Indirect sort.
lexsort : Indirect stable sort on multiple keys.
searchsorted : Find elements in a sorted array.
Notes
-----
See ``sort`` for notes on the different sorting algorithms.
Examples
--------
        >>> a = np.ma.array([1, 2, 5, 4, 3], mask=[0, 1, 0, 1, 0])
>>> # Default
>>> a.sort()
>>> print(a)
[1 3 5 -- --]
        >>> a = np.ma.array([1, 2, 5, 4, 3], mask=[0, 1, 0, 1, 0])
>>> # Put missing values in the front
>>> a.sort(endwith=False)
>>> print(a)
[-- -- 1 3 5]
        >>> a = np.ma.array([1, 2, 5, 4, 3], mask=[0, 1, 0, 1, 0])
>>> # fill_value takes over endwith
>>> a.sort(endwith=False, fill_value=3)
>>> print(a)
[1 -- -- 3 5]
"""
if self._mask is nomask:
ndarray.sort(self, axis=axis, kind=kind, order=order)
else:
if self is masked:
return self
if fill_value is None:
if endwith:
# nan > inf
if np.issubdtype(self.dtype, np.floating):
filler = np.nan
else:
filler = minimum_fill_value(self)
else:
filler = maximum_fill_value(self)
else:
filler = fill_value
sidx = self.filled(filler).argsort(axis=axis, kind=kind,
order=order)
# save meshgrid memory for 1d arrays
if self.ndim == 1:
idx = sidx
else:
idx = np.meshgrid(*[np.arange(x) for x in self.shape], sparse=True,
indexing='ij')
idx[axis] = sidx
tmp_mask = self._mask[idx].flat
tmp_data = self._data[idx].flat
self._data.flat = tmp_data
self._mask.flat = tmp_mask
return
def min(self, axis=None, out=None, fill_value=None, keepdims=np._NoValue):
"""
Return the minimum along a given axis.
Parameters
----------
axis : {None, int}, optional
Axis along which to operate. By default, ``axis`` is None and the
flattened input is used.
out : array_like, optional
Alternative output array in which to place the result. Must be of
the same shape and buffer length as the expected output.
fill_value : {var}, optional
Value used to fill in the masked values.
If None, use the output of `minimum_fill_value`.
Returns
-------
amin : array_like
New array holding the result.
If ``out`` was specified, ``out`` is returned.
See Also
--------
minimum_fill_value
Returns the minimum filling value for a given datatype.
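        Examples
        --------
        A minimal sketch; the masked entry is ignored:
        >>> x = np.ma.array([5, 1, 3], mask=[0, 1, 0])
        >>> print(x.min())
        3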
"""
kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims}
_mask = self._mask
newmask = _check_mask_axis(_mask, axis, **kwargs)
if fill_value is None:
fill_value = minimum_fill_value(self)
# No explicit output
if out is None:
result = self.filled(fill_value).min(
axis=axis, out=out, **kwargs).view(type(self))
if result.ndim:
# Set the mask
result.__setmask__(newmask)
# Get rid of Infs
if newmask.ndim:
np.copyto(result, result.fill_value, where=newmask)
elif newmask:
result = masked
return result
# Explicit output
result = self.filled(fill_value).min(axis=axis, out=out, **kwargs)
if isinstance(out, MaskedArray):
outmask = getattr(out, '_mask', nomask)
if (outmask is nomask):
outmask = out._mask = make_mask_none(out.shape)
outmask.flat = newmask
else:
if out.dtype.kind in 'biu':
errmsg = "Masked data information would be lost in one or more" \
" location."
raise MaskError(errmsg)
np.copyto(out, np.nan, where=newmask)
return out
# unique to masked arrays
def mini(self, axis=None):
"""
Return the array minimum along the specified axis.
Parameters
----------
axis : int, optional
The axis along which to find the minima. Default is None, in which case
the minimum value in the whole array is returned.
Returns
-------
min : scalar or MaskedArray
If `axis` is None, the result is a scalar. Otherwise, if `axis` is
given and the array is at least 2-D, the result is a masked array with
dimension one smaller than the array on which `mini` is called.
Examples
--------
>>> x = np.ma.array(np.arange(6), mask=[0 ,1, 0, 0, 0 ,1]).reshape(3, 2)
>>> print(x)
[[0 --]
[2 3]
[4 --]]
>>> x.mini()
0
>>> x.mini(axis=0)
masked_array(data = [0 3],
mask = [False False],
fill_value = 999999)
>>> print(x.mini(axis=1))
[0 2 4]
"""
if axis is None:
return minimum(self)
else:
return minimum.reduce(self, axis)
def max(self, axis=None, out=None, fill_value=None, keepdims=np._NoValue):
"""
Return the maximum along a given axis.
Parameters
----------
axis : {None, int}, optional
Axis along which to operate. By default, ``axis`` is None and the
flattened input is used.
out : array_like, optional
Alternative output array in which to place the result. Must
be of the same shape and buffer length as the expected output.
fill_value : {var}, optional
Value used to fill in the masked values.
If None, use the output of maximum_fill_value().
Returns
-------
amax : array_like
New array holding the result.
If ``out`` was specified, ``out`` is returned.
See Also
--------
maximum_fill_value
Returns the maximum filling value for a given datatype.
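        Examples
        --------
        A minimal sketch; the masked entry is ignored:
        >>> x = np.ma.array([2, 9, 4], mask=[0, 1, 0])
        >>> print(x.max())
        4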
"""
kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims}
_mask = self._mask
newmask = _check_mask_axis(_mask, axis, **kwargs)
if fill_value is None:
fill_value = maximum_fill_value(self)
# No explicit output
if out is None:
result = self.filled(fill_value).max(
axis=axis, out=out, **kwargs).view(type(self))
if result.ndim:
# Set the mask
result.__setmask__(newmask)
# Get rid of Infs
if newmask.ndim:
np.copyto(result, result.fill_value, where=newmask)
elif newmask:
result = masked
return result
# Explicit output
result = self.filled(fill_value).max(axis=axis, out=out, **kwargs)
if isinstance(out, MaskedArray):
outmask = getattr(out, '_mask', nomask)
if (outmask is nomask):
outmask = out._mask = make_mask_none(out.shape)
outmask.flat = newmask
else:
if out.dtype.kind in 'biu':
errmsg = "Masked data information would be lost in one or more" \
" location."
raise MaskError(errmsg)
np.copyto(out, np.nan, where=newmask)
return out
def ptp(self, axis=None, out=None, fill_value=None):
"""
Return (maximum - minimum) along the given dimension
(i.e. peak-to-peak value).
Parameters
----------
axis : {None, int}, optional
Axis along which to find the peaks. If None (default) the
flattened array is used.
out : {None, array_like}, optional
Alternative output array in which to place the result. It must
have the same shape and buffer length as the expected output
but the type will be cast if necessary.
fill_value : {var}, optional
Value used to fill in the masked values.
Returns
-------
ptp : ndarray.
A new array holding the result, unless ``out`` was
specified, in which case a reference to ``out`` is returned.
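        Examples
        --------
        A minimal sketch; the peak-to-peak value uses only unmasked entries:
        >>> x = np.ma.array([4, 9, 2], mask=[0, 1, 0])
        >>> print(x.ptp())
        2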
"""
if out is None:
result = self.max(axis=axis, fill_value=fill_value)
result -= self.min(axis=axis, fill_value=fill_value)
return result
out.flat = self.max(axis=axis, out=out, fill_value=fill_value)
min_value = self.min(axis=axis, fill_value=fill_value)
np.subtract(out, min_value, out=out, casting='unsafe')
return out
def take(self, indices, axis=None, out=None, mode='raise'):
"""
"""
(_data, _mask) = (self._data, self._mask)
cls = type(self)
# Make sure the indices are not masked
maskindices = getattr(indices, '_mask', nomask)
if maskindices is not nomask:
indices = indices.filled(0)
# Get the data, promoting scalars to 0d arrays with [...] so that
# .view works correctly
if out is None:
out = _data.take(indices, axis=axis, mode=mode)[...].view(cls)
else:
np.take(_data, indices, axis=axis, mode=mode, out=out)
# Get the mask
if isinstance(out, MaskedArray):
if _mask is nomask:
outmask = maskindices
else:
outmask = _mask.take(indices, axis=axis, mode=mode)
outmask |= maskindices
out.__setmask__(outmask)
# demote 0d arrays back to scalars, for consistency with ndarray.take
return out[()]
# Array methods
copy = _arraymethod('copy')
diagonal = _arraymethod('diagonal')
transpose = _arraymethod('transpose')
T = property(fget=lambda self: self.transpose())
swapaxes = _arraymethod('swapaxes')
clip = _arraymethod('clip', onmask=False)
squeeze = _arraymethod('squeeze')
def tolist(self, fill_value=None):
"""
Return the data portion of the masked array as a hierarchical Python list.
Data items are converted to the nearest compatible Python type.
Masked values are converted to `fill_value`. If `fill_value` is None,
the corresponding entries in the output list will be ``None``.
Parameters
----------
fill_value : scalar, optional
The value to use for invalid entries. Default is None.
Returns
-------
result : list
The Python list representation of the masked array.
Examples
--------
>>> x = np.ma.array([[1,2,3], [4,5,6], [7,8,9]], mask=[0] + [1,0]*4)
>>> x.tolist()
[[1, None, 3], [None, 5, None], [7, None, 9]]
>>> x.tolist(-999)
[[1, -999, 3], [-999, 5, -999], [7, -999, 9]]
"""
_mask = self._mask
        # No mask? Just return ._data.tolist().
if _mask is nomask:
return self._data.tolist()
# Explicit fill_value: fill the array and get the list
if fill_value is not None:
return self.filled(fill_value).tolist()
# Structured array.
names = self.dtype.names
if names:
result = self._data.astype([(_, object) for _ in names])
for n in names:
result[n][_mask[n]] = None
return result.tolist()
# Standard arrays.
if _mask is nomask:
return [None]
# Set temps to save time when dealing w/ marrays.
inishape = self.shape
result = np.array(self._data.ravel(), dtype=object)
result[_mask.ravel()] = None
result.shape = inishape
return result.tolist()
def tostring(self, fill_value=None, order='C'):
"""
This function is a compatibility alias for tobytes. Despite its name it
returns bytes not strings.
"""
        return self.tobytes(fill_value, order=order)
def tobytes(self, fill_value=None, order='C'):
"""
Return the array data as a string containing the raw bytes in the array.
The array is filled with a fill value before the string conversion.
.. versionadded:: 1.9.0
Parameters
----------
fill_value : scalar, optional
            Value used to fill in the masked values. Default is None, in which
case `MaskedArray.fill_value` is used.
order : {'C','F','A'}, optional
Order of the data item in the copy. Default is 'C'.
- 'C' -- C order (row major).
- 'F' -- Fortran order (column major).
- 'A' -- Any, current order of array.
- None -- Same as 'A'.
See Also
--------
ndarray.tobytes
tolist, tofile
Notes
-----
As for `ndarray.tobytes`, information about the shape, dtype, etc.,
but also about `fill_value`, will be lost.
Examples
--------
>>> x = np.ma.array(np.array([[1, 2], [3, 4]]), mask=[[0, 1], [1, 0]])
>>> x.tobytes()
'\\x01\\x00\\x00\\x00?B\\x0f\\x00?B\\x0f\\x00\\x04\\x00\\x00\\x00'
"""
return self.filled(fill_value).tobytes(order=order)
def tofile(self, fid, sep="", format="%s"):
"""
Save a masked array to a file in binary format.
.. warning::
This function is not implemented yet.
Raises
------
NotImplementedError
When `tofile` is called.
"""
raise NotImplementedError("MaskedArray.tofile() not implemented yet.")
def toflex(self):
"""
Transforms a masked array into a flexible-type array.
The flexible type array that is returned will have two fields:
* the ``_data`` field stores the ``_data`` part of the array.
* the ``_mask`` field stores the ``_mask`` part of the array.
Parameters
----------
None
Returns
-------
record : ndarray
A new flexible-type `ndarray` with two fields: the first element
containing a value, the second element containing the corresponding
mask boolean. The returned record shape matches self.shape.
Notes
-----
A side-effect of transforming a masked array into a flexible `ndarray` is
that meta information (``fill_value``, ...) will be lost.
Examples
--------
>>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4)
>>> print(x)
[[1 -- 3]
[-- 5 --]
[7 -- 9]]
>>> print(x.toflex())
[[(1, False) (2, True) (3, False)]
[(4, True) (5, False) (6, True)]
[(7, False) (8, True) (9, False)]]
"""
# Get the basic dtype.
ddtype = self.dtype
# Make sure we have a mask
_mask = self._mask
        if _mask is nomask:
            _mask = make_mask_none(self.shape, ddtype)
        # And get its dtype
        mdtype = _mask.dtype
        record = np.ndarray(shape=self.shape,
                            dtype=[('_data', ddtype), ('_mask', mdtype)])
        record['_data'] = self._data
        record['_mask'] = _mask
return record
torecords = toflex
# Pickling
def __getstate__(self):
"""Return the internal state of the masked array, for pickling
purposes.
"""
cf = 'CF'[self.flags.fnc]
data_state = super(MaskedArray, self).__reduce__()[2]
return data_state + (getmaskarray(self).tobytes(cf), self._fill_value)
def __setstate__(self, state):
"""Restore the internal state of the masked array, for
        pickling purposes. ``state`` is typically the output of the
        ``__getstate__`` method, and is a 7-tuple:
        - pickle version number
        - a tuple giving the shape of the data
        - a typecode for the data
        - a flag indicating Fortran ordering
        - a binary string for the data
        - a binary string for the mask
        - the fill value.
"""
(_, shp, typ, isf, raw, msk, flv) = state
super(MaskedArray, self).__setstate__((shp, typ, isf, raw))
self._mask.__setstate__((shp, make_mask_descr(typ), isf, msk))
self.fill_value = flv
def __reduce__(self):
"""Return a 3-tuple for pickling a MaskedArray.
"""
return (_mareconstruct,
(self.__class__, self._baseclass, (0,), 'b',),
self.__getstate__())
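    # Illustrative round-trip sketch: pickling preserves the data, the mask
    # and the fill_value:
    #
    #     >>> import pickle
    #     >>> a = np.ma.array([1, 2, 3], mask=[0, 1, 0])
    #     >>> print(pickle.loads(pickle.dumps(a)))
    #     [1 -- 3]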
def __deepcopy__(self, memo=None):
from copy import deepcopy
copied = MaskedArray.__new__(type(self), self, copy=True)
if memo is None:
memo = {}
memo[id(self)] = copied
for (k, v) in self.__dict__.items():
copied.__dict__[k] = deepcopy(v, memo)
return copied
def _mareconstruct(subtype, baseclass, baseshape, basetype, ):
"""Internal function that builds a new MaskedArray from the
information stored in a pickle.
"""
_data = ndarray.__new__(baseclass, baseshape, basetype)
_mask = ndarray.__new__(ndarray, baseshape, make_mask_descr(basetype))
return subtype.__new__(subtype, _data, mask=_mask, dtype=basetype, )
class mvoid(MaskedArray):
"""
    Fake a 'void' object to use for masked arrays with structured dtypes.
"""
def __new__(self, data, mask=nomask, dtype=None, fill_value=None,
hardmask=False, copy=False, subok=True):
_data = np.array(data, copy=copy, subok=subok, dtype=dtype)
_data = _data.view(self)
_data._hardmask = hardmask
if mask is not nomask:
if isinstance(mask, np.void):
_data._mask = mask
else:
try:
# Mask is already a 0D array
_data._mask = np.void(mask)
except TypeError:
# Transform the mask to a void
mdtype = make_mask_descr(dtype)
_data._mask = np.array(mask, dtype=mdtype)[()]
if fill_value is not None:
_data.fill_value = fill_value
return _data
def _get_data(self):
# Make sure that the _data part is a np.void
return self.view(ndarray)[()]
_data = property(fget=_get_data)
def __getitem__(self, indx):
"""
Get the index.
"""
m = self._mask
if isinstance(m[indx], ndarray):
# Can happen when indx is a multi-dimensional field:
# A = ma.masked_array(data=[([0,1],)], mask=[([True,
# False],)], dtype=[("A", ">i2", (2,))])
# x = A[0]; y = x["A"]; then y.mask["A"].size==2
# and we can not say masked/unmasked.
# The result is no longer mvoid!
# See also issue #6724.
return masked_array(
data=self._data[indx], mask=m[indx],
fill_value=self._fill_value[indx],
hard_mask=self._hardmask)
if m is not nomask and m[indx]:
return masked
return self._data[indx]
def __setitem__(self, indx, value):
self._data[indx] = value
if self._hardmask:
self._mask[indx] |= getattr(value, "_mask", False)
else:
self._mask[indx] = getattr(value, "_mask", False)
def __str__(self):
m = self._mask
if m is nomask:
return self._data.__str__()
printopt = masked_print_option
rdtype = _recursive_make_descr(self._data.dtype, "O")
# temporary hack to fix gh-7493. A more permanent fix
# is proposed in gh-6053, after which the next two
# lines should be changed to
# res = np.array([self._data], dtype=rdtype)
res = np.empty(1, rdtype)
res[:1] = self._data
_recursive_printoption(res, self._mask, printopt)
return str(res[0])
__repr__ = __str__
def __iter__(self):
"Defines an iterator for mvoid"
(_data, _mask) = (self._data, self._mask)
if _mask is nomask:
for d in _data:
yield d
else:
for (d, m) in zip(_data, _mask):
if m:
yield masked
else:
yield d
def __len__(self):
return self._data.__len__()
def filled(self, fill_value=None):
"""
Return a copy with masked fields filled with a given value.
Parameters
----------
fill_value : scalar, optional
The value to use for invalid entries (None by default).
If None, the `fill_value` attribute is used instead.
Returns
-------
filled_void
A `np.void` object
See Also
--------
MaskedArray.filled
"""
return asarray(self).filled(fill_value)[()]
def tolist(self):
"""
Transforms the mvoid object into a tuple.
Masked fields are replaced by None.
Returns
-------
returned_tuple
Tuple of fields
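        Examples
        --------
        A minimal sketch with a structured scalar:
        >>> x = np.ma.array([(1, 2.0)], mask=[(0, 1)],
        ...                 dtype=[('a', int), ('b', float)])
        >>> x[0].tolist()
        (1, None)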
"""
_mask = self._mask
if _mask is nomask:
return self._data.tolist()
result = []
for (d, m) in zip(self._data, self._mask):
if m:
result.append(None)
else:
# .item() makes sure we return a standard Python object
result.append(d.item())
return tuple(result)
##############################################################################
# Shortcuts #
##############################################################################
def isMaskedArray(x):
"""
Test whether input is an instance of MaskedArray.
This function returns True if `x` is an instance of MaskedArray
and returns False otherwise. Any object is accepted as input.
Parameters
----------
x : object
Object to test.
Returns
-------
result : bool
True if `x` is a MaskedArray.
See Also
--------
isMA : Alias to isMaskedArray.
isarray : Alias to isMaskedArray.
Examples
--------
>>> import numpy.ma as ma
>>> a = np.eye(3, 3)
>>> a
array([[ 1., 0., 0.],
[ 0., 1., 0.],
[ 0., 0., 1.]])
>>> m = ma.masked_values(a, 0)
>>> m
masked_array(data =
[[1.0 -- --]
[-- 1.0 --]
[-- -- 1.0]],
mask =
[[False True True]
[ True False True]
[ True True False]],
fill_value=0.0)
>>> ma.isMaskedArray(a)
False
>>> ma.isMaskedArray(m)
True
>>> ma.isMaskedArray([0, 1, 2])
False
"""
return isinstance(x, MaskedArray)
isarray = isMaskedArray
isMA = isMaskedArray # backward compatibility
class MaskedConstant(MaskedArray):
# We define the masked singleton as a float for higher precedence.
# Note that it can be tricky sometimes w/ type comparison
_data = data = np.array(0.)
_mask = mask = np.array(True)
_baseclass = ndarray
def __new__(self):
return self._data.view(self)
def __array_finalize__(self, obj):
return
def __array_wrap__(self, obj):
return self
def __str__(self):
return str(masked_print_option._display)
def __repr__(self):
return 'masked'
def flatten(self):
return masked_array([self._data], dtype=float, mask=[True])
def __reduce__(self):
"""Override of MaskedArray's __reduce__.
"""
return (self.__class__, ())
masked = masked_singleton = MaskedConstant()
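# Illustrative sketch: indexing a masked position yields the singleton, so
# identity tests against ``masked`` are reliable:
#
#     >>> np.ma.array([1], mask=[True])[0] is np.ma.masked
#     True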
masked_array = MaskedArray
def array(data, dtype=None, copy=False, order=None,
mask=nomask, fill_value=None, keep_mask=True,
hard_mask=False, shrink=True, subok=True, ndmin=0):
"""
Shortcut to MaskedArray.
The options are in a different order for convenience and backwards
compatibility.
"""
return MaskedArray(data, mask=mask, dtype=dtype, copy=copy,
subok=subok, keep_mask=keep_mask,
hard_mask=hard_mask, fill_value=fill_value,
ndmin=ndmin, shrink=shrink, order=order)
array.__doc__ = masked_array.__doc__
def is_masked(x):
"""
Determine whether input has masked values.
Accepts any object as input, but always returns False unless the
input is a MaskedArray containing masked values.
Parameters
----------
x : array_like
Array to check for masked values.
Returns
-------
result : bool
True if `x` is a MaskedArray with masked values, False otherwise.
Examples
--------
>>> import numpy.ma as ma
>>> x = ma.masked_equal([0, 1, 0, 2, 3], 0)
>>> x
masked_array(data = [-- 1 -- 2 3],
mask = [ True False True False False],
fill_value=999999)
>>> ma.is_masked(x)
True
>>> x = ma.masked_equal([0, 1, 0, 2, 3], 42)
>>> x
masked_array(data = [0 1 0 2 3],
mask = False,
fill_value=999999)
>>> ma.is_masked(x)
False
Always returns False if `x` isn't a MaskedArray.
>>> x = [False, True, False]
>>> ma.is_masked(x)
False
>>> x = 'a string'
>>> ma.is_masked(x)
False
"""
m = getmask(x)
if m is nomask:
return False
elif m.any():
return True
return False
##############################################################################
# Extrema functions #
##############################################################################
class _extrema_operation(object):
"""
Generic class for maximum/minimum functions.
.. note::
This is the base class for `_maximum_operation` and
`_minimum_operation`.
"""
def __call__(self, a, b=None):
"Executes the call behavior."
if b is None:
return self.reduce(a)
return where(self.compare(a, b), a, b)
def reduce(self, target, axis=None):
"Reduce target along the given axis."
target = narray(target, copy=False, subok=True)
m = getmask(target)
if axis is not None:
kargs = {'axis': axis}
else:
kargs = {}
target = target.ravel()
if not (m is nomask):
m = m.ravel()
if m is nomask:
t = self.ufunc.reduce(target, **kargs)
else:
target = target.filled(
self.fill_value_func(target)).view(type(target))
t = self.ufunc.reduce(target, **kargs)
m = umath.logical_and.reduce(m, **kargs)
if hasattr(t, '_mask'):
t._mask = m
elif m:
t = masked
return t
def outer(self, a, b):
"Return the function applied to the outer product of a and b."
ma = getmask(a)
mb = getmask(b)
if ma is nomask and mb is nomask:
m = nomask
else:
ma = getmaskarray(a)
mb = getmaskarray(b)
m = logical_or.outer(ma, mb)
result = self.ufunc.outer(filled(a), filled(b))
if not isinstance(result, MaskedArray):
result = result.view(MaskedArray)
result._mask = m
return result
class _minimum_operation(_extrema_operation):
"Object to calculate minima"
def __init__(self):
"""minimum(a, b) or minimum(a)
In one argument case, returns the scalar minimum.
"""
self.ufunc = umath.minimum
self.afunc = amin
self.compare = less
self.fill_value_func = minimum_fill_value
class _maximum_operation(_extrema_operation):
"Object to calculate maxima"
def __init__(self):
"""maximum(a, b) or maximum(a)
In one argument case returns the scalar maximum.
"""
self.ufunc = umath.maximum
self.afunc = amax
self.compare = greater
self.fill_value_func = maximum_fill_value
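# Hedged usage sketch (not in the original source): the extrema objects act
# as two-argument element-wise functions or, with one argument, as masked
# reductions. Example values are hypothetical.
#
# >>> import numpy as np
# >>> np.ma.minimum(np.ma.array([1, 5], mask=[0, 0]), [4, 2])
# masked_array-like result with data [1, 2]
# >>> np.ma.minimum(np.ma.array([9, 2, 7], mask=[0, 1, 0]))
# 7  # the masked 2 is ignored, so the scalar minimum is 7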
def min(obj, axis=None, out=None, fill_value=None, keepdims=np._NoValue):
kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims}
try:
return obj.min(axis=axis, fill_value=fill_value, out=out, **kwargs)
except (AttributeError, TypeError):
# If obj doesn't have a min method, or if the method doesn't accept a
# fill_value argument
return asanyarray(obj).min(axis=axis, fill_value=fill_value,
out=out, **kwargs)
min.__doc__ = MaskedArray.min.__doc__
def max(obj, axis=None, out=None, fill_value=None, keepdims=np._NoValue):
kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims}
try:
return obj.max(axis=axis, fill_value=fill_value, out=out, **kwargs)
except (AttributeError, TypeError):
# If obj doesn't have a max method, or if the method doesn't accept a
# fill_value argument
return asanyarray(obj).max(axis=axis, fill_value=fill_value,
out=out, **kwargs)
max.__doc__ = MaskedArray.max.__doc__
def ptp(obj, axis=None, out=None, fill_value=None):
"""
a.ptp(axis=None) = a.max(axis) - a.min(axis)
"""
try:
return obj.ptp(axis, out=out, fill_value=fill_value)
except (AttributeError, TypeError):
# If obj doesn't have a ptp method or if the method doesn't accept
# a fill_value argument
return asanyarray(obj).ptp(axis=axis, fill_value=fill_value, out=out)
ptp.__doc__ = MaskedArray.ptp.__doc__
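# Hedged sketch (an editorial illustration): the module-level min/max/ptp
# fall back to asanyarray() when the input lacks the method, so plain
# sequences also work. Hypothetical values:
#
# >>> import numpy as np
# >>> x = np.ma.array([1, 5, 3], mask=[0, 1, 0])
# >>> np.ma.min(x), np.ma.max(x), np.ma.ptp(x)
# (1, 3, 2)  # the masked 5 is ignored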
##############################################################################
# Definition of functions from the corresponding methods #
##############################################################################
class _frommethod:
"""
Define functions from existing MaskedArray methods.
Parameters
----------
methodname : str
Name of the method to transform.
"""
def __init__(self, methodname, reversed=False):
self.__name__ = methodname
self.__doc__ = self.getdoc()
self.reversed = reversed
def getdoc(self):
"Return the doc of the function (from the doc of the method)."
meth = getattr(MaskedArray, self.__name__, None) or \
getattr(np, self.__name__, None)
signature = self.__name__ + get_object_signature(meth)
if meth is not None:
doc = """ %s\n%s""" % (
signature, getattr(meth, '__doc__', None))
return doc
def __call__(self, a, *args, **params):
if self.reversed:
args = list(args)
arr = args[0]
args[0] = a
a = arr
# Get the method from the array (if possible)
method_name = self.__name__
method = getattr(a, method_name, None)
if method is not None:
return method(*args, **params)
# Still here ? Then a is not a MaskedArray
method = getattr(MaskedArray, method_name, None)
if method is not None:
return method(MaskedArray(a), *args, **params)
# Still here ? OK, let's call the corresponding np function
method = getattr(np, method_name)
return method(a, *args, **params)
all = _frommethod('all')
anomalies = anom = _frommethod('anom')
any = _frommethod('any')
compress = _frommethod('compress', reversed=True)
cumprod = _frommethod('cumprod')
cumsum = _frommethod('cumsum')
copy = _frommethod('copy')
diagonal = _frommethod('diagonal')
harden_mask = _frommethod('harden_mask')
ids = _frommethod('ids')
maximum = _maximum_operation()
mean = _frommethod('mean')
minimum = _minimum_operation()
nonzero = _frommethod('nonzero')
prod = _frommethod('prod')
product = _frommethod('prod')
ravel = _frommethod('ravel')
repeat = _frommethod('repeat')
shrink_mask = _frommethod('shrink_mask')
soften_mask = _frommethod('soften_mask')
std = _frommethod('std')
sum = _frommethod('sum')
swapaxes = _frommethod('swapaxes')
# take = _frommethod('take')
trace = _frommethod('trace')
var = _frommethod('var')
count = _frommethod('count')
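# Hedged sketch (an illustration, not original code): each _frommethod
# wrapper prefers the input's own method, falls back to MaskedArray's, then
# to the plain numpy function, so both masked arrays and ordinary sequences
# are accepted.
#
# >>> import numpy as np
# >>> np.ma.sum(np.ma.array([1, 2, 3], mask=[0, 1, 0]))
# 4  # same as the method call: np.ma.array([1, 2, 3], mask=[0, 1, 0]).sum()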
def take(a, indices, axis=None, out=None, mode='raise'):
"""
"""
a = masked_array(a)
return a.take(indices, axis=axis, out=out, mode=mode)
def power(a, b, third=None):
"""
Returns element-wise base array raised to power from second array.
This is the masked array version of `numpy.power`. For details see
`numpy.power`.
See Also
--------
numpy.power
Notes
-----
The *out* argument to `numpy.power` is not supported, `third` has to be
None.
"""
if third is not None:
raise MaskError("3-argument power not supported.")
# Get the masks
ma = getmask(a)
mb = getmask(b)
m = mask_or(ma, mb)
# Get the rawdata
fa = getdata(a)
fb = getdata(b)
# Get the type of the result (so that we preserve subclasses)
if isinstance(a, MaskedArray):
basetype = type(a)
else:
basetype = MaskedArray
# Get the result and view it as a (subclass of) MaskedArray
with np.errstate(divide='ignore', invalid='ignore'):
result = np.where(m, fa, umath.power(fa, fb)).view(basetype)
result._update_from(a)
# Find where we're in trouble w/ NaNs and Infs
invalid = np.logical_not(np.isfinite(result.view(ndarray)))
# Add the initial mask
if m is not nomask:
if not (result.ndim):
return masked
result._mask = np.logical_or(m, invalid)
# Fix the invalid parts
if invalid.any():
if not result.ndim:
return masked
elif result._mask is nomask:
result._mask = invalid
result._data[invalid] = result.fill_value
return result
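# Hedged sketch (an illustration, not original code): power() masks both
# originally-masked entries and entries whose result is non-finite, e.g. a
# negative base raised to a fractional exponent. Hypothetical values:
#
# >>> import numpy as np
# >>> x = np.ma.array([4.0, -4.0, 9.0], mask=[0, 0, 1])
# >>> print(np.ma.power(x, 0.5))
# [2.0 -- --]  # sqrt(-4) is invalid, so it is masked; the 9.0 stays masked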
def argsort(a, axis=None, kind='quicksort', order=None, fill_value=None):
"Function version of the eponymous method."
if fill_value is None:
fill_value = default_fill_value(a)
d = filled(a, fill_value)
if axis is None:
return d.argsort(kind=kind, order=order)
return d.argsort(axis, kind=kind, order=order)
argsort.__doc__ = MaskedArray.argsort.__doc__
argmin = _frommethod('argmin')
argmax = _frommethod('argmax')
def sort(a, axis=-1, kind='quicksort', order=None, endwith=True, fill_value=None):
"Function version of the eponymous method."
a = narray(a, copy=True, subok=True)
if axis is None:
a = a.flatten()
axis = 0
if fill_value is None:
if endwith:
# nan > inf
if np.issubdtype(a.dtype, np.floating):
filler = np.nan
else:
filler = minimum_fill_value(a)
else:
filler = maximum_fill_value(a)
else:
filler = fill_value
sindx = filled(a, filler).argsort(axis=axis, kind=kind, order=order)
# save meshgrid memory for 1d arrays
if a.ndim == 1:
indx = sindx
else:
indx = np.meshgrid(*[np.arange(x) for x in a.shape], sparse=True,
indexing='ij')
indx[axis] = sindx
return a[indx]
sort.__doc__ = MaskedArray.sort.__doc__
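# Hedged sketch (an illustration, not original code): with the default
# endwith=True, masked entries are filled with the largest possible value
# before argsorting, so they sort to the end. Hypothetical values:
#
# >>> import numpy as np
# >>> print(np.ma.sort(np.ma.array([3, 1, 2], mask=[0, 1, 0])))
# [2 3 --]  # the masked element lands at the end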
def compressed(x):
"""
Return all the non-masked data as a 1-D array.
This function is equivalent to calling the "compressed" method of a
`MaskedArray`, see `MaskedArray.compressed` for details.
See Also
--------
MaskedArray.compressed
Equivalent method.
"""
if not isinstance(x, MaskedArray):
x = asanyarray(x)
return x.compressed()
def concatenate(arrays, axis=0):
"""
Concatenate a sequence of arrays along the given axis.
Parameters
----------
arrays : sequence of array_like
The arrays must have the same shape, except in the dimension
corresponding to `axis` (the first, by default).
axis : int, optional
The axis along which the arrays will be joined. Default is 0.
Returns
-------
result : MaskedArray
The concatenated array with any masked entries preserved.
See Also
--------
numpy.concatenate : Equivalent function in the top-level NumPy module.
Examples
--------
>>> import numpy.ma as ma
>>> a = ma.arange(3)
>>> a[1] = ma.masked
>>> b = ma.arange(2, 5)
>>> a
masked_array(data = [0 -- 2],
mask = [False True False],
fill_value = 999999)
>>> b
masked_array(data = [2 3 4],
mask = False,
fill_value = 999999)
>>> ma.concatenate([a, b])
masked_array(data = [0 -- 2 2 3 4],
mask = [False True False False False False],
fill_value = 999999)
"""
d = np.concatenate([getdata(a) for a in arrays], axis)
rcls = get_masked_subclass(*arrays)
data = d.view(rcls)
# Check whether one of the arrays has a non-empty mask.
for x in arrays:
if getmask(x) is not nomask:
break
else:
return data
# OK, so we have to concatenate the masks
dm = np.concatenate([getmaskarray(a) for a in arrays], axis)
# If we decide to keep a '_shrinkmask' option, we want to check that
# all of them are True, and then check for dm.any()
if not dm.dtype.fields and not dm.any():
data._mask = nomask
else:
data._mask = dm.reshape(d.shape)
return data
def diag(v, k=0):
"""
Extract a diagonal or construct a diagonal array.
This function is the equivalent of `numpy.diag` that takes masked
values into account, see `numpy.diag` for details.
See Also
--------
numpy.diag : Equivalent function for ndarrays.
"""
output = np.diag(v, k).view(MaskedArray)
if getmask(v) is not nomask:
output._mask = np.diag(v._mask, k)
return output
def expand_dims(x, axis):
"""
Expand the shape of an array.
Expands the shape of the array by including a new axis before the one
specified by the `axis` parameter. This function behaves the same as
`numpy.expand_dims` but preserves masked elements.
See Also
--------
numpy.expand_dims : Equivalent function in top-level NumPy module.
Examples
--------
>>> import numpy.ma as ma
>>> x = ma.array([1, 2, 4])
>>> x[1] = ma.masked
>>> x
masked_array(data = [1 -- 4],
mask = [False True False],
fill_value = 999999)
>>> np.expand_dims(x, axis=0)
array([[1, 2, 4]])
>>> ma.expand_dims(x, axis=0)
masked_array(data =
[[1 -- 4]],
mask =
[[False True False]],
fill_value = 999999)
The same result can be achieved using slicing syntax with `np.newaxis`.
>>> x[np.newaxis, :]
masked_array(data =
[[1 -- 4]],
mask =
[[False True False]],
fill_value = 999999)
"""
result = n_expand_dims(x, axis)
if isinstance(x, MaskedArray):
new_shape = result.shape
result = x.view()
result.shape = new_shape
if result._mask is not nomask:
result._mask.shape = new_shape
return result
def left_shift(a, n):
"""
Shift the bits of an integer to the left.
This is the masked array version of `numpy.left_shift`, for details
see that function.
See Also
--------
numpy.left_shift
"""
m = getmask(a)
if m is nomask:
d = umath.left_shift(filled(a), n)
return masked_array(d)
else:
d = umath.left_shift(filled(a, 0), n)
return masked_array(d, mask=m)
def right_shift(a, n):
"""
Shift the bits of an integer to the right.
This is the masked array version of `numpy.right_shift`, for details
see that function.
See Also
--------
numpy.right_shift
"""
m = getmask(a)
if m is nomask:
d = umath.right_shift(filled(a), n)
return masked_array(d)
else:
d = umath.right_shift(filled(a, 0), n)
return masked_array(d, mask=m)
def put(a, indices, values, mode='raise'):
"""
Set storage-indexed locations to corresponding values.
This function is equivalent to `MaskedArray.put`, see that method
for details.
See Also
--------
MaskedArray.put
"""
# We can't use 'frommethod', the order of arguments is different
try:
return a.put(indices, values, mode=mode)
except AttributeError:
return narray(a, copy=False).put(indices, values, mode=mode)
def putmask(a, mask, values): # , mode='raise'):
"""
Changes elements of an array based on conditional and input values.
This is the masked array version of `numpy.putmask`, for details see
`numpy.putmask`.
See Also
--------
numpy.putmask
Notes
-----
Using a masked array as `values` will **not** transform a `ndarray` into
a `MaskedArray`.
"""
# We can't use 'frommethod', the order of arguments is different
if not isinstance(a, MaskedArray):
a = a.view(MaskedArray)
(valdata, valmask) = (getdata(values), getmask(values))
if getmask(a) is nomask:
if valmask is not nomask:
a._sharedmask = True
a._mask = make_mask_none(a.shape, a.dtype)
np.copyto(a._mask, valmask, where=mask)
elif a._hardmask:
if valmask is not nomask:
m = a._mask.copy()
np.copyto(m, valmask, where=mask)
a.mask |= m
else:
if valmask is nomask:
valmask = getmaskarray(values)
np.copyto(a._mask, valmask, where=mask)
np.copyto(a._data, valdata, where=mask)
return
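# Hedged usage sketch (not part of the original module): putmask copies both
# data and mask wherever the condition holds, mutating `a` in place.
# Hypothetical values:
#
# >>> import numpy as np
# >>> a = np.ma.array([1, 2, 3, 4])
# >>> np.ma.putmask(a, [1, 0, 1, 0], [10, 20, 30, 40])
# >>> print(a)
# [10 2 30 4]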
def transpose(a, axes=None):
"""
Permute the dimensions of an array.
This function is exactly equivalent to `numpy.transpose`.
See Also
--------
numpy.transpose : Equivalent function in top-level NumPy module.
Examples
--------
>>> import numpy.ma as ma
>>> x = ma.arange(4).reshape((2,2))
>>> x[1, 1] = ma.masked
>>> x
masked_array(data =
[[0 1]
[2 --]],
mask =
[[False False]
[False True]],
fill_value = 999999)
>>> ma.transpose(x)
masked_array(data =
[[0 2]
[1 --]],
mask =
[[False False]
[False True]],
fill_value = 999999)
"""
# We can't use 'frommethod', as 'transpose' doesn't take keywords
try:
return a.transpose(axes)
except AttributeError:
return narray(a, copy=False).transpose(axes).view(MaskedArray)
def reshape(a, new_shape, order='C'):
"""
Returns an array containing the same data with a new shape.
Refer to `MaskedArray.reshape` for full documentation.
See Also
--------
MaskedArray.reshape : equivalent function
"""
# We can't use 'frommethod', as it whines about some parameters.
try:
return a.reshape(new_shape, order=order)
except AttributeError:
_tmp = narray(a, copy=False).reshape(new_shape, order=order)
return _tmp.view(MaskedArray)
def resize(x, new_shape):
"""
Return a new masked array with the specified size and shape.
This is the masked equivalent of the `numpy.resize` function. The new
array is filled with repeated copies of `x` (in the order that the
data are stored in memory). If `x` is masked, the new array will be
masked, and the new mask will be a repetition of the old one.
See Also
--------
numpy.resize : Equivalent function in the top level NumPy module.
Examples
--------
>>> import numpy.ma as ma
>>> a = ma.array([[1, 2] ,[3, 4]])
>>> a[0, 1] = ma.masked
>>> a
masked_array(data =
[[1 --]
[3 4]],
mask =
[[False True]
[False False]],
fill_value = 999999)
>>> np.resize(a, (3, 3))
array([[1, 2, 3],
[4, 1, 2],
[3, 4, 1]])
>>> ma.resize(a, (3, 3))
masked_array(data =
[[1 -- 3]
[4 1 --]
[3 4 1]],
mask =
[[False True False]
[False False True]
[False False False]],
fill_value = 999999)
A MaskedArray is always returned, regardless of the input type.
>>> a = np.array([[1, 2] ,[3, 4]])
>>> ma.resize(a, (3, 3))
masked_array(data =
[[1 2 3]
[4 1 2]
[3 4 1]],
mask =
False,
fill_value = 999999)
"""
# We can't use _frommethods here, as N.resize is notoriously whiny.
m = getmask(x)
if m is not nomask:
m = np.resize(m, new_shape)
result = np.resize(x, new_shape).view(get_masked_subclass(x))
if result.ndim:
result._mask = m
return result
def rank(obj):
"""
maskedarray version of the numpy function.
.. note::
Deprecated since 1.10.0
"""
# 2015-04-12, 1.10.0
warnings.warn(
"`rank` is deprecated; use the `ndim` function instead. ",
np.VisibleDeprecationWarning, stacklevel=2)
return np.ndim(getdata(obj))
rank.__doc__ = np.rank.__doc__
def ndim(obj):
"""
maskedarray version of the numpy function.
"""
return np.ndim(getdata(obj))
ndim.__doc__ = np.ndim.__doc__
def shape(obj):
"maskedarray version of the numpy function."
return np.shape(getdata(obj))
shape.__doc__ = np.shape.__doc__
def size(obj, axis=None):
"maskedarray version of the numpy function."
return np.size(getdata(obj), axis)
size.__doc__ = np.size.__doc__
##############################################################################
# Extra functions #
##############################################################################
def where(condition, x=_NoValue, y=_NoValue):
"""
Return a masked array with elements from x or y, depending on condition.
Returns a masked array, shaped like condition, where the elements
are from `x` when `condition` is True, and from `y` otherwise.
If neither `x` nor `y` are given, the function returns a tuple of
indices where `condition` is True (the result of
``condition.nonzero()``).
Parameters
----------
condition : array_like, bool
The condition to meet. For each True element, yield the corresponding
element from `x`, otherwise from `y`.
x, y : array_like, optional
Values from which to choose. `x` and `y` need to have the same shape
as condition, or be broadcast-able to that shape.
Returns
-------
out : MaskedArray or tuple of ndarrays
The resulting masked array if `x` and `y` were given, otherwise
the result of ``condition.nonzero()``.
See Also
--------
numpy.where : Equivalent function in the top-level NumPy module.
Examples
--------
>>> x = np.ma.array(np.arange(9.).reshape(3, 3), mask=[[0, 1, 0],
... [1, 0, 1],
... [0, 1, 0]])
>>> print(x)
[[0.0 -- 2.0]
[-- 4.0 --]
[6.0 -- 8.0]]
>>> np.ma.where(x > 5) # return the indices where x > 5
(array([2, 2]), array([0, 2]))
>>> print(np.ma.where(x > 5, x, -3.1416))
[[-3.1416 -- -3.1416]
[-- -3.1416 --]
[6.0 -- 8.0]]
"""
missing = (x is _NoValue, y is _NoValue).count(True)
if missing == 1:
raise ValueError("Must provide both 'x' and 'y' or neither.")
if missing == 2:
return filled(condition, 0).nonzero()
# Both x and y are provided
# Get the condition
fc = filled(condition, 0).astype(MaskType)
notfc = np.logical_not(fc)
# Get the data
xv = getdata(x)
yv = getdata(y)
if x is masked:
ndtype = yv.dtype
elif y is masked:
ndtype = xv.dtype
else:
ndtype = np.find_common_type([xv.dtype, yv.dtype], [])
# Construct an empty array and fill it
d = np.empty(fc.shape, dtype=ndtype).view(MaskedArray)
np.copyto(d._data, xv.astype(ndtype), where=fc)
np.copyto(d._data, yv.astype(ndtype), where=notfc)
# Create an empty mask and fill it
mask = np.zeros(fc.shape, dtype=MaskType)
np.copyto(mask, getmask(x), where=fc)
np.copyto(mask, getmask(y), where=notfc)
mask |= getmaskarray(condition)
# Use d._mask instead of d.mask to avoid copies
d._mask = mask if mask.any() else nomask
return d
def choose(indices, choices, out=None, mode='raise'):
"""
Use an index array to construct a new array from a set of choices.
Given an array of integers and a set of n choice arrays, this method
will create a new array that merges each of the choice arrays. Where a
value in `a` is i, the new array will have the value that choices[i]
contains in the same place.
Parameters
----------
a : ndarray of ints
This array must contain integers in ``[0, n-1]``, where n is the
number of choices.
choices : sequence of arrays
Choice arrays. The index array and all of the choices should be
broadcastable to the same shape.
out : array, optional
If provided, the result will be inserted into this array. It should
be of the appropriate shape and `dtype`.
mode : {'raise', 'wrap', 'clip'}, optional
Specifies how out-of-bounds indices will behave.
* 'raise' : raise an error
* 'wrap' : wrap around
* 'clip' : clip to the range
Returns
-------
merged_array : array
See Also
--------
choose : equivalent function
Examples
--------
>>> choice = np.array([[1,1,1], [2,2,2], [3,3,3]])
>>> a = np.array([2, 1, 0])
>>> np.ma.choose(a, choice)
masked_array(data = [3 2 1],
mask = False,
fill_value=999999)
"""
def fmask(x):
"Returns the filled array, or True if masked."
if x is masked:
return True
return filled(x)
def nmask(x):
"Returns the mask, True if ``masked``, False if ``nomask``."
if x is masked:
return True
return getmask(x)
# Get the indices.
c = filled(indices, 0)
# Get the masks.
masks = [nmask(x) for x in choices]
data = [fmask(x) for x in choices]
# Construct the mask
outputmask = np.choose(c, masks, mode=mode)
outputmask = make_mask(mask_or(outputmask, getmask(indices)),
copy=0, shrink=True)
# Get the choices.
d = np.choose(c, data, mode=mode, out=out).view(MaskedArray)
if out is not None:
if isinstance(out, MaskedArray):
out.__setmask__(outputmask)
return out
d.__setmask__(outputmask)
return d
def round_(a, decimals=0, out=None):
"""
Return a copy of a, rounded to 'decimals' places.
When 'decimals' is negative, it specifies the number of positions
to the left of the decimal point. The real and imaginary parts of
complex numbers are rounded separately. Nothing is done if the
array is not of float type and 'decimals' is greater than or equal
to 0.
Parameters
----------
decimals : int
Number of decimals to round to. May be negative.
out : array_like
Existing array to use for output.
If not given, returns a default copy of a.
Notes
-----
If out is given and does not have a mask attribute, the mask of a
is lost!
"""
if out is None:
return np.round_(a, decimals, out)
else:
np.round_(getdata(a), decimals, out)
if hasattr(out, '_mask'):
out._mask = getmask(a)
return out
round = round_
# Needed by dot, so move here from extras.py. It will still be exported
# from extras.py for compatibility.
def mask_rowcols(a, axis=None):
"""
Mask rows and/or columns of a 2D array that contain masked values.
Mask whole rows and/or columns of a 2D array that contain
masked values. The masking behavior is selected using the
`axis` parameter.
- If `axis` is None, rows *and* columns are masked.
- If `axis` is 0, only rows are masked.
- If `axis` is 1 or -1, only columns are masked.
Parameters
----------
a : array_like, MaskedArray
The array to mask. If not a MaskedArray instance (or if no array
elements are masked), the result is a MaskedArray with `mask` set
to `nomask` (False). Must be a 2D array.
axis : int, optional
Axis along which to perform the operation. If None, applies to a
flattened version of the array.
Returns
-------
a : MaskedArray
A modified version of the input array, masked depending on the value
of the `axis` parameter.
Raises
------
NotImplementedError
If input array `a` is not 2D.
See Also
--------
mask_rows : Mask rows of a 2D array that contain masked values.
mask_cols : Mask cols of a 2D array that contain masked values.
masked_where : Mask where a condition is met.
Notes
-----
The input array's mask is modified by this function.
Examples
--------
>>> import numpy.ma as ma
>>> a = np.zeros((3, 3), dtype=np.int)
>>> a[1, 1] = 1
>>> a
array([[0, 0, 0],
[0, 1, 0],
[0, 0, 0]])
>>> a = ma.masked_equal(a, 1)
>>> a
masked_array(data =
[[0 0 0]
[0 -- 0]
[0 0 0]],
mask =
[[False False False]
[False True False]
[False False False]],
fill_value=999999)
>>> ma.mask_rowcols(a)
masked_array(data =
[[0 -- 0]
[-- -- --]
[0 -- 0]],
mask =
[[False True False]
[ True True True]
[False True False]],
fill_value=999999)
"""
a = array(a, subok=False)
if a.ndim != 2:
raise NotImplementedError("mask_rowcols works for 2D arrays only.")
m = getmask(a)
# Nothing is masked: return a
if m is nomask or not m.any():
return a
maskedval = m.nonzero()
a._mask = a._mask.copy()
if not axis:
a[np.unique(maskedval[0])] = masked
if axis in [None, 1, -1]:
a[:, np.unique(maskedval[1])] = masked
return a
# Include masked dot here to avoid import problems in getting it from
# extras.py. Note that it is not included in __all__, but rather exported
# from extras in order to avoid backward compatibility problems.
def dot(a, b, strict=False, out=None):
"""
Return the dot product of two arrays.
This function is the equivalent of `numpy.dot` that takes masked values
into account. Note that `strict` and `out` are in different positions
than in the method version. In order to maintain compatibility with the
corresponding method, it is recommended that the optional arguments be
treated as keyword only. At some point that may be mandatory.
.. note::
Works only with 2-D arrays at the moment.
Parameters
----------
a, b : masked_array_like
Input arrays.
strict : bool, optional
Whether masked data are propagated (True) or set to 0 (False) for
the computation. Default is False. Propagating the mask means that
if a masked value appears in a row or column, the whole row or
column is considered masked.
out : masked_array, optional
Output argument. This must have the exact kind that would be returned
if it was not used. In particular, it must have the right type, must be
C-contiguous, and its dtype must be the dtype that would be returned
for `dot(a,b)`. This is a performance feature. Therefore, if these
conditions are not met, an exception is raised, instead of attempting
to be flexible.
.. versionadded:: 1.10.2
See Also
--------
numpy.dot : Equivalent function for ndarrays.
Examples
--------
>>> a = ma.array([[1, 2, 3], [4, 5, 6]], mask=[[1, 0, 0], [0, 0, 0]])
>>> b = ma.array([[1, 2], [3, 4], [5, 6]], mask=[[1, 0], [0, 0], [0, 0]])
>>> np.ma.dot(a, b)
masked_array(data =
[[21 26]
[45 64]],
mask =
[[False False]
[False False]],
fill_value = 999999)
>>> np.ma.dot(a, b, strict=True)
masked_array(data =
[[-- --]
[-- 64]],
mask =
[[ True True]
[ True False]],
fill_value = 999999)
"""
# !!!: Works only with 2D arrays. There should be a way to get it to run
# with higher dimension
if strict and (a.ndim == 2) and (b.ndim == 2):
a = mask_rowcols(a, 0)
b = mask_rowcols(b, 1)
am = ~getmaskarray(a)
bm = ~getmaskarray(b)
if out is None:
d = np.dot(filled(a, 0), filled(b, 0))
m = ~np.dot(am, bm)
if d.ndim == 0:
d = np.asarray(d)
r = d.view(get_masked_subclass(a, b))
r.__setmask__(m)
return r
else:
d = np.dot(filled(a, 0), filled(b, 0), out._data)
if out.mask.shape != d.shape:
out._mask = np.empty(d.shape, MaskType)
np.dot(am, bm, out._mask)
np.logical_not(out._mask, out._mask)
return out
def inner(a, b):
"""
Returns the inner product of a and b for arrays of floating point types.
Like the generic NumPy equivalent, the product sum is over the last
dimension of `a` and `b`.
Notes
-----
The first argument is not conjugated.
"""
fa = filled(a, 0)
fb = filled(b, 0)
if len(fa.shape) == 0:
fa.shape = (1,)
if len(fb.shape) == 0:
fb.shape = (1,)
return np.inner(fa, fb).view(MaskedArray)
inner.__doc__ = doc_note(np.inner.__doc__,
"Masked values are replaced by 0.")
innerproduct = inner
def outer(a, b):
"maskedarray version of the numpy function."
fa = filled(a, 0).ravel()
fb = filled(b, 0).ravel()
d = np.outer(fa, fb)
ma = getmask(a)
mb = getmask(b)
if ma is nomask and mb is nomask:
return masked_array(d)
ma = getmaskarray(a)
mb = getmaskarray(b)
m = make_mask(1 - np.outer(1 - ma, 1 - mb), copy=0)
return masked_array(d, mask=m)
outer.__doc__ = doc_note(np.outer.__doc__,
"Masked values are replaced by 0.")
outerproduct = outer
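# Hedged sketch (an illustration, not original code): outer() fills masked
# values with 0 for the raw product, then masks any cell whose row or
# column input was masked. Hypothetical values:
#
# >>> import numpy as np
# >>> print(np.ma.outer(np.ma.array([1, 2], mask=[0, 1]), [3, 4]))
# [[3 4]
#  [-- --]]  # the whole second row is masked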
def _convolve_or_correlate(f, a, v, mode, propagate_mask):
"""
Helper function for ma.correlate and ma.convolve
"""
if propagate_mask:
# results which are contributed to by either item in any pair being invalid
mask = (
f(getmaskarray(a), np.ones(np.shape(v), dtype=np.bool), mode=mode)
| f(np.ones(np.shape(a), dtype=np.bool), getmaskarray(v), mode=mode)
)
data = f(getdata(a), getdata(v), mode=mode)
else:
# results which are not contributed to by any pair of valid elements
mask = ~f(~getmaskarray(a), ~getmaskarray(v))
data = f(filled(a, 0), filled(v, 0), mode=mode)
return masked_array(data, mask=mask)
def correlate(a, v, mode='valid', propagate_mask=True):
"""
Cross-correlation of two 1-dimensional sequences.
Parameters
----------
a, v : array_like
Input sequences.
mode : {'valid', 'same', 'full'}, optional
Refer to the `np.convolve` docstring. Note that the default
is 'valid', unlike `convolve`, which uses 'full'.
propagate_mask : bool
If True, then a result element is masked if any masked element contributes towards it.
If False, then a result element is only masked if no non-masked element
contributes towards it.
Returns
-------
out : MaskedArray
Discrete cross-correlation of `a` and `v`.
See Also
--------
numpy.correlate : Equivalent function in the top-level NumPy module.
"""
return _convolve_or_correlate(np.correlate, a, v, mode, propagate_mask)
def convolve(a, v, mode='full', propagate_mask=True):
"""
Returns the discrete, linear convolution of two one-dimensional sequences.
Parameters
----------
a, v : array_like
Input sequences.
mode : {'valid', 'same', 'full'}, optional
Refer to the `np.convolve` docstring.
propagate_mask : bool
If True, then the result element is masked if any masked element is
included in the sum for it.
If False, then the result element is only masked if no non-masked cells
contribute towards it.
Returns
-------
out : MaskedArray
Discrete, linear convolution of `a` and `v`.
See Also
--------
numpy.convolve : Equivalent function in the top-level NumPy module.
"""
return _convolve_or_correlate(np.convolve, a, v, mode, propagate_mask)
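# Hedged sketch (an illustration, not original code): the propagate_mask
# flag controls how aggressively masks spread through the sliding sums.
# Hypothetical values:
#
# >>> import numpy as np
# >>> a = np.ma.array([1, 1, 1], mask=[0, 1, 0])
# >>> np.ma.convolve(a, [1, 1])                       # propagate_mask=True
# result masked wherever the masked middle element contributes
# >>> np.ma.convolve(a, [1, 1], propagate_mask=False)
# masked only where every contributing element was masked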
def allequal(a, b, fill_value=True):
"""
Return True if all entries of a and b are equal, using
fill_value as a truth value where either or both are masked.
Parameters
----------
a, b : array_like
Input arrays to compare.
fill_value : bool, optional
Whether masked values in a or b are considered equal (True) or not
(False).
Returns
-------
y : bool
Returns True if the two arrays are equal within the given
tolerance, False otherwise. If either array contains NaN,
then False is returned.
See Also
--------
all, any
numpy.ma.allclose
Examples
--------
>>> a = ma.array([1e10, 1e-7, 42.0], mask=[0, 0, 1])
>>> a
masked_array(data = [10000000000.0 1e-07 --],
mask = [False False True],
fill_value=1e+20)
>>> b = array([1e10, 1e-7, -42.0])
>>> b
array([ 1.00000000e+10, 1.00000000e-07, -4.20000000e+01])
>>> ma.allequal(a, b, fill_value=False)
False
>>> ma.allequal(a, b)
True
"""
m = mask_or(getmask(a), getmask(b))
if m is nomask:
x = getdata(a)
y = getdata(b)
d = umath.equal(x, y)
return d.all()
elif fill_value:
x = getdata(a)
y = getdata(b)
d = umath.equal(x, y)
dm = array(d, mask=m, copy=False)
return dm.filled(True).all(None)
else:
return False
def allclose(a, b, masked_equal=True, rtol=1e-5, atol=1e-8):
"""
Returns True if two arrays are element-wise equal within a tolerance.
This function is equivalent to `allclose` except that masked values
are treated as equal (default) or unequal, depending on the `masked_equal`
argument.
Parameters
----------
a, b : array_like
Input arrays to compare.
masked_equal : bool, optional
Whether masked values in `a` and `b` are considered equal (True) or not
(False). They are considered equal by default.
rtol : float, optional
Relative tolerance. The relative difference is equal to ``rtol * b``.
Default is 1e-5.
atol : float, optional
Absolute tolerance. The absolute difference is equal to `atol`.
Default is 1e-8.
Returns
-------
y : bool
Returns True if the two arrays are equal within the given
tolerance, False otherwise. If either array contains NaN, then
False is returned.
See Also
--------
all, any
numpy.allclose : the non-masked `allclose`.
Notes
-----
If the following equation is element-wise True, then `allclose` returns
True::
absolute(`a` - `b`) <= (`atol` + `rtol` * absolute(`b`))
Return True if all elements of `a` and `b` are equal subject to
given tolerances.
Examples
--------
>>> a = ma.array([1e10, 1e-7, 42.0], mask=[0, 0, 1])
>>> a
masked_array(data = [10000000000.0 1e-07 --],
mask = [False False True],
fill_value = 1e+20)
>>> b = ma.array([1e10, 1e-8, -42.0], mask=[0, 0, 1])
>>> ma.allclose(a, b)
False
>>> a = ma.array([1e10, 1e-8, 42.0], mask=[0, 0, 1])
>>> b = ma.array([1.00001e10, 1e-9, -42.0], mask=[0, 0, 1])
>>> ma.allclose(a, b)
True
>>> ma.allclose(a, b, masked_equal=False)
False
Masked values are not compared directly.
>>> a = ma.array([1e10, 1e-8, 42.0], mask=[0, 0, 1])
>>> b = ma.array([1.00001e10, 1e-9, 42.0], mask=[0, 0, 1])
>>> ma.allclose(a, b)
True
>>> ma.allclose(a, b, masked_equal=False)
False
"""
x = masked_array(a, copy=False)
y = masked_array(b, copy=False)
# make sure y is an inexact type to avoid abs(MIN_INT); will cause
# casting of x later.
dtype = np.result_type(y, 1.)
if y.dtype != dtype:
y = masked_array(y, dtype=dtype, copy=False)
m = mask_or(getmask(x), getmask(y))
xinf = np.isinf(masked_array(x, copy=False, mask=m)).filled(False)
# If we have some infs, they should fall at the same place.
if not np.all(xinf == filled(np.isinf(y), False)):
return False
# No infs at all
if not np.any(xinf):
d = filled(less_equal(absolute(x - y), atol + rtol * absolute(y)),
masked_equal)
return np.all(d)
if not np.all(filled(x[xinf] == y[xinf], masked_equal)):
return False
x = x[~xinf]
y = y[~xinf]
d = filled(less_equal(absolute(x - y), atol + rtol * absolute(y)),
masked_equal)
return np.all(d)
def asarray(a, dtype=None, order=None):
"""
Convert the input to a masked array of the given data-type.
No copy is performed if the input is already an `ndarray`. If `a` is
a subclass of `MaskedArray`, a base class `MaskedArray` is returned.
Parameters
----------
a : array_like
Input data, in any form that can be converted to a masked array. This
includes lists, lists of tuples, tuples, tuples of tuples, tuples
of lists, ndarrays and masked arrays.
dtype : dtype, optional
By default, the data-type is inferred from the input data.
order : {'C', 'F'}, optional
Whether to use row-major ('C') or column-major ('FORTRAN') memory
representation. Default is 'C'.
Returns
-------
out : MaskedArray
Masked array interpretation of `a`.
See Also
--------
asanyarray : Similar to `asarray`, but conserves subclasses.
Examples
--------
>>> x = np.arange(10.).reshape(2, 5)
>>> x
array([[ 0., 1., 2., 3., 4.],
[ 5., 6., 7., 8., 9.]])
>>> np.ma.asarray(x)
masked_array(data =
[[ 0. 1. 2. 3. 4.]
[ 5. 6. 7. 8. 9.]],
mask =
False,
fill_value = 1e+20)
>>> type(np.ma.asarray(x))
<class 'numpy.ma.core.MaskedArray'>
"""
order = order or 'C'
return masked_array(a, dtype=dtype, copy=False, keep_mask=True,
subok=False, order=order)
def asanyarray(a, dtype=None):
"""
Convert the input to a masked array, conserving subclasses.
If `a` is a subclass of `MaskedArray`, its class is conserved.
No copy is performed if the input is already an `ndarray`.
Parameters
----------
a : array_like
Input data, in any form that can be converted to an array.
dtype : dtype, optional
By default, the data-type is inferred from the input data.
Returns
-------
out : MaskedArray
MaskedArray interpretation of `a`.
See Also
--------
asarray : Similar to `asanyarray`, but does not conserve subclass.
Examples
--------
>>> x = np.arange(10.).reshape(2, 5)
>>> x
array([[ 0., 1., 2., 3., 4.],
[ 5., 6., 7., 8., 9.]])
>>> np.ma.asanyarray(x)
masked_array(data =
[[ 0. 1. 2. 3. 4.]
[ 5. 6. 7. 8. 9.]],
mask =
False,
fill_value = 1e+20)
>>> type(np.ma.asanyarray(x))
<class 'numpy.ma.core.MaskedArray'>
"""
return masked_array(a, dtype=dtype, copy=False, keep_mask=True, subok=True)
##############################################################################
# Pickling #
##############################################################################
def dump(a, F):
"""
Pickle a masked array to a file.
This is a wrapper around ``cPickle.dump``.
Parameters
----------
a : MaskedArray
The array to be pickled.
F : str or file-like object
The file to pickle `a` to. If a string, the full path to the file.
"""
if not hasattr(F, 'readline'):
F = open(F, 'w')
return pickle.dump(a, F)
def dumps(a):
"""
Return a string corresponding to the pickling of a masked array.
This is a wrapper around ``cPickle.dumps``.
Parameters
----------
a : MaskedArray
The array for which the string representation of the pickle is
returned.
"""
return pickle.dumps(a)
def load(F):
"""
Wrapper around ``cPickle.load`` which accepts either a file-like object
or a filename.
Parameters
----------
F : str or file
The file or file name to load.
See Also
--------
dump : Pickle an array
Notes
-----
This is different from `numpy.load`, which does not use cPickle but loads
the NumPy binary .npy format.
"""
if not hasattr(F, 'readline'):
F = open(F, 'r')
return pickle.load(F)
def loads(strg):
"""
Load a pickle from the current string.
The result of ``cPickle.loads(strg)`` is returned.
Parameters
----------
strg : str
The string to load.
See Also
--------
dumps : Return a string corresponding to the pickling of a masked array.
"""
return pickle.loads(strg)
def fromfile(file, dtype=float, count=-1, sep=''):
raise NotImplementedError(
"fromfile() not yet implemented for a MaskedArray.")
def fromflex(fxarray):
"""
Build a masked array from a suitable flexible-type array.
The input array has to have a data-type with ``_data`` and ``_mask``
fields. This type of array is output by `MaskedArray.toflex`.
Parameters
----------
fxarray : ndarray
The structured input array, containing ``_data`` and ``_mask``
fields. If present, other fields are discarded.
Returns
-------
result : MaskedArray
The constructed masked array.
See Also
--------
MaskedArray.toflex : Build a flexible-type array from a masked array.
Examples
--------
>>> x = np.ma.array(np.arange(9).reshape(3, 3), mask=[0] + [1, 0] * 4)
>>> rec = x.toflex()
>>> rec
array([[(0, False), (1, True), (2, False)],
[(3, True), (4, False), (5, True)],
[(6, False), (7, True), (8, False)]],
dtype=[('_data', '<i4'), ('_mask', '|b1')])
>>> x2 = np.ma.fromflex(rec)
>>> x2
masked_array(data =
[[0 -- 2]
[-- 4 --]
[6 -- 8]],
mask =
[[False True False]
[ True False True]
[False True False]],
fill_value = 999999)
Extra fields can be present in the structured array but are discarded:
>>> dt = [('_data', '<i4'), ('_mask', '|b1'), ('field3', '<f4')]
>>> rec2 = np.zeros((2, 2), dtype=dt)
>>> rec2
array([[(0, False, 0.0), (0, False, 0.0)],
[(0, False, 0.0), (0, False, 0.0)]],
dtype=[('_data', '<i4'), ('_mask', '|b1'), ('field3', '<f4')])
>>> y = np.ma.fromflex(rec2)
>>> y
masked_array(data =
[[0 0]
[0 0]],
mask =
[[False False]
[False False]],
fill_value = 999999)
"""
return masked_array(fxarray['_data'], mask=fxarray['_mask'])
class _convert2ma:
"""
Convert functions from numpy to numpy.ma.
Parameters
----------
funcname : str
Name of the numpy function to wrap.
"""
__doc__ = None
def __init__(self, funcname, params=None):
self._func = getattr(np, funcname)
self.__doc__ = self.getdoc()
self._extras = params or {}
def getdoc(self):
"Return the doc of the function (from the doc of the method)."
doc = getattr(self._func, '__doc__', None)
sig = get_object_signature(self._func)
if doc:
# Add the signature of the function at the beginning of the doc
if sig:
sig = "%s%s\n" % (self._func.__name__, sig)
doc = sig + doc
return doc
def __call__(self, *args, **params):
# Find the common parameters to the call and the definition
_extras = self._extras
common_params = set(params).intersection(_extras)
# Drop the common parameters from the call
for p in common_params:
_extras[p] = params.pop(p)
# Get the result
result = self._func.__call__(*args, **params).view(MaskedArray)
if "fill_value" in common_params:
result.fill_value = _extras.get("fill_value", None)
if "hardmask" in common_params:
result._hardmask = bool(_extras.get("hard_mask", False))
return result
arange = _convert2ma('arange', params=dict(fill_value=None, hardmask=False))
clip = np.clip
diff = np.diff
empty = _convert2ma('empty', params=dict(fill_value=None, hardmask=False))
empty_like = _convert2ma('empty_like')
frombuffer = _convert2ma('frombuffer')
fromfunction = _convert2ma('fromfunction')
identity = _convert2ma(
'identity', params=dict(fill_value=None, hardmask=False))
indices = np.indices
ones = _convert2ma('ones', params=dict(fill_value=None, hardmask=False))
ones_like = np.ones_like
squeeze = np.squeeze
zeros = _convert2ma('zeros', params=dict(fill_value=None, hardmask=False))
zeros_like = np.zeros_like
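# Hedged sketch (an illustration, not original code): the _convert2ma
# wrappers accept an extra fill_value keyword that is split off before the
# underlying numpy constructor is called, then applied to the result.
# Hypothetical values:
#
# >>> import numpy as np
# >>> z = np.ma.zeros(3, fill_value=-999)
# >>> z.fill_value
# -999.0  # stored on the resulting MaskedArray, not passed to np.zeros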
def append(a, b, axis=None):
"""Append values to the end of an array.
.. versionadded:: 1.9.0
Parameters
----------
a : array_like
Values are appended to a copy of this array.
b : array_like
These values are appended to a copy of `a`. It must be of the
correct shape (the same shape as `a`, excluding `axis`). If `axis`
is not specified, `b` can be any shape and will be flattened
before use.
axis : int, optional
The axis along which `v` are appended. If `axis` is not given,
both `a` and `b` are flattened before use.
Returns
-------
append : MaskedArray
A copy of `a` with `b` appended to `axis`. Note that `append`
does not occur in-place: a new array is allocated and filled. If
`axis` is None, the result is a flattened array.
See Also
--------
numpy.append : Equivalent function in the top-level NumPy module.
Examples
--------
>>> import numpy.ma as ma
>>> a = ma.masked_values([1, 2, 3], 2)
>>> b = ma.masked_values([[4, 5, 6], [7, 8, 9]], 7)
>>> print(ma.append(a, b))
[1 -- 3 4 5 6 -- 8 9]
"""
return concatenate([a, b], axis)
|
nacl-webkit/chrome_deps | refs/heads/master | native_client_sdk/src/build_tools/manifest_util.py | 2 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import copy
import hashlib
import json
import string
import sys
import urllib2
MANIFEST_VERSION = 2
# Some commonly-used key names.
ARCHIVES_KEY = 'archives'
BUNDLES_KEY = 'bundles'
NAME_KEY = 'name'
REVISION_KEY = 'revision'
VERSION_KEY = 'version'
# Valid values for the archive.host_os field
HOST_OS_LITERALS = frozenset(['mac', 'win', 'linux', 'all'])
# Valid keys for various sdk objects, used for validation.
VALID_ARCHIVE_KEYS = frozenset(['host_os', 'size', 'checksum', 'url'])
# Valid values for bundle.stability field
STABILITY_LITERALS = [
'obsolete', 'post_stable', 'stable', 'beta', 'dev', 'canary']
# Valid values for bundle-recommended field.
YES_NO_LITERALS = ['yes', 'no']
VALID_BUNDLES_KEYS = frozenset([
ARCHIVES_KEY, NAME_KEY, VERSION_KEY, REVISION_KEY,
'description', 'desc_url', 'stability', 'recommended', 'repath',
'sdk_revision'
])
VALID_MANIFEST_KEYS = frozenset(['manifest_version', BUNDLES_KEY])
def GetHostOS():
'''Returns the host_os value that corresponds to the current host OS'''
return {
'linux2': 'linux',
'darwin': 'mac',
'cygwin': 'win',
'win32': 'win'
}[sys.platform]
def DictToJSON(pydict):
"""Convert a dict to a JSON-formatted string."""
pretty_string = json.dumps(pydict, sort_keys=False, indent=2)
# json.dumps sometimes returns trailing whitespace and does not put
# a newline at the end. This code fixes these problems.
pretty_lines = pretty_string.split('\n')
return '\n'.join([line.rstrip() for line in pretty_lines]) + '\n'
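# Hedged sketch (an illustration, not original code): DictToJSON normalizes
# json.dumps output to have no trailing whitespace and exactly one final
# newline.
#
# >>> DictToJSON({'a': 1})
# '{\n  "a": 1\n}\n'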
def DownloadAndComputeHash(from_stream, to_stream=None, progress_func=None):
'''Download the archive data from from_stream and generate sha1 and
size info.
Args:
from_stream: An input stream that supports read.
to_stream: [optional] the data is written to to_stream if it is
provided.
progress_func: [optional] A function used to report download progress. If
provided, progress_func is called with progress=0 at the
beginning of the download, periodically with the cumulative
byte count during the download, and progress=100 at the end.
Return
A tuple (sha1, size) where sha1 is a sha1-hash for the archive data and
size is the size of the archive data in bytes.'''
# Use a no-op progress function if none is specified.
def progress_no_op(progress):
pass
if not progress_func:
progress_func = progress_no_op
sha1_hash = hashlib.sha1()
size = 0
progress_func(progress=0)
while True:
data = from_stream.read(32768)
if not data:
break
sha1_hash.update(data)
size += len(data)
if to_stream:
to_stream.write(data)
progress_func(size)
progress_func(progress=100)
return sha1_hash.hexdigest(), size
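# Hedged usage sketch (not part of the original module): any object with a
# read() method works as from_stream, so the helper is easy to exercise
# with an in-memory buffer.
#
# >>> import cStringIO
# >>> DownloadAndComputeHash(cStringIO.StringIO('abc'))
# ('a9993e364706816aba3e25717850c26c9cd0d89d', 3)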
class Error(Exception):
"""Generic error/exception for manifest_util module"""
pass
class Archive(dict):
"""A placeholder for sdk archive information. We derive Archive from
dict so that it is easily serializable. """
def __init__(self, host_os_name):
""" Create a new archive for the given host-os name. """
super(Archive, self).__init__()
self['host_os'] = host_os_name
def CopyFrom(self, src):
"""Update the content of the archive by copying values from the given
dictionary.
Args:
src: The dictionary whose values must be copied to the archive."""
for key, value in src.items():
self[key] = value
def Validate(self, error_on_unknown_keys=False):
"""Validate the content of the archive object. Raise an Error if
an invalid or missing field is found.
Args:
error_on_unknown_keys: If True, raise an Error when unknown keys are
found in the archive.
"""
host_os = self.get('host_os', None)
if host_os and host_os not in HOST_OS_LITERALS:
raise Error('Invalid host-os name in archive')
# Ensure host_os has a valid string. We'll use it for pretty printing.
if not host_os:
host_os = 'all (default)'
if not self.get('url', None):
raise Error('Archive "%s" has no URL' % host_os)
if not self.get('size', None):
raise Error('Archive "%s" has no size' % host_os)
checksum = self.get('checksum', None)
if not checksum:
raise Error('Archive "%s" has no checksum' % host_os)
elif not isinstance(checksum, dict):
raise Error('Archive "%s" has a checksum, but it is not a dict' % host_os)
elif not len(checksum):
raise Error('Archive "%s" has an empty checksum dict' % host_os)
# Verify that all key names are valid.
if error_on_unknown_keys:
for key in self:
if key not in VALID_ARCHIVE_KEYS:
raise Error('Archive "%s" has invalid attribute "%s"' % (
host_os, key))
def UpdateVitals(self, revision):
"""Update the size and checksum information for this archive
based on the content currently at the URL.
This allows the template manifest to be maintained without
the need for sizes and checksums to be present.
"""
template = string.Template(self['url'])
self['url'] = template.substitute({'revision': revision})
from_stream = urllib2.urlopen(self['url'])
sha1_hash, size = DownloadAndComputeHash(from_stream)
self['size'] = size
self['checksum'] = { 'sha1': sha1_hash }
def __getattr__(self, name):
"""Retrieve values from this dict using attributes.
This allows for foo.bar instead of foo['bar'].
Args:
name: the name of the key, 'bar' in the example above.
Returns:
The value associated with that key."""
if name not in self:
raise AttributeError(name)
# special case, self.checksum returns the sha1, not the checksum dict.
if name == 'checksum':
return self.GetChecksum()
return self.__getitem__(name)
def __setattr__(self, name, value):
"""Set values in this dict using attributes.
This allows for foo.bar instead of foo['bar'].
Args:
name: The name of the key, 'bar' in the example above.
value: The value to associate with that key."""
# special case, self.checksum returns the sha1, not the checksum dict.
if name == 'checksum':
self.setdefault('checksum', {})['sha1'] = value
return
return self.__setitem__(name, value)
def GetChecksum(self, hash_type='sha1'):
"""Returns a given cryptographic checksum of the archive"""
return self['checksum'][hash_type]
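# Hedged usage sketch (not part of the original module): attribute access
# maps onto dict keys, and assigning `checksum` stores a {'sha1': ...}
# dict. The values below are hypothetical.
#
# >>> a = Archive('linux')
# >>> a.url = 'http://example.com/sdk.tar.bz2'
# >>> a.size = 1024
# >>> a.checksum = 'deadbeef' * 5
# >>> a.Validate()  # passes: url, size and checksum are all present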
class Bundle(dict):
"""A placeholder for sdk bundle information. We derive Bundle from
dict so that it is easily serializable."""
def __init__(self, obj):
""" Create a new bundle with the given bundle name."""
if isinstance(obj, str) or isinstance(obj, unicode):
dict.__init__(self, [(ARCHIVES_KEY, []), (NAME_KEY, obj)])
else:
dict.__init__(self, obj)
def MergeWithBundle(self, bundle):
"""Merge this bundle with |bundle|.
Merges dict in |bundle| with this one in such a way that keys are not
duplicated: the values of the keys in |bundle| take precedence in the
resulting dictionary.
Archives in |bundle| will be appended to archives in self.
Args:
bundle: The other bundle. Must be a dict.
"""
assert self is not bundle
for k, v in bundle.iteritems():
if k == ARCHIVES_KEY:
for archive in v:
self.get(k, []).append(archive)
else:
self[k] = v
def __str__(self):
return self.GetDataAsString()
def GetDataAsString(self):
"""Returns the JSON bundle object, pretty-printed"""
return DictToJSON(self)
def LoadDataFromString(self, json_string):
"""Load a JSON bundle string. Raises an exception if json_string
is not well-formed JSON.
Args:
json_string: a JSON-formatted string containing the bundle
"""
self.CopyFrom(json.loads(json_string))
def CopyFrom(self, source):
"""Update the content of the bundle by copying values from the given
dictionary.
Args:
source: The dictionary whose values must be copied to the bundle."""
for key, value in source.items():
if key == ARCHIVES_KEY:
archives = []
for a in value:
new_archive = Archive(a['host_os'])
new_archive.CopyFrom(a)
archives.append(new_archive)
self[ARCHIVES_KEY] = archives
else:
self[key] = value
def Validate(self, add_missing_info=False, error_on_unknown_keys=False):
"""Validate the content of the bundle. Raise an Error if an invalid or
missing field is found.
Args:
add_missing_info: If True, fill in missing archive size/checksum info
by downloading each archive.
error_on_unknown_keys: If True, raise an Error when unknown keys are
found in the bundle.
"""
# Check required fields.
if not self.get(NAME_KEY):
raise Error('Bundle has no name')
if self.get(REVISION_KEY) == None:
raise Error('Bundle "%s" is missing a revision number' % self[NAME_KEY])
if self.get(VERSION_KEY) == None:
raise Error('Bundle "%s" is missing a version number' % self[NAME_KEY])
if not self.get('description'):
raise Error('Bundle "%s" is missing a description' % self[NAME_KEY])
if not self.get('stability'):
raise Error('Bundle "%s" is missing stability info' % self[NAME_KEY])
if self.get('recommended') == None:
raise Error('Bundle "%s" is missing the recommended field' %
self[NAME_KEY])
# Check specific values
if self['stability'] not in STABILITY_LITERALS:
raise Error('Bundle "%s" has invalid stability field: "%s"' %
(self[NAME_KEY], self['stability']))
if self['recommended'] not in YES_NO_LITERALS:
raise Error(
'Bundle "%s" has invalid recommended field: "%s"' %
(self[NAME_KEY], self['recommended']))
# Verify that all key names are valid.
if error_on_unknown_keys:
for key in self:
if key not in VALID_BUNDLES_KEYS:
raise Error('Bundle "%s" has invalid attribute "%s"' %
(self[NAME_KEY], key))
# Validate the archives
for archive in self[ARCHIVES_KEY]:
if add_missing_info and 'size' not in archive:
archive.UpdateVitals(self[REVISION_KEY])
archive.Validate(error_on_unknown_keys)
def GetArchive(self, host_os_name):
"""Retrieve the archive for the given host os.
Args:
host_os_name: name of host os whose archive must be retrieved.
Return:
An Archive instance or None if it doesn't exist."""
for archive in self[ARCHIVES_KEY]:
if archive.host_os == host_os_name or archive.host_os == 'all':
return archive
return None
def GetHostOSArchive(self):
"""Retrieve the archive for the current host os."""
return self.GetArchive(GetHostOS())
def GetHostOSArchives(self):
"""Retrieve all archives for the current host os, or marked all.
"""
return [archive for archive in self.GetArchives()
if archive.host_os in (GetHostOS(), 'all')]
def GetArchives(self):
"""Returns all the archives in this bundle"""
return self[ARCHIVES_KEY]
def AddArchive(self, archive):
"""Add an archive to this bundle."""
self[ARCHIVES_KEY].append(archive)
def RemoveAllArchivesForHostOS(self, host_os_name):
"""Remove an archive from this Bundle."""
if host_os_name == 'all':
del self[ARCHIVES_KEY][:]
else:
# Rebuild the list so every match is removed; deleting entries while
# enumerating would skip the element after each deletion.
self[ARCHIVES_KEY] = [archive for archive in self[ARCHIVES_KEY]
if archive.host_os != host_os_name]
def __getattr__(self, name):
"""Retrieve values from this dict using attributes.
This allows for foo.bar instead of foo['bar'].
Args:
name: the name of the key, 'bar' in the example above.
Returns:
The value associated with that key."""
if name not in self:
raise AttributeError(name)
return self.__getitem__(name)
def __setattr__(self, name, value):
"""Set values in this dict using attributes.
This allows for foo.bar instead of foo['bar'].
Args:
name: The name of the key, 'bar' in the example above.
value: The value to associate with that key."""
self.__setitem__(name, value)
def __eq__(self, bundle):
"""Test if two bundles are equal.
Normally the default comparison for two dicts is fine, but in this case we
don't care about the list order of the archives.
Args:
bundle: The other bundle to compare against.
Returns:
True if the bundles are equal."""
if not isinstance(bundle, Bundle):
return False
if len(self.keys()) != len(bundle.keys()):
return False
for key in self.keys():
if key not in bundle:
return False
# special comparison for ARCHIVE_KEY because we don't care about the list
# ordering.
if key == ARCHIVES_KEY:
if len(self[key]) != len(bundle[key]):
return False
for archive in self[key]:
if archive != bundle.GetArchive(archive.host_os):
return False
elif self[key] != bundle[key]:
return False
return True
def __ne__(self, bundle):
"""Test if two bundles are unequal.
See __eq__ for more info."""
return not self.__eq__(bundle)
class SDKManifest(object):
"""This class contains utilities for manipulation an SDK manifest string
For ease of unit-testing, this class should not contain any file I/O.
"""
def __init__(self):
"""Create a new SDKManifest object with default contents"""
self._manifest_data = {
"manifest_version": MANIFEST_VERSION,
"bundles": [],
}
def Validate(self, add_missing_info=False):
"""Validate the Manifest file and raises an exception for problems"""
# Validate the manifest top level
if self._manifest_data["manifest_version"] > MANIFEST_VERSION:
raise Error("Manifest version too high: %s" %
self._manifest_data["manifest_version"])
# Verify that all key names are valid.
for key in self._manifest_data:
if key not in VALID_MANIFEST_KEYS:
raise Error('Manifest has invalid attribute "%s"' % key)
# Validate each bundle
for bundle in self._manifest_data[BUNDLES_KEY]:
bundle.Validate(add_missing_info)
def GetBundle(self, name):
"""Get a bundle from the array of bundles.
Args:
name: the name of the bundle to return.
Return:
The first bundle with the given name, or None if it is not found."""
if not BUNDLES_KEY in self._manifest_data:
return None
bundles = [bundle for bundle in self._manifest_data[BUNDLES_KEY]
if bundle[NAME_KEY] == name]
if len(bundles) > 1:
sys.stderr.write("WARNING: More than one bundle with name"
"'%s' exists.\n" % name)
return bundles[0] if len(bundles) > 0 else None
def GetBundles(self):
"""Return all the bundles in the manifest."""
return self._manifest_data[BUNDLES_KEY]
def SetBundle(self, new_bundle):
"""Replace named bundle. Add if absent.
Args:
new_bundle: The bundle to store.
"""
name = new_bundle[NAME_KEY]
if not BUNDLES_KEY in self._manifest_data:
self._manifest_data[BUNDLES_KEY] = []
bundles = self._manifest_data[BUNDLES_KEY]
# Delete any bundles with this name from the list, then add the new one.
# This has the effect of replacing the bundle if it already exists. It
# also removes all duplicate bundles.
for i, bundle in enumerate(bundles):
if bundle[NAME_KEY] == name:
del bundles[i]
bundles.append(copy.deepcopy(new_bundle))
def BundleNeedsUpdate(self, bundle):
"""Decides if a bundle needs to be updated.
A bundle needs to be updated if it is not installed (doesn't exist in this
manifest file) or if its (version, revision) pair is newer than the one in
this file.
Args:
bundle: The Bundle to test.
Returns:
True if Bundle needs to be updated.
"""
if NAME_KEY not in bundle:
raise KeyError("Bundle must have a 'name' key.")
local_bundle = self.GetBundle(bundle[NAME_KEY])
return (local_bundle == None) or (
(local_bundle[VERSION_KEY], local_bundle[REVISION_KEY]) <
(bundle[VERSION_KEY], bundle[REVISION_KEY]))
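# Hedged sketch (an illustration, not original code): the update test relies
# on Python's lexicographic tuple comparison, so version takes precedence
# over revision.
#
# >>> (25, 1400) < (26, 100)   # newer version wins despite lower revision
# True
# >>> (25, 1400) < (25, 1401)  # same version, higher revision
# True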
def MergeBundle(self, bundle, allow_existing=True):
"""Merge a Bundle into this manifest.
The new bundle is added if not present, or merged into the existing bundle.
Args:
bundle: The bundle to merge.
allow_existing: If False, raise an Error when the bundle already exists.
"""
if NAME_KEY not in bundle:
raise KeyError("Bundle must have a 'name' key.")
local_bundle = self.GetBundle(bundle.name)
if not local_bundle:
self.SetBundle(bundle)
else:
if not allow_existing:
raise Error('cannot merge manifest bundle \'%s\', it already exists'
% bundle.name)
local_bundle.MergeWithBundle(bundle)
def MergeManifest(self, manifest):
'''Merge another manifest into this manifest, disallowing overriding.
Args:
manifest: The manifest to merge.
'''
for bundle in manifest.GetBundles():
self.MergeBundle(bundle, allow_existing=False)
def FilterBundles(self, predicate):
"""Filter the list of bundles by |predicate|.
For all bundles in this manifest, if predicate(bundle) is False, the bundle
is removed from the manifest.
Args:
      predicate: a function that takes a bundle and returns True to keep it
          or False to remove it.
"""
self._manifest_data[BUNDLES_KEY] = filter(predicate, self.GetBundles())
def LoadDataFromString(self, json_string, add_missing_info=False):
"""Load a JSON manifest string. Raises an exception if json_string
is not well-formed JSON.
Args:
      json_string: a JSON-formatted string containing the previous manifest.
      add_missing_info: If True, fill in missing bundle information while
          validating the loaded manifest."""
new_manifest = json.loads(json_string)
for key, value in new_manifest.items():
if key == BUNDLES_KEY:
# Remap each bundle in |value| to a Bundle instance
bundles = []
for b in value:
new_bundle = Bundle(b[NAME_KEY])
new_bundle.CopyFrom(b)
bundles.append(new_bundle)
self._manifest_data[key] = bundles
else:
self._manifest_data[key] = value
self.Validate(add_missing_info)
def __str__(self):
return self.GetDataAsString()
def __eq__(self, other):
# Access to protected member _manifest_data of a client class
# pylint: disable=W0212
if (self._manifest_data['manifest_version'] !=
other._manifest_data['manifest_version']):
return False
self_bundle_names = set(b.name for b in self.GetBundles())
other_bundle_names = set(b.name for b in other.GetBundles())
if self_bundle_names != other_bundle_names:
return False
for bundle_name in self_bundle_names:
if self.GetBundle(bundle_name) != other.GetBundle(bundle_name):
return False
return True
def __ne__(self, other):
return not (self == other)
def GetDataAsString(self):
"""Returns the current JSON manifest object, pretty-printed"""
return DictToJSON(self._manifest_data)
|
alilotfi/django | refs/heads/master | tests/migrations/migrations_test_apps/lookuperror_a/migrations/0001_initial.py | 381 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='A1',
fields=[
('id', models.AutoField(serialize=False, verbose_name='ID', auto_created=True, primary_key=True)),
],
),
]
|
byte-foundry/tornado-fontconverter | refs/heads/master | unittestfont.py | 2 | import unittest, mimetypes, os, sys
import boto, boto.s3
from boto.s3.connection import S3Connection
from upload import Upload
from bucket import Bucket
from delete import Delete
EXT = ["woff", "ttf", "otf", "svg", "eot"]
exts = '.woff', '.ttf', '.otf', '.svg', '.eot'
def test_type(type):
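    # Register an application/xfont-* MIME type for each font extension,
    # then map the given MIME type back to its file extension.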
for ext in EXT:
mimetypes.add_type('application/xfont-' + ext, '.' + ext, True)
return (mimetypes.guess_extension(type, True))
class TestFunctions(unittest.TestCase):
if (sys.argv[-1] != '-v'):
print 'Run with -v option if you want to activate verbose mode'
def setUp(self):
self.fileinfo = {"body":"", "filename":"", "format":"" }
with open('testfont/testfont.svg', 'r+') as content:
self.fileinfo['body'] = content.read()
self.fileinfo['filename'] = content.name
self.fileinfo['format'] = 'otf'
self.conn = S3Connection(os.environ['ACCESS_KEY'], os.environ['SECRET_KEY'])
self.path = 'test1/test2/test3/'
self.testBucket = self.conn.create_bucket('testbucket-prototypo')
self.testBucket.set_acl('private')
def test_sendtobucket_onefont(self):
'''TEST : Checking S3 Bucket data sending - one font'''
self.assertTrue(Bucket.send_to_bucket(self.testBucket, 'test1/test2/test3/', 'testfont/', '.ttf', 'testfont/'))
def test_sendtobucket_allfonts(self):
'''TEST : Checking S3 Bucket data sending - all fonts'''
match = exts + ('.afm', '.css')
self.assertTrue(Bucket.send_to_bucket(self.testBucket, 'test11/test22/test33/', 'testfont/', match, 'testfont/'))
def test_deletebucket_first(self):
'''TEST : Checking S3 Bucket data deletion'''
self.assertTrue(Delete.delete_from_bucket('test1/test2/test3/'))
def test_deletebucket_second(self):
'''TEST : Checking S3 Bucket data deletion'''
self.assertTrue(Delete.delete_from_bucket('test11/test22/test33/'))
def test_svg_size(self):
'''TEST : Checking whether svg file isn't empty'''
self.assertGreater(os.stat(self.fileinfo['filename']).st_size, 0)
def test_css_generator(self):
'''TEST: Checking CSS File'''
template = "@font-face {\
\n\tfont-family: '" + 'prototypo' + "';\
\n\tsrc: url('" + 'testfont' + ".eot');\
\n\tsrc: url('" + 'testfont' + ".eot?#iefix') format('embedded-opentype'),\
\n\turl('" + 'testfont' + ".woff') format('woff'),\
\n\turl('" + 'testfont' + ".ttf') format('truetype'),\
\n\turl('" + 'testfont' + ".svg#ywftsvg') format('svg');\
\n\tfont-style: normal;\
\n\tfont-weight: normal;\
\n}\n\n"
self.assertEqual(Upload.css_generator('.', 'testfont', 'prototypo'), template)
def test_font_converter(self):
'''TEST: Checking converted font file'''
self.assertTrue(Upload.font_converter(self.fileinfo['filename'], self.fileinfo['format']))
def test_fontpack_generator(self):
'''TEST : Checking font-pack'''
self.assertTrue(Upload.fontpack_generator(self.fileinfo['filename']))
def test_zipfile(self):
'''TEST : Checking zip archive'''
self.assertTrue(Upload.zipdir('testfont'))
def test_count_files(self):
'''TEST : Checking zip archive completion'''
self.assertEqual(Upload.count_files('testfont/'), 7)
def test_svgtype(self):
'''TEST: Checking SVG File'''
self.assertEqual(mimetypes.guess_type(self.fileinfo['filename'])[0], 'image/svg+xml')
|
jtiki/djangocms-cascade | refs/heads/master | cmsplugin_cascade/link/forms.py | 1 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.contrib.sites.models import Site
from django.apps import apps
from django.forms import fields
from django.forms.models import ModelForm
from django.utils.module_loading import import_string
from django.utils.translation import ugettext_lazy as _
from django.core.exceptions import ObjectDoesNotExist
from cms.models import Page
if 'django_select2' in settings.INSTALLED_APPS:
SelectWidget = import_string('django_select2.forms.Select2Widget')
else:
SelectWidget = import_string('django.forms.widgets.Select')
class LinkSearchField(fields.ChoiceField):
def __init__(self, *args, **kwargs):
kwargs.setdefault('widget', SelectWidget)
super(LinkSearchField, self).__init__(*args, **kwargs)
def clean(self, value):
try:
return int(value)
except (TypeError, ValueError):
pass
class LinkForm(ModelForm):
"""
Form class to add fake fields for rendering the ModelAdmin's form, which later are used to
populate the glossary of the model.
"""
LINK_TYPE_CHOICES = (('cmspage', _("CMS Page")), ('exturl', _("External URL")), ('email', _("Mail To")),)
link_type = fields.ChoiceField()
cms_page = LinkSearchField(required=False, label='',
help_text=_("An internal link onto CMS pages of this site"))
ext_url = fields.URLField(required=False, label='', help_text=_("Link onto external page"))
mail_to = fields.EmailField(required=False, label='', help_text=_("Open Email program with this address"))
class Meta:
fields = ('glossary',)
def __init__(self, raw_data=None, *args, **kwargs):
instance = kwargs.get('instance')
default_link_type = {'type': self.LINK_TYPE_CHOICES[0][0]}
initial = instance and dict(instance.glossary) or {'link': default_link_type}
initial.update(kwargs.pop('initial', {}))
link_type = initial['link']['type']
self.base_fields['link_type'].choices = self.LINK_TYPE_CHOICES
self.base_fields['link_type'].initial = link_type
if raw_data and raw_data.get('shared_glossary'):
# convert this into an optional field since it is disabled with ``shared_glossary`` set
self.base_fields['link_type'].required = False
set_initial_linktype = getattr(self, 'set_initial_{0}'.format(link_type), None)
# populate choice field for selecting a CMS page
try:
site = instance.page.site
except AttributeError:
site = Site.objects.get_current()
choices = ((p.pk, '{0} ({1})'.format(p.get_page_title(), p.get_absolute_url()))
for p in Page.objects.drafts().on_site(site))
self.base_fields['cms_page'].choices = choices
if callable(set_initial_linktype):
set_initial_linktype(initial)
kwargs.update(initial=initial)
super(LinkForm, self).__init__(raw_data, *args, **kwargs)
def clean_glossary(self):
"""
This method rectifies the behavior of JSONFormFieldBase.clean which
converts the value of empty fields to None, while it shall be an empty dict.
"""
glossary = self.cleaned_data['glossary']
if glossary is None:
glossary = {}
return glossary
def clean(self):
cleaned_data = super(LinkForm, self).clean()
if self.is_valid():
if 'link_data' in cleaned_data:
cleaned_data['glossary'].update(link=cleaned_data['link_data'])
del self.cleaned_data['link_data']
elif 'link_type' in cleaned_data:
cleaned_data['glossary'].update(link={'type': cleaned_data['link_type']})
else:
cleaned_data['glossary'].update(link={'type': 'none'})
return cleaned_data
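    # Each clean_* method below stashes its result in 'link_data'; clean()
    # then moves that value into the glossary.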
def clean_cms_page(self):
if self.cleaned_data.get('link_type') == 'cmspage':
self.cleaned_data['link_data'] = {
'type': 'cmspage',
'model': 'cms.Page',
'pk': self.cleaned_data['cms_page'],
}
def clean_ext_url(self):
if self.cleaned_data.get('link_type') == 'exturl':
self.cleaned_data['link_data'] = {'type': 'exturl', 'url': self.cleaned_data['ext_url']}
def clean_mail_to(self):
if self.cleaned_data.get('link_type') == 'email':
self.cleaned_data['link_data'] = {'type': 'email', 'email': self.cleaned_data['mail_to']}
def set_initial_none(self, initial):
pass
def set_initial_cmspage(self, initial):
try:
# check if that page still exists, otherwise return nothing
Model = apps.get_model(*initial['link']['model'].split('.'))
initial['cms_page'] = Model.objects.get(pk=initial['link']['pk']).pk
except (KeyError, ObjectDoesNotExist):
pass
def set_initial_exturl(self, initial):
try:
initial['ext_url'] = initial['link']['url']
except KeyError:
pass
def set_initial_email(self, initial):
try:
initial['mail_to'] = initial['link']['email']
except KeyError:
pass
@classmethod
def get_form_class(cls):
"""
Hook to return a form class for editing a CMSPlugin inheriting from ``LinkPluginBase``.
"""
return cls
@classmethod
def unset_required_for(cls, sharable_fields):
"""
Fields borrowed by `SharedGlossaryAdmin` to build its temporary change form, only are
required if they are declared in `sharable_fields`. Otherwise just deactivate them.
"""
if 'link' not in sharable_fields:
cls.base_fields['link_type'].required = False
class TextLinkFormMixin(object):
"""
To be used in combination with `LinkForm` for easily accessing the field `link_content`.
"""
def clean(self):
cleaned_data = super(TextLinkFormMixin, self).clean()
if self.is_valid():
cleaned_data['glossary'].update(link_content=cleaned_data['link_content'])
return cleaned_data
|
gustavo94/UVaProblems | refs/heads/master | 573-The snail.py | 1 | from sys import stdin
from math import floor
# example input:
ex_in = "6 3 1 10\n10 2 1 50\n50 5 3 14\n50 6 4 1\n50 6 3 1\n1 1 1 1\n97 56 3 10\n56 3 1 5\n0 0 0 0"
def cal_limit_day(U,D,fatigue):
limit_day = floor((U-D)/fatigue)
if limit_day <= 0:
limit_day = 1
return limit_day
def total_down_by_fatigue(day, fatigue):
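    # The daily climb shrinks by `fatigue` each day, so the cumulative
    # reduction after `day` climbs is
    # fatigue * (0 + 1 + ... + (day-1)) = fatigue * (day-1)*day/2.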
fatigue_application_times = ((day-1)*day)/2
return fatigue_application_times * fatigue
def climbed_distance(day,U,fatigue):
last_day_of_climbing = floor(U/fatigue)
if day > last_day_of_climbing:
return (last_day_of_climbing * U) - (total_down_by_fatigue(last_day_of_climbing,fatigue))
return (U*day) - (total_down_by_fatigue(day,fatigue))
def total_climbed_before_night_at(day,U,fatigue,D): #total climbed at the end of the day
night_sliding = D*(day-1)
return climbed_distance(day,U,fatigue) - night_sliding
def total_climbed_after_night_at(day,U,fatigue,D): #total climbed at the end of the night
night_sliding = D*(day)
return climbed_distance(day,U,fatigue) - night_sliding
def can_the_snail_climb_the_well_at(day,U,fatigue,D,H):
total_climbed_at_day = total_climbed_before_night_at(day,U,fatigue,D)
answer = False
if (total_climbed_at_day > H):
answer = True
return answer
def day_for_succes(limit_day,U,fatigue,D,H):
day = limit_day
for day in range(limit_day,0,-1):
if not(can_the_snail_climb_the_well_at(day-1,U,fatigue,D,H)):
break
return day
def day_for_fail(day,U,fatigue,D):
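    # Grow `day` exponentially until the height after that night is negative,
    # then binary-search between the last non-negative day (min_limit) and
    # the first negative day for the exact day of failure.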
min_limit = 1
total_climbed = total_climbed_after_night_at(day,U,fatigue,D)
while True: #binary search
if total_climbed >= 0:
min_limit = day
day = day *2
total_climbed = total_climbed_after_night_at(day,U,fatigue,D)
else:
            if total_climbed_after_night_at((day-1),U,fatigue,D) >= 0: #if the previous day's total is still non-negative then day is the first day of failure
break
middle_day = floor((day+min_limit)/2)
if total_climbed_after_night_at(middle_day,U,fatigue,D) >= 0:
min_limit = middle_day
else:
day = middle_day
return day
# def brute_force(H,U,D,fatigue):
# day = 1
# height = 0
# while True:
# height += U
# U -= fatigue
# if U < 0:
# U = 0
# if height > H:
# print("success on day " + str(day))
# break
# height -= D
# if height < 0:
# print("failure on day "+ str(day))
# break
# day += 1
def main():
lines = stdin.read().splitlines() #Use ctrl+d to finish read process
# lines = ex_in.splitlines() # Testing
for curr_Line in lines:
        H,U,D,F = [int(param) for param in curr_Line.split()] #the semantics of H, U, D and F are given in the problem description
if H <= 0:
return
fatigue = U * (F/100)
        limit_day = cal_limit_day(U,D,fatigue) #after this day the snail cannot climb any higher because of fatigue
answer = can_the_snail_climb_the_well_at(limit_day,U,fatigue,D,H)
if answer:
day = day_for_succes(limit_day,U,fatigue,D,H)
print("success on day " + str(day))
else:
day = day_for_fail(limit_day,U,fatigue,D)
# print(total_climbed_before_night_at(31,U,fatigue,D))
print("failure on day "+ str(day))
# brute_force(H,U,D,fatigue)
return
main()
|
wbwseeker/spaCy | refs/heads/master | spacy/tests/tokenizer/test_tokens_from_list.py | 5 | from __future__ import unicode_literals
import pytest
def test1(en_tokenizer):
words = ['JAPAN', 'GET', 'LUCKY']
tokens = en_tokenizer.tokens_from_list(words)
assert len(tokens) == 3
assert tokens[0].orth_ == 'JAPAN'
|
elyezer/test-automation-examples | refs/heads/master | examples/unittest_example.py | 1 | import unittest
import calculator
class CalculatorTestCase(unittest.TestCase):
def test_add(self):
self.assertEqual(calculator.add(40, 2), 42)
def test_sub(self):
self.assertEqual(calculator.sub(40, 2), 38)
def test_mul(self):
self.assertEqual(calculator.mul(40, 2), 80)
def test_div(self):
self.assertEqual(calculator.div(40, 2), 20)
def test_add_strings(self):
self.assertEqual(calculator.add('a', 'b'), 'ab')
def test_fail(self):
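        # deliberately failing example: calculator.sub(2, 4) returns -2, not 0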
self.assertEqual(calculator.sub(2, 4), 0)
|
litecoin-project/Electrum-obsolete | refs/heads/master-ltc | gui/qt/network_dialog.py | 8 | #!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2012 thomasv@gitorious
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys, time, datetime, re, threading
from electrum.i18n import _
from electrum.util import print_error, print_msg
import os.path, json, ast, traceback
from PyQt4.QtGui import *
from PyQt4.QtCore import *
from electrum import DEFAULT_SERVERS, DEFAULT_PORTS
from util import *
protocol_names = ['TCP', 'HTTP', 'SSL', 'HTTPS']
protocol_letters = 'thsg'
class NetworkDialog(QDialog):
def __init__(self, network, config, parent):
QDialog.__init__(self,parent)
self.setModal(1)
self.setWindowTitle(_('Network'))
self.setMinimumSize(375, 20)
self.network = network
self.config = config
self.protocol = None
if parent:
n = len(network.interfaces)
if n:
status = _("Blockchain") + ": " + "%d "%(network.blockchain.height()) + _("blocks") + ".\n" + _("Getting block headers from %d nodes.")%n
else:
status = _("Not connected")
if network.is_connected():
status += "\n" + _("Server") + ": %s"%(network.interface.host)
else:
status += "\n" + _("Disconnected from server")
else:
status = _("Please choose a server.") + "\n" + _("Select 'Cancel' if you are offline.")
server = network.default_server
self.servers = network.get_servers()
vbox = QVBoxLayout()
vbox.setSpacing(30)
hbox = QHBoxLayout()
l = QLabel()
l.setPixmap(QPixmap(":icons/network.png"))
hbox.addStretch(10)
hbox.addWidget(l)
hbox.addWidget(QLabel(status))
hbox.addStretch(50)
msg = _("Electrum sends your wallet addresses to a single server, in order to receive your transaction history.") + "\n\n" \
+ _("In addition, Electrum connects to several nodes in order to download block headers and find out the longest blockchain.") + " " \
+ _("This blockchain is used to verify the transactions sent by the address server.")
hbox.addWidget(HelpButton(msg))
vbox.addLayout(hbox)
# grid layout
grid = QGridLayout()
grid.setSpacing(8)
vbox.addLayout(grid)
# protocol
self.server_protocol = QComboBox()
self.server_host = QLineEdit()
self.server_host.setFixedWidth(200)
self.server_port = QLineEdit()
self.server_port.setFixedWidth(60)
self.server_protocol.addItems(protocol_names)
self.server_protocol.connect(self.server_protocol, SIGNAL('currentIndexChanged(int)'), self.change_protocol)
grid.addWidget(QLabel(_('Protocol') + ':'), 3, 0)
grid.addWidget(self.server_protocol, 3, 1)
# server
grid.addWidget(QLabel(_('Server') + ':'), 0, 0)
# auto connect
self.autocycle_cb = QCheckBox(_('Auto-connect'))
self.autocycle_cb.setChecked(self.config.get('auto_cycle', True))
grid.addWidget(self.autocycle_cb, 0, 1)
if not self.config.is_modifiable('auto_cycle'): self.autocycle_cb.setEnabled(False)
msg = _("If auto-connect is enabled, Electrum will always use a server that is on the longest blockchain.") + " " \
+ _("If it is disabled, Electrum will warn you if your server is lagging.")
grid.addWidget(HelpButton(msg), 0, 4)
grid.addWidget(self.server_host, 0, 2, 1, 2)
grid.addWidget(self.server_port, 0, 3)
label = _('Active Servers') if network.irc_servers else _('Default Servers')
self.servers_list_widget = QTreeWidget(parent)
self.servers_list_widget.setHeaderLabels( [ label, _('Limit') ] )
self.servers_list_widget.setMaximumHeight(150)
self.servers_list_widget.setColumnWidth(0, 240)
if server:
host, port, protocol = server.split(':')
self.change_server(host, protocol)
self.set_protocol(self.network.protocol)
self.servers_list_widget.connect(self.servers_list_widget,
SIGNAL('currentItemChanged(QTreeWidgetItem*,QTreeWidgetItem*)'),
lambda x,y: self.server_changed(x))
grid.addWidget(self.servers_list_widget, 1, 1, 1, 3)
if not config.is_modifiable('server'):
for w in [self.server_host, self.server_port, self.server_protocol, self.servers_list_widget]: w.setEnabled(False)
def enable_set_server():
enabled = not self.autocycle_cb.isChecked()
self.server_host.setEnabled(enabled)
self.server_port.setEnabled(enabled)
self.servers_list_widget.setEnabled(enabled)
self.autocycle_cb.clicked.connect(enable_set_server)
enable_set_server()
# proxy setting
self.proxy_mode = QComboBox()
self.proxy_host = QLineEdit()
self.proxy_host.setFixedWidth(200)
self.proxy_port = QLineEdit()
self.proxy_port.setFixedWidth(60)
self.proxy_mode.addItems(['NONE', 'SOCKS4', 'SOCKS5', 'HTTP'])
def check_for_disable(index = False):
if self.proxy_mode.currentText() != 'NONE':
self.proxy_host.setEnabled(True)
self.proxy_port.setEnabled(True)
else:
self.proxy_host.setEnabled(False)
self.proxy_port.setEnabled(False)
check_for_disable()
self.proxy_mode.connect(self.proxy_mode, SIGNAL('currentIndexChanged(int)'), check_for_disable)
if not self.config.is_modifiable('proxy'):
for w in [self.proxy_host, self.proxy_port, self.proxy_mode]: w.setEnabled(False)
proxy_config = network.proxy if network.proxy else { "mode":"none", "host":"localhost", "port":"8080"}
self.proxy_mode.setCurrentIndex(self.proxy_mode.findText(str(proxy_config.get("mode").upper())))
self.proxy_host.setText(proxy_config.get("host"))
self.proxy_port.setText(proxy_config.get("port"))
grid.addWidget(QLabel(_('Proxy') + ':'), 4, 0)
grid.addWidget(self.proxy_mode, 4, 1)
grid.addWidget(self.proxy_host, 4, 2)
grid.addWidget(self.proxy_port, 4, 3)
# buttons
vbox.addLayout(ok_cancel_buttons(self))
self.setLayout(vbox)
def init_servers_list(self):
self.servers_list_widget.clear()
for _host, d in self.servers.items():
if d.get(self.protocol):
pruning_level = d.get('pruning','')
self.servers_list_widget.addTopLevelItem(QTreeWidgetItem( [ _host, pruning_level ] ))
def set_protocol(self, protocol):
if protocol != self.protocol:
self.protocol = protocol
self.init_servers_list()
def change_protocol(self, index):
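        # Keep the current host; if it does not offer the newly selected
        # protocol, fall back to one of the protocols it does serve.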
p = protocol_letters[index]
host = unicode(self.server_host.text())
pp = self.servers.get(host, DEFAULT_PORTS)
if p not in pp.keys():
p = pp.keys()[0]
port = pp[p]
self.server_host.setText( host )
self.server_port.setText( port )
self.set_protocol(p)
def server_changed(self, x):
if x:
self.change_server(str(x.text(0)), self.protocol)
def change_server(self, host, protocol):
pp = self.servers.get(host, DEFAULT_PORTS)
if protocol:
port = pp.get(protocol)
if not port: protocol = None
if not protocol:
if 's' in pp.keys():
protocol = 's'
port = pp.get(protocol)
else:
protocol = pp.keys()[0]
port = pp.get(protocol)
self.server_host.setText( host )
self.server_port.setText( port )
self.server_protocol.setCurrentIndex(protocol_letters.index(protocol))
if not self.servers: return
for p in protocol_letters:
i = protocol_letters.index(p)
j = self.server_protocol.model().index(i,0)
#if p not in pp.keys(): # and self.interface.is_connected:
# self.server_protocol.model().setData(j, QVariant(0), Qt.UserRole-1)
#else:
# self.server_protocol.model().setData(j, QVariant(33), Qt.UserRole-1)
def do_exec(self):
if not self.exec_():
return
host = str( self.server_host.text() )
port = str( self.server_port.text() )
protocol = protocol_letters[self.server_protocol.currentIndex()]
if self.proxy_mode.currentText() != 'NONE':
proxy = { 'mode':str(self.proxy_mode.currentText()).lower(),
'host':str(self.proxy_host.text()),
'port':str(self.proxy_port.text()) }
else:
proxy = None
auto_connect = self.autocycle_cb.isChecked()
self.network.set_parameters(host, port, protocol, proxy, auto_connect)
return True
|
Nebucatnetzer/tamagotchi | refs/heads/master | pygame/lib/python3.4/site-packages/pip/_vendor/colorama/win32.py | 535 | # Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file.
# from winbase.h
STDOUT = -11
STDERR = -12
try:
import ctypes
from ctypes import LibraryLoader
windll = LibraryLoader(ctypes.WinDLL)
from ctypes import wintypes
except (AttributeError, ImportError):
windll = None
SetConsoleTextAttribute = lambda *_: None
winapi_test = lambda *_: None
else:
from ctypes import byref, Structure, c_char, POINTER
COORD = wintypes._COORD
class CONSOLE_SCREEN_BUFFER_INFO(Structure):
"""struct in wincon.h."""
_fields_ = [
("dwSize", COORD),
("dwCursorPosition", COORD),
("wAttributes", wintypes.WORD),
("srWindow", wintypes.SMALL_RECT),
("dwMaximumWindowSize", COORD),
]
def __str__(self):
return '(%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d)' % (
self.dwSize.Y, self.dwSize.X
, self.dwCursorPosition.Y, self.dwCursorPosition.X
, self.wAttributes
, self.srWindow.Top, self.srWindow.Left, self.srWindow.Bottom, self.srWindow.Right
, self.dwMaximumWindowSize.Y, self.dwMaximumWindowSize.X
)
_GetStdHandle = windll.kernel32.GetStdHandle
_GetStdHandle.argtypes = [
wintypes.DWORD,
]
_GetStdHandle.restype = wintypes.HANDLE
_GetConsoleScreenBufferInfo = windll.kernel32.GetConsoleScreenBufferInfo
_GetConsoleScreenBufferInfo.argtypes = [
wintypes.HANDLE,
POINTER(CONSOLE_SCREEN_BUFFER_INFO),
]
_GetConsoleScreenBufferInfo.restype = wintypes.BOOL
_SetConsoleTextAttribute = windll.kernel32.SetConsoleTextAttribute
_SetConsoleTextAttribute.argtypes = [
wintypes.HANDLE,
wintypes.WORD,
]
_SetConsoleTextAttribute.restype = wintypes.BOOL
_SetConsoleCursorPosition = windll.kernel32.SetConsoleCursorPosition
_SetConsoleCursorPosition.argtypes = [
wintypes.HANDLE,
COORD,
]
_SetConsoleCursorPosition.restype = wintypes.BOOL
_FillConsoleOutputCharacterA = windll.kernel32.FillConsoleOutputCharacterA
_FillConsoleOutputCharacterA.argtypes = [
wintypes.HANDLE,
c_char,
wintypes.DWORD,
COORD,
POINTER(wintypes.DWORD),
]
_FillConsoleOutputCharacterA.restype = wintypes.BOOL
_FillConsoleOutputAttribute = windll.kernel32.FillConsoleOutputAttribute
_FillConsoleOutputAttribute.argtypes = [
wintypes.HANDLE,
wintypes.WORD,
wintypes.DWORD,
COORD,
POINTER(wintypes.DWORD),
]
_FillConsoleOutputAttribute.restype = wintypes.BOOL
_SetConsoleTitleW = windll.kernel32.SetConsoleTitleA
_SetConsoleTitleW.argtypes = [
wintypes.LPCSTR
]
_SetConsoleTitleW.restype = wintypes.BOOL
handles = {
STDOUT: _GetStdHandle(STDOUT),
STDERR: _GetStdHandle(STDERR),
}
def winapi_test():
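        # Probe the console API: GetConsoleScreenBufferInfo only succeeds
        # when stdout is attached to a real Win32 console.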
handle = handles[STDOUT]
csbi = CONSOLE_SCREEN_BUFFER_INFO()
success = _GetConsoleScreenBufferInfo(
handle, byref(csbi))
return bool(success)
def GetConsoleScreenBufferInfo(stream_id=STDOUT):
handle = handles[stream_id]
csbi = CONSOLE_SCREEN_BUFFER_INFO()
success = _GetConsoleScreenBufferInfo(
handle, byref(csbi))
return csbi
def SetConsoleTextAttribute(stream_id, attrs):
handle = handles[stream_id]
return _SetConsoleTextAttribute(handle, attrs)
def SetConsoleCursorPosition(stream_id, position, adjust=True):
position = COORD(*position)
# If the position is out of range, do nothing.
if position.Y <= 0 or position.X <= 0:
return
# Adjust for Windows' SetConsoleCursorPosition:
# 1. being 0-based, while ANSI is 1-based.
# 2. expecting (x,y), while ANSI uses (y,x).
adjusted_position = COORD(position.Y - 1, position.X - 1)
if adjust:
# Adjust for viewport's scroll position
sr = GetConsoleScreenBufferInfo(STDOUT).srWindow
adjusted_position.Y += sr.Top
adjusted_position.X += sr.Left
# Resume normal processing
handle = handles[stream_id]
return _SetConsoleCursorPosition(handle, adjusted_position)
def FillConsoleOutputCharacter(stream_id, char, length, start):
handle = handles[stream_id]
char = c_char(char.encode())
length = wintypes.DWORD(length)
num_written = wintypes.DWORD(0)
# Note that this is hard-coded for ANSI (vs wide) bytes.
success = _FillConsoleOutputCharacterA(
handle, char, length, start, byref(num_written))
return num_written.value
def FillConsoleOutputAttribute(stream_id, attr, length, start):
''' FillConsoleOutputAttribute( hConsole, csbi.wAttributes, dwConSize, coordScreen, &cCharsWritten )'''
handle = handles[stream_id]
attribute = wintypes.WORD(attr)
length = wintypes.DWORD(length)
num_written = wintypes.DWORD(0)
# Note that this is hard-coded for ANSI (vs wide) bytes.
return _FillConsoleOutputAttribute(
handle, attribute, length, start, byref(num_written))
def SetConsoleTitle(title):
return _SetConsoleTitleW(title)
|
loulich/Couchpotato | refs/heads/master | couchpotato/core/media/movie/providers/userscript/trakt.py | 37 | from couchpotato.core.media._base.providers.userscript.base import UserscriptBase
autoload = 'Trakt'
class Trakt(UserscriptBase):
version = 2
includes = ['*://trakt.tv/movies/*', '*://*.trakt.tv/movies/*']
excludes = ['*://trakt.tv/movies/*/*', '*://*.trakt.tv/movies/*/*']
|
ocadotechnology/django-tastypie | refs/heads/master | tests/gis/api/resources.py | 58 | from django.contrib.auth.models import User
from tastypie import fields
from tastypie.resources import ALL
from tastypie.contrib.gis.resources import ModelResource
from tastypie.authorization import Authorization
from gis.models import GeoNote
class UserResource(ModelResource):
class Meta:
resource_name = 'users'
queryset = User.objects.all()
authorization = Authorization()
class GeoNoteResource(ModelResource):
user = fields.ForeignKey(UserResource, 'user')
class Meta:
resource_name = 'geonotes'
queryset = GeoNote.objects.all()
authorization = Authorization()
filtering = {
'points': ALL,
'lines': ALL,
'polys': ALL,
}
|
cyberden/CouchPotatoServer | refs/heads/develop | libs/suds/bindings/binding.py | 192 | # This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( [email protected] )
"""
Provides classes for (WS) SOAP bindings.
"""
from logging import getLogger
from suds import *
from suds.sax import Namespace
from suds.sax.parser import Parser
from suds.sax.document import Document
from suds.sax.element import Element
from suds.sudsobject import Factory, Object
from suds.mx import Content
from suds.mx.literal import Literal as MxLiteral
from suds.umx.basic import Basic as UmxBasic
from suds.umx.typed import Typed as UmxTyped
from suds.bindings.multiref import MultiRef
from suds.xsd.query import TypeQuery, ElementQuery
from suds.xsd.sxbasic import Element as SchemaElement
from suds.options import Options
from suds.plugin import PluginContainer
from copy import deepcopy
log = getLogger(__name__)
envns = ('SOAP-ENV', 'http://schemas.xmlsoap.org/soap/envelope/')
class Binding:
"""
    The soap binding class used to process outgoing and incoming
soap messages per the WSDL port binding.
@cvar replyfilter: The reply filter function.
@type replyfilter: (lambda s,r: r)
@ivar wsdl: The wsdl.
@type wsdl: L{suds.wsdl.Definitions}
@ivar schema: The collective schema contained within the wsdl.
@type schema: L{xsd.schema.Schema}
    @ivar options: A dictionary of options.
@type options: L{Options}
"""
replyfilter = (lambda s,r: r)
def __init__(self, wsdl):
"""
@param wsdl: A wsdl.
@type wsdl: L{wsdl.Definitions}
"""
self.wsdl = wsdl
self.multiref = MultiRef()
def schema(self):
return self.wsdl.schema
def options(self):
return self.wsdl.options
def unmarshaller(self, typed=True):
"""
Get the appropriate XML decoder.
@return: Either the (basic|typed) unmarshaller.
@rtype: L{UmxTyped}
"""
if typed:
return UmxTyped(self.schema())
else:
return UmxBasic()
def marshaller(self):
"""
Get the appropriate XML encoder.
@return: An L{MxLiteral} marshaller.
@rtype: L{MxLiteral}
"""
return MxLiteral(self.schema(), self.options().xstq)
def param_defs(self, method):
"""
Get parameter definitions.
Each I{pdef} is a tuple (I{name}, L{xsd.sxbase.SchemaObject})
        @param method: A service method.
@type method: I{service.Method}
@return: A collection of parameter definitions
@rtype: [I{pdef},..]
"""
raise Exception, 'not implemented'
def get_message(self, method, args, kwargs):
"""
Get the soap message for the specified method, args and soapheaders.
This is the entry point for creating the outbound soap message.
@param method: The method being invoked.
@type method: I{service.Method}
@param args: A list of args for the method invoked.
@type args: list
@param kwargs: Named (keyword) args for the method invoked.
@type kwargs: dict
@return: The soap envelope.
@rtype: L{Document}
"""
content = self.headercontent(method)
header = self.header(content)
content = self.bodycontent(method, args, kwargs)
body = self.body(content)
env = self.envelope(header, body)
if self.options().prefixes:
body.normalizePrefixes()
env.promotePrefixes()
else:
env.refitPrefixes()
return Document(env)
def get_reply(self, method, reply):
"""
Process the I{reply} for the specified I{method} by sax parsing the I{reply}
and then unmarshalling into python object(s).
@param method: The name of the invoked method.
@type method: str
@param reply: The reply XML received after invoking the specified method.
@type reply: str
        @return: The unmarshalled reply. The returned value is an L{Object} or a
        I{list} depending on whether the service returns a single object or a
collection.
@rtype: tuple ( L{Element}, L{Object} )
"""
reply = self.replyfilter(reply)
sax = Parser()
replyroot = sax.parse(string=reply)
plugins = PluginContainer(self.options().plugins)
plugins.message.parsed(reply=replyroot)
soapenv = replyroot.getChild('Envelope')
soapenv.promotePrefixes()
soapbody = soapenv.getChild('Body')
self.detect_fault(soapbody)
soapbody = self.multiref.process(soapbody)
nodes = self.replycontent(method, soapbody)
rtypes = self.returned_types(method)
if len(rtypes) > 1:
result = self.replycomposite(rtypes, nodes)
return (replyroot, result)
if len(rtypes) == 1:
if rtypes[0].unbounded():
result = self.replylist(rtypes[0], nodes)
return (replyroot, result)
if len(nodes):
unmarshaller = self.unmarshaller()
resolved = rtypes[0].resolve(nobuiltin=True)
result = unmarshaller.process(nodes[0], resolved)
return (replyroot, result)
return (replyroot, None)
def detect_fault(self, body):
"""
Detect I{hidden} soapenv:Fault element in the soap body.
@param body: The soap envelope body.
@type body: L{Element}
@raise WebFault: When found.
"""
fault = body.getChild('Fault', envns)
if fault is None:
return
unmarshaller = self.unmarshaller(False)
p = unmarshaller.process(fault)
if self.options().faults:
raise WebFault(p, fault)
return self
def replylist(self, rt, nodes):
"""
        Construct a I{list} reply. This method is called when it has been detected
that the reply is a list.
@param rt: The return I{type}.
@type rt: L{suds.xsd.sxbase.SchemaObject}
@param nodes: A collection of XML nodes.
@type nodes: [L{Element},...]
@return: A list of I{unmarshalled} objects.
@rtype: [L{Object},...]
"""
result = []
resolved = rt.resolve(nobuiltin=True)
unmarshaller = self.unmarshaller()
for node in nodes:
sobject = unmarshaller.process(node, resolved)
result.append(sobject)
return result
def replycomposite(self, rtypes, nodes):
"""
Construct a I{composite} reply. This method is called when it has been
detected that the reply has multiple root nodes.
@param rtypes: A list of known return I{types}.
@type rtypes: [L{suds.xsd.sxbase.SchemaObject},...]
@param nodes: A collection of XML nodes.
@type nodes: [L{Element},...]
@return: The I{unmarshalled} composite object.
@rtype: L{Object},...
"""
dictionary = {}
for rt in rtypes:
dictionary[rt.name] = rt
unmarshaller = self.unmarshaller()
composite = Factory.object('reply')
for node in nodes:
tag = node.name
rt = dictionary.get(tag, None)
if rt is None:
if node.get('id') is None:
raise Exception('<%s/> not mapped to message part' % tag)
else:
continue
resolved = rt.resolve(nobuiltin=True)
sobject = unmarshaller.process(node, resolved)
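            # A tag may appear more than once; repeats are collected into a
            # list attribute, and unbounded types always get a list even for
            # a single occurrence.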
value = getattr(composite, tag, None)
if value is None:
if rt.unbounded():
value = []
setattr(composite, tag, value)
value.append(sobject)
else:
setattr(composite, tag, sobject)
else:
if not isinstance(value, list):
value = [value,]
setattr(composite, tag, value)
value.append(sobject)
return composite
def get_fault(self, reply):
"""
Extract the fault from the specified soap reply. If I{faults} is True, an
exception is raised. Otherwise, the I{unmarshalled} fault L{Object} is
returned. This method is called when the server raises a I{web fault}.
@param reply: A soap reply message.
@type reply: str
@return: A fault object.
@rtype: tuple ( L{Element}, L{Object} )
"""
reply = self.replyfilter(reply)
sax = Parser()
faultroot = sax.parse(string=reply)
soapenv = faultroot.getChild('Envelope')
soapbody = soapenv.getChild('Body')
fault = soapbody.getChild('Fault')
unmarshaller = self.unmarshaller(False)
p = unmarshaller.process(fault)
if self.options().faults:
raise WebFault(p, faultroot)
return (faultroot, p.detail)
def mkparam(self, method, pdef, object):
"""
Builds a parameter for the specified I{method} using the parameter
definition (pdef) and the specified value (object).
@param method: A method name.
@type method: str
@param pdef: A parameter definition.
@type pdef: tuple: (I{name}, L{xsd.sxbase.SchemaObject})
@param object: The parameter value.
@type object: any
@return: The parameter fragment.
@rtype: L{Element}
"""
marshaller = self.marshaller()
content = \
Content(tag=pdef[0],
value=object,
type=pdef[1],
real=pdef[1].resolve())
return marshaller.process(content)
def mkheader(self, method, hdef, object):
"""
Builds a soapheader for the specified I{method} using the header
definition (hdef) and the specified value (object).
@param method: A method name.
@type method: str
@param hdef: A header definition.
@type hdef: tuple: (I{name}, L{xsd.sxbase.SchemaObject})
@param object: The header value.
@type object: any
@return: The parameter fragment.
@rtype: L{Element}
"""
marshaller = self.marshaller()
if isinstance(object, (list, tuple)):
tags = []
for item in object:
tags.append(self.mkheader(method, hdef, item))
return tags
content = Content(tag=hdef[0], value=object, type=hdef[1])
return marshaller.process(content)
def envelope(self, header, body):
"""
        Build the B{<Envelope/>} for a soap outbound message.
@param header: The soap message B{header}.
@type header: L{Element}
@param body: The soap message B{body}.
@type body: L{Element}
@return: The soap envelope containing the body and header.
@rtype: L{Element}
"""
env = Element('Envelope', ns=envns)
env.addPrefix(Namespace.xsins[0], Namespace.xsins[1])
env.append(header)
env.append(body)
return env
def header(self, content):
"""
        Build the B{<Header/>} for a soap outbound message.
@param content: The header content.
@type content: L{Element}
@return: the soap body fragment.
@rtype: L{Element}
"""
header = Element('Header', ns=envns)
header.append(content)
return header
def bodycontent(self, method, args, kwargs):
"""
Get the content for the soap I{body} node.
@param method: A service method.
@type method: I{service.Method}
@param args: method parameter values
@type args: list
@param kwargs: Named (keyword) args for the method invoked.
@type kwargs: dict
@return: The xml content for the <body/>
@rtype: [L{Element},..]
"""
raise Exception, 'not implemented'
def headercontent(self, method):
"""
Get the content for the soap I{Header} node.
@param method: A service method.
@type method: I{service.Method}
        @return: The xml content for the <Header/>
@rtype: [L{Element},..]
"""
n = 0
content = []
wsse = self.options().wsse
if wsse is not None:
content.append(wsse.xml())
headers = self.options().soapheaders
if not isinstance(headers, (tuple,list,dict)):
headers = (headers,)
if len(headers) == 0:
return content
pts = self.headpart_types(method)
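        # soapheaders given as a sequence are matched to header parts by
        # position; a dict is matched by part name.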
if isinstance(headers, (tuple,list)):
for header in headers:
if isinstance(header, Element):
content.append(deepcopy(header))
continue
if len(pts) == n: break
h = self.mkheader(method, pts[n], header)
ns = pts[n][1].namespace('ns0')
h.setPrefix(ns[0], ns[1])
content.append(h)
n += 1
else:
for pt in pts:
header = headers.get(pt[0])
if header is None:
continue
h = self.mkheader(method, pt, header)
ns = pt[1].namespace('ns0')
h.setPrefix(ns[0], ns[1])
content.append(h)
return content
def replycontent(self, method, body):
"""
Get the reply body content.
@param method: A service method.
@type method: I{service.Method}
@param body: The soap body
@type body: L{Element}
@return: the body content
@rtype: [L{Element},...]
"""
raise Exception, 'not implemented'
def body(self, content):
"""
        Build the B{<Body/>} for a soap outbound message.
@param content: The body content.
@type content: L{Element}
@return: the soap body fragment.
@rtype: L{Element}
"""
body = Element('Body', ns=envns)
body.append(content)
return body
def bodypart_types(self, method, input=True):
"""
Get a list of I{parameter definitions} (pdef) defined for the specified method.
Each I{pdef} is a tuple (I{name}, L{xsd.sxbase.SchemaObject})
@param method: A service method.
@type method: I{service.Method}
@param input: Defines input/output message.
@type input: boolean
@return: A list of parameter definitions
@rtype: [I{pdef},]
"""
result = []
if input:
parts = method.soap.input.body.parts
else:
parts = method.soap.output.body.parts
for p in parts:
if p.element is not None:
query = ElementQuery(p.element)
else:
query = TypeQuery(p.type)
pt = query.execute(self.schema())
if pt is None:
raise TypeNotFound(query.ref)
if p.type is not None:
pt = PartElement(p.name, pt)
if input:
if pt.name is None:
result.append((p.name, pt))
else:
result.append((pt.name, pt))
else:
result.append(pt)
return result
def headpart_types(self, method, input=True):
"""
Get a list of I{parameter definitions} (pdef) defined for the specified method.
Each I{pdef} is a tuple (I{name}, L{xsd.sxbase.SchemaObject})
@param method: A service method.
@type method: I{service.Method}
@param input: Defines input/output message.
@type input: boolean
@return: A list of parameter definitions
@rtype: [I{pdef},]
"""
result = []
if input:
headers = method.soap.input.headers
else:
headers = method.soap.output.headers
for header in headers:
part = header.part
if part.element is not None:
query = ElementQuery(part.element)
else:
query = TypeQuery(part.type)
pt = query.execute(self.schema())
if pt is None:
raise TypeNotFound(query.ref)
if part.type is not None:
pt = PartElement(part.name, pt)
if input:
if pt.name is None:
result.append((part.name, pt))
else:
result.append((pt.name, pt))
else:
result.append(pt)
return result
def returned_types(self, method):
"""
Get the L{xsd.sxbase.SchemaObject} returned by the I{method}.
@param method: A service method.
@type method: I{service.Method}
@return: The name of the type return by the method.
@rtype: [I{rtype},..]
"""
result = []
for rt in self.bodypart_types(method, input=False):
result.append(rt)
return result
class PartElement(SchemaElement):
"""
A part used to represent a message part when the part
references a schema type and thus assumes to be an element.
@ivar resolved: The part type.
@type resolved: L{suds.xsd.sxbase.SchemaObject}
"""
def __init__(self, name, resolved):
"""
@param name: The part name.
@type name: str
@param resolved: The part type.
@type resolved: L{suds.xsd.sxbase.SchemaObject}
"""
root = Element('element', ns=Namespace.xsdns)
SchemaElement.__init__(self, resolved.schema, root)
self.__resolved = resolved
self.name = name
self.form_qualified = False
def implany(self):
return self
def optional(self):
return True
def namespace(self, prefix=None):
return Namespace.default
def resolve(self, nobuiltin=False):
if nobuiltin and self.__resolved.builtin():
return self
else:
return self.__resolved
|
ryfeus/lambda-packs | refs/heads/master | LightGBM_sklearn_scipy_numpy/source/sklearn/externals/joblib/_memory_helpers.py | 52 | try:
# Available in Python 3
from tokenize import open as open_py_source
except ImportError:
# Copied from python3 tokenize
from codecs import lookup, BOM_UTF8
import re
from io import TextIOWrapper, open
cookie_re = re.compile("coding[:=]\s*([-\w.]+)")
def _get_normal_name(orig_enc):
"""Imitates get_normal_name in tokenizer.c."""
# Only care about the first 12 characters.
enc = orig_enc[:12].lower().replace("_", "-")
if enc == "utf-8" or enc.startswith("utf-8-"):
return "utf-8"
if enc in ("latin-1", "iso-8859-1", "iso-latin-1") or \
enc.startswith(("latin-1-", "iso-8859-1-", "iso-latin-1-")):
return "iso-8859-1"
return orig_enc
def _detect_encoding(readline):
"""
The detect_encoding() function is used to detect the encoding that
should be used to decode a Python source file. It requires one
argment, readline, in the same way as the tokenize() generator.
It will call readline a maximum of twice, and return the encoding used
(as a string) and a list of any lines (left as bytes) it has read in.
It detects the encoding from the presence of a utf-8 bom or an encoding
cookie as specified in pep-0263. If both a bom and a cookie are
present, but disagree, a SyntaxError will be raised. If the encoding
cookie is an invalid charset, raise a SyntaxError. Note that if a
utf-8 bom is found, 'utf-8-sig' is returned.
If no encoding is specified, then the default of 'utf-8' will be
returned.
"""
bom_found = False
encoding = None
default = 'utf-8'
def read_or_stop():
try:
return readline()
except StopIteration:
return b''
def find_cookie(line):
try:
line_string = line.decode('ascii')
except UnicodeDecodeError:
return None
matches = cookie_re.findall(line_string)
if not matches:
return None
encoding = _get_normal_name(matches[0])
try:
codec = lookup(encoding)
except LookupError:
# This behaviour mimics the Python interpreter
raise SyntaxError("unknown encoding: " + encoding)
if bom_found:
if codec.name != 'utf-8':
# This behaviour mimics the Python interpreter
raise SyntaxError('encoding problem: utf-8')
encoding += '-sig'
return encoding
first = read_or_stop()
if first.startswith(BOM_UTF8):
bom_found = True
first = first[3:]
default = 'utf-8-sig'
if not first:
return default, []
encoding = find_cookie(first)
if encoding:
return encoding, [first]
second = read_or_stop()
if not second:
return default, [first]
encoding = find_cookie(second)
if encoding:
return encoding, [first, second]
return default, [first, second]
def open_py_source(filename):
"""Open a file in read only mode using the encoding detected by
detect_encoding().
"""
buffer = open(filename, 'rb')
encoding, lines = _detect_encoding(buffer.readline)
buffer.seek(0)
text = TextIOWrapper(buffer, encoding, line_buffering=True)
text.mode = 'r'
return text
|
sbidoul/buildbot | refs/heads/master | master/buildbot/test/unit/test_data_forceschedulers.py | 8 | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from __future__ import absolute_import
from __future__ import print_function
from twisted.internet import defer
from twisted.trial import unittest
from buildbot.data import forceschedulers
from buildbot.schedulers.forcesched import ForceScheduler
from buildbot.test.util import endpoint
expected_default = {
'all_fields': [{'columns': 1,
'default': '',
'fields': [{'default': 'force build',
'fullName': 'reason',
'hide': False,
'label': 'reason',
'multiple': False,
'name': 'reason',
'regex': None,
'required': False,
'size': 20,
'tablabel': 'reason',
'type': 'text'},
{'default': '',
'fullName': 'username',
'hide': False,
'label': 'Your name:',
'multiple': False,
'name': 'username',
'need_email': True,
'regex': None,
'required': False,
'size': 30,
'tablabel': 'Your name:',
'type': 'username'}],
'fullName': None,
'hide': False,
'label': '',
'layout': 'vertical',
'multiple': False,
'name': '',
'regex': None,
'required': False,
'tablabel': '',
'type': 'nested'},
{'columns': 2,
'default': '',
'fields': [{'default': '',
'fullName': 'branch',
'hide': False,
'label': 'Branch:',
'multiple': False,
'name': 'branch',
'regex': None,
'required': False,
'size': 10,
'tablabel': 'Branch:',
'type': 'text'},
{'default': '',
'fullName': 'project',
'hide': False,
'label': 'Project:',
'multiple': False,
'name': 'project',
'regex': None,
'required': False,
'size': 10,
'tablabel': 'Project:',
'type': 'text'},
{'default': '',
'fullName': 'repository',
'hide': False,
'label': 'Repository:',
'multiple': False,
'name': 'repository',
'regex': None,
'required': False,
'size': 10,
'tablabel': 'Repository:',
'type': 'text'},
{'default': '',
'fullName': 'revision',
'hide': False,
'label': 'Revision:',
'multiple': False,
'name': 'revision',
'regex': None,
'required': False,
'size': 10,
'tablabel': 'Revision:',
'type': 'text'}],
'fullName': None,
'hide': False,
'label': '',
'layout': 'vertical',
'multiple': False,
'name': '',
'regex': None,
'required': False,
'tablabel': '',
'type': 'nested'}],
'builder_names': [u'builder'],
'button_name': u'defaultforce',
'label': u'defaultforce',
'name': u'defaultforce',
'enabled': True}
class ForceschedulerEndpoint(endpoint.EndpointMixin, unittest.TestCase):
endpointClass = forceschedulers.ForceSchedulerEndpoint
resourceTypeClass = forceschedulers.ForceScheduler
def setUp(self):
self.setUpEndpoint()
scheds = [ForceScheduler(
name="defaultforce",
builderNames=["builder"])]
self.master.allSchedulers = lambda: scheds
def tearDown(self):
self.tearDownEndpoint()
@defer.inlineCallbacks
def test_get_existing(self):
res = yield self.callGet(('forceschedulers', "defaultforce"))
self.validateData(res)
self.assertEqual(res, expected_default)
@defer.inlineCallbacks
def test_get_missing(self):
res = yield self.callGet(('forceschedulers', 'foo'))
self.assertEqual(res, None)
class ForceSchedulersEndpoint(endpoint.EndpointMixin, unittest.TestCase):
endpointClass = forceschedulers.ForceSchedulersEndpoint
resourceTypeClass = forceschedulers.ForceScheduler
def setUp(self):
self.setUpEndpoint()
scheds = [ForceScheduler(
name="defaultforce",
builderNames=["builder"])]
self.master.allSchedulers = lambda: scheds
def tearDown(self):
self.tearDownEndpoint()
@defer.inlineCallbacks
def test_get_existing(self):
res = yield self.callGet(('forceschedulers', ))
self.assertEqual(res, [expected_default])
|
biomodels/MODEL1172940336 | refs/heads/master | setup.py | 1 | from setuptools import setup, find_packages
setup(name='MODEL1172940336',
version=20140916,
description='MODEL1172940336 from BioModels',
url='http://www.ebi.ac.uk/biomodels-main/MODEL1172940336',
maintainer='Stanley Gu',
      maintainer_email='[email protected]',
packages=find_packages(),
package_data={'': ['*.xml', 'README.md']},
) |
MalloyPower/parsing-python | refs/heads/master | front-end/testsuite-python-lib/Python-2.6/Lib/test/test_slice.py | 56 | # tests for slice objects; in particular the indices method.
import unittest
from test import test_support
from cPickle import loads, dumps
import sys
class SliceTest(unittest.TestCase):
def test_constructor(self):
self.assertRaises(TypeError, slice)
self.assertRaises(TypeError, slice, 1, 2, 3, 4)
def test_repr(self):
self.assertEqual(repr(slice(1, 2, 3)), "slice(1, 2, 3)")
def test_hash(self):
# Verify clearing of SF bug #800796
self.assertRaises(TypeError, hash, slice(5))
self.assertRaises(TypeError, slice(5).__hash__)
def test_cmp(self):
s1 = slice(1, 2, 3)
s2 = slice(1, 2, 3)
s3 = slice(1, 2, 4)
self.assertEqual(s1, s2)
self.assertNotEqual(s1, s3)
class Exc(Exception):
pass
class BadCmp(object):
def __eq__(self, other):
raise Exc
__hash__ = None # Silence Py3k warning
s1 = slice(BadCmp())
s2 = slice(BadCmp())
self.assertRaises(Exc, cmp, s1, s2)
self.assertEqual(s1, s1)
s1 = slice(1, BadCmp())
s2 = slice(1, BadCmp())
self.assertEqual(s1, s1)
self.assertRaises(Exc, cmp, s1, s2)
s1 = slice(1, 2, BadCmp())
s2 = slice(1, 2, BadCmp())
self.assertEqual(s1, s1)
self.assertRaises(Exc, cmp, s1, s2)
def test_members(self):
s = slice(1)
self.assertEqual(s.start, None)
self.assertEqual(s.stop, 1)
self.assertEqual(s.step, None)
s = slice(1, 2)
self.assertEqual(s.start, 1)
self.assertEqual(s.stop, 2)
self.assertEqual(s.step, None)
s = slice(1, 2, 3)
self.assertEqual(s.start, 1)
self.assertEqual(s.stop, 2)
self.assertEqual(s.step, 3)
class AnyClass:
pass
obj = AnyClass()
s = slice(obj)
self.assert_(s.stop is obj)
def test_indices(self):
self.assertEqual(slice(None ).indices(10), (0, 10, 1))
self.assertEqual(slice(None, None, 2).indices(10), (0, 10, 2))
self.assertEqual(slice(1, None, 2).indices(10), (1, 10, 2))
self.assertEqual(slice(None, None, -1).indices(10), (9, -1, -1))
self.assertEqual(slice(None, None, -2).indices(10), (9, -1, -2))
self.assertEqual(slice(3, None, -2).indices(10), (3, -1, -2))
# issue 3004 tests
self.assertEqual(slice(None, -9).indices(10), (0, 1, 1))
self.assertEqual(slice(None, -10).indices(10), (0, 0, 1))
self.assertEqual(slice(None, -11).indices(10), (0, 0, 1))
self.assertEqual(slice(None, -10, -1).indices(10), (9, 0, -1))
self.assertEqual(slice(None, -11, -1).indices(10), (9, -1, -1))
self.assertEqual(slice(None, -12, -1).indices(10), (9, -1, -1))
self.assertEqual(slice(None, 9).indices(10), (0, 9, 1))
self.assertEqual(slice(None, 10).indices(10), (0, 10, 1))
self.assertEqual(slice(None, 11).indices(10), (0, 10, 1))
self.assertEqual(slice(None, 8, -1).indices(10), (9, 8, -1))
self.assertEqual(slice(None, 9, -1).indices(10), (9, 9, -1))
self.assertEqual(slice(None, 10, -1).indices(10), (9, 9, -1))
self.assertEqual(
slice(-100, 100 ).indices(10),
slice(None).indices(10)
)
self.assertEqual(
slice(100, -100, -1).indices(10),
slice(None, None, -1).indices(10)
)
self.assertEqual(slice(-100L, 100L, 2L).indices(10), (0, 10, 2))
self.assertEqual(range(10)[::sys.maxint - 1], [0])
self.assertRaises(OverflowError, slice(None).indices, 1L<<100)
def test_setslice_without_getslice(self):
tmp = []
class X(object):
def __setslice__(self, i, j, k):
tmp.append((i, j, k))
x = X()
x[1:2] = 42
self.assertEquals(tmp, [(1, 2, 42)])
def test_pickle(self):
s = slice(10, 20, 3)
for protocol in (0,1,2):
t = loads(dumps(s, protocol))
self.assertEqual(s, t)
self.assertEqual(s.indices(15), t.indices(15))
self.assertNotEqual(id(s), id(t))
def test_main():
test_support.run_unittest(SliceTest)
if __name__ == "__main__":
test_main()
|
steveb/heat | refs/heads/master | heat/common/cache.py | 4 | #
# Copyright 2015 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The code related to integration between oslo.cache module and heat."""
from oslo_cache import core
from oslo_config import cfg
from heat.common.i18n import _
def register_cache_configurations(conf):
"""Register all configurations required for oslo.cache.
The procedure registers all configurations required for oslo.cache.
It should be called before configuring of cache region
:param conf: instance of heat configuration
:returns: updated heat configuration
"""
# register global configurations for caching in heat
core.configure(conf)
# register heat specific configurations
constraint_cache_group = cfg.OptGroup('constraint_validation_cache')
constraint_cache_opts = [
cfg.IntOpt('expiration_time', default=60,
help=_(
'TTL, in seconds, for any cached item in the '
'dogpile.cache region used for caching of validation '
'constraints.')),
cfg.BoolOpt("caching", default=True,
help=_(
'Toggle to enable/disable caching when Orchestration '
'Engine validates property constraints of stack.'
'During property validation with constraints '
'Orchestration Engine caches requests to other '
'OpenStack services. Please note that the global '
'toggle for oslo.cache(enabled=True in [cache] group) '
'must be enabled to use this feature.'))
]
conf.register_group(constraint_cache_group)
conf.register_opts(constraint_cache_opts, group=constraint_cache_group)
extension_cache_group = cfg.OptGroup('service_extension_cache')
extension_cache_opts = [
cfg.IntOpt('expiration_time', default=3600,
help=_(
'TTL, in seconds, for any cached item in the '
'dogpile.cache region used for caching of service '
'extensions.')),
cfg.BoolOpt('caching', default=True,
help=_(
'Toggle to enable/disable caching when Orchestration '
'Engine retrieves extensions from other OpenStack '
'services. Please note that the global toggle for '
'oslo.cache(enabled=True in [cache] group) must be '
'enabled to use this feature.'))
]
conf.register_group(extension_cache_group)
conf.register_opts(extension_cache_opts, group=extension_cache_group)
find_cache_group = cfg.OptGroup('resource_finder_cache')
find_cache_opts = [
cfg.IntOpt('expiration_time', default=3600,
help=_(
'TTL, in seconds, for any cached item in the '
'dogpile.cache region used for caching of OpenStack '
'service finder functions.')),
cfg.BoolOpt('caching', default=True,
help=_(
'Toggle to enable/disable caching when Orchestration '
'Engine looks for other OpenStack service resources '
'using name or id. Please note that the global '
'toggle for oslo.cache(enabled=True in [cache] group) '
'must be enabled to use this feature.'))
]
conf.register_group(find_cache_group)
conf.register_opts(find_cache_opts, group=find_cache_group)
return conf
# variable that stores an initialized cache region for heat
_REGION = None
def get_cache_region():
global _REGION
if not _REGION:
_REGION = core.configure_cache_region(
conf=register_cache_configurations(cfg.CONF),
region=core.create_region())
return _REGION
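# Usage sketch (illustrative only, not part of heat's public API): callers
# typically pair the region above with oslo.cache's memoization decorator,
# so the per-group 'caching' and 'expiration_time' options registered in
# register_cache_configurations() take effect. The helper name below is
# hypothetical.
def _example_memoization_decorator(group='service_extension_cache'):
    """Return a memoization decorator bound to heat's cache region."""
    return core.get_memoization_decorator(
        conf=cfg.CONF,
        region=get_cache_region(),
        group=group)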
|
madafoo/cjdns | refs/heads/master | node_build/dependencies/libuv/build/gyp/test/same-rule-output-file-name/src/touch.py | 679 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import sys
f = open(sys.argv[1], 'w+')
f.write('Hello from touch.py\n')
f.close()
|
libracore/erpnext | refs/heads/v12 | erpnext/setup/doctype/party_type/party_type.py | 10 | # -*- coding: utf-8 -*-
# Copyright (c) 2015, Frappe Technologies and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class PartyType(Document):
pass
@frappe.whitelist()
def get_party_type(doctype, txt, searchfield, start, page_len, filters):
cond = ''
if filters and filters.get('account'):
account_type = frappe.db.get_value('Account', filters.get('account'), 'account_type')
cond = "and account_type = '%s'" % account_type
return frappe.db.sql("""select name from `tabParty Type`
where `{key}` LIKE %(txt)s {cond}
order by name limit %(start)s, %(page_len)s"""
.format(key=searchfield, cond=cond), {
'txt': '%' + txt + '%',
'start': start, 'page_len': page_len
})
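# Usage sketch (illustrative): the whitelisted method above usually backs a
# Link-field query, but it can also be called directly from server code or
# tests, e.g.
#
#     get_party_type('Party Type', txt='Cust', searchfield='name',
#                    start=0, page_len=20,
#                    filters={'account': 'Debtors - _TC'})
#
# The account name here is hypothetical; when an account filter is given,
# it is resolved to an account_type and only matching party types are
# returned.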
|
Serag8/Bachelor | refs/heads/master | google_appengine/lib/webapp2-2.3/webapp2_extras/config.py | 67 | # -*- coding: utf-8 -*-
"""
webapp2_extras.config
=====================
Configuration object for webapp2.
This module is deprecated. See :class:`webapp2.WSGIApplication.config`.
:copyright: 2011 by tipfy.org.
    :license: Apache Software License, see LICENSE for details.
"""
from __future__ import absolute_import
import warnings
import webapp2
warnings.warn(DeprecationWarning(
'webapp2_extras.config is deprecated. '
'The WSGIApplication uses webapp2.Config instead.'),
stacklevel=1)
#: Value used for missing default values.
DEFAULT_VALUE = object()
#: Value used for required values.
REQUIRED_VALUE = object()
class Config(dict):
"""A simple configuration dictionary keyed by module name. This is a
dictionary of dictionaries. It requires all values to be dictionaries
and applies updates and default values to the inner dictionaries instead
of the first level one.
The configuration object can be set as a ``config`` attribute of
:class:`WSGIApplication`::
import webapp2
from webapp2_extras import config as webapp2_config
my_config = {}
my_config['my.module'] = {
'foo': 'bar',
}
app = webapp2.WSGIApplication(routes=[
webapp2.Route('/', name='home', handler=MyHandler)
])
app.config = webapp2_config.Config(my_config)
Then to read configuration values, get them from the app::
class MyHandler(RequestHandler):
def get(self):
foo = self.app.config['my.module']['foo']
# ...
"""
#: Loaded module configurations.
loaded = None
def __init__(self, values=None, defaults=None):
"""Initializes the configuration object.
:param values:
A dictionary of configuration dictionaries for modules.
:param defaults:
A dictionary of configuration dictionaries for initial default
values. These modules are marked as loaded.
"""
self.loaded = []
if values is not None:
assert isinstance(values, dict)
for module, config in values.iteritems():
self.update(module, config)
if defaults is not None:
assert isinstance(defaults, dict)
for module, config in defaults.iteritems():
self.setdefault(module, config)
self.loaded.append(module)
def __getitem__(self, module):
"""Returns the configuration for a module. If it is not already
set, loads a ``default_config`` variable from the given module and
        updates the configuration with those default values.
Every module that allows some kind of configuration sets a
``default_config`` global variable that is loaded by this function,
cached and used in case the requested configuration was not defined
by the user.
:param module:
The module name.
:returns:
A configuration value.
"""
if module not in self.loaded:
# Load default configuration and update config.
values = webapp2.import_string(module + '.default_config',
silent=True)
if values:
self.setdefault(module, values)
self.loaded.append(module)
try:
return dict.__getitem__(self, module)
except KeyError:
raise KeyError('Module %r is not configured.' % module)
def __setitem__(self, module, values):
"""Sets a configuration for a module, requiring it to be a dictionary.
:param module:
A module name for the configuration, e.g.: `webapp2.ext.i18n`.
:param values:
A dictionary of configurations for the module.
"""
assert isinstance(values, dict), 'Module configuration must be a dict.'
dict.__setitem__(self, module, SubConfig(module, values))
def get(self, module, default=DEFAULT_VALUE):
"""Returns a configuration for a module. If default is not provided,
returns an empty dict if the module is not configured.
:param module:
The module name.
:params default:
Default value to return if the module is not configured. If not
set, returns an empty dict.
:returns:
A module configuration.
"""
if default is DEFAULT_VALUE:
default = {}
return dict.get(self, module, default)
def setdefault(self, module, values):
"""Sets a default configuration dictionary for a module.
:param module:
The module to set default configuration, e.g.: `webapp2.ext.i18n`.
:param values:
A dictionary of configurations for the module.
:returns:
The module configuration dictionary.
"""
assert isinstance(values, dict), 'Module configuration must be a dict.'
if module not in self:
dict.__setitem__(self, module, SubConfig(module))
module_dict = dict.__getitem__(self, module)
for key, value in values.iteritems():
module_dict.setdefault(key, value)
return module_dict
def update(self, module, values):
"""Updates the configuration dictionary for a module.
:param module:
The module to update the configuration, e.g.: `webapp2.ext.i18n`.
:param values:
A dictionary of configurations for the module.
"""
assert isinstance(values, dict), 'Module configuration must be a dict.'
if module not in self:
dict.__setitem__(self, module, SubConfig(module))
dict.__getitem__(self, module).update(values)
def get_config(self, module, key=None, default=REQUIRED_VALUE):
"""Returns a configuration value for a module and optionally a key.
        Will raise a KeyError if the module is not configured or the key
doesn't exist and a default is not provided.
:param module:
The module name.
:params key:
The configuration key.
:param default:
Default value to return if the key doesn't exist.
:returns:
A module configuration.
"""
module_dict = self.__getitem__(module)
if key is None:
return module_dict
return module_dict.get(key, default)
class SubConfig(dict):
def __init__(self, module, values=None):
dict.__init__(self, values or ())
self.module = module
def __getitem__(self, key):
try:
value = dict.__getitem__(self, key)
except KeyError:
raise KeyError('Module %r does not have the config key %r' %
(self.module, key))
if value is REQUIRED_VALUE:
raise KeyError('Module %r requires the config key %r to be '
'set.' % (self.module, key))
return value
def get(self, key, default=None):
if key not in self:
value = default
else:
value = dict.__getitem__(self, key)
if value is REQUIRED_VALUE:
raise KeyError('Module %r requires the config key %r to be '
'set.' % (self.module, key))
return value
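# Usage sketch (illustrative): a module can mark configuration keys as
# required via its ``default_config``; reading such a key before the user
# sets it raises a KeyError with a descriptive message.
#
#     default_config = {
#         'secret_key': REQUIRED_VALUE,
#         'locale': 'en_US',
#     }
#     config = Config({'my.module': {'locale': 'pt_BR'}},
#                     defaults={'my.module': default_config})
#     config['my.module']['locale']      # -> 'pt_BR' (user value wins)
#     config['my.module']['secret_key']  # raises KeyError: must be set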
|
MikeMcShaffry/gamecode3 | refs/heads/master | Dev/Source/3rdParty/bullet-2.73/Extras/LibXML/check-xml-test-suite.py | 347 | #!/usr/bin/python
import sys
import time
import os
import string
sys.path.insert(0, "python")
import libxml2
test_nr = 0
test_succeed = 0
test_failed = 0
test_error = 0
#
# the testsuite description
#
CONF="xml-test-suite/xmlconf/xmlconf.xml"
LOG="check-xml-test-suite.log"
log = open(LOG, "w")
#
# Error and warning handlers
#
error_nr = 0
error_msg = ''
def errorHandler(ctx, str):
global error_nr
global error_msg
error_nr = error_nr + 1
if len(error_msg) < 300:
if len(error_msg) == 0 or error_msg[-1] == '\n':
error_msg = error_msg + " >>" + str
else:
error_msg = error_msg + str
libxml2.registerErrorHandler(errorHandler, None)
#warning_nr = 0
#warning = ''
#def warningHandler(ctx, str):
# global warning_nr
# global warning
#
# warning_nr = warning_nr + 1
# warning = warning + str
#
#libxml2.registerWarningHandler(warningHandler, None)
#
# Used to load the XML testsuite description
#
def loadNoentDoc(filename):
ctxt = libxml2.createFileParserCtxt(filename)
if ctxt == None:
return None
ctxt.replaceEntities(1)
ctxt.parseDocument()
try:
doc = ctxt.doc()
except:
doc = None
if ctxt.wellFormed() != 1:
doc.freeDoc()
return None
return doc
#
# The conformance testing routines
#
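# Each test* routine below returns 1 (passed), 2 (passed with a warning),
# 0 (conformance failure) or -1 (the harness could not run the test);
# runTest() counts both 1 and 2 as successes.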
def testNotWf(filename, id):
global error_nr
global error_msg
global log
error_nr = 0
error_msg = ''
ctxt = libxml2.createFileParserCtxt(filename)
if ctxt == None:
return -1
ret = ctxt.parseDocument()
try:
doc = ctxt.doc()
except:
doc = None
if doc != None:
doc.freeDoc()
if ret == 0 or ctxt.wellFormed() != 0:
print "%s: error: Well Formedness error not detected" % (id)
log.write("%s: error: Well Formedness error not detected\n" % (id))
return 0
return 1
def testNotWfEnt(filename, id):
global error_nr
global error_msg
global log
error_nr = 0
error_msg = ''
ctxt = libxml2.createFileParserCtxt(filename)
if ctxt == None:
return -1
ctxt.replaceEntities(1)
ret = ctxt.parseDocument()
try:
doc = ctxt.doc()
except:
doc = None
if doc != None:
doc.freeDoc()
if ret == 0 or ctxt.wellFormed() != 0:
print "%s: error: Well Formedness error not detected" % (id)
log.write("%s: error: Well Formedness error not detected\n" % (id))
return 0
return 1
def testNotWfEntDtd(filename, id):
global error_nr
global error_msg
global log
error_nr = 0
error_msg = ''
ctxt = libxml2.createFileParserCtxt(filename)
if ctxt == None:
return -1
ctxt.replaceEntities(1)
ctxt.loadSubset(1)
ret = ctxt.parseDocument()
try:
doc = ctxt.doc()
except:
doc = None
if doc != None:
doc.freeDoc()
if ret == 0 or ctxt.wellFormed() != 0:
print "%s: error: Well Formedness error not detected" % (id)
log.write("%s: error: Well Formedness error not detected\n" % (id))
return 0
return 1
def testWfEntDtd(filename, id):
global error_nr
global error_msg
global log
error_nr = 0
error_msg = ''
ctxt = libxml2.createFileParserCtxt(filename)
if ctxt == None:
return -1
ctxt.replaceEntities(1)
ctxt.loadSubset(1)
ret = ctxt.parseDocument()
try:
doc = ctxt.doc()
except:
doc = None
if doc == None or ret != 0 or ctxt.wellFormed() == 0:
print "%s: error: wrongly failed to parse the document" % (id)
log.write("%s: error: wrongly failed to parse the document\n" % (id))
if doc != None:
doc.freeDoc()
return 0
if error_nr != 0:
print "%s: warning: WF document generated an error msg" % (id)
log.write("%s: error: WF document generated an error msg\n" % (id))
doc.freeDoc()
return 2
doc.freeDoc()
return 1
def testError(filename, id):
global error_nr
global error_msg
global log
error_nr = 0
error_msg = ''
ctxt = libxml2.createFileParserCtxt(filename)
if ctxt == None:
return -1
ctxt.replaceEntities(1)
ctxt.loadSubset(1)
ret = ctxt.parseDocument()
try:
doc = ctxt.doc()
except:
doc = None
if doc != None:
doc.freeDoc()
if ctxt.wellFormed() == 0:
print "%s: warning: failed to parse the document but accepted" % (id)
log.write("%s: warning: failed to parse the document but accepte\n" % (id))
return 2
if error_nr != 0:
print "%s: warning: WF document generated an error msg" % (id)
log.write("%s: error: WF document generated an error msg\n" % (id))
return 2
return 1
def testInvalid(filename, id):
global error_nr
global error_msg
global log
error_nr = 0
error_msg = ''
ctxt = libxml2.createFileParserCtxt(filename)
if ctxt == None:
return -1
ctxt.validate(1)
ret = ctxt.parseDocument()
try:
doc = ctxt.doc()
except:
doc = None
valid = ctxt.isValid()
if doc == None:
print "%s: error: wrongly failed to parse the document" % (id)
log.write("%s: error: wrongly failed to parse the document\n" % (id))
return 0
if valid == 1:
print "%s: error: Validity error not detected" % (id)
log.write("%s: error: Validity error not detected\n" % (id))
doc.freeDoc()
return 0
if error_nr == 0:
print "%s: warning: Validity error not reported" % (id)
log.write("%s: warning: Validity error not reported\n" % (id))
doc.freeDoc()
return 2
doc.freeDoc()
return 1
def testValid(filename, id):
global error_nr
global error_msg
error_nr = 0
error_msg = ''
ctxt = libxml2.createFileParserCtxt(filename)
if ctxt == None:
return -1
ctxt.validate(1)
ctxt.parseDocument()
try:
doc = ctxt.doc()
except:
doc = None
valid = ctxt.isValid()
if doc == None:
print "%s: error: wrongly failed to parse the document" % (id)
log.write("%s: error: wrongly failed to parse the document\n" % (id))
return 0
if valid != 1:
print "%s: error: Validity check failed" % (id)
log.write("%s: error: Validity check failed\n" % (id))
doc.freeDoc()
return 0
if error_nr != 0 or valid != 1:
print "%s: warning: valid document reported an error" % (id)
log.write("%s: warning: valid document reported an error\n" % (id))
doc.freeDoc()
return 2
doc.freeDoc()
return 1
def runTest(test):
global test_nr
global test_succeed
global test_failed
global error_msg
global log
uri = test.prop('URI')
id = test.prop('ID')
if uri == None:
print "Test without ID:", uri
return -1
if id == None:
print "Test without URI:", id
return -1
base = test.getBase(None)
URI = libxml2.buildURI(uri, base)
if os.access(URI, os.R_OK) == 0:
print "Test %s missing: base %s uri %s" % (URI, base, uri)
return -1
type = test.prop('TYPE')
if type == None:
print "Test %s missing TYPE" % (id)
return -1
extra = None
if type == "invalid":
res = testInvalid(URI, id)
elif type == "valid":
res = testValid(URI, id)
elif type == "not-wf":
extra = test.prop('ENTITIES')
# print URI
#if extra == None:
# res = testNotWfEntDtd(URI, id)
#elif extra == 'none':
# res = testNotWf(URI, id)
#elif extra == 'general':
# res = testNotWfEnt(URI, id)
#elif extra == 'both' or extra == 'parameter':
res = testNotWfEntDtd(URI, id)
#else:
# print "Unknow value %s for an ENTITIES test value" % (extra)
# return -1
elif type == "error":
res = testError(URI, id)
else:
# TODO skipped for now
return -1
test_nr = test_nr + 1
if res > 0:
test_succeed = test_succeed + 1
elif res == 0:
test_failed = test_failed + 1
elif res < 0:
test_error = test_error + 1
    # Log the context
if res != 1:
log.write(" File: %s\n" % (URI))
content = string.strip(test.content)
while content[-1] == '\n':
content = content[0:-1]
if extra != None:
log.write(" %s:%s:%s\n" % (type, extra, content))
else:
log.write(" %s:%s\n\n" % (type, content))
if error_msg != '':
log.write(" ----\n%s ----\n" % (error_msg))
error_msg = ''
log.write("\n")
return 0
def runTestCases(case):
profile = case.prop('PROFILE')
if profile != None and \
string.find(profile, "IBM XML Conformance Test Suite - Production") < 0:
print "=>", profile
test = case.children
while test != None:
if test.name == 'TEST':
runTest(test)
if test.name == 'TESTCASES':
runTestCases(test)
test = test.next
conf = loadNoentDoc(CONF)
if conf == None:
print "Unable to load %s" % CONF
sys.exit(1)
testsuite = conf.getRootElement()
if testsuite.name != 'TESTSUITE':
print "Expecting TESTSUITE root element: aborting"
sys.exit(1)
profile = testsuite.prop('PROFILE')
if profile != None:
print profile
start = time.time()
case = testsuite.children
while case != None:
if case.name == 'TESTCASES':
old_test_nr = test_nr
old_test_succeed = test_succeed
old_test_failed = test_failed
old_test_error = test_error
runTestCases(case)
print " Ran %d tests: %d suceeded, %d failed and %d generated an error" % (
test_nr - old_test_nr, test_succeed - old_test_succeed,
test_failed - old_test_failed, test_error - old_test_error)
case = case.next
conf.freeDoc()
log.close()
print "Ran %d tests: %d suceeded, %d failed and %d generated an error in %.2f s." % (
test_nr, test_succeed, test_failed, test_error, time.time() - start)
|
jsoref/django | refs/heads/master | tests/forms_tests/tests/test_fields.py | 64 | # -*- coding: utf-8 -*-
"""
##########
# Fields #
##########
Each Field class does some sort of validation. Each Field has a clean() method,
which either raises django.forms.ValidationError or returns the "clean"
data -- usually a Unicode object, but, in some rare cases, a list.
Each Field's __init__() takes at least these parameters:
required -- Boolean that specifies whether the field is required.
True by default.
widget -- A Widget class, or instance of a Widget class, that should be
used for this Field when displaying it. Each Field has a default
Widget that it'll use if you don't specify this. In most cases,
the default widget is TextInput.
label -- A verbose name for this field, for use in displaying this field in
a form. By default, Django will use a "pretty" version of the form
field name, if the Field is part of a Form.
initial -- A value to use in this Field's initial display. This value is
*not* used as a fallback if data isn't given.
Other than that, the Field subclasses have class-specific options for
__init__(). For example, CharField has a max_length option.
"""
from __future__ import unicode_literals
import datetime
import os
import pickle
import re
import uuid
from decimal import Decimal
from unittest import skipIf
from django.core.files.uploadedfile import SimpleUploadedFile
from django.forms import (
BooleanField, CharField, ChoiceField, ComboField, DateField, DateTimeField,
DecimalField, DurationField, EmailField, Field, FileField, FilePathField,
FloatField, Form, GenericIPAddressField, HiddenInput, ImageField,
IntegerField, MultipleChoiceField, NullBooleanField, NumberInput,
PasswordInput, RadioSelect, RegexField, SlugField, SplitDateTimeField,
Textarea, TextInput, TimeField, TypedChoiceField, TypedMultipleChoiceField,
URLField, UUIDField, ValidationError, Widget, forms,
)
from django.test import SimpleTestCase
from django.utils import formats, six, translation
from django.utils._os import upath
from django.utils.duration import duration_string
try:
from PIL import Image
except ImportError:
Image = None
def fix_os_paths(x):
if isinstance(x, six.string_types):
return x.replace('\\', '/')
elif isinstance(x, tuple):
return tuple(fix_os_paths(list(x)))
elif isinstance(x, list):
return [fix_os_paths(y) for y in x]
else:
return x
class FieldsTests(SimpleTestCase):
def assertWidgetRendersTo(self, field, to):
class _Form(Form):
f = field
self.assertHTMLEqual(str(_Form()['f']), to)
def test_field_sets_widget_is_required(self):
self.assertTrue(Field(required=True).widget.is_required)
self.assertFalse(Field(required=False).widget.is_required)
def test_cooperative_multiple_inheritance(self):
class A(object):
def __init__(self):
self.class_a_var = True
super(A, self).__init__()
class ComplexField(Field, A):
def __init__(self):
super(ComplexField, self).__init__()
f = ComplexField()
self.assertTrue(f.class_a_var)
# CharField ###################################################################
def test_charfield_1(self):
f = CharField()
self.assertEqual('1', f.clean(1))
self.assertEqual('hello', f.clean('hello'))
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, None)
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, '')
self.assertEqual('[1, 2, 3]', f.clean([1, 2, 3]))
self.assertEqual(f.max_length, None)
self.assertEqual(f.min_length, None)
def test_charfield_2(self):
f = CharField(required=False)
self.assertEqual('1', f.clean(1))
self.assertEqual('hello', f.clean('hello'))
self.assertEqual('', f.clean(None))
self.assertEqual('', f.clean(''))
self.assertEqual('[1, 2, 3]', f.clean([1, 2, 3]))
self.assertEqual(f.max_length, None)
self.assertEqual(f.min_length, None)
def test_charfield_3(self):
f = CharField(max_length=10, required=False)
self.assertEqual('12345', f.clean('12345'))
self.assertEqual('1234567890', f.clean('1234567890'))
msg = "'Ensure this value has at most 10 characters (it has 11).'"
with self.assertRaisesMessage(ValidationError, msg):
f.clean('1234567890a')
self.assertEqual(f.max_length, 10)
self.assertEqual(f.min_length, None)
def test_charfield_4(self):
f = CharField(min_length=10, required=False)
self.assertEqual('', f.clean(''))
msg = "'Ensure this value has at least 10 characters (it has 5).'"
with self.assertRaisesMessage(ValidationError, msg):
f.clean('12345')
self.assertEqual('1234567890', f.clean('1234567890'))
self.assertEqual('1234567890a', f.clean('1234567890a'))
self.assertEqual(f.max_length, None)
self.assertEqual(f.min_length, 10)
def test_charfield_5(self):
f = CharField(min_length=10, required=True)
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, '')
msg = "'Ensure this value has at least 10 characters (it has 5).'"
with self.assertRaisesMessage(ValidationError, msg):
f.clean('12345')
self.assertEqual('1234567890', f.clean('1234567890'))
self.assertEqual('1234567890a', f.clean('1234567890a'))
self.assertEqual(f.max_length, None)
self.assertEqual(f.min_length, 10)
def test_charfield_length_not_int(self):
"""
Ensure that setting min_length or max_length to something that is not a
        number raises an exception.
"""
self.assertRaises(ValueError, CharField, min_length='a')
self.assertRaises(ValueError, CharField, max_length='a')
self.assertRaises(ValueError, CharField, 'a')
def test_charfield_widget_attrs(self):
"""
Ensure that CharField.widget_attrs() always returns a dictionary.
Refs #15912
"""
# Return an empty dictionary if max_length is None
f = CharField()
self.assertEqual(f.widget_attrs(TextInput()), {})
self.assertEqual(f.widget_attrs(Textarea()), {})
# Otherwise, return a maxlength attribute equal to max_length
f = CharField(max_length=10)
self.assertEqual(f.widget_attrs(TextInput()), {'maxlength': '10'})
self.assertEqual(f.widget_attrs(PasswordInput()), {'maxlength': '10'})
self.assertEqual(f.widget_attrs(Textarea()), {'maxlength': '10'})
def test_charfield_strip(self):
"""
Ensure that values have whitespace stripped and that strip=False works.
"""
f = CharField()
self.assertEqual(f.clean(' 1'), '1')
self.assertEqual(f.clean('1 '), '1')
f = CharField(strip=False)
self.assertEqual(f.clean(' 1'), ' 1')
self.assertEqual(f.clean('1 '), '1 ')
def test_charfield_disabled(self):
f = CharField(disabled=True)
self.assertWidgetRendersTo(f, '<input type="text" name="f" id="id_f" disabled />')
# IntegerField ################################################################
def test_integerfield_1(self):
f = IntegerField()
self.assertWidgetRendersTo(f, '<input type="number" name="f" id="id_f" />')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, '')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, None)
self.assertEqual(1, f.clean('1'))
self.assertIsInstance(f.clean('1'), int)
self.assertEqual(23, f.clean('23'))
self.assertRaisesMessage(ValidationError, "'Enter a whole number.'", f.clean, 'a')
self.assertEqual(42, f.clean(42))
self.assertRaisesMessage(ValidationError, "'Enter a whole number.'", f.clean, 3.14)
self.assertEqual(1, f.clean('1 '))
self.assertEqual(1, f.clean(' 1'))
self.assertEqual(1, f.clean(' 1 '))
self.assertRaisesMessage(ValidationError, "'Enter a whole number.'", f.clean, '1a')
self.assertEqual(f.max_value, None)
self.assertEqual(f.min_value, None)
def test_integerfield_2(self):
f = IntegerField(required=False)
self.assertIsNone(f.clean(''))
self.assertEqual('None', repr(f.clean('')))
self.assertIsNone(f.clean(None))
self.assertEqual('None', repr(f.clean(None)))
self.assertEqual(1, f.clean('1'))
self.assertIsInstance(f.clean('1'), int)
self.assertEqual(23, f.clean('23'))
self.assertRaisesMessage(ValidationError, "'Enter a whole number.'", f.clean, 'a')
self.assertEqual(1, f.clean('1 '))
self.assertEqual(1, f.clean(' 1'))
self.assertEqual(1, f.clean(' 1 '))
self.assertRaisesMessage(ValidationError, "'Enter a whole number.'", f.clean, '1a')
self.assertEqual(f.max_value, None)
self.assertEqual(f.min_value, None)
def test_integerfield_3(self):
f = IntegerField(max_value=10)
self.assertWidgetRendersTo(f, '<input max="10" type="number" name="f" id="id_f" />')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, None)
self.assertEqual(1, f.clean(1))
self.assertEqual(10, f.clean(10))
self.assertRaisesMessage(ValidationError, "'Ensure this value is less than or equal to 10.'", f.clean, 11)
self.assertEqual(10, f.clean('10'))
self.assertRaisesMessage(ValidationError, "'Ensure this value is less than or equal to 10.'", f.clean, '11')
self.assertEqual(f.max_value, 10)
self.assertEqual(f.min_value, None)
def test_integerfield_4(self):
f = IntegerField(min_value=10)
self.assertWidgetRendersTo(f, '<input id="id_f" type="number" name="f" min="10" />')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, None)
self.assertRaisesMessage(ValidationError, "'Ensure this value is greater than or equal to 10.'", f.clean, 1)
self.assertEqual(10, f.clean(10))
self.assertEqual(11, f.clean(11))
self.assertEqual(10, f.clean('10'))
self.assertEqual(11, f.clean('11'))
self.assertEqual(f.max_value, None)
self.assertEqual(f.min_value, 10)
def test_integerfield_5(self):
f = IntegerField(min_value=10, max_value=20)
self.assertWidgetRendersTo(f, '<input id="id_f" max="20" type="number" name="f" min="10" />')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, None)
self.assertRaisesMessage(ValidationError, "'Ensure this value is greater than or equal to 10.'", f.clean, 1)
self.assertEqual(10, f.clean(10))
self.assertEqual(11, f.clean(11))
self.assertEqual(10, f.clean('10'))
self.assertEqual(11, f.clean('11'))
self.assertEqual(20, f.clean(20))
self.assertRaisesMessage(ValidationError, "'Ensure this value is less than or equal to 20.'", f.clean, 21)
self.assertEqual(f.max_value, 20)
self.assertEqual(f.min_value, 10)
def test_integerfield_localized(self):
"""
Make sure localized IntegerField's widget renders to a text input with
no number input specific attributes.
"""
f1 = IntegerField(localize=True)
self.assertWidgetRendersTo(f1, '<input id="id_f" name="f" type="text" />')
def test_integerfield_float(self):
f = IntegerField()
self.assertEqual(1, f.clean(1.0))
self.assertEqual(1, f.clean('1.0'))
self.assertEqual(1, f.clean(' 1.0 '))
self.assertEqual(1, f.clean('1.'))
self.assertEqual(1, f.clean(' 1. '))
self.assertRaisesMessage(ValidationError, "'Enter a whole number.'", f.clean, '1.5')
self.assertRaisesMessage(ValidationError, "'Enter a whole number.'", f.clean, '…')
def test_integerfield_big_num(self):
f = IntegerField()
self.assertEqual(9223372036854775808, f.clean(9223372036854775808))
self.assertEqual(9223372036854775808, f.clean('9223372036854775808'))
self.assertEqual(9223372036854775808, f.clean('9223372036854775808.0'))
def test_integerfield_subclass(self):
"""
Test that class-defined widget is not overwritten by __init__ (#22245).
"""
class MyIntegerField(IntegerField):
widget = Textarea
f = MyIntegerField()
self.assertEqual(f.widget.__class__, Textarea)
f = MyIntegerField(localize=True)
self.assertEqual(f.widget.__class__, Textarea)
# FloatField ##################################################################
def test_floatfield_1(self):
f = FloatField()
self.assertWidgetRendersTo(f, '<input step="any" type="number" name="f" id="id_f" />')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, '')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, None)
self.assertEqual(1.0, f.clean('1'))
self.assertIsInstance(f.clean('1'), float)
self.assertEqual(23.0, f.clean('23'))
self.assertEqual(3.1400000000000001, f.clean('3.14'))
self.assertEqual(3.1400000000000001, f.clean(3.14))
self.assertEqual(42.0, f.clean(42))
self.assertRaisesMessage(ValidationError, "'Enter a number.'", f.clean, 'a')
self.assertEqual(1.0, f.clean('1.0 '))
self.assertEqual(1.0, f.clean(' 1.0'))
self.assertEqual(1.0, f.clean(' 1.0 '))
self.assertRaisesMessage(ValidationError, "'Enter a number.'", f.clean, '1.0a')
self.assertEqual(f.max_value, None)
self.assertEqual(f.min_value, None)
self.assertRaisesMessage(ValidationError, "'Enter a number.'", f.clean, 'Infinity')
self.assertRaisesMessage(ValidationError, "'Enter a number.'", f.clean, 'NaN')
self.assertRaisesMessage(ValidationError, "'Enter a number.'", f.clean, '-Inf')
def test_floatfield_2(self):
f = FloatField(required=False)
self.assertIsNone(f.clean(''))
self.assertIsNone(f.clean(None))
self.assertEqual(1.0, f.clean('1'))
self.assertEqual(f.max_value, None)
self.assertEqual(f.min_value, None)
def test_floatfield_3(self):
f = FloatField(max_value=1.5, min_value=0.5)
self.assertWidgetRendersTo(f, '<input step="any" name="f" min="0.5" max="1.5" type="number" id="id_f" />')
self.assertRaisesMessage(ValidationError, "'Ensure this value is less than or equal to 1.5.'", f.clean, '1.6')
self.assertRaisesMessage(
ValidationError, "'Ensure this value is greater than or equal to 0.5.'",
f.clean, '0.4'
)
self.assertEqual(1.5, f.clean('1.5'))
self.assertEqual(0.5, f.clean('0.5'))
self.assertEqual(f.max_value, 1.5)
self.assertEqual(f.min_value, 0.5)
def test_floatfield_widget_attrs(self):
f = FloatField(widget=NumberInput(attrs={'step': 0.01, 'max': 1.0, 'min': 0.0}))
self.assertWidgetRendersTo(f, '<input step="0.01" name="f" min="0.0" max="1.0" type="number" id="id_f" />')
def test_floatfield_localized(self):
"""
Make sure localized FloatField's widget renders to a text input with
no number input specific attributes.
"""
f = FloatField(localize=True)
self.assertWidgetRendersTo(f, '<input id="id_f" name="f" type="text" />')
def test_floatfield_changed(self):
f = FloatField()
n = 4.35
self.assertFalse(f.has_changed(n, '4.3500'))
with translation.override('fr'), self.settings(USE_L10N=True):
f = FloatField(localize=True)
localized_n = formats.localize_input(n) # -> '4,35' in French
self.assertFalse(f.has_changed(n, localized_n))
# DecimalField ################################################################
def test_decimalfield_1(self):
f = DecimalField(max_digits=4, decimal_places=2)
self.assertWidgetRendersTo(f, '<input id="id_f" step="0.01" type="number" name="f" />')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, '')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, None)
self.assertEqual(f.clean('1'), Decimal("1"))
self.assertIsInstance(f.clean('1'), Decimal)
self.assertEqual(f.clean('23'), Decimal("23"))
self.assertEqual(f.clean('3.14'), Decimal("3.14"))
self.assertEqual(f.clean(3.14), Decimal("3.14"))
self.assertEqual(f.clean(Decimal('3.14')), Decimal("3.14"))
self.assertRaisesMessage(ValidationError, "'Enter a number.'", f.clean, 'NaN')
self.assertRaisesMessage(ValidationError, "'Enter a number.'", f.clean, 'Inf')
self.assertRaisesMessage(ValidationError, "'Enter a number.'", f.clean, '-Inf')
self.assertRaisesMessage(ValidationError, "'Enter a number.'", f.clean, 'a')
self.assertRaisesMessage(ValidationError, "'Enter a number.'", f.clean, 'łąść')
self.assertEqual(f.clean('1.0 '), Decimal("1.0"))
self.assertEqual(f.clean(' 1.0'), Decimal("1.0"))
self.assertEqual(f.clean(' 1.0 '), Decimal("1.0"))
self.assertRaisesMessage(ValidationError, "'Enter a number.'", f.clean, '1.0a')
self.assertRaisesMessage(
ValidationError, "'Ensure that there are no more than 4 digits in total.'",
f.clean, '123.45'
)
self.assertRaisesMessage(
ValidationError, "'Ensure that there are no more than 2 decimal places.'",
f.clean, '1.234'
)
self.assertRaisesMessage(
ValidationError, "'Ensure that there are no more than 2 digits before the decimal point.'",
f.clean, '123.4'
)
self.assertEqual(f.clean('-12.34'), Decimal("-12.34"))
self.assertRaisesMessage(
ValidationError, "'Ensure that there are no more than 4 digits in total.'",
f.clean, '-123.45'
)
self.assertEqual(f.clean('-.12'), Decimal("-0.12"))
self.assertEqual(f.clean('-00.12'), Decimal("-0.12"))
self.assertEqual(f.clean('-000.12'), Decimal("-0.12"))
self.assertRaisesMessage(
ValidationError, "'Ensure that there are no more than 2 decimal places.'",
f.clean, '-000.123'
)
self.assertRaisesMessage(
ValidationError, "'Ensure that there are no more than 4 digits in total.'",
f.clean, '-000.12345'
)
self.assertRaisesMessage(ValidationError, "'Enter a number.'", f.clean, '--0.12')
self.assertEqual(f.max_digits, 4)
self.assertEqual(f.decimal_places, 2)
self.assertEqual(f.max_value, None)
self.assertEqual(f.min_value, None)
def test_decimalfield_2(self):
f = DecimalField(max_digits=4, decimal_places=2, required=False)
self.assertIsNone(f.clean(''))
self.assertIsNone(f.clean(None))
self.assertEqual(f.clean('1'), Decimal("1"))
self.assertEqual(f.max_digits, 4)
self.assertEqual(f.decimal_places, 2)
self.assertEqual(f.max_value, None)
self.assertEqual(f.min_value, None)
def test_decimalfield_3(self):
f = DecimalField(max_digits=4, decimal_places=2, max_value=Decimal('1.5'), min_value=Decimal('0.5'))
self.assertWidgetRendersTo(f, '<input step="0.01" name="f" min="0.5" max="1.5" type="number" id="id_f" />')
self.assertRaisesMessage(ValidationError, "'Ensure this value is less than or equal to 1.5.'", f.clean, '1.6')
self.assertRaisesMessage(
ValidationError, "'Ensure this value is greater than or equal to 0.5.'",
f.clean, '0.4'
)
self.assertEqual(f.clean('1.5'), Decimal("1.5"))
self.assertEqual(f.clean('0.5'), Decimal("0.5"))
self.assertEqual(f.clean('.5'), Decimal("0.5"))
self.assertEqual(f.clean('00.50'), Decimal("0.50"))
self.assertEqual(f.max_digits, 4)
self.assertEqual(f.decimal_places, 2)
self.assertEqual(f.max_value, Decimal('1.5'))
self.assertEqual(f.min_value, Decimal('0.5'))
def test_decimalfield_4(self):
f = DecimalField(decimal_places=2)
self.assertRaisesMessage(
ValidationError, "'Ensure that there are no more than 2 decimal places.'",
f.clean, '0.00000001'
)
def test_decimalfield_5(self):
f = DecimalField(max_digits=3)
# Leading whole zeros "collapse" to one digit.
self.assertEqual(f.clean('0000000.10'), Decimal("0.1"))
# But a leading 0 before the . doesn't count towards max_digits
self.assertEqual(f.clean('0000000.100'), Decimal("0.100"))
# Only leading whole zeros "collapse" to one digit.
self.assertEqual(f.clean('000000.02'), Decimal('0.02'))
self.assertRaisesMessage(
ValidationError, "'Ensure that there are no more than 3 digits in total.'",
f.clean, '000000.0002'
)
self.assertEqual(f.clean('.002'), Decimal("0.002"))
def test_decimalfield_6(self):
f = DecimalField(max_digits=2, decimal_places=2)
self.assertEqual(f.clean('.01'), Decimal(".01"))
self.assertRaisesMessage(
ValidationError, "'Ensure that there are no more than 0 digits before the decimal point.'",
f.clean, '1.1'
)
def test_decimalfield_scientific(self):
f = DecimalField(max_digits=2, decimal_places=2)
self.assertEqual(f.clean('1E+2'), Decimal('1E+2'))
self.assertEqual(f.clean('1e+2'), Decimal('1E+2'))
with self.assertRaisesMessage(ValidationError, "Ensure that there are no more"):
f.clean('0.546e+2')
def test_decimalfield_widget_attrs(self):
f = DecimalField(max_digits=6, decimal_places=2)
self.assertEqual(f.widget_attrs(Widget()), {})
self.assertEqual(f.widget_attrs(NumberInput()), {'step': '0.01'})
f = DecimalField(max_digits=10, decimal_places=0)
self.assertEqual(f.widget_attrs(NumberInput()), {'step': '1'})
f = DecimalField(max_digits=19, decimal_places=19)
self.assertEqual(f.widget_attrs(NumberInput()), {'step': '1e-19'})
f = DecimalField(max_digits=20)
self.assertEqual(f.widget_attrs(NumberInput()), {'step': 'any'})
f = DecimalField(max_digits=6, widget=NumberInput(attrs={'step': '0.01'}))
self.assertWidgetRendersTo(f, '<input step="0.01" name="f" type="number" id="id_f" />')
def test_decimalfield_localized(self):
"""
Make sure localized DecimalField's widget renders to a text input with
no number input specific attributes.
"""
f = DecimalField(localize=True)
self.assertWidgetRendersTo(f, '<input id="id_f" name="f" type="text" />')
def test_decimalfield_changed(self):
f = DecimalField(max_digits=2, decimal_places=2)
d = Decimal("0.1")
self.assertFalse(f.has_changed(d, '0.10'))
self.assertTrue(f.has_changed(d, '0.101'))
with translation.override('fr'), self.settings(USE_L10N=True):
f = DecimalField(max_digits=2, decimal_places=2, localize=True)
localized_d = formats.localize_input(d) # -> '0,1' in French
self.assertFalse(f.has_changed(d, localized_d))
# DateField ###################################################################
def test_datefield_1(self):
f = DateField()
self.assertEqual(datetime.date(2006, 10, 25), f.clean(datetime.date(2006, 10, 25)))
self.assertEqual(datetime.date(2006, 10, 25), f.clean(datetime.datetime(2006, 10, 25, 14, 30)))
self.assertEqual(datetime.date(2006, 10, 25), f.clean(datetime.datetime(2006, 10, 25, 14, 30, 59)))
self.assertEqual(datetime.date(2006, 10, 25), f.clean(datetime.datetime(2006, 10, 25, 14, 30, 59, 200)))
self.assertEqual(datetime.date(2006, 10, 25), f.clean('2006-10-25'))
self.assertEqual(datetime.date(2006, 10, 25), f.clean('10/25/2006'))
self.assertEqual(datetime.date(2006, 10, 25), f.clean('10/25/06'))
self.assertEqual(datetime.date(2006, 10, 25), f.clean('Oct 25 2006'))
self.assertEqual(datetime.date(2006, 10, 25), f.clean('October 25 2006'))
self.assertEqual(datetime.date(2006, 10, 25), f.clean('October 25, 2006'))
self.assertEqual(datetime.date(2006, 10, 25), f.clean('25 October 2006'))
self.assertEqual(datetime.date(2006, 10, 25), f.clean('25 October, 2006'))
self.assertRaisesMessage(ValidationError, "'Enter a valid date.'", f.clean, '2006-4-31')
self.assertRaisesMessage(ValidationError, "'Enter a valid date.'", f.clean, '200a-10-25')
self.assertRaisesMessage(ValidationError, "'Enter a valid date.'", f.clean, '25/10/06')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, None)
def test_datefield_2(self):
f = DateField(required=False)
self.assertIsNone(f.clean(None))
self.assertEqual('None', repr(f.clean(None)))
self.assertIsNone(f.clean(''))
self.assertEqual('None', repr(f.clean('')))
def test_datefield_3(self):
f = DateField(input_formats=['%Y %m %d'])
self.assertEqual(datetime.date(2006, 10, 25), f.clean(datetime.date(2006, 10, 25)))
self.assertEqual(datetime.date(2006, 10, 25), f.clean(datetime.datetime(2006, 10, 25, 14, 30)))
self.assertEqual(datetime.date(2006, 10, 25), f.clean('2006 10 25'))
self.assertRaisesMessage(ValidationError, "'Enter a valid date.'", f.clean, '2006-10-25')
self.assertRaisesMessage(ValidationError, "'Enter a valid date.'", f.clean, '10/25/2006')
self.assertRaisesMessage(ValidationError, "'Enter a valid date.'", f.clean, '10/25/06')
def test_datefield_4(self):
# Test whitespace stripping behavior (#5714)
f = DateField()
self.assertEqual(datetime.date(2006, 10, 25), f.clean(' 10/25/2006 '))
self.assertEqual(datetime.date(2006, 10, 25), f.clean(' 10/25/06 '))
self.assertEqual(datetime.date(2006, 10, 25), f.clean(' Oct 25 2006 '))
self.assertEqual(datetime.date(2006, 10, 25), f.clean(' October 25 2006 '))
self.assertEqual(datetime.date(2006, 10, 25), f.clean(' October 25, 2006 '))
self.assertEqual(datetime.date(2006, 10, 25), f.clean(' 25 October 2006 '))
self.assertRaisesMessage(ValidationError, "'Enter a valid date.'", f.clean, ' ')
def test_datefield_5(self):
# Test null bytes (#18982)
f = DateField()
self.assertRaisesMessage(ValidationError, "'Enter a valid date.'", f.clean, 'a\x00b')
def test_datefield_changed(self):
format = '%d/%m/%Y'
f = DateField(input_formats=[format])
d = datetime.date(2007, 9, 17)
self.assertFalse(f.has_changed(d, '17/09/2007'))
def test_datefield_strptime(self):
"""Test that field.strptime doesn't raise an UnicodeEncodeError (#16123)"""
f = DateField()
try:
f.strptime('31 мая 2011', '%d-%b-%y')
except Exception as e:
# assertIsInstance or assertRaises cannot be used because UnicodeEncodeError
# is a subclass of ValueError
self.assertEqual(e.__class__, ValueError)
# TimeField ###################################################################
def test_timefield_1(self):
f = TimeField()
self.assertEqual(datetime.time(14, 25), f.clean(datetime.time(14, 25)))
self.assertEqual(datetime.time(14, 25, 59), f.clean(datetime.time(14, 25, 59)))
self.assertEqual(datetime.time(14, 25), f.clean('14:25'))
self.assertEqual(datetime.time(14, 25, 59), f.clean('14:25:59'))
self.assertRaisesMessage(ValidationError, "'Enter a valid time.'", f.clean, 'hello')
self.assertRaisesMessage(ValidationError, "'Enter a valid time.'", f.clean, '1:24 p.m.')
def test_timefield_2(self):
f = TimeField(input_formats=['%I:%M %p'])
self.assertEqual(datetime.time(14, 25), f.clean(datetime.time(14, 25)))
self.assertEqual(datetime.time(14, 25, 59), f.clean(datetime.time(14, 25, 59)))
self.assertEqual(datetime.time(4, 25), f.clean('4:25 AM'))
self.assertEqual(datetime.time(16, 25), f.clean('4:25 PM'))
self.assertRaisesMessage(ValidationError, "'Enter a valid time.'", f.clean, '14:30:45')
def test_timefield_3(self):
f = TimeField()
# Test whitespace stripping behavior (#5714)
self.assertEqual(datetime.time(14, 25), f.clean(' 14:25 '))
self.assertEqual(datetime.time(14, 25, 59), f.clean(' 14:25:59 '))
self.assertRaisesMessage(ValidationError, "'Enter a valid time.'", f.clean, ' ')
def test_timefield_changed(self):
t1 = datetime.time(12, 51, 34, 482548)
t2 = datetime.time(12, 51)
f = TimeField(input_formats=['%H:%M', '%H:%M %p'])
self.assertTrue(f.has_changed(t1, '12:51'))
self.assertFalse(f.has_changed(t2, '12:51'))
self.assertFalse(f.has_changed(t2, '12:51 PM'))
# DateTimeField ###############################################################
def test_datetimefield_1(self):
f = DateTimeField()
self.assertEqual(datetime.datetime(2006, 10, 25, 0, 0), f.clean(datetime.date(2006, 10, 25)))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30), f.clean(datetime.datetime(2006, 10, 25, 14, 30)))
self.assertEqual(
datetime.datetime(2006, 10, 25, 14, 30, 59),
f.clean(datetime.datetime(2006, 10, 25, 14, 30, 59))
)
self.assertEqual(
datetime.datetime(2006, 10, 25, 14, 30, 59, 200),
f.clean(datetime.datetime(2006, 10, 25, 14, 30, 59, 200))
)
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30, 45, 200), f.clean('2006-10-25 14:30:45.000200'))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30, 45, 200), f.clean('2006-10-25 14:30:45.0002'))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30, 45), f.clean('2006-10-25 14:30:45'))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30), f.clean('2006-10-25 14:30:00'))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30), f.clean('2006-10-25 14:30'))
self.assertEqual(datetime.datetime(2006, 10, 25, 0, 0), f.clean('2006-10-25'))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30, 45, 200), f.clean('10/25/2006 14:30:45.000200'))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30, 45), f.clean('10/25/2006 14:30:45'))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30), f.clean('10/25/2006 14:30:00'))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30), f.clean('10/25/2006 14:30'))
self.assertEqual(datetime.datetime(2006, 10, 25, 0, 0), f.clean('10/25/2006'))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30, 45, 200), f.clean('10/25/06 14:30:45.000200'))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30, 45), f.clean('10/25/06 14:30:45'))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30), f.clean('10/25/06 14:30:00'))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30), f.clean('10/25/06 14:30'))
self.assertEqual(datetime.datetime(2006, 10, 25, 0, 0), f.clean('10/25/06'))
self.assertRaisesMessage(ValidationError, "'Enter a valid date/time.'", f.clean, 'hello')
self.assertRaisesMessage(ValidationError, "'Enter a valid date/time.'", f.clean, '2006-10-25 4:30 p.m.')
def test_datetimefield_2(self):
f = DateTimeField(input_formats=['%Y %m %d %I:%M %p'])
self.assertEqual(datetime.datetime(2006, 10, 25, 0, 0), f.clean(datetime.date(2006, 10, 25)))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30), f.clean(datetime.datetime(2006, 10, 25, 14, 30)))
self.assertEqual(
datetime.datetime(2006, 10, 25, 14, 30, 59),
f.clean(datetime.datetime(2006, 10, 25, 14, 30, 59))
)
self.assertEqual(
datetime.datetime(2006, 10, 25, 14, 30, 59, 200),
f.clean(datetime.datetime(2006, 10, 25, 14, 30, 59, 200))
)
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30), f.clean('2006 10 25 2:30 PM'))
self.assertRaisesMessage(ValidationError, "'Enter a valid date/time.'", f.clean, '2006-10-25 14:30:45')
def test_datetimefield_3(self):
f = DateTimeField(required=False)
self.assertIsNone(f.clean(None))
self.assertEqual('None', repr(f.clean(None)))
self.assertIsNone(f.clean(''))
self.assertEqual('None', repr(f.clean('')))
def test_datetimefield_4(self):
f = DateTimeField()
# Test whitespace stripping behavior (#5714)
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30, 45), f.clean(' 2006-10-25 14:30:45 '))
self.assertEqual(datetime.datetime(2006, 10, 25, 0, 0), f.clean(' 2006-10-25 '))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30, 45), f.clean(' 10/25/2006 14:30:45 '))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30), f.clean(' 10/25/2006 14:30 '))
self.assertEqual(datetime.datetime(2006, 10, 25, 0, 0), f.clean(' 10/25/2006 '))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30, 45), f.clean(' 10/25/06 14:30:45 '))
self.assertEqual(datetime.datetime(2006, 10, 25, 0, 0), f.clean(' 10/25/06 '))
self.assertRaisesMessage(ValidationError, "'Enter a valid date/time.'", f.clean, ' ')
def test_datetimefield_5(self):
f = DateTimeField(input_formats=['%Y.%m.%d %H:%M:%S.%f'])
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30, 45, 200), f.clean('2006.10.25 14:30:45.0002'))
def test_datetimefield_changed(self):
format = '%Y %m %d %I:%M %p'
f = DateTimeField(input_formats=[format])
d = datetime.datetime(2006, 9, 17, 14, 30, 0)
self.assertFalse(f.has_changed(d, '2006 09 17 2:30 PM'))
# DurationField ###########################################################
def test_durationfield_1(self):
f = DurationField()
self.assertEqual(datetime.timedelta(seconds=30), f.clean('30'))
self.assertEqual(
datetime.timedelta(minutes=15, seconds=30),
f.clean('15:30')
)
self.assertEqual(
datetime.timedelta(hours=1, minutes=15, seconds=30),
f.clean('1:15:30')
)
self.assertEqual(
datetime.timedelta(
days=1, hours=1, minutes=15, seconds=30, milliseconds=300),
f.clean('1 1:15:30.3')
)
def test_durationfield_2(self):
class DurationForm(Form):
duration = DurationField(initial=datetime.timedelta(hours=1))
f = DurationForm()
self.assertHTMLEqual(
'<input id="id_duration" type="text" name="duration" value="01:00:00">',
str(f['duration'])
)
def test_durationfield_prepare_value(self):
field = DurationField()
td = datetime.timedelta(minutes=15, seconds=30)
self.assertEqual(field.prepare_value(td), duration_string(td))
self.assertEqual(field.prepare_value('arbitrary'), 'arbitrary')
self.assertIsNone(field.prepare_value(None))
# RegexField ##################################################################
def test_regexfield_1(self):
f = RegexField('^[0-9][A-F][0-9]$')
self.assertEqual('2A2', f.clean('2A2'))
self.assertEqual('3F3', f.clean('3F3'))
self.assertRaisesMessage(ValidationError, "'Enter a valid value.'", f.clean, '3G3')
self.assertRaisesMessage(ValidationError, "'Enter a valid value.'", f.clean, ' 2A2')
self.assertRaisesMessage(ValidationError, "'Enter a valid value.'", f.clean, '2A2 ')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, '')
def test_regexfield_2(self):
f = RegexField('^[0-9][A-F][0-9]$', required=False)
self.assertEqual('2A2', f.clean('2A2'))
self.assertEqual('3F3', f.clean('3F3'))
self.assertRaisesMessage(ValidationError, "'Enter a valid value.'", f.clean, '3G3')
self.assertEqual('', f.clean(''))
def test_regexfield_3(self):
f = RegexField(re.compile('^[0-9][A-F][0-9]$'))
self.assertEqual('2A2', f.clean('2A2'))
self.assertEqual('3F3', f.clean('3F3'))
self.assertRaisesMessage(ValidationError, "'Enter a valid value.'", f.clean, '3G3')
self.assertRaisesMessage(ValidationError, "'Enter a valid value.'", f.clean, ' 2A2')
self.assertRaisesMessage(ValidationError, "'Enter a valid value.'", f.clean, '2A2 ')
def test_regexfield_5(self):
f = RegexField('^[0-9]+$', min_length=5, max_length=10)
self.assertRaisesMessage(
ValidationError, "'Ensure this value has at least 5 characters (it has 3).'",
f.clean, '123'
)
six.assertRaisesRegex(
self, ValidationError,
"'Ensure this value has at least 5 characters \(it has 3\)\.',"
" u?'Enter a valid value\.'",
f.clean, 'abc'
)
self.assertEqual('12345', f.clean('12345'))
self.assertEqual('1234567890', f.clean('1234567890'))
self.assertRaisesMessage(
ValidationError, "'Ensure this value has at most 10 characters (it has 11).'",
f.clean, '12345678901'
)
self.assertRaisesMessage(ValidationError, "'Enter a valid value.'", f.clean, '12345a')
def test_regexfield_6(self):
"""
Ensure that it works with unicode characters.
Refs #.
"""
f = RegexField('^\w+$')
self.assertEqual('éèøçÎÎ你好', f.clean('éèøçÎÎ你好'))
def test_change_regex_after_init(self):
f = RegexField('^[a-z]+$')
f.regex = '^[0-9]+$'
self.assertEqual('1234', f.clean('1234'))
self.assertRaisesMessage(ValidationError, "'Enter a valid value.'", f.clean, 'abcd')
# EmailField ##################################################################
# See also validators tests for validate_email specific tests
def test_emailfield_1(self):
f = EmailField()
self.assertWidgetRendersTo(f, '<input type="email" name="f" id="id_f" />')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, '')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, None)
self.assertEqual('[email protected]', f.clean('[email protected]'))
self.assertRaisesMessage(ValidationError, "'Enter a valid email address.'", f.clean, 'foo')
self.assertEqual('[email protected]\xe4\xf6\xfc\xdfabc.part.com',
f.clean('[email protected]äöüßabc.part.com'))
def test_email_regexp_for_performance(self):
f = EmailField()
# Check for runaway regex security problem. This will take for-freeking-ever
# if the security fix isn't in place.
addr = '[email protected]'
self.assertEqual(addr, f.clean(addr))
def test_emailfield_not_required(self):
f = EmailField(required=False)
self.assertEqual('', f.clean(''))
self.assertEqual('', f.clean(None))
self.assertEqual('[email protected]', f.clean('[email protected]'))
self.assertEqual('[email protected]', f.clean(' [email protected] \t \t '))
self.assertRaisesMessage(ValidationError, "'Enter a valid email address.'", f.clean, 'foo')
def test_emailfield_min_max_length(self):
f = EmailField(min_length=10, max_length=15)
self.assertWidgetRendersTo(f, '<input id="id_f" type="email" name="f" maxlength="15" />')
self.assertRaisesMessage(
ValidationError, "'Ensure this value has at least 10 characters (it has 9).'",
f.clean, '[email protected]'
)
self.assertEqual('[email protected]', f.clean('[email protected]'))
self.assertRaisesMessage(
ValidationError, "'Ensure this value has at most 15 characters (it has 20).'",
f.clean, '[email protected]'
)
# FileField ##################################################################
def test_filefield_1(self):
f = FileField()
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, '')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, '', '')
self.assertEqual('files/test1.pdf', f.clean('', 'files/test1.pdf'))
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, None)
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, None, '')
self.assertEqual('files/test2.pdf', f.clean(None, 'files/test2.pdf'))
self.assertRaisesMessage(
ValidationError, "'No file was submitted. Check the encoding type on the form.'",
f.clean, SimpleUploadedFile('', b'')
)
self.assertRaisesMessage(
ValidationError, "'No file was submitted. Check the encoding type on the form.'",
f.clean, SimpleUploadedFile('', b''), ''
)
self.assertEqual('files/test3.pdf', f.clean(None, 'files/test3.pdf'))
self.assertRaisesMessage(
ValidationError, "'No file was submitted. Check the encoding type on the form.'",
f.clean, 'some content that is not a file'
)
self.assertRaisesMessage(
ValidationError, "'The submitted file is empty.'",
f.clean, SimpleUploadedFile('name', None)
)
self.assertRaisesMessage(
ValidationError, "'The submitted file is empty.'",
f.clean, SimpleUploadedFile('name', b'')
)
self.assertEqual(SimpleUploadedFile, type(f.clean(SimpleUploadedFile('name', b'Some File Content'))))
self.assertIsInstance(
f.clean(SimpleUploadedFile('我隻氣墊船裝滿晒鱔.txt', 'मेरी मँडराने वाली नाव सर्पमीनों से भरी ह'.encode('utf-8'))),
SimpleUploadedFile
)
self.assertIsInstance(
f.clean(SimpleUploadedFile('name', b'Some File Content'), 'files/test4.pdf'),
SimpleUploadedFile
)
def test_filefield_2(self):
f = FileField(max_length=5)
self.assertRaisesMessage(
ValidationError, "'Ensure this filename has at most 5 characters (it has 18).'",
f.clean, SimpleUploadedFile('test_maxlength.txt', b'hello world')
)
self.assertEqual('files/test1.pdf', f.clean('', 'files/test1.pdf'))
self.assertEqual('files/test2.pdf', f.clean(None, 'files/test2.pdf'))
self.assertEqual(SimpleUploadedFile, type(f.clean(SimpleUploadedFile('name', b'Some File Content'))))
def test_filefield_3(self):
f = FileField(allow_empty_file=True)
self.assertEqual(SimpleUploadedFile,
type(f.clean(SimpleUploadedFile('name', b''))))
def test_filefield_changed(self):
'''
Test for the behavior of has_changed for FileField. The value of data will
more than likely come from request.FILES. The value of initial data will
likely be a filename stored in the database. Since its value is of no use to
a FileField it is ignored.
'''
f = FileField()
# No file was uploaded and no initial data.
self.assertFalse(f.has_changed('', None))
# A file was uploaded and no initial data.
self.assertTrue(f.has_changed('', {'filename': 'resume.txt', 'content': 'My resume'}))
# A file was not uploaded, but there is initial data
self.assertFalse(f.has_changed('resume.txt', None))
# A file was uploaded and there is initial data (file identity is not dealt
# with here)
self.assertTrue(f.has_changed('resume.txt', {'filename': 'resume.txt', 'content': 'My resume'}))
# ImageField ##################################################################
@skipIf(Image is None, "Pillow is required to test ImageField")
def test_imagefield_annotate_with_image_after_clean(self):
f = ImageField()
img_path = os.path.dirname(upath(__file__)) + '/filepath_test_files/1x1.png'
with open(img_path, 'rb') as img_file:
img_data = img_file.read()
img_file = SimpleUploadedFile('1x1.png', img_data)
img_file.content_type = 'text/plain'
uploaded_file = f.clean(img_file)
self.assertEqual('PNG', uploaded_file.image.format)
self.assertEqual('image/png', uploaded_file.content_type)
@skipIf(Image is None, "Pillow is required to test ImageField")
def test_imagefield_annotate_with_bitmap_image_after_clean(self):
"""
This also tests the situation when Pillow doesn't detect the MIME type
of the image (#24948).
"""
from PIL.BmpImagePlugin import BmpImageFile
try:
Image.register_mime(BmpImageFile.format, None)
f = ImageField()
img_path = os.path.dirname(upath(__file__)) + '/filepath_test_files/1x1.bmp'
with open(img_path, 'rb') as img_file:
img_data = img_file.read()
img_file = SimpleUploadedFile('1x1.bmp', img_data)
img_file.content_type = 'text/plain'
uploaded_file = f.clean(img_file)
self.assertEqual('BMP', uploaded_file.image.format)
self.assertIsNone(uploaded_file.content_type)
finally:
Image.register_mime(BmpImageFile.format, 'image/bmp')
# URLField ##################################################################
def test_urlfield_1(self):
f = URLField()
self.assertWidgetRendersTo(f, '<input type="url" name="f" id="id_f" />')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, '')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, None)
self.assertEqual('http://localhost', f.clean('http://localhost'))
self.assertEqual('http://example.com', f.clean('http://example.com'))
self.assertEqual('http://example.com.', f.clean('http://example.com.'))
self.assertEqual('http://www.example.com', f.clean('http://www.example.com'))
self.assertEqual('http://www.example.com:8000/test', f.clean('http://www.example.com:8000/test'))
self.assertEqual('http://valid-with-hyphens.com', f.clean('valid-with-hyphens.com'))
self.assertEqual('http://subdomain.domain.com', f.clean('subdomain.domain.com'))
self.assertEqual('http://200.8.9.10', f.clean('http://200.8.9.10'))
self.assertEqual('http://200.8.9.10:8000/test', f.clean('http://200.8.9.10:8000/test'))
self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'", f.clean, 'foo')
self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'", f.clean, 'http://')
self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'", f.clean, 'http://example')
self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'", f.clean, 'http://example.')
self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'", f.clean, 'com.')
self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'", f.clean, '.')
self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'", f.clean, 'http://.com')
self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'", f.clean, 'http://invalid-.com')
self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'", f.clean, 'http://-invalid.com')
self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'", f.clean, 'http://inv-.alid-.com')
self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'", f.clean, 'http://inv-.-alid.com')
self.assertEqual('http://valid-----hyphens.com', f.clean('http://valid-----hyphens.com'))
self.assertEqual(
'http://some.idn.xyz\xe4\xf6\xfc\xdfabc.domain.com:123/blah',
f.clean('http://some.idn.xyzäöüßabc.domain.com:123/blah')
)
self.assertEqual(
'http://www.example.com/s/http://code.djangoproject.com/ticket/13804',
f.clean('www.example.com/s/http://code.djangoproject.com/ticket/13804')
)
self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'", f.clean, '[a')
self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'", f.clean, 'http://[a')
def test_url_regex_ticket11198(self):
f = URLField()
# hangs "forever" if catastrophic backtracking in ticket:#11198 not fixed
self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'", f.clean, 'http://%s' % ("X" * 200,))
# a second test, to make sure the problem is really addressed, even on
# domains that don't fail the domain label length check in the regex
self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'", f.clean, 'http://%s' % ("X" * 60,))
def test_urlfield_2(self):
f = URLField(required=False)
self.assertEqual('', f.clean(''))
self.assertEqual('', f.clean(None))
self.assertEqual('http://example.com', f.clean('http://example.com'))
self.assertEqual('http://www.example.com', f.clean('http://www.example.com'))
self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'", f.clean, 'foo')
self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'", f.clean, 'http://')
self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'", f.clean, 'http://example')
self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'", f.clean, 'http://example.')
self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'", f.clean, 'http://.com')
def test_urlfield_5(self):
f = URLField(min_length=15, max_length=20)
self.assertWidgetRendersTo(f, '<input id="id_f" type="url" name="f" maxlength="20" />')
self.assertRaisesMessage(
ValidationError, "'Ensure this value has at least 15 characters (it has 12).'",
f.clean, 'http://f.com'
)
self.assertEqual('http://example.com', f.clean('http://example.com'))
self.assertRaisesMessage(
ValidationError, "'Ensure this value has at most 20 characters (it has 37).'",
f.clean, 'http://abcdefghijklmnopqrstuvwxyz.com'
)
def test_urlfield_6(self):
f = URLField(required=False)
self.assertEqual('http://example.com', f.clean('example.com'))
self.assertEqual('', f.clean(''))
self.assertEqual('https://example.com', f.clean('https://example.com'))
def test_urlfield_7(self):
f = URLField()
self.assertEqual('http://example.com', f.clean('http://example.com'))
self.assertEqual('http://example.com/test', f.clean('http://example.com/test'))
self.assertEqual('http://example.com?some_param=some_value',
f.clean('http://example.com?some_param=some_value'))
def test_urlfield_9(self):
f = URLField()
urls = (
'http://עברית.idn.icann.org/',
'http://sãopaulo.com/',
'http://sãopaulo.com.br/',
'http://пример.испытание/',
'http://مثال.إختبار/',
'http://例子.测试/',
'http://例子.測試/',
'http://उदाहरण.परीक्षा/',
'http://例え.テスト/',
'http://مثال.آزمایشی/',
'http://실례.테스트/',
'http://العربية.idn.icann.org/',
)
for url in urls:
# Valid IDN
self.assertEqual(url, f.clean(url))
def test_urlfield_10(self):
"""Test URLField correctly validates IPv6 (#18779)."""
f = URLField()
urls = (
'http://[12:34::3a53]/',
'http://[a34:9238::]:8080/',
)
for url in urls:
self.assertEqual(url, f.clean(url))
def test_urlfield_not_string(self):
f = URLField(required=False)
self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'", f.clean, 23)
def test_urlfield_normalization(self):
f = URLField()
self.assertEqual(f.clean('http://example.com/ '), 'http://example.com/')
# BooleanField ################################################################
def test_booleanfield_1(self):
f = BooleanField()
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, '')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, None)
self.assertEqual(True, f.clean(True))
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, False)
self.assertEqual(True, f.clean(1))
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, 0)
self.assertEqual(True, f.clean('Django rocks'))
self.assertEqual(True, f.clean('True'))
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, 'False')
def test_booleanfield_2(self):
f = BooleanField(required=False)
self.assertEqual(False, f.clean(''))
self.assertEqual(False, f.clean(None))
self.assertEqual(True, f.clean(True))
self.assertEqual(False, f.clean(False))
self.assertEqual(True, f.clean(1))
self.assertEqual(False, f.clean(0))
self.assertEqual(True, f.clean('1'))
self.assertEqual(False, f.clean('0'))
self.assertEqual(True, f.clean('Django rocks'))
self.assertEqual(False, f.clean('False'))
self.assertEqual(False, f.clean('false'))
self.assertEqual(False, f.clean('FaLsE'))
def test_boolean_picklable(self):
self.assertIsInstance(pickle.loads(pickle.dumps(BooleanField())), BooleanField)
def test_booleanfield_changed(self):
f = BooleanField()
self.assertFalse(f.has_changed(None, None))
self.assertFalse(f.has_changed(None, ''))
self.assertFalse(f.has_changed('', None))
self.assertFalse(f.has_changed('', ''))
self.assertTrue(f.has_changed(False, 'on'))
self.assertFalse(f.has_changed(True, 'on'))
self.assertTrue(f.has_changed(True, ''))
# Initial value may have mutated to a string due to show_hidden_initial (#19537)
self.assertTrue(f.has_changed('False', 'on'))
# ChoiceField #################################################################
def test_choicefield_1(self):
f = ChoiceField(choices=[('1', 'One'), ('2', 'Two')])
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, '')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, None)
self.assertEqual('1', f.clean(1))
self.assertEqual('1', f.clean('1'))
self.assertRaisesMessage(
ValidationError, "'Select a valid choice. 3 is not one of the available choices.'",
f.clean, '3'
)
def test_choicefield_2(self):
f = ChoiceField(choices=[('1', 'One'), ('2', 'Two')], required=False)
self.assertEqual('', f.clean(''))
self.assertEqual('', f.clean(None))
self.assertEqual('1', f.clean(1))
self.assertEqual('1', f.clean('1'))
self.assertRaisesMessage(
ValidationError, "'Select a valid choice. 3 is not one of the available choices.'",
f.clean, '3'
)
def test_choicefield_3(self):
f = ChoiceField(choices=[('J', 'John'), ('P', 'Paul')])
self.assertEqual('J', f.clean('J'))
self.assertRaisesMessage(
ValidationError, "'Select a valid choice. John is not one of the available choices.'",
f.clean, 'John'
)
def test_choicefield_4(self):
f = ChoiceField(
choices=[
('Numbers', (('1', 'One'), ('2', 'Two'))),
('Letters', (('3', 'A'), ('4', 'B'))), ('5', 'Other'),
]
)
self.assertEqual('1', f.clean(1))
self.assertEqual('1', f.clean('1'))
self.assertEqual('3', f.clean(3))
self.assertEqual('3', f.clean('3'))
self.assertEqual('5', f.clean(5))
self.assertEqual('5', f.clean('5'))
self.assertRaisesMessage(
ValidationError, "'Select a valid choice. 6 is not one of the available choices.'",
f.clean, '6'
)
def test_choicefield_callable(self):
choices = lambda: [('J', 'John'), ('P', 'Paul')]
f = ChoiceField(choices=choices)
self.assertEqual('J', f.clean('J'))
def test_choicefield_callable_may_evaluate_to_different_values(self):
choices = []
def choices_as_callable():
return choices
class ChoiceFieldForm(Form):
choicefield = ChoiceField(choices=choices_as_callable)
choices = [('J', 'John')]
form = ChoiceFieldForm()
self.assertEqual([('J', 'John')], list(form.fields['choicefield'].choices))
choices = [('P', 'Paul')]
form = ChoiceFieldForm()
self.assertEqual([('P', 'Paul')], list(form.fields['choicefield'].choices))
def test_choicefield_disabled(self):
f = ChoiceField(choices=[('J', 'John'), ('P', 'Paul')], disabled=True)
self.assertWidgetRendersTo(f,
'<select id="id_f" name="f" disabled><option value="J">John</option>'
'<option value="P">Paul</option></select>')
# TypedChoiceField ############################################################
# TypedChoiceField is just like ChoiceField, except that coerced types will
# be returned:
def test_typedchoicefield_1(self):
f = TypedChoiceField(choices=[(1, "+1"), (-1, "-1")], coerce=int)
self.assertEqual(1, f.clean('1'))
self.assertRaisesMessage(
ValidationError, "'Select a valid choice. 2 is not one of the available choices.'",
f.clean, '2'
)
def test_typedchoicefield_2(self):
# Different coercion, same validation.
f = TypedChoiceField(choices=[(1, "+1"), (-1, "-1")], coerce=float)
self.assertEqual(1.0, f.clean('1'))
def test_typedchoicefield_3(self):
# This can also cause weirdness: be careful (bool(-1) == True, remember)
f = TypedChoiceField(choices=[(1, "+1"), (-1, "-1")], coerce=bool)
self.assertEqual(True, f.clean('-1'))
def test_typedchoicefield_4(self):
# Even more weirdness: if you have a valid choice but your coercion function
# can't coerce, you'll still get a validation error. Don't do this!
f = TypedChoiceField(choices=[('A', 'A'), ('B', 'B')], coerce=int)
self.assertRaisesMessage(
ValidationError, "'Select a valid choice. B is not one of the available choices.'",
f.clean, 'B'
)
# Required fields require values
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, '')
def test_typedchoicefield_5(self):
# Non-required fields aren't required
f = TypedChoiceField(choices=[(1, "+1"), (-1, "-1")], coerce=int, required=False)
self.assertEqual('', f.clean(''))
# If you want cleaning an empty value to return a different type, tell the field
def test_typedchoicefield_6(self):
f = TypedChoiceField(choices=[(1, "+1"), (-1, "-1")], coerce=int, required=False, empty_value=None)
self.assertIsNone(f.clean(''))
def test_typedchoicefield_has_changed(self):
# has_changed should not trigger required validation
f = TypedChoiceField(choices=[(1, "+1"), (-1, "-1")], coerce=int, required=True)
self.assertFalse(f.has_changed(None, ''))
self.assertFalse(f.has_changed(1, '1'))
self.assertFalse(f.has_changed('1', '1'))
def test_typedchoicefield_special_coerce(self):
"""
Test a coerce function which results in a value not present in choices.
Refs #21397.
"""
def coerce_func(val):
return Decimal('1.%s' % val)
f = TypedChoiceField(choices=[(1, "1"), (2, "2")], coerce=coerce_func, required=True)
self.assertEqual(Decimal('1.2'), f.clean('2'))
self.assertRaisesMessage(
ValidationError, "'This field is required.'",
f.clean, ''
)
self.assertRaisesMessage(
ValidationError, "'Select a valid choice. 3 is not one of the available choices.'",
f.clean, '3'
)
# NullBooleanField ############################################################
def test_nullbooleanfield_1(self):
f = NullBooleanField()
self.assertIsNone(f.clean(''))
self.assertEqual(True, f.clean(True))
self.assertEqual(False, f.clean(False))
self.assertIsNone(f.clean(None))
self.assertEqual(False, f.clean('0'))
self.assertEqual(True, f.clean('1'))
self.assertIsNone(f.clean('2'))
self.assertIsNone(f.clean('3'))
self.assertIsNone(f.clean('hello'))
self.assertEqual(True, f.clean('true'))
self.assertEqual(False, f.clean('false'))
def test_nullbooleanfield_2(self):
# Make sure that the internal value is preserved if using HiddenInput (#7753)
class HiddenNullBooleanForm(Form):
hidden_nullbool1 = NullBooleanField(widget=HiddenInput, initial=True)
hidden_nullbool2 = NullBooleanField(widget=HiddenInput, initial=False)
f = HiddenNullBooleanForm()
self.assertHTMLEqual(
'<input type="hidden" name="hidden_nullbool1" value="True" id="id_hidden_nullbool1" />'
'<input type="hidden" name="hidden_nullbool2" value="False" id="id_hidden_nullbool2" />',
str(f)
)
def test_nullbooleanfield_3(self):
class HiddenNullBooleanForm(Form):
hidden_nullbool1 = NullBooleanField(widget=HiddenInput, initial=True)
hidden_nullbool2 = NullBooleanField(widget=HiddenInput, initial=False)
f = HiddenNullBooleanForm({'hidden_nullbool1': 'True', 'hidden_nullbool2': 'False'})
self.assertIsNone(f.full_clean())
self.assertEqual(True, f.cleaned_data['hidden_nullbool1'])
self.assertEqual(False, f.cleaned_data['hidden_nullbool2'])
def test_nullbooleanfield_4(self):
# Make sure we're compatible with MySQL, which uses 0 and 1 for its boolean
# values. (#9609)
NULLBOOL_CHOICES = (('1', 'Yes'), ('0', 'No'), ('', 'Unknown'))
class MySQLNullBooleanForm(Form):
nullbool0 = NullBooleanField(widget=RadioSelect(choices=NULLBOOL_CHOICES))
nullbool1 = NullBooleanField(widget=RadioSelect(choices=NULLBOOL_CHOICES))
nullbool2 = NullBooleanField(widget=RadioSelect(choices=NULLBOOL_CHOICES))
f = MySQLNullBooleanForm({'nullbool0': '1', 'nullbool1': '0', 'nullbool2': ''})
self.assertIsNone(f.full_clean())
self.assertEqual(True, f.cleaned_data['nullbool0'])
self.assertEqual(False, f.cleaned_data['nullbool1'])
self.assertIsNone(f.cleaned_data['nullbool2'])
def test_nullbooleanfield_changed(self):
f = NullBooleanField()
self.assertTrue(f.has_changed(False, None))
self.assertTrue(f.has_changed(None, False))
self.assertFalse(f.has_changed(None, None))
self.assertFalse(f.has_changed(False, False))
self.assertTrue(f.has_changed(True, False))
self.assertTrue(f.has_changed(True, None))
self.assertTrue(f.has_changed(True, False))
# MultipleChoiceField #########################################################
def test_multiplechoicefield_1(self):
f = MultipleChoiceField(choices=[('1', 'One'), ('2', 'Two')])
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, '')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, None)
self.assertEqual(['1'], f.clean([1]))
self.assertEqual(['1'], f.clean(['1']))
self.assertEqual(['1', '2'], f.clean(['1', '2']))
self.assertEqual(['1', '2'], f.clean([1, '2']))
self.assertEqual(['1', '2'], f.clean((1, '2')))
self.assertRaisesMessage(ValidationError, "'Enter a list of values.'", f.clean, 'hello')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, [])
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, ())
self.assertRaisesMessage(
ValidationError, "'Select a valid choice. 3 is not one of the available choices.'",
f.clean, ['3']
)
def test_multiplechoicefield_2(self):
f = MultipleChoiceField(choices=[('1', 'One'), ('2', 'Two')], required=False)
self.assertEqual([], f.clean(''))
self.assertEqual([], f.clean(None))
self.assertEqual(['1'], f.clean([1]))
self.assertEqual(['1'], f.clean(['1']))
self.assertEqual(['1', '2'], f.clean(['1', '2']))
self.assertEqual(['1', '2'], f.clean([1, '2']))
self.assertEqual(['1', '2'], f.clean((1, '2')))
self.assertRaisesMessage(ValidationError, "'Enter a list of values.'", f.clean, 'hello')
self.assertEqual([], f.clean([]))
self.assertEqual([], f.clean(()))
self.assertRaisesMessage(
ValidationError, "'Select a valid choice. 3 is not one of the available choices.'",
f.clean, ['3']
)
def test_multiplechoicefield_3(self):
f = MultipleChoiceField(
choices=[('Numbers', (('1', 'One'), ('2', 'Two'))), ('Letters', (('3', 'A'), ('4', 'B'))), ('5', 'Other')]
)
self.assertEqual(['1'], f.clean([1]))
self.assertEqual(['1'], f.clean(['1']))
self.assertEqual(['1', '5'], f.clean([1, 5]))
self.assertEqual(['1', '5'], f.clean([1, '5']))
self.assertEqual(['1', '5'], f.clean(['1', 5]))
self.assertEqual(['1', '5'], f.clean(['1', '5']))
self.assertRaisesMessage(
ValidationError, "'Select a valid choice. 6 is not one of the available choices.'",
f.clean, ['6']
)
self.assertRaisesMessage(
ValidationError, "'Select a valid choice. 6 is not one of the available choices.'",
f.clean, ['1', '6']
)
def test_multiplechoicefield_changed(self):
f = MultipleChoiceField(choices=[('1', 'One'), ('2', 'Two'), ('3', 'Three')])
self.assertFalse(f.has_changed(None, None))
self.assertFalse(f.has_changed([], None))
self.assertTrue(f.has_changed(None, ['1']))
self.assertFalse(f.has_changed([1, 2], ['1', '2']))
self.assertFalse(f.has_changed([2, 1], ['1', '2']))
self.assertTrue(f.has_changed([1, 2], ['1']))
self.assertTrue(f.has_changed([1, 2], ['1', '3']))
# TypedMultipleChoiceField ############################################################
# TypedMultipleChoiceField is just like MultipleChoiceField, except that coerced types
# will be returned:
def test_typedmultiplechoicefield_1(self):
f = TypedMultipleChoiceField(choices=[(1, "+1"), (-1, "-1")], coerce=int)
self.assertEqual([1], f.clean(['1']))
self.assertRaisesMessage(
ValidationError, "'Select a valid choice. 2 is not one of the available choices.'",
f.clean, ['2']
)
def test_typedmultiplechoicefield_2(self):
# Different coercion, same validation.
f = TypedMultipleChoiceField(choices=[(1, "+1"), (-1, "-1")], coerce=float)
self.assertEqual([1.0], f.clean(['1']))
def test_typedmultiplechoicefield_3(self):
# This can also cause weirdness: be careful (bool(-1) == True, remember)
f = TypedMultipleChoiceField(choices=[(1, "+1"), (-1, "-1")], coerce=bool)
self.assertEqual([True], f.clean(['-1']))
def test_typedmultiplechoicefield_4(self):
f = TypedMultipleChoiceField(choices=[(1, "+1"), (-1, "-1")], coerce=int)
self.assertEqual([1, -1], f.clean(['1', '-1']))
self.assertRaisesMessage(
ValidationError, "'Select a valid choice. 2 is not one of the available choices.'",
f.clean, ['1', '2']
)
def test_typedmultiplechoicefield_5(self):
# Even more weirdness: if you have a valid choice but your coercion function
# can't coerce, you'll still get a validation error. Don't do this!
f = TypedMultipleChoiceField(choices=[('A', 'A'), ('B', 'B')], coerce=int)
self.assertRaisesMessage(
ValidationError, "'Select a valid choice. B is not one of the available choices.'",
f.clean, ['B']
)
# Required fields require values
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, [])
def test_typedmultiplechoicefield_6(self):
# Non-required fields aren't required
f = TypedMultipleChoiceField(choices=[(1, "+1"), (-1, "-1")], coerce=int, required=False)
self.assertEqual([], f.clean([]))
def test_typedmultiplechoicefield_7(self):
# If you want cleaning an empty value to return a different type, tell the field
f = TypedMultipleChoiceField(choices=[(1, "+1"), (-1, "-1")], coerce=int, required=False, empty_value=None)
self.assertIsNone(f.clean([]))
def test_typedmultiplechoicefield_has_changed(self):
# has_changed should not trigger required validation
f = TypedMultipleChoiceField(choices=[(1, "+1"), (-1, "-1")], coerce=int, required=True)
self.assertFalse(f.has_changed(None, ''))
def test_typedmultiplechoicefield_special_coerce(self):
"""
Test a coerce function which results in a value not present in choices.
Refs #21397.
"""
def coerce_func(val):
return Decimal('1.%s' % val)
f = TypedMultipleChoiceField(
choices=[(1, "1"), (2, "2")], coerce=coerce_func, required=True)
self.assertEqual([Decimal('1.2')], f.clean(['2']))
self.assertRaisesMessage(ValidationError,
"'This field is required.'", f.clean, [])
self.assertRaisesMessage(ValidationError,
"'Select a valid choice. 3 is not one of the available choices.'",
f.clean, ['3'])
# ComboField ##################################################################
def test_combofield_1(self):
f = ComboField(fields=[CharField(max_length=20), EmailField()])
self.assertEqual('[email protected]', f.clean('[email protected]'))
self.assertRaisesMessage(
ValidationError, "'Ensure this value has at most 20 characters (it has 28).'",
f.clean, '[email protected]'
)
self.assertRaisesMessage(ValidationError, "'Enter a valid email address.'", f.clean, 'not an email')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, '')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, None)
def test_combofield_2(self):
f = ComboField(fields=[CharField(max_length=20), EmailField()], required=False)
self.assertEqual('[email protected]', f.clean('[email protected]'))
self.assertRaisesMessage(
ValidationError, "'Ensure this value has at most 20 characters (it has 28).'",
f.clean, '[email protected]'
)
self.assertRaisesMessage(ValidationError, "'Enter a valid email address.'", f.clean, 'not an email')
self.assertEqual('', f.clean(''))
self.assertEqual('', f.clean(None))
# FilePathField ###############################################################
def test_filepathfield_1(self):
path = os.path.abspath(upath(forms.__file__))
path = os.path.dirname(path) + '/'
self.assertTrue(fix_os_paths(path).endswith('/django/forms/'))
def test_filepathfield_2(self):
path = upath(forms.__file__)
path = os.path.dirname(os.path.abspath(path)) + '/'
f = FilePathField(path=path)
f.choices = [p for p in f.choices if p[0].endswith('.py')]
f.choices.sort()
expected = [
('/django/forms/__init__.py', '__init__.py'),
('/django/forms/boundfield.py', 'boundfield.py'),
('/django/forms/fields.py', 'fields.py'),
('/django/forms/forms.py', 'forms.py'),
('/django/forms/formsets.py', 'formsets.py'),
('/django/forms/models.py', 'models.py'),
('/django/forms/utils.py', 'utils.py'),
('/django/forms/widgets.py', 'widgets.py')
]
for exp, got in zip(expected, fix_os_paths(f.choices)):
self.assertEqual(exp[1], got[1])
self.assertTrue(got[0].endswith(exp[0]))
self.assertRaisesMessage(
ValidationError, "'Select a valid choice. fields.py is not one of the available choices.'",
f.clean, 'fields.py'
)
        self.assertTrue(fix_os_paths(f.clean(path + 'fields.py')).endswith('/django/forms/fields.py'))
def test_filepathfield_3(self):
path = upath(forms.__file__)
path = os.path.dirname(os.path.abspath(path)) + '/'
        f = FilePathField(path=path, match=r'^.*?\.py$')
f.choices.sort()
expected = [
('/django/forms/__init__.py', '__init__.py'),
('/django/forms/boundfield.py', 'boundfield.py'),
('/django/forms/fields.py', 'fields.py'),
('/django/forms/forms.py', 'forms.py'),
('/django/forms/formsets.py', 'formsets.py'),
('/django/forms/models.py', 'models.py'),
('/django/forms/utils.py', 'utils.py'),
('/django/forms/widgets.py', 'widgets.py')
]
for exp, got in zip(expected, fix_os_paths(f.choices)):
self.assertEqual(exp[1], got[1])
self.assertTrue(got[0].endswith(exp[0]))
def test_filepathfield_4(self):
path = os.path.abspath(upath(forms.__file__))
path = os.path.dirname(path) + '/'
        f = FilePathField(path=path, recursive=True, match=r'^.*?\.py$')
f.choices.sort()
expected = [
('/django/forms/__init__.py', '__init__.py'),
('/django/forms/boundfield.py', 'boundfield.py'),
('/django/forms/extras/__init__.py', 'extras/__init__.py'),
('/django/forms/extras/widgets.py', 'extras/widgets.py'),
('/django/forms/fields.py', 'fields.py'),
('/django/forms/forms.py', 'forms.py'),
('/django/forms/formsets.py', 'formsets.py'),
('/django/forms/models.py', 'models.py'),
('/django/forms/utils.py', 'utils.py'),
('/django/forms/widgets.py', 'widgets.py')
]
for exp, got in zip(expected, fix_os_paths(f.choices)):
self.assertEqual(exp[1], got[1])
self.assertTrue(got[0].endswith(exp[0]))
def test_filepathfield_folders(self):
path = os.path.dirname(upath(__file__)) + '/filepath_test_files/'
f = FilePathField(path=path, allow_folders=True, allow_files=False)
f.choices.sort()
expected = [
('/tests/forms_tests/tests/filepath_test_files/directory', 'directory'),
]
for exp, got in zip(expected, fix_os_paths(f.choices)):
self.assertEqual(exp[1], got[1])
self.assertTrue(got[0].endswith(exp[0]))
f = FilePathField(path=path, allow_folders=True, allow_files=True)
f.choices.sort()
expected = [
('/tests/forms_tests/tests/filepath_test_files/.dot-file', '.dot-file'),
('/tests/forms_tests/tests/filepath_test_files/1x1.bmp', '1x1.bmp'),
('/tests/forms_tests/tests/filepath_test_files/1x1.png', '1x1.png'),
('/tests/forms_tests/tests/filepath_test_files/directory', 'directory'),
('/tests/forms_tests/tests/filepath_test_files/fake-image.jpg', 'fake-image.jpg'),
('/tests/forms_tests/tests/filepath_test_files/real-text-file.txt', 'real-text-file.txt'),
]
actual = fix_os_paths(f.choices)
self.assertEqual(len(expected), len(actual))
for exp, got in zip(expected, actual):
self.assertEqual(exp[1], got[1])
self.assertTrue(got[0].endswith(exp[0]))
# SplitDateTimeField ##########################################################
def test_splitdatetimefield_1(self):
from django.forms.widgets import SplitDateTimeWidget
f = SplitDateTimeField()
self.assertIsInstance(f.widget, SplitDateTimeWidget)
self.assertEqual(
datetime.datetime(2006, 1, 10, 7, 30),
f.clean([datetime.date(2006, 1, 10), datetime.time(7, 30)])
)
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, None)
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, '')
self.assertRaisesMessage(ValidationError, "'Enter a list of values.'", f.clean, 'hello')
six.assertRaisesRegex(
            self, ValidationError, r"'Enter a valid date\.', u?'Enter a valid time\.'",
f.clean, ['hello', 'there']
)
self.assertRaisesMessage(ValidationError, "'Enter a valid time.'", f.clean, ['2006-01-10', 'there'])
self.assertRaisesMessage(ValidationError, "'Enter a valid date.'", f.clean, ['hello', '07:30'])
def test_splitdatetimefield_2(self):
f = SplitDateTimeField(required=False)
self.assertEqual(
datetime.datetime(2006, 1, 10, 7, 30),
f.clean([datetime.date(2006, 1, 10), datetime.time(7, 30)])
)
self.assertEqual(datetime.datetime(2006, 1, 10, 7, 30), f.clean(['2006-01-10', '07:30']))
self.assertIsNone(f.clean(None))
self.assertIsNone(f.clean(''))
self.assertIsNone(f.clean(['']))
self.assertIsNone(f.clean(['', '']))
self.assertRaisesMessage(ValidationError, "'Enter a list of values.'", f.clean, 'hello')
six.assertRaisesRegex(
            self, ValidationError, r"'Enter a valid date\.', u?'Enter a valid time\.'",
f.clean, ['hello', 'there']
)
self.assertRaisesMessage(ValidationError, "'Enter a valid time.'", f.clean, ['2006-01-10', 'there'])
self.assertRaisesMessage(ValidationError, "'Enter a valid date.'", f.clean, ['hello', '07:30'])
self.assertRaisesMessage(ValidationError, "'Enter a valid time.'", f.clean, ['2006-01-10', ''])
self.assertRaisesMessage(ValidationError, "'Enter a valid time.'", f.clean, ['2006-01-10'])
self.assertRaisesMessage(ValidationError, "'Enter a valid date.'", f.clean, ['', '07:30'])
def test_splitdatetimefield_changed(self):
f = SplitDateTimeField(input_date_formats=['%d/%m/%Y'])
self.assertFalse(f.has_changed(['11/01/2012', '09:18:15'], ['11/01/2012', '09:18:15']))
self.assertTrue(f.has_changed(datetime.datetime(2008, 5, 6, 12, 40, 00), ['2008-05-06', '12:40:00']))
self.assertFalse(f.has_changed(datetime.datetime(2008, 5, 6, 12, 40, 00), ['06/05/2008', '12:40']))
self.assertTrue(f.has_changed(datetime.datetime(2008, 5, 6, 12, 40, 00), ['06/05/2008', '12:41']))
# GenericIPAddressField #######################################################
def test_generic_ipaddress_invalid_arguments(self):
self.assertRaises(ValueError, GenericIPAddressField, protocol="hamster")
self.assertRaises(ValueError, GenericIPAddressField, protocol="ipv4", unpack_ipv4=True)
def test_generic_ipaddress_as_generic(self):
# The edge cases of the IPv6 validation code are not deeply tested
# here, they are covered in the tests for django.utils.ipv6
f = GenericIPAddressField()
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, '')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, None)
self.assertEqual(f.clean(' 127.0.0.1 '), '127.0.0.1')
self.assertRaisesMessage(ValidationError, "'Enter a valid IPv4 or IPv6 address.'", f.clean, 'foo')
self.assertRaisesMessage(ValidationError, "'Enter a valid IPv4 or IPv6 address.'", f.clean, '127.0.0.')
self.assertRaisesMessage(ValidationError, "'Enter a valid IPv4 or IPv6 address.'", f.clean, '1.2.3.4.5')
self.assertRaisesMessage(ValidationError, "'Enter a valid IPv4 or IPv6 address.'", f.clean, '256.125.1.5')
self.assertEqual(f.clean(' fe80::223:6cff:fe8a:2e8a '), 'fe80::223:6cff:fe8a:2e8a')
self.assertEqual(f.clean(' 2a02::223:6cff:fe8a:2e8a '), '2a02::223:6cff:fe8a:2e8a')
self.assertRaisesMessage(ValidationError, "'This is not a valid IPv6 address.'", f.clean, '12345:2:3:4')
self.assertRaisesMessage(ValidationError, "'This is not a valid IPv6 address.'", f.clean, '1::2:3::4')
self.assertRaisesMessage(
ValidationError, "'This is not a valid IPv6 address.'",
f.clean, 'foo::223:6cff:fe8a:2e8a'
)
self.assertRaisesMessage(ValidationError, "'This is not a valid IPv6 address.'", f.clean, '1::2:3:4:5:6:7:8')
self.assertRaisesMessage(ValidationError, "'This is not a valid IPv6 address.'", f.clean, '1:2')
def test_generic_ipaddress_as_ipv4_only(self):
f = GenericIPAddressField(protocol="IPv4")
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, '')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, None)
self.assertEqual(f.clean(' 127.0.0.1 '), '127.0.0.1')
self.assertRaisesMessage(ValidationError, "'Enter a valid IPv4 address.'", f.clean, 'foo')
self.assertRaisesMessage(ValidationError, "'Enter a valid IPv4 address.'", f.clean, '127.0.0.')
self.assertRaisesMessage(ValidationError, "'Enter a valid IPv4 address.'", f.clean, '1.2.3.4.5')
self.assertRaisesMessage(ValidationError, "'Enter a valid IPv4 address.'", f.clean, '256.125.1.5')
self.assertRaisesMessage(ValidationError, "'Enter a valid IPv4 address.'", f.clean, 'fe80::223:6cff:fe8a:2e8a')
self.assertRaisesMessage(ValidationError, "'Enter a valid IPv4 address.'", f.clean, '2a02::223:6cff:fe8a:2e8a')
def test_generic_ipaddress_as_ipv6_only(self):
f = GenericIPAddressField(protocol="IPv6")
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, '')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, None)
self.assertRaisesMessage(ValidationError, "'Enter a valid IPv6 address.'", f.clean, '127.0.0.1')
self.assertRaisesMessage(ValidationError, "'Enter a valid IPv6 address.'", f.clean, 'foo')
self.assertRaisesMessage(ValidationError, "'Enter a valid IPv6 address.'", f.clean, '127.0.0.')
self.assertRaisesMessage(ValidationError, "'Enter a valid IPv6 address.'", f.clean, '1.2.3.4.5')
self.assertRaisesMessage(ValidationError, "'Enter a valid IPv6 address.'", f.clean, '256.125.1.5')
self.assertEqual(f.clean(' fe80::223:6cff:fe8a:2e8a '), 'fe80::223:6cff:fe8a:2e8a')
self.assertEqual(f.clean(' 2a02::223:6cff:fe8a:2e8a '), '2a02::223:6cff:fe8a:2e8a')
self.assertRaisesMessage(ValidationError, "'This is not a valid IPv6 address.'", f.clean, '12345:2:3:4')
self.assertRaisesMessage(ValidationError, "'This is not a valid IPv6 address.'", f.clean, '1::2:3::4')
self.assertRaisesMessage(
ValidationError, "'This is not a valid IPv6 address.'",
f.clean, 'foo::223:6cff:fe8a:2e8a'
)
self.assertRaisesMessage(ValidationError, "'This is not a valid IPv6 address.'", f.clean, '1::2:3:4:5:6:7:8')
self.assertRaisesMessage(ValidationError, "'This is not a valid IPv6 address.'", f.clean, '1:2')
def test_generic_ipaddress_as_generic_not_required(self):
f = GenericIPAddressField(required=False)
self.assertEqual(f.clean(''), '')
self.assertEqual(f.clean(None), '')
self.assertEqual(f.clean('127.0.0.1'), '127.0.0.1')
self.assertRaisesMessage(ValidationError, "'Enter a valid IPv4 or IPv6 address.'", f.clean, 'foo')
self.assertRaisesMessage(ValidationError, "'Enter a valid IPv4 or IPv6 address.'", f.clean, '127.0.0.')
self.assertRaisesMessage(ValidationError, "'Enter a valid IPv4 or IPv6 address.'", f.clean, '1.2.3.4.5')
self.assertRaisesMessage(ValidationError, "'Enter a valid IPv4 or IPv6 address.'", f.clean, '256.125.1.5')
self.assertEqual(f.clean(' fe80::223:6cff:fe8a:2e8a '), 'fe80::223:6cff:fe8a:2e8a')
self.assertEqual(f.clean(' 2a02::223:6cff:fe8a:2e8a '), '2a02::223:6cff:fe8a:2e8a')
self.assertRaisesMessage(ValidationError, "'This is not a valid IPv6 address.'", f.clean, '12345:2:3:4')
self.assertRaisesMessage(ValidationError, "'This is not a valid IPv6 address.'", f.clean, '1::2:3::4')
self.assertRaisesMessage(
ValidationError, "'This is not a valid IPv6 address.'",
f.clean, 'foo::223:6cff:fe8a:2e8a'
)
self.assertRaisesMessage(ValidationError, "'This is not a valid IPv6 address.'", f.clean, '1::2:3:4:5:6:7:8')
self.assertRaisesMessage(ValidationError, "'This is not a valid IPv6 address.'", f.clean, '1:2')
def test_generic_ipaddress_normalization(self):
# Test the normalizing code
f = GenericIPAddressField()
self.assertEqual(f.clean(' ::ffff:0a0a:0a0a '), '::ffff:10.10.10.10')
self.assertEqual(f.clean(' ::ffff:10.10.10.10 '), '::ffff:10.10.10.10')
self.assertEqual(f.clean(' 2001:000:a:0000:0:fe:fe:beef '), '2001:0:a::fe:fe:beef')
self.assertEqual(f.clean(' 2001::a:0000:0:fe:fe:beef '), '2001:0:a::fe:fe:beef')
f = GenericIPAddressField(unpack_ipv4=True)
self.assertEqual(f.clean(' ::ffff:0a0a:0a0a'), '10.10.10.10')
# SlugField ###################################################################
def test_slugfield_normalization(self):
f = SlugField()
self.assertEqual(f.clean(' aa-bb-cc '), 'aa-bb-cc')
def test_slugfield_unicode_normalization(self):
f = SlugField(allow_unicode=True)
self.assertEqual(f.clean('a'), 'a')
self.assertEqual(f.clean('1'), '1')
self.assertEqual(f.clean('a1'), 'a1')
self.assertEqual(f.clean('你好'), '你好')
self.assertEqual(f.clean(' 你-好 '), '你-好')
self.assertEqual(f.clean('ıçğüş'), 'ıçğüş')
self.assertEqual(f.clean('foo-ıç-bar'), 'foo-ıç-bar')
# UUIDField ###################################################################
def test_uuidfield_1(self):
field = UUIDField()
value = field.clean('550e8400e29b41d4a716446655440000')
self.assertEqual(value, uuid.UUID('550e8400e29b41d4a716446655440000'))
def test_uuidfield_2(self):
field = UUIDField(required=False)
value = field.clean('')
        self.assertIsNone(value)
def test_uuidfield_3(self):
field = UUIDField()
with self.assertRaises(ValidationError) as cm:
field.clean('550e8400')
self.assertEqual(cm.exception.messages[0], 'Enter a valid UUID.')
def test_uuidfield_4(self):
field = UUIDField()
value = field.prepare_value(uuid.UUID('550e8400e29b41d4a716446655440000'))
self.assertEqual(value, '550e8400e29b41d4a716446655440000')
|
edisonlz/fruit | refs/heads/master | web_project/base/site-packages/django/utils/log.py | 108 | import logging
import traceback
from django.conf import settings
from django.core import mail
from django.core.mail import get_connection
from django.views.debug import ExceptionReporter, get_exception_reporter_filter
# Make sure a NullHandler is available
# This was added in Python 2.7/3.2
try:
from logging import NullHandler
except ImportError:
class NullHandler(logging.Handler):
def emit(self, record):
pass
# Make sure that dictConfig is available
# This was added in Python 2.7/3.2
try:
from logging.config import dictConfig
except ImportError:
from django.utils.dictconfig import dictConfig
getLogger = logging.getLogger
# Default logging for Django. This sends an email to the site admins on every
# HTTP 500 error. Depending on DEBUG, all other log records are either sent to
# the console (DEBUG=True) or discarded by means of the NullHandler (DEBUG=False).
DEFAULT_LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse',
},
'require_debug_true': {
'()': 'django.utils.log.RequireDebugTrue',
},
},
'handlers': {
'console': {
'level': 'INFO',
'filters': ['require_debug_true'],
'class': 'logging.StreamHandler',
},
'null': {
'class': 'django.utils.log.NullHandler',
},
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django': {
'handlers': ['console'],
},
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': False,
},
'django.security': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': False,
},
'py.warnings': {
'handlers': ['console'],
},
}
}
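# A minimal sketch of how a project might supply its own LOGGING setting on
# top of the defaults above (the logger name 'myapp' is hypothetical):
#
#     LOGGING = {
#         'version': 1,
#         'disable_existing_loggers': False,
#         'handlers': {
#             'console': {'class': 'logging.StreamHandler'},
#         },
#         'loggers': {
#             'myapp': {'handlers': ['console'], 'level': 'DEBUG'},
#         },
#     }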
class AdminEmailHandler(logging.Handler):
"""An exception log handler that emails log entries to site admins.
If the request is passed as the first argument to the log record,
request data will be provided in the email report.
"""
def __init__(self, include_html=False, email_backend=None):
logging.Handler.__init__(self)
self.include_html = include_html
self.email_backend = email_backend
def emit(self, record):
try:
request = record.request
subject = '%s (%s IP): %s' % (
record.levelname,
('internal' if request.META.get('REMOTE_ADDR') in settings.INTERNAL_IPS
else 'EXTERNAL'),
record.getMessage()
)
filter = get_exception_reporter_filter(request)
request_repr = filter.get_request_repr(request)
except Exception:
subject = '%s: %s' % (
record.levelname,
record.getMessage()
)
request = None
request_repr = "Request repr() unavailable."
subject = self.format_subject(subject)
if record.exc_info:
exc_info = record.exc_info
stack_trace = '\n'.join(traceback.format_exception(*record.exc_info))
else:
exc_info = (None, record.getMessage(), None)
stack_trace = 'No stack trace available'
message = "%s\n\n%s" % (stack_trace, request_repr)
reporter = ExceptionReporter(request, is_email=True, *exc_info)
html_message = reporter.get_traceback_html() if self.include_html else None
mail.mail_admins(subject, message, fail_silently=True,
html_message=html_message,
connection=self.connection())
def connection(self):
return get_connection(backend=self.email_backend, fail_silently=True)
def format_subject(self, subject):
"""
Escape CR and LF characters, and limit length.
RFC 2822's hard limit is 998 characters per line. So, minus "Subject: "
the actual subject must be no longer than 989 characters.
"""
formatted_subject = subject.replace('\n', '\\n').replace('\r', '\\r')
return formatted_subject[:989]
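# An illustrative sketch of wiring the handler up manually instead of through
# dictConfig (the 'django.request' logger is the one routed to this handler
# above; the rest is only an example):
#
#     handler = AdminEmailHandler(include_html=True)
#     handler.setLevel(logging.ERROR)
#     logging.getLogger('django.request').addHandler(handler)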
class CallbackFilter(logging.Filter):
"""
A logging filter that checks the return value of a given callable (which
takes the record-to-be-logged as its only parameter) to decide whether to
log a record.
"""
def __init__(self, callback):
self.callback = callback
def filter(self, record):
if self.callback(record):
return 1
return 0
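# A minimal usage sketch for CallbackFilter (the handler name is hypothetical):
#
#     warnings_only = CallbackFilter(lambda record: record.levelno >= logging.WARNING)
#     some_handler.addFilter(warnings_only)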
class RequireDebugFalse(logging.Filter):
def filter(self, record):
return not settings.DEBUG
class RequireDebugTrue(logging.Filter):
def filter(self, record):
return settings.DEBUG
|
xpansa/sale-workflow | refs/heads/8.0 | sale_quotation_sourcing/wizard/quotation_sourcing.py | 35 | # -*- coding: utf-8 -*-
#
# Author: Alexandre Fayolle
# Copyright 2014 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
from openerp import models, fields, api
class QuotationSourcingWizard(models.TransientModel):
_name = 'sale.order.sourcing'
sale_id = fields.Many2one('sale.order', string='Sale Order')
line_ids = fields.One2many('sale.order.line.sourcing', 'wizard_id',
string='Lines')
_rec_name = 'sale_id'
@api.multi
def action_done(self):
self.ensure_one()
for line in self.line_ids:
line.so_line_id.sourced_by = line.po_line_id
return self[0].sale_id.action_button_confirm()
class QuotationLineSource(models.TransientModel):
_name = 'sale.order.line.sourcing'
wizard_id = fields.Many2one('sale.order.sourcing', string='Wizard')
so_line_id = fields.Many2one('sale.order.line', string='Sale Order Line')
product_id = fields.Many2one('product.product',
string='Product',
related=('so_line_id', 'product_id'))
po_line_id = fields.Many2one(
'purchase.order.line',
string='Sourced By',
domain="[('product_id', '=', product_id),"
" ('order_id.state', 'in', ['draft', 'confirmed'])]")
|
chugunovyar/factoryForBuild | refs/heads/master | env/lib/python2.7/site-packages/numpy/distutils/info.py | 264 | """
Enhanced distutils with Fortran compiler support and more.
"""
from __future__ import division, absolute_import, print_function
postpone_import = True
|
henkhaus/wow | refs/heads/master | testing/multiprocess.py | 1 | from wowlib import wowapi, queries, log, mongoconnection, doctemplates
import time
import os
import multiprocessing
import pymongo
from pymongo import MongoClient
# TODO: rewrite updatefunction as a map function, i.e. when mapped, the
# function is applied to every item in the iterable.
logname = os.path.splitext(__file__)[0]
data = wowapi.auctionurl('Shadow-Council')
posts_init = mongoconnection.auctionconnection()
threads = 3
####
# create counters
count = 0
newcount = 0
timestamp = time.time()
updated = 0
update_stats = {}
# create list of distinct auctions in memory
auction_list = []
auctions = posts_init.find({'status': 'Active'}).distinct('auc')
for auction in auctions:
auction_list.append(auction)
print("Auction List Created")
posts_init.close()
######
data_parser = []
for item in data:
data_parser.append(item)
def findhour(bidtrac):
    # Return the index of the first empty bidtrac tuple, or 0 if none is found.
    try:
        return bidtrac.index(['0,0'])
    except ValueError:
        return 0
def updatefunction(auction):
    client = MongoClient('mongodb://localhost:27017/')
db = client.wowtest
posts = db.auctiondata
count = 0
newcount = 0
updated = 0
    # Build a new document; this allows adding data not returned by the WoW API.
newrow = doctemplates.auctiondoc(auction, timestamp)
# if statement to ensure that only new data is added to the DB.
if queries.binary_search(auction_list, newrow['auc']) is True:
curr_auc = posts.find_one({"auc": newrow['auc']})
try:
# findhour finds the index of first empty bidtrac tuple
first_empty = findhour(curr_auc['bidtrac'])
update_return = posts.update({'auc': newrow['auc']},
{'$set': {'timeupdated': timestamp,
'bidtrac.' + str(first_empty) + '.0': newrow['bid'],
'bidtrac.' + str(first_empty) + '.1': timestamp}})
updated += 1
#update_stats = log.logdicts(update_return)
except Exception as e:
update_return = posts.update({'auc': newrow['auc']},
{'$set': {'timeupdated': timestamp}})
updated += 1
#update_stats = log.logdicts(update_return, update_stats)
log.logging.error(e)
else:
# if auction was not found in auction list, auction is inserted into auctiondata
posts.insert_one(newrow)
newcount += 1
count += 1
@log.log(logname)
def mp_handler(data_parser):
p = multiprocessing.Pool(threads)
p.map(updatefunction, data_parser)
if __name__ == '__main__':
print('name')
mp_handler(data_parser)
|
nateknight/arducopter | refs/heads/Arducopter_Tri2 | Tools/autotest/apm_unit_tests/dev/arducopter_climb_descend.py | 250 | import arducopter
def unit_test(mavproxy, mav):
'''A scripted flight plan'''
if (
arducopter.calibrate_level(mavproxy, mav) and
arducopter.arm_motors(mavproxy, mav) and
arducopter.takeoff(mavproxy,mav, alt_min=30, takeoff_throttle=1510) and
arducopter.change_alt(mavproxy, mav, alt_min=60) and
arducopter.change_alt(mavproxy, mav, alt_min=20)
):
return True
return False
|
vertigo235/Sick-Beard-XEM | refs/heads/master | lib/bs4/__init__.py | 417 | """Beautiful Soup
Elixir and Tonic
"The Screen-Scraper's Friend"
http://www.crummy.com/software/BeautifulSoup/
Beautiful Soup uses a pluggable XML or HTML parser to parse a
(possibly invalid) document into a tree representation. Beautiful Soup
provides provides methods and Pythonic idioms that make it easy to
navigate, search, and modify the parse tree.
Beautiful Soup works with Python 2.6 and up. It works better if lxml
and/or html5lib is installed.
For more than you ever wanted to know about Beautiful Soup, see the
documentation:
http://www.crummy.com/software/BeautifulSoup/bs4/doc/
"""
__author__ = "Leonard Richardson ([email protected])"
__version__ = "4.3.2"
__copyright__ = "Copyright (c) 2004-2013 Leonard Richardson"
__license__ = "MIT"
__all__ = ['BeautifulSoup']
import os
import re
import warnings
from .builder import builder_registry, ParserRejectedMarkup
from .dammit import UnicodeDammit
from .element import (
CData,
Comment,
DEFAULT_OUTPUT_ENCODING,
Declaration,
Doctype,
NavigableString,
PageElement,
ProcessingInstruction,
ResultSet,
SoupStrainer,
Tag,
)
# The very first thing we do is give a useful error if someone is
# running this code under Python 3 without converting it.
syntax_error = u'You are trying to run the Python 2 version of Beautiful Soup under Python 3. This will not work. You need to convert the code, either by installing it (`python setup.py install`) or by running 2to3 (`2to3 -w bs4`).'
class BeautifulSoup(Tag):
"""
This class defines the basic interface called by the tree builders.
These methods will be called by the parser:
reset()
feed(markup)
The tree builder may call these methods from its feed() implementation:
handle_starttag(name, attrs) # See note about return value
handle_endtag(name)
handle_data(data) # Appends to the current data node
endData(containerClass=NavigableString) # Ends the current data node
No matter how complicated the underlying parser is, you should be
able to build a tree using 'start tag' events, 'end tag' events,
'data' events, and "done with data" events.
If you encounter an empty-element tag (aka a self-closing tag,
like HTML's <br> tag), call handle_starttag and then
handle_endtag.
"""
ROOT_TAG_NAME = u'[document]'
# If the end-user gives no indication which tree builder they
# want, look for one with these features.
DEFAULT_BUILDER_FEATURES = ['html', 'fast']
ASCII_SPACES = '\x20\x0a\x09\x0c\x0d'
def __init__(self, markup="", features=None, builder=None,
parse_only=None, from_encoding=None, **kwargs):
"""The Soup object is initialized as the 'root tag', and the
provided markup (which can be a string or a file-like object)
is fed into the underlying parser."""
if 'convertEntities' in kwargs:
warnings.warn(
"BS4 does not respect the convertEntities argument to the "
"BeautifulSoup constructor. Entities are always converted "
"to Unicode characters.")
if 'markupMassage' in kwargs:
del kwargs['markupMassage']
warnings.warn(
"BS4 does not respect the markupMassage argument to the "
"BeautifulSoup constructor. The tree builder is responsible "
"for any necessary markup massage.")
if 'smartQuotesTo' in kwargs:
del kwargs['smartQuotesTo']
warnings.warn(
"BS4 does not respect the smartQuotesTo argument to the "
"BeautifulSoup constructor. Smart quotes are always converted "
"to Unicode characters.")
if 'selfClosingTags' in kwargs:
del kwargs['selfClosingTags']
warnings.warn(
"BS4 does not respect the selfClosingTags argument to the "
"BeautifulSoup constructor. The tree builder is responsible "
"for understanding self-closing tags.")
if 'isHTML' in kwargs:
del kwargs['isHTML']
warnings.warn(
"BS4 does not respect the isHTML argument to the "
"BeautifulSoup constructor. You can pass in features='html' "
"or features='xml' to get a builder capable of handling "
"one or the other.")
def deprecated_argument(old_name, new_name):
if old_name in kwargs:
warnings.warn(
'The "%s" argument to the BeautifulSoup constructor '
'has been renamed to "%s."' % (old_name, new_name))
value = kwargs[old_name]
del kwargs[old_name]
return value
return None
parse_only = parse_only or deprecated_argument(
"parseOnlyThese", "parse_only")
from_encoding = from_encoding or deprecated_argument(
"fromEncoding", "from_encoding")
if len(kwargs) > 0:
arg = kwargs.keys().pop()
raise TypeError(
"__init__() got an unexpected keyword argument '%s'" % arg)
if builder is None:
if isinstance(features, basestring):
features = [features]
if features is None or len(features) == 0:
features = self.DEFAULT_BUILDER_FEATURES
builder_class = builder_registry.lookup(*features)
if builder_class is None:
raise FeatureNotFound(
"Couldn't find a tree builder with the features you "
"requested: %s. Do you need to install a parser library?"
% ",".join(features))
builder = builder_class()
self.builder = builder
self.is_xml = builder.is_xml
self.builder.soup = self
self.parse_only = parse_only
if hasattr(markup, 'read'): # It's a file-type object.
markup = markup.read()
elif len(markup) <= 256:
# Print out warnings for a couple beginner problems
# involving passing non-markup to Beautiful Soup.
# Beautiful Soup will still parse the input as markup,
# just in case that's what the user really wants.
if (isinstance(markup, unicode)
and not os.path.supports_unicode_filenames):
possible_filename = markup.encode("utf8")
else:
possible_filename = markup
is_file = False
try:
is_file = os.path.exists(possible_filename)
except Exception, e:
# This is almost certainly a problem involving
# characters not valid in filenames on this
# system. Just let it go.
pass
if is_file:
warnings.warn(
'"%s" looks like a filename, not markup. You should probably open this file and pass the filehandle into Beautiful Soup.' % markup)
if markup[:5] == "http:" or markup[:6] == "https:":
# TODO: This is ugly but I couldn't get it to work in
# Python 3 otherwise.
if ((isinstance(markup, bytes) and not b' ' in markup)
or (isinstance(markup, unicode) and not u' ' in markup)):
warnings.warn(
'"%s" looks like a URL. Beautiful Soup is not an HTTP client. You should probably use an HTTP client to get the document behind the URL, and feed that document to Beautiful Soup.' % markup)
for (self.markup, self.original_encoding, self.declared_html_encoding,
self.contains_replacement_characters) in (
self.builder.prepare_markup(markup, from_encoding)):
self.reset()
try:
self._feed()
break
except ParserRejectedMarkup:
pass
# Clear out the markup and remove the builder's circular
# reference to this object.
self.markup = None
self.builder.soup = None
def _feed(self):
# Convert the document to Unicode.
self.builder.reset()
self.builder.feed(self.markup)
# Close out any unfinished strings and close all the open tags.
self.endData()
while self.currentTag.name != self.ROOT_TAG_NAME:
self.popTag()
def reset(self):
Tag.__init__(self, self, self.builder, self.ROOT_TAG_NAME)
self.hidden = 1
self.builder.reset()
self.current_data = []
self.currentTag = None
self.tagStack = []
self.preserve_whitespace_tag_stack = []
self.pushTag(self)
def new_tag(self, name, namespace=None, nsprefix=None, **attrs):
"""Create a new tag associated with this soup."""
return Tag(None, self.builder, name, namespace, nsprefix, attrs)
def new_string(self, s, subclass=NavigableString):
"""Create a new NavigableString associated with this soup."""
navigable = subclass(s)
navigable.setup()
return navigable
def insert_before(self, successor):
raise NotImplementedError("BeautifulSoup objects don't support insert_before().")
def insert_after(self, successor):
raise NotImplementedError("BeautifulSoup objects don't support insert_after().")
def popTag(self):
tag = self.tagStack.pop()
if self.preserve_whitespace_tag_stack and tag == self.preserve_whitespace_tag_stack[-1]:
self.preserve_whitespace_tag_stack.pop()
#print "Pop", tag.name
if self.tagStack:
self.currentTag = self.tagStack[-1]
return self.currentTag
def pushTag(self, tag):
#print "Push", tag.name
if self.currentTag:
self.currentTag.contents.append(tag)
self.tagStack.append(tag)
self.currentTag = self.tagStack[-1]
if tag.name in self.builder.preserve_whitespace_tags:
self.preserve_whitespace_tag_stack.append(tag)
def endData(self, containerClass=NavigableString):
if self.current_data:
current_data = u''.join(self.current_data)
# If whitespace is not preserved, and this string contains
# nothing but ASCII spaces, replace it with a single space
# or newline.
if not self.preserve_whitespace_tag_stack:
strippable = True
for i in current_data:
if i not in self.ASCII_SPACES:
strippable = False
break
if strippable:
if '\n' in current_data:
current_data = '\n'
else:
current_data = ' '
# Reset the data collector.
self.current_data = []
# Should we add this string to the tree at all?
if self.parse_only and len(self.tagStack) <= 1 and \
(not self.parse_only.text or \
not self.parse_only.search(current_data)):
return
o = containerClass(current_data)
self.object_was_parsed(o)
def object_was_parsed(self, o, parent=None, most_recent_element=None):
"""Add an object to the parse tree."""
parent = parent or self.currentTag
most_recent_element = most_recent_element or self._most_recent_element
o.setup(parent, most_recent_element)
if most_recent_element is not None:
most_recent_element.next_element = o
self._most_recent_element = o
parent.contents.append(o)
def _popToTag(self, name, nsprefix=None, inclusivePop=True):
"""Pops the tag stack up to and including the most recent
instance of the given tag. If inclusivePop is false, pops the tag
        stack up to but *not* including the most recent instance of
the given tag."""
#print "Popping to %s" % name
if name == self.ROOT_TAG_NAME:
# The BeautifulSoup object itself can never be popped.
return
most_recently_popped = None
stack_size = len(self.tagStack)
for i in range(stack_size - 1, 0, -1):
t = self.tagStack[i]
if (name == t.name and nsprefix == t.prefix):
if inclusivePop:
most_recently_popped = self.popTag()
break
most_recently_popped = self.popTag()
return most_recently_popped
def handle_starttag(self, name, namespace, nsprefix, attrs):
"""Push a start tag on to the stack.
If this method returns None, the tag was rejected by the
SoupStrainer. You should proceed as if the tag had not occured
in the document. For instance, if this was a self-closing tag,
don't call handle_endtag.
"""
# print "Start tag %s: %s" % (name, attrs)
self.endData()
if (self.parse_only and len(self.tagStack) <= 1
and (self.parse_only.text
or not self.parse_only.search_tag(name, attrs))):
return None
tag = Tag(self, self.builder, name, namespace, nsprefix, attrs,
self.currentTag, self._most_recent_element)
if tag is None:
return tag
if self._most_recent_element:
self._most_recent_element.next_element = tag
self._most_recent_element = tag
self.pushTag(tag)
return tag
def handle_endtag(self, name, nsprefix=None):
#print "End tag: " + name
self.endData()
self._popToTag(name, nsprefix)
def handle_data(self, data):
self.current_data.append(data)
def decode(self, pretty_print=False,
eventual_encoding=DEFAULT_OUTPUT_ENCODING,
formatter="minimal"):
"""Returns a string or Unicode representation of this document.
To get Unicode, pass None for encoding."""
if self.is_xml:
# Print the XML declaration
encoding_part = ''
if eventual_encoding != None:
encoding_part = ' encoding="%s"' % eventual_encoding
prefix = u'<?xml version="1.0"%s?>\n' % encoding_part
else:
prefix = u''
if not pretty_print:
indent_level = None
else:
indent_level = 0
return prefix + super(BeautifulSoup, self).decode(
indent_level, eventual_encoding, formatter)
# Alias to make it easier to type import: 'from bs4 import _soup'
_s = BeautifulSoup
_soup = BeautifulSoup
class BeautifulStoneSoup(BeautifulSoup):
"""Deprecated interface to an XML parser."""
def __init__(self, *args, **kwargs):
kwargs['features'] = 'xml'
warnings.warn(
'The BeautifulStoneSoup class is deprecated. Instead of using '
'it, pass features="xml" into the BeautifulSoup constructor.')
super(BeautifulStoneSoup, self).__init__(*args, **kwargs)
class StopParsing(Exception):
pass
class FeatureNotFound(ValueError):
pass
#By default, act as an HTML pretty-printer.
if __name__ == '__main__':
import sys
soup = BeautifulSoup(sys.stdin)
print soup.prettify()
|
tiagofrepereira2012/pre_annotation | refs/heads/master | bootstrap-buildout.py | 172 | ##############################################################################
#
# Copyright (c) 2006 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Bootstrap a buildout-based project
Simply run this script in a directory containing a buildout.cfg.
The script accepts buildout command-line options, so you can
use the -c option to specify an alternate configuration file.
"""
import os
import shutil
import sys
import tempfile
from optparse import OptionParser
tmpeggs = tempfile.mkdtemp()
usage = '''\
[DESIRED PYTHON FOR BUILDOUT] bootstrap.py [options]
Bootstraps a buildout-based project.
Simply run this script in a directory containing a buildout.cfg, using the
Python that you want bin/buildout to use.
Note that by using --find-links to point to local resources, you can keep
this script from going over the network.
'''
parser = OptionParser(usage=usage)
parser.add_option("-v", "--version", help="use a specific zc.buildout version")
parser.add_option("-t", "--accept-buildout-test-releases",
dest='accept_buildout_test_releases',
action="store_true", default=False,
help=("Normally, if you do not specify a --version, the "
"bootstrap script and buildout gets the newest "
"*final* versions of zc.buildout and its recipes and "
"extensions for you. If you use this flag, "
"bootstrap and buildout will get the newest releases "
"even if they are alphas or betas."))
parser.add_option("-c", "--config-file",
help=("Specify the path to the buildout configuration "
"file to be used."))
parser.add_option("-f", "--find-links",
help=("Specify a URL to search for buildout releases"))
parser.add_option("--allow-site-packages",
action="store_true", default=False,
help=("Let bootstrap.py use existing site packages"))
parser.add_option("--setuptools-version",
help="use a specific setuptools version")
options, args = parser.parse_args()
######################################################################
# load/install setuptools
try:
if options.allow_site_packages:
import setuptools
import pkg_resources
from urllib.request import urlopen
except ImportError:
from urllib2 import urlopen
ez = {}
exec(urlopen('https://bootstrap.pypa.io/ez_setup.py').read(), ez)
if not options.allow_site_packages:
# ez_setup imports site, which adds site packages
# this will remove them from the path to ensure that incompatible versions
# of setuptools are not in the path
import site
# inside a virtualenv, there is no 'getsitepackages'.
# We can't remove these reliably
if hasattr(site, 'getsitepackages'):
for sitepackage_path in site.getsitepackages():
sys.path[:] = [x for x in sys.path if sitepackage_path not in x]
setup_args = dict(to_dir=tmpeggs, download_delay=0)
if options.setuptools_version is not None:
setup_args['version'] = options.setuptools_version
ez['use_setuptools'](**setup_args)
import setuptools
import pkg_resources
# This does not (always?) update the default working set. We will
# do it.
for path in sys.path:
if path not in pkg_resources.working_set.entries:
pkg_resources.working_set.add_entry(path)
######################################################################
# Install buildout
ws = pkg_resources.working_set
cmd = [sys.executable, '-c',
'from setuptools.command.easy_install import main; main()',
'-mZqNxd', tmpeggs]
find_links = os.environ.get(
'bootstrap-testing-find-links',
options.find_links or
('http://downloads.buildout.org/'
if options.accept_buildout_test_releases else None)
)
if find_links:
cmd.extend(['-f', find_links])
setuptools_path = ws.find(
pkg_resources.Requirement.parse('setuptools')).location
requirement = 'zc.buildout'
version = options.version
if version is None and not options.accept_buildout_test_releases:
# Figure out the most recent final version of zc.buildout.
import setuptools.package_index
_final_parts = '*final-', '*final'
def _final_version(parsed_version):
try:
return not parsed_version.is_prerelease
except AttributeError:
# Older setuptools
for part in parsed_version:
if (part[:1] == '*') and (part not in _final_parts):
return False
return True
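    # Illustrative: '2.9.5' parses as a final release, while '2.9.5b1' is a
    # pre-release and is filtered out here, so bootstrap pins the newest
    # *final* zc.buildout (this block is skipped entirely when
    # -t/--accept-buildout-test-releases is given).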
index = setuptools.package_index.PackageIndex(
search_path=[setuptools_path])
if find_links:
index.add_find_links((find_links,))
req = pkg_resources.Requirement.parse(requirement)
if index.obtain(req) is not None:
best = []
bestv = None
for dist in index[req.project_name]:
distv = dist.parsed_version
if _final_version(distv):
if bestv is None or distv > bestv:
best = [dist]
bestv = distv
elif distv == bestv:
best.append(dist)
if best:
best.sort()
version = best[-1].version
if version:
requirement = '=='.join((requirement, version))
cmd.append(requirement)
import subprocess
if subprocess.call(cmd, env=dict(os.environ, PYTHONPATH=setuptools_path)) != 0:
raise Exception(
"Failed to execute command:\n%s" % repr(cmd)[1:-1])
######################################################################
# Import and run buildout
ws.add_entry(tmpeggs)
ws.require(requirement)
import zc.buildout.buildout
if not [a for a in args if '=' not in a]:
args.append('bootstrap')
# if -c was provided, we push it back into args for buildout's main function
if options.config_file is not None:
args[0:0] = ['-c', options.config_file]
zc.buildout.buildout.main(args)
shutil.rmtree(tmpeggs)
|
bdaroz/the-blue-alliance | refs/heads/master | tests/suggestions/test_media_url_parse.py | 2 | import json
import unittest2
from google.appengine.ext import testbed
from consts.media_type import MediaType
from helpers.media_helper import MediaParser
from helpers.webcast_helper import WebcastParser
class TestMediaUrlParser(unittest2.TestCase):
def setUp(cls):
cls.testbed = testbed.Testbed()
cls.testbed.activate()
cls.testbed.init_urlfetch_stub()
def tearDown(cls):
cls.testbed.deactivate()
def test_youtube_parse(self):
yt_long = MediaParser.partial_media_dict_from_url("http://www.youtube.com/watch?v=I-IrVbsl_K8")
self.assertEqual(yt_long['media_type_enum'], MediaType.YOUTUBE_VIDEO)
self.assertEqual(yt_long['foreign_key'], "I-IrVbsl_K8")
yt_short = MediaParser.partial_media_dict_from_url("http://youtu.be/I-IrVbsl_K8")
self.assertEqual(yt_short['media_type_enum'], MediaType.YOUTUBE_VIDEO)
self.assertEqual(yt_short['foreign_key'], "I-IrVbsl_K8")
yt_from_playlist = MediaParser.partial_media_dict_from_url("https://www.youtube.com/watch?v=VP992UKFbko&index=1&list=PLZT9pIgNOV6ZE0EgstWeoRWGWT3uoaszm")
self.assertEqual(yt_from_playlist['media_type_enum'], MediaType.YOUTUBE_VIDEO)
self.assertEqual(yt_from_playlist['foreign_key'], 'VP992UKFbko')
    # def test_cdphotothread_parse(self):
# cd = MediaParser.partial_media_dict_from_url(
# "https://www.chiefdelphi.com/media/photos/41999")
# self.assertEqual(cd['media_type_enum'], MediaType.CD_PHOTO_THREAD)
# self.assertEqual(cd['foreign_key'], "41999")
# self.assertTrue(cd['details_json'])
# details = json.loads(cd['details_json'])
# self.assertEqual(details['image_partial'], "a88/a880fa0d65c6b49ddb93323bc7d2e901_l.jpg")
def test_imgur_parse(self):
imgur_img = MediaParser.partial_media_dict_from_url("http://imgur.com/zYqWbBh")
self.assertEqual(imgur_img['media_type_enum'], MediaType.IMGUR)
self.assertEqual(imgur_img['foreign_key'], "zYqWbBh")
imgur_img = MediaParser.partial_media_dict_from_url("http://i.imgur.com/zYqWbBh.png")
self.assertEqual(imgur_img['media_type_enum'], MediaType.IMGUR)
self.assertEqual(imgur_img['foreign_key'], "zYqWbBh")
self.assertEqual(MediaParser.partial_media_dict_from_url("http://imgur.com/r/aww"), None)
self.assertEqual(MediaParser.partial_media_dict_from_url("http://imgur.com/a/album"), None)
def test_fb_profile_parse(self):
result = MediaParser.partial_media_dict_from_url("http://facebook.com/theuberbots")
self.assertEqual(result['media_type_enum'], MediaType.FACEBOOK_PROFILE)
self.assertEqual(result['is_social'], True)
self.assertEqual(result['foreign_key'], 'theuberbots')
self.assertEqual(result['site_name'], MediaType.type_names[MediaType.FACEBOOK_PROFILE])
self.assertEqual(result['profile_url'], 'https://www.facebook.com/theuberbots')
def test_twitter_profile_parse(self):
result = MediaParser.partial_media_dict_from_url("https://twitter.com/team1124")
self.assertEqual(result['media_type_enum'], MediaType.TWITTER_PROFILE)
self.assertEqual(result['is_social'], True)
self.assertEqual(result['foreign_key'], 'team1124')
self.assertEqual(result['site_name'], MediaType.type_names[MediaType.TWITTER_PROFILE])
self.assertEqual(result['profile_url'], 'https://twitter.com/team1124')
def test_youtube_profile_parse(self):
result = MediaParser.partial_media_dict_from_url("https://www.youtube.com/Uberbots1124")
self.assertEqual(result['media_type_enum'], MediaType.YOUTUBE_CHANNEL)
self.assertEqual(result['is_social'], True)
self.assertEqual(result['foreign_key'], 'uberbots1124')
self.assertEqual(result['site_name'], MediaType.type_names[MediaType.YOUTUBE_CHANNEL])
self.assertEqual(result['profile_url'], 'https://www.youtube.com/uberbots1124')
short_result = MediaParser.partial_media_dict_from_url("https://www.youtube.com/Uberbots1124")
self.assertEqual(short_result['media_type_enum'], MediaType.YOUTUBE_CHANNEL)
self.assertEqual(short_result['is_social'], True)
self.assertEqual(short_result['foreign_key'], 'uberbots1124')
self.assertEqual(short_result['site_name'], MediaType.type_names[MediaType.YOUTUBE_CHANNEL])
self.assertEqual(short_result['profile_url'], 'https://www.youtube.com/uberbots1124')
gapps_result = MediaParser.partial_media_dict_from_url("https://www.youtube.com/c/tnt3102org")
self.assertEqual(gapps_result['media_type_enum'], MediaType.YOUTUBE_CHANNEL)
self.assertEqual(gapps_result['is_social'], True)
self.assertEqual(gapps_result['foreign_key'], 'tnt3102org')
self.assertEqual(gapps_result['site_name'], MediaType.type_names[MediaType.YOUTUBE_CHANNEL])
self.assertEqual(gapps_result['profile_url'], 'https://www.youtube.com/tnt3102org')
def test_github_profile_parse(self):
result = MediaParser.partial_media_dict_from_url("https://github.com/frc1124")
self.assertEqual(result['media_type_enum'], MediaType.GITHUB_PROFILE)
self.assertEqual(result['is_social'], True)
self.assertEqual(result['foreign_key'], 'frc1124')
self.assertEqual(result['site_name'], MediaType.type_names[MediaType.GITHUB_PROFILE])
self.assertEqual(result['profile_url'], 'https://github.com/frc1124')
def test_instagram_profile_parse(self):
result = MediaParser.partial_media_dict_from_url("https://www.instagram.com/4hteamneutrino")
self.assertEqual(result['media_type_enum'], MediaType.INSTAGRAM_PROFILE)
self.assertEqual(result['is_social'], True)
self.assertEqual(result['foreign_key'], '4hteamneutrino')
self.assertEqual(result['site_name'], MediaType.type_names[MediaType.INSTAGRAM_PROFILE])
self.assertEqual(result['profile_url'], 'https://www.instagram.com/4hteamneutrino')
def test_periscope_profile_parse(self):
result = MediaParser.partial_media_dict_from_url("https://www.periscope.tv/evolution2626")
self.assertEqual(result['media_type_enum'], MediaType.PERISCOPE_PROFILE)
self.assertEqual(result['is_social'], True)
self.assertEqual(result['foreign_key'], 'evolution2626')
self.assertEqual(result['site_name'], MediaType.type_names[MediaType.PERISCOPE_PROFILE])
self.assertEqual(result['profile_url'], 'https://www.periscope.tv/evolution2626')
def test_grabcad_link(self):
result = MediaParser.partial_media_dict_from_url("https://grabcad.com/library/2016-148-robowranglers-1")
self.assertEqual(result['media_type_enum'], MediaType.GRABCAD)
self.assertEqual(result['is_social'], False)
self.assertEqual(result['foreign_key'], '2016-148-robowranglers-1')
details = json.loads(result['details_json'])
self.assertEqual(details['model_name'], '2016 | 148 - Robowranglers')
self.assertEqual(details['model_description'], 'Renegade')
self.assertEqual(details['model_image'], 'https://d2t1xqejof9utc.cloudfront.net/screenshots/pics/96268d5c5e6c1b7fe8892f713813bb40/card.jpg')
self.assertEqual(details['model_created'], '2016-09-19T11:52:23Z')
def test_instagram_image(self):
result = MediaParser.partial_media_dict_from_url("https://www.instagram.com/p/BUnZiriBYre/")
self.assertEqual(result['media_type_enum'], MediaType.INSTAGRAM_IMAGE)
self.assertEqual(result['foreign_key'], "BUnZiriBYre")
details = json.loads(result['details_json'])
self.assertEqual(details['title'], "FRC 195 @ 2017 Battlecry @ WPI")
self.assertEqual(details['author_name'], '1stroboticsrocks')
self.assertIsNotNone(details.get('thumbnail_url', None))
def test_unsupported_url_parse(self):
self.assertEqual(MediaParser.partial_media_dict_from_url("http://foo.bar"), None)
class TestWebcastUrlParser(unittest2.TestCase):
@classmethod
def setUpClass(cls):
cls.testbed = testbed.Testbed()
cls.testbed.activate()
cls.testbed.init_urlfetch_stub()
@classmethod
def tearDownClass(cls):
cls.testbed.deactivate()
def testTwitchUrl(self):
res = WebcastParser.webcast_dict_from_url("http://twitch.tv/frcgamesense")
self.assertIsNotNone(res)
self.assertEqual(res['type'], 'twitch')
self.assertEqual(res['channel'], 'frcgamesense')
unknown = WebcastParser.webcast_dict_from_url("http://twitch.tv/")
self.assertIsNone(unknown)
def testYouTubeUrl(self):
yt_long = WebcastParser.webcast_dict_from_url("http://www.youtube.com/watch?v=I-IrVbsl_K8")
self.assertIsNotNone(yt_long)
self.assertEqual(yt_long['type'], 'youtube')
self.assertEqual(yt_long['channel'], 'I-IrVbsl_K8')
yt_short = WebcastParser.webcast_dict_from_url("http://youtu.be/I-IrVbsl_K8")
self.assertIsNotNone(yt_short)
self.assertEqual(yt_short['type'], 'youtube')
self.assertEqual(yt_short['channel'], 'I-IrVbsl_K8')
bad_long = WebcastParser.webcast_dict_from_url('"http://www.youtube.com/')
self.assertIsNone(bad_long)
bad_short = WebcastParser.webcast_dict_from_url("http://youtu.be/")
self.assertIsNone(bad_short)
def testUstream(self):
res = WebcastParser.webcast_dict_from_url('http://www.ustream.tv/decoraheagles')
self.assertIsNotNone(res)
self.assertEqual(res['type'], 'ustream')
self.assertEqual(res['channel'], '3064708')
bad = WebcastParser.webcast_dict_from_url('http://ustream.tv/')
self.assertIsNone(bad)
def testUnknownUrl(self):
bad = WebcastParser.webcast_dict_from_url("http://mywebsite.somewebcast")
self.assertIsNone(bad)
|
cubieboard/openbox_external_chromium | refs/heads/openbox | net/tools/testserver/device_management.py | 66 | #!/usr/bin/python2.5
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A bare-bones test server for testing cloud policy support.
This implements a simple cloud policy test server that can be used to test
chrome's device management service client. The policy information is read from
the file named device_management in the server's data directory. It contains
enforced and recommended policies for the device and user scope, and a list
of managed users.
The format of the file is JSON. The root dictionary contains a list under the
key "managed_users". It contains auth tokens for which the server will claim
that the user is managed. The token string "*" indicates that all users are
claimed to be managed. Other keys in the root dictionary identify request
scopes. Each request scope is described by a dictionary that holds two
sub-dictionaries: "mandatory" and "recommended". Both these hold the policy
definitions as key/value stores, their format is identical to what the Linux
implementation reads from /etc.
Example:
{
"chromeos/device": {
"mandatory": {
"HomepageLocation" : "http://www.chromium.org"
},
"recommended": {
"JavascriptEnabled": false,
},
},
"managed_users": [
"secret123456"
]
}
"""
import cgi
import logging
import os
import random
import re
import sys
import time
import tlslite
import tlslite.api
import tlslite.utils
# The name and availability of the json module varies in python versions.
try:
import simplejson as json
except ImportError:
try:
import json
except ImportError:
json = None
import asn1der
import device_management_backend_pb2 as dm
import cloud_policy_pb2 as cp
import chrome_device_policy_pb2 as dp
# ASN.1 object identifier for PKCS#1/RSA.
PKCS1_RSA_OID = '\x2a\x86\x48\x86\xf7\x0d\x01\x01\x01'
class RequestHandler(object):
"""Decodes and handles device management requests from clients.
The handler implements all the request parsing and protobuf message decoding
and encoding. It calls back into the server to lookup, register, and
unregister clients.
"""
def __init__(self, server, path, headers, request):
"""Initialize the handler.
Args:
server: The TestServer object to use for (un)registering clients.
path: A string containing the request path and query parameters.
headers: A rfc822.Message-like object containing HTTP headers.
request: The request data received from the client as a string.
"""
self._server = server
self._path = path
self._headers = headers
self._request = request
self._params = None
def GetUniqueParam(self, name):
"""Extracts a unique query parameter from the request.
Args:
name: Names the parameter to fetch.
Returns:
The parameter value or None if the parameter doesn't exist or is not
unique.
"""
if not self._params:
self._params = cgi.parse_qs(self._path[self._path.find('?') + 1:])
param_list = self._params.get(name, [])
if len(param_list) == 1:
return param_list[0]
    return None
def HandleRequest(self):
"""Handles a request.
Parses the data supplied at construction time and returns a pair indicating
http status code and response data to be sent back to the client.
Returns:
A tuple of HTTP status code and response data to send to the client.
"""
rmsg = dm.DeviceManagementRequest()
rmsg.ParseFromString(self._request)
logging.debug('auth -> ' + self._headers.getheader('Authorization', ''))
logging.debug('deviceid -> ' + self.GetUniqueParam('deviceid'))
self.DumpMessage('Request', rmsg)
request_type = self.GetUniqueParam('request')
# Check server side requirements, as defined in
# device_management_backend.proto.
if (self.GetUniqueParam('devicetype') != '2' or
self.GetUniqueParam('apptype') != 'Chrome' or
(request_type != 'ping' and
len(self.GetUniqueParam('deviceid')) >= 64) or
len(self.GetUniqueParam('agent')) >= 64):
return (400, 'Invalid request parameter')
if request_type == 'register':
return self.ProcessRegister(rmsg.register_request)
elif request_type == 'unregister':
return self.ProcessUnregister(rmsg.unregister_request)
elif request_type == 'policy' or request_type == 'ping':
return self.ProcessPolicy(rmsg.policy_request, request_type)
else:
return (400, 'Invalid request parameter')
def CheckGoogleLogin(self):
"""Extracts the GoogleLogin auth token from the HTTP request, and
returns it. Returns None if the token is not present.
"""
match = re.match('GoogleLogin auth=(\\w+)',
self._headers.getheader('Authorization', ''))
if not match:
return None
return match.group(1)
def ProcessRegister(self, msg):
"""Handles a register request.
Checks the query for authorization and device identifier, registers the
device with the server and constructs a response.
Args:
msg: The DeviceRegisterRequest message received from the client.
Returns:
A tuple of HTTP status code and response data to send to the client.
"""
# Check the auth token and device ID.
if not self.CheckGoogleLogin():
return (403, 'No authorization')
device_id = self.GetUniqueParam('deviceid')
if not device_id:
return (400, 'Missing device identifier')
token_info = self._server.RegisterDevice(device_id,
msg.machine_id,
msg.type)
# Send back the reply.
response = dm.DeviceManagementResponse()
response.register_response.device_management_token = (
token_info['device_token'])
response.register_response.machine_name = token_info['machine_name']
self.DumpMessage('Response', response)
return (200, response.SerializeToString())
def ProcessUnregister(self, msg):
"""Handles a register request.
Checks for authorization, unregisters the device and constructs the
response.
Args:
msg: The DeviceUnregisterRequest message received from the client.
Returns:
A tuple of HTTP status code and response data to send to the client.
"""
# Check the management token.
    token, response = self.CheckToken()
if not token:
return response
# Unregister the device.
    self._server.UnregisterDevice(token)
# Prepare and send the response.
response = dm.DeviceManagementResponse()
response.unregister_response.CopyFrom(dm.DeviceUnregisterResponse())
self.DumpMessage('Response', response)
return (200, response.SerializeToString())
def ProcessInitialPolicy(self, msg):
"""Handles a 'preregister policy' request.
    Queries the list of managed users and tells the client whether their
    user is managed or not.
Args:
msg: The PolicyFetchRequest message received from the client.
Returns:
A tuple of HTTP status code and response data to send to the client.
"""
# Check the GAIA token.
auth = self.CheckGoogleLogin()
if not auth:
return (403, 'No authorization')
chrome_initial_settings = dm.ChromeInitialSettingsProto()
if ('*' in self._server.policy['managed_users'] or
auth in self._server.policy['managed_users']):
      chrome_initial_settings.enrollment_provision = (
          dm.ChromeInitialSettingsProto.MANAGED)
    else:
      chrome_initial_settings.enrollment_provision = (
          dm.ChromeInitialSettingsProto.UNMANAGED)
policy_data = dm.PolicyData()
policy_data.policy_type = msg.policy_type
policy_data.policy_value = chrome_initial_settings.SerializeToString()
# Prepare and send the response.
response = dm.DeviceManagementResponse()
fetch_response = response.policy_response.response.add()
fetch_response.policy_data = (
policy_data.SerializeToString())
self.DumpMessage('Response', response)
return (200, response.SerializeToString())
def ProcessDevicePolicy(self, msg):
"""Handles a policy request that uses the deprecated protcol.
TODO(gfeher): Remove this when we certainly don't need it.
Checks for authorization, encodes the policy into protobuf representation
and constructs the response.
Args:
msg: The DevicePolicyRequest message received from the client.
Returns:
A tuple of HTTP status code and response data to send to the client.
"""
# Check the management token.
token, response = self.CheckToken()
if not token:
return response
# Stuff the policy dictionary into a response message and send it back.
response = dm.DeviceManagementResponse()
response.policy_response.CopyFrom(dm.DevicePolicyResponse())
    # Respond only if the client requested policy for the cros/device scope,
    # since that's where chrome policy is supposed to live.
if msg.policy_scope == 'chromeos/device':
policy = self._server.policy['google/chromeos/user']['mandatory']
setting = response.policy_response.setting.add()
setting.policy_key = 'chrome-policy'
policy_value = dm.GenericSetting()
for (key, value) in policy.iteritems():
entry = policy_value.named_value.add()
entry.name = key
entry_value = dm.GenericValue()
if isinstance(value, bool):
entry_value.value_type = dm.GenericValue.VALUE_TYPE_BOOL
entry_value.bool_value = value
elif isinstance(value, int):
entry_value.value_type = dm.GenericValue.VALUE_TYPE_INT64
entry_value.int64_value = value
elif isinstance(value, str) or isinstance(value, unicode):
entry_value.value_type = dm.GenericValue.VALUE_TYPE_STRING
entry_value.string_value = value
elif isinstance(value, list):
entry_value.value_type = dm.GenericValue.VALUE_TYPE_STRING_ARRAY
for list_entry in value:
entry_value.string_array.append(str(list_entry))
entry.value.CopyFrom(entry_value)
setting.policy_value.CopyFrom(policy_value)
self.DumpMessage('Response', response)
return (200, response.SerializeToString())
def ProcessPolicy(self, msg, request_type):
"""Handles a policy request.
Checks for authorization, encodes the policy into protobuf representation
and constructs the response.
Args:
msg: The DevicePolicyRequest message received from the client.
Returns:
A tuple of HTTP status code and response data to send to the client.
"""
if msg.request:
for request in msg.request:
if request.policy_type == 'google/chromeos/unregistered_user':
if request_type != 'ping':
return (400, 'Invalid request type')
return self.ProcessInitialPolicy(request)
elif (request.policy_type in
('google/chromeos/user', 'google/chromeos/device')):
if request_type != 'policy':
return (400, 'Invalid request type')
return self.ProcessCloudPolicy(request)
else:
return (400, 'Invalid policy_type')
else:
return self.ProcessDevicePolicy(msg)
def SetProtobufMessageField(self, group_message, field, field_value):
'''Sets a field in a protobuf message.
Args:
group_message: The protobuf message.
      field: The field of the message to set; it should be a member of
group_message.DESCRIPTOR.fields.
field_value: The value to set.
'''
if field.label == field.LABEL_REPEATED:
assert type(field_value) == list
entries = group_message.__getattribute__(field.name)
for list_item in field_value:
entries.append(list_item)
return
elif field.type == field.TYPE_BOOL:
assert type(field_value) == bool
elif field.type == field.TYPE_STRING:
assert type(field_value) == str or type(field_value) == unicode
elif field.type == field.TYPE_INT64:
assert type(field_value) == int
elif (field.type == field.TYPE_MESSAGE and
field.message_type.name == 'StringList'):
assert type(field_value) == list
entries = group_message.__getattribute__(field.name).entries
for list_item in field_value:
entries.append(list_item)
return
else:
raise Exception('Unknown field type %s' % field.type)
group_message.__setattr__(field.name, field_value)
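    # Illustrative: for a TYPE_BOOL field named 'JavascriptEnabled', calling
    # SetProtobufMessageField(msg, field, False) type-checks the value and
    # then behaves like msg.JavascriptEnabled = False.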
def GatherDevicePolicySettings(self, settings, policies):
'''Copies all the policies from a dictionary into a protobuf of type
    ChromeDeviceSettingsProto.
Args:
settings: The destination ChromeDeviceSettingsProto protobuf.
policies: The source dictionary containing policies in JSON format.
'''
for group in settings.DESCRIPTOR.fields:
# Create protobuf message for group.
group_message = eval('dp.' + group.message_type.name + '()')
# Indicates if at least one field was set in |group_message|.
got_fields = False
# Iterate over fields of the message and feed them from the
# policy config file.
for field in group_message.DESCRIPTOR.fields:
field_value = None
if field.name in policies:
got_fields = True
field_value = policies[field.name]
self.SetProtobufMessageField(group_message, field, field_value)
if got_fields:
settings.__getattribute__(group.name).CopyFrom(group_message)
def GatherUserPolicySettings(self, settings, policies):
'''Copies all the policies from a dictionary into a protobuf of type
CloudPolicySettings.
Args:
settings: The destination: a CloudPolicySettings protobuf.
policies: The source: a dictionary containing policies under keys
'recommended' and 'mandatory'.
'''
for group in settings.DESCRIPTOR.fields:
# Create protobuf message for group.
group_message = eval('cp.' + group.message_type.name + '()')
# We assume that this policy group will be recommended, and only switch
# it to mandatory if at least one of its members is mandatory.
group_message.policy_options.mode = cp.PolicyOptions.RECOMMENDED
# Indicates if at least one field was set in |group_message|.
got_fields = False
# Iterate over fields of the message and feed them from the
# policy config file.
for field in group_message.DESCRIPTOR.fields:
field_value = None
if field.name in policies['mandatory']:
group_message.policy_options.mode = cp.PolicyOptions.MANDATORY
field_value = policies['mandatory'][field.name]
elif field.name in policies['recommended']:
field_value = policies['recommended'][field.name]
if field_value != None:
got_fields = True
self.SetProtobufMessageField(group_message, field, field_value)
if got_fields:
settings.__getattribute__(group.name).CopyFrom(group_message)
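    # Illustrative (policy name is an example): given
    #   {'mandatory': {'HomepageLocation': 'http://example.com'},
    #    'recommended': {}}
    # the matching group is emitted with mode MANDATORY and HomepageLocation
    # set; groups for which no field is configured are skipped entirely.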
def ProcessCloudPolicy(self, msg):
"""Handles a cloud policy request. (New protocol for policy requests.)
Checks for authorization, encodes the policy into protobuf representation,
    signs it and constructs the response.
Args:
msg: The CloudPolicyRequest message received from the client.
Returns:
A tuple of HTTP status code and response data to send to the client.
"""
token_info, error = self.CheckToken()
if not token_info:
return error
# Response is only given if the scope is specified in the config file.
# Normally 'google/chromeos/device' and 'google/chromeos/user' should be
# accepted.
policy_value = ''
if (msg.policy_type in token_info['allowed_policy_types'] and
msg.policy_type in self._server.policy):
if msg.policy_type == 'google/chromeos/user':
settings = cp.CloudPolicySettings()
self.GatherUserPolicySettings(settings,
self._server.policy[msg.policy_type])
policy_value = settings.SerializeToString()
elif msg.policy_type == 'google/chromeos/device':
settings = dp.ChromeDeviceSettingsProto()
self.GatherDevicePolicySettings(settings,
self._server.policy[msg.policy_type])
policy_value = settings.SerializeToString()
# Figure out the key we want to use. If multiple keys are configured, the
# server will rotate through them in a round-robin fashion.
signing_key = None
req_key = None
key_version = 1
nkeys = len(self._server.keys)
if msg.signature_type == dm.PolicyFetchRequest.SHA1_RSA and nkeys > 0:
if msg.public_key_version in range(1, nkeys + 1):
# requested key exists, use for signing and rotate.
req_key = self._server.keys[msg.public_key_version - 1]['private_key']
key_version = (msg.public_key_version % nkeys) + 1
signing_key = self._server.keys[key_version - 1]
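        # Illustrative rotation with three configured keys: a request carrying
        # public_key_version == 3 is answered with key_version (3 % 3) + 1 == 1,
        # so repeated fetches cycle through 1 -> 2 -> 3 -> 1.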
# Fill the policy data protobuf.
policy_data = dm.PolicyData()
policy_data.policy_type = msg.policy_type
policy_data.timestamp = int(time.time() * 1000)
    policy_data.request_token = token_info['device_token']
policy_data.policy_value = policy_value
policy_data.machine_name = token_info['machine_name']
if signing_key:
policy_data.public_key_version = key_version
policy_data.username = self._server.username
policy_data.device_id = token_info['device_id']
signed_data = policy_data.SerializeToString()
response = dm.DeviceManagementResponse()
fetch_response = response.policy_response.response.add()
fetch_response.policy_data = signed_data
if signing_key:
fetch_response.policy_data_signature = (
signing_key['private_key'].hashAndSign(signed_data).tostring())
if msg.public_key_version != key_version:
fetch_response.new_public_key = signing_key['public_key']
if req_key:
fetch_response.new_public_key_signature = (
req_key.hashAndSign(fetch_response.new_public_key).tostring())
self.DumpMessage('Response', response)
return (200, response.SerializeToString())
def CheckToken(self):
"""Helper for checking whether the client supplied a valid DM token.
    Extracts the token from the request and passes it to the server in order
    to look up the client.
Returns:
A pair of token information record and error response. If the first
element is None, then the second contains an error code to send back to
the client. Otherwise the first element is the same structure that is
returned by LookupToken().
"""
error = None
dmtoken = None
request_device_id = self.GetUniqueParam('deviceid')
match = re.match('GoogleDMToken token=(\\w+)',
self._headers.getheader('Authorization', ''))
if match:
dmtoken = match.group(1)
if not dmtoken:
error = dm.DeviceManagementResponse.DEVICE_MANAGEMENT_TOKEN_INVALID
else:
token_info = self._server.LookupToken(dmtoken)
if (not token_info or
not request_device_id or
token_info['device_id'] != request_device_id):
error = dm.DeviceManagementResponse.DEVICE_NOT_FOUND
else:
return (token_info, None)
response = dm.DeviceManagementResponse()
response.error = error
self.DumpMessage('Response', response)
return (None, (200, response.SerializeToString()))
def DumpMessage(self, label, msg):
"""Helper for logging an ASCII dump of a protobuf message."""
logging.debug('%s\n%s' % (label, str(msg)))
class TestServer(object):
"""Handles requests and keeps global service state."""
def __init__(self, policy_path, private_key_paths, policy_user):
"""Initializes the server.
Args:
policy_path: Names the file to read JSON-formatted policy from.
private_key_paths: List of paths to read private keys from.
"""
self._registered_tokens = {}
self.policy = {}
    # There is no way for the testserver to know the user name belonging to
# the GAIA auth token we received (short of actually talking to GAIA). To
# address this, we have a command line parameter to set the username that
# the server should report to the client.
self.username = policy_user
if json is None:
print 'No JSON module, cannot parse policy information'
    else:
try:
self.policy = json.loads(open(policy_path).read())
except IOError:
print 'Failed to load policy from %s' % policy_path
self.keys = []
if private_key_paths:
# Load specified keys from the filesystem.
for key_path in private_key_paths:
try:
key = tlslite.api.parsePEMKey(open(key_path).read(), private=True)
except IOError:
print 'Failed to load private key from %s' % key_path
continue
        assert key is not None
self.keys.append({ 'private_key' : key })
else:
# Generate a key if none were specified.
key = tlslite.api.generateRSAKey(1024)
      assert key is not None
self.keys.append({ 'private_key' : key })
# Derive the public keys from the loaded private keys.
for entry in self.keys:
key = entry['private_key']
algorithm = asn1der.Sequence(
[ asn1der.Data(asn1der.OBJECT_IDENTIFIER, PKCS1_RSA_OID),
asn1der.Data(asn1der.NULL, '') ])
rsa_pubkey = asn1der.Sequence([ asn1der.Integer(key.n),
asn1der.Integer(key.e) ])
pubkey = asn1der.Sequence([ algorithm, asn1der.Bitstring(rsa_pubkey) ])
      entry['public_key'] = pubkey
def HandleRequest(self, path, headers, request):
"""Handles a request.
Args:
path: The request path and query parameters received from the client.
headers: A rfc822.Message-like object containing HTTP headers.
request: The request data received from the client as a string.
Returns:
A pair of HTTP status code and response data to send to the client.
"""
handler = RequestHandler(self, path, headers, request)
return handler.HandleRequest()
def RegisterDevice(self, device_id, machine_id, type):
"""Registers a device or user and generates a DM token for it.
Args:
device_id: The device identifier provided by the client.
Returns:
The newly generated device token for the device.
"""
    dmtoken = ''.join(random.choice('0123456789abcdef') for _ in xrange(32))
allowed_policy_types = {
dm.DeviceRegisterRequest.USER: ['google/chromeos/user'],
dm.DeviceRegisterRequest.DEVICE: ['google/chromeos/device'],
dm.DeviceRegisterRequest.TT: ['google/chromeos/user'],
}
self._registered_tokens[dmtoken] = {
'device_id': device_id,
'device_token': dmtoken,
'allowed_policy_types': allowed_policy_types[type],
'machine_name': 'chromeos-' + machine_id,
}
return self._registered_tokens[dmtoken]
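    # Illustrative record for a USER registration (values are examples):
    #   {'device_id': 'device-1', 'device_token': '1a2b3c...',
    #    'allowed_policy_types': ['google/chromeos/user'],
    #    'machine_name': 'chromeos-machine-1'}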
def LookupToken(self, dmtoken):
"""Looks up a device or a user by DM token.
Args:
dmtoken: The device management token provided by the client.
Returns:
A dictionary with information about a device or user that is registered by
dmtoken, or None if the token is not found.
"""
return self._registered_tokens.get(dmtoken, None)
def UnregisterDevice(self, dmtoken):
"""Unregisters a device identified by the given DM token.
Args:
dmtoken: The device management token provided by the client.
"""
if dmtoken in self._registered_tokens.keys():
del self._registered_tokens[dmtoken]
|
lizardsystem/lizard-waterbalance | refs/heads/vss | lizard_waterbalance/models_tests.py | 1 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#******************************************************************************
#
# This file is part of the lizard_waterbalance Django app.
#
# The lizard_waterbalance app is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or (at your
# option) any later version.
#
# This library is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# the lizard_waterbalance app. If not, see <http://www.gnu.org/licenses/>.
#
# Copyright 2011 Nelen & Schuurmans
#
#******************************************************************************
#
# Initial programmer: Pieter Swinkels
#
#******************************************************************************
from datetime import datetime
from unittest import TestCase
from lizard_waterbalance.models import Timeseries
from lizard_waterbalance.models import TimeseriesEvent
class TimeseriesTests(TestCase):
def setup_timeseries(self):
"""Return a non-sticky time series in the database."""
timeseries = Timeseries()
timeseries.save()
self.setup_events(timeseries)
return timeseries
def setup_sticky_timeseries(self):
"""Return a non-sticky time series in the database."""
timeseries = Timeseries()
timeseries.default_value = 5.0
timeseries.stick_to_last_value = True
timeseries.save()
self.setup_events(timeseries)
return timeseries
def setup_events(self, timeseries):
"""Create events for the given time series in the database."""
for day in range(5, 10):
event = TimeseriesEvent()
event.time = datetime(2011, 4, day)
event.value = 10.0
event.timeseries = timeseries
event.save()
def test_a(self):
"""Test a Timeseries can be saved to the database."""
timeseries = self.setup_timeseries()
expected_events = [(datetime(2011, 4, 5), 10.0),
(datetime(2011, 4, 6), 10.0),
(datetime(2011, 4, 7), 10.0),
(datetime(2011, 4, 8), 10.0),
(datetime(2011, 4, 9), 10.0)]
self.assertEqual(expected_events, list(timeseries.events()))
def test_b(self):
"""Test a sticky Timeseries fills in defaults before the first event.
"""
timeseries = self.setup_sticky_timeseries()
events = timeseries.events(datetime(2011, 4, 1), datetime(2011, 4, 6))
expected_events = [(datetime(2011, 4, 1), 5.0),
(datetime(2011, 4, 2), 5.0),
(datetime(2011, 4, 3), 5.0),
(datetime(2011, 4, 4), 5.0),
(datetime(2011, 4, 5), 10.0),
(datetime(2011, 4, 6), 10.0)]
self.assertEqual(expected_events, list(events))
def test_c(self):
"""Test a sticky Timeseries fills in the last event value after the last event."""
timeseries = self.setup_sticky_timeseries()
events = timeseries.events(datetime(2011, 4, 8), datetime(2011, 4, 12))
expected_events = [(datetime(2011, 4, 8), 10.0),
(datetime(2011, 4, 9), 10.0),
(datetime(2011, 4, 10), 10.0),
(datetime(2011, 4, 11), 10.0),
(datetime(2011, 4, 12), 10.0)]
self.assertEqual(expected_events, list(events))
def test_d(self):
"""Test a non-sticky Timeseries only returns available events."""
timeseries = self.setup_timeseries()
events = timeseries.events(datetime(2011, 4, 1), datetime(2011, 4, 12))
expected_events = [(datetime(2011, 4, 5), 10.0),
(datetime(2011, 4, 6), 10.0),
(datetime(2011, 4, 7), 10.0),
(datetime(2011, 4, 8), 10.0),
(datetime(2011, 4, 9), 10.0)]
self.assertEqual(expected_events, list(events))
def test_e(self):
"""Test a sticky Timeseries only returns available events.
We will call Timeseries.events without a start or end.
"""
timeseries = self.setup_sticky_timeseries()
events = timeseries.events()
expected_events = [(datetime(2011, 4, 5), 10.0),
(datetime(2011, 4, 6), 10.0),
(datetime(2011, 4, 7), 10.0),
(datetime(2011, 4, 8), 10.0),
(datetime(2011, 4, 9), 10.0)]
self.assertEqual(expected_events, list(events))
|
philsch/ansible | refs/heads/devel | lib/ansible/modules/windows/win_iis_virtualdirectory.py | 78 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Henrik Wallström <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: win_iis_virtualdirectory
version_added: "2.0"
short_description: Configures a virtual directory in IIS.
description:
- Creates, Removes and configures a virtual directory in IIS.
options:
name:
description:
- The name of the virtual directory to create or remove
required: true
state:
description:
- Whether to add or remove the specified virtual directory
choices:
- absent
- present
required: false
default: present
site:
description:
- The site name under which the virtual directory is created or exists.
required: true
application:
description:
- The application under which the virtual directory is created or exists.
required: false
default: null
physical_path:
description:
- The physical path to the folder in which the new virtual directory is created. The specified folder must already exist.
required: false
default: null
author: Henrik Wallström
'''
EXAMPLES = r'''
- name: Create a virtual directory if it does not exist
win_iis_virtualdirectory:
name: somedirectory
site: somesite
state: present
physical_path: c:\virtualdirectory\some
- name: Remove a virtual directory if it exists
win_iis_virtualdirectory:
name: somedirectory
site: somesite
state: absent
- name: Create a virtual directory on an application if it does not exist
win_iis_virtualdirectory:
name: somedirectory
site: somesite
application: someapp
state: present
physical_path: c:\virtualdirectory\some
'''
|
aospx-kitkat/platform_external_chromium_org | refs/heads/kitkat | build/android/avd.py | 35 | #!/usr/bin/env python
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Launches Android Virtual Devices with a set configuration for testing Chrome.
The script will launch a specified number of Android Virtual Devices (AVD's).
"""
import install_emulator_deps
import logging
import optparse
import os
import subprocess
import sys
from pylib import constants
from pylib.utils import emulator
def main(argv):
# ANDROID_SDK_ROOT needs to be set to the location of the SDK used to launch
# the emulator to find the system images upon launch.
emulator_sdk = os.path.join(constants.EMULATOR_SDK_ROOT,
'android_tools', 'sdk')
os.environ['ANDROID_SDK_ROOT'] = emulator_sdk
opt_parser = optparse.OptionParser(description='AVD script.')
opt_parser.add_option('-n', '--num', dest='emulator_count',
help='Number of emulators to launch (default is 1).',
type='int', default='1')
opt_parser.add_option('--abi', default='x86',
help='Platform of emulators to launch (x86 default).')
options, _ = opt_parser.parse_args(argv[1:])
logging.basicConfig(level=logging.INFO,
format='# %(asctime)-15s: %(message)s')
logging.root.setLevel(logging.INFO)
# Check if KVM is enabled for x86 AVD's and check for x86 system images.
  if options.abi == 'x86':
if not install_emulator_deps.CheckKVM():
logging.critical('ERROR: KVM must be enabled in BIOS, and installed. '
'Enable KVM in BIOS and run install_emulator_deps.py')
return 1
elif not install_emulator_deps.CheckX86Image():
logging.critical('ERROR: System image for x86 AVD not installed. Run '
'install_emulator_deps.py')
return 1
if not install_emulator_deps.CheckSDK():
logging.critical('ERROR: Emulator SDK not installed. Run '
'install_emulator_deps.py.')
return 1
emulator.LaunchEmulators(options.emulator_count, options.abi, True)
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
bramwalet/Subliminal.bundle | refs/heads/master | Contents/Libraries/Shared/chardet/cp949prober.py | 2800 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import EUCKRDistributionAnalysis
from .mbcssm import CP949SMModel
class CP949Prober(MultiByteCharSetProber):
def __init__(self):
MultiByteCharSetProber.__init__(self)
self._mCodingSM = CodingStateMachine(CP949SMModel)
        # NOTE: CP949 is a superset of EUC-KR, so the distribution should
        # not be different.
self._mDistributionAnalyzer = EUCKRDistributionAnalysis()
self.reset()
def get_charset_name(self):
return "CP949"
|
lyynocs/magento-connector-v8 | refs/heads/master | connector_ecommerce/unit/__init__.py | 5 | # -*- coding: utf-8 -*-
from . import sale_order_onchange
|
fbradyirl/home-assistant | refs/heads/dev | tests/components/litejet/test_init.py | 4 | """The tests for the litejet component."""
import logging
import unittest
from homeassistant.components import litejet
from tests.common import get_test_home_assistant
_LOGGER = logging.getLogger(__name__)
class TestLiteJet(unittest.TestCase):
"""Test the litejet component."""
def setup_method(self, method):
"""Set up things to be run when tests are started."""
self.hass = get_test_home_assistant()
self.hass.start()
self.hass.block_till_done()
def teardown_method(self, method):
"""Stop everything that was started."""
self.hass.stop()
def test_is_ignored_unspecified(self):
"""Ensure it is ignored when unspecified."""
self.hass.data["litejet_config"] = {}
assert not litejet.is_ignored(self.hass, "Test")
def test_is_ignored_empty(self):
"""Ensure it is ignored when empty."""
self.hass.data["litejet_config"] = {litejet.CONF_EXCLUDE_NAMES: []}
assert not litejet.is_ignored(self.hass, "Test")
def test_is_ignored_normal(self):
"""Test if usually ignored."""
self.hass.data["litejet_config"] = {
litejet.CONF_EXCLUDE_NAMES: ["Test", "Other One"]
}
assert litejet.is_ignored(self.hass, "Test")
assert not litejet.is_ignored(self.hass, "Other one")
assert not litejet.is_ignored(self.hass, "Other 0ne")
assert litejet.is_ignored(self.hass, "Other One There")
assert litejet.is_ignored(self.hass, "Other One")
|
robinvierich/SublimeTextGRLD | refs/heads/master | grld/session.py | 1 | import sublime
import sys
import threading
import hashlib
# Helper module
try:
from .helper import H
except:
from helper import H
# Settings variables
try:
from . import settings as S
except:
import settings as S
# DBGp protocol constants
try:
from . import dbgp
except:
import dbgp
try:
from . import grld
except:
import grld
# Config module
from .config import get_value
# Log module
from .log import debug, info
# Protocol module
from .protocol import ProtocolConnectionException
# Util module
from .util import get_real_path
# View module
from .view import DATA_CONTEXT, DATA_STACK, DATA_WATCH, DATA_COROUTINES, DATA_EVALUATE, TITLE_WINDOW_WATCH, generate_context_output, generate_stack_output, generate_coroutines_output, has_debug_view, render_regions, show_content, show_file, show_panel_content
ACTION_BREAK = "action_break"
ACTION_EVALUATE = "action_evaluate"
ACTION_EXECUTE = "action_execute"
ACTION_INIT = "action_init"
ACTION_REMOVE_BREAKPOINT = "action_remove_breakpoint"
ACTION_SET_BREAKPOINT = "action_set_breakpoint"
ACTION_STATUS = "action_status"
ACTION_USER_EXECUTE = "action_user_execute"
ACTION_WATCH = "action_watch"
ACTION_SET_CURRENT_STACK_LEVEL = "action_set_current_stack_level"
ACTION_SET_SELECTED_THREAD = "action_set_selected_thread"
def is_connected(show_status=False):
"""
Check if client is connected to debugger engine.
Keyword arguments:
show_status -- Show message why client is not connected in status bar.
"""
if S.PROTOCOL and S.PROTOCOL.connected:
return True
elif S.PROTOCOL and show_status:
sublime.status_message('GRLD: Waiting for response from debugger engine.')
elif show_status:
sublime.status_message('GRLD: No GRLD session running.')
return False
def is_execution_broken():
return S.BREAKPOINT_ROW or S.BREAKPOINT_EXCEPTION
def connection_error(exception):
    """
    Template for showing error message on connection error/loss.
    Keyword arguments:
    exception -- Exception/reason of connection error/loss.
    """
    info("Connection lost with debugger engine.")
    debug(exception)
# Reset connection
with S.PROTOCOL as protocol:
protocol.disconnect(exception)
#protocol.clear()
# Render breakpoint markers
#render_regions()
def update_socket_loop():
if not S.PROTOCOL:
return
try:
with S.PROTOCOL as protocol:
protocol.update()
except ProtocolConnectionException as e:
protocol.disconnect(e)
return
sublime.set_timeout(lambda: sublime.status_message('GRLD: Connected'), 0)
sublime.set_timeout_async(update_socket_loop, 100)
class SocketHandler(threading.Thread):
def __init__(self, action, **options):
threading.Thread.__init__(self)
self.action = action
self.options = options
self.current_thread = 'current' # thread currently running in Lua (since the last time execution stopped)
self.selected_thread = 'current' # selected thread from UI
self.current_stack_level = 1
def get_option(self, option, default_value=None):
if option in self.options.keys():
return self.options[option]
return default_value
def run_command(self, command, args=None):
if not isinstance(args, dict):
args = {}
self.timeout(lambda: self._run_command(command, args))
def _run_command(self, command, args=None):
try:
sublime.active_window().run_command(command, args)
except:
# In case active_window() is not available
pass
    def run_view_command(self, command, args=None):
        if not isinstance(args, dict):
            args = {}
        self.timeout(lambda: self._run_view_command(command, args))
def _run_view_command(self, command, args=None):
try:
sublime.active_window().active_view().run_command(command, args)
except:
# In case there is no active_view() available
pass
def status_message(self, message):
sublime.set_timeout(lambda: sublime.status_message(message), 100)
def timeout(self, function):
sublime.set_timeout(function, 0)
def run(self):
# Make sure an action is defined
if not self.action:
return
try:
S.SESSION_BUSY = True
# Evaluate
if self.action == ACTION_BREAK:
self.execute_break()
elif self.action == ACTION_EVALUATE:
self.evaluate(self.get_option('expression'), self.get_option('view'))
# Execute
elif self.action == ACTION_EXECUTE:
self.execute(self.get_option('command'))
# Init
elif self.action == ACTION_INIT:
self.init()
# Remove breakpoint
elif self.action == ACTION_REMOVE_BREAKPOINT:
self.remove_breakpoint(self.get_option('filename'), self.get_option('lineno'))
# Set breakpoint
elif self.action == ACTION_SET_BREAKPOINT:
# TODO: support conditional bps
active = True # self.get_option('expression')
self.set_breakpoint(self.get_option('filename'), self.get_option('lineno'), active)
# Status
elif self.action == ACTION_STATUS:
self.status()
# User defined execute
elif self.action == ACTION_USER_EXECUTE:
self.user_execute(self.get_option('command'), self.get_option('args'))
# Watch expression
elif self.action == ACTION_WATCH:
self.watch_expression()
            elif self.action == ACTION_SET_CURRENT_STACK_LEVEL:
                self.set_current_stack_level(self.get_option('stack_level'))
            elif self.action == ACTION_SET_SELECTED_THREAD:
                # The original dispatched this action to set_current_stack_level
                # as well, which looks like a copy-paste slip; store the
                # selection instead.
                self.selected_thread = self.get_option('selected_thread')
# Show dialog on connection error
except ProtocolConnectionException as e:
self.timeout(lambda: connection_error(e))
finally:
S.SESSION_BUSY = False
def execute_break(self):
with S.PROTOCOL as protocol:
protocol.send('break', 'running')
def evaluate(self, expression, view):
if not expression or not is_connected():
return
# Send 'eval' command to debugger engine with code to evaluate
response = self.evaluate_expression(expression, self.current_thread, self.current_stack_level)
transformed_response = self.transform_grld_eval_response(response)
response_str = generate_context_output(transformed_response, values_only=True, multiline=False)
self.timeout(lambda: view.run_command('grld_update_evaluate_line_response', {'response': response_str}))
def execute(self, command):
# Do not execute if no command is set
if not command or not is_connected():
return
# Reset previous breakpoint values
S.BREAKPOINT_EXCEPTION = None
S.BREAKPOINT_ROW = None
S.CONTEXT_DATA.clear()
self.watch_expression()
# Send command to debugger engine
with S.PROTOCOL as protocol:
protocol.send(command)
if command == grld.RUN:
self.run_command('grld_layout')
#self.timeout(lambda: render_regions())
def get_value(self, grld_id):
with S.PROTOCOL as protocol:
protocol.send('getValue')
protocol.send(grld_id)
response = protocol.read()
return response
def is_table(self, variable):
if type(variable) != dict: return False
if 'value' not in variable: return False
value = variable['value']
if type(value) != dict: return False
if 'type' not in value: return False
return value['type'] == 'table'
def transform_grld_table(self, table_variable, parent_table_refs):
name = table_variable['name']
table_values = table_variable['value']
table_ref = table_values['short']
id = table_values['id']
type = table_values['type']
assert type == 'table', "table_variable passed to transform_grld_table must of type 'table'"
parent_table_refs.append(table_ref)
table_children = self.get_value(id)
transformed_children = {}
for i, child in table_children.items():
if self.is_table(child):
child_table_ref = child['value']['short']
if child_table_ref in parent_table_refs: # special value if table child is a reference to the table itself (avoid infinite recursion)
idx = parent_table_refs.index(child_table_ref)
num_tables_up_from_child = len(parent_table_refs) - idx - 1
if num_tables_up_from_child == 0:
description = '<circular reference to this table>'
else:
description = '<circular reference to a parent table {} levels up>'.format(num_tables_up_from_child)
transformed_children[i] = {'name': child['name'], 'type':'table-ref', 'value': description}
else:
transformed_children[i] = self.transform_grld_variable(child, parent_table_refs)
else:
transformed_children[i] = self.transform_grld_variable(child, parent_table_refs)
return {'name': name, 'type': type, 'value': table_ref, 'numchildren': len(transformed_children.keys()), 'children': transformed_children}
def transform_grld_variable(self, variable, parent_table_refs=None):
name = variable['name']
# if nothing is returned, GRLD returns {name = '<no result'>}
if not 'value' in variable:
return {'name': '', 'value': name, 'type': ''}
if self.is_table(variable):
return self.transform_grld_table(variable, parent_table_refs or []) #handle tables separately
value = variable['value']
if type(value) == dict:
value_type = value['type']
value = value['short']
else:
if type(value) == bool:
value_type = 'boolean'
elif type(value) == int or type(value) == float:
value_type = 'number'
elif type(value) == str:
value_type = 'string'
else:
value_type = '?'
return {'name': name, 'value': str(value), 'type': value_type}
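        # Illustrative (wire shapes assumed from the handling above):
        #   {'name': 'x', 'value': {'type': 'number', 'short': '42'}}
        #     -> {'name': 'x', 'value': '42', 'type': 'number'}
        #   {'name': '<no result>'}
        #     -> {'name': '', 'value': '<no result>', 'type': ''}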
def transform_grld_eval_response(self, eval_response, scope=None):
transformed = {}
for i, var in eval_response.items():
transformed_item = self.transform_grld_variable(var)
if scope:
name = "(%s) %s" % (scope, transformed_item['name'])
transformed_item['name'] = name
else:
name = transformed_item['name']
transformed[i] = transformed_item
return transformed
def transform_grld_context_response(self, context_response, scope):
return self.transform_grld_eval_response(context_response, scope)
def get_context_values(self, thread, stack_level):
"""
Get variables in current context.
"""
if not is_connected():
return
context = H.new_dictionary()
try:
# local variables
#if get_value(S.KEY_SUPER_GLOBALS):
with S.PROTOCOL as protocol:
protocol.send("locals")
protocol.send(thread)
protocol.send(stack_level)
response = protocol.read()
properties = self.transform_grld_context_response(response, "local")
context.update(properties)
# upvalues
protocol.send("upvalues")
protocol.send(thread)
protocol.send(stack_level)
response = protocol.read()
properties = self.transform_grld_context_response(response, "upvalue")
context.update(properties)
except ProtocolConnectionException as e:
self.timeout(lambda: connection_error(e))
# Store context variables in session
S.CONTEXT_DATA = context
return generate_context_output(context)
def get_stack_values(self):
"""
Get stack information for current context.
"""
response = None
if is_connected():
try:
with S.PROTOCOL as protocol:
# Get stack information
protocol.send("callstack")
protocol.send(self.current_thread)
response = protocol.read()
except ProtocolConnectionException as e:
self.timeout(lambda: connection_error(e))
#response should be something like: {"1": {"name": <name of thing>, "namewhat": (global|local|method|field|''), "what": (Lua, C, main), "source": @<filename>, "line": line in file}}
return response
def evaluate_expression(self, expression, thread, stack_level):
try:
with S.PROTOCOL as protocol:
protocol.send('evaluate')
protocol.send('=' + expression) #TODO: should we always add '='?
protocol.send(thread)
protocol.send(stack_level)
response = protocol.read()
except ProtocolConnectionException as e:
self.timeout(lambda: connection_error(e))
return response
def get_watch_values(self, thread, stack_level):
"""
Evaluate all watch expressions in current context.
"""
for index, item in enumerate(S.WATCH):
# Reset value for watch expression
S.WATCH[index]['value'] = None
# Evaluate watch expression when connected to debugger engine
if is_connected():
if item['enabled']:
watch_value = self.evaluate_expression(item['expression'], thread, stack_level)
S.WATCH[index]['value'] = self.transform_grld_eval_response(watch_value)
def init(self):
if not is_connected():
return
with S.PROTOCOL as protocol:
protocol.register_command_cb('break', (lambda filename, line: self.handle_break_command(filename, line)))
protocol.register_command_cb('synchronize', (lambda: self.handle_synchronize_command()))
# Connection initialization
client_name = protocol.read()
            # Set breakpoints for files
for filename, breakpoint_data in S.BREAKPOINT.items():
if breakpoint_data:
for lineno, bp in breakpoint_data.items():
if bp['enabled']:
# TODO: support conditional bps
active = True #bp['expression']
self.set_breakpoint(filename, lineno, active)
debug('breakpoint_set: ' + filename + ':' + lineno)
if get_value(S.KEY_BREAK_ON_START):
protocol.send("break", "running")
update_socket_loop()
#self.timeout(lambda: render_regions())
def remove_breakpoint(self, filename, lineno):
if not is_connected():
return
self.set_breakpoint(filename, lineno, False)
def set_breakpoint(self, filename, lineno, active=True):
if not filename or not lineno or not is_connected():
return
with S.PROTOCOL as protocol:
# Get path of file on server
fileuri = get_real_path(filename, True)
# Set breakpoint
protocol.send("setbreakpoint", "running")
protocol.send({"source": fileuri, "line": int(lineno), "value": active}, "running")
#self.timeout(lambda: render_regions())
def set_exception(self, exception):
if not is_connected():
return
with S.PROTOCOL as protocol:
protocol.send(dbgp.BREAKPOINT_SET, t='exception', x='"%s"' % exception)
response = protocol.read()
def status(self):
raise NotImplementedError('status is not implemented yet')
#if not is_connected():
# return
#with S.PROTOCOL as protocol:
# # Send 'status' command to debugger engine
# protocol.send(dbgp.STATUS)
# response = protocol.read()
## Show response in status bar
#self.status_message("GRLD status: " + response.get(dbgp.ATTRIBUTE_REASON) + ' - ' + response.get(dbgp.ATTRIBUTE_STATUS))
def user_execute(self, command, args=None):
if not command or not is_connected():
return
with S.PROTOCOL as protocol:
# Send command to debugger engine
protocol.send(command, args)
response = protocol.read(return_string=True)
# Show response data in output panel
self.timeout(lambda: show_panel_content(response))
def watch_expression(self, thread=None, stack_level=None):
if not thread:
thread = self.current_thread
if not stack_level:
stack_level = self.current_stack_level
# Evaluate watch expressions
self.get_watch_values(thread, stack_level)
# Show watch expression
self.timeout(lambda: self._watch_expression(self.get_option('check_watch_view', False)))
def _watch_expression(self, check_watch_view):
# Do not show if we only want to show content when Watch view is not available
if check_watch_view and not has_debug_view(TITLE_WINDOW_WATCH):
return
show_content(DATA_WATCH)
#self.timeout(lambda: render_regions())
def set_current_stack_level(self, stack_level):
self.current_stack_level = stack_level
def set_selected_thread(self, selected_thread):
self.selected_thread = selected_thread
def get_all_coroutines(self):
with S.PROTOCOL as protocol:
protocol.send('coroutines')
response = protocol.read()
count = len(response.keys())
response[count + 1] = {'id': 'main'} # always include a 'main' coroutine. This is how GRLD references the main Lua thread.
return response
def update_current_thread(self, coroutines_dict):
with S.PROTOCOL as protocol:
protocol.send('currentthread')
current_thread = protocol.read()
# GRLD only passes back co-routines that are NOT 'main'. So, if current_thread is not in the list, then 'main' is the current thread.
coroutine_ids = (coroutine_descriptor['id'] for coroutine_descriptor in coroutines_dict.values())
if current_thread not in coroutine_ids:
current_thread = 'main'
self.current_thread = current_thread
def update_contextual_data(self, thread, stack_level):
# Context variables
context = self.get_context_values(thread, stack_level)
self.timeout(lambda: show_content(DATA_CONTEXT, context))
# Watch expressions
self.watch_expression(thread, stack_level)
# Render breakpoint markers
#self.timeout(lambda: render_regions())
def handle_break_command(self, filename, line):
S.SESSION_BUSY = True
filename = get_real_path(filename)
# Show debug/status output
self.status_message('GRLD: Break')
        info('Break: ' + filename)
# Store line number of breakpoint for displaying region marker
S.BREAKPOINT_ROW = { 'filename': filename, 'lineno': str(line) }
# Focus/Open file window view
self.timeout(lambda: show_file(filename, line))
coroutines_dict = self.get_all_coroutines()
self.update_current_thread(coroutines_dict)
coroutines_str = generate_coroutines_output(coroutines_dict, self.current_thread)
self.timeout(lambda: show_content(DATA_COROUTINES, coroutines_str))
# Stack history
stack = self.get_stack_values()
if stack:
stack_levels = [int(level) for level in stack.keys()]
min_stack_level = min(stack_levels)
self.current_stack_level = min_stack_level
stack_str = generate_stack_output(stack)
else:
stack_str = H.unicode_string('[{level}] {filename}.{where}:{lineno}\n' \
                .format(level=0, where='{main}', lineno=1, filename=filename))
self.timeout(lambda: show_content(DATA_STACK, stack_str))
self.update_contextual_data(self.current_thread, self.current_stack_level)
#self.timeout(lambda: render_regions())
S.SESSION_BUSY = False
def handle_synchronize_command(self):
S.SESSION_BUSY = True
with S.PROTOCOL as protocol:
# synchronize expects us to return the # of active breakpoints (this is actually not implemented, we MUST return 0 here)
protocol.send(0)
# next we need to send the "breakOnConnection" value, this is configurable, but we'll just always return false for now
protocol.send(False)
S.SESSION_BUSY = False
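# --- Illustrative sketch (added for exposition; not part of the original plugin) ---
# transform_grld_table above guards against cycles by carrying a stack of the
# table refs seen on the way down. A minimal standalone version of that
# bookkeeping, with hypothetical refs, looks like this:
def _describe_circular_ref(parent_table_refs, child_table_ref):
    # Distance 0 means the child points back at the table currently being
    # transformed; otherwise it points num_tables_up levels further up.
    idx = parent_table_refs.index(child_table_ref)
    num_tables_up = len(parent_table_refs) - idx - 1
    if num_tables_up == 0:
        return '<circular reference to this table>'
    return '<circular reference to a parent table {} levels up>'.format(num_tables_up)
# e.g. _describe_circular_ref(['t1', 't2', 't3'], 't1') reports a parent 2 levels up.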
class SessionException(Exception):
pass |
zhangjiajie/tax_benchmark | refs/heads/master | script/ete2/_ph.py | 2 | # -*- coding: utf-8 -*-
# #START_LICENSE###########################################################
#
#
# This file is part of the Environment for Tree Exploration program
# (ETE). http://ete.cgenomics.org
#
# ETE is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ETE is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
# License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ETE. If not, see <http://www.gnu.org/licenses/>.
#
#
# ABOUT THE ETE PACKAGE
# =====================
#
# ETE is distributed under the GPL copyleft license (2008-2011).
#
# If you make use of ETE in published work, please cite:
#
# Jaime Huerta-Cepas, Joaquin Dopazo and Toni Gabaldon.
# ETE: a python Environment for Tree Exploration. Jaime BMC
# Bioinformatics 2010,:24doi:10.1186/1471-2105-11-24
#
# Note that extra references to the specific methods implemented in
# the toolkit are available in the documentation.
#
# More info at http://ete.cgenomics.org
#
#
# #END_LICENSE#############################################################
__VERSION__ = "ete2-2.2rev1026"
''' I use this module to check for newer versions of ETE '''
import urllib2 as url
try:
from ete2 import __ETEID__
except ImportError:
__ETEID__ = "Unknown"
try:
from ete2 import __VERSION__
except ImportError:
__VERSION__ = "Unknown"
def call():
print " == Calling home...",
try:
f = url.urlopen('http://ete.cgenomics.org/et_phone_home.php?VERSION=%s&ID=%s'
%(__VERSION__, __ETEID__))
except:
print "No answer :("
else:
print "Got answer!"
print f.read()
module_name = __name__.split(".")[0]
try:
f = url.urlopen('http://ete.cgenomics.org/releases/ete2/%s.latest'
%module_name)
except:
latest = None
else:
latest = int(f.read())
try:
current = int(__VERSION__.split("rev")[1])
except (IndexError, ValueError):
current = None
if not latest:
print "I could not find data about your version [%s]" %module_name
print "Are you ok?"
elif not current:
print "I could not determine your version [%s]" %module_name
print "Are you ok?"
print "Latest stable ETE version is", latest
elif latest > current:
print "You look a bit old."
print "A newer version is available: rev%s" %latest
print "Use 'easy_install -U %s' to upgrade" %module_name
else:
print "I see you are in shape."
print "No updates are available."
try:
msg = raw_input("\n == Do you want to leave any message?\n(Press enter to finish)\n\n").strip()
except KeyboardInterrupt:
msg = None
if msg:
msg = url.quote(msg)
try:
f = url.urlopen('http://ete.cgenomics.org/et_phone_home.php?VERSION=%s&ID=%s&MSG=%s'
%(__VERSION__, __ETEID__, msg))
except:
print "Message could be delivered :("
else:
print f.read()
def new_version(module_name=None, current=None):
if not module_name:
module_name = __name__.split(".")[0]
try:
f = url.urlopen('http://ete.cgenomics.org/releases/ete2/%s.latest'
%module_name)
except:
latest = None
else:
latest = int(f.read())
news_url = 'http://ete.cgenomics.org/releases/ete2/%s.latest_news' %module_name
msg = read_content(news_url)
if not current:
try:
current = int(__VERSION__.split("rev")[1])
except (IndexError, ValueError):
current = None
return current, latest, msg
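# Illustrative sketch (added for exposition; not part of the original module):
# the revision parsing used above, extracted as a standalone helper.
def _parse_revision(version_string):
    # "ete2-2.2rev1026" -> 1026; None when there is no "rev" component
    # (e.g. the "Unknown" fallback used when __VERSION__ cannot be imported).
    try:
        return int(version_string.split("rev")[1])
    except (IndexError, ValueError):
        return None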
def read_content(address):
try:
f = url.urlopen(address)
except:
return None
else:
return f.read()
|
etherkit/OpenBeacon2 | refs/heads/master | client/linux-x86/venv/lib/python3.8/site-packages/attr/__init__.py | 3 | from __future__ import absolute_import, division, print_function
import sys
from functools import partial
from . import converters, exceptions, filters, setters, validators
from ._config import get_run_validators, set_run_validators
from ._funcs import asdict, assoc, astuple, evolve, has, resolve_types
from ._make import (
NOTHING,
Attribute,
Factory,
attrib,
attrs,
fields,
fields_dict,
make_class,
validate,
)
from ._version_info import VersionInfo
__version__ = "20.1.0"
__version_info__ = VersionInfo._from_version_string(__version__)
__title__ = "attrs"
__description__ = "Classes Without Boilerplate"
__url__ = "https://www.attrs.org/"
__uri__ = __url__
__doc__ = __description__ + " <" + __uri__ + ">"
__author__ = "Hynek Schlawack"
__email__ = "[email protected]"
__license__ = "MIT"
__copyright__ = "Copyright (c) 2015 Hynek Schlawack"
s = attributes = attrs
ib = attr = attrib
dataclass = partial(attrs, auto_attribs=True) # happy Easter ;)
__all__ = [
"Attribute",
"Factory",
"NOTHING",
"asdict",
"assoc",
"astuple",
"attr",
"attrib",
"attributes",
"attrs",
"converters",
"evolve",
"exceptions",
"fields",
"fields_dict",
"filters",
"get_run_validators",
"has",
"ib",
"make_class",
"resolve_types",
"s",
"set_run_validators",
"setters",
"validate",
"validators",
]
if sys.version_info[:2] >= (3, 6):
from ._next_gen import define, field, frozen, mutable
    __all__.extend(("define", "field", "frozen", "mutable"))
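# Illustrative sketch (added for exposition; not part of the upstream file, and
# kept as comments so importing attr stays side-effect free): the aliases
# defined above expose the classic API as `attr.s`/`attr.ib`, e.g.
#
#     @attr.s
#     class Point(object):
#         x = attr.ib(default=0)
#         y = attr.ib(default=0)
#
#     attr.asdict(Point(1, 2))  # {"x": 1, "y": 2}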
|
arborh/tensorflow | refs/heads/master | tensorflow/python/tpu/tpu_config.py | 27 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Stub file to maintain backwards compatibility."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=wildcard-import,unused-import
from tensorflow_estimator.python.estimator.tpu.tpu_config import *
# pylint: enable=wildcard-import,unused-import
|
mantidproject/mantid | refs/heads/master | Framework/PythonInterface/test/python/plugins/algorithms/WorkflowAlgorithms/CorelliPowderCalibrationCreateTest.py | 3 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
import numpy as np
from numpy.testing import assert_allclose
import unittest
from mantid.api import mtd
from mantid.simpleapi import (
CloneWorkspace, CompareWorkspaces, CorelliPowderCalibrationCreate, ConvertUnits, CreateSampleWorkspace,
DeleteWorkspaces, MoveInstrumentComponent, Rebin, RotateInstrumentComponent)
class CorelliPowderCalibrationCreateTest(unittest.TestCase):
def setUp(self) -> None:
r"""Fixture runs at the beginning of every test method"""
spacings_reference = [0.9179, 0.9600, 1.0451, 1.2458, 1.3576, 1.5677, 1.6374, 3.1353] # silicon
# add one Gaussian peak for every reference d-spacing
peak_functions = list()
for spacing in spacings_reference:
peak_function = f'name=Gaussian, PeakCentre={spacing}, Height={10 * np.sqrt(spacing)}, Sigma={0.003 * spacing}'
peak_functions.append(peak_function)
function = ';'.join(peak_functions)
begin, end, bin_width = spacings_reference[0] - 0.5, spacings_reference[-1] + 0.5, 0.0001
# Single 10x10 rectangular detector, located 4m downstream the sample along the X-axis
# Each detector has the same histogram of intensities, showing eight Gaussian peaks with centers at the
# reference d-spacings
CreateSampleWorkspace(WorkspaceType='Histogram', Function='User Defined', UserDefinedFunction=function,
XUnit='dSpacing', XMin=begin, XMax=end, BinWidth=bin_width,
NumBanks=1, PixelSpacing=0.02, SourceDistanceFromSample=10.0, BankDistanceFromSample=4.0,
OutputWorkspace='test_workspace_dSpacing')
RotateInstrumentComponent(Workspace='test_workspace_dSpacing', ComponentName='bank1', X=0., Y=1., z=0.,
Angle=90, RelativeRotation=True)
MoveInstrumentComponent(Workspace='test_workspace_dSpacing', ComponentName='bank1', X=4.0, y=0.0, z=0.0,
RelativePosition=False)
# Eight peaks now in TOF. Only when the instrument is located 4m downstream along the X-axis will we obtain
# the correct d-Spacings if we convert back to dSpacings units. If we perturb the instrument and convert
# back to dSpacing units, we'll obtain eight peaks centered at d-spacings sligthly different than the
# reference values
ConvertUnits(InputWorkspace='test_workspace_dSpacing', Target='TOF', EMode='Elastic',
OutputWorkspace='test_workspace_TOF')
Rebin(InputWorkspace='test_workspace_TOF', Params=[300, 1.0, 16666.7], OutputWorkspace='test_workspace_TOF')
ConvertUnits(InputWorkspace='test_workspace_TOF', Target='dSpacing', EMode='Elastic',
OutputWorkspace='test_workspace_dSpacing')
self.spacings_reference = spacings_reference
def tearDown(self) -> None:
r"""Fixture runs at the end of every test method"""
DeleteWorkspaces(['test_workspace_dSpacing', 'test_workspace_TOF'])
def spacings_recovered(self, input_workspace, calibrate=True):
r"""Compare the input_workspace to the reference workspace 'test_workspace_dSpacing' after being
converted to TOF units. If calibrate=True, calibrate first and then convert units"""
if calibrate:
CorelliPowderCalibrationCreate(
InputWorkspace='perturbed', OutputWorkspacesPrefix='cal_',
TofBinning=[300, 1.0, 16666.7], PeakPositions=self.spacings_reference, SourceToSampleDistance=10.0,
ComponentList='bank1', FixY=False, ComponentMaxTranslation=0.03, FixYaw=False, ComponentMaxRotation=6,
Minimizer='differential_evolution', MaxIterations=10)
ConvertUnits(InputWorkspace='perturbed', Target='dSpacing', EMode='Elastic', OutputWorkspace='perturbed_dS')
results = CompareWorkspaces(Workspace1='test_workspace_dSpacing', Workspace2='perturbed_dS', Tolerance=0.001,
CheckInstrument=False)
DeleteWorkspaces(['perturbed_dS'])
return results.Result
def test_exceptions(self):
# Both FixSource=True, AdjustSource=True can't be True
try:
CorelliPowderCalibrationCreate(
InputWorkspace='test_workspace_TOF', OutputWorkspacesPrefix='cal_',
TofBinning=[300, 1.0, 16666.7], PeakPositions=self.spacings_reference, FixSource=True,
AdjustSource=True, ComponentList='bank1', FixY=False, ComponentMaxTranslation=0.2,
FixYaw=False, ComponentMaxRotation=10)
except RuntimeError as error:
assert 'Some invalid Properties found' in str(error)
# Both FixSource=True, AdjustSource=True can't be False
try:
CorelliPowderCalibrationCreate(
InputWorkspace='test_workspace_TOF', OutputWorkspacesPrefix='cal_',
TofBinning=[300, 1.0, 16666.7], PeakPositions=self.spacings_reference, FixSource=False,
AdjustSource=False, ComponentList='bank1', FixY=False, ComponentMaxTranslation=0.2,
FixYaw=False, ComponentMaxRotation=10)
except RuntimeError as error:
assert 'Some invalid Properties found' in str(error)
@unittest.skip("causes surpassing the timeout in the Jenkins servers")
def test_translation(self):
        CloneWorkspace(InputWorkspace='test_workspace_TOF', OutputWorkspace='perturbed')
MoveInstrumentComponent(Workspace='perturbed', ComponentName='bank1', X=0.02, y=0.005, z=0.005,
RelativePosition=True)
assert self.spacings_recovered('perturbed', calibrate=False) is False
assert self.spacings_recovered('perturbed', calibrate=True)
DeleteWorkspaces(['perturbed'])
@unittest.skip("causes surpassing the timeout in the Jenkins servers")
def test_rotation(self):
CloneWorkspace(InputWorkspace='test_workspace_TOF', OutputWorkspace='perturbed')
RotateInstrumentComponent(Workspace='perturbed', ComponentName='bank1', X=0, Y=0, z=1, Angle=5,
RelativeRotation=True)
assert self.spacings_recovered('perturbed', calibrate=False) is False
assert self.spacings_recovered('perturbed', calibrate=True)
DeleteWorkspaces(['perturbed'])
def test_translation_rotation(self):
CloneWorkspace(InputWorkspace='test_workspace_TOF', OutputWorkspace='perturbed')
MoveInstrumentComponent(Workspace='perturbed', ComponentName='bank1', X=0.02, y=0.005, z=0.005,
RelativePosition=True)
RotateInstrumentComponent(Workspace='perturbed', ComponentName='bank1', X=0, Y=0, z=1, Angle=5,
RelativeRotation=True)
assert self.spacings_recovered('perturbed', calibrate=False) is False
assert self.spacings_recovered('perturbed', calibrate=True)
DeleteWorkspaces(['perturbed'])
def test_fix_y(self) -> None:
CloneWorkspace(InputWorkspace='test_workspace_TOF', OutputWorkspace='perturbed')
y = -0.0042 # desired fixed position
MoveInstrumentComponent(Workspace='perturbed', ComponentName='bank1', X=0, y=y, z=0,
RelativePosition=False)
r"""Pass option FixY=True"""
CorelliPowderCalibrationCreate(
InputWorkspace='perturbed', OutputWorkspacesPrefix='cal_',
TofBinning=[300, 1.0, 16666.7], PeakPositions=self.spacings_reference, SourceToSampleDistance=10.0,
ComponentList='bank1', FixY=True, ComponentMaxTranslation=0.2, ComponentMaxRotation=10,
Minimizer='L-BFGS-B')
# Check Y-position of first bank hasn't changed
row = mtd['cal_adjustments'].row(1)
        self.assertAlmostEqual(row['Yposition'], y, places=5)
DeleteWorkspaces(['perturbed'])
def test_fix_yaw(self) -> None:
CloneWorkspace(InputWorkspace='test_workspace_TOF', OutputWorkspace='perturbed')
RotateInstrumentComponent(Workspace='perturbed', ComponentName='bank1', X=0, Y=0, z=1, Angle=5,
RelativeRotation=True)
r"""Pass option FixYaw=True"""
CorelliPowderCalibrationCreate(
InputWorkspace='perturbed', OutputWorkspacesPrefix='cal_',
TofBinning=[300, 1.0, 16666.7], PeakPositions=self.spacings_reference, SourceToSampleDistance=10.0,
ComponentList='bank1', ComponentMaxTranslation=0.2, FixYaw=True, ComponentMaxRotation=10,
Minimizer='L-BFGS-B')
# Check no change in the rotations around Z-axis of first bank
row = mtd['cal_displacements'].row(0)
        self.assertAlmostEqual(row['DeltaGamma'], 0.0, places=5)
DeleteWorkspaces(['perturbed'])
if __name__ == '__main__':
unittest.main()
|
JasonGross/mozjs | refs/heads/master | python/mozbuild/mozpack/copier.py | 2 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import os
import stat
from mozpack.errors import errors
from mozpack.files import (
BaseFile,
Dest,
)
import mozpack.path
import errno
from collections import (
Counter,
OrderedDict,
)
class FileRegistry(object):
'''
Generic container to keep track of a set of BaseFile instances. It
preserves the order under which the files are added, but doesn't keep
track of empty directories (directories are not stored at all).
The paths associated with the BaseFile instances are relative to an
unspecified (virtual) root directory.
registry = FileRegistry()
registry.add('foo/bar', file_instance)
'''
def __init__(self):
self._files = OrderedDict()
self._required_directories = Counter()
def _partial_paths(self, path):
'''
Turn "foo/bar/baz/zot" into ["foo/bar/baz", "foo/bar", "foo"].
'''
partial_paths = []
partial_path = path
while partial_path:
partial_path = mozpack.path.dirname(partial_path)
if partial_path:
partial_paths.append(partial_path)
return partial_paths
def add(self, path, content):
'''
Add a BaseFile instance to the container, under the given path.
'''
assert isinstance(content, BaseFile)
if path in self._files:
return errors.error("%s already added" % path)
if self._required_directories[path] > 0:
return errors.error("Can't add %s: it is a required directory" %
path)
# Check whether any parent of the given path is already stored
partial_paths = self._partial_paths(path)
for partial_path in partial_paths:
if partial_path in self._files:
return errors.error("Can't add %s: %s is a file" %
(path, partial_path))
self._files[path] = content
self._required_directories.update(partial_paths)
def match(self, pattern):
'''
Return the list of paths, stored in the container, matching the
given pattern. See the mozpack.path.match documentation for a
description of the handled patterns.
'''
if '*' in pattern:
return [p for p in self.paths()
if mozpack.path.match(p, pattern)]
if pattern == '':
return self.paths()
if pattern in self._files:
return [pattern]
return [p for p in self.paths()
if mozpack.path.basedir(p, [pattern]) == pattern]
def remove(self, pattern):
'''
Remove paths matching the given pattern from the container. See the
mozpack.path.match documentation for a description of the handled
patterns.
'''
items = self.match(pattern)
if not items:
return errors.error("Can't remove %s: %s" % (pattern,
"not matching anything previously added"))
for i in items:
del self._files[i]
self._required_directories.subtract(self._partial_paths(i))
def paths(self):
'''
Return all paths stored in the container, in the order they were added.
'''
return self._files.keys()
def __len__(self):
'''
Return number of paths stored in the container.
'''
return len(self._files)
def __contains__(self, pattern):
raise RuntimeError("'in' operator forbidden for %s. Use contains()." %
self.__class__.__name__)
def contains(self, pattern):
'''
Return whether the container contains paths matching the given
pattern. See the mozpack.path.match documentation for a description of
the handled patterns.
'''
return len(self.match(pattern)) > 0
def __getitem__(self, path):
'''
Return the BaseFile instance stored in the container for the given
path.
'''
return self._files[path]
def __iter__(self):
'''
Iterate over all (path, BaseFile instance) pairs from the container.
for path, file in registry:
(...)
'''
return self._files.iteritems()
def required_directories(self):
'''
Return the set of directories required by the paths in the container,
in no particular order. The returned directories are relative to an
unspecified (virtual) root directory (and do not include said root
directory).
'''
return set(k for k, v in self._required_directories.items() if v > 0)
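# Illustrative sketch (added for exposition; paths are hypothetical, and this
# is kept as comments because real BaseFile instances are needed to populate
# a registry): matching and removal against a populated FileRegistry.
#
#     registry = FileRegistry()
#     registry.add('chrome/foo.js', foo_file)    # foo_file: a BaseFile
#     registry.add('chrome/bar.js', bar_file)
#     registry.match('chrome/*')                 # ['chrome/foo.js', 'chrome/bar.js']
#     registry.remove('chrome/foo.js')           # only 'chrome/bar.js' remains
#     registry.required_directories()            # set(['chrome'])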
class FileCopyResult(object):
"""Represents results of a FileCopier.copy operation."""
def __init__(self):
self.updated_files = set()
self.existing_files = set()
self.removed_files = set()
self.removed_directories = set()
@property
def updated_files_count(self):
return len(self.updated_files)
@property
def existing_files_count(self):
return len(self.existing_files)
@property
def removed_files_count(self):
return len(self.removed_files)
@property
def removed_directories_count(self):
return len(self.removed_directories)
class FileCopier(FileRegistry):
'''
FileRegistry with the ability to copy the registered files to a separate
directory.
'''
def copy(self, destination, skip_if_older=True,
remove_unaccounted=True,
remove_all_directory_symlinks=True,
remove_empty_directories=True):
'''
Copy all registered files to the given destination path. The given
destination can be an existing directory, or not exist at all. It
can't be e.g. a file.
The copy process acts a bit like rsync: files are not copied when they
don't need to (see mozpack.files for details on file.copy).
By default, files in the destination directory that aren't
registered are removed and empty directories are deleted. In
addition, all directory symlinks in the destination directory
are deleted: this is a conservative approach to ensure that we
        never accidentally write files into a directory that is not the
destination directory. In the worst case, we might have a
directory symlink in the object directory to the source
directory.
To disable removing of unregistered files, pass
remove_unaccounted=False. To disable removing empty
directories, pass remove_empty_directories=False. In rare
cases, you might want to maintain directory symlinks in the
destination directory (at least those that are not required to
be regular directories): pass
remove_all_directory_symlinks=False. Exercise caution with
this flag: you almost certainly do not want to preserve
directory symlinks.
Returns a FileCopyResult that details what changed.
'''
assert isinstance(destination, basestring)
assert not os.path.exists(destination) or os.path.isdir(destination)
result = FileCopyResult()
have_symlinks = hasattr(os, 'symlink')
destination = os.path.normpath(destination)
# We create the destination directory specially. We can't do this as
# part of the loop doing mkdir() below because that loop munges
# symlinks and permissions and parent directories of the destination
# directory may have their own weird schema. The contract is we only
# manage children of destination, not its parents.
try:
os.makedirs(destination)
except OSError as e:
if e.errno != errno.EEXIST:
raise
# Because we could be handling thousands of files, code in this
# function is optimized to minimize system calls. We prefer CPU time
# in Python over possibly I/O bound filesystem calls to stat() and
# friends.
required_dirs = set([destination])
dest_files = set()
for p, f in self:
dest_files.add(os.path.normpath(os.path.join(destination, p)))
required_dirs |= set(os.path.normpath(os.path.join(destination, d))
for d in self.required_directories())
# Ensure destination directories are in place and proper.
#
# The "proper" bit is important. We need to ensure that directories
# have appropriate permissions or we will be unable to discover
# and write files. Furthermore, we need to verify directories aren't
# symlinks.
#
# Symlinked directories (a symlink whose target is a directory) are
# incompatible with us because our manifest talks in terms of files,
# not directories. If we leave symlinked directories unchecked, we
# would blindly follow symlinks and this might confuse file
# installation. For example, if an existing directory is a symlink
# to directory X and we attempt to install a symlink in this directory
# to a file in directory X, we may create a recursive symlink!
for d in sorted(required_dirs, key=len):
try:
os.mkdir(d)
except OSError as error:
if error.errno != errno.EEXIST:
raise
# We allow the destination to be a symlink because the caller
# is responsible for managing the destination and we assume
# they know what they are doing.
if have_symlinks and d != destination:
st = os.lstat(d)
if stat.S_ISLNK(st.st_mode):
# While we have remove_unaccounted, it doesn't apply
# to directory symlinks because if it did, our behavior
# could be very wrong.
os.remove(d)
os.mkdir(d)
if not os.access(d, os.W_OK):
umask = os.umask(0077)
os.umask(umask)
os.chmod(d, 0777 & ~umask)
# While we have remove_unaccounted, it doesn't apply to empty
# directories because it wouldn't make sense: an empty directory
# is empty, so removing it should have no effect.
existing_dirs = set()
existing_files = set()
for root, dirs, files in os.walk(destination):
# We need to perform the same symlink detection as above. os.walk()
# doesn't follow symlinks into directories by default, so we need
# to check dirs (we can't wait for root).
if have_symlinks:
filtered = []
for d in dirs:
full = os.path.join(root, d)
st = os.lstat(full)
if stat.S_ISLNK(st.st_mode):
# This directory symlink is not a required
# directory: any such symlink would have been
# removed and a directory created above.
if remove_all_directory_symlinks:
os.remove(full)
result.removed_files.add(os.path.normpath(full))
else:
existing_files.add(os.path.normpath(full))
else:
filtered.append(d)
dirs[:] = filtered
existing_dirs.add(os.path.normpath(root))
for d in dirs:
existing_dirs.add(os.path.normpath(os.path.join(root, d)))
for f in files:
existing_files.add(os.path.normpath(os.path.join(root, f)))
# Now we reconcile the state of the world against what we want.
# Remove files no longer accounted for.
if remove_unaccounted:
for f in existing_files - dest_files:
# Windows requires write access to remove files.
if os.name == 'nt' and not os.access(f, os.W_OK):
# It doesn't matter what we set permissions to since we
# will remove this file shortly.
os.chmod(f, 0600)
os.remove(f)
result.removed_files.add(f)
# Install files.
for p, f in self:
destfile = os.path.normpath(os.path.join(destination, p))
if f.copy(destfile, skip_if_older):
result.updated_files.add(destfile)
else:
result.existing_files.add(destfile)
if not remove_empty_directories:
return result
# Figure out which directories can be removed. This is complicated
# by the fact we optionally remove existing files. This would be easy
# if we walked the directory tree after installing files. But, we're
# trying to minimize system calls.
# Start with the ideal set.
remove_dirs = existing_dirs - required_dirs
# Then don't remove directories if we didn't remove unaccounted files
# and one of those files exists.
if not remove_unaccounted:
for f in existing_files:
parent = f
previous = ''
parents = set()
while True:
parent = os.path.dirname(parent)
parents.add(parent)
if previous == parent:
break
previous = parent
remove_dirs -= parents
# Remove empty directories that aren't required.
for d in sorted(remove_dirs, key=len, reverse=True):
# Permissions may not allow deletion. So ensure write access is
# in place before attempting delete.
os.chmod(d, 0700)
os.rmdir(d)
result.removed_directories.add(d)
return result
class FilePurger(FileCopier):
"""A variation of FileCopier that is used to purge untracked files.
Callers create an instance then call .add() to register files/paths that
should exist. Once the canonical set of files that may exist is defined,
.purge() is called against a target directory. All files and empty
directories in the target directory that aren't in the registry will be
deleted.
"""
class FakeFile(BaseFile):
def copy(self, dest, skip_if_older=True):
return True
def add(self, path):
"""Record that a path should exist.
We currently do not track what kind of entity should be behind that
path. We presumably could add type tracking later and have purging
delete entities if there is a type mismatch.
"""
return FileCopier.add(self, path, FilePurger.FakeFile())
def purge(self, dest):
"""Deletes all files and empty directories not in the registry."""
return FileCopier.copy(self, dest)
def copy(self, *args, **kwargs):
raise Exception('copy() disabled on FilePurger. Use purge().')
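# Illustrative sketch (added for exposition; paths are hypothetical): purging
# everything under a distribution directory that is not explicitly accounted
# for, using the add()/purge() pattern described in the docstring above.
#
#     purger = FilePurger()
#     purger.add('bin/app')
#     purger.add('bin/app.ini')
#     purger.purge('/path/to/dist')   # deletes any other file under dist/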
class Jarrer(FileRegistry, BaseFile):
'''
FileRegistry with the ability to copy and pack the registered files as a
jar file. Also acts as a BaseFile instance, to be copied with a FileCopier.
'''
def __init__(self, compress=True, optimize=True):
'''
Create a Jarrer instance. See mozpack.mozjar.JarWriter documentation
for details on the compress and optimize arguments.
'''
self.compress = compress
self.optimize = optimize
self._preload = []
FileRegistry.__init__(self)
def copy(self, dest, skip_if_older=True):
'''
Pack all registered files in the given destination jar. The given
destination jar may be a path to jar file, or a Dest instance for
a jar file.
If the destination jar file exists, its (compressed) contents are used
instead of the registered BaseFile instances when appropriate.
'''
class DeflaterDest(Dest):
'''
Dest-like class, reading from a file-like object initially, but
switching to a Deflater object if written to.
dest = DeflaterDest(original_file)
dest.read() # Reads original_file
dest.write(data) # Creates a Deflater and write data there
dest.read() # Re-opens the Deflater and reads from it
'''
def __init__(self, orig=None, compress=True):
self.mode = None
self.deflater = orig
self.compress = compress
def read(self, length=-1):
if self.mode != 'r':
assert self.mode is None
self.mode = 'r'
return self.deflater.read(length)
def write(self, data):
if self.mode != 'w':
from mozpack.mozjar import Deflater
self.deflater = Deflater(self.compress)
self.mode = 'w'
self.deflater.write(data)
def exists(self):
return self.deflater is not None
if isinstance(dest, basestring):
dest = Dest(dest)
assert isinstance(dest, Dest)
from mozpack.mozjar import JarWriter, JarReader
try:
old_jar = JarReader(fileobj=dest)
except Exception:
old_jar = []
old_contents = dict([(f.filename, f) for f in old_jar])
with JarWriter(fileobj=dest, compress=self.compress,
optimize=self.optimize) as jar:
for path, file in self:
if path in old_contents:
deflater = DeflaterDest(old_contents[path], self.compress)
else:
deflater = DeflaterDest(compress=self.compress)
file.copy(deflater, skip_if_older)
jar.add(path, deflater.deflater, mode=file.mode)
if self._preload:
jar.preload(self._preload)
def open(self):
raise RuntimeError('unsupported')
def preload(self, paths):
'''
Add the given set of paths to the list of preloaded files. See
mozpack.mozjar.JarWriter documentation for details on jar preloading.
'''
self._preload.extend(paths)
|
fvant/ansible-modules-core | refs/heads/devel | cloud/openstack/quantum_router_gateway.py | 99 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, Benno Joy <[email protected]>
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
try:
try:
from neutronclient.neutron import client
except ImportError:
from quantumclient.quantum import client
from keystoneclient.v2_0 import client as ksclient
HAVE_DEPS = True
except ImportError:
HAVE_DEPS = False
DOCUMENTATION = '''
---
module: quantum_router_gateway
version_added: "1.2"
author: "Benno Joy (@bennojoy)"
short_description: set/unset a gateway interface for the router with the specified external network
description:
- Creates/Removes a gateway interface from the router, used to associate a external network with a router to route external traffic.
options:
login_username:
description:
- login username to authenticate to keystone
required: true
default: admin
login_password:
description:
- Password of login user
required: true
default: 'yes'
login_tenant_name:
description:
- The tenant name of the login user
required: true
default: 'yes'
auth_url:
description:
- The keystone URL for authentication
required: false
default: 'http://127.0.0.1:35357/v2.0/'
region_name:
description:
- Name of the region
required: false
default: None
state:
description:
- Indicate desired state of the resource
choices: ['present', 'absent']
default: present
router_name:
description:
- Name of the router to which the gateway should be attached.
required: true
default: None
network_name:
description:
- Name of the external network which should be attached to the router.
required: true
default: None
requirements:
- "python >= 2.6"
- "python-neutronclient or python-quantumclient"
- "python-keystoneclient"
'''
EXAMPLES = '''
# Attach an external network with a router to allow flow of external traffic
- quantum_router_gateway: state=present login_username=admin login_password=admin
login_tenant_name=admin router_name=external_router
network_name=external_network
'''
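# Illustrative extra example (added for exposition; not part of the original
# module documentation): the gateway is detached again with state=absent.
#
# - quantum_router_gateway: state=absent login_username=admin login_password=admin
#                           login_tenant_name=admin router_name=external_router
#                           network_name=external_network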
_os_keystone = None
def _get_ksclient(module, kwargs):
try:
kclient = ksclient.Client(username=kwargs.get('login_username'),
password=kwargs.get('login_password'),
tenant_name=kwargs.get('login_tenant_name'),
auth_url=kwargs.get('auth_url'))
except Exception, e:
module.fail_json(msg = "Error authenticating to the keystone: %s " % e.message)
global _os_keystone
_os_keystone = kclient
return kclient
def _get_endpoint(module, ksclient):
try:
endpoint = ksclient.service_catalog.url_for(service_type='network', endpoint_type='publicURL')
except Exception, e:
module.fail_json(msg = "Error getting network endpoint: %s" % e.message)
return endpoint
def _get_neutron_client(module, kwargs):
_ksclient = _get_ksclient(module, kwargs)
token = _ksclient.auth_token
endpoint = _get_endpoint(module, _ksclient)
kwargs = {
'token': token,
'endpoint_url': endpoint
}
try:
neutron = client.Client('2.0', **kwargs)
except Exception, e:
module.fail_json(msg = "Error in connecting to neutron: %s " % e.message)
return neutron
def _get_router_id(module, neutron):
kwargs = {
'name': module.params['router_name'],
}
try:
routers = neutron.list_routers(**kwargs)
except Exception, e:
module.fail_json(msg = "Error in getting the router list: %s " % e.message)
if not routers['routers']:
return None
return routers['routers'][0]['id']
def _get_net_id(neutron, module):
kwargs = {
'name': module.params['network_name'],
'router:external': True
}
try:
networks = neutron.list_networks(**kwargs)
except Exception, e:
module.fail_json("Error in listing neutron networks: %s" % e.message)
if not networks['networks']:
return None
return networks['networks'][0]['id']
def _get_port_id(neutron, module, router_id, network_id):
kwargs = {
'device_id': router_id,
'network_id': network_id,
}
try:
ports = neutron.list_ports(**kwargs)
except Exception, e:
module.fail_json( msg = "Error in listing ports: %s" % e.message)
if not ports['ports']:
return None
return ports['ports'][0]['id']
def _add_gateway_router(neutron, module, router_id, network_id):
kwargs = {
'network_id': network_id
}
try:
neutron.add_gateway_router(router_id, kwargs)
except Exception, e:
module.fail_json(msg = "Error in adding gateway to router: %s" % e.message)
return True
def _remove_gateway_router(neutron, module, router_id):
try:
neutron.remove_gateway_router(router_id)
except Exception, e:
module.fail_json(msg = "Error in removing gateway to router: %s" % e.message)
return True
def main():
argument_spec = openstack_argument_spec()
argument_spec.update(dict(
router_name = dict(required=True),
network_name = dict(required=True),
state = dict(default='present', choices=['absent', 'present']),
))
module = AnsibleModule(argument_spec=argument_spec)
if not HAVE_DEPS:
module.fail_json(msg='python-keystoneclient and either python-neutronclient or python-quantumclient are required')
neutron = _get_neutron_client(module, module.params)
router_id = _get_router_id(module, neutron)
if not router_id:
module.fail_json(msg="failed to get the router id, please check the router name")
network_id = _get_net_id(neutron, module)
if not network_id:
module.fail_json(msg="failed to get the network id, please check the network name and make sure it is external")
if module.params['state'] == 'present':
port_id = _get_port_id(neutron, module, router_id, network_id)
if not port_id:
_add_gateway_router(neutron, module, router_id, network_id)
module.exit_json(changed=True, result="created")
module.exit_json(changed=False, result="success")
if module.params['state'] == 'absent':
port_id = _get_port_id(neutron, module, router_id, network_id)
if not port_id:
module.exit_json(changed=False, result="Success")
_remove_gateway_router(neutron, module, router_id)
module.exit_json(changed=True, result="Deleted")
# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
if __name__ == '__main__':
main()
|
golismero/golismero | refs/heads/master | thirdparty_libs/nltk/inference/prover9.py | 12 | # Natural Language Toolkit: Interface to the Prover9 Theorem Prover
#
# Copyright (C) 2001-2012 NLTK Project
# Author: Dan Garrette <[email protected]>
# Ewan Klein <[email protected]>
#
# URL: <http://www.nltk.org/>
# For license information, see LICENSE.TXT
"""
A theorem prover that makes use of the external 'Prover9' package.
"""
import os
import subprocess
import nltk
from nltk.sem.logic import LogicParser, ExistsExpression, AllExpression, \
NegatedExpression, AndExpression, IffExpression, OrExpression, \
EqualityExpression, ImpExpression
from api import BaseProverCommand, Prover
#
# Following is not yet used. Return code for 2 actually realized as 512.
#
p9_return_codes = {
0: True,
1: "(FATAL)", #A fatal error occurred (user's syntax error).
2: False, # (SOS_EMPTY) Prover9 ran out of things to do
# (sos list exhausted).
3: "(MAX_MEGS)", # The max_megs (memory limit) parameter was exceeded.
4: "(MAX_SECONDS)", # The max_seconds parameter was exceeded.
5: "(MAX_GIVEN)", # The max_given parameter was exceeded.
6: "(MAX_KEPT)", # The max_kept parameter was exceeded.
7: "(ACTION)", # A Prover9 action terminated the search.
101: "(SIGSEGV)", # Prover9 crashed, most probably due to a bug.
}
class Prover9CommandParent(object):
"""
A common base class used by both ``Prover9Command`` and ``MaceCommand``,
which is responsible for maintaining a goal and a set of assumptions,
and generating prover9-style input files from them.
"""
def print_assumptions(self, output_format='nltk'):
"""
Print the list of the current assumptions.
"""
if output_format.lower() == 'nltk':
for a in self.assumptions():
print a
elif output_format.lower() == 'prover9':
for a in convert_to_prover9(self.assumptions()):
print a
else:
raise NameError("Unrecognized value for 'output_format': %s" %
output_format)
class Prover9Command(Prover9CommandParent, BaseProverCommand):
"""
A ``ProverCommand`` specific to the ``Prover9`` prover. It contains
the a print_assumptions() method that is used to print the list
of assumptions in multiple formats.
"""
def __init__(self, goal=None, assumptions=None, timeout=60, prover=None):
"""
:param goal: Input expression to prove
:type goal: sem.Expression
:param assumptions: Input expressions to use as assumptions in
the proof.
:type assumptions: list(sem.Expression)
:param timeout: number of seconds before timeout; set to 0 for
no timeout.
:type timeout: int
:param prover: a prover. If not set, one will be created.
:type prover: Prover9
"""
if not assumptions:
assumptions = []
if prover is not None:
assert isinstance(prover, Prover9)
else:
prover = Prover9(timeout)
BaseProverCommand.__init__(self, prover, goal, assumptions)
def decorate_proof(self, proof_string, simplify=True):
"""
:see BaseProverCommand.decorate_proof()
"""
if simplify:
return self._prover._call_prooftrans(proof_string, ['striplabels'])[0].rstrip()
else:
return proof_string.rstrip()
class Prover9Parent(object):
"""
A common class extended by both ``Prover9`` and ``Mace <mace.Mace>``.
It contains the functionality required to convert NLTK-style
expressions into Prover9-style expressions.
"""
_binary_location = None
def config_prover9(self, binary_location, verbose=False):
if binary_location is None:
self._binary_location = None
self._prover9_bin = None
else:
name = 'prover9'
self._prover9_bin = nltk.internals.find_binary(
name,
path_to_bin=binary_location,
env_vars=['PROVER9HOME'],
url='http://www.cs.unm.edu/~mccune/prover9/',
binary_names=[name, name + '.exe'],
verbose=verbose)
            self._binary_location = self._prover9_bin.rsplit(os.path.sep, 1)[0]
def prover9_input(self, goal, assumptions):
"""
:return: The input string that should be provided to the
prover9 binary. This string is formed based on the goal,
assumptions, and timeout value of this object.
"""
s = ''
if assumptions:
s += 'formulas(assumptions).\n'
for p9_assumption in convert_to_prover9(assumptions):
s += ' %s.\n' % p9_assumption
s += 'end_of_list.\n\n'
if goal:
s += 'formulas(goals).\n'
s += ' %s.\n' % convert_to_prover9(goal)
s += 'end_of_list.\n\n'
return s
def binary_locations(self):
"""
A list of directories that should be searched for the prover9
executables. This list is used by ``config_prover9`` when searching
for the prover9 executables.
"""
return ['/usr/local/bin/prover9',
'/usr/local/bin/prover9/bin',
'/usr/local/bin',
'/usr/bin',
'/usr/local/prover9',
'/usr/local/share/prover9']
def _find_binary(self, name, verbose=False):
binary_locations = self.binary_locations()
if self._binary_location is not None:
binary_locations += [self._binary_location]
return nltk.internals.find_binary(name,
searchpath=binary_locations,
env_vars=['PROVER9HOME'],
url='http://www.cs.unm.edu/~mccune/prover9/',
binary_names=[name, name + '.exe'],
verbose=verbose)
def _call(self, input_str, binary, args=[], verbose=False):
"""
Call the binary with the given input.
:param input_str: A string whose contents are used as stdin.
:param binary: The location of the binary to call
:param args: A list of command-line arguments.
:return: A tuple (stdout, returncode)
:see: ``config_prover9``
"""
if verbose:
print 'Calling:', binary
print 'Args:', args
print 'Input:\n', input_str, '\n'
# Call prover9 via a subprocess
cmd = [binary] + args
p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
stdin=subprocess.PIPE)
(stdout, stderr) = p.communicate(input_str)
if verbose:
print 'Return code:', p.returncode
if stdout: print 'stdout:\n', stdout, '\n'
if stderr: print 'stderr:\n', stderr, '\n'
return (stdout, p.returncode)
def convert_to_prover9(input):
"""
Convert a ``logic.Expression`` to Prover9 format.
"""
if isinstance(input, list):
result = []
for s in input:
try:
result.append(_convert_to_prover9(s.simplify()))
except:
print 'input %s cannot be converted to Prover9 input syntax' % input
raise
return result
else:
try:
return _convert_to_prover9(input.simplify())
except:
print 'input %s cannot be converted to Prover9 input syntax' % input
raise
def _convert_to_prover9(expression):
"""
Convert ``logic.Expression`` to Prover9 formatted string.
"""
if isinstance(expression, ExistsExpression):
return 'exists ' + str(expression.variable) + ' ' + _convert_to_prover9(expression.term)
elif isinstance(expression, AllExpression):
return 'all ' + str(expression.variable) + ' ' + _convert_to_prover9(expression.term)
elif isinstance(expression, NegatedExpression):
return '-(' + _convert_to_prover9(expression.term) + ')'
elif isinstance(expression, AndExpression):
return '(' + _convert_to_prover9(expression.first) + ' & ' + \
_convert_to_prover9(expression.second) + ')'
elif isinstance(expression, OrExpression):
return '(' + _convert_to_prover9(expression.first) + ' | ' + \
_convert_to_prover9(expression.second) + ')'
elif isinstance(expression, ImpExpression):
return '(' + _convert_to_prover9(expression.first) + ' -> ' + \
_convert_to_prover9(expression.second) + ')'
elif isinstance(expression, IffExpression):
return '(' + _convert_to_prover9(expression.first) + ' <-> ' + \
_convert_to_prover9(expression.second) + ')'
elif isinstance(expression, EqualityExpression):
return '(' + _convert_to_prover9(expression.first) + ' = ' + \
_convert_to_prover9(expression.second) + ')'
else:
return str(expression)
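# Illustrative sketch (added for exposition; not part of the original module):
# sample conversions produced by the function above, derived by tracing its
# clauses against NLTK-parsed input.
#
#     all x.(man(x) -> mortal(x))     =>  all x (man(x) -> mortal(x))
#     some x.(man(x) & walks(x))      =>  exists x (man(x) & walks(x))
#     (not (man(x) & (not man(x))))   =>  -((man(x) & -(man(x))))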
class Prover9(Prover9Parent, Prover):
_prover9_bin = None
_prooftrans_bin = None
def __init__(self, timeout=60):
self._timeout = timeout
"""The timeout value for prover9. If a proof can not be found
in this amount of time, then prover9 will return false.
(Use 0 for no timeout.)"""
def _prove(self, goal=None, assumptions=None, verbose=False):
"""
Use Prover9 to prove a theorem.
:return: A pair whose first element is a boolean indicating if the
proof was successful (i.e. returns value of 0) and whose second element
is the output of the prover.
"""
if not assumptions:
assumptions = []
stdout, returncode = self._call_prover9(self.prover9_input(goal, assumptions),
verbose=verbose)
return (returncode == 0, stdout)
def prover9_input(self, goal, assumptions):
"""
:see: Prover9Parent.prover9_input
"""
s = 'clear(auto_denials).\n' #only one proof required
return s + Prover9Parent.prover9_input(self, goal, assumptions)
def _call_prover9(self, input_str, args=[], verbose=False):
"""
Call the ``prover9`` binary with the given input.
:param input_str: A string whose contents are used as stdin.
:param args: A list of command-line arguments.
:return: A tuple (stdout, returncode)
:see: ``config_prover9``
"""
if self._prover9_bin is None:
self._prover9_bin = self._find_binary('prover9', verbose)
updated_input_str = ''
if self._timeout > 0:
updated_input_str += 'assign(max_seconds, %d).\n\n' % self._timeout
updated_input_str += input_str
stdout, returncode = self._call(updated_input_str, self._prover9_bin, args, verbose)
if returncode not in [0,2]:
errormsgprefix = '%%ERROR:'
if errormsgprefix in stdout:
msgstart = stdout.index(errormsgprefix)
errormsg = stdout[msgstart:].strip()
else:
errormsg = None
if returncode in [3,4,5,6]:
raise Prover9LimitExceededException(returncode, errormsg)
else:
raise Prover9FatalException(returncode, errormsg)
return stdout, returncode
def _call_prooftrans(self, input_str, args=[], verbose=False):
"""
Call the ``prooftrans`` binary with the given input.
:param input_str: A string whose contents are used as stdin.
:param args: A list of command-line arguments.
:return: A tuple (stdout, returncode)
:see: ``config_prover9``
"""
if self._prooftrans_bin is None:
self._prooftrans_bin = self._find_binary('prooftrans', verbose)
return self._call(input_str, self._prooftrans_bin, args, verbose)
class Prover9Exception(Exception):
def __init__(self, returncode, message):
msg = p9_return_codes[returncode]
if message:
msg += '\n%s' % message
Exception.__init__(self, msg)
class Prover9FatalException(Prover9Exception):
pass
class Prover9LimitExceededException(Prover9Exception):
pass
######################################################################
#{ Tests and Demos
######################################################################
def test_config():
a = LogicParser().parse('(walk(j) & sing(j))')
g = LogicParser().parse('walk(j)')
p = Prover9Command(g, assumptions=[a])
p._executable_path = None
p.prover9_search=[]
p.prove()
#config_prover9('/usr/local/bin')
print p.prove()
print p.proof()
def test_convert_to_prover9(expr):
"""
Test that parsing works OK.
"""
for t in expr:
e = LogicParser().parse(t)
print convert_to_prover9(e)
def test_prove(arguments):
"""
Try some proofs and exhibit the results.
"""
for (goal, assumptions) in arguments:
g = LogicParser().parse(goal)
alist = [LogicParser().parse(a) for a in assumptions]
p = Prover9Command(g, assumptions=alist).prove()
for a in alist:
print ' %s' % a
print '|- %s: %s\n' % (g, p)
arguments = [
('(man(x) <-> (not (not man(x))))', []),
('(not (man(x) & (not man(x))))', []),
('(man(x) | (not man(x)))', []),
('(man(x) & (not man(x)))', []),
('(man(x) -> man(x))', []),
('(not (man(x) & (not man(x))))', []),
('(man(x) | (not man(x)))', []),
('(man(x) -> man(x))', []),
('(man(x) <-> man(x))', []),
('(not (man(x) <-> (not man(x))))', []),
('mortal(Socrates)', ['all x.(man(x) -> mortal(x))', 'man(Socrates)']),
('((all x.(man(x) -> walks(x)) & man(Socrates)) -> some y.walks(y))', []),
('(all x.man(x) -> all x.man(x))', []),
('some x.all y.sees(x,y)', []),
('some e3.(walk(e3) & subj(e3, mary))',
['some e1.(see(e1) & subj(e1, john) & some e2.(pred(e1, e2) & walk(e2) & subj(e2, mary)))']),
('some x e1.(see(e1) & subj(e1, x) & some e2.(pred(e1, e2) & walk(e2) & subj(e2, mary)))',
['some e1.(see(e1) & subj(e1, john) & some e2.(pred(e1, e2) & walk(e2) & subj(e2, mary)))'])
]
expressions = [r'some x y.sees(x,y)',
r'some x.(man(x) & walks(x))',
r'\x.(man(x) & walks(x))',
r'\x y.sees(x,y)',
r'walks(john)',
r'\x.big(x, \y.mouse(y))',
r'(walks(x) & (runs(x) & (threes(x) & fours(x))))',
r'(walks(x) -> runs(x))',
r'some x.(PRO(x) & sees(John, x))',
r'some x.(man(x) & (not walks(x)))',
r'all x.(man(x) -> walks(x))']
def spacer(num=45):
print '-' * num
def demo():
print "Testing configuration"
spacer()
test_config()
print
print "Testing conversion to Prover9 format"
spacer()
test_convert_to_prover9(expressions)
print
print "Testing proofs"
spacer()
test_prove(arguments)
if __name__ == '__main__':
demo()
|
marckuz/django | refs/heads/master | tests/admin_checks/__init__.py | 12133432 | |
raoanirudh/rmtk | refs/heads/master | rmtk/vulnerability/vulnerability.py | 12133432 | |
KyoungRan/Django_React_ex | refs/heads/master | Django_React_Workshop-mbrochh/django/myvenv/lib/python3.4/site-packages/django/conf/locale/nb/__init__.py | 12133432 | |
gannetson/django | refs/heads/master | tests/bash_completion/management/__init__.py | 12133432 | |
wtrdrnkr/pyrecon | refs/heads/master | setup.py | 2 | try:
from setuptools import setup
except ImportError:
from distutils.core import setup
setup(
name="PyRECONSTRUCT",
version="2.2.0",
author="Michael Musslewhite",
author_email="[email protected]",
url="https://github.com/musslebot/pyrecon",
packages=[
"classes",
"tools",
"gui"
],
package_dir={
"classes": "pyrecon/classes",
"tools": "pyrecon/tools",
"gui": "pyrecon/gui",
},
license="LICENSE.txt",
description="Python for interacting with RECONSTRUCT files",
long_description=open("README.txt").read(),
)
|
tecwebjoao/TecWeb-TF-2T-B-SI | refs/heads/master | venv/Lib/site-packages/setuptools/dep_util.py | 316 | from distutils.dep_util import newer_group
# yes, this was almost entirely copy-pasted from
# 'newer_pairwise()', this is just another convenience
# function.
def newer_pairwise_group(sources_groups, targets):
"""Walk both arguments in parallel, testing if each source group is newer
than its corresponding target. Returns a pair of lists (sources_groups,
targets) where sources is newer than target, according to the semantics
of 'newer_group()'.
"""
if len(sources_groups) != len(targets):
raise ValueError("'sources_group' and 'targets' must be the same length")
# build a pair of lists (sources_groups, targets) where source is newer
n_sources = []
n_targets = []
for i in range(len(sources_groups)):
if newer_group(sources_groups[i], targets[i]):
n_sources.append(sources_groups[i])
n_targets.append(targets[i])
return n_sources, n_targets
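# Illustrative sketch (added for exposition; file names and mtimes are
# hypothetical): given two object files where only ab.o is out of date,
#
#     newer_pairwise_group([['a.c', 'b.c'], ['c.c']], ['ab.o', 'c.o'])
#
# returns ([['a.c', 'b.c']], ['ab.o']) -- only the stale pair survives.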
|
cdellin/prpy | refs/heads/master | src/prpy/db.py | 3 | # Copyright (c) 2013, Carnegie Mellon University
# All rights reserved.
# Authors: Michael Koval <[email protected]>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# - Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# - Neither the name of Carnegie Mellon University nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import openravepy
import bind
class KinBodyDatabase(object):
DATABASE_NAME = 'prpy'
DATABASE_TABLE = 'ordata'
def __init__(self):
try:
import pymongo
except ImportError:
raise Exception('KinBodyDatabase requires MongoDB and pymongo to be installed.')
self.client = pymongo.MongoClient()
        self.database = self.client[self.DATABASE_NAME]
        self.table = self.database[self.DATABASE_TABLE]
def bind(self):
openravepy.Environment.ReadKinBodyDB = self.ReadKinBodyDB
def ReadKinBodyDB(self, env, name):
matches = self.table.find({ 'name': name })
# Check for missing and duplicate entries.
num_matches = matches.count()
if num_matches == 0:
raise KeyError('There is no object named "{0:s}".'.format(name))
elif num_matches > 1:
raise KeyError('There are {0:d} objects named "{1:s}".'.format(
num_matches, name))
# Construct the KinBody.
metadata = matches[0]
kinbody = env.ReadKinBodyXMLData(metadata['openrave_xml'])
kinbody.SetName(metadata['name'])
# Attach the metadata.
bind.InstanceDeduplicator.add_canonical(kinbody)
kinbody.metadata = metadata
return kinbody
def ReadRobotDB(self, env, name):
kinbody = self.ReadKinBodyDB(env, name)
if not isinstance(kinbody, openravepy.Robot):
kinbody.Destroy()
raise IOError('Type "{0:s}" is not a robot.'.format(name))
return kinbody
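# --- Hedged usage sketch (illustrative only): assumes a MongoDB server on
# localhost whose 'prpy.ordata' collection already holds documents with
# 'name' and 'openrave_xml' fields; the object name 'table' is made up.
if __name__ == '__main__':
    env = openravepy.Environment()
    db = KinBodyDatabase()
    db.bind()  # monkey-patches openravepy.Environment.ReadKinBodyDB
    body = db.ReadKinBodyDB(env, 'table')
    print(body.metadata['name'])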
|
Grogdor/CouchPotatoServer | refs/heads/master | libs/gntp/cli.py | 122 | # Copyright: 2013 Paul Traylor
# These sources are released under the terms of the MIT license: see LICENSE
import logging
import os
import sys
from optparse import OptionParser, OptionGroup
from gntp.notifier import GrowlNotifier
from gntp.shim import RawConfigParser
from gntp.version import __version__
DEFAULT_CONFIG = os.path.expanduser('~/.gntp')
config = RawConfigParser({
'hostname': 'localhost',
'password': None,
'port': 23053,
})
config.read([DEFAULT_CONFIG])
if not config.has_section('gntp'):
config.add_section('gntp')
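# A minimal ~/.gntp that the parser above would accept (the values are
# illustrative, not shipped defaults):
#
# [gntp]
# hostname = growl.example.com
# password = secret
# port = 23053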
class ClientParser(OptionParser):
def __init__(self):
OptionParser.__init__(self, version="%%prog %s" % __version__)
group = OptionGroup(self, "Network Options")
group.add_option("-H", "--host",
dest="host", default=config.get('gntp', 'hostname'),
help="Specify a hostname to which to send a remote notification. [%default]")
group.add_option("--port",
dest="port", default=config.getint('gntp', 'port'), type="int",
help="port to listen on [%default]")
group.add_option("-P", "--password",
dest='password', default=config.get('gntp', 'password'),
help="Network password")
self.add_option_group(group)
group = OptionGroup(self, "Notification Options")
group.add_option("-n", "--name",
dest="app", default='Python GNTP Test Client',
help="Set the name of the application [%default]")
group.add_option("-s", "--sticky",
dest='sticky', default=False, action="store_true",
help="Make the notification sticky [%default]")
group.add_option("--image",
dest="icon", default=None,
help="Icon for notification (URL or /path/to/file)")
group.add_option("-m", "--message",
dest="message", default=None,
help="Sets the message instead of using stdin")
group.add_option("-p", "--priority",
dest="priority", default=0, type="int",
help="-2 to 2 [%default]")
group.add_option("-d", "--identifier",
dest="identifier",
help="Identifier for coalescing")
group.add_option("-t", "--title",
dest="title", default=None,
help="Set the title of the notification [%default]")
group.add_option("-N", "--notification",
dest="name", default='Notification',
help="Set the notification name [%default]")
group.add_option("--callback",
dest="callback",
help="URL callback")
self.add_option_group(group)
# Extra Options
self.add_option('-v', '--verbose',
dest='verbose', default=0, action='count',
help="Verbosity levels")
def parse_args(self, args=None, values=None):
values, args = OptionParser.parse_args(self, args, values)
if values.message is None:
print('Enter a message followed by Ctrl-D')
try:
message = sys.stdin.read()
except KeyboardInterrupt:
exit()
else:
message = values.message
if values.title is None:
values.title = ' '.join(args)
# If we still have an empty title, use the
# first bit of the message as the title
if values.title == '':
values.title = message[:20]
values.verbose = logging.WARNING - values.verbose * 10
return values, message
def main():
(options, message) = ClientParser().parse_args()
logging.basicConfig(level=options.verbose)
if not os.path.exists(DEFAULT_CONFIG):
logging.info('No config read found at %s', DEFAULT_CONFIG)
growl = GrowlNotifier(
applicationName=options.app,
notifications=[options.name],
defaultNotifications=[options.name],
hostname=options.host,
password=options.password,
port=options.port,
)
result = growl.register()
if result is not True:
exit(result)
# This would likely be better placed within the growl notifier
# class but until I make _checkIcon smarter this is "easier"
if options.icon is not None and not options.icon.startswith('http'):
logging.info('Loading image %s', options.icon)
        f = open(options.icon, 'rb')  # image data is binary, not text
options.icon = f.read()
f.close()
result = growl.notify(
noteType=options.name,
title=options.title,
description=message,
icon=options.icon,
sticky=options.sticky,
priority=options.priority,
callback=options.callback,
identifier=options.identifier,
)
if result is not True:
exit(result)
if __name__ == "__main__":
main()
|
savoirfairelinux/partner-addons | refs/heads/10.0 | partner_duplicate_mgmt/models/res_partner.py | 1 | # -*- coding: utf-8 -*-
# © 2017 Savoir-faire Linux
# License LGPL-3.0 or later (http://www.gnu.org/licenses/lgpl).
import logging
import re
from odoo import _, api, fields, models
from odoo.exceptions import UserError
import unidecode
_logger = logging.getLogger(__name__)
class ResPartner(models.Model):
_inherit = 'res.partner'
indexed_name = fields.Char('Indexed Name')
duplicate_ids = fields.Many2many(
'res.partner', relation='rel_partner_duplicate',
column1='partner_id', column2='duplicate_id',
compute='_compute_duplicate_ids', string='Duplicate Partners')
duplicate_1_ids = fields.One2many(
'res.partner.duplicate', inverse_name='partner_2_id',
domain=[('state', '=', 'to_validate')])
duplicate_2_ids = fields.One2many(
'res.partner.duplicate', inverse_name='partner_1_id',
domain=[('state', '=', 'to_validate')])
duplicate_count = fields.Integer(compute='_compute_duplicate_ids')
company_type = fields.Selection(store=True)
@api.depends('duplicate_1_ids', 'duplicate_2_ids')
def _compute_duplicate_ids(self):
for rec in self:
dups_1 = rec.mapped('duplicate_1_ids.partner_1_id')
dups_2 = rec.mapped('duplicate_2_ids.partner_2_id')
rec.duplicate_ids = (dups_1 | dups_2)
rec.duplicate_count = len(rec.duplicate_ids)
def _get_indexed_name(self):
if not self.name:
return ''
indexed_name = self.name
terms = self.env['res.partner.duplicate.term'].search([])
        spaces_beginning = r'(^|\s+)'
        spaces_end = r'($|\s+)'
for term in terms:
expression = term.expression
if term.type != 'regex':
expression = (
                    spaces_beginning + re.escape(expression) + spaces_end)
indexed_name = re.sub(
expression, ' ', indexed_name, flags=re.IGNORECASE)
indexed_name = unidecode.unidecode(indexed_name)
return indexed_name.strip().lower()
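    # Illustrative behaviour (assuming a hypothetical duplicate term 'inc.'
    # is configured): a partner named 'Société Générale Inc.' would be
    # indexed as 'societe generale' -- the term is stripped, accents are
    # transliterated by unidecode, and the result is lowercased.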
def _get_min_similarity(self, indexed_name):
if len(indexed_name) <= 9:
return self.env['ir.config_parameter'].get_param(
'partner_duplicate_mgmt.partner_name_similarity_1')
if 10 <= len(indexed_name) <= 17:
return self.env['ir.config_parameter'].get_param(
'partner_duplicate_mgmt.partner_name_similarity_2')
if len(indexed_name) >= 18:
return self.env['ir.config_parameter'].get_param(
'partner_duplicate_mgmt.partner_name_similarity_3')
def _get_duplicates(self, indexed_name=None):
if self._context.get('disable_duplicate_check'):
return []
if not indexed_name:
indexed_name = self.indexed_name
min_similarity = self._get_min_similarity(indexed_name)
cr = self.env.cr
cr.execute('SELECT set_limit(%s)', (min_similarity,))
cr.execute("""
SELECT p.id, p.name
FROM res_partner p
WHERE p.id != %(id)s
AND p.active = true
AND p.indexed_name %% %(name)s
AND p.company_type = %(company_type)s
AND ((p.parent_id IS NOT DISTINCT FROM %(parent)s)
OR (p.parent_id IS NULL AND p.id != %(parent)s)
OR (%(parent)s IS NULL AND %(id)s != p.parent_id))
AND NOT EXISTS (
SELECT NULL
FROM res_partner_duplicate d
WHERE (d.partner_1_id = p.id AND d.partner_2_id = %(id)s)
OR (d.partner_1_id = %(id)s AND d.partner_2_id = p.id)
)
""", {
'id': self.id or self._origin.id or 0,
'company_type': self.company_type,
'name': indexed_name or '',
'parent': self.parent_id.id or None,
})
return cr.dictfetchall()
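    # Illustrative trigram match (hypothetical names): with set_limit(0.3),
    # pg_trgm makes 'acme corp' % 'acme corporation' true, so a partner
    # indexed as 'acme corporation' is returned as a candidate duplicate
    # of one indexed as 'acme corp'.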
@api.onchange('name', 'parent_id', 'company_type', 'is_company')
def onchange_name(self):
indexed_name = self._get_indexed_name()
if self.id or not indexed_name:
return
duplicate_dict = self._get_duplicates(indexed_name)
if duplicate_dict:
duplicate_names = [d['name'] for d in duplicate_dict]
partner_names = ", ".join(duplicate_names)
return {
'warning': {
'title': 'Warning',
'message': _(
"This partner (%(new_partner)s) may be considered "
"as a duplicate of the following partner(s): "
"%(partner_names)s.") % {
'new_partner': self.name,
'partner_names': partner_names,
}}}
def _update_indexed_name(self):
for partner in self:
indexed_name = partner._get_indexed_name()
partner.write({'indexed_name': indexed_name})
def _create_duplicates(self):
partners = self._get_duplicates()
duplicates = self.env['res.partner']
for partner in partners:
self.env['res.partner.duplicate'].create({
'partner_1_id': min(self.id, partner['id']),
'partner_2_id': max(self.id, partner['id']),
})
duplicates |= self.browse(partner['id'])
return duplicates
def _post_message_duplicates(self, duplicates):
for record in self:
if duplicates:
partner_names = ', '.join(duplicates.mapped('name'))
message = _('Duplicate Partners : %s') % (partner_names)
record.message_post(body=message)
@api.model
def create(self, vals):
res = super(ResPartner, self).create(vals)
res._update_indexed_name()
duplicates = res._create_duplicates()
res._post_message_duplicates(duplicates)
return res
@api.multi
def write(self, vals):
res = super(ResPartner, self).write(vals)
if 'name' in vals or 'firstname' in vals or 'lastname' in vals:
self._update_indexed_name()
if (
'parent_id' in vals or 'name' in vals or
'lastname' in vals or 'firstname' in vals or
'company_type' in vals or 'is_company' in vals
):
for record in self:
duplicates = record._create_duplicates()
record._post_message_duplicates(duplicates)
return res
@api.multi
def action_view_duplicates(self):
self.ensure_one()
action = self.env.ref('contacts.action_contacts')
partners = self.duplicate_ids
res = {
'name': action.name,
'type': action.type,
'res_model': action.res_model,
'view_type': action.view_type,
'view_mode': 'tree,form',
'views': [(action.view_id.id, 'tree'), (False, 'form')],
'search_view_id': action.search_view_id.id,
'context': action.context,
'target': 'new',
'domain': [
('id', 'in', partners.ids),
],
}
if len(partners) == 1:
res['view_id'] = (
self.env.ref('partner_duplicate_mgmt.view_partner_form').id)
res['view_mode'] = 'form'
res['res_id'] = partners.id
del res['views']
return res
@api.model_cr_context
def _auto_init(self):
res = super(ResPartner, self)._auto_init()
cr = self._cr
cr.execute("""
SELECT name, installed
FROM pg_available_extension_versions
WHERE name='pg_trgm' AND installed=true
""")
if not cr.fetchone():
try:
cr.execute('CREATE EXTENSION pg_trgm')
            except Exception:
message = (
"Could not create the pg_trgm postgresql EXTENSION. "
"You must log to your database as superuser and type "
"the following command:\nCREATE EXTENSION pg_trgm."
)
_logger.warning(message)
raise Exception(message)
cr.execute("""
SELECT indexname
FROM pg_indexes
WHERE indexname='res_partner_indexed_name_gin'
""")
if not cr.fetchone():
cr.execute("""
CREATE INDEX res_partner_indexed_name_gin
ON res_partner USING gin(indexed_name gin_trgm_ops)
""")
return res
@api.multi
def action_merge(self):
group = self.env.ref(
'partner_duplicate_mgmt.group_duplicate_partners_control')
if group not in self.env.user.groups_id:
raise UserError(_(
"You don't have access to merge partners."))
if len(self) != 2:
raise UserError(_("Please, select two partners to merge."))
if self[0].company_type != self[1].company_type:
raise UserError(_("You can not merge a company with a contact."))
duplicate = self.env['res.partner.duplicate'].search([
('partner_1_id', 'in', self.ids),
('partner_2_id', 'in', self.ids),
], limit=1)
if not duplicate:
duplicate = self.env['res.partner.duplicate'].create(
{'partner_1_id': self[0].id, 'partner_2_id': self[1].id})
if duplicate.state != 'to_validate':
view = self.env.ref(
'partner_duplicate_mgmt.res_partner_duplicate_form')
return {
'type': 'ir.actions.act_window',
'res_model': 'res.partner.duplicate',
'view_type': 'form',
'view_mode': 'form',
'views': [(view.id, 'form')],
'res_id': duplicate.id,
}
return duplicate.open_partner_merge_wizard()
@api.model
def hide_merge_selected_contacts_action(self):
action = self.env['ir.actions.act_window'].search([
('name', '=', 'Merge Selected Contacts')])
if action:
action.unlink()
@api.model
def name_search(self, name='', args=None, operator='ilike', limit=100):
if not self._context.get('duplicate_partner_1_id'):
return super(ResPartner, self).name_search(
name, args, operator, limit)
partner_1 = self.browse(self._context.get('duplicate_partner_1_id'))
partner_2 = self.browse(self._context.get('duplicate_partner_2_id'))
return (partner_1 | partner_2).name_get()
def _get_field_value(self, field_name):
field_value = getattr(self, field_name)
return self._fields[field_name].convert_to_write(field_value, self)
|
Big-B702/python-for-android | refs/heads/master | python3-alpha/python3-src/Lib/test/test_decimal.py | 49 | # Copyright (c) 2004 Python Software Foundation.
# All rights reserved.
# Written by Eric Price <eprice at tjhsst.edu>
# and Facundo Batista <facundo at taniquetil.com.ar>
# and Raymond Hettinger <python at rcn.com>
# and Aahz (aahz at pobox.com)
# and Tim Peters
"""
These are the test cases for the Decimal module.
There are two groups of tests, Arithmetic and Behaviour. The former tests
the Decimal arithmetic using the tests provided by Mike Cowlishaw. The
latter tests the pythonic behaviour according to PEP 327.
Cowlishaw's tests can be downloaded from:
www2.hursley.ibm.com/decimal/dectest.zip
This test module can be called from the command line with one parameter
(Arithmetic or Behaviour) to test each part, or without a parameter to test
both parts. If you're working through IDLE, you can import this test module
and call test_main() with the corresponding argument.
"""
import math
import os, sys
import operator
import warnings
import pickle, copy
import unittest
from decimal import *
import numbers
from test.support import (run_unittest, run_doctest, is_resource_enabled,
requires_IEEE_754)
from test.support import check_warnings
import random
try:
import threading
except ImportError:
threading = None
# Useful Test Constant
Signals = tuple(getcontext().flags.keys())
# Signals ordered with respect to precedence: when an operation
# produces multiple signals, signals occurring later in the list
# should be handled before those occurring earlier in the list.
OrderedSignals = (Clamped, Rounded, Inexact, Subnormal,
Underflow, Overflow, DivisionByZero, InvalidOperation)
# Tests are built around these assumed context defaults.
# test_main() restores the original context.
def init():
global ORIGINAL_CONTEXT
ORIGINAL_CONTEXT = getcontext().copy()
DefaultTestContext = Context(
prec = 9,
rounding = ROUND_HALF_EVEN,
traps = dict.fromkeys(Signals, 0)
)
setcontext(DefaultTestContext)
TESTDATADIR = 'decimaltestdata'
if __name__ == '__main__':
file = sys.argv[0]
else:
file = __file__
testdir = os.path.dirname(file) or os.curdir
directory = testdir + os.sep + TESTDATADIR + os.sep
skip_expected = not os.path.isdir(directory)
# list of individual .decTest test ids that correspond to tests that
# we're skipping for one reason or another.
skipped_test_ids = set([
# Skip implementation-specific scaleb tests.
'scbx164',
'scbx165',
# For some operations (currently exp, ln, log10, power), the decNumber
# reference implementation imposes additional restrictions on the context
# and operands. These restrictions are not part of the specification;
# however, the effect of these restrictions does show up in some of the
# testcases. We skip testcases that violate these restrictions, since
# Decimal behaves differently from decNumber for these testcases so these
# testcases would otherwise fail.
'expx901',
'expx902',
'expx903',
'expx905',
'lnx901',
'lnx902',
'lnx903',
'lnx905',
'logx901',
'logx902',
'logx903',
'logx905',
'powx1183',
'powx1184',
'powx4001',
'powx4002',
'powx4003',
'powx4005',
'powx4008',
'powx4010',
'powx4012',
'powx4014',
])
# Make sure it actually raises errors when not expected and caught in flags
# Slower, since it runs some things several times.
EXTENDEDERRORTEST = False
#Map the test cases' error names to the actual errors
ErrorNames = {'clamped' : Clamped,
'conversion_syntax' : InvalidOperation,
'division_by_zero' : DivisionByZero,
'division_impossible' : InvalidOperation,
'division_undefined' : InvalidOperation,
'inexact' : Inexact,
'invalid_context' : InvalidOperation,
'invalid_operation' : InvalidOperation,
'overflow' : Overflow,
'rounded' : Rounded,
'subnormal' : Subnormal,
'underflow' : Underflow}
def Nonfunction(*args):
"""Doesn't do anything."""
return None
RoundingDict = {'ceiling' : ROUND_CEILING, #Maps test-case names to roundings.
'down' : ROUND_DOWN,
'floor' : ROUND_FLOOR,
'half_down' : ROUND_HALF_DOWN,
'half_even' : ROUND_HALF_EVEN,
'half_up' : ROUND_HALF_UP,
'up' : ROUND_UP,
'05up' : ROUND_05UP}
# Name adapter to be able to change the Decimal and Context
# interface without changing the test files from Cowlishaw
nameAdapter = {'and':'logical_and',
'apply':'_apply',
'class':'number_class',
'comparesig':'compare_signal',
'comparetotal':'compare_total',
'comparetotmag':'compare_total_mag',
'copy':'copy_decimal',
'copyabs':'copy_abs',
'copynegate':'copy_negate',
'copysign':'copy_sign',
'divideint':'divide_int',
'invert':'logical_invert',
'iscanonical':'is_canonical',
'isfinite':'is_finite',
'isinfinite':'is_infinite',
'isnan':'is_nan',
'isnormal':'is_normal',
'isqnan':'is_qnan',
'issigned':'is_signed',
'issnan':'is_snan',
'issubnormal':'is_subnormal',
'iszero':'is_zero',
'maxmag':'max_mag',
'minmag':'min_mag',
'nextminus':'next_minus',
'nextplus':'next_plus',
'nexttoward':'next_toward',
'or':'logical_or',
'reduce':'normalize',
'remaindernear':'remainder_near',
'samequantum':'same_quantum',
'squareroot':'sqrt',
'toeng':'to_eng_string',
'tointegral':'to_integral_value',
'tointegralx':'to_integral_exact',
'tosci':'to_sci_string',
'xor':'logical_xor',
}
# The following functions return True/False rather than a Decimal instance
LOGICAL_FUNCTIONS = (
'is_canonical',
'is_finite',
'is_infinite',
'is_nan',
'is_normal',
'is_qnan',
'is_signed',
'is_snan',
'is_subnormal',
'is_zero',
'same_quantum',
)
class DecimalTest(unittest.TestCase):
"""Class which tests the Decimal class against the test cases.
Changed for unittest.
"""
def setUp(self):
self.context = Context()
self.ignore_list = ['#']
        # Basically, a '#' operand means the operation returns NaN and
        # signals InvalidOperation.  Different from an sNaN in trim.
self.ChangeDict = {'precision' : self.change_precision,
'rounding' : self.change_rounding_method,
'maxexponent' : self.change_max_exponent,
'minexponent' : self.change_min_exponent,
'clamp' : self.change_clamp}
def eval_file(self, file):
global skip_expected
if skip_expected:
raise unittest.SkipTest
return
with open(file) as f:
for line in f:
line = line.replace('\r\n', '').replace('\n', '')
#print line
try:
t = self.eval_line(line)
except DecimalException as exception:
#Exception raised where there shouldn't have been one.
self.fail('Exception "'+exception.__class__.__name__ + '" raised on line '+line)
return
def eval_line(self, s):
if s.find(' -> ') >= 0 and s[:2] != '--' and not s.startswith(' --'):
s = (s.split('->')[0] + '->' +
s.split('->')[1].split('--')[0]).strip()
else:
s = s.split('--')[0].strip()
for ignore in self.ignore_list:
if s.find(ignore) >= 0:
#print s.split()[0], 'NotImplemented--', ignore
return
if not s:
return
elif ':' in s:
return self.eval_directive(s)
else:
return self.eval_equation(s)
def eval_directive(self, s):
funct, value = (x.strip().lower() for x in s.split(':'))
if funct == 'rounding':
value = RoundingDict[value]
else:
try:
value = int(value)
except ValueError:
pass
funct = self.ChangeDict.get(funct, Nonfunction)
funct(value)
def eval_equation(self, s):
#global DEFAULT_PRECISION
#print DEFAULT_PRECISION
if not TEST_ALL and random.random() < 0.90:
return
try:
Sides = s.split('->')
L = Sides[0].strip().split()
id = L[0]
if DEBUG:
print("Test ", id, end=" ")
funct = L[1].lower()
valstemp = L[2:]
L = Sides[1].strip().split()
ans = L[0]
exceptions = L[1:]
except (TypeError, AttributeError, IndexError):
raise InvalidOperation
def FixQuotes(val):
val = val.replace("''", 'SingleQuote').replace('""', 'DoubleQuote')
val = val.replace("'", '').replace('"', '')
val = val.replace('SingleQuote', "'").replace('DoubleQuote', '"')
return val
if id in skipped_test_ids:
return
fname = nameAdapter.get(funct, funct)
if fname == 'rescale':
return
funct = getattr(self.context, fname)
vals = []
conglomerate = ''
quote = 0
theirexceptions = [ErrorNames[x.lower()] for x in exceptions]
for exception in Signals:
self.context.traps[exception] = 1 #Catch these bugs...
for exception in theirexceptions:
self.context.traps[exception] = 0
for i, val in enumerate(valstemp):
if val.count("'") % 2 == 1:
quote = 1 - quote
if quote:
conglomerate = conglomerate + ' ' + val
continue
else:
val = conglomerate + val
conglomerate = ''
v = FixQuotes(val)
if fname in ('to_sci_string', 'to_eng_string'):
if EXTENDEDERRORTEST:
for error in theirexceptions:
self.context.traps[error] = 1
try:
funct(self.context.create_decimal(v))
except error:
pass
except Signals as e:
self.fail("Raised %s in %s when %s disabled" % \
(e, s, error))
else:
self.fail("Did not raise %s in %s" % (error, s))
self.context.traps[error] = 0
v = self.context.create_decimal(v)
else:
v = Decimal(v, self.context)
vals.append(v)
ans = FixQuotes(ans)
if EXTENDEDERRORTEST and fname not in ('to_sci_string', 'to_eng_string'):
for error in theirexceptions:
self.context.traps[error] = 1
try:
funct(*vals)
except error:
pass
except Signals as e:
self.fail("Raised %s in %s when %s disabled" % \
(e, s, error))
else:
self.fail("Did not raise %s in %s" % (error, s))
self.context.traps[error] = 0
# as above, but add traps cumulatively, to check precedence
ordered_errors = [e for e in OrderedSignals if e in theirexceptions]
for error in ordered_errors:
self.context.traps[error] = 1
try:
funct(*vals)
except error:
pass
except Signals as e:
self.fail("Raised %s in %s; expected %s" %
(type(e), s, error))
else:
self.fail("Did not raise %s in %s" % (error, s))
# reset traps
for error in ordered_errors:
self.context.traps[error] = 0
if DEBUG:
print("--", self.context)
try:
result = str(funct(*vals))
if fname in LOGICAL_FUNCTIONS:
result = str(int(eval(result))) # 'True', 'False' -> '1', '0'
except Signals as error:
self.fail("Raised %s in %s" % (error, s))
except: #Catch any error long enough to state the test case.
print("ERROR:", s)
raise
myexceptions = self.getexceptions()
self.context.clear_flags()
myexceptions.sort(key=repr)
theirexceptions.sort(key=repr)
self.assertEqual(result, ans,
'Incorrect answer for ' + s + ' -- got ' + result)
self.assertEqual(myexceptions, theirexceptions,
'Incorrect flags set in ' + s + ' -- got ' + str(myexceptions))
return
def getexceptions(self):
return [e for e in Signals if self.context.flags[e]]
def change_precision(self, prec):
self.context.prec = prec
def change_rounding_method(self, rounding):
self.context.rounding = rounding
def change_min_exponent(self, exp):
self.context.Emin = exp
def change_max_exponent(self, exp):
self.context.Emax = exp
def change_clamp(self, clamp):
self.context.clamp = clamp
# The following classes test the behaviour of Decimal according to PEP 327
class DecimalExplicitConstructionTest(unittest.TestCase):
'''Unit tests for Explicit Construction cases of Decimal.'''
def test_explicit_empty(self):
self.assertEqual(Decimal(), Decimal("0"))
def test_explicit_from_None(self):
self.assertRaises(TypeError, Decimal, None)
def test_explicit_from_int(self):
#positive
d = Decimal(45)
self.assertEqual(str(d), '45')
#very large positive
d = Decimal(500000123)
self.assertEqual(str(d), '500000123')
#negative
d = Decimal(-45)
self.assertEqual(str(d), '-45')
#zero
d = Decimal(0)
self.assertEqual(str(d), '0')
def test_explicit_from_string(self):
#empty
self.assertEqual(str(Decimal('')), 'NaN')
#int
self.assertEqual(str(Decimal('45')), '45')
#float
self.assertEqual(str(Decimal('45.34')), '45.34')
#engineer notation
self.assertEqual(str(Decimal('45e2')), '4.5E+3')
#just not a number
self.assertEqual(str(Decimal('ugly')), 'NaN')
#leading and trailing whitespace permitted
self.assertEqual(str(Decimal('1.3E4 \n')), '1.3E+4')
self.assertEqual(str(Decimal(' -7.89')), '-7.89')
def test_explicit_from_tuples(self):
#zero
d = Decimal( (0, (0,), 0) )
self.assertEqual(str(d), '0')
#int
d = Decimal( (1, (4, 5), 0) )
self.assertEqual(str(d), '-45')
#float
d = Decimal( (0, (4, 5, 3, 4), -2) )
self.assertEqual(str(d), '45.34')
#weird
d = Decimal( (1, (4, 3, 4, 9, 1, 3, 5, 3, 4), -25) )
self.assertEqual(str(d), '-4.34913534E-17')
#wrong number of items
self.assertRaises(ValueError, Decimal, (1, (4, 3, 4, 9, 1)) )
#bad sign
self.assertRaises(ValueError, Decimal, (8, (4, 3, 4, 9, 1), 2) )
self.assertRaises(ValueError, Decimal, (0., (4, 3, 4, 9, 1), 2) )
self.assertRaises(ValueError, Decimal, (Decimal(1), (4, 3, 4, 9, 1), 2))
#bad exp
self.assertRaises(ValueError, Decimal, (1, (4, 3, 4, 9, 1), 'wrong!') )
self.assertRaises(ValueError, Decimal, (1, (4, 3, 4, 9, 1), 0.) )
self.assertRaises(ValueError, Decimal, (1, (4, 3, 4, 9, 1), '1') )
#bad coefficients
self.assertRaises(ValueError, Decimal, (1, (4, 3, 4, None, 1), 2) )
self.assertRaises(ValueError, Decimal, (1, (4, -3, 4, 9, 1), 2) )
self.assertRaises(ValueError, Decimal, (1, (4, 10, 4, 9, 1), 2) )
self.assertRaises(ValueError, Decimal, (1, (4, 3, 4, 'a', 1), 2) )
def test_explicit_from_bool(self):
self.assertIs(bool(Decimal(0)), False)
self.assertIs(bool(Decimal(1)), True)
self.assertEqual(Decimal(False), Decimal(0))
self.assertEqual(Decimal(True), Decimal(1))
def test_explicit_from_Decimal(self):
#positive
d = Decimal(45)
e = Decimal(d)
self.assertEqual(str(e), '45')
self.assertNotEqual(id(d), id(e))
#very large positive
d = Decimal(500000123)
e = Decimal(d)
self.assertEqual(str(e), '500000123')
self.assertNotEqual(id(d), id(e))
#negative
d = Decimal(-45)
e = Decimal(d)
self.assertEqual(str(e), '-45')
self.assertNotEqual(id(d), id(e))
#zero
d = Decimal(0)
e = Decimal(d)
self.assertEqual(str(e), '0')
self.assertNotEqual(id(d), id(e))
@requires_IEEE_754
def test_explicit_from_float(self):
r = Decimal(0.1)
self.assertEqual(type(r), Decimal)
self.assertEqual(str(r),
'0.1000000000000000055511151231257827021181583404541015625')
self.assertTrue(Decimal(float('nan')).is_qnan())
self.assertTrue(Decimal(float('inf')).is_infinite())
self.assertTrue(Decimal(float('-inf')).is_infinite())
self.assertEqual(str(Decimal(float('nan'))),
str(Decimal('NaN')))
self.assertEqual(str(Decimal(float('inf'))),
str(Decimal('Infinity')))
self.assertEqual(str(Decimal(float('-inf'))),
str(Decimal('-Infinity')))
self.assertEqual(str(Decimal(float('-0.0'))),
str(Decimal('-0')))
for i in range(200):
x = random.expovariate(0.01) * (random.random() * 2.0 - 1.0)
self.assertEqual(x, float(Decimal(x))) # roundtrip
def test_explicit_context_create_decimal(self):
nc = copy.copy(getcontext())
nc.prec = 3
# empty
d = Decimal()
self.assertEqual(str(d), '0')
d = nc.create_decimal()
self.assertEqual(str(d), '0')
# from None
self.assertRaises(TypeError, nc.create_decimal, None)
# from int
d = nc.create_decimal(456)
self.assertIsInstance(d, Decimal)
self.assertEqual(nc.create_decimal(45678),
nc.create_decimal('457E+2'))
# from string
d = Decimal('456789')
self.assertEqual(str(d), '456789')
d = nc.create_decimal('456789')
self.assertEqual(str(d), '4.57E+5')
# leading and trailing whitespace should result in a NaN;
# spaces are already checked in Cowlishaw's test-suite, so
# here we just check that a trailing newline results in a NaN
self.assertEqual(str(nc.create_decimal('3.14\n')), 'NaN')
# from tuples
d = Decimal( (1, (4, 3, 4, 9, 1, 3, 5, 3, 4), -25) )
self.assertEqual(str(d), '-4.34913534E-17')
d = nc.create_decimal( (1, (4, 3, 4, 9, 1, 3, 5, 3, 4), -25) )
self.assertEqual(str(d), '-4.35E-17')
# from Decimal
prevdec = Decimal(500000123)
d = Decimal(prevdec)
self.assertEqual(str(d), '500000123')
d = nc.create_decimal(prevdec)
self.assertEqual(str(d), '5.00E+8')
def test_unicode_digits(self):
test_values = {
'\uff11': '1',
'\u0660.\u0660\u0663\u0667\u0662e-\u0663' : '0.0000372',
'-nan\u0c68\u0c6a\u0c66\u0c66' : '-NaN2400',
}
for input, expected in test_values.items():
self.assertEqual(str(Decimal(input)), expected)
class DecimalImplicitConstructionTest(unittest.TestCase):
'''Unit tests for Implicit Construction cases of Decimal.'''
def test_implicit_from_None(self):
self.assertRaises(TypeError, eval, 'Decimal(5) + None', globals())
def test_implicit_from_int(self):
#normal
self.assertEqual(str(Decimal(5) + 45), '50')
#exceeding precision
self.assertEqual(Decimal(5) + 123456789000, Decimal(123456789000))
def test_implicit_from_string(self):
self.assertRaises(TypeError, eval, 'Decimal(5) + "3"', globals())
def test_implicit_from_float(self):
self.assertRaises(TypeError, eval, 'Decimal(5) + 2.2', globals())
def test_implicit_from_Decimal(self):
self.assertEqual(Decimal(5) + Decimal(45), Decimal(50))
def test_rop(self):
# Allow other classes to be trained to interact with Decimals
class E:
def __divmod__(self, other):
return 'divmod ' + str(other)
def __rdivmod__(self, other):
return str(other) + ' rdivmod'
def __lt__(self, other):
return 'lt ' + str(other)
def __gt__(self, other):
return 'gt ' + str(other)
def __le__(self, other):
return 'le ' + str(other)
def __ge__(self, other):
return 'ge ' + str(other)
def __eq__(self, other):
return 'eq ' + str(other)
def __ne__(self, other):
return 'ne ' + str(other)
self.assertEqual(divmod(E(), Decimal(10)), 'divmod 10')
self.assertEqual(divmod(Decimal(10), E()), '10 rdivmod')
self.assertEqual(eval('Decimal(10) < E()'), 'gt 10')
self.assertEqual(eval('Decimal(10) > E()'), 'lt 10')
self.assertEqual(eval('Decimal(10) <= E()'), 'ge 10')
self.assertEqual(eval('Decimal(10) >= E()'), 'le 10')
self.assertEqual(eval('Decimal(10) == E()'), 'eq 10')
self.assertEqual(eval('Decimal(10) != E()'), 'ne 10')
# insert operator methods and then exercise them
oplist = [
('+', '__add__', '__radd__'),
('-', '__sub__', '__rsub__'),
('*', '__mul__', '__rmul__'),
('/', '__truediv__', '__rtruediv__'),
('%', '__mod__', '__rmod__'),
('//', '__floordiv__', '__rfloordiv__'),
('**', '__pow__', '__rpow__')
]
for sym, lop, rop in oplist:
setattr(E, lop, lambda self, other: 'str' + lop + str(other))
setattr(E, rop, lambda self, other: str(other) + rop + 'str')
self.assertEqual(eval('E()' + sym + 'Decimal(10)'),
'str' + lop + '10')
self.assertEqual(eval('Decimal(10)' + sym + 'E()'),
'10' + rop + 'str')
class DecimalFormatTest(unittest.TestCase):
'''Unit tests for the format function.'''
def test_formatting(self):
# triples giving a format, a Decimal, and the expected result
test_values = [
('e', '0E-15', '0e-15'),
('e', '2.3E-15', '2.3e-15'),
('e', '2.30E+2', '2.30e+2'), # preserve significant zeros
('e', '2.30000E-15', '2.30000e-15'),
('e', '1.23456789123456789e40', '1.23456789123456789e+40'),
('e', '1.5', '1.5e+0'),
('e', '0.15', '1.5e-1'),
('e', '0.015', '1.5e-2'),
('e', '0.0000000000015', '1.5e-12'),
('e', '15.0', '1.50e+1'),
('e', '-15', '-1.5e+1'),
('e', '0', '0e+0'),
('e', '0E1', '0e+1'),
('e', '0.0', '0e-1'),
('e', '0.00', '0e-2'),
('.6e', '0E-15', '0.000000e-9'),
('.6e', '0', '0.000000e+6'),
('.6e', '9.999999', '9.999999e+0'),
('.6e', '9.9999999', '1.000000e+1'),
('.6e', '-1.23e5', '-1.230000e+5'),
('.6e', '1.23456789e-3', '1.234568e-3'),
('f', '0', '0'),
('f', '0.0', '0.0'),
('f', '0E-2', '0.00'),
('f', '0.00E-8', '0.0000000000'),
('f', '0E1', '0'), # loses exponent information
('f', '3.2E1', '32'),
('f', '3.2E2', '320'),
('f', '3.20E2', '320'),
('f', '3.200E2', '320.0'),
('f', '3.2E-6', '0.0000032'),
('.6f', '0E-15', '0.000000'), # all zeros treated equally
('.6f', '0E1', '0.000000'),
('.6f', '0', '0.000000'),
('.0f', '0', '0'), # no decimal point
('.0f', '0e-2', '0'),
('.0f', '3.14159265', '3'),
('.1f', '3.14159265', '3.1'),
('.4f', '3.14159265', '3.1416'),
('.6f', '3.14159265', '3.141593'),
('.7f', '3.14159265', '3.1415926'), # round-half-even!
('.8f', '3.14159265', '3.14159265'),
('.9f', '3.14159265', '3.141592650'),
('g', '0', '0'),
('g', '0.0', '0.0'),
('g', '0E1', '0e+1'),
('G', '0E1', '0E+1'),
('g', '0E-5', '0.00000'),
('g', '0E-6', '0.000000'),
('g', '0E-7', '0e-7'),
('g', '-0E2', '-0e+2'),
('.0g', '3.14159265', '3'), # 0 sig fig -> 1 sig fig
('.1g', '3.14159265', '3'),
('.2g', '3.14159265', '3.1'),
('.5g', '3.14159265', '3.1416'),
('.7g', '3.14159265', '3.141593'),
('.8g', '3.14159265', '3.1415926'), # round-half-even!
('.9g', '3.14159265', '3.14159265'),
('.10g', '3.14159265', '3.14159265'), # don't pad
('%', '0E1', '0%'),
('%', '0E0', '0%'),
('%', '0E-1', '0%'),
('%', '0E-2', '0%'),
('%', '0E-3', '0.0%'),
('%', '0E-4', '0.00%'),
('.3%', '0', '0.000%'), # all zeros treated equally
('.3%', '0E10', '0.000%'),
('.3%', '0E-10', '0.000%'),
('.3%', '2.34', '234.000%'),
('.3%', '1.234567', '123.457%'),
('.0%', '1.23', '123%'),
('e', 'NaN', 'NaN'),
('f', '-NaN123', '-NaN123'),
('+g', 'NaN456', '+NaN456'),
('.3e', 'Inf', 'Infinity'),
('.16f', '-Inf', '-Infinity'),
('.0g', '-sNaN', '-sNaN'),
('', '1.00', '1.00'),
# test alignment and padding
('6', '123', ' 123'),
('<6', '123', '123 '),
('>6', '123', ' 123'),
('^6', '123', ' 123 '),
('=+6', '123', '+ 123'),
('#<10', 'NaN', 'NaN#######'),
('#<10', '-4.3', '-4.3######'),
('#<+10', '0.0130', '+0.0130###'),
('#< 10', '0.0130', ' 0.0130###'),
('@>10', '-Inf', '@-Infinity'),
('#>5', '-Inf', '-Infinity'),
('?^5', '123', '?123?'),
('%^6', '123', '%123%%'),
(' ^6', '-45.6', '-45.6 '),
('/=10', '-45.6', '-/////45.6'),
('/=+10', '45.6', '+/////45.6'),
('/= 10', '45.6', ' /////45.6'),
# thousands separator
(',', '1234567', '1,234,567'),
(',', '123456', '123,456'),
(',', '12345', '12,345'),
(',', '1234', '1,234'),
(',', '123', '123'),
(',', '12', '12'),
(',', '1', '1'),
(',', '0', '0'),
(',', '-1234567', '-1,234,567'),
(',', '-123456', '-123,456'),
('7,', '123456', '123,456'),
('8,', '123456', ' 123,456'),
('08,', '123456', '0,123,456'), # special case: extra 0 needed
('+08,', '123456', '+123,456'), # but not if there's a sign
(' 08,', '123456', ' 123,456'),
('08,', '-123456', '-123,456'),
('+09,', '123456', '+0,123,456'),
# ... with fractional part...
('07,', '1234.56', '1,234.56'),
('08,', '1234.56', '1,234.56'),
('09,', '1234.56', '01,234.56'),
('010,', '1234.56', '001,234.56'),
('011,', '1234.56', '0,001,234.56'),
('012,', '1234.56', '0,001,234.56'),
('08,.1f', '1234.5', '01,234.5'),
# no thousands separators in fraction part
(',', '1.23456789', '1.23456789'),
(',%', '123.456789', '12,345.6789%'),
(',e', '123456', '1.23456e+5'),
(',E', '123456', '1.23456E+5'),
# issue 6850
('a=-7.0', '0.12345', 'aaaa0.1'),
# Issue 7094: Alternate formatting (specified by #)
('.0e', '1.0', '1e+0'),
('#.0e', '1.0', '1.e+0'),
('.0f', '1.0', '1'),
('#.0f', '1.0', '1.'),
('g', '1.1', '1.1'),
('#g', '1.1', '1.1'),
('.0g', '1', '1'),
('#.0g', '1', '1.'),
('.0%', '1.0', '100%'),
('#.0%', '1.0', '100.%'),
]
for fmt, d, result in test_values:
self.assertEqual(format(Decimal(d), fmt), result)
def test_n_format(self):
try:
from locale import CHAR_MAX
except ImportError:
return
# Set up some localeconv-like dictionaries
en_US = {
'decimal_point' : '.',
'grouping' : [3, 3, 0],
'thousands_sep': ','
}
fr_FR = {
'decimal_point' : ',',
'grouping' : [CHAR_MAX],
'thousands_sep' : ''
}
ru_RU = {
'decimal_point' : ',',
'grouping' : [3, 3, 0],
'thousands_sep' : ' '
}
crazy = {
'decimal_point' : '&',
'grouping' : [1, 4, 2, CHAR_MAX],
'thousands_sep' : '-'
}
def get_fmt(x, locale, fmt='n'):
return Decimal.__format__(Decimal(x), fmt, _localeconv=locale)
self.assertEqual(get_fmt(Decimal('12.7'), en_US), '12.7')
self.assertEqual(get_fmt(Decimal('12.7'), fr_FR), '12,7')
self.assertEqual(get_fmt(Decimal('12.7'), ru_RU), '12,7')
self.assertEqual(get_fmt(Decimal('12.7'), crazy), '1-2&7')
self.assertEqual(get_fmt(123456789, en_US), '123,456,789')
self.assertEqual(get_fmt(123456789, fr_FR), '123456789')
self.assertEqual(get_fmt(123456789, ru_RU), '123 456 789')
self.assertEqual(get_fmt(1234567890123, crazy), '123456-78-9012-3')
self.assertEqual(get_fmt(123456789, en_US, '.6n'), '1.23457e+8')
self.assertEqual(get_fmt(123456789, fr_FR, '.6n'), '1,23457e+8')
self.assertEqual(get_fmt(123456789, ru_RU, '.6n'), '1,23457e+8')
self.assertEqual(get_fmt(123456789, crazy, '.6n'), '1&23457e+8')
# zero padding
self.assertEqual(get_fmt(1234, fr_FR, '03n'), '1234')
self.assertEqual(get_fmt(1234, fr_FR, '04n'), '1234')
self.assertEqual(get_fmt(1234, fr_FR, '05n'), '01234')
self.assertEqual(get_fmt(1234, fr_FR, '06n'), '001234')
self.assertEqual(get_fmt(12345, en_US, '05n'), '12,345')
self.assertEqual(get_fmt(12345, en_US, '06n'), '12,345')
self.assertEqual(get_fmt(12345, en_US, '07n'), '012,345')
self.assertEqual(get_fmt(12345, en_US, '08n'), '0,012,345')
self.assertEqual(get_fmt(12345, en_US, '09n'), '0,012,345')
self.assertEqual(get_fmt(12345, en_US, '010n'), '00,012,345')
self.assertEqual(get_fmt(123456, crazy, '06n'), '1-2345-6')
self.assertEqual(get_fmt(123456, crazy, '07n'), '1-2345-6')
self.assertEqual(get_fmt(123456, crazy, '08n'), '1-2345-6')
self.assertEqual(get_fmt(123456, crazy, '09n'), '01-2345-6')
self.assertEqual(get_fmt(123456, crazy, '010n'), '0-01-2345-6')
self.assertEqual(get_fmt(123456, crazy, '011n'), '0-01-2345-6')
self.assertEqual(get_fmt(123456, crazy, '012n'), '00-01-2345-6')
self.assertEqual(get_fmt(123456, crazy, '013n'), '000-01-2345-6')
class DecimalArithmeticOperatorsTest(unittest.TestCase):
'''Unit tests for all arithmetic operators, binary and unary.'''
def test_addition(self):
d1 = Decimal('-11.1')
d2 = Decimal('22.2')
#two Decimals
self.assertEqual(d1+d2, Decimal('11.1'))
self.assertEqual(d2+d1, Decimal('11.1'))
#with other type, left
c = d1 + 5
self.assertEqual(c, Decimal('-6.1'))
self.assertEqual(type(c), type(d1))
#with other type, right
c = 5 + d1
self.assertEqual(c, Decimal('-6.1'))
self.assertEqual(type(c), type(d1))
#inline with decimal
d1 += d2
self.assertEqual(d1, Decimal('11.1'))
#inline with other type
d1 += 5
self.assertEqual(d1, Decimal('16.1'))
def test_subtraction(self):
d1 = Decimal('-11.1')
d2 = Decimal('22.2')
#two Decimals
self.assertEqual(d1-d2, Decimal('-33.3'))
self.assertEqual(d2-d1, Decimal('33.3'))
#with other type, left
c = d1 - 5
self.assertEqual(c, Decimal('-16.1'))
self.assertEqual(type(c), type(d1))
#with other type, right
c = 5 - d1
self.assertEqual(c, Decimal('16.1'))
self.assertEqual(type(c), type(d1))
#inline with decimal
d1 -= d2
self.assertEqual(d1, Decimal('-33.3'))
#inline with other type
d1 -= 5
self.assertEqual(d1, Decimal('-38.3'))
def test_multiplication(self):
d1 = Decimal('-5')
d2 = Decimal('3')
#two Decimals
self.assertEqual(d1*d2, Decimal('-15'))
self.assertEqual(d2*d1, Decimal('-15'))
#with other type, left
c = d1 * 5
self.assertEqual(c, Decimal('-25'))
self.assertEqual(type(c), type(d1))
#with other type, right
c = 5 * d1
self.assertEqual(c, Decimal('-25'))
self.assertEqual(type(c), type(d1))
#inline with decimal
d1 *= d2
self.assertEqual(d1, Decimal('-15'))
#inline with other type
d1 *= 5
self.assertEqual(d1, Decimal('-75'))
def test_division(self):
d1 = Decimal('-5')
d2 = Decimal('2')
#two Decimals
self.assertEqual(d1/d2, Decimal('-2.5'))
self.assertEqual(d2/d1, Decimal('-0.4'))
#with other type, left
c = d1 / 4
self.assertEqual(c, Decimal('-1.25'))
self.assertEqual(type(c), type(d1))
#with other type, right
c = 4 / d1
self.assertEqual(c, Decimal('-0.8'))
self.assertEqual(type(c), type(d1))
#inline with decimal
d1 /= d2
self.assertEqual(d1, Decimal('-2.5'))
#inline with other type
d1 /= 4
self.assertEqual(d1, Decimal('-0.625'))
def test_floor_division(self):
d1 = Decimal('5')
d2 = Decimal('2')
#two Decimals
self.assertEqual(d1//d2, Decimal('2'))
self.assertEqual(d2//d1, Decimal('0'))
#with other type, left
c = d1 // 4
self.assertEqual(c, Decimal('1'))
self.assertEqual(type(c), type(d1))
#with other type, right
c = 7 // d1
self.assertEqual(c, Decimal('1'))
self.assertEqual(type(c), type(d1))
#inline with decimal
d1 //= d2
self.assertEqual(d1, Decimal('2'))
#inline with other type
d1 //= 2
self.assertEqual(d1, Decimal('1'))
def test_powering(self):
d1 = Decimal('5')
d2 = Decimal('2')
#two Decimals
self.assertEqual(d1**d2, Decimal('25'))
self.assertEqual(d2**d1, Decimal('32'))
#with other type, left
c = d1 ** 4
self.assertEqual(c, Decimal('625'))
self.assertEqual(type(c), type(d1))
#with other type, right
c = 7 ** d1
self.assertEqual(c, Decimal('16807'))
self.assertEqual(type(c), type(d1))
#inline with decimal
d1 **= d2
self.assertEqual(d1, Decimal('25'))
#inline with other type
d1 **= 4
self.assertEqual(d1, Decimal('390625'))
def test_module(self):
d1 = Decimal('5')
d2 = Decimal('2')
#two Decimals
self.assertEqual(d1%d2, Decimal('1'))
self.assertEqual(d2%d1, Decimal('2'))
#with other type, left
c = d1 % 4
self.assertEqual(c, Decimal('1'))
self.assertEqual(type(c), type(d1))
#with other type, right
c = 7 % d1
self.assertEqual(c, Decimal('2'))
self.assertEqual(type(c), type(d1))
#inline with decimal
d1 %= d2
self.assertEqual(d1, Decimal('1'))
#inline with other type
d1 %= 4
self.assertEqual(d1, Decimal('1'))
def test_floor_div_module(self):
d1 = Decimal('5')
d2 = Decimal('2')
#two Decimals
(p, q) = divmod(d1, d2)
self.assertEqual(p, Decimal('2'))
self.assertEqual(q, Decimal('1'))
self.assertEqual(type(p), type(d1))
self.assertEqual(type(q), type(d1))
#with other type, left
(p, q) = divmod(d1, 4)
self.assertEqual(p, Decimal('1'))
self.assertEqual(q, Decimal('1'))
self.assertEqual(type(p), type(d1))
self.assertEqual(type(q), type(d1))
#with other type, right
(p, q) = divmod(7, d1)
self.assertEqual(p, Decimal('1'))
self.assertEqual(q, Decimal('2'))
self.assertEqual(type(p), type(d1))
self.assertEqual(type(q), type(d1))
def test_unary_operators(self):
self.assertEqual(+Decimal(45), Decimal(+45)) # +
self.assertEqual(-Decimal(45), Decimal(-45)) # -
self.assertEqual(abs(Decimal(45)), abs(Decimal(-45))) # abs
def test_nan_comparisons(self):
# comparisons involving signaling nans signal InvalidOperation
# order comparisons (<, <=, >, >=) involving only quiet nans
# also signal InvalidOperation
# equality comparisons (==, !=) involving only quiet nans
# don't signal, but return False or True respectively.
n = Decimal('NaN')
s = Decimal('sNaN')
i = Decimal('Inf')
f = Decimal('2')
qnan_pairs = (n, n), (n, i), (i, n), (n, f), (f, n)
snan_pairs = (s, n), (n, s), (s, i), (i, s), (s, f), (f, s), (s, s)
order_ops = operator.lt, operator.le, operator.gt, operator.ge
equality_ops = operator.eq, operator.ne
# results when InvalidOperation is not trapped
for x, y in qnan_pairs + snan_pairs:
for op in order_ops + equality_ops:
got = op(x, y)
expected = True if op is operator.ne else False
self.assertIs(expected, got,
"expected {0!r} for operator.{1}({2!r}, {3!r}); "
"got {4!r}".format(
expected, op.__name__, x, y, got))
# repeat the above, but this time trap the InvalidOperation
with localcontext() as ctx:
ctx.traps[InvalidOperation] = 1
for x, y in qnan_pairs:
for op in equality_ops:
got = op(x, y)
expected = True if op is operator.ne else False
self.assertIs(expected, got,
"expected {0!r} for "
"operator.{1}({2!r}, {3!r}); "
"got {4!r}".format(
expected, op.__name__, x, y, got))
for x, y in snan_pairs:
for op in equality_ops:
self.assertRaises(InvalidOperation, operator.eq, x, y)
self.assertRaises(InvalidOperation, operator.ne, x, y)
for x, y in qnan_pairs + snan_pairs:
for op in order_ops:
self.assertRaises(InvalidOperation, op, x, y)
def test_copy_sign(self):
d = Decimal(1).copy_sign(Decimal(-2))
self.assertEqual(Decimal(1).copy_sign(-2), d)
self.assertRaises(TypeError, Decimal(1).copy_sign, '-2')
# The following are two functions used to test threading in the next class
def thfunc1(cls):
d1 = Decimal(1)
d3 = Decimal(3)
test1 = d1/d3
cls.synchro.wait()
test2 = d1/d3
cls.finish1.set()
cls.assertEqual(test1, Decimal('0.3333333333333333333333333333'))
cls.assertEqual(test2, Decimal('0.3333333333333333333333333333'))
return
def thfunc2(cls):
d1 = Decimal(1)
d3 = Decimal(3)
test1 = d1/d3
thiscontext = getcontext()
thiscontext.prec = 18
test2 = d1/d3
cls.synchro.set()
cls.finish2.set()
cls.assertEqual(test1, Decimal('0.3333333333333333333333333333'))
cls.assertEqual(test2, Decimal('0.333333333333333333'))
return
class DecimalUseOfContextTest(unittest.TestCase):
'''Unit tests for Use of Context cases in Decimal.'''
try:
import threading
except ImportError:
threading = None
    # Take care when executing this test from IDLE; there's an issue in
    # threading that hangs IDLE and I couldn't track it down.
def test_threading(self):
#Test the "threading isolation" of a Context.
self.synchro = threading.Event()
self.finish1 = threading.Event()
self.finish2 = threading.Event()
th1 = threading.Thread(target=thfunc1, args=(self,))
th2 = threading.Thread(target=thfunc2, args=(self,))
th1.start()
th2.start()
self.finish1.wait()
self.finish2.wait()
return
if threading is None:
del test_threading
class DecimalUsabilityTest(unittest.TestCase):
'''Unit tests for Usability cases of Decimal.'''
def test_comparison_operators(self):
da = Decimal('23.42')
db = Decimal('23.42')
dc = Decimal('45')
#two Decimals
self.assertGreater(dc, da)
self.assertGreaterEqual(dc, da)
self.assertLess(da, dc)
self.assertLessEqual(da, dc)
self.assertEqual(da, db)
self.assertNotEqual(da, dc)
self.assertLessEqual(da, db)
self.assertGreaterEqual(da, db)
#a Decimal and an int
self.assertGreater(dc, 23)
self.assertLess(23, dc)
self.assertEqual(dc, 45)
#a Decimal and uncomparable
self.assertNotEqual(da, 'ugly')
self.assertNotEqual(da, 32.7)
self.assertNotEqual(da, object())
self.assertNotEqual(da, object)
# sortable
a = list(map(Decimal, range(100)))
b = a[:]
random.shuffle(a)
a.sort()
self.assertEqual(a, b)
def test_decimal_float_comparison(self):
da = Decimal('0.25')
db = Decimal('3.0')
self.assertLess(da, 3.0)
self.assertLessEqual(da, 3.0)
self.assertGreater(db, 0.25)
self.assertGreaterEqual(db, 0.25)
self.assertNotEqual(da, 1.5)
self.assertEqual(da, 0.25)
self.assertGreater(3.0, da)
self.assertGreaterEqual(3.0, da)
self.assertLess(0.25, db)
self.assertLessEqual(0.25, db)
self.assertNotEqual(0.25, db)
self.assertEqual(3.0, db)
self.assertNotEqual(0.1, Decimal('0.1'))
def test_copy_and_deepcopy_methods(self):
d = Decimal('43.24')
c = copy.copy(d)
self.assertEqual(id(c), id(d))
dc = copy.deepcopy(d)
self.assertEqual(id(dc), id(d))
def test_hash_method(self):
def hashit(d):
a = hash(d)
b = d.__hash__()
self.assertEqual(a, b)
return a
#just that it's hashable
hashit(Decimal(23))
hashit(Decimal('Infinity'))
hashit(Decimal('-Infinity'))
hashit(Decimal('nan123'))
hashit(Decimal('-NaN'))
test_values = [Decimal(sign*(2**m + n))
for m in [0, 14, 15, 16, 17, 30, 31,
32, 33, 61, 62, 63, 64, 65, 66]
for n in range(-10, 10)
for sign in [-1, 1]]
test_values.extend([
Decimal("-1"), # ==> -2
Decimal("-0"), # zeros
Decimal("0.00"),
Decimal("-0.000"),
Decimal("0E10"),
Decimal("-0E12"),
Decimal("10.0"), # negative exponent
Decimal("-23.00000"),
Decimal("1230E100"), # positive exponent
Decimal("-4.5678E50"),
# a value for which hash(n) != hash(n % (2**64-1))
# in Python pre-2.6
Decimal(2**64 + 2**32 - 1),
# selection of values which fail with the old (before
# version 2.6) long.__hash__
Decimal("1.634E100"),
Decimal("90.697E100"),
Decimal("188.83E100"),
Decimal("1652.9E100"),
Decimal("56531E100"),
])
# check that hash(d) == hash(int(d)) for integral values
for value in test_values:
self.assertEqual(hashit(value), hashit(int(value)))
        #the same hash as an equal int
self.assertEqual(hashit(Decimal(23)), hashit(23))
self.assertRaises(TypeError, hash, Decimal('sNaN'))
self.assertTrue(hashit(Decimal('Inf')))
self.assertTrue(hashit(Decimal('-Inf')))
# check that the hashes of a Decimal float match when they
# represent exactly the same values
test_strings = ['inf', '-Inf', '0.0', '-.0e1',
'34.0', '2.5', '112390.625', '-0.515625']
for s in test_strings:
f = float(s)
d = Decimal(s)
self.assertEqual(hashit(f), hashit(d))
# check that the value of the hash doesn't depend on the
# current context (issue #1757)
c = getcontext()
old_precision = c.prec
x = Decimal("123456789.1")
c.prec = 6
h1 = hashit(x)
c.prec = 10
h2 = hashit(x)
c.prec = 16
h3 = hashit(x)
self.assertEqual(h1, h2)
self.assertEqual(h1, h3)
c.prec = old_precision
def test_min_and_max_methods(self):
d1 = Decimal('15.32')
d2 = Decimal('28.5')
l1 = 15
l2 = 28
#between Decimals
self.assertIs(min(d1,d2), d1)
self.assertIs(min(d2,d1), d1)
self.assertIs(max(d1,d2), d2)
self.assertIs(max(d2,d1), d2)
#between Decimal and long
self.assertIs(min(d1,l2), d1)
self.assertIs(min(l2,d1), d1)
self.assertIs(max(l1,d2), d2)
self.assertIs(max(d2,l1), d2)
def test_as_nonzero(self):
#as false
self.assertFalse(Decimal(0))
#as true
self.assertTrue(Decimal('0.372'))
def test_tostring_methods(self):
#Test str and repr methods.
d = Decimal('15.32')
self.assertEqual(str(d), '15.32') # str
self.assertEqual(repr(d), "Decimal('15.32')") # repr
def test_tonum_methods(self):
#Test float and int methods.
d1 = Decimal('66')
d2 = Decimal('15.32')
#int
self.assertEqual(int(d1), 66)
self.assertEqual(int(d2), 15)
#float
self.assertEqual(float(d1), 66)
self.assertEqual(float(d2), 15.32)
#floor
test_pairs = [
('123.00', 123),
('3.2', 3),
('3.54', 3),
('3.899', 3),
('-2.3', -3),
('-11.0', -11),
('0.0', 0),
('-0E3', 0),
]
for d, i in test_pairs:
self.assertEqual(math.floor(Decimal(d)), i)
self.assertRaises(ValueError, math.floor, Decimal('-NaN'))
self.assertRaises(ValueError, math.floor, Decimal('sNaN'))
self.assertRaises(ValueError, math.floor, Decimal('NaN123'))
self.assertRaises(OverflowError, math.floor, Decimal('Inf'))
self.assertRaises(OverflowError, math.floor, Decimal('-Inf'))
#ceiling
test_pairs = [
('123.00', 123),
('3.2', 4),
('3.54', 4),
('3.899', 4),
('-2.3', -2),
('-11.0', -11),
('0.0', 0),
('-0E3', 0),
]
for d, i in test_pairs:
self.assertEqual(math.ceil(Decimal(d)), i)
self.assertRaises(ValueError, math.ceil, Decimal('-NaN'))
self.assertRaises(ValueError, math.ceil, Decimal('sNaN'))
self.assertRaises(ValueError, math.ceil, Decimal('NaN123'))
self.assertRaises(OverflowError, math.ceil, Decimal('Inf'))
self.assertRaises(OverflowError, math.ceil, Decimal('-Inf'))
#round, single argument
test_pairs = [
('123.00', 123),
('3.2', 3),
('3.54', 4),
('3.899', 4),
('-2.3', -2),
('-11.0', -11),
('0.0', 0),
('-0E3', 0),
('-3.5', -4),
('-2.5', -2),
('-1.5', -2),
('-0.5', 0),
('0.5', 0),
('1.5', 2),
('2.5', 2),
('3.5', 4),
]
for d, i in test_pairs:
self.assertEqual(round(Decimal(d)), i)
self.assertRaises(ValueError, round, Decimal('-NaN'))
self.assertRaises(ValueError, round, Decimal('sNaN'))
self.assertRaises(ValueError, round, Decimal('NaN123'))
self.assertRaises(OverflowError, round, Decimal('Inf'))
self.assertRaises(OverflowError, round, Decimal('-Inf'))
#round, two arguments; this is essentially equivalent
#to quantize, which is already extensively tested
test_triples = [
('123.456', -4, '0E+4'),
('123.456', -3, '0E+3'),
('123.456', -2, '1E+2'),
('123.456', -1, '1.2E+2'),
('123.456', 0, '123'),
('123.456', 1, '123.5'),
('123.456', 2, '123.46'),
('123.456', 3, '123.456'),
('123.456', 4, '123.4560'),
('123.455', 2, '123.46'),
('123.445', 2, '123.44'),
('Inf', 4, 'NaN'),
('-Inf', -23, 'NaN'),
('sNaN314', 3, 'NaN314'),
]
for d, n, r in test_triples:
self.assertEqual(str(round(Decimal(d), n)), r)
def test_eval_round_trip(self):
#with zero
d = Decimal( (0, (0,), 0) )
self.assertEqual(d, eval(repr(d)))
#int
d = Decimal( (1, (4, 5), 0) )
self.assertEqual(d, eval(repr(d)))
#float
d = Decimal( (0, (4, 5, 3, 4), -2) )
self.assertEqual(d, eval(repr(d)))
#weird
d = Decimal( (1, (4, 3, 4, 9, 1, 3, 5, 3, 4), -25) )
self.assertEqual(d, eval(repr(d)))
def test_as_tuple(self):
#with zero
d = Decimal(0)
self.assertEqual(d.as_tuple(), (0, (0,), 0) )
#int
d = Decimal(-45)
self.assertEqual(d.as_tuple(), (1, (4, 5), 0) )
#complicated string
d = Decimal("-4.34913534E-17")
self.assertEqual(d.as_tuple(), (1, (4, 3, 4, 9, 1, 3, 5, 3, 4), -25) )
#inf
d = Decimal("Infinity")
self.assertEqual(d.as_tuple(), (0, (0,), 'F') )
#leading zeros in coefficient should be stripped
d = Decimal( (0, (0, 0, 4, 0, 5, 3, 4), -2) )
self.assertEqual(d.as_tuple(), (0, (4, 0, 5, 3, 4), -2) )
d = Decimal( (1, (0, 0, 0), 37) )
self.assertEqual(d.as_tuple(), (1, (0,), 37))
d = Decimal( (1, (), 37) )
self.assertEqual(d.as_tuple(), (1, (0,), 37))
#leading zeros in NaN diagnostic info should be stripped
d = Decimal( (0, (0, 0, 4, 0, 5, 3, 4), 'n') )
self.assertEqual(d.as_tuple(), (0, (4, 0, 5, 3, 4), 'n') )
d = Decimal( (1, (0, 0, 0), 'N') )
self.assertEqual(d.as_tuple(), (1, (), 'N') )
d = Decimal( (1, (), 'n') )
self.assertEqual(d.as_tuple(), (1, (), 'n') )
#coefficient in infinity should be ignored
d = Decimal( (0, (4, 5, 3, 4), 'F') )
self.assertEqual(d.as_tuple(), (0, (0,), 'F'))
d = Decimal( (1, (0, 2, 7, 1), 'F') )
self.assertEqual(d.as_tuple(), (1, (0,), 'F'))
def test_immutability_operations(self):
        # Do operations and check that they didn't change the internal objects.
d1 = Decimal('-25e55')
b1 = Decimal('-25e55')
d2 = Decimal('33e+33')
b2 = Decimal('33e+33')
def checkSameDec(operation, useOther=False):
if useOther:
eval("d1." + operation + "(d2)")
self.assertEqual(d1._sign, b1._sign)
self.assertEqual(d1._int, b1._int)
self.assertEqual(d1._exp, b1._exp)
self.assertEqual(d2._sign, b2._sign)
self.assertEqual(d2._int, b2._int)
self.assertEqual(d2._exp, b2._exp)
else:
eval("d1." + operation + "()")
self.assertEqual(d1._sign, b1._sign)
self.assertEqual(d1._int, b1._int)
self.assertEqual(d1._exp, b1._exp)
return
Decimal(d1)
self.assertEqual(d1._sign, b1._sign)
self.assertEqual(d1._int, b1._int)
self.assertEqual(d1._exp, b1._exp)
checkSameDec("__abs__")
checkSameDec("__add__", True)
checkSameDec("__divmod__", True)
checkSameDec("__eq__", True)
checkSameDec("__ne__", True)
checkSameDec("__le__", True)
checkSameDec("__lt__", True)
checkSameDec("__ge__", True)
checkSameDec("__gt__", True)
checkSameDec("__float__")
checkSameDec("__floordiv__", True)
checkSameDec("__hash__")
checkSameDec("__int__")
checkSameDec("__trunc__")
checkSameDec("__mod__", True)
checkSameDec("__mul__", True)
checkSameDec("__neg__")
checkSameDec("__bool__")
checkSameDec("__pos__")
checkSameDec("__pow__", True)
checkSameDec("__radd__", True)
checkSameDec("__rdivmod__", True)
checkSameDec("__repr__")
checkSameDec("__rfloordiv__", True)
checkSameDec("__rmod__", True)
checkSameDec("__rmul__", True)
checkSameDec("__rpow__", True)
checkSameDec("__rsub__", True)
checkSameDec("__str__")
checkSameDec("__sub__", True)
checkSameDec("__truediv__", True)
checkSameDec("adjusted")
checkSameDec("as_tuple")
checkSameDec("compare", True)
checkSameDec("max", True)
checkSameDec("min", True)
checkSameDec("normalize")
checkSameDec("quantize", True)
checkSameDec("remainder_near", True)
checkSameDec("same_quantum", True)
checkSameDec("sqrt")
checkSameDec("to_eng_string")
checkSameDec("to_integral")
def test_subclassing(self):
# Different behaviours when subclassing Decimal
class MyDecimal(Decimal):
pass
d1 = MyDecimal(1)
d2 = MyDecimal(2)
d = d1 + d2
self.assertIs(type(d), Decimal)
d = d1.max(d2)
self.assertIs(type(d), Decimal)
def test_implicit_context(self):
# Check results when context given implicitly. (Issue 2478)
c = getcontext()
self.assertEqual(str(Decimal(0).sqrt()),
str(c.sqrt(Decimal(0))))
def test_conversions_from_int(self):
# Check that methods taking a second Decimal argument will
# always accept an integer in place of a Decimal.
self.assertEqual(Decimal(4).compare(3),
Decimal(4).compare(Decimal(3)))
self.assertEqual(Decimal(4).compare_signal(3),
Decimal(4).compare_signal(Decimal(3)))
self.assertEqual(Decimal(4).compare_total(3),
Decimal(4).compare_total(Decimal(3)))
self.assertEqual(Decimal(4).compare_total_mag(3),
Decimal(4).compare_total_mag(Decimal(3)))
self.assertEqual(Decimal(10101).logical_and(1001),
Decimal(10101).logical_and(Decimal(1001)))
self.assertEqual(Decimal(10101).logical_or(1001),
Decimal(10101).logical_or(Decimal(1001)))
self.assertEqual(Decimal(10101).logical_xor(1001),
Decimal(10101).logical_xor(Decimal(1001)))
self.assertEqual(Decimal(567).max(123),
Decimal(567).max(Decimal(123)))
self.assertEqual(Decimal(567).max_mag(123),
Decimal(567).max_mag(Decimal(123)))
self.assertEqual(Decimal(567).min(123),
Decimal(567).min(Decimal(123)))
self.assertEqual(Decimal(567).min_mag(123),
Decimal(567).min_mag(Decimal(123)))
self.assertEqual(Decimal(567).next_toward(123),
Decimal(567).next_toward(Decimal(123)))
self.assertEqual(Decimal(1234).quantize(100),
Decimal(1234).quantize(Decimal(100)))
self.assertEqual(Decimal(768).remainder_near(1234),
Decimal(768).remainder_near(Decimal(1234)))
self.assertEqual(Decimal(123).rotate(1),
Decimal(123).rotate(Decimal(1)))
self.assertEqual(Decimal(1234).same_quantum(1000),
Decimal(1234).same_quantum(Decimal(1000)))
self.assertEqual(Decimal('9.123').scaleb(-100),
Decimal('9.123').scaleb(Decimal(-100)))
self.assertEqual(Decimal(456).shift(-1),
Decimal(456).shift(Decimal(-1)))
self.assertEqual(Decimal(-12).fma(Decimal(45), 67),
Decimal(-12).fma(Decimal(45), Decimal(67)))
self.assertEqual(Decimal(-12).fma(45, 67),
Decimal(-12).fma(Decimal(45), Decimal(67)))
self.assertEqual(Decimal(-12).fma(45, Decimal(67)),
Decimal(-12).fma(Decimal(45), Decimal(67)))
class DecimalPythonAPItests(unittest.TestCase):
def test_abc(self):
self.assertTrue(issubclass(Decimal, numbers.Number))
self.assertFalse(issubclass(Decimal, numbers.Real))
self.assertIsInstance(Decimal(0), numbers.Number)
self.assertNotIsInstance(Decimal(0), numbers.Real)
def test_pickle(self):
d = Decimal('-3.141590000')
p = pickle.dumps(d)
e = pickle.loads(p)
self.assertEqual(d, e)
def test_int(self):
for x in range(-250, 250):
s = '%0.2f' % (x / 100.0)
# should work the same as for floats
self.assertEqual(int(Decimal(s)), int(float(s)))
# should work the same as to_integral in the ROUND_DOWN mode
d = Decimal(s)
r = d.to_integral(ROUND_DOWN)
self.assertEqual(Decimal(int(d)), r)
self.assertRaises(ValueError, int, Decimal('-nan'))
self.assertRaises(ValueError, int, Decimal('snan'))
self.assertRaises(OverflowError, int, Decimal('inf'))
self.assertRaises(OverflowError, int, Decimal('-inf'))
def test_trunc(self):
for x in range(-250, 250):
s = '%0.2f' % (x / 100.0)
# should work the same as for floats
self.assertEqual(int(Decimal(s)), int(float(s)))
# should work the same as to_integral in the ROUND_DOWN mode
d = Decimal(s)
r = d.to_integral(ROUND_DOWN)
self.assertEqual(Decimal(math.trunc(d)), r)
def test_from_float(self):
class MyDecimal(Decimal):
pass
r = MyDecimal.from_float(0.1)
self.assertEqual(type(r), MyDecimal)
self.assertEqual(str(r),
'0.1000000000000000055511151231257827021181583404541015625')
bigint = 12345678901234567890123456789
self.assertEqual(MyDecimal.from_float(bigint), MyDecimal(bigint))
self.assertTrue(MyDecimal.from_float(float('nan')).is_qnan())
self.assertTrue(MyDecimal.from_float(float('inf')).is_infinite())
self.assertTrue(MyDecimal.from_float(float('-inf')).is_infinite())
self.assertEqual(str(MyDecimal.from_float(float('nan'))),
str(Decimal('NaN')))
self.assertEqual(str(MyDecimal.from_float(float('inf'))),
str(Decimal('Infinity')))
self.assertEqual(str(MyDecimal.from_float(float('-inf'))),
str(Decimal('-Infinity')))
self.assertRaises(TypeError, MyDecimal.from_float, 'abc')
for i in range(200):
x = random.expovariate(0.01) * (random.random() * 2.0 - 1.0)
self.assertEqual(x, float(MyDecimal.from_float(x))) # roundtrip
def test_create_decimal_from_float(self):
context = Context(prec=5, rounding=ROUND_DOWN)
self.assertEqual(
context.create_decimal_from_float(math.pi),
Decimal('3.1415')
)
context = Context(prec=5, rounding=ROUND_UP)
self.assertEqual(
context.create_decimal_from_float(math.pi),
Decimal('3.1416')
)
context = Context(prec=5, traps=[Inexact])
self.assertRaises(
Inexact,
context.create_decimal_from_float,
math.pi
)
self.assertEqual(repr(context.create_decimal_from_float(-0.0)),
"Decimal('-0')")
self.assertEqual(repr(context.create_decimal_from_float(1.0)),
"Decimal('1')")
self.assertEqual(repr(context.create_decimal_from_float(10)),
"Decimal('10')")
class ContextAPItests(unittest.TestCase):
def test_pickle(self):
c = Context()
e = pickle.loads(pickle.dumps(c))
for k in vars(c):
v1 = vars(c)[k]
v2 = vars(e)[k]
self.assertEqual(v1, v2)
def test_equality_with_other_types(self):
self.assertIn(Decimal(10), ['a', 1.0, Decimal(10), (1,2), {}])
self.assertNotIn(Decimal(10), ['a', 1.0, (1,2), {}])
def test_copy(self):
# All copies should be deep
c = Context()
d = c.copy()
self.assertNotEqual(id(c), id(d))
self.assertNotEqual(id(c.flags), id(d.flags))
self.assertNotEqual(id(c.traps), id(d.traps))
def test__clamp(self):
# In Python 3.2, the private attribute `_clamp` was made
# public (issue 8540), with the old `_clamp` becoming a
# property wrapping `clamp`. For the duration of Python 3.2
# only, the attribute should be gettable/settable via both
# `clamp` and `_clamp`; in Python 3.3, `_clamp` should be
# removed.
c = Context(clamp = 0)
self.assertEqual(c.clamp, 0)
with check_warnings(("", DeprecationWarning)):
c._clamp = 1
self.assertEqual(c.clamp, 1)
with check_warnings(("", DeprecationWarning)):
self.assertEqual(c._clamp, 1)
c.clamp = 0
self.assertEqual(c.clamp, 0)
with check_warnings(("", DeprecationWarning)):
self.assertEqual(c._clamp, 0)
def test_abs(self):
c = Context()
d = c.abs(Decimal(-1))
self.assertEqual(c.abs(-1), d)
self.assertRaises(TypeError, c.abs, '-1')
def test_add(self):
c = Context()
d = c.add(Decimal(1), Decimal(1))
self.assertEqual(c.add(1, 1), d)
self.assertEqual(c.add(Decimal(1), 1), d)
self.assertEqual(c.add(1, Decimal(1)), d)
self.assertRaises(TypeError, c.add, '1', 1)
self.assertRaises(TypeError, c.add, 1, '1')
def test_compare(self):
c = Context()
d = c.compare(Decimal(1), Decimal(1))
self.assertEqual(c.compare(1, 1), d)
self.assertEqual(c.compare(Decimal(1), 1), d)
self.assertEqual(c.compare(1, Decimal(1)), d)
self.assertRaises(TypeError, c.compare, '1', 1)
self.assertRaises(TypeError, c.compare, 1, '1')
def test_compare_signal(self):
c = Context()
d = c.compare_signal(Decimal(1), Decimal(1))
self.assertEqual(c.compare_signal(1, 1), d)
self.assertEqual(c.compare_signal(Decimal(1), 1), d)
self.assertEqual(c.compare_signal(1, Decimal(1)), d)
self.assertRaises(TypeError, c.compare_signal, '1', 1)
self.assertRaises(TypeError, c.compare_signal, 1, '1')
def test_compare_total(self):
c = Context()
d = c.compare_total(Decimal(1), Decimal(1))
self.assertEqual(c.compare_total(1, 1), d)
self.assertEqual(c.compare_total(Decimal(1), 1), d)
self.assertEqual(c.compare_total(1, Decimal(1)), d)
self.assertRaises(TypeError, c.compare_total, '1', 1)
self.assertRaises(TypeError, c.compare_total, 1, '1')
def test_compare_total_mag(self):
c = Context()
d = c.compare_total_mag(Decimal(1), Decimal(1))
self.assertEqual(c.compare_total_mag(1, 1), d)
self.assertEqual(c.compare_total_mag(Decimal(1), 1), d)
self.assertEqual(c.compare_total_mag(1, Decimal(1)), d)
self.assertRaises(TypeError, c.compare_total_mag, '1', 1)
self.assertRaises(TypeError, c.compare_total_mag, 1, '1')
def test_copy_abs(self):
c = Context()
d = c.copy_abs(Decimal(-1))
self.assertEqual(c.copy_abs(-1), d)
self.assertRaises(TypeError, c.copy_abs, '-1')
def test_copy_decimal(self):
c = Context()
d = c.copy_decimal(Decimal(-1))
self.assertEqual(c.copy_decimal(-1), d)
self.assertRaises(TypeError, c.copy_decimal, '-1')
def test_copy_negate(self):
c = Context()
d = c.copy_negate(Decimal(-1))
self.assertEqual(c.copy_negate(-1), d)
self.assertRaises(TypeError, c.copy_negate, '-1')
def test_copy_sign(self):
c = Context()
d = c.copy_sign(Decimal(1), Decimal(-2))
self.assertEqual(c.copy_sign(1, -2), d)
self.assertEqual(c.copy_sign(Decimal(1), -2), d)
self.assertEqual(c.copy_sign(1, Decimal(-2)), d)
self.assertRaises(TypeError, c.copy_sign, '1', -2)
self.assertRaises(TypeError, c.copy_sign, 1, '-2')
def test_divide(self):
c = Context()
d = c.divide(Decimal(1), Decimal(2))
self.assertEqual(c.divide(1, 2), d)
self.assertEqual(c.divide(Decimal(1), 2), d)
self.assertEqual(c.divide(1, Decimal(2)), d)
self.assertRaises(TypeError, c.divide, '1', 2)
self.assertRaises(TypeError, c.divide, 1, '2')
def test_divide_int(self):
c = Context()
d = c.divide_int(Decimal(1), Decimal(2))
self.assertEqual(c.divide_int(1, 2), d)
self.assertEqual(c.divide_int(Decimal(1), 2), d)
self.assertEqual(c.divide_int(1, Decimal(2)), d)
self.assertRaises(TypeError, c.divide_int, '1', 2)
self.assertRaises(TypeError, c.divide_int, 1, '2')
def test_divmod(self):
c = Context()
d = c.divmod(Decimal(1), Decimal(2))
self.assertEqual(c.divmod(1, 2), d)
self.assertEqual(c.divmod(Decimal(1), 2), d)
self.assertEqual(c.divmod(1, Decimal(2)), d)
self.assertRaises(TypeError, c.divmod, '1', 2)
self.assertRaises(TypeError, c.divmod, 1, '2')
def test_exp(self):
c = Context()
d = c.exp(Decimal(10))
self.assertEqual(c.exp(10), d)
self.assertRaises(TypeError, c.exp, '10')
def test_fma(self):
c = Context()
d = c.fma(Decimal(2), Decimal(3), Decimal(4))
self.assertEqual(c.fma(2, 3, 4), d)
self.assertEqual(c.fma(Decimal(2), 3, 4), d)
self.assertEqual(c.fma(2, Decimal(3), 4), d)
self.assertEqual(c.fma(2, 3, Decimal(4)), d)
self.assertEqual(c.fma(Decimal(2), Decimal(3), 4), d)
self.assertRaises(TypeError, c.fma, '2', 3, 4)
self.assertRaises(TypeError, c.fma, 2, '3', 4)
self.assertRaises(TypeError, c.fma, 2, 3, '4')
def test_is_finite(self):
c = Context()
d = c.is_finite(Decimal(10))
self.assertEqual(c.is_finite(10), d)
self.assertRaises(TypeError, c.is_finite, '10')
def test_is_infinite(self):
c = Context()
d = c.is_infinite(Decimal(10))
self.assertEqual(c.is_infinite(10), d)
self.assertRaises(TypeError, c.is_infinite, '10')
def test_is_nan(self):
c = Context()
d = c.is_nan(Decimal(10))
self.assertEqual(c.is_nan(10), d)
self.assertRaises(TypeError, c.is_nan, '10')
def test_is_normal(self):
c = Context()
d = c.is_normal(Decimal(10))
self.assertEqual(c.is_normal(10), d)
self.assertRaises(TypeError, c.is_normal, '10')
def test_is_qnan(self):
c = Context()
d = c.is_qnan(Decimal(10))
self.assertEqual(c.is_qnan(10), d)
self.assertRaises(TypeError, c.is_qnan, '10')
def test_is_signed(self):
c = Context()
d = c.is_signed(Decimal(10))
self.assertEqual(c.is_signed(10), d)
self.assertRaises(TypeError, c.is_signed, '10')
def test_is_snan(self):
c = Context()
d = c.is_snan(Decimal(10))
self.assertEqual(c.is_snan(10), d)
self.assertRaises(TypeError, c.is_snan, '10')
def test_is_subnormal(self):
c = Context()
d = c.is_subnormal(Decimal(10))
self.assertEqual(c.is_subnormal(10), d)
self.assertRaises(TypeError, c.is_subnormal, '10')
def test_is_zero(self):
c = Context()
d = c.is_zero(Decimal(10))
self.assertEqual(c.is_zero(10), d)
self.assertRaises(TypeError, c.is_zero, '10')
def test_ln(self):
c = Context()
d = c.ln(Decimal(10))
self.assertEqual(c.ln(10), d)
self.assertRaises(TypeError, c.ln, '10')
def test_log10(self):
c = Context()
d = c.log10(Decimal(10))
self.assertEqual(c.log10(10), d)
self.assertRaises(TypeError, c.log10, '10')
def test_logb(self):
c = Context()
d = c.logb(Decimal(10))
self.assertEqual(c.logb(10), d)
self.assertRaises(TypeError, c.logb, '10')
def test_logical_and(self):
c = Context()
d = c.logical_and(Decimal(1), Decimal(1))
self.assertEqual(c.logical_and(1, 1), d)
self.assertEqual(c.logical_and(Decimal(1), 1), d)
self.assertEqual(c.logical_and(1, Decimal(1)), d)
self.assertRaises(TypeError, c.logical_and, '1', 1)
self.assertRaises(TypeError, c.logical_and, 1, '1')
def test_logical_invert(self):
c = Context()
d = c.logical_invert(Decimal(1000))
self.assertEqual(c.logical_invert(1000), d)
self.assertRaises(TypeError, c.logical_invert, '1000')
def test_logical_or(self):
c = Context()
d = c.logical_or(Decimal(1), Decimal(1))
self.assertEqual(c.logical_or(1, 1), d)
self.assertEqual(c.logical_or(Decimal(1), 1), d)
self.assertEqual(c.logical_or(1, Decimal(1)), d)
self.assertRaises(TypeError, c.logical_or, '1', 1)
self.assertRaises(TypeError, c.logical_or, 1, '1')
def test_logical_xor(self):
c = Context()
d = c.logical_xor(Decimal(1), Decimal(1))
self.assertEqual(c.logical_xor(1, 1), d)
self.assertEqual(c.logical_xor(Decimal(1), 1), d)
self.assertEqual(c.logical_xor(1, Decimal(1)), d)
self.assertRaises(TypeError, c.logical_xor, '1', 1)
self.assertRaises(TypeError, c.logical_xor, 1, '1')
def test_max(self):
c = Context()
d = c.max(Decimal(1), Decimal(2))
self.assertEqual(c.max(1, 2), d)
self.assertEqual(c.max(Decimal(1), 2), d)
self.assertEqual(c.max(1, Decimal(2)), d)
self.assertRaises(TypeError, c.max, '1', 2)
self.assertRaises(TypeError, c.max, 1, '2')
def test_max_mag(self):
c = Context()
d = c.max_mag(Decimal(1), Decimal(2))
self.assertEqual(c.max_mag(1, 2), d)
self.assertEqual(c.max_mag(Decimal(1), 2), d)
self.assertEqual(c.max_mag(1, Decimal(2)), d)
self.assertRaises(TypeError, c.max_mag, '1', 2)
self.assertRaises(TypeError, c.max_mag, 1, '2')
def test_min(self):
c = Context()
d = c.min(Decimal(1), Decimal(2))
self.assertEqual(c.min(1, 2), d)
self.assertEqual(c.min(Decimal(1), 2), d)
self.assertEqual(c.min(1, Decimal(2)), d)
self.assertRaises(TypeError, c.min, '1', 2)
self.assertRaises(TypeError, c.min, 1, '2')
def test_min_mag(self):
c = Context()
d = c.min_mag(Decimal(1), Decimal(2))
self.assertEqual(c.min_mag(1, 2), d)
self.assertEqual(c.min_mag(Decimal(1), 2), d)
self.assertEqual(c.min_mag(1, Decimal(2)), d)
self.assertRaises(TypeError, c.min_mag, '1', 2)
self.assertRaises(TypeError, c.min_mag, 1, '2')
def test_minus(self):
c = Context()
d = c.minus(Decimal(10))
self.assertEqual(c.minus(10), d)
self.assertRaises(TypeError, c.minus, '10')
def test_multiply(self):
c = Context()
d = c.multiply(Decimal(1), Decimal(2))
self.assertEqual(c.multiply(1, 2), d)
self.assertEqual(c.multiply(Decimal(1), 2), d)
self.assertEqual(c.multiply(1, Decimal(2)), d)
self.assertRaises(TypeError, c.multiply, '1', 2)
self.assertRaises(TypeError, c.multiply, 1, '2')
def test_next_minus(self):
c = Context()
d = c.next_minus(Decimal(10))
self.assertEqual(c.next_minus(10), d)
self.assertRaises(TypeError, c.next_minus, '10')
def test_next_plus(self):
c = Context()
d = c.next_plus(Decimal(10))
self.assertEqual(c.next_plus(10), d)
self.assertRaises(TypeError, c.next_plus, '10')
def test_next_toward(self):
c = Context()
d = c.next_toward(Decimal(1), Decimal(2))
self.assertEqual(c.next_toward(1, 2), d)
self.assertEqual(c.next_toward(Decimal(1), 2), d)
self.assertEqual(c.next_toward(1, Decimal(2)), d)
self.assertRaises(TypeError, c.next_toward, '1', 2)
self.assertRaises(TypeError, c.next_toward, 1, '2')
def test_normalize(self):
c = Context()
d = c.normalize(Decimal(10))
self.assertEqual(c.normalize(10), d)
self.assertRaises(TypeError, c.normalize, '10')
def test_number_class(self):
c = Context()
self.assertEqual(c.number_class(123), c.number_class(Decimal(123)))
self.assertEqual(c.number_class(0), c.number_class(Decimal(0)))
self.assertEqual(c.number_class(-45), c.number_class(Decimal(-45)))
def test_power(self):
c = Context()
d = c.power(Decimal(1), Decimal(4), Decimal(2))
self.assertEqual(c.power(1, 4, 2), d)
self.assertEqual(c.power(Decimal(1), 4, 2), d)
self.assertEqual(c.power(1, Decimal(4), 2), d)
self.assertEqual(c.power(1, 4, Decimal(2)), d)
self.assertEqual(c.power(Decimal(1), Decimal(4), 2), d)
self.assertRaises(TypeError, c.power, '1', 4, 2)
self.assertRaises(TypeError, c.power, 1, '4', 2)
self.assertRaises(TypeError, c.power, 1, 4, '2')
def test_plus(self):
c = Context()
d = c.plus(Decimal(10))
self.assertEqual(c.plus(10), d)
self.assertRaises(TypeError, c.plus, '10')
def test_quantize(self):
c = Context()
d = c.quantize(Decimal(1), Decimal(2))
self.assertEqual(c.quantize(1, 2), d)
self.assertEqual(c.quantize(Decimal(1), 2), d)
self.assertEqual(c.quantize(1, Decimal(2)), d)
self.assertRaises(TypeError, c.quantize, '1', 2)
self.assertRaises(TypeError, c.quantize, 1, '2')
def test_remainder(self):
c = Context()
d = c.remainder(Decimal(1), Decimal(2))
self.assertEqual(c.remainder(1, 2), d)
self.assertEqual(c.remainder(Decimal(1), 2), d)
self.assertEqual(c.remainder(1, Decimal(2)), d)
self.assertRaises(TypeError, c.remainder, '1', 2)
self.assertRaises(TypeError, c.remainder, 1, '2')
def test_remainder_near(self):
c = Context()
d = c.remainder_near(Decimal(1), Decimal(2))
self.assertEqual(c.remainder_near(1, 2), d)
self.assertEqual(c.remainder_near(Decimal(1), 2), d)
self.assertEqual(c.remainder_near(1, Decimal(2)), d)
self.assertRaises(TypeError, c.remainder_near, '1', 2)
self.assertRaises(TypeError, c.remainder_near, 1, '2')
def test_rotate(self):
c = Context()
d = c.rotate(Decimal(1), Decimal(2))
self.assertEqual(c.rotate(1, 2), d)
self.assertEqual(c.rotate(Decimal(1), 2), d)
self.assertEqual(c.rotate(1, Decimal(2)), d)
self.assertRaises(TypeError, c.rotate, '1', 2)
self.assertRaises(TypeError, c.rotate, 1, '2')
def test_sqrt(self):
c = Context()
d = c.sqrt(Decimal(10))
self.assertEqual(c.sqrt(10), d)
self.assertRaises(TypeError, c.sqrt, '10')
def test_same_quantum(self):
c = Context()
d = c.same_quantum(Decimal(1), Decimal(2))
self.assertEqual(c.same_quantum(1, 2), d)
self.assertEqual(c.same_quantum(Decimal(1), 2), d)
self.assertEqual(c.same_quantum(1, Decimal(2)), d)
self.assertRaises(TypeError, c.same_quantum, '1', 2)
self.assertRaises(TypeError, c.same_quantum, 1, '2')
def test_scaleb(self):
c = Context()
d = c.scaleb(Decimal(1), Decimal(2))
self.assertEqual(c.scaleb(1, 2), d)
self.assertEqual(c.scaleb(Decimal(1), 2), d)
self.assertEqual(c.scaleb(1, Decimal(2)), d)
self.assertRaises(TypeError, c.scaleb, '1', 2)
self.assertRaises(TypeError, c.scaleb, 1, '2')
def test_shift(self):
c = Context()
d = c.shift(Decimal(1), Decimal(2))
self.assertEqual(c.shift(1, 2), d)
self.assertEqual(c.shift(Decimal(1), 2), d)
self.assertEqual(c.shift(1, Decimal(2)), d)
self.assertRaises(TypeError, c.shift, '1', 2)
self.assertRaises(TypeError, c.shift, 1, '2')
def test_subtract(self):
c = Context()
d = c.subtract(Decimal(1), Decimal(2))
self.assertEqual(c.subtract(1, 2), d)
self.assertEqual(c.subtract(Decimal(1), 2), d)
self.assertEqual(c.subtract(1, Decimal(2)), d)
self.assertRaises(TypeError, c.subtract, '1', 2)
self.assertRaises(TypeError, c.subtract, 1, '2')
def test_to_eng_string(self):
c = Context()
d = c.to_eng_string(Decimal(10))
self.assertEqual(c.to_eng_string(10), d)
self.assertRaises(TypeError, c.to_eng_string, '10')
def test_to_sci_string(self):
c = Context()
d = c.to_sci_string(Decimal(10))
self.assertEqual(c.to_sci_string(10), d)
self.assertRaises(TypeError, c.to_sci_string, '10')
def test_to_integral_exact(self):
c = Context()
d = c.to_integral_exact(Decimal(10))
self.assertEqual(c.to_integral_exact(10), d)
self.assertRaises(TypeError, c.to_integral_exact, '10')
def test_to_integral_value(self):
c = Context()
d = c.to_integral_value(Decimal(10))
self.assertEqual(c.to_integral_value(10), d)
self.assertRaises(TypeError, c.to_integral_value, '10')
class WithStatementTest(unittest.TestCase):
# Can't do these as docstrings until Python 2.6
# as doctest can't handle __future__ statements
def test_localcontext(self):
# Use a copy of the current context in the block
orig_ctx = getcontext()
with localcontext() as enter_ctx:
set_ctx = getcontext()
final_ctx = getcontext()
self.assertIs(orig_ctx, final_ctx, 'did not restore context correctly')
self.assertIsNot(orig_ctx, set_ctx, 'did not copy the context')
self.assertIs(set_ctx, enter_ctx, '__enter__ returned wrong context')
def test_localcontextarg(self):
# Use a copy of the supplied context in the block
orig_ctx = getcontext()
new_ctx = Context(prec=42)
with localcontext(new_ctx) as enter_ctx:
set_ctx = getcontext()
final_ctx = getcontext()
self.assertIs(orig_ctx, final_ctx, 'did not restore context correctly')
self.assertEqual(set_ctx.prec, new_ctx.prec, 'did not set correct context')
self.assertIsNot(new_ctx, set_ctx, 'did not copy the context')
self.assertIs(set_ctx, enter_ctx, '__enter__ returned wrong context')
class ContextFlags(unittest.TestCase):
def test_flags_irrelevant(self):
# check that the result (numeric result + flags raised) of an
# arithmetic operation doesn't depend on the current flags
context = Context(prec=9, Emin = -999999999, Emax = 999999999,
rounding=ROUND_HALF_EVEN, traps=[], flags=[])
# operations that raise various flags, in the form (function, arglist)
operations = [
(context._apply, [Decimal("100E-1000000009")]),
(context.sqrt, [Decimal(2)]),
(context.add, [Decimal("1.23456789"), Decimal("9.87654321")]),
(context.multiply, [Decimal("1.23456789"), Decimal("9.87654321")]),
(context.subtract, [Decimal("1.23456789"), Decimal("9.87654321")]),
]
# try various flags individually, then a whole lot at once
flagsets = [[Inexact], [Rounded], [Underflow], [Clamped], [Subnormal],
[Inexact, Rounded, Underflow, Clamped, Subnormal]]
for fn, args in operations:
# find answer and flags raised using a clean context
context.clear_flags()
ans = fn(*args)
flags = [k for k, v in context.flags.items() if v]
for extra_flags in flagsets:
# set flags, before calling operation
context.clear_flags()
for flag in extra_flags:
context._raise_error(flag)
new_ans = fn(*args)
# flags that we expect to be set after the operation
expected_flags = list(flags)
for flag in extra_flags:
if flag not in expected_flags:
expected_flags.append(flag)
expected_flags.sort(key=id)
# flags we actually got
new_flags = [k for k,v in context.flags.items() if v]
new_flags.sort(key=id)
self.assertEqual(ans, new_ans,
"operation produces different answers depending on flags set: " +
"expected %s, got %s." % (ans, new_ans))
self.assertEqual(new_flags, expected_flags,
"operation raises different flags depending on flags set: " +
"expected %s, got %s" % (expected_flags, new_flags))
def test_main(arith=False, verbose=None, todo_tests=None, debug=None):
""" Execute the tests.
Runs all arithmetic tests if arith is True or if the "decimal" resource
is enabled in regrtest.py
"""
init()
global TEST_ALL, DEBUG
TEST_ALL = arith or is_resource_enabled('decimal')
DEBUG = debug
if todo_tests is None:
test_classes = [
DecimalExplicitConstructionTest,
DecimalImplicitConstructionTest,
DecimalArithmeticOperatorsTest,
DecimalFormatTest,
DecimalUseOfContextTest,
DecimalUsabilityTest,
DecimalPythonAPItests,
ContextAPItests,
DecimalTest,
WithStatementTest,
ContextFlags
]
else:
test_classes = [DecimalTest]
# Dynamically build custom test definition for each file in the test
# directory and add the definitions to the DecimalTest class. This
    # procedure ensures that new files do not get skipped.
for filename in os.listdir(directory):
if '.decTest' not in filename or filename.startswith("."):
continue
head, tail = filename.split('.')
if todo_tests is not None and head not in todo_tests:
continue
tester = lambda self, f=filename: self.eval_file(directory + f)
setattr(DecimalTest, 'test_' + head, tester)
del filename, head, tail, tester
try:
run_unittest(*test_classes)
if todo_tests is None:
import decimal as DecimalModule
run_doctest(DecimalModule, verbose)
finally:
setcontext(ORIGINAL_CONTEXT)
if __name__ == '__main__':
import optparse
p = optparse.OptionParser("test_decimal.py [--debug] [{--skip | test1 [test2 [...]]}]")
p.add_option('--debug', '-d', action='store_true', help='shows the test number and context before each test')
p.add_option('--skip', '-s', action='store_true', help='skip over 90% of the arithmetic tests')
(opt, args) = p.parse_args()
if opt.skip:
test_main(arith=False, verbose=True)
elif args:
test_main(arith=True, verbose=True, todo_tests=args, debug=opt.debug)
else:
test_main(arith=True, verbose=True)
|
hafeez3000/zulip | refs/heads/master | zerver/lib/test_helpers.py | 114 | from django.test import TestCase
from zerver.lib.initial_password import initial_password
from zerver.lib.db import TimeTrackingCursor
from zerver.lib import cache
from zerver.lib import event_queue
from zerver.worker import queue_processors
from zerver.lib.actions import (
check_send_message, create_stream_if_needed, do_add_subscription,
get_display_recipient, get_user_profile_by_email,
)
from zerver.models import (
resolve_email_to_domain,
Client,
Message,
Realm,
Recipient,
Stream,
Subscription,
UserMessage,
)
import base64
import os
import re
import time
import ujson
import urllib
from contextlib import contextmanager
API_KEYS = {}
@contextmanager
def stub(obj, name, f):
old_f = getattr(obj, name)
setattr(obj, name, f)
yield
setattr(obj, name, old_f)
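# Illustrative usage sketch for stub() (not part of the original module); the
# patched object and attribute below are hypothetical:
#
#   with stub(time, 'sleep', lambda seconds: None):
#       run_code_under_test()  # executes without actually sleeping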
@contextmanager
def simulated_queue_client(client):
real_SimpleQueueClient = queue_processors.SimpleQueueClient
queue_processors.SimpleQueueClient = client
yield
queue_processors.SimpleQueueClient = real_SimpleQueueClient
@contextmanager
def tornado_redirected_to_list(lst):
real_event_queue_process_notification = event_queue.process_notification
event_queue.process_notification = lst.append
yield
event_queue.process_notification = real_event_queue_process_notification
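# Illustrative usage sketch (not part of the original module): capture the
# notifications that would normally go to Tornado into a plain list.
#
#   events = []
#   with tornado_redirected_to_list(events):
#       ...  # trigger actions that produce notifications
#   # ``events`` now holds everything passed to process_notification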
@contextmanager
def simulated_empty_cache():
cache_queries = []
def my_cache_get(key, cache_name=None):
cache_queries.append(('get', key, cache_name))
return None
def my_cache_get_many(keys, cache_name=None):
cache_queries.append(('getmany', keys, cache_name))
return None
old_get = cache.cache_get
old_get_many = cache.cache_get_many
cache.cache_get = my_cache_get
cache.cache_get_many = my_cache_get_many
yield cache_queries
cache.cache_get = old_get
cache.cache_get_many = old_get_many
@contextmanager
def queries_captured():
'''
Allow a user to capture just the queries executed during
the with statement.
'''
queries = []
def wrapper_execute(self, action, sql, params=()):
start = time.time()
try:
return action(sql, params)
finally:
stop = time.time()
duration = stop - start
queries.append({
'sql': self.mogrify(sql, params),
'time': "%.3f" % duration,
})
old_execute = TimeTrackingCursor.execute
old_executemany = TimeTrackingCursor.executemany
def cursor_execute(self, sql, params=()):
return wrapper_execute(self, super(TimeTrackingCursor, self).execute, sql, params)
TimeTrackingCursor.execute = cursor_execute
def cursor_executemany(self, sql, params=()):
return wrapper_execute(self, super(TimeTrackingCursor, self).executemany, sql, params)
TimeTrackingCursor.executemany = cursor_executemany
yield queries
TimeTrackingCursor.execute = old_execute
TimeTrackingCursor.executemany = old_executemany
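# Illustrative usage sketch (not part of the original module); the query and
# the expected count below are hypothetical:
#
#   with queries_captured() as queries:
#       list(Message.objects.all()[:10])
#   # each entry is a dict with 'sql' and 'time' keys
#   assert len(queries) == 1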
def find_key_by_email(address):
from django.core.mail import outbox
key_regex = re.compile("accounts/do_confirm/([a-f0-9]{40})>")
for message in reversed(outbox):
if address in message.to:
return key_regex.search(message.body).groups()[0]
def message_ids(result):
return set(message['id'] for message in result['messages'])
def message_stream_count(user_profile):
return UserMessage.objects. \
select_related("message"). \
filter(user_profile=user_profile). \
count()
def most_recent_usermessage(user_profile):
query = UserMessage.objects. \
select_related("message"). \
filter(user_profile=user_profile). \
order_by('-message')
return query[0] # Django does LIMIT here
def most_recent_message(user_profile):
usermessage = most_recent_usermessage(user_profile)
return usermessage.message
def get_user_messages(user_profile):
query = UserMessage.objects. \
select_related("message"). \
filter(user_profile=user_profile). \
order_by('message')
return [um.message for um in query]
class DummyObject:
pass
class DummyTornadoRequest:
def __init__(self):
self.connection = DummyObject()
self.connection.stream = DummyStream()
class DummyHandler(object):
def __init__(self, assert_callback):
self.assert_callback = assert_callback
self.request = DummyTornadoRequest()
# Mocks RequestHandler.async_callback, which wraps a callback to
# handle exceptions. We return the callback as-is.
def async_callback(self, cb):
return cb
def write(self, response):
        raise NotImplementedError
def zulip_finish(self, response, *ignore):
if self.assert_callback:
self.assert_callback(response)
class DummySession(object):
session_key = "0"
class DummyStream:
def closed(self):
return False
class POSTRequestMock(object):
method = "POST"
def __init__(self, post_data, user_profile, assert_callback=None):
self.REQUEST = self.POST = post_data
self.user = user_profile
self._tornado_handler = DummyHandler(assert_callback)
self.session = DummySession()
self._log_data = {}
self.META = {'PATH_INFO': 'test'}
class AuthedTestCase(TestCase):
    # Helper because self.client.patch annoyingly requires you to urlencode
def client_patch(self, url, info={}, **kwargs):
info = urllib.urlencode(info)
return self.client.patch(url, info, **kwargs)
def client_put(self, url, info={}, **kwargs):
info = urllib.urlencode(info)
return self.client.put(url, info, **kwargs)
def client_delete(self, url, info={}, **kwargs):
info = urllib.urlencode(info)
return self.client.delete(url, info, **kwargs)
def login(self, email, password=None):
if password is None:
password = initial_password(email)
return self.client.post('/accounts/login/',
{'username':email, 'password':password})
def register(self, username, password, domain="zulip.com"):
self.client.post('/accounts/home/',
{'email': username + "@" + domain})
return self.submit_reg_form_for_user(username, password, domain=domain)
def submit_reg_form_for_user(self, username, password, domain="zulip.com"):
"""
Stage two of the two-step registration process.
If things are working correctly the account should be fully
registered after this call.
"""
return self.client.post('/accounts/register/',
{'full_name': username, 'password': password,
'key': find_key_by_email(username + '@' + domain),
'terms': True})
def get_api_key(self, email):
if email not in API_KEYS:
API_KEYS[email] = get_user_profile_by_email(email).api_key
return API_KEYS[email]
def api_auth(self, email):
credentials = "%s:%s" % (email, self.get_api_key(email))
return {
'HTTP_AUTHORIZATION': 'Basic ' + base64.b64encode(credentials)
}
def get_streams(self, email):
"""
Helper function to get the stream names for a user
"""
user_profile = get_user_profile_by_email(email)
subs = Subscription.objects.filter(
user_profile = user_profile,
active = True,
recipient__type = Recipient.STREAM)
return [get_display_recipient(sub.recipient) for sub in subs]
def send_message(self, sender_name, recipient_list, message_type,
content="test content", subject="test", **kwargs):
sender = get_user_profile_by_email(sender_name)
if message_type == Recipient.PERSONAL:
message_type_name = "private"
else:
message_type_name = "stream"
if isinstance(recipient_list, basestring):
recipient_list = [recipient_list]
(sending_client, _) = Client.objects.get_or_create(name="test suite")
return check_send_message(
sender, sending_client, message_type_name, recipient_list, subject,
content, forged=False, forged_timestamp=None,
forwarder_user_profile=sender, realm=sender.realm, **kwargs)
def get_old_messages(self, anchor=1, num_before=100, num_after=100):
post_params = {"anchor": anchor, "num_before": num_before,
"num_after": num_after}
result = self.client.post("/json/get_old_messages", dict(post_params))
data = ujson.loads(result.content)
return data['messages']
def users_subscribed_to_stream(self, stream_name, realm_domain):
realm = Realm.objects.get(domain=realm_domain)
stream = Stream.objects.get(name=stream_name, realm=realm)
recipient = Recipient.objects.get(type_id=stream.id, type=Recipient.STREAM)
subscriptions = Subscription.objects.filter(recipient=recipient, active=True)
return [subscription.user_profile for subscription in subscriptions]
def assert_json_success(self, result):
"""
Successful POSTs return a 200 and JSON of the form {"result": "success",
"msg": ""}.
"""
self.assertEqual(result.status_code, 200, result)
json = ujson.loads(result.content)
self.assertEqual(json.get("result"), "success")
# We have a msg key for consistency with errors, but it typically has an
# empty value.
self.assertIn("msg", json)
return json
def get_json_error(self, result, status_code=400):
self.assertEqual(result.status_code, status_code)
json = ujson.loads(result.content)
self.assertEqual(json.get("result"), "error")
return json['msg']
def assert_json_error(self, result, msg, status_code=400):
"""
Invalid POSTs return an error status code and JSON of the form
{"result": "error", "msg": "reason"}.
"""
self.assertEqual(self.get_json_error(result, status_code=status_code), msg)
def assert_length(self, queries, count, exact=False):
actual_count = len(queries)
if exact:
return self.assertTrue(actual_count == count,
"len(%s) == %s, != %s" % (queries, actual_count, count))
return self.assertTrue(actual_count <= count,
"len(%s) == %s, > %s" % (queries, actual_count, count))
def assert_json_error_contains(self, result, msg_substring):
self.assertIn(msg_substring, self.get_json_error(result))
def fixture_data(self, type, action, file_type='json'):
return open(os.path.join(os.path.dirname(__file__),
"../fixtures/%s/%s_%s.%s" % (type, type, action,file_type))).read()
# Subscribe to a stream directly
def subscribe_to_stream(self, email, stream_name, realm=None):
realm = Realm.objects.get(domain=resolve_email_to_domain(email))
stream, _ = create_stream_if_needed(realm, stream_name)
user_profile = get_user_profile_by_email(email)
do_add_subscription(user_profile, stream, no_log=True)
# Subscribe to a stream by making an API request
def common_subscribe_to_streams(self, email, streams, extra_post_data = {}, invite_only=False):
post_data = {'subscriptions': ujson.dumps([{"name": stream} for stream in streams]),
'invite_only': ujson.dumps(invite_only)}
post_data.update(extra_post_data)
result = self.client.post("/api/v1/users/me/subscriptions", post_data, **self.api_auth(email))
return result
def send_json_payload(self, email, url, payload, stream_name=None, **post_params):
        if stream_name is not None:
self.subscribe_to_stream(email, stream_name)
result = self.client.post(url, payload, **post_params)
self.assert_json_success(result)
# Check the correct message was sent
msg = Message.objects.filter().order_by('-id')[0]
self.assertEqual(msg.sender.email, email)
self.assertEqual(get_display_recipient(msg.recipient), stream_name)
return msg
|
adviti/melange | refs/heads/master | thirdparty/google_appengine/google/appengine/_internal/django/core/serializers/base.py | 23 | """
Module for abstract serializer/unserializer base classes.
"""
from StringIO import StringIO
from google.appengine._internal.django.db import models
from google.appengine._internal.django.utils.encoding import smart_str, smart_unicode
from google.appengine._internal.django.utils import datetime_safe
class SerializationError(Exception):
"""Something bad happened during serialization."""
pass
class DeserializationError(Exception):
"""Something bad happened during deserialization."""
pass
class Serializer(object):
"""
Abstract serializer base class.
"""
# Indicates if the implemented serializer is only available for
# internal Django use.
internal_use_only = False
def serialize(self, queryset, **options):
"""
Serialize a queryset.
"""
self.options = options
self.stream = options.pop("stream", StringIO())
self.selected_fields = options.pop("fields", None)
self.use_natural_keys = options.pop("use_natural_keys", False)
self.start_serialization()
for obj in queryset:
self.start_object(obj)
for field in obj._meta.local_fields:
if field.serialize:
if field.rel is None:
if self.selected_fields is None or field.attname in self.selected_fields:
self.handle_field(obj, field)
else:
if self.selected_fields is None or field.attname[:-3] in self.selected_fields:
self.handle_fk_field(obj, field)
for field in obj._meta.many_to_many:
if field.serialize:
if self.selected_fields is None or field.attname in self.selected_fields:
self.handle_m2m_field(obj, field)
self.end_object(obj)
self.end_serialization()
return self.getvalue()
def get_string_value(self, obj, field):
"""
Convert a field's value to a string.
"""
return smart_unicode(field.value_to_string(obj))
def start_serialization(self):
"""
Called when serializing of the queryset starts.
"""
raise NotImplementedError
def end_serialization(self):
"""
Called when serializing of the queryset ends.
"""
pass
def start_object(self, obj):
"""
Called when serializing of an object starts.
"""
raise NotImplementedError
def end_object(self, obj):
"""
Called when serializing of an object ends.
"""
pass
def handle_field(self, obj, field):
"""
Called to handle each individual (non-relational) field on an object.
"""
raise NotImplementedError
def handle_fk_field(self, obj, field):
"""
Called to handle a ForeignKey field.
"""
raise NotImplementedError
def handle_m2m_field(self, obj, field):
"""
Called to handle a ManyToManyField.
"""
raise NotImplementedError
def getvalue(self):
"""
Return the fully serialized queryset (or None if the output stream is
not seekable).
"""
if callable(getattr(self.stream, 'getvalue', None)):
return self.stream.getvalue()
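# Illustrative sketch (not part of the original module) of a minimal concrete
# serializer built on the hooks above; the output format is invented and the
# relational fields are simply skipped:
#
#   class TextSerializer(Serializer):
#       def start_serialization(self):
#           pass
#       def start_object(self, obj):
#           self.stream.write("%s.%s pk=%s\n" % (obj._meta.app_label,
#                                                obj._meta.object_name, obj.pk))
#       def handle_field(self, obj, field):
#           self.stream.write("  %s=%s\n" % (field.name,
#                                            self.get_string_value(obj, field)))
#       def handle_fk_field(self, obj, field):
#           pass
#       def handle_m2m_field(self, obj, field):
#           pass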
class Deserializer(object):
"""
Abstract base deserializer class.
"""
def __init__(self, stream_or_string, **options):
"""
Init this serializer given a stream or a string
"""
self.options = options
if isinstance(stream_or_string, basestring):
self.stream = StringIO(stream_or_string)
else:
self.stream = stream_or_string
# hack to make sure that the models have all been loaded before
# deserialization starts (otherwise subclass calls to get_model()
# and friends might fail...)
models.get_apps()
def __iter__(self):
return self
def next(self):
"""Iteration iterface -- return the next item in the stream"""
raise NotImplementedError
class DeserializedObject(object):
"""
A deserialized model.
Basically a container for holding the pre-saved deserialized data along
with the many-to-many data saved with the object.
Call ``save()`` to save the object (with the many-to-many data) to the
database; call ``save(save_m2m=False)`` to save just the object fields
(and not touch the many-to-many stuff.)
"""
def __init__(self, obj, m2m_data=None):
self.object = obj
self.m2m_data = m2m_data
def __repr__(self):
return "<DeserializedObject: %s.%s(pk=%s)>" % (
self.object._meta.app_label, self.object._meta.object_name, self.object.pk)
def save(self, save_m2m=True, using=None):
# Call save on the Model baseclass directly. This bypasses any
# model-defined save. The save is also forced to be raw.
# This ensures that the data that is deserialized is literally
# what came from the file, not post-processed by pre_save/save
# methods.
models.Model.save_base(self.object, using=using, raw=True)
if self.m2m_data and save_m2m:
for accessor_name, object_list in self.m2m_data.items():
setattr(self.object, accessor_name, object_list)
# prevent a second (possibly accidental) call to save() from saving
# the m2m data twice.
self.m2m_data = None
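# Illustrative usage sketch (not part of the original module); ``stream`` and
# the concrete deserializer class are hypothetical:
#
#   for wrapper in SomeDeserializer(stream):
#       wrapper.save()                  # saves the object plus its m2m data
#       # wrapper.save(save_m2m=False)  # would save only the plain fields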
|
interlegis/sapl | refs/heads/3.1.x | sapl/protocoloadm/migrations/0030_auto_20200114_1121.py | 1 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.20 on 2020-01-14 14:21
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('protocoloadm', '0029_merge_20191209_1531'),
]
operations = [
migrations.AlterField(
model_name='documentoadministrativo',
name='ano',
field=models.PositiveSmallIntegerField(choices=[(2021, 2021), (2020, 2020), (2019, 2019), (2018, 2018), (2017, 2017), (2016, 2016), (2015, 2015), (2014, 2014), (2013, 2013), (2012, 2012), (2011, 2011), (2010, 2010), (2009, 2009), (2008, 2008), (2007, 2007), (2006, 2006), (2005, 2005), (2004, 2004), (2003, 2003), (2002, 2002), (2001, 2001), (2000, 2000), (1999, 1999), (1998, 1998), (1997, 1997), (1996, 1996), (1995, 1995), (1994, 1994), (1993, 1993), (1992, 1992), (1991, 1991), (1990, 1990), (1989, 1989), (1988, 1988), (1987, 1987), (1986, 1986), (1985, 1985), (1984, 1984), (1983, 1983), (1982, 1982), (1981, 1981), (1980, 1980), (1979, 1979), (1978, 1978), (1977, 1977), (1976, 1976), (1975, 1975), (1974, 1974), (1973, 1973), (1972, 1972), (1971, 1971), (1970, 1970), (1969, 1969), (1968, 1968), (1967, 1967), (1966, 1966), (1965, 1965), (1964, 1964), (1963, 1963), (1962, 1962), (1961, 1961), (1960, 1960), (1959, 1959), (1958, 1958), (1957, 1957), (1956, 1956), (1955, 1955), (1954, 1954), (1953, 1953), (1952, 1952), (1951, 1951), (1950, 1950), (1949, 1949), (1948, 1948), (1947, 1947), (1946, 1946), (1945, 1945), (1944, 1944), (1943, 1943), (1942, 1942), (1941, 1941), (1940, 1940), (1939, 1939), (1938, 1938), (1937, 1937), (1936, 1936), (1935, 1935), (1934, 1934), (1933, 1933), (1932, 1932), (1931, 1931), (1930, 1930), (1929, 1929), (1928, 1928), (1927, 1927), (1926, 1926), (1925, 1925), (1924, 1924), (1923, 1923), (1922, 1922), (1921, 1921), (1920, 1920), (1919, 1919), (1918, 1918), (1917, 1917), (1916, 1916), (1915, 1915), (1914, 1914), (1913, 1913), (1912, 1912), (1911, 1911), (1910, 1910), (1909, 1909), (1908, 1908), (1907, 1907), (1906, 1906), (1905, 1905), (1904, 1904), (1903, 1903), (1902, 1902), (1901, 1901), (1900, 1900), (1899, 1899), (1898, 1898), (1897, 1897), (1896, 1896), (1895, 1895), (1894, 1894), (1893, 1893), (1892, 1892), (1891, 1891), (1890, 1890)], verbose_name='Ano'),
),
migrations.AlterField(
model_name='protocolo',
name='ano',
field=models.PositiveSmallIntegerField(choices=[(2021, 2021), (2020, 2020), (2019, 2019), (2018, 2018), (2017, 2017), (2016, 2016), (2015, 2015), (2014, 2014), (2013, 2013), (2012, 2012), (2011, 2011), (2010, 2010), (2009, 2009), (2008, 2008), (2007, 2007), (2006, 2006), (2005, 2005), (2004, 2004), (2003, 2003), (2002, 2002), (2001, 2001), (2000, 2000), (1999, 1999), (1998, 1998), (1997, 1997), (1996, 1996), (1995, 1995), (1994, 1994), (1993, 1993), (1992, 1992), (1991, 1991), (1990, 1990), (1989, 1989), (1988, 1988), (1987, 1987), (1986, 1986), (1985, 1985), (1984, 1984), (1983, 1983), (1982, 1982), (1981, 1981), (1980, 1980), (1979, 1979), (1978, 1978), (1977, 1977), (1976, 1976), (1975, 1975), (1974, 1974), (1973, 1973), (1972, 1972), (1971, 1971), (1970, 1970), (1969, 1969), (1968, 1968), (1967, 1967), (1966, 1966), (1965, 1965), (1964, 1964), (1963, 1963), (1962, 1962), (1961, 1961), (1960, 1960), (1959, 1959), (1958, 1958), (1957, 1957), (1956, 1956), (1955, 1955), (1954, 1954), (1953, 1953), (1952, 1952), (1951, 1951), (1950, 1950), (1949, 1949), (1948, 1948), (1947, 1947), (1946, 1946), (1945, 1945), (1944, 1944), (1943, 1943), (1942, 1942), (1941, 1941), (1940, 1940), (1939, 1939), (1938, 1938), (1937, 1937), (1936, 1936), (1935, 1935), (1934, 1934), (1933, 1933), (1932, 1932), (1931, 1931), (1930, 1930), (1929, 1929), (1928, 1928), (1927, 1927), (1926, 1926), (1925, 1925), (1924, 1924), (1923, 1923), (1922, 1922), (1921, 1921), (1920, 1920), (1919, 1919), (1918, 1918), (1917, 1917), (1916, 1916), (1915, 1915), (1914, 1914), (1913, 1913), (1912, 1912), (1911, 1911), (1910, 1910), (1909, 1909), (1908, 1908), (1907, 1907), (1906, 1906), (1905, 1905), (1904, 1904), (1903, 1903), (1902, 1902), (1901, 1901), (1900, 1900), (1899, 1899), (1898, 1898), (1897, 1897), (1896, 1896), (1895, 1895), (1894, 1894), (1893, 1893), (1892, 1892), (1891, 1891), (1890, 1890)], verbose_name='Ano do Protocolo'),
),
]
|
jinzekid/codehub | refs/heads/master | python/test_gui/TaskManager/MyWindow.py | 1 |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'MyWindow.ui'
#
# Created by: PyQt5 UI code generator 5.10.1
#
# WARNING! All changes made in this file will be lost!
import sys
import time
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
from PyQt5.QtCore import QThread, pyqtSignal, QObject, QDateTime
from SetTaskDetail import *
from TaskManager import *
class BackendThread(QObject):
update_date = pyqtSignal(str)
def run(self):
while True:
data = QDateTime.currentDateTime()
currTime = data.toString('yyyy-MM-dd hh:mm:ss')
self.update_date.emit(str(currTime))
time.sleep(1)
class childWindow(QDialog):
def __init__(self):
QDialog.__init__(self)
self.child=Ui_QDSetTaskDetail()
self.child.setupUi(self)
self.child.initUIAction()
"""对QDialog类重写,实现一些功能"""
def closeEvent(self, event):
"""
重写closeEvent方法,实现dialog窗体关闭时执行一些代码
:param event: close()触发的事件
:return: None
"""
if self.child.finish_set():
event.accept()
else:
reply = QtWidgets.QMessageBox.question(self,
'本程序',
"是否取消任务设置?",
QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No,
QtWidgets.QMessageBox.No)
if reply == QtWidgets.QMessageBox.Yes:
event.accept()
else:
event.ignore()
def close_dialog(self):
self.close()
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(800, 600)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(MainWindow.sizePolicy().hasHeightForWidth())
MainWindow.setSizePolicy(sizePolicy)
self.centralwidget = QtWidgets.QWidget(MainWindow)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.centralwidget.sizePolicy().hasHeightForWidth())
self.centralwidget.setSizePolicy(sizePolicy)
self.centralwidget.setObjectName("centralwidget")
self.line = QtWidgets.QFrame(self.centralwidget)
self.line.setGeometry(QtCore.QRect(0, 60, 801, 16))
self.line.setFrameShape(QtWidgets.QFrame.HLine)
self.line.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line.setObjectName("line")
self.horizontalLayoutWidget = QtWidgets.QWidget(self.centralwidget)
self.horizontalLayoutWidget.setGeometry(QtCore.QRect(0, 85, 801, 471))
self.horizontalLayoutWidget.setObjectName("horizontalLayoutWidget")
self.horizontalLayout = QtWidgets.QHBoxLayout(self.horizontalLayoutWidget)
self.horizontalLayout.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout.setObjectName("horizontalLayout")
self.line_2 = QtWidgets.QFrame(self.horizontalLayoutWidget)
self.line_2.setFrameShape(QtWidgets.QFrame.VLine)
self.line_2.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_2.setObjectName("line_2")
self.horizontalLayout.addWidget(self.line_2)
self.gridLayout = QtWidgets.QGridLayout()
self.gridLayout.setObjectName("gridLayout")
self.label = QtWidgets.QLabel(self.horizontalLayoutWidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label.sizePolicy().hasHeightForWidth())
self.label.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setPointSize(15)
self.label.setFont(font)
self.label.setObjectName("label")
self.gridLayout.addWidget(self.label, 0, 0, 1, 1)
self.line_4 = QtWidgets.QFrame(self.horizontalLayoutWidget)
self.line_4.setFrameShape(QtWidgets.QFrame.HLine)
self.line_4.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_4.setObjectName("line_4")
self.gridLayout.addWidget(self.line_4, 2, 0, 1, 1)
self.tabOfTasks = QtWidgets.QTableWidget(self.horizontalLayoutWidget)
self.tabOfTasks.setObjectName("tabOfTasks")
self.tabOfTasks.setColumnCount(3)
self.tabOfTasks.setHorizontalHeaderLabels(['Task Name', 'Left Time', 'Options'])
self.tabOfTasks.setRowCount(0)
        # Add a drop-down combo box
'''
comb = QtWidgets.QComboBox()
comb.addItem('Famle')
comb.addItem('Male')
comb.setCurrentIndex(0)
self.tabOfTasks.setCellWidget(rowCnt, 1, comb)
'''
        # Add a checkable item
'''
checkBox = QtWidgets.QTableWidgetItem()
checkBox.setCheckState(Qt.Unchecked)
checkBox.setText('勾选启用')
self.tabOfTasks.setItem(rowCnt, 0 , item)
self.tabOfTasks.setItem(rowCnt, 1, checkBox)
'''
self.gridLayout.addWidget(self.tabOfTasks, 3, 0, 1, 1)
self.horizontalLayout.addLayout(self.gridLayout)
self.line_3 = QtWidgets.QFrame(self.horizontalLayoutWidget)
self.line_3.setFrameShape(QtWidgets.QFrame.VLine)
self.line_3.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_3.setObjectName("line_3")
self.horizontalLayout.addWidget(self.line_3)
self.horizontalLayoutWidget_2 = QtWidgets.QWidget(self.centralwidget)
self.horizontalLayoutWidget_2.setGeometry(QtCore.QRect(10, 10, 781, 41))
self.horizontalLayoutWidget_2.setObjectName("horizontalLayoutWidget_2")
self.horizontalLayout_2 = QtWidgets.QHBoxLayout(self.horizontalLayoutWidget_2)
self.horizontalLayout_2.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.btnAddNewTask = QtWidgets.QPushButton(self.horizontalLayoutWidget_2)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.btnAddNewTask.sizePolicy().hasHeightForWidth())
self.btnAddNewTask.setSizePolicy(sizePolicy)
self.btnAddNewTask.setObjectName("btnAddNewTask")
self.horizontalLayout_2.addWidget(self.btnAddNewTask)
MainWindow.setCentralWidget(self.centralwidget)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.actiontest = QtWidgets.QAction(MainWindow)
self.actiontest.setObjectName("actiontest")
self.actiondakai = QtWidgets.QAction(MainWindow)
self.actiondakai.setObjectName("actiondakai")
self.actionOpen = QtWidgets.QAction(MainWindow)
self.actionOpen.setObjectName("actionOpen")
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
self.ui_set_task_detail = childWindow()
self.selectedTasks = []
        # Initialize the global task manager
TaskManager().init_taskManager(self.refresh_del_task)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
self.label.setText(_translate("MainWindow", "任务列表:"))
self.btnAddNewTask.setText(_translate("MainWindow", "新增任务"))
self.actiontest.setText(_translate("MainWindow", "test"))
self.actiondakai.setText(_translate("MainWindow", "dakai "))
self.actionOpen.setText(_translate("MainWindow", "Open..."))
def initUIAction(self):
        # Wire the "add new task" button to its handler
self.btnAddNewTask.clicked.connect(self.add_new_task_detail)
        # Set the list selection mode to extended (multi-select)
self.tabOfTasks.setSelectionMode(
QAbstractItemView.ExtendedSelection)
        # Item click action for the list
#self.tabOfTasks.itemClicked.connect(self.clicked_items)
taskManager = TaskManager()
        # Connect the signal to its slot
taskManager.update_date.connect(self.refresh_status_task)
self.thread = QThread()
taskManager.moveToThread(self.thread)
        # Start the worker thread
self.thread.started.connect(taskManager.run)
self.thread.start()
def clicked_items(self):
print(self.tabOfTasks.currentItem())
indexs = self.tabOfTasks.selectedIndexes()
items = self.tabOfTasks.selectedItems()
self.selectedTasks.clear()
#self.show_set_new_task_detail()
'''
for item in items:
print("task name:" + item.text())
self.selectedTasks.append(item.text())
break
'''
    # Add a new task
def add_new_task_detail(self):
self.ui_set_task_detail.child.new_task_detail(self.refresh_new_task)
self.ui_set_task_detail.setModal(True)
self.ui_set_task_detail.show()
    # Show task details, used to edit a task's content
def show_task_detail(self, task):
print('task name:' + task.name)
self.ui_set_task_detail.child.init_task_detail_info(task)
self.ui_set_task_detail.setModal(True)
self.ui_set_task_detail.show()
    # Refresh the main window list with the newly added task
def refresh_new_task(self, newTask):
print("task name: " + newTask.name)
#rowCnt = self.tabOfTasks.rowCount()
#item = QtWidgets.QTableWidgetItem(newTask.name)
#self.tabOfTasks.setItem(rowCnt, 0, item)
#self.tabOfTasks.setRowCount(rowCnt+1)
#cbox = QtWidgets.QCheckBox()
#cbox.setText(newTask.name)
#self.tabOfTasks.addItem(item)
#btn = QtWidgets.QPushButton()
#btn.setText('删除')
#self.tabOfTasks.setItemWidget(item, btn)
rowCnt = self.tabOfTasks.rowCount()
self.tabOfTasks.setRowCount(rowCnt + 1)
        # Set the task name
item = QtWidgets.QTableWidgetItem(newTask.name)
self.tabOfTasks.setItem(rowCnt, 0 , item)
        # Set the task countdown.
item = QtWidgets.QTableWidgetItem(utils.format_time(newTask.leftTime))
self.tabOfTasks.setItem(rowCnt, 1, item)
print('init item address:' + str(id(item)))
"""
# 连接信号槽
newTask.update_date.connect(self.refresh_status_task)
self.thread = QThread()
newTask.moveToThread(self.thread)
# 开始线程
self.thread.started.connect(newTask.run)
self.thread.start()
"""
"""
self.backend = BackendThread()
self.backend.update_date.connect(self.refresh_status_task)
self.thread = QThread()
self.backend.moveToThread(self.thread)
self.thread.started.connect(self.backend.run)
self.thread.start()
"""
        # Set up task actions: add several buttons to the row
checkBtn = QtWidgets.QPushButton('查看')
checkBtn.clicked.connect(lambda:self.check_task(newTask, newTask.taskToken))
delBtn = QtWidgets.QPushButton('删除')
delBtn.clicked.connect(lambda:self.delete_task(newTask.taskToken))
widget = QtWidgets.QWidget()
hLayout = QtWidgets.QHBoxLayout()
hLayout.addWidget(checkBtn)
hLayout.addWidget(delBtn)
widget.setLayout(hLayout)
self.tabOfTasks.setCellWidget(rowCnt, 2, widget)
pass
def delete_task(self, taskToken):
print('del task token:' + taskToken)
tm = TaskManager()
tm.destory_task(taskToken)
def check_task(self, task, taskToken):
print('check task token:' + taskToken)
self.show_task_detail(task)
    def multi_box_selected(self):
        nCount = self.tabOfTasks.rowCount()
        return nCount
    # Build the button widget placed inside a list row
    def buttonForRow(self, id):
        widget = QtWidgets.QWidget()
        # Edit button
updateBtn = QPushButton('修改')
updateBtn.setStyleSheet(''' text-align : center;
background-color : NavajoWhite;
height : 30px;
border-style: outset;
font : 13px ''')
#updateBtn.clicked.connect(lambda:self.updateTable(id))
        # View button
viewBtn = QPushButton('查看')
viewBtn.setStyleSheet(''' text-align : center;
background-color : DarkSeaGreen;
height : 30px;
border-style: outset;
font : 13px; ''')
#viewBtn.clicked.connect(lambda: self.viewTable(id))
        # Delete button
deleteBtn = QPushButton('删除')
deleteBtn.setStyleSheet(''' text-align : center;
background-color : LightCoral;
height : 30px;
border-style: outset;
font : 13px; ''')
hLayout = QHBoxLayout()
hLayout.addWidget(updateBtn)
hLayout.addWidget(viewBtn)
hLayout.addWidget(deleteBtn)
hLayout.setContentsMargins(5,2,5,2)
widget.setLayout(hLayout)
return widget
    # Remove tasks that have finished from the list
def refresh_del_task(self, index, delTask):
self.tabOfTasks.removeRow(index)
pass
    # Refresh a task's countdown display
def refresh_status_task(self, index, leftTime):
item = self.tabOfTasks.item(index, 1)
item.setText(leftTime)
#item.setText(utils.format_time(refTask.leftTime))
#print('refresh item address:' + str(id(item))+ ',text=' + str(item.text()))
#print('left time:' + utils.format_time(refTask.leftTime))
#print('MainThread:' + MainThread)
#item = QtWidgets.QTableWidgetItem(utils.format_time(refTask.leftTime))
#self.tabOfTasks.setItem(index, 1, item)
pass
if __name__ == '__main__':
app = QApplication(sys.argv)
MainWindow = QMainWindow()
ui = Ui_MainWindow()
ui.setupUi(MainWindow)
ui.initUIAction()
MainWindow.show()
sys.exit(app.exec_())
|
wrouesnel/ansible | refs/heads/devel | test/sanity/code-smell/boilerplate.py | 1 | #!/usr/bin/env python
import sys
def main():
skip = set([
'lib/ansible/compat/selectors/_selectors2.py',
'lib/ansible/module_utils/six/_six.py',
'setup.py',
])
prune = [
'contrib/inventory/',
'contrib/vault/',
'docs/',
'examples/',
'hacking/',
'lib/ansible/module_utils/',
'lib/ansible/modules/cloud/amazon/',
'lib/ansible/modules/cloud/cloudstack/',
'lib/ansible/modules/cloud/ovirt/',
'lib/ansible/modules/network/aos/',
'lib/ansible/modules/network/avi/',
'lib/ansible/modules/network/cloudengine/',
'lib/ansible/modules/network/eos/',
'lib/ansible/modules/network/ios/',
'lib/ansible/modules/network/netvisor/',
'lib/ansible/modules/network/nxos/',
'lib/ansible/modules/network/panos/',
'lib/ansible/modules/network/vyos/',
'lib/ansible/modules/windows/',
'lib/ansible/utils/module_docs_fragments/',
'test/'
]
for path in sys.argv[1:]:
if path in skip:
continue
if any(path.startswith(p) for p in prune):
continue
with open(path, 'r') as path_fd:
future_ok = None
metaclass_ok = None
lines = path_fd.read().splitlines()
if not lines:
continue
for line, text in enumerate(lines):
if text in ('from __future__ import (absolute_import, division, print_function)',
'from __future__ import absolute_import, division, print_function'):
future_ok = line
if text == '__metaclass__ = type':
metaclass_ok = line
                if future_ok is not None and metaclass_ok is not None:
break
if future_ok is None:
print('%s: missing: from __future__ import (absolute_import, division, print_function)' % path)
if metaclass_ok is None:
print('%s: missing: __metaclass__ = type' % path)
if __name__ == '__main__':
main()
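# For reference, a minimal file that satisfies this check would carry exactly
# the two lines searched for above (a sketch, mirroring the strings matched
# in main()):
#
#   from __future__ import (absolute_import, division, print_function)
#   __metaclass__ = type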
|
Endika/website-addons | refs/heads/8.0 | website_sale_order/__openerp__.py | 5 | {
    'name': 'Simplified website checkout',
    'version': '1.0.0',
    'author': 'IT-Projects LLC, Ivan Yelizariev',
    'license': 'GPL-3',
    'category': 'Sale',
    'website': 'https://yelizariev.github.io',
    'depends': [
        'website_sale',
    ],
    'data': [
        'website_sale_order_templates.xml',
    ],
    'installable': True,
}
|
nnupurjoshii/scrapping_task | refs/heads/master | runner.py | 1 | import requests
import redis
import time
import bs4
import json
from constants import REDIS_KEY,JSON_URL
# This is the scraper worker: it periodically fetches content and stores the
# parsed listing in Redis.
class Scrapper(object):
    def __init__(self, interval, url, scrapper, json=True):
        self.interval = interval
        self.url = url
        self.json = json
        self.redis = redis.Redis()  # going with the default host/port
        self.scrapper = scrapper
def get_page(self,params=None):
# this can be used in the future for more interpolated urls
        params = {} if params is None else params
params["cat"] = "G"
url = self.url.format(**params)
print("getting content from {}".format(url))
response = requests.get(url)
return response.json() if self.json else response.content
def run(self):
while True:
print('sleeping for {} s.'.format(self.interval))
time.sleep(self.interval)
content = self.get_page()
listing = self.scrap(content)
try:
self.send_to_redis(listing)
except Exception as e:
print("error occured while sending data",e)
    def scrap(self, content):
        return self.scrapper(content)

    def send_to_redis(self, listing):
        print("sending data to redis at key {}".format(REDIS_KEY))
        self.redis.set(REDIS_KEY, json.dumps(listing))
def jsonScrapper(json):
return json["data"]
if __name__ =="__main__":
    scrapper = Scrapper(5, JSON_URL, jsonScrapper)
scrapper.run()
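# A consumer-side sketch (an assumption; no such reader ships with this
# script): whatever process serves the scraped data would load it back from
# the same Redis key this worker writes.
#
#   import json
#   import redis
#   from constants import REDIS_KEY
#
#   def load_listing():
#       raw = redis.Redis().get(REDIS_KEY)
#       return json.loads(raw) if raw is not None else []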
|
xuxiao19910803/edx | refs/heads/master | common/djangoapps/cache_toolbox/middleware.py | 211 | """
Cache-backed ``AuthenticationMiddleware``
-----------------------------------------
``CacheBackedAuthenticationMiddleware`` is an
``django.contrib.auth.middleware.AuthenticationMiddleware`` replacement to
avoid querying the database for a ``User`` instance in each request.
Whilst the built-in ``AuthenticationMiddleware`` mechanism will only obtain the
``User`` instance when it is required, the vast majority of sites will do so on
every page to render "Logged in as 'X'" text as well as to evaluate the result
of ``user.is_authenticated()`` and ``user.is_superuser`` to provide conditional
functionality.
This middleware eliminates the cost of retrieving this ``User`` instance by
caching it using the ``cache_toolbox`` instance caching mechanisms.
Depending on your average number of queries per page, saving one query per
request can---in aggregate---reduce load on your database. In addition,
avoiding the database entirely for pages can avoid incurring any connection
latency in your environment, resulting in faster page loads for your users.
Saving this data in the cache can also be used as a way of authenticating users
in systems outside of Django that should not access your database. For
example, a "maintenance mode" page would be able to render a personalised
message without touching the database at all but rather authenticating via the
cache.
``CacheBackedAuthenticationMiddleware`` is ``AUTHENTICATION_BACKENDS`` agnostic.
Implementation
~~~~~~~~~~~~~~
The cache and session backends are still accessed on each request - we are
simply assuming that they are cheaper (or otherwise more preferable) to access
than your database. (In the future, signed cookies may allow us to avoid this
lookup altogether -- whilst we could not safely save ``User.password`` in a
cookie, we could use delayed loading to pull it out when needed.)
Another alternative solution would be to store the attributes in the user's
session instead of in the cache. This would save the cache hit on every request
as all the relevant data would be pulled in one go from the session backend.
However, this has two main disadvantages:
* Session keys are not deterministic -- after making changes to an
``auth_user`` row in the database, you cannot determine the user's session
key to flush the now out-of-sync data (and doing so would log them out
anyway).
* Stores data per-session rather than per-user -- if a user logs in from
multiple computers the data is duplicated in each session. This problem is
compounded by most projects wishing to avoid expiring session data as long
as possible (in addition to storing sessions in persistent stores).
Usage
~~~~~
To use, find ``MIDDLEWARE_CLASSES`` in your ``settings.py`` and replace::
MIDDLEWARE_CLASSES = [
...
'django.contrib.auth.middleware.AuthenticationMiddleware',
...
]
with::
MIDDLEWARE_CLASSES = [
...
'cache_toolbox.middleware.CacheBackedAuthenticationMiddleware',
...
]
You should confirm you are using a ``SESSION_ENGINE`` that doesn't query the
database for each request. The built-in ``cached_db`` engine is the safest
choice for most environments but you may be happy with the trade-offs of the
``memcached`` backend - see the Django documentation for more details.
"""
from django.contrib.auth import SESSION_KEY
from django.contrib.auth.models import User
from django.contrib.auth.middleware import AuthenticationMiddleware
from .model import cache_model
class CacheBackedAuthenticationMiddleware(AuthenticationMiddleware):
def __init__(self):
cache_model(User)
def process_request(self, request):
try:
# Try and construct a User instance from data stored in the cache
request.user = User.get_cached(request.session[SESSION_KEY])
        except Exception:
# Fallback to constructing the User from the database.
super(CacheBackedAuthenticationMiddleware, self).process_request(request)
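# A minimal sketch of the accessor this middleware depends on (hedged: only
# ``get_cached`` is known from the code above; the invalidation behaviour is
# inferred from the module docstring):
#
#   user = User.get_cached(42)   # keyed by primary key, so a warm cache
#                                # means no database query
#
# Because the cache key is derived from the primary key rather than from a
# session key, a post-save hook can deterministically refresh or flush the
# cached row. That is exactly the property the session-storage alternative
# discussed in the docstring lacks.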
|
LeonardCohen/coding | refs/heads/master | py/4.2_Delegation_Iteration.py | 1 | class Node:
def __init__(self, value):
self._value = value
self._children = []
def __repr__(self):
return 'Node({!r})'.format(self._value)
def add_child(self, node):
self._children.append(node)
def __iter__(self):
return iter(self._children)
if __name__ == '__main__':
root = Node(0)
child1 = Node(1)
child2 = Node(2)
root.add_child(child1)
root.add_child(child2)
for ch in root:
print(ch)
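# Because iteration is delegated to the children list, Node works with any
# consumer of the iteration protocol, not just for-loops. An illustrative
# follow-on (not part of the original recipe):
#
#   list(root)                          # -> [Node(1), Node(2)]
#   any(ch._value == 2 for ch in root)  # -> True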
|
osh/gnuradio | refs/heads/master | gr-analog/python/analog/am_demod.py | 58 | #
# Copyright 2006,2007,2012 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr
from gnuradio import blocks
from gnuradio import filter
class am_demod_cf(gr.hier_block2):
"""
Generalized AM demodulation block with audio filtering.
    This block demodulates a band-limited, complex down-converted AM
    channel into the original baseband signal, applying low pass
    filtering to the audio output. It produces a float stream in the
    range [-1.0, +1.0].
Args:
channel_rate: incoming sample rate of the AM baseband (integer)
audio_decim: input to output decimation rate (integer)
audio_pass: audio low pass filter passband frequency (float)
audio_stop: audio low pass filter stop frequency (float)
"""
def __init__(self, channel_rate, audio_decim, audio_pass, audio_stop):
        gr.hier_block2.__init__(self, "am_demod_cf",
                                gr.io_signature(1, 1, gr.sizeof_gr_complex), # Input signature
                                gr.io_signature(1, 1, gr.sizeof_float))      # Output signature
MAG = blocks.complex_to_mag()
DCR = blocks.add_const_ff(-1.0)
audio_taps = filter.optfir.low_pass(0.5, # Filter gain
channel_rate, # Sample rate
audio_pass, # Audio passband
audio_stop, # Audio stopband
0.1, # Passband ripple
60) # Stopband attenuation
LPF = filter.fir_filter_fff(audio_decim, audio_taps)
self.connect(self, MAG, DCR, LPF, self)
class demod_10k0a3e_cf(am_demod_cf):
"""
    AM demodulation block, 10 kHz channel.
This block demodulates an AM channel conformant to 10K0A3E emission
standards, such as broadcast band AM transmissions.
Args:
channel_rate: incoming sample rate of the AM baseband (integer)
audio_decim: input to output decimation rate (integer)
"""
def __init__(self, channel_rate, audio_decim):
am_demod_cf.__init__(self, channel_rate, audio_decim,
5000, # Audio passband
5500) # Audio stopband
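# A usage sketch (an assumption; not shipped with this module): dropping the
# broadcast-band demodulator into a flowgraph between an already-tuned complex
# channel source and an audio sink. ``channel_src`` and the sample rates are
# illustrative.
#
#   from gnuradio import gr, audio, analog
#
#   tb = gr.top_block()
#   demod = analog.demod_10k0a3e_cf(channel_rate=256000, audio_decim=8)
#   tb.connect(channel_src, demod, audio.sink(32000))
#   tb.run()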
|
google-research/google-research | refs/heads/master | hyperbolic/models/__init__.py | 1 | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""CF embedding models."""
from hyperbolic.models.euclidean import DistE
from hyperbolic.models.euclidean import OperEmbE
from hyperbolic.models.euclidean import SMFactor
from hyperbolic.models.hyperbolic import DistH
from hyperbolic.models.hyperbolic import OperEmbH
|
asrashley/django-addressbook | refs/heads/master | demo/urls.py | 1 | from django.conf.urls.defaults import *
from django.conf import settings
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
# Example:
# (r'^intranet/', include('intranet.foo.urls')),
(r'^$', 'views.index_view'),
# login page
url(r'^login/$', 'django.contrib.auth.views.login', name='login'),
# logout page
(r'^logout/$', 'django.contrib.auth.views.logout'),
(r'^addressbook/', include('addressbook.urls')),
)
if settings.DEV_SITE:
urlpatterns += patterns('',
(r'^media/(?P<path>.*)$', 'django.views.static.serve', {'document_root': settings.MEDIA_ROOT}),
)
|
inyaka/floro | refs/heads/master | companies/tests.py | 24123 | from django.test import TestCase
# Create your tests here.
|
sysadmin75/ansible | refs/heads/devel | lib/ansible/utils/unsafe_proxy.py | 15 | # PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2
# --------------------------------------------
#
# 1. This LICENSE AGREEMENT is between the Python Software Foundation
# ("PSF"), and the Individual or Organization ("Licensee") accessing and
# otherwise using this software ("Python") in source or binary form and
# its associated documentation.
#
# 2. Subject to the terms and conditions of this License Agreement, PSF hereby
# grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce,
# analyze, test, perform and/or display publicly, prepare derivative works,
# distribute, and otherwise use Python alone or in any derivative version,
# provided, however, that PSF's License Agreement and PSF's notice of copyright,
# i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
# 2011, 2012, 2013, 2014 Python Software Foundation; All Rights Reserved" are
# retained in Python alone or in any derivative version prepared by Licensee.
#
# 3. In the event Licensee prepares a derivative work that is based on
# or incorporates Python or any part thereof, and wants to make
# the derivative work available to others as provided herein, then
# Licensee hereby agrees to include in any such work a brief summary of
# the changes made to Python.
#
# 4. PSF is making Python available to Licensee on an "AS IS"
# basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
# IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND
# DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
# FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT
# INFRINGE ANY THIRD PARTY RIGHTS.
#
# 5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON
# FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS
# A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON,
# OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
#
# 6. This License Agreement will automatically terminate upon a material
# breach of its terms and conditions.
#
# 7. Nothing in this License Agreement shall be deemed to create any
# relationship of agency, partnership, or joint venture between PSF and
# Licensee. This License Agreement does not grant permission to use PSF
# trademarks or trade name in a trademark sense to endorse or promote
# products or services of Licensee, or any third party.
#
# 8. By copying, installing or otherwise using Python, Licensee
# agrees to be bound by the terms and conditions of this License
# Agreement.
#
# Original Python Recipe for Proxy:
# http://code.activestate.com/recipes/496741-object-proxying/
# Author: Tomer Filiba
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.module_utils._text import to_bytes, to_text
from ansible.module_utils.common._collections_compat import Mapping, Set
from ansible.module_utils.common.collections import is_sequence
from ansible.module_utils.six import string_types, binary_type, text_type
__all__ = ['AnsibleUnsafe', 'wrap_var']
class AnsibleUnsafe(object):
__UNSAFE__ = True
class AnsibleUnsafeBytes(binary_type, AnsibleUnsafe):
def decode(self, *args, **kwargs):
"""Wrapper method to ensure type conversions maintain unsafe context"""
return AnsibleUnsafeText(super(AnsibleUnsafeBytes, self).decode(*args, **kwargs))
class AnsibleUnsafeText(text_type, AnsibleUnsafe):
def encode(self, *args, **kwargs):
"""Wrapper method to ensure type conversions maintain unsafe context"""
return AnsibleUnsafeBytes(super(AnsibleUnsafeText, self).encode(*args, **kwargs))
class UnsafeProxy(object):
def __new__(cls, obj, *args, **kwargs):
from ansible.utils.display import Display
Display().deprecated(
'UnsafeProxy is being deprecated. Use wrap_var or AnsibleUnsafeBytes/AnsibleUnsafeText directly instead',
version='2.13'
)
# In our usage we should only receive unicode strings.
# This conditional and conversion exists to sanity check the values
# we're given but we may want to take it out for testing and sanitize
# our input instead.
if isinstance(obj, AnsibleUnsafe):
return obj
if isinstance(obj, string_types):
obj = AnsibleUnsafeText(to_text(obj, errors='surrogate_or_strict'))
return obj
def _wrap_dict(v):
return dict((wrap_var(k), wrap_var(item)) for k, item in v.items())
def _wrap_sequence(v):
"""Wraps a sequence with unsafe, not meant for strings, primarily
``tuple`` and ``list``
"""
v_type = type(v)
return v_type(wrap_var(item) for item in v)
def _wrap_set(v):
return set(wrap_var(item) for item in v)
def wrap_var(v):
if v is None or isinstance(v, AnsibleUnsafe):
return v
if isinstance(v, Mapping):
v = _wrap_dict(v)
elif isinstance(v, Set):
v = _wrap_set(v)
elif is_sequence(v):
v = _wrap_sequence(v)
elif isinstance(v, binary_type):
v = AnsibleUnsafeBytes(v)
elif isinstance(v, text_type):
v = AnsibleUnsafeText(v)
return v
def to_unsafe_bytes(*args, **kwargs):
return wrap_var(to_bytes(*args, **kwargs))
def to_unsafe_text(*args, **kwargs):
return wrap_var(to_text(*args, **kwargs))
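# An illustrative sketch of wrap_var's behaviour (not part of the module):
# containers are rebuilt recursively so every string inside comes back tagged
# as unsafe, while the container types themselves are preserved.
#
#   wrapped = wrap_var({'cmd': ['echo', '{{ not_templated }}']})
#   assert isinstance(wrapped['cmd'][1], AnsibleUnsafeText)
#   assert isinstance(wrapped, dict) and isinstance(wrapped['cmd'], list)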
|