repo_name (stringlengths 6–100) | path (stringlengths 4–294) | copies (stringlengths 1–5) | size (stringlengths 4–6) | content (stringlengths 606–896k) | license (stringclasses, 15 values)
---|---|---|---|---|---
sjlehtin/django
|
tests/foreign_object/test_empty_join.py
|
61
|
1476
|
from django.test import TestCase
from .models import SlugPage
class RestrictedConditionsTests(TestCase):
def setUp(self):
slugs = [
'a',
'a/a',
'a/b',
'a/b/a',
'x',
'x/y/z',
]
SlugPage.objects.bulk_create([SlugPage(slug=slug) for slug in slugs])
def test_restrictions_with_no_joining_columns(self):
"""
It's possible to create a working related field that doesn't
use any joining columns, as long as an extra restriction is supplied.
"""
a = SlugPage.objects.get(slug='a')
self.assertEqual(
[p.slug for p in SlugPage.objects.filter(ascendants=a)],
['a', 'a/a', 'a/b', 'a/b/a'],
)
self.assertEqual(
[p.slug for p in a.descendants.all()],
['a', 'a/a', 'a/b', 'a/b/a'],
)
aba = SlugPage.objects.get(slug='a/b/a')
self.assertEqual(
[p.slug for p in SlugPage.objects.filter(descendants__in=[aba])],
['a', 'a/b', 'a/b/a'],
)
self.assertEqual(
[p.slug for p in aba.ascendants.all()],
['a', 'a/b', 'a/b/a'],
)
def test_empty_join_conditions(self):
x = SlugPage.objects.get(slug='x')
message = "Join generated an empty ON clause."
with self.assertRaisesMessage(ValueError, message):
list(SlugPage.objects.filter(containers=x))
|
bsd-3-clause
|
lombritz/odoo
|
addons/report_webkit/header.py
|
342
|
4626
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2010 Camptocamp SA (http://www.camptocamp.com)
# All Right Reserved
#
# Author : Nicolas Bessi (Camptocamp)
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs.
# End users who are looking for a ready-to-use solution with commercial
# guarantees and support are strongly advised to contract a Free Software
# Service Company.
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
##############################################################################
from openerp.osv import fields, osv
class HeaderHTML(osv.osv):
"""HTML Header allows you to define HTML CSS and Page format"""
_name = "ir.header_webkit"
_columns = {
'company_id' : fields.many2one('res.company', 'Company'),
'html' : fields.text('webkit header', help="Set Webkit Report Header"),
'footer_html' : fields.text('webkit footer', help="Set Webkit Report Footer."),
'css' : fields.text('Header CSS'),
'name' : fields.char('Name', required=True),
'margin_top' : fields.float('Top Margin (mm)'),
'margin_bottom' : fields.float('Bottom Margin (mm)'),
'margin_left' : fields.float('Left Margin (mm)'),
'margin_right' : fields.float('Right Margin (mm)'),
'orientation' : fields.selection(
[('Landscape','Landscape'),('Portrait', 'Portrait')],
'Orientation',
),
'format': fields.selection(
[
('A0' ,'A0 5 841 x 1189 mm'),
('A1' ,'A1 6 594 x 841 mm'),
('A2' ,'A2 7 420 x 594 mm'),
('A3' ,'A3 8 297 x 420 mm'),
('A4' ,'A4 0 210 x 297 mm, 8.26 x 11.69 inches'),
('A5' ,'A5 9 148 x 210 mm'),
('A6' ,'A6 10 105 x 148 mm'),
('A7' ,'A7 11 74 x 105 mm'),
('A8' ,'A8 12 52 x 74 mm'),
('A9' ,'A9 13 37 x 52 mm'),
('B0' ,'B0 14 1000 x 1414 mm'),
('B1' ,'B1 15 707 x 1000 mm'),
('B2' ,'B2 17 500 x 707 mm'),
('B3' ,'B3 18 353 x 500 mm'),
('B4' ,'B4 19 250 x 353 mm'),
('B5' ,'B5 1 176 x 250 mm, 6.93 x 9.84 inches'),
('B6' ,'B6 20 125 x 176 mm'),
('B7' ,'B7 21 88 x 125 mm'),
('B8' ,'B8 22 62 x 88 mm'),
('B9' ,'B9 23 44 x 62 mm'),
('B10','B10 16 31 x 44 mm'),
('C5E','C5E 24 163 x 229 mm'),
('Comm10E','Comm10E 25 105 x 241 mm, U.S. Common 10 Envelope'),
('DLE', 'DLE 26 110 x 220 mm'),
('Executive','Executive 4 7.5 x 10 inches, 190.5 x 254 mm'),
('Folio','Folio 27 210 x 330 mm'),
('Ledger', 'Ledger 28 431.8 x 279.4 mm'),
('Legal', 'Legal 3 8.5 x 14 inches, 215.9 x 355.6 mm'),
('Letter','Letter 2 8.5 x 11 inches, 215.9 x 279.4 mm'),
('Tabloid', 'Tabloid 29 279.4 x 431.8 mm'),
],
'Paper size',
required=True,
help="Select Proper Paper size"
)
}
class HeaderImage(osv.osv):
"""Logo allows you to define multiple logo per company"""
_name = "ir.header_img"
_columns = {
'company_id' : fields.many2one('res.company', 'Company'),
'img' : fields.binary('Image'),
'name' : fields.char('Name', required=True, help="Name of Image"),
'type' : fields.char('Type', required=True, help="Image type (png, gif, jpeg)")
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
OpenAcademy-OpenStack/nova-scheduler
|
nova/version.py
|
15
|
2486
|
# Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import pbr.version
from nova.openstack.common.gettextutils import _
NOVA_VENDOR = "OpenStack Foundation"
NOVA_PRODUCT = "OpenStack Nova"
NOVA_PACKAGE = None # OS distro package version suffix
loaded = False
version_info = pbr.version.VersionInfo('nova')
version_string = version_info.version_string
def _load_config():
# Don't load in global context, since we can't assume
# these modules are accessible when distutils uses
# this module
import ConfigParser
from oslo.config import cfg
from nova.openstack.common import log as logging
global loaded, NOVA_VENDOR, NOVA_PRODUCT, NOVA_PACKAGE
if loaded:
return
loaded = True
cfgfile = cfg.CONF.find_file("release")
if cfgfile is None:
return
try:
cfg = ConfigParser.RawConfigParser()
cfg.read(cfgfile)
# Only override the defaults when the option is actually present;
# an unconditional cfg.get() would raise NoOptionError for missing
# options and defeat the has_option() guards below.
if cfg.has_option("Nova", "vendor"):
NOVA_VENDOR = cfg.get("Nova", "vendor")
if cfg.has_option("Nova", "product"):
NOVA_PRODUCT = cfg.get("Nova", "product")
if cfg.has_option("Nova", "package"):
NOVA_PACKAGE = cfg.get("Nova", "package")
except Exception as ex:
LOG = logging.getLogger(__name__)
LOG.error(_("Failed to load %(cfgfile)s: %(ex)s"),
{'cfgfile': cfgfile, 'ex': ex})
def vendor_string():
_load_config()
return NOVA_VENDOR
def product_string():
_load_config()
return NOVA_PRODUCT
def package_string():
_load_config()
return NOVA_PACKAGE
def version_string_with_package():
if package_string() is None:
return version_info.version_string()
else:
return "%s-%s" % (version_info.version_string(), package_string())
|
apache-2.0
|
was4444/chromium.src
|
chrome/browser/resources/chromeos/chromevox/tools/jscompilerwrapper.py
|
42
|
2419
|
#!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''Uses the closure compiler to check syntax and semantics of a js module
with dependencies.'''
import os
import re
import subprocess
import sys
_SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
_CHROME_SOURCE_DIR = os.path.normpath(
os.path.join(
_SCRIPT_DIR, *[os.path.pardir] * 6))
# Compiler path.
_CLOSURE_COMPILER_JAR = os.path.join(
_CHROME_SOURCE_DIR, 'third_party', 'closure_compiler', 'compiler',
'compiler.jar')
# List of compilation errors to enable with the --jscomp_errors flag.
_JSCOMP_ERRORS = [ 'accessControls', 'checkTypes', 'checkVars', 'invalidCasts',
'missingProperties', 'undefinedNames', 'undefinedVars',
'visibility' ]
_java_executable = 'java'
def _Error(msg):
print >>sys.stderr, msg
sys.exit(1)
def _ExecuteCommand(args, ignore_exit_status=False):
try:
return subprocess.check_output(args, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
if ignore_exit_status and e.returncode > 0:
return e.output
_Error('%s\nCommand \'%s\' returned non-zero exit status %d' %
(e.output, ' '.join(e.cmd), e.returncode))
except (OSError, IOError) as e:
_Error('Error executing %s: %s' % (_java_executable, str(e)))
def _CheckJava():
global _java_executable
java_home = os.environ.get('JAVAHOME')
if java_home is not None:
_java_executable = os.path.join(java_home, 'bin', 'java')
output = _ExecuteCommand([_java_executable, '-version'])
match = re.search(r'version "(?:\d+)\.(\d+)', output)
if match is None or int(match.group(1)) < 7:
_Error('Java 7 or later is required: \n%s' % output)
_CheckJava()
def RunCompiler(js_files, externs=[]):
args = [_java_executable, '-jar', _CLOSURE_COMPILER_JAR]
args.extend(['--compilation_level', 'SIMPLE_OPTIMIZATIONS'])
args.extend(['--jscomp_error=%s' % error for error in _JSCOMP_ERRORS])
args.extend(['--language_in', 'ECMASCRIPT5'])
args.extend(['--externs=%s' % extern for extern in externs])
args.extend(['--js=%s' % js for js in js_files])
args.extend(['--js_output_file', '/dev/null'])
output = _ExecuteCommand(args, ignore_exit_status=True)
success = len(output) == 0
return success, output
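# Usage sketch (the file paths are hypothetical, not part of this module):
# success, output = RunCompiler(['cvox/main.js'], externs=['externs/chrome.js'])
# if not success:
#     print >>sys.stderr, output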
|
bsd-3-clause
|
davidwilson-85/easymap
|
scripts_snp/af-comparison.py
|
1
|
3689
|
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-f2_mut', action="store", dest = 'input_mut')
parser.add_argument('-f2_wt', action="store", dest = 'input_wt')
parser.add_argument('-out', action="store", dest = 'output')
parser.add_argument('-f_input', action="store", dest = 'f_input')
parser.add_argument('-step', action="store", dest = 'step')
args = parser.parse_args()
#Input
input1 = args.input_mut
f1 = open(input1, 'r')
mut_lines = f1.readlines()
input2 = args.input_wt
f2 = open(input2, 'r')
wt_lines = f2.readlines()
f_input = args.f_input
step = int(args.step)
#Output
output = args.output
f3 = open(output, 'w')
# This generator parses a FASTA file and yields (name, sequence) tuples, giving access to the chromosome/contig names and their lengths
def read_fasta(fp):
name, seq = None, []
for line in fp:
line = line.rstrip()
if line.startswith('>'):
if name: yield (name, ''.join(seq))
name, seq = line, []
else:
seq.append(line)
if name: yield (name, ''.join(seq))
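# Usage sketch for read_fasta (the file name is hypothetical): the generator
# yields one ('>name', sequence) tuple per FASTA record, so contig lengths
# can be collected as:
# with open('genome.fa') as fp:
#     lengths = dict((name[1:], len(seq)) for name, seq in read_fasta(fp))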
ch = list()
if step == 1 or step == 2:
# From the data of read_fasta, collect the names of the contigs into a list
with open(f_input) as fp:
for name_contig in read_fasta(fp):
ch.append(name_contig[0][1:])
for chr in ch:
dic_mut = {}
dic_wt = {}
list_mut = list()
list_wt = list()
for i, line in enumerate(mut_lines):
if not line.startswith('#'):
sp = line.split('\t')
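# Allele frequency of the variant; this assumes the 6th and 7th tab-separated
# columns (sp[5], sp[6]) hold the reference and alternative read counts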
AF = float(float(sp[6])/(float(sp[6]) + float(sp[5])))
if chr == sp[0] and AF > 0.85:
dic_mut[sp[1]] = [sp[2], sp[3], sp[4], sp[5], sp[6].strip('\n')]
list_mut.append(sp[1])
for i, line in enumerate(wt_lines):
if not line.startswith('#'):
sp = line.split('\t')
if chr == sp[0]:
dic_wt[sp[1]] = [sp[2], sp[3], sp[4], sp[5], sp[6].strip('\n')]
list_wt.append(sp[1])
set_2 = frozenset(list_mut)
intersection = [x for x in list_wt if x in set_2]
for i in intersection:
if step == 1:
f3.write( str(chr) + '\t' + str(i) + '\t' + str(dic_mut[i][0]) +'\t' + str(dic_mut[i][1]) +'\t' + str(dic_mut[i][2]) + '\t' + str(dic_wt[i][3]) +'\t' + str(dic_wt[i][4]) + '\t' + str(dic_mut[i][3]) + '\t' + str(dic_mut[i][4]) + '\n')
if step == 2:
f3.write( str(chr) + '\t' + str(i) + '\t' + str(dic_mut[i][0]) +'\t' + str(dic_mut[i][1]) +'\t' + str(dic_mut[i][2]) + '\t' + str(dic_mut[i][3]) + '\t' + str(dic_mut[i][4]) + '\t' + str(dic_wt[i][3]) +'\t' + str(dic_wt[i][4]) + '\n')
if step == 3:
# From the data of read_fasta, collect the names of the contigs into a list
with open(f_input) as fp:
for name_contig in read_fasta(fp):
ch.append(name_contig[0][1:])
for chr in ch:
dic_mut = {}
dic_wt = {}
list_mut = list()
list_wt = list()
for i, line in enumerate(mut_lines):
if not line.startswith('#'):
sp = line.split('\t')
AF = float(float(sp[6])/(float(sp[6]) + float(sp[5])))
if chr == sp[0] and AF > 0.2 and AF < 0.85:
dic_mut[sp[1]] = [sp[2], sp[3], sp[4], sp[5], sp[6].strip('\n')]
list_mut.append(sp[1])
for i, line in enumerate(wt_lines):
if not line.startswith('#'):
sp = line.split('\t')
AF = float(float(sp[6])/(float(sp[6]) + float(sp[5])))
if chr == sp[0] and AF > 0.2 and AF < 0.85:
dic_wt[sp[1]] = [sp[2], sp[3], sp[4], sp[5], sp[6].strip('\n')]
list_wt.append(sp[1])
set_2 = frozenset(list_mut)
intersection = [x for x in list_wt if x in set_2]
for i in intersection:
f3.write( str(chr) + '\t' + str(i) + '\t' + str(dic_mut[i][0]) +'\t' + str(dic_mut[i][1]) +'\t' + str(dic_mut[i][2]) + '\t' + str(dic_mut[i][3]) + '\t' + str(dic_mut[i][4]) + '\t' + str(dic_wt[i][3]) +'\t' + str(dic_wt[i][4]) + '\n')
f3.close()
|
gpl-3.0
|
rexshihaoren/scikit-learn
|
doc/sphinxext/gen_rst.py
|
142
|
40026
|
"""
Example generation for scikit-learn.
Generate the rst files for the examples by iterating over the Python
example files.
Files that generate images should start with 'plot'.
"""
from __future__ import division, print_function
from time import time
import ast
import os
import re
import shutil
import traceback
import glob
import sys
import gzip
import posixpath
import subprocess
import warnings
from sklearn.externals import six
# Try Python 2 first, otherwise load from Python 3
try:
from StringIO import StringIO
import cPickle as pickle
import urllib2 as urllib
from urllib2 import HTTPError, URLError
except ImportError:
from io import StringIO
import pickle
import urllib.request
import urllib.error
import urllib.parse
from urllib.error import HTTPError, URLError
try:
# Python 2 built-in
execfile
except NameError:
def execfile(filename, global_vars=None, local_vars=None):
with open(filename, encoding='utf-8') as f:
code = compile(f.read(), filename, 'exec')
exec(code, global_vars, local_vars)
try:
basestring
except NameError:
basestring = str
import token
import tokenize
import numpy as np
try:
# make sure that the Agg backend is set before importing any
# matplotlib
import matplotlib
matplotlib.use('Agg')
except ImportError:
# this script can be imported by nosetest to find tests to run: we should not
# impose the matplotlib requirement in that case.
pass
from sklearn.externals import joblib
###############################################################################
# A tee object to redirect streams to multiple outputs
class Tee(object):
def __init__(self, file1, file2):
self.file1 = file1
self.file2 = file2
def write(self, data):
self.file1.write(data)
self.file2.write(data)
def flush(self):
self.file1.flush()
self.file2.flush()
###############################################################################
# Documentation link resolver objects
def _get_data(url):
"""Helper function to get data over http or from a local file"""
if url.startswith('http://'):
# Try Python 2, use Python 3 on exception
try:
resp = urllib.urlopen(url)
encoding = resp.headers.dict.get('content-encoding', 'plain')
except AttributeError:
resp = urllib.request.urlopen(url)
encoding = resp.headers.get('content-encoding', 'plain')
data = resp.read()
if encoding == 'plain':
pass
elif encoding == 'gzip':
data = StringIO(data)
data = gzip.GzipFile(fileobj=data).read()
else:
raise RuntimeError('unknown encoding')
else:
with open(url, 'r') as fid:
data = fid.read()
fid.close()
return data
mem = joblib.Memory(cachedir='_build')
get_data = mem.cache(_get_data)
def parse_sphinx_searchindex(searchindex):
"""Parse a Sphinx search index
Parameters
----------
searchindex : str
The Sphinx search index (contents of searchindex.js)
Returns
-------
filenames : list of str
The file names parsed from the search index.
objects : dict
The objects parsed from the search index.
"""
def _select_block(str_in, start_tag, end_tag):
"""Select first block delimited by start_tag and end_tag"""
start_pos = str_in.find(start_tag)
if start_pos < 0:
raise ValueError('start_tag not found')
depth = 0
for pos in range(start_pos, len(str_in)):
if str_in[pos] == start_tag:
depth += 1
elif str_in[pos] == end_tag:
depth -= 1
if depth == 0:
break
sel = str_in[start_pos + 1:pos]
return sel
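# For instance (illustrative input), _select_block('a{b{c}d}e', '{', '}')
# tracks the nesting depth and returns the inner text 'b{c}d'.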
def _parse_dict_recursive(dict_str):
"""Parse a dictionary from the search index"""
dict_out = dict()
pos_last = 0
pos = dict_str.find(':')
while pos >= 0:
key = dict_str[pos_last:pos]
if dict_str[pos + 1] == '[':
# value is a list
pos_tmp = dict_str.find(']', pos + 1)
if pos_tmp < 0:
raise RuntimeError('error when parsing dict')
value = dict_str[pos + 2: pos_tmp].split(',')
# try to convert elements to int
for i in range(len(value)):
try:
value[i] = int(value[i])
except ValueError:
pass
elif dict_str[pos + 1] == '{':
# value is another dictionary
subdict_str = _select_block(dict_str[pos:], '{', '}')
value = _parse_dict_recursive(subdict_str)
pos_tmp = pos + len(subdict_str)
else:
raise ValueError('error when parsing dict: unknown elem')
key = key.strip('"')
if len(key) > 0:
dict_out[key] = value
pos_last = dict_str.find(',', pos_tmp)
if pos_last < 0:
break
pos_last += 1
pos = dict_str.find(':', pos_last)
return dict_out
# Make sure searchindex uses UTF-8 encoding
if hasattr(searchindex, 'decode'):
searchindex = searchindex.decode('UTF-8')
# parse objects
query = 'objects:'
pos = searchindex.find(query)
if pos < 0:
raise ValueError('"objects:" not found in search index')
sel = _select_block(searchindex[pos:], '{', '}')
objects = _parse_dict_recursive(sel)
# parse filenames
query = 'filenames:'
pos = searchindex.find(query)
if pos < 0:
raise ValueError('"filenames:" not found in search index')
filenames = searchindex[pos + len(query) + 1:]
filenames = filenames[:filenames.find(']')]
filenames = [f.strip('"') for f in filenames.split(',')]
return filenames, objects
class SphinxDocLinkResolver(object):
""" Resolve documentation links using searchindex.js generated by Sphinx
Parameters
----------
doc_url : str
The base URL of the project website.
searchindex : str
Filename of searchindex, relative to doc_url.
extra_modules_test : list of str
List of extra module names to test.
relative : bool
Return relative links (only useful for links to documentation of this
package).
"""
def __init__(self, doc_url, searchindex='searchindex.js',
extra_modules_test=None, relative=False):
self.doc_url = doc_url
self.relative = relative
self._link_cache = {}
self.extra_modules_test = extra_modules_test
self._page_cache = {}
if doc_url.startswith('http://'):
if relative:
raise ValueError('Relative links are only supported for local '
'URLs (doc_url cannot start with "http://)"')
searchindex_url = doc_url + '/' + searchindex
else:
searchindex_url = os.path.join(doc_url, searchindex)
# detect if we are using relative links on a Windows system
if os.name.lower() == 'nt' and not doc_url.startswith('http://'):
if not relative:
raise ValueError('You have to use relative=True for the local'
' package on a Windows system.')
self._is_windows = True
else:
self._is_windows = False
# download and initialize the search index
sindex = get_data(searchindex_url)
filenames, objects = parse_sphinx_searchindex(sindex)
self._searchindex = dict(filenames=filenames, objects=objects)
def _get_link(self, cobj):
"""Get a valid link, False if not found"""
fname_idx = None
full_name = cobj['module_short'] + '.' + cobj['name']
if full_name in self._searchindex['objects']:
value = self._searchindex['objects'][full_name]
if isinstance(value, dict):
value = value[next(iter(value.keys()))]
fname_idx = value[0]
elif cobj['module_short'] in self._searchindex['objects']:
value = self._searchindex['objects'][cobj['module_short']]
if cobj['name'] in value.keys():
fname_idx = value[cobj['name']][0]
if fname_idx is not None:
fname = self._searchindex['filenames'][fname_idx] + '.html'
if self._is_windows:
fname = fname.replace('/', '\\')
link = os.path.join(self.doc_url, fname)
else:
link = posixpath.join(self.doc_url, fname)
if hasattr(link, 'decode'):
link = link.decode('utf-8', 'replace')
if link in self._page_cache:
html = self._page_cache[link]
else:
html = get_data(link)
self._page_cache[link] = html
# test if cobj appears in page
comb_names = [cobj['module_short'] + '.' + cobj['name']]
if self.extra_modules_test is not None:
for mod in self.extra_modules_test:
comb_names.append(mod + '.' + cobj['name'])
url = False
if hasattr(html, 'decode'):
# Decode bytes under Python 3
html = html.decode('utf-8', 'replace')
for comb_name in comb_names:
if hasattr(comb_name, 'decode'):
# Decode bytes under Python 3
comb_name = comb_name.decode('utf-8', 'replace')
if comb_name in html:
url = link + u'#' + comb_name
link = url
else:
link = False
return link
def resolve(self, cobj, this_url):
"""Resolve the link to the documentation, returns None if not found
Parameters
----------
cobj : dict
Dict with information about the "code object" for which we are
resolving a link.
cobj['name'] : function or class name (str)
cobj['module_short'] : shortened module name (str)
cobj['module'] : module name (str)
this_url: str
URL of the current page. Needed to construct relative URLs
(only used if relative=True in constructor).
Returns
-------
link : str | None
The link (URL) to the documentation.
"""
full_name = cobj['module_short'] + '.' + cobj['name']
link = self._link_cache.get(full_name, None)
if link is None:
# we don't have it cached
link = self._get_link(cobj)
# cache it for the future
self._link_cache[full_name] = link
if link is False or link is None:
# failed to resolve
return None
if self.relative:
link = os.path.relpath(link, start=this_url)
if self._is_windows:
# replace '\' with '/' so the link works on the web
link = link.replace('\\', '/')
# for some reason, the relative link goes one directory too high up
link = link[3:]
return link
###############################################################################
rst_template = """
.. _example_%(short_fname)s:
%(docstring)s
**Python source code:** :download:`%(fname)s <%(fname)s>`
.. literalinclude:: %(fname)s
:lines: %(end_row)s-
"""
plot_rst_template = """
.. _example_%(short_fname)s:
%(docstring)s
%(image_list)s
%(stdout)s
**Python source code:** :download:`%(fname)s <%(fname)s>`
.. literalinclude:: %(fname)s
:lines: %(end_row)s-
**Total running time of the example:** %(time_elapsed) .2f seconds
(%(time_m) .0f minutes %(time_s) .2f seconds)
"""
# The following strings are used when we have several pictures: we use
# an html div tag that our CSS uses to turn the lists into horizontal
# lists.
HLIST_HEADER = """
.. rst-class:: horizontal
"""
HLIST_IMAGE_TEMPLATE = """
*
.. image:: images/%s
:scale: 47
"""
SINGLE_IMAGE = """
.. image:: images/%s
:align: center
"""
# The following dictionary contains the information used to create the
# thumbnails for the front page of the scikit-learn home page.
# key: first image in set
# values: (number of plot in set, height of thumbnail)
carousel_thumbs = {'plot_classifier_comparison_001.png': (1, 600),
'plot_outlier_detection_001.png': (3, 372),
'plot_gp_regression_001.png': (2, 250),
'plot_adaboost_twoclass_001.png': (1, 372),
'plot_compare_methods_001.png': (1, 349)}
def extract_docstring(filename, ignore_heading=False):
""" Extract a module-level docstring, if any
"""
if six.PY2:
lines = open(filename).readlines()
else:
lines = open(filename, encoding='utf-8').readlines()
start_row = 0
if lines[0].startswith('#!'):
lines.pop(0)
start_row = 1
docstring = ''
first_par = ''
line_iterator = iter(lines)
tokens = tokenize.generate_tokens(lambda: next(line_iterator))
for tok_type, tok_content, _, (erow, _), _ in tokens:
tok_type = token.tok_name[tok_type]
if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):
continue
elif tok_type == 'STRING':
docstring = eval(tok_content)
# If the docstring is formatted with several paragraphs, extract
# the first one:
paragraphs = '\n'.join(
line.rstrip() for line
in docstring.split('\n')).split('\n\n')
if paragraphs:
if ignore_heading:
if len(paragraphs) > 1:
first_par = re.sub('\n', ' ', paragraphs[1])
first_par = ((first_par[:95] + '...')
if len(first_par) > 95 else first_par)
else:
raise ValueError("Docstring not found by gallery.\n"
"Please check the layout of your"
" example file:\n {}\n and make sure"
" it's correct".format(filename))
else:
first_par = paragraphs[0]
break
return docstring, first_par, erow + 1 + start_row
def generate_example_rst(app):
""" Generate the list of examples, as well as the contents of
examples.
"""
root_dir = os.path.join(app.builder.srcdir, 'auto_examples')
example_dir = os.path.abspath(os.path.join(app.builder.srcdir, '..',
'examples'))
generated_dir = os.path.abspath(os.path.join(app.builder.srcdir,
'modules', 'generated'))
try:
plot_gallery = eval(app.builder.config.plot_gallery)
except TypeError:
plot_gallery = bool(app.builder.config.plot_gallery)
if not os.path.exists(example_dir):
os.makedirs(example_dir)
if not os.path.exists(root_dir):
os.makedirs(root_dir)
if not os.path.exists(generated_dir):
os.makedirs(generated_dir)
# we create an index.rst with all examples
fhindex = open(os.path.join(root_dir, 'index.rst'), 'w')
# Note: The sidebar button has been removed from the examples page for now
# due to how it messes up the layout. Will be fixed at a later point
fhindex.write("""\
.. raw:: html
<style type="text/css">
div#sidebarbutton {
/* hide the sidebar collapser, while ensuring vertical arrangement */
display: none;
}
</style>
.. _examples-index:
Examples
========
""")
# Here we don't use an os.walk, but we recurse only twice: flat is
# better than nested.
seen_backrefs = set()
generate_dir_rst('.', fhindex, example_dir, root_dir, plot_gallery, seen_backrefs)
for directory in sorted(os.listdir(example_dir)):
if os.path.isdir(os.path.join(example_dir, directory)):
generate_dir_rst(directory, fhindex, example_dir, root_dir, plot_gallery, seen_backrefs)
fhindex.flush()
def extract_line_count(filename, target_dir):
# Extract the line count of a file
example_file = os.path.join(target_dir, filename)
if six.PY2:
lines = open(example_file).readlines()
else:
lines = open(example_file, encoding='utf-8').readlines()
start_row = 0
if lines and lines[0].startswith('#!'):
lines.pop(0)
start_row = 1
line_iterator = iter(lines)
tokens = tokenize.generate_tokens(lambda: next(line_iterator))
check_docstring = True
erow_docstring = 0
for tok_type, _, _, (erow, _), _ in tokens:
tok_type = token.tok_name[tok_type]
if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):
continue
elif (tok_type == 'STRING') and check_docstring:
erow_docstring = erow
check_docstring = False
return erow_docstring+1+start_row, erow+1+start_row
def line_count_sort(file_list, target_dir):
# Sort the list of examples by line-count
new_list = [x for x in file_list if x.endswith('.py')]
unsorted = np.zeros(shape=(len(new_list), 2))
unsorted = unsorted.astype(np.object)
for count, exmpl in enumerate(new_list):
docstr_lines, total_lines = extract_line_count(exmpl, target_dir)
unsorted[count][1] = total_lines - docstr_lines
unsorted[count][0] = exmpl
index = np.lexsort((unsorted[:, 0].astype(np.str),
unsorted[:, 1].astype(np.float)))
if not len(unsorted):
return []
return np.array(unsorted[index][:, 0]).tolist()
def _thumbnail_div(subdir, full_dir, fname, snippet):
"""Generates RST to place a thumbnail in a gallery"""
thumb = os.path.join(full_dir, 'images', 'thumb', fname[:-3] + '.png')
link_name = os.path.join(full_dir, fname).replace(os.path.sep, '_')
ref_name = os.path.join(subdir, fname).replace(os.path.sep, '_')
if ref_name.startswith('._'):
ref_name = ref_name[2:]
out = []
out.append("""
.. raw:: html
<div class="thumbnailContainer" tooltip="{}">
""".format(snippet))
out.append('.. figure:: %s\n' % thumb)
if link_name.startswith('._'):
link_name = link_name[2:]
if full_dir != '.':
out.append(' :target: ./%s/%s.html\n\n' % (full_dir, fname[:-3]))
else:
out.append(' :target: ./%s.html\n\n' % link_name[:-3])
out.append(""" :ref:`example_%s`
.. raw:: html
</div>
""" % (ref_name))
return ''.join(out)
def generate_dir_rst(directory, fhindex, example_dir, root_dir, plot_gallery, seen_backrefs):
""" Generate the rst file for an example directory.
"""
if not directory == '.':
target_dir = os.path.join(root_dir, directory)
src_dir = os.path.join(example_dir, directory)
else:
target_dir = root_dir
src_dir = example_dir
if not os.path.exists(os.path.join(src_dir, 'README.txt')):
raise ValueError('Example directory %s does not have a README.txt' %
src_dir)
fhindex.write("""
%s
""" % open(os.path.join(src_dir, 'README.txt')).read())
if not os.path.exists(target_dir):
os.makedirs(target_dir)
sorted_listdir = line_count_sort(os.listdir(src_dir),
src_dir)
if not os.path.exists(os.path.join(directory, 'images', 'thumb')):
os.makedirs(os.path.join(directory, 'images', 'thumb'))
for fname in sorted_listdir:
if fname.endswith('py'):
backrefs = generate_file_rst(fname, target_dir, src_dir, root_dir, plot_gallery)
new_fname = os.path.join(src_dir, fname)
_, snippet, _ = extract_docstring(new_fname, True)
fhindex.write(_thumbnail_div(directory, directory, fname, snippet))
fhindex.write("""
.. toctree::
:hidden:
%s/%s
""" % (directory, fname[:-3]))
for backref in backrefs:
include_path = os.path.join(root_dir, '../modules/generated/%s.examples' % backref)
seen = backref in seen_backrefs
with open(include_path, 'a' if seen else 'w') as ex_file:
if not seen:
# heading
print(file=ex_file)
print('Examples using ``%s``' % backref, file=ex_file)
print('-----------------%s--' % ('-' * len(backref)),
file=ex_file)
print(file=ex_file)
rel_dir = os.path.join('../../auto_examples', directory)
ex_file.write(_thumbnail_div(directory, rel_dir, fname, snippet))
seen_backrefs.add(backref)
fhindex.write("""
.. raw:: html
<div class="clearer"></div>
""") # clear at the end of the section
# modules for which we embed links into example code
DOCMODULES = ['sklearn', 'matplotlib', 'numpy', 'scipy']
def make_thumbnail(in_fname, out_fname, width, height):
"""Make a thumbnail with the same aspect ratio centered in an
image with a given width and height
"""
# local import to avoid testing dependency on PIL:
try:
from PIL import Image
except ImportError:
import Image
img = Image.open(in_fname)
width_in, height_in = img.size
scale_w = width / float(width_in)
scale_h = height / float(height_in)
if height_in * scale_w <= height:
scale = scale_w
else:
scale = scale_h
width_sc = int(round(scale * width_in))
height_sc = int(round(scale * height_in))
# resize the image
img.thumbnail((width_sc, height_sc), Image.ANTIALIAS)
# insert centered
thumb = Image.new('RGB', (width, height), (255, 255, 255))
pos_insert = ((width - width_sc) // 2, (height - height_sc) // 2)
thumb.paste(img, pos_insert)
thumb.save(out_fname)
# Use optipng to perform lossless compression on the resized image if
# software is installed
if os.environ.get('SKLEARN_DOC_OPTIPNG', False):
try:
subprocess.call(["optipng", "-quiet", "-o", "9", out_fname])
except Exception:
warnings.warn('Install optipng to reduce the size of the generated images')
def get_short_module_name(module_name, obj_name):
""" Get the shortest possible module name """
parts = module_name.split('.')
short_name = module_name
for i in range(len(parts) - 1, 0, -1):
short_name = '.'.join(parts[:i])
try:
exec('from %s import %s' % (short_name, obj_name))
except ImportError:
# get the last working module name
short_name = '.'.join(parts[:(i + 1)])
break
return short_name
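# Illustrative example (hypothetical names): for
# get_short_module_name('sklearn.linear_model.base', 'LinearRegression') the
# loop keeps dropping trailing components while the import still succeeds,
# so the shortened module name 'sklearn.linear_model' is returned.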
class NameFinder(ast.NodeVisitor):
"""Finds the longest form of variable names and their imports in code
Only retains names from imported modules.
"""
def __init__(self):
super(NameFinder, self).__init__()
self.imported_names = {}
self.accessed_names = set()
def visit_Import(self, node, prefix=''):
for alias in node.names:
local_name = alias.asname or alias.name
self.imported_names[local_name] = prefix + alias.name
def visit_ImportFrom(self, node):
self.visit_Import(node, node.module + '.')
def visit_Name(self, node):
self.accessed_names.add(node.id)
def visit_Attribute(self, node):
attrs = []
while isinstance(node, ast.Attribute):
attrs.append(node.attr)
node = node.value
if isinstance(node, ast.Name):
# This is a.b, not e.g. a().b
attrs.append(node.id)
self.accessed_names.add('.'.join(reversed(attrs)))
else:
# need to get a in a().b
self.visit(node)
def get_mapping(self):
for name in self.accessed_names:
local_name = name.split('.', 1)[0]
remainder = name[len(local_name):]
if local_name in self.imported_names:
# Join import path to relative path
full_name = self.imported_names[local_name] + remainder
yield name, full_name
def identify_names(code):
"""Builds a codeobj summary by identifying and resovles used names
>>> code = '''
... from a.b import c
... import d as e
... print(c)
... e.HelloWorld().f.g
... '''
>>> for name, o in sorted(identify_names(code).items()):
... print(name, o['name'], o['module'], o['module_short'])
c c a.b a.b
e.HelloWorld HelloWorld d d
"""
finder = NameFinder()
finder.visit(ast.parse(code))
example_code_obj = {}
for name, full_name in finder.get_mapping():
# name is as written in file (e.g. np.asarray)
# full_name includes resolved import path (e.g. numpy.asarray)
module, attribute = full_name.rsplit('.', 1)
# get shortened module name
module_short = get_short_module_name(module, attribute)
cobj = {'name': attribute, 'module': module,
'module_short': module_short}
example_code_obj[name] = cobj
return example_code_obj
def generate_file_rst(fname, target_dir, src_dir, root_dir, plot_gallery):
""" Generate the rst file for a given example.
Returns the set of sklearn functions/classes imported in the example.
"""
base_image_name = os.path.splitext(fname)[0]
image_fname = '%s_%%03d.png' % base_image_name
this_template = rst_template
last_dir = os.path.split(src_dir)[-1]
# to avoid leading . in file names, and wrong names in links
if last_dir == '.' or last_dir == 'examples':
last_dir = ''
else:
last_dir += '_'
short_fname = last_dir + fname
src_file = os.path.join(src_dir, fname)
example_file = os.path.join(target_dir, fname)
shutil.copyfile(src_file, example_file)
# The following is a list containing all the figure names
figure_list = []
image_dir = os.path.join(target_dir, 'images')
thumb_dir = os.path.join(image_dir, 'thumb')
if not os.path.exists(image_dir):
os.makedirs(image_dir)
if not os.path.exists(thumb_dir):
os.makedirs(thumb_dir)
image_path = os.path.join(image_dir, image_fname)
stdout_path = os.path.join(image_dir,
'stdout_%s.txt' % base_image_name)
time_path = os.path.join(image_dir,
'time_%s.txt' % base_image_name)
thumb_file = os.path.join(thumb_dir, base_image_name + '.png')
time_elapsed = 0
if plot_gallery and fname.startswith('plot'):
# generate the plot as a png image if the file name
# starts with 'plot' and the source file is more
# recent than the existing image.
first_image_file = image_path % 1
if os.path.exists(stdout_path):
stdout = open(stdout_path).read()
else:
stdout = ''
if os.path.exists(time_path):
time_elapsed = float(open(time_path).read())
if not os.path.exists(first_image_file) or \
os.stat(first_image_file).st_mtime <= os.stat(src_file).st_mtime:
# We need to execute the code
print('plotting %s' % fname)
t0 = time()
import matplotlib.pyplot as plt
plt.close('all')
cwd = os.getcwd()
try:
# First CD in the original example dir, so that any file
# created by the example get created in this directory
orig_stdout = sys.stdout
os.chdir(os.path.dirname(src_file))
my_buffer = StringIO()
my_stdout = Tee(sys.stdout, my_buffer)
sys.stdout = my_stdout
my_globals = {'pl': plt}
execfile(os.path.basename(src_file), my_globals)
time_elapsed = time() - t0
sys.stdout = orig_stdout
my_stdout = my_buffer.getvalue()
if '__doc__' in my_globals:
# The __doc__ is often printed in the example, we
# don't wish to echo it
my_stdout = my_stdout.replace(
my_globals['__doc__'],
'')
my_stdout = my_stdout.strip().expandtabs()
if my_stdout:
stdout = '**Script output**::\n\n %s\n\n' % (
'\n '.join(my_stdout.split('\n')))
open(stdout_path, 'w').write(stdout)
open(time_path, 'w').write('%f' % time_elapsed)
os.chdir(cwd)
# In order to save every figure we have two solutions :
# * iterate from 1 to infinity and call plt.fignum_exists(n)
# (this requires the figures to be numbered
# incrementally: 1, 2, 3 and not 1, 2, 5)
# * iterate over [fig_mngr.num for fig_mngr in
# matplotlib._pylab_helpers.Gcf.get_all_fig_managers()]
fig_managers = matplotlib._pylab_helpers.Gcf.get_all_fig_managers()
for fig_mngr in fig_managers:
# Set the fig_num figure as the current figure as we can't
# save a figure that's not the current figure.
fig = plt.figure(fig_mngr.num)
kwargs = {}
to_rgba = matplotlib.colors.colorConverter.to_rgba
for attr in ['facecolor', 'edgecolor']:
fig_attr = getattr(fig, 'get_' + attr)()
default_attr = matplotlib.rcParams['figure.' + attr]
if to_rgba(fig_attr) != to_rgba(default_attr):
kwargs[attr] = fig_attr
fig.savefig(image_path % fig_mngr.num, **kwargs)
figure_list.append(image_fname % fig_mngr.num)
except:
print(80 * '_')
print('%s is not compiling:' % fname)
traceback.print_exc()
print(80 * '_')
finally:
os.chdir(cwd)
sys.stdout = orig_stdout
print(" - time elapsed : %.2g sec" % time_elapsed)
else:
figure_list = [f[len(image_dir):]
for f in glob.glob(image_path.replace("%03d",
'[0-9][0-9][0-9]'))]
figure_list.sort()
# generate thumb file
this_template = plot_rst_template
car_thumb_path = os.path.join(os.path.split(root_dir)[0], '_build/html/stable/_images/')
# Note: normally, make_thumbnail is used to write to the path contained in `thumb_file`
# which is within `auto_examples/../images/thumbs` depending on the example.
# Because the carousel has different dimensions than those of the examples gallery,
# I did not simply reuse them all as some contained whitespace due to their default gallery
# thumbnail size. Below, for a few cases, separate thumbnails are created (the originals can't
# just be overwritten with the carousel dimensions as it messes up the examples gallery layout).
# The special carousel thumbnails are written directly to _build/html/stable/_images/,
# as for some reason unknown to me, Sphinx refuses to copy my 'extra' thumbnails from the
# auto examples gallery to the _build folder. This works fine as is, but it would be cleaner to
# have it happen with the rest. Ideally they should be written to 'thumb_file' as well, and then
# copied to the _images folder during the `Copying Downloadable Files` step like the rest.
if not os.path.exists(car_thumb_path):
os.makedirs(car_thumb_path)
if os.path.exists(first_image_file):
# We generate extra special thumbnails for the carousel
carousel_tfile = os.path.join(car_thumb_path, base_image_name + '_carousel.png')
first_img = image_fname % 1
if first_img in carousel_thumbs:
make_thumbnail((image_path % carousel_thumbs[first_img][0]),
carousel_tfile, carousel_thumbs[first_img][1], 190)
make_thumbnail(first_image_file, thumb_file, 400, 280)
if not os.path.exists(thumb_file):
# create something to replace the thumbnail
make_thumbnail('images/no_image.png', thumb_file, 200, 140)
docstring, short_desc, end_row = extract_docstring(example_file)
# Depending on whether we have one or more figures, we're using a
# horizontal list or a single rst call to 'image'.
if len(figure_list) == 1:
figure_name = figure_list[0]
image_list = SINGLE_IMAGE % figure_name.lstrip('/')
else:
image_list = HLIST_HEADER
for figure_name in figure_list:
image_list += HLIST_IMAGE_TEMPLATE % figure_name.lstrip('/')
time_m, time_s = divmod(time_elapsed, 60)
f = open(os.path.join(target_dir, base_image_name + '.rst'), 'w')
f.write(this_template % locals())
f.flush()
# save variables so we can later add links to the documentation
if six.PY2:
example_code_obj = identify_names(open(example_file).read())
else:
example_code_obj = \
identify_names(open(example_file, encoding='utf-8').read())
if example_code_obj:
codeobj_fname = example_file[:-3] + '_codeobj.pickle'
with open(codeobj_fname, 'wb') as fid:
pickle.dump(example_code_obj, fid, pickle.HIGHEST_PROTOCOL)
backrefs = set('{module_short}.{name}'.format(**entry)
for entry in example_code_obj.values()
if entry['module'].startswith('sklearn'))
return backrefs
def embed_code_links(app, exception):
"""Embed hyperlinks to documentation into example code"""
if exception is not None:
return
print('Embedding documentation hyperlinks in examples..')
if app.builder.name == 'latex':
# Don't embed hyperlinks when a latex builder is used.
return
# Add resolvers for the packages for which we want to show links
doc_resolvers = {}
doc_resolvers['sklearn'] = SphinxDocLinkResolver(app.builder.outdir,
relative=True)
resolver_urls = {
'matplotlib': 'http://matplotlib.org',
'numpy': 'http://docs.scipy.org/doc/numpy-1.6.0',
'scipy': 'http://docs.scipy.org/doc/scipy-0.11.0/reference',
}
for this_module, url in resolver_urls.items():
try:
doc_resolvers[this_module] = SphinxDocLinkResolver(url)
except HTTPError as e:
print("The following HTTP Error has occurred:\n")
print(e.code)
except URLError as e:
print("\n...\n"
"Warning: Embedding the documentation hyperlinks requires "
"internet access.\nPlease check your network connection.\n"
"Unable to continue embedding `{0}` links due to a URL "
"Error:\n".format(this_module))
print(e.args)
example_dir = os.path.join(app.builder.srcdir, 'auto_examples')
html_example_dir = os.path.abspath(os.path.join(app.builder.outdir,
'auto_examples'))
# patterns for replacement
link_pattern = '<a href="%s">%s</a>'
orig_pattern = '<span class="n">%s</span>'
period = '<span class="o">.</span>'
for dirpath, _, filenames in os.walk(html_example_dir):
for fname in filenames:
print('\tprocessing: %s' % fname)
full_fname = os.path.join(html_example_dir, dirpath, fname)
subpath = dirpath[len(html_example_dir) + 1:]
pickle_fname = os.path.join(example_dir, subpath,
fname[:-5] + '_codeobj.pickle')
if os.path.exists(pickle_fname):
# we have a pickle file with the objects to embed links for
with open(pickle_fname, 'rb') as fid:
example_code_obj = pickle.load(fid)
fid.close()
str_repl = {}
# generate replacement strings with the links
for name, cobj in example_code_obj.items():
this_module = cobj['module'].split('.')[0]
if this_module not in doc_resolvers:
continue
try:
link = doc_resolvers[this_module].resolve(cobj,
full_fname)
except (HTTPError, URLError) as e:
print("The following error has occurred:\n")
print(repr(e))
continue
if link is not None:
parts = name.split('.')
name_html = period.join(orig_pattern % part
for part in parts)
str_repl[name_html] = link_pattern % (link, name_html)
# do the replacement in the html file
# ensure greediness
names = sorted(str_repl, key=len, reverse=True)
expr = re.compile(r'(?<!\.)\b' + # don't follow . or word
'|'.join(re.escape(name)
for name in names))
def substitute_link(match):
return str_repl[match.group()]
if len(str_repl) > 0:
with open(full_fname, 'rb') as fid:
lines_in = fid.readlines()
with open(full_fname, 'wb') as fid:
for line in lines_in:
line = line.decode('utf-8')
line = expr.sub(substitute_link, line)
fid.write(line.encode('utf-8'))
print('[done]')
def setup(app):
app.connect('builder-inited', generate_example_rst)
app.add_config_value('plot_gallery', True, 'html')
# embed links after build is finished
app.connect('build-finished', embed_code_links)
# Sphinx hack: sphinx copies generated images to the build directory
# each time the docs are made. If the desired image name already
# exists, it appends a digit to prevent overwrites. The problem is,
# the directory is never cleared. This means that each time you build
# the docs, the number of images in the directory grows.
#
# This question has been asked on the sphinx development list, but there
# was no response: http://osdir.com/ml/sphinx-dev/2011-02/msg00123.html
#
# The following is a hack that prevents this behavior by clearing the
# image build directory each time the docs are built. If sphinx
# changes their layout between versions, this will not work (though
# it should probably not cause a crash). Tested successfully
# on Sphinx 1.0.7
build_image_dir = '_build/html/_images'
if os.path.exists(build_image_dir):
filelist = os.listdir(build_image_dir)
for filename in filelist:
if filename.endswith('png'):
os.remove(os.path.join(build_image_dir, filename))
def setup_module():
# HACK: Stop nosetests running setup() above
pass
|
bsd-3-clause
|
drglove/SickRage
|
lib/requests/packages/urllib3/__init__.py
|
51
|
1861
|
"""
urllib3 - Thread-safe connection pooling and re-using.
"""
__author__ = 'Andrey Petrov ([email protected])'
__license__ = 'MIT'
__version__ = 'dev'
from .connectionpool import (
HTTPConnectionPool,
HTTPSConnectionPool,
connection_from_url
)
from . import exceptions
from .filepost import encode_multipart_formdata
from .poolmanager import PoolManager, ProxyManager, proxy_from_url
from .response import HTTPResponse
from .util.request import make_headers
from .util.url import get_host
from .util.timeout import Timeout
from .util.retry import Retry
# Set default logging handler to avoid "No handler found" warnings.
import logging
try: # Python 2.7+
from logging import NullHandler
except ImportError:
class NullHandler(logging.Handler):
def emit(self, record):
pass
logging.getLogger(__name__).addHandler(NullHandler())
def add_stderr_logger(level=logging.DEBUG):
"""
Helper for quickly adding a StreamHandler to the logger. Useful for
debugging.
Returns the handler after adding it.
"""
# This method needs to be in this __init__.py to get the __name__ correct
# even if urllib3 is vendored within another package.
logger = logging.getLogger(__name__)
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s %(message)s'))
logger.addHandler(handler)
logger.setLevel(level)
logger.debug('Added a stderr logging handler to logger: %s' % __name__)
return handler
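# Minimal usage sketch (assuming the package is importable as urllib3):
# import urllib3, logging
# handler = urllib3.add_stderr_logger(logging.INFO)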
# ... Clean up.
del NullHandler
# Set security warning to always go off by default.
import warnings
warnings.simplefilter('always', exceptions.SecurityWarning)
def disable_warnings(category=exceptions.HTTPWarning):
"""
Helper for quickly disabling all urllib3 warnings.
"""
warnings.simplefilter('ignore', category)
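# Usage sketch: calling disable_warnings() with no argument ignores
# HTTPWarning and all of its subclasses, including the SecurityWarning
# enabled above.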
|
gpl-3.0
|
rajanandakumar/DIRAC
|
AccountingSystem/Client/Types/BaseAccountingType.py
|
11
|
5726
|
# $HeadURL$
__RCSID__ = "$Id$"
import types
from DIRAC import S_OK, S_ERROR
from DIRAC.Core.Utilities import Time
from DIRAC.Core.DISET.RPCClient import RPCClient
from DIRAC.AccountingSystem.Client.DataStoreClient import gDataStoreClient
class BaseAccountingType:
__validDataValues = ( types.IntType, types.LongType, types.FloatType )
def __init__( self ):
self.keyFieldsList = []
self.valueFieldsList = []
self.valuesList = []
self.fieldsList = []
self.startTime = 0
self.endTime = 0
self.dataTimespan = 0
self.bucketsLength = [ ( 86400 * 8, 3600 ), # < 1w+1d -> 1h buckets
( 15552000, 86400 ), # > 1w+1d, < 6m -> 1d buckets
( 31104000, 604800 ), # > 6m -> 1w buckets
]
self.definitionKeyFields = []
self.definitionAccountingFields = []
def checkType( self ):
"""
Check that everything is defined
"""
if len( self.definitionKeyFields ) == 0:
raise Exception( "definitionKeyFields has to be filled prior to utilization" )
if len( self.definitionAccountingFields ) == 0:
raise Exception( "definitionAccountingFields has to be filled prior to utilization" )
for key in self.definitionKeyFields:
self.keyFieldsList.append( key[0] )
for value in self.definitionAccountingFields:
self.valueFieldsList.append( value[0] )
self.fieldsList = []
self.fieldsList.extend( self.keyFieldsList )
self.fieldsList.extend( self.valueFieldsList )
if len( self.valuesList ) != len( self.fieldsList ):
self.valuesList = [ None for i in self.fieldsList ]
def getDataTimespan( self ):
"""
Get the data timespan for the type. Data older than dataTimespan will be deleted
"""
return self.dataTimespan
def setStartTime( self, startTime = False ):
"""
Give a start time for the report
By default use now
"""
if not startTime:
self.startTime = Time.dateTime()
else:
self.startTime = startTime
def setEndTime( self, endTime = False ):
"""
Give an end time for the report
By default use now
"""
if not endTime:
self.endTime = Time.dateTime()
else:
self.endTime = endTime
def setNowAsStartAndEndTime( self ):
"""
Set current time as start and end time of the report
"""
self.startTime = Time.dateTime()
self.endTime = self.startTime
def setValueByKey( self, key, value ):
"""
Add value for key
"""
if key not in self.fieldsList:
return S_ERROR( "Key %s is not defined" % key )
keyPos = self.fieldsList.index( key )
self.valuesList[ keyPos ] = value
return S_OK()
def setValuesFromDict( self, dataDict ):
"""
Set values from key-value dictionary
"""
errKeys = []
for key in dataDict:
if not key in self.fieldsList:
errKeys.append( key )
if errKeys:
return S_ERROR( "Key(s) %s are not valid" % ", ".join( errKeys ) )
for key in dataDict:
self.setValueByKey( key, dataDict[ key ] )
return S_OK()
def getValue( self, key ):
try:
return S_OK( self.valuesList[ self.fieldsList.index( key ) ] )
except IndexError:
return S_ERROR( "%s does not have a value" % key )
except ValueError:
return S_ERROR( "%s is not a valid key" % key )
def checkValues( self ):
"""
Check that all values are defined and valid
"""
errorList = []
for i in range( len( self.valuesList ) ):
key = self.fieldsList[i]
if self.valuesList[i] is None:
errorList.append( "no value for %s" % key )
if key in self.valueFieldsList and type( self.valuesList[i] ) not in self.__validDataValues:
errorList.append( "value for key %s is not numerical type" % key )
if errorList:
return S_ERROR( "Invalid values: %s" % ", ".join( errorList ) )
if not self.startTime:
return S_ERROR( "Start time has not been defined" )
if type( self.startTime ) != Time._dateTimeType:
return S_ERROR( "Start time is not a datetime object" )
if not self.endTime:
return S_ERROR( "End time has not been defined" )
if type( self.endTime ) != Time._dateTimeType:
return S_ERROR( "End time is not a datetime object" )
return self.checkRecord()
def checkRecord( self ):
""" To be overwritten by child class
"""
return S_OK()
def getDefinition( self ):
"""
Get a tuple containing type definition
"""
return ( self.__class__.__name__,
self.definitionKeyFields,
self.definitionAccountingFields,
self.bucketsLength
)
def getValues( self ):
"""
Get a tuple containing report values
"""
return ( self.__class__.__name__,
self.startTime,
self.endTime,
self.valuesList
)
def getContents( self ):
"""
Get the contents
"""
cD = {}
if self.startTime:
cD[ 'startTime' ] = self.startTime
if self.endTime:
cD[ 'endTime' ] = self.endTime
for iPos in range( len( self.fieldsList ) ):
if self.valuesList[ iPos ]:
cD[ self.fieldsList[ iPos ] ] = self.valuesList[ iPos ]
return cD
def registerToServer( self ):
"""
Register type in server
"""
rpcClient = RPCClient( "Accounting/DataStore" )
return rpcClient.registerType( *self.getDefinition() )
def commit( self ):
"""
Commit register to server
"""
retVal = gDataStoreClient.addRegister( self )
if not retVal[ 'OK' ]:
return retVal
return gDataStoreClient.commit()
def remove( self ):
"""
Remove a register from server
"""
return gDataStoreClient.remove( self )
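# Minimal subclass sketch (hypothetical type and field definitions, not an
# actual DIRAC accounting type): definitionKeyFields and
# definitionAccountingFields must be filled before checkType() is called.
# class ExampleType( BaseAccountingType ):
#   def __init__( self ):
#     BaseAccountingType.__init__( self )
#     self.definitionKeyFields = [ ( 'User', 'VARCHAR(32)' ) ]
#     self.definitionAccountingFields = [ ( 'CPUTime', 'INT' ) ]
#     self.checkType()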
|
gpl-3.0
|
andrewsmedina/horizon
|
horizon/horizon/dashboards/nova/instances_and_volumes/volumes/tables.py
|
1
|
4720
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from django.core.urlresolvers import reverse
from django.template.defaultfilters import title
from django.utils import safestring
from django.utils.translation import ugettext as _
from horizon import api
from horizon import tables
LOG = logging.getLogger(__name__)
ACTIVE_STATES = ("ACTIVE",)
class DeleteVolume(tables.DeleteAction):
data_type_singular = _("Volume")
data_type_plural = _("Volumes")
classes = ('danger',)
def delete(self, request, obj_id):
api.volume_delete(request, obj_id)
class CreateVolume(tables.LinkAction):
name = "create"
verbose_name = _("Create Volume")
url = "horizon:nova:instances_and_volumes:volumes:create"
attrs = {"class": "btn small ajax-modal"}
class EditAttachments(tables.LinkAction):
name = "attachments"
verbose_name = _("Edit Attachments")
url = "horizon:nova:instances_and_volumes:volumes:attach"
def allowed(self, request, volume=None):
return volume.status in ("available", "in-use")
def get_size(volume):
return _("%s GB") % volume.size
def get_attachment(volume):
attachments = []
link = '<a href="%(url)s">Instance %(instance)s ' \
'<small>(%(dev)s)</small></a>'
# Filter out "empty" attachments which the client returns...
for attachment in [att for att in volume.attachments if att]:
url = reverse("horizon:nova:instances_and_volumes:instances:detail",
args=(attachment["serverId"],))
# TODO(jake): Make "instance" the instance name
vals = {"url": url,
"instance": attachment["serverId"],
"dev": attachment["device"]}
attachments.append(link % vals)
return safestring.mark_safe(", ".join(attachments))
class VolumesTable(tables.DataTable):
name = tables.Column("displayName",
verbose_name=_("Name"),
link="horizon:nova:instances_and_volumes:"
"volumes:detail")
description = tables.Column("displayDescription",
verbose_name=("Description"))
size = tables.Column(get_size, verbose_name=_("Size"))
attachments = tables.Column(get_attachment,
verbose_name=_("Attachments"),
empty_value=_("-"))
status = tables.Column("status", filters=(title,))
def sanitize_id(self, obj_id):
return int(obj_id)
def get_object_display(self, obj):
return obj.displayName
class Meta:
name = "volumes"
verbose_name = _("Volumes")
table_actions = (CreateVolume, DeleteVolume,)
row_actions = (EditAttachments, DeleteVolume,)
class DetachVolume(tables.BatchAction):
name = "detach"
action_present = _("Detach")
action_past = _("Detached")
data_type_singular = _("Volume")
data_type_plural = _("Volumes")
classes = ('danger',)
def action(self, request, obj_id):
instance_id = self.table.get_object_by_id(obj_id)['serverId']
api.volume_detach(request, instance_id, obj_id)
def get_success_url(self, request):
return reverse('horizon:nova:instances_and_volumes:index')
class AttachmentsTable(tables.DataTable):
instance = tables.Column("serverId", verbose_name=_("Instance"))
device = tables.Column("device")
def sanitize_id(self, obj_id):
return int(obj_id)
def get_object_id(self, obj):
return obj['id']
def get_object_display(self, obj):
vals = {"dev": obj['device'],
"instance": obj['serverId']}
return "Attachment %(dev)s on %(instance)s" % vals
def get_object_by_id(self, obj_id):
for obj in self.data:
if self.get_object_id(obj) == obj_id:
return obj
raise ValueError('No match found for the id "%s".' % obj_id)
class Meta:
name = "attachments"
table_actions = (DetachVolume,)
row_actions = (DetachVolume,)
|
apache-2.0
|
ptisserand/portage
|
pym/portage/tests/locks/test_lock_nonblock.py
|
16
|
1629
|
# Copyright 2011 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
import tempfile
import traceback
import portage
from portage import os
from portage import shutil
from portage.tests import TestCase
class LockNonblockTestCase(TestCase):
def _testLockNonblock(self):
tempdir = tempfile.mkdtemp()
try:
path = os.path.join(tempdir, 'lock_me')
lock1 = portage.locks.lockfile(path)
pid = os.fork()
if pid == 0:
portage.locks._close_fds()
# Disable close_fds since we don't exec
# (see _setup_pipes docstring).
portage.process._setup_pipes({0:0, 1:1, 2:2}, close_fds=False)
rval = 2
try:
try:
lock2 = portage.locks.lockfile(path, flags=os.O_NONBLOCK)
except portage.exception.TryAgain:
rval = os.EX_OK
else:
rval = 1
portage.locks.unlockfile(lock2)
except SystemExit:
raise
except:
traceback.print_exc()
finally:
os._exit(rval)
self.assertEqual(pid > 0, True)
pid, status = os.waitpid(pid, 0)
self.assertEqual(os.WIFEXITED(status), True)
self.assertEqual(os.WEXITSTATUS(status), os.EX_OK)
portage.locks.unlockfile(lock1)
finally:
shutil.rmtree(tempdir)
def testLockNonblock(self):
self._testLockNonblock()
def testLockNonblockHardlink(self):
prev_state = os.environ.pop("__PORTAGE_TEST_HARDLINK_LOCKS", None)
os.environ["__PORTAGE_TEST_HARDLINK_LOCKS"] = "1"
try:
self._testLockNonblock()
finally:
os.environ.pop("__PORTAGE_TEST_HARDLINK_LOCKS", None)
if prev_state is not None:
os.environ["__PORTAGE_TEST_HARDLINK_LOCKS"] = prev_state
|
gpl-2.0
|
getavalon/core
|
avalon/nuke/command.py
|
2
|
3576
|
import logging
import contextlib
import nuke
from .. import api, io
log = logging.getLogger(__name__)
def reset_frame_range():
""" Set frame range to current asset
Also it will set a Viewer range with
displayed handles
"""
fps = float(api.Session.get("AVALON_FPS", 25))
nuke.root()["fps"].setValue(fps)
name = api.Session["AVALON_ASSET"]
asset = io.find_one({"name": name, "type": "asset"})
asset_data = asset["data"]
handles = get_handles(asset)
frame_start = int(asset_data.get(
"frameStart",
asset_data.get("edit_in")))
frame_end = int(asset_data.get(
"frameEnd",
asset_data.get("edit_out")))
if not all([frame_start, frame_end]):
missing = ", ".join(["frame_start", "frame_end"])
msg = "'{}' are not set for asset '{}'!".format(missing, name)
log.warning(msg)
nuke.message(msg)
return
frame_start -= handles
frame_end += handles
nuke.root()["first_frame"].setValue(frame_start)
nuke.root()["last_frame"].setValue(frame_end)
# setting active viewers
vv = nuke.activeViewer().node()
vv["frame_range_lock"].setValue(True)
vv["frame_range"].setValue("{0}-{1}".format(
int(asset_data["frameStart"]),
int(asset_data["frameEnd"]))
)
def get_handles(asset):
""" Gets handles data
Arguments:
asset (dict): avalon asset entity
Returns:
handles (int)
"""
data = asset["data"]
if "handles" in data and data["handles"] is not None:
return int(data["handles"])
parent_asset = None
if "visualParent" in data:
vp = data["visualParent"]
if vp is not None:
parent_asset = io.find_one({"_id": io.ObjectId(vp)})
if parent_asset is None:
parent_asset = io.find_one({"_id": io.ObjectId(asset["parent"])})
if parent_asset is not None:
return get_handles(parent_asset)
else:
return 0
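# Illustrative sketch (an assumption, not pipeline API): the same parent-
# fallback logic as get_handles(), written against a plain {_id: asset}
# mapping so the recursion can be exercised without a database.
#
#     def resolve_handles(asset, assets_by_id):
#         data = asset["data"]
#         if data.get("handles") is not None:
#             return int(data["handles"])
#         parent_id = data.get("visualParent") or asset.get("parent")
#         parent = assets_by_id.get(parent_id)
#         return resolve_handles(parent, assets_by_id) if parent else 0
#
#     shot = {"data": {"handles": None, "visualParent": "seq"}}
#     seq = {"data": {"handles": 8}}
#     assert resolve_handles(shot, {"seq": seq}) == 8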
def reset_resolution():
"""Set resolution to project resolution."""
project = io.find_one({"type": "project"})
p_data = project["data"]
width = p_data.get("resolution_width",
p_data.get("resolutionWidth"))
height = p_data.get("resolution_height",
p_data.get("resolutionHeight"))
if not all([width, height]):
missing = ", ".join(["width", "height"])
msg = "No resolution information `{0}` found for '{1}'.".format(
missing,
project["name"])
log.warning(msg)
nuke.message(msg)
return
current_width = nuke.root()["format"].value().width()
current_height = nuke.root()["format"].value().height()
if width != current_width or height != current_height:
fmt = None
for f in nuke.formats():
if f.width() == width and f.height() == height:
fmt = f.name()
if not fmt:
nuke.addFormat(
"{0} {1} {2}".format(int(width), int(height), project["name"])
)
fmt = project["name"]
nuke.root()["format"].setValue(fmt)
@contextlib.contextmanager
def viewer_update_and_undo_stop():
"""Lock viewer from updating and stop recording undo steps"""
try:
# stop active viewer to update any change
viewer = nuke.activeViewer()
if viewer:
viewer.stop()
else:
log.warning("No available active Viewer")
nuke.Undo.disable()
yield
finally:
nuke.Undo.enable()
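# Usage sketch (assumes a running Nuke session; the node class and knob below
# are hypothetical): batch many knob edits without viewer refreshes and
# without flooding the undo stack.
#
#     with viewer_update_and_undo_stop():
#         for node in nuke.allNodes("Read"):
#             node["on_error"].setValue("black")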
|
mit
|
charanpald/wallhack
|
wallhack/viroscopy/ContactGrowthStatistics.py
|
1
|
49412
|
import logging
import sys
import gc
import numpy
import os.path
import matplotlib.pyplot as plt
from datetime import date
from sandbox.util.PathDefaults import PathDefaults
from sandbox.util.DateUtils import DateUtils
from sandbox.util.Latex import Latex
from sandbox.util.Util import Util
from apgl.graph import *
from apgl.viroscopy.HIVGraphReader import HIVGraphReader
from apgl.viroscopy.HIVGraphStatistics import HIVGraphStatistics
"""
This script computes some basic statistics on the growing graph. We currently
combine both infection and detection graphs and hence
look at the contact graph.
"""
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
numpy.set_printoptions(suppress=True, linewidth=150)
hivReader = HIVGraphReader()
graph = hivReader.readHIVGraph()
fInds = hivReader.getIndicatorFeatureIndices()
#The set of edges indexed by zeros is the contact graph
#The ones indexed by 1 is the infection graph
edgeTypeIndex1 = 0
edgeTypeIndex2 = 1
sGraphContact = graph.getSparseGraph(edgeTypeIndex1)
sGraphInfect = graph.getSparseGraph(edgeTypeIndex2)
sGraphContact = sGraphContact.union(sGraphInfect)
sGraph = sGraphContact
#sGraph = sGraph.subgraph(range(0, 200))
figureDir = PathDefaults.getOutputDir() + "viroscopy/figures/contact/"
resultsDir = PathDefaults.getOutputDir() + "viroscopy/"
graphStats = GraphStatistics()
statsArray = graphStats.scalarStatistics(sGraph, False)
slowStats = True
saveResults = False
logging.info(sGraph)
logging.info("Number of features: " + str(sGraph.getVertexList().getNumFeatures()))
logging.info("Largest component is " + str(statsArray[graphStats.maxComponentSizeIndex]))
logging.info("Number of components " + str(statsArray[graphStats.numComponentsIndex]))
#sGraph = sGraph.subgraph(components[componentIndex])
vertexArray = sGraph.getVertexList().getVertices()
logging.info("Size of graph we will use: " + str(sGraph.getNumVertices()))
#Some indices
dobIndex = fInds["birthDate"]
detectionIndex = fInds["detectDate"]
deathIndex = fInds["deathDate"]
genderIndex = fInds["gender"]
orientationIndex = fInds["orient"]
ages = vertexArray[:, detectionIndex] - vertexArray[:, dobIndex]
deaths = vertexArray[:, deathIndex] - vertexArray[:, detectionIndex]
detections = vertexArray[:, detectionIndex]
startYear = 1900
daysInYear = 365
daysInMonth = 30
monthStep = 3
#Effective diameter q
q = 0.9
plotInd = 1
plotStyles = ['ko-', 'kx-', 'k+-', 'k.-', 'k*-']
plotStyles2 = ['k-', 'r-', 'g-', 'b-', 'c-', 'm-']
plotStyleBW = ['k-', 'k--', 'k-.', 'k:']
plotStyles4 = ['r-', 'r--', 'r-.', 'r:']
numConfigGraphs = 10
#Make sure we include all detections
dayList = range(int(numpy.min(detections)), int(numpy.max(detections)), daysInMonth*monthStep)
dayList.append(numpy.max(detections))
absDayList = [float(i-numpy.min(detections)) for i in dayList]
subgraphIndicesList = []
for i in dayList:
logging.info("Date: " + str(DateUtils.getDateStrFromDay(i, startYear)))
subgraphIndices = numpy.nonzero(detections <= i)[0]
subgraphIndicesList.append(subgraphIndices)
#Compute the indices list for the vector statistics
dayList2 = [DateUtils.getDayDelta(date(1989, 12, 31), startYear)]
dayList2.append(DateUtils.getDayDelta(date(1993, 12, 31), startYear))
dayList2.append(DateUtils.getDayDelta(date(1997, 12, 31), startYear))
dayList2.append(DateUtils.getDayDelta(date(2001, 12, 31), startYear))
dayList2.append(int(numpy.max(detections)))
subgraphIndicesList2 = []
for i in dayList2:
logging.info("Date: " + str(DateUtils.getDateStrFromDay(i, startYear)))
subgraphIndices = numpy.nonzero(detections <= i)[0]
subgraphIndicesList2.append(subgraphIndices)
#Locations and labels for years
locs = list(range(0, int(absDayList[-1]), daysInYear*2))
labels = numpy.arange(1986, 2006, 2)
#Some indices
contactIndex = fInds["contactTrace"]
donorIndex = fInds["donor"]
randomTestIndex = fInds["randomTest"]
stdIndex = fInds["STD"]
prisonerIndex = fInds["prisoner"]
doctorIndex = fInds["recommendVisit"]
#The most popular provinces
havanaIndex = fInds["CH"]
villaClaraIndex = fInds["VC"]
pinarIndex = fInds["PR"]
holguinIndex = fInds["HO"]
habanaIndex = fInds["LH"]
sanctiIndex = fInds["SS"]
santiagoIndex = fInds['SC']
camagueyIndex = fInds['CA']
def plotVertexStats():
#Calculate all vertex statistics
logging.info("Computing vertex stats")
#Indices
numContactsIndex = fInds["numContacts"]
numTestedIndex = fInds["numTested"]
numPositiveIndex = fInds["numPositive"]
#Properties of vertex values
detectionAges = []
deathAfterInfectAges = []
deathAges = []
homoMeans = []
maleSums = []
femaleSums = []
heteroSums = []
biSums = []
contactMaleSums = []
contactFemaleSums = []
contactHeteroSums = []
contactBiSums = []
doctorMaleSums = []
doctorFemaleSums = []
doctorHeteroSums = []
doctorBiSums = []
contactSums = []
nonContactSums = []
donorSums = []
randomTestSums = []
stdSums = []
prisonerSums = []
recommendSums = []
    #This is: all detections - contact, donor, randomTest, STD, recommend
otherSums = []
havanaSums = []
villaClaraSums = []
pinarSums = []
holguinSums = []
habanaSums = []
sanctiSums = []
numContactSums = []
numTestedSums = []
numPositiveSums = []
#Total number of sexual contacts
numContactMaleSums = []
numContactFemaleSums = []
numContactHeteroSums = []
numContactBiSums = []
numTestedMaleSums = []
numTestedFemaleSums = []
numTestedHeteroSums = []
numTestedBiSums = []
numPositiveMaleSums = []
numPositiveFemaleSums = []
numPositiveHeteroSums = []
numPositiveBiSums = []
propPositiveMaleSums = []
propPositiveFemaleSums = []
propPositiveHeteroSums = []
propPositiveBiSums = []
numContactVertices = []
numContactEdges = []
numInfectEdges = []
#Mean proportion of degree at end of epidemic
meanPropDegree = []
finalDegreeSequence = numpy.array(sGraph.outDegreeSequence(), numpy.float)
degreeOneSums = []
degreeTwoSums = []
degreeThreePlusSums = []
numProvinces = 15
provinceArray = numpy.zeros((len(subgraphIndicesList), numProvinces))
m = 0
for subgraphIndices in subgraphIndicesList:
subgraph = sGraph.subgraph(subgraphIndices)
infectSubGraph = sGraphInfect.subgraph(subgraphIndices)
subgraphVertexArray = subgraph.getVertexList().getVertices(range(subgraph.getNumVertices()))
detectionAges.append(numpy.mean((subgraphVertexArray[:, detectionIndex] - subgraphVertexArray[:, dobIndex]))/daysInYear)
deathAfterInfectAges.append((numpy.mean(subgraphVertexArray[:, deathIndex] - subgraphVertexArray[:, detectionIndex]))/daysInYear)
deathAges.append(numpy.mean((subgraphVertexArray[:, deathIndex] - subgraphVertexArray[:, dobIndex]))/daysInYear)
homoMeans.append(numpy.mean(subgraphVertexArray[:, orientationIndex]))
nonContactSums.append(subgraphVertexArray.shape[0] - numpy.sum(subgraphVertexArray[:, contactIndex]))
contactSums.append(numpy.sum(subgraphVertexArray[:, contactIndex]))
donorSums.append(numpy.sum(subgraphVertexArray[:, donorIndex]))
randomTestSums.append(numpy.sum(subgraphVertexArray[:, randomTestIndex]))
stdSums.append(numpy.sum(subgraphVertexArray[:, stdIndex]))
prisonerSums.append(numpy.sum(subgraphVertexArray[:, prisonerIndex]))
recommendSums.append(numpy.sum(subgraphVertexArray[:, doctorIndex]))
otherSums.append(subgraphVertexArray.shape[0] - numpy.sum(subgraphVertexArray[:, [contactIndex, donorIndex, randomTestIndex, stdIndex, doctorIndex]]))
heteroSums.append(numpy.sum(subgraphVertexArray[:, orientationIndex]==0))
biSums.append(numpy.sum(subgraphVertexArray[:, orientationIndex]==1))
femaleSums.append(numpy.sum(subgraphVertexArray[:, genderIndex]==1))
maleSums.append(numpy.sum(subgraphVertexArray[:, genderIndex]==0))
contactHeteroSums.append(numpy.sum(numpy.logical_and(subgraphVertexArray[:, orientationIndex]==0, subgraphVertexArray[:, contactIndex])))
contactBiSums.append(numpy.sum(numpy.logical_and(subgraphVertexArray[:, orientationIndex]==1, subgraphVertexArray[:, contactIndex])))
contactFemaleSums.append(numpy.sum(numpy.logical_and(subgraphVertexArray[:, genderIndex]==1, subgraphVertexArray[:, contactIndex])))
contactMaleSums.append(numpy.sum(numpy.logical_and(subgraphVertexArray[:, genderIndex]==0, subgraphVertexArray[:, contactIndex])))
doctorHeteroSums.append(numpy.sum(numpy.logical_and(subgraphVertexArray[:, orientationIndex]==0, subgraphVertexArray[:, doctorIndex])))
doctorBiSums.append(numpy.sum(numpy.logical_and(subgraphVertexArray[:, orientationIndex]==1, subgraphVertexArray[:, doctorIndex])))
doctorFemaleSums.append(numpy.sum(numpy.logical_and(subgraphVertexArray[:, genderIndex]==1, subgraphVertexArray[:, doctorIndex])))
doctorMaleSums.append(numpy.sum(numpy.logical_and(subgraphVertexArray[:, genderIndex]==0, subgraphVertexArray[:, doctorIndex])))
havanaSums.append(numpy.sum(subgraphVertexArray[:, havanaIndex]==1))
villaClaraSums.append(numpy.sum(subgraphVertexArray[:, villaClaraIndex]==1))
pinarSums.append(numpy.sum(subgraphVertexArray[:, pinarIndex]==1))
holguinSums.append(numpy.sum(subgraphVertexArray[:, holguinIndex]==1))
habanaSums.append(numpy.sum(subgraphVertexArray[:, habanaIndex]==1))
sanctiSums.append(numpy.sum(subgraphVertexArray[:, sanctiIndex]==1))
numContactSums.append(numpy.mean(subgraphVertexArray[:, numContactsIndex]))
numTestedSums.append(numpy.mean(subgraphVertexArray[:, numTestedIndex]))
numPositiveSums.append(numpy.mean(subgraphVertexArray[:, numPositiveIndex]))
numContactMaleSums.append(numpy.mean(subgraphVertexArray[subgraphVertexArray[:, genderIndex]==0, numContactsIndex]))
numContactFemaleSums.append(numpy.mean(subgraphVertexArray[subgraphVertexArray[:, genderIndex]==1, numContactsIndex]))
numContactHeteroSums.append(numpy.mean(subgraphVertexArray[subgraphVertexArray[:, orientationIndex]==0, numContactsIndex]))
numContactBiSums.append(numpy.mean(subgraphVertexArray[subgraphVertexArray[:, orientationIndex]==1, numContactsIndex]))
numTestedMaleSums.append(numpy.mean(subgraphVertexArray[subgraphVertexArray[:, genderIndex]==0, numTestedIndex]))
numTestedFemaleSums.append(numpy.mean(subgraphVertexArray[subgraphVertexArray[:, genderIndex]==1, numTestedIndex]))
numTestedHeteroSums.append(numpy.mean(subgraphVertexArray[subgraphVertexArray[:, orientationIndex]==0, numTestedIndex]))
numTestedBiSums.append(numpy.mean(subgraphVertexArray[subgraphVertexArray[:, orientationIndex]==1, numTestedIndex]))
numPositiveMaleSums.append(numpy.mean(subgraphVertexArray[subgraphVertexArray[:, genderIndex]==0, numPositiveIndex]))
numPositiveFemaleSums.append(numpy.mean(subgraphVertexArray[subgraphVertexArray[:, genderIndex]==1, numPositiveIndex]))
numPositiveHeteroSums.append(numpy.mean(subgraphVertexArray[subgraphVertexArray[:, orientationIndex]==0, numPositiveIndex]))
numPositiveBiSums.append(numpy.mean(subgraphVertexArray[subgraphVertexArray[:, orientationIndex]==1, numPositiveIndex]))
propPositiveMaleSums.append(numPositiveMaleSums[m]/float(numTestedMaleSums[m]))
propPositiveFemaleSums.append(numPositiveFemaleSums[m]/float(numTestedFemaleSums[m]))
propPositiveHeteroSums.append(numPositiveHeteroSums[m]/float(numTestedHeteroSums[m]))
        propPositiveBiSums.append(numPositiveBiSums[m]/float(numTestedBiSums[m]))
numContactVertices.append(subgraph.getNumVertices())
numContactEdges.append(subgraph.getNumEdges())
numInfectEdges.append(infectSubGraph.getNumEdges())
nonZeroInds = finalDegreeSequence[subgraphIndices]!=0
propDegrees = numpy.mean(subgraph.outDegreeSequence()[nonZeroInds]/finalDegreeSequence[subgraphIndices][nonZeroInds])
meanPropDegree.append(numpy.mean(propDegrees))
degreeOneSums.append(numpy.sum(subgraph.outDegreeSequence()==1))
degreeTwoSums.append(numpy.sum(subgraph.outDegreeSequence()==2))
degreeThreePlusSums.append(numpy.sum(subgraph.outDegreeSequence()>=3))
provinceArray[m, :] = numpy.sum(subgraphVertexArray[:, fInds["CA"]:fInds['VC']+1], 0)
m += 1
#Save some of the results for the ABC work
numStats = 2
vertexStatsArray = numpy.zeros((len(subgraphIndicesList), numStats))
vertexStatsArray[:, 0] = numpy.array(biSums)
vertexStatsArray[:, 1] = numpy.array(heteroSums)
resultsFileName = resultsDir + "ContactGrowthVertexStats.pkl"
Util.savePickle(vertexStatsArray, resultsFileName)
global plotInd
plt.figure(plotInd)
plt.plot(absDayList, detectionAges)
plt.xticks(locs, labels)
plt.xlabel("Year")
plt.ylabel("Detection Age (years)")
plt.savefig(figureDir + "DetectionMeansGrowth.eps")
plotInd += 1
plt.figure(plotInd)
plt.plot(absDayList, heteroSums, 'k-', absDayList, biSums, 'k--', absDayList, femaleSums, 'k-.', absDayList, maleSums, 'k:')
plt.xticks(locs, labels)
plt.xlabel("Year")
plt.ylabel("Detections")
plt.legend(("Heterosexual", "MSM", "Female", "Male"), loc="upper left")
plt.savefig(figureDir + "OrientationGenderGrowth.eps")
plotInd += 1
plt.figure(plotInd)
plt.plot(absDayList, contactHeteroSums, 'k-', absDayList, contactBiSums, 'k--', absDayList, contactFemaleSums, 'k-.', absDayList, contactMaleSums, 'k:')
plt.xticks(locs, labels)
plt.xlabel("Year")
plt.ylabel("Contact tracing detections")
plt.legend(("Heterosexual", "MSM", "Female", "Male"), loc="upper left")
plt.savefig(figureDir + "OrientationGenderContact.eps")
plotInd += 1
plt.figure(plotInd)
plt.plot(absDayList, doctorHeteroSums, 'k-', absDayList, doctorBiSums, 'k--', absDayList, doctorFemaleSums, 'k-.', absDayList, doctorMaleSums, 'k:')
plt.xticks(locs, labels)
plt.xlabel("Year")
plt.ylabel("Doctor recommendation detections")
plt.legend(("Heterosexual", "MSM", "Female", "Male"), loc="upper left")
plt.savefig(figureDir + "OrientationGenderDoctor.eps")
plotInd += 1
#Plot all the provinces
plt.figure(plotInd)
plt.hold(True)
for k in range(provinceArray.shape[1]):
plt.plot(absDayList, provinceArray[:, k], label=str(k))
plt.xticks(locs, labels)
plt.xlabel("Year")
plt.ylabel("Detections")
plt.legend(loc="upper left")
plotInd += 1
#Plot of detection types
plt.figure(plotInd)
plt.plot(absDayList, contactSums, plotStyles2[0], absDayList, donorSums, plotStyles2[1], absDayList, randomTestSums, plotStyles2[2], absDayList, stdSums, plotStyles2[3], absDayList, otherSums, plotStyles2[4], absDayList, recommendSums, plotStyles2[5])
plt.xticks(locs, labels)
plt.xlabel("Year")
plt.ylabel("Detections")
plt.legend(("Contact tracing", "Blood donation", "Random test", "STD", "Other test", "Doctor recommendation"), loc="upper left")
plt.savefig(figureDir + "DetectionGrowth.eps")
plotInd += 1
plt.figure(plotInd)
plt.plot(absDayList, numContactSums, plotStyleBW[0], absDayList, numTestedSums, plotStyleBW[1], absDayList, numPositiveSums, plotStyleBW[2])
plt.xticks(locs, labels)
plt.xlabel("Year")
plt.ylabel("Contacts")
plt.legend(("No. contacts", "No. tested", "No. positive"), loc="center left")
plt.savefig(figureDir + "ContactsGrowth.eps")
plotInd += 1
plt.figure(plotInd)
plt.plot(absDayList, numContactHeteroSums, plotStyleBW[0], absDayList, numContactBiSums, plotStyleBW[1], absDayList, numContactFemaleSums, plotStyleBW[2], absDayList, numContactMaleSums, plotStyleBW[3])
plt.xticks(locs, labels)
plt.xlabel("Year")
plt.ylabel("Total contacts")
plt.legend(("Heterosexual", "MSM", "Female", "Male"), loc="upper right")
plt.savefig(figureDir + "ContactsGrowthOrientGen.eps")
plotInd += 1
plt.figure(plotInd)
plt.plot(absDayList, numTestedHeteroSums, plotStyleBW[0], absDayList, numTestedBiSums, plotStyleBW[1], absDayList, numTestedFemaleSums, plotStyleBW[2], absDayList, numTestedMaleSums, plotStyleBW[3])
plt.xticks(locs, labels)
plt.xlabel("Year")
plt.ylabel("Tested contacts")
plt.legend(("Heterosexual", "MSM", "Female", "Male"), loc="upper right")
plt.savefig(figureDir + "TestedGrowthOrientGen.eps")
plotInd += 1
plt.figure(plotInd)
plt.plot(absDayList, numPositiveHeteroSums, plotStyleBW[0], absDayList, numPositiveBiSums, plotStyleBW[1], absDayList, numPositiveFemaleSums, plotStyleBW[2], absDayList, numPositiveMaleSums, plotStyleBW[3])
plt.xticks(locs, labels)
plt.xlabel("Year")
plt.ylabel("Positive contacts")
plt.legend(("Heterosexual", "MSM", "Female", "Male"), loc="upper right")
plt.savefig(figureDir + "PositiveGrowthOrientGen.eps")
plotInd += 1
#Proportion positive versus tested
plt.figure(plotInd)
plt.plot(absDayList, propPositiveHeteroSums, plotStyleBW[0], absDayList, propPositiveBiSums, plotStyleBW[1], absDayList, propPositiveFemaleSums, plotStyleBW[2], absDayList, propPositiveMaleSums, plotStyleBW[3])
plt.xticks(locs, labels)
plt.xlabel("Year")
plt.ylabel("Proportion positive contacts")
plt.legend(("Heterosexual", "MSM", "Female", "Male"), loc="upper right")
plt.savefig(figureDir + "PercentPositiveGrowthOrientGen.eps")
plotInd += 1
plt.figure(plotInd)
plt.hold(True)
plt.plot(absDayList, havanaSums, plotStyles2[0])
plt.plot(absDayList, villaClaraSums, plotStyles2[1])
plt.plot(absDayList, pinarSums, plotStyles2[2])
plt.plot(absDayList, holguinSums, plotStyles2[3])
plt.plot(absDayList, habanaSums, plotStyles2[4])
plt.plot(absDayList, sanctiSums, plotStyles2[5])
plt.xticks(locs, labels)
plt.xlabel("Year")
plt.ylabel("Detections")
plt.legend(("Havana City", "Villa Clara", "Pinar del Rio", "Holguin", "La Habana", "Sancti Spiritus"), loc="upper left")
plt.savefig(figureDir + "ProvinceGrowth.eps")
plotInd += 1
plt.figure(plotInd)
plt.plot(absDayList, numContactVertices, plotStyleBW[0], absDayList, numContactEdges, plotStyleBW[1], absDayList, numInfectEdges, plotStyleBW[2])
plt.xticks(locs, labels)
plt.xlabel("Year")
plt.ylabel("Vertices/edges")
plt.legend(("Contact vertices", "Contact edges", "Infect edges"), loc="upper left")
plt.savefig(figureDir + "VerticesEdges.eps")
plotInd += 1
plt.figure(plotInd)
plt.plot(absDayList, meanPropDegree, plotStyleBW[0])
plt.xticks(locs, labels)
plt.xlabel("Year")
plt.ylabel("Proportion of final degree")
plt.savefig(figureDir + "MeanPropDegree.eps")
plotInd += 1
plt.figure(plotInd)
plt.plot(absDayList, degreeOneSums, plotStyleBW[0], absDayList, degreeTwoSums, plotStyleBW[1], absDayList, degreeThreePlusSums, plotStyleBW[2])
plt.xticks(locs, labels)
plt.xlabel("Year")
plt.ylabel("Detections")
plt.legend(("Degree = 1", "Degree = 2", "Degree >= 3"), loc="upper left")
plotInd += 1
#Print a table of interesting stats
results = numpy.array([havanaSums])
results = numpy.r_[results, numpy.array([villaClaraSums])]
results = numpy.r_[results, numpy.array([pinarSums])]
results = numpy.r_[results, numpy.array([holguinSums])]
results = numpy.r_[results, numpy.array([habanaSums])]
results = numpy.r_[results, numpy.array([sanctiSums])]
print(Latex.listToRow(["Havana City", "Villa Clara", "Pinar del Rio", "Holguin", "La Habana", "Sancti Spiritus"]))
print("\\hline")
for i in range(0, len(dayList), 4):
day = dayList[i]
print(str(DateUtils.getDateStrFromDay(day, startYear)) + " & " + Latex.array1DToRow(results[:, i].T) + "\\\\")
results = numpy.array([heteroSums])
results = numpy.r_[results, numpy.array([biSums])]
results = numpy.r_[results, numpy.array([femaleSums])]
results = numpy.r_[results, numpy.array([maleSums])]
print("\n\n")
print(Latex.listToRow(["Heterosexual", "MSM", "Female", "Male"]))
print("\\hline")
for i in range(0, len(dayList), 4):
day = dayList[i]
print(str(DateUtils.getDateStrFromDay(day, startYear)) + " & " + Latex.array1DToRow(results[:, i].T) + "\\\\")
def computeConfigScalarStats():
logging.info("Computing configuration model scalar stats")
graphFileNameBase = resultsDir + "ConfigGraph"
resultsFileNameBase = resultsDir + "ConfigGraphScalarStats"
#graphStats.useFloydWarshall = True
for j in range(numConfigGraphs):
resultsFileName = resultsFileNameBase + str(j)
if not os.path.isfile(resultsFileName):
configGraph = SparseGraph.load(graphFileNameBase + str(j))
statsArray = graphStats.sequenceScalarStats(configGraph, subgraphIndicesList, slowStats)
Util.savePickle(statsArray, resultsFileName, True)
gc.collect()
logging.info("All done")
def computeConfigVectorStats():
#Note: We can make this multithreaded
logging.info("Computing configuration model vector stats")
graphFileNameBase = resultsDir + "ConfigGraph"
resultsFileNameBase = resultsDir + "ConfigGraphVectorStats"
for j in range(numConfigGraphs):
resultsFileName = resultsFileNameBase + str(j)
if not os.path.isfile(resultsFileName):
configGraph = SparseGraph.load(graphFileNameBase + str(j))
statsDictList = graphStats.sequenceVectorStats(configGraph, subgraphIndicesList2, eigenStats=False)
Util.savePickle(statsDictList, resultsFileName, False)
gc.collect()
logging.info("All done")
def plotScalarStats():
logging.info("Computing scalar stats")
resultsFileName = resultsDir + "ContactGrowthScalarStats.pkl"
if saveResults:
statsArray = graphStats.sequenceScalarStats(sGraph, subgraphIndicesList, slowStats)
Util.savePickle(statsArray, resultsFileName, True)
#Now compute statistics on the configuration graphs
else:
statsArray = Util.loadPickle(resultsFileName)
#Take the mean of the results over the configuration model graphs
resultsFileNameBase = resultsDir + "ConfigGraphScalarStats"
numGraphs = len(subgraphIndicesList)
#configStatsArrays = numpy.zeros((numGraphs, graphStats.getNumStats(), numConfigGraphs))
configStatsArrays = numpy.zeros((numGraphs, graphStats.getNumStats()-2, numConfigGraphs))
for j in range(numConfigGraphs):
resultsFileName = resultsFileNameBase + str(j)
configStatsArrays[:, :, j] = Util.loadPickle(resultsFileName)
configStatsArray = numpy.mean(configStatsArrays, 2)
configStatsStd = numpy.std(configStatsArrays, 2)
global plotInd
def plotRealConfigError(index, styleReal, styleConfig, realLabel, configLabel):
plt.hold(True)
plt.plot(absDayList, statsArray[:, index], styleReal, label=realLabel)
#errors = numpy.c_[configStatsArray[:, index]-configStatsMinArray[:, index] , configStatsMaxArray[:, index]-configStatsArray[:, index]].T
errors = numpy.c_[configStatsStd[:, index], configStatsStd[:, index]].T
plt.plot(absDayList, configStatsArray[:, index], styleConfig, label=configLabel)
plt.errorbar(absDayList, configStatsArray[:, index], errors, linewidth=0, elinewidth=1, label="_nolegend_", ecolor="red")
xmin, xmax = plt.xlim()
plt.xlim((0, xmax))
ymin, ymax = plt.ylim()
plt.ylim((0, ymax))
#Output all the results into plots
plt.figure(plotInd)
plt.hold(True)
plotRealConfigError(graphStats.maxComponentSizeIndex, plotStyleBW[0], plotStyles4[0], "Max comp. vertices", "CM max comp. vertices")
plotRealConfigError(graphStats.maxComponentEdgesIndex, plotStyleBW[1], plotStyles4[1], "Max comp. edges", "CM max comp. edges")
plt.xticks(locs, labels)
plt.xlabel("Year")
plt.ylabel("No. vertices/edges")
plt.legend(loc="upper left")
plt.savefig(figureDir + "MaxComponentSizeGrowth.eps")
plotInd += 1
for k in range(len(dayList)):
day = dayList[k]
print(str(DateUtils.getDateStrFromDay(day, startYear)) + ": " + str(statsArray[k, graphStats.maxComponentEdgesIndex]))
#print(str(DateUtils.getDateStrFromDay(day, startYear)) + ": " + str(configStatsArray[k, graphStats.numComponentsIndex]))
plt.figure(plotInd)
plotRealConfigError(graphStats.numComponentsIndex, plotStyleBW[0], plotStyles4[0], "Size >= 1", "CM size >= 1")
plotRealConfigError(graphStats.numNonSingletonComponentsIndex, plotStyleBW[1], plotStyles4[1], "Size >= 2", "CM size >= 2")
plotRealConfigError(graphStats.numTriOrMoreComponentsIndex, plotStyleBW[2], plotStyles4[2], "Size >= 3", "CM size >= 3")
plt.xticks(locs, labels)
plt.xlabel("Year")
plt.ylabel("No. components")
plt.legend(loc="upper left")
plt.savefig(figureDir + "NumComponentsGrowth.eps")
plotInd += 1
plt.figure(plotInd)
plotRealConfigError(graphStats.meanComponentSizeIndex, plotStyleBW[0], plotStyles4[0], "Real graph", "CM")
plt.xticks(locs, labels)
plt.xlabel("Year")
plt.ylabel("Mean component size")
plt.legend(loc="lower right")
plt.savefig(figureDir + "MeanComponentSizeGrowth.eps")
plotInd += 1
plt.figure(plotInd)
plotRealConfigError(graphStats.diameterIndex, plotStyleBW[0], plotStyles4[0], "Real graph", "CM")
plt.xticks(locs, labels)
plt.xlabel("Year")
plt.ylabel("Max component diameter")
plt.legend(loc="lower right")
plt.savefig(figureDir + "MaxComponentDiameterGrowth.eps")
plotInd += 1
plt.figure(plotInd)
plotRealConfigError(graphStats.effectiveDiameterIndex, plotStyleBW[0], plotStyles4[0], "Real graph", "CM")
plt.xticks(locs, labels)
plt.xlabel("Year")
plt.ylabel("Effective diameter")
plt.legend(loc="lower right")
plt.savefig(figureDir + "MaxComponentEffDiameterGrowth.eps")
plotInd += 1
plt.figure(plotInd)
plotRealConfigError(graphStats.meanDegreeIndex, plotStyleBW[0], plotStyles4[0], "All vertices", "CM all vertices")
plotRealConfigError(graphStats.maxCompMeanDegreeIndex, plotStyleBW[1], plotStyles4[1], "Max component", "CM max component")
#plt.plot(absDayList, statsArray[:, graphStats.meanDegreeIndex], plotStyleBW[0], absDayList, statsArray[:, graphStats.maxCompMeanDegreeIndex], plotStyleBW[1], absDayList, configStatsArray[:, graphStats.meanDegreeIndex], plotStyles4[0], absDayList, configStatsArray[:, graphStats.maxCompMeanDegreeIndex], plotStyles4[1])
plt.xticks(locs, labels)
plt.xlabel("Year")
plt.ylabel("Mean degree")
plt.legend(loc="lower right")
plt.savefig(figureDir + "MeanDegrees.eps")
plotInd += 1
plt.figure(plotInd)
plotRealConfigError(graphStats.densityIndex, plotStyleBW[0], plotStyles4[0], "Real Graph", "Config Model")
#plt.plot(absDayList, statsArray[:, graphStats.densityIndex], plotStyleBW[0], absDayList, configStatsArray[:, graphStats.densityIndex], plotStyles4[0])
plt.xticks(locs, labels)
plt.xlabel("Year")
plt.ylabel("Density")
plt.legend()
plt.savefig(figureDir + "DensityGrowth.eps")
plotInd += 1
plt.figure(plotInd)
plt.plot(absDayList, statsArray[:, graphStats.powerLawIndex], plotStyleBW[0])
plt.xticks(locs, labels)
plt.xlabel("Year")
plt.ylabel("Alpha")
plt.savefig(figureDir + "PowerLawGrowth.eps")
plotInd += 1
plt.figure(plotInd)
plotRealConfigError(graphStats.geodesicDistanceIndex, plotStyleBW[0], plotStyles4[0], "Real Graph", "Config Model")
#plt.plot(absDayList, statsArray[:, graphStats.geodesicDistanceIndex], plotStyleBW[0], absDayList, configStatsArray[:, graphStats.geodesicDistanceIndex], plotStyles4[0])
plt.xticks(locs, labels)
plt.xlabel("Year")
plt.ylabel("Geodesic distance")
plt.legend(loc="lower right")
plt.savefig(figureDir + "GeodesicGrowth.eps")
plotInd += 1
plt.figure(plotInd)
plotRealConfigError(graphStats.harmonicGeoDistanceIndex, plotStyleBW[0], plotStyles4[0], "Real Graph", "Config Model")
#plt.plot(absDayList, statsArray[:, graphStats.harmonicGeoDistanceIndex], plotStyleBW[0], absDayList, configStatsArray[:, graphStats.harmonicGeoDistanceIndex], plotStyles4[0])
plt.xticks(locs, labels)
plt.xlabel("Year")
plt.ylabel("Mean harmonic geodesic distance")
plt.legend(loc="upper right")
plt.savefig(figureDir + "HarmonicGeodesicGrowth.eps")
plotInd += 1
#print(statsArray[:, graphStats.harmonicGeoDistanceIndex])
plt.figure(plotInd)
plotRealConfigError(graphStats.geodesicDistMaxCompIndex, plotStyleBW[0], plotStyles4[0], "Real graph", "Config model")
#plt.plot(absDayList, statsArray[:, graphStats.geodesicDistMaxCompIndex], plotStyleBW[0], absDayList, configStatsArray[:, graphStats.geodesicDistMaxCompIndex], plotStyles4[0])
plt.xticks(locs, labels)
plt.xlabel("Year")
plt.ylabel("Max component mean geodesic distance")
plt.legend(loc="lower right")
plt.savefig(figureDir + "MaxCompGeodesicGrowth.eps")
plotInd += 1
#Find the number of edges in the infection graph
resultsFileName = resultsDir + "InfectGrowthScalarStats.pkl"
infectStatsArray = Util.loadPickle(resultsFileName)
#Make sure we don't include 0 in the array
vertexIndex = numpy.argmax(statsArray[:, graphStats.numVerticesIndex] > 0)
edgeIndex = numpy.argmax(infectStatsArray[:, graphStats.numEdgesIndex] > 0)
minIndex = numpy.maximum(vertexIndex, edgeIndex)
plt.figure(plotInd)
plt.plot(numpy.log(statsArray[minIndex:, graphStats.numVerticesIndex]), numpy.log(statsArray[minIndex:, graphStats.numEdgesIndex]), plotStyleBW[0])
plt.plot(numpy.log(infectStatsArray[minIndex:, graphStats.numVerticesIndex]), numpy.log(infectStatsArray[minIndex:, graphStats.numEdgesIndex]), plotStyleBW[1])
plt.plot(numpy.log(statsArray[minIndex:, graphStats.maxComponentSizeIndex]), numpy.log(statsArray[minIndex:, graphStats.maxComponentEdgesIndex]), plotStyleBW[2])
plt.xlabel("log(|V|)")
plt.ylabel("log(|E|)/log(|D|)")
plt.legend(("Contact graph", "Infection graph", "Max component"), loc="upper left")
plt.savefig(figureDir + "LogVerticesEdgesGrowth.eps")
plotInd += 1
results = statsArray[:, graphStats.effectiveDiameterIndex]
results = numpy.c_[results, configStatsArray[:, graphStats.effectiveDiameterIndex]]
results = numpy.c_[results, statsArray[:, graphStats.geodesicDistMaxCompIndex]]
results = numpy.c_[results, configStatsArray[:, graphStats.geodesicDistMaxCompIndex]]
print("\n\n")
print(Latex.listToRow(["Diameter", "CM Diameter", "Mean Geodesic", "CM Mean Geodesic"]))
print("\\hline")
for i in range(0, len(dayList), 4):
day = dayList[i]
print(str(DateUtils.getDateStrFromDay(day, startYear)) + " & " + Latex.array1DToRow(results[i, :]) + "\\\\")
def plotVectorStats():
#Finally, compute some vector stats at various points in the graph
logging.info("Computing vector stats")
global plotInd
resultsFileName = resultsDir + "ContactGrowthVectorStats.pkl"
if saveResults:
statsDictList = graphStats.sequenceVectorStats(sGraph, subgraphIndicesList2)
Util.savePickle(statsDictList, resultsFileName, False)
else:
statsDictList = Util.loadPickle(resultsFileName)
#Load up configuration model results
configStatsDictList = []
resultsFileNameBase = resultsDir + "ConfigGraphVectorStats"
for j in range(numConfigGraphs):
resultsFileName = resultsFileNameBase + str(j)
configStatsDictList.append(Util.loadPickle(resultsFileName))
#Now need to take mean of 1st element of list
meanConfigStatsDictList = configStatsDictList[0]
for i in range(len(configStatsDictList[0])):
for k in range(1, numConfigGraphs):
for key in configStatsDictList[k][i].keys():
if configStatsDictList[k][i][key].shape[0] > meanConfigStatsDictList[i][key].shape[0]:
meanConfigStatsDictList[i][key] = numpy.r_[meanConfigStatsDictList[i][key], numpy.zeros(configStatsDictList[k][i][key].shape[0] - meanConfigStatsDictList[i][key].shape[0])]
elif configStatsDictList[k][i][key].shape[0] < meanConfigStatsDictList[i][key].shape[0]:
configStatsDictList[k][i][key] = numpy.r_[configStatsDictList[k][i][key], numpy.zeros(meanConfigStatsDictList[i][key].shape[0] - configStatsDictList[k][i][key].shape[0])]
meanConfigStatsDictList[i][key] += configStatsDictList[k][i][key]
for key in configStatsDictList[0][i].keys():
meanConfigStatsDictList[i][key] = meanConfigStatsDictList[i][key]/numConfigGraphs
triangleDistArray = numpy.zeros((len(dayList2), 100))
configTriangleDistArray = numpy.zeros((len(dayList2), 100))
hopPlotArray = numpy.zeros((len(dayList2), 27))
configHopPlotArray = numpy.zeros((len(dayList2), 30))
componentsDistArray = numpy.zeros((len(dayList2), 3000))
configComponentsDistArray = numpy.zeros((len(dayList2), 3000))
numVerticesEdgesArray = numpy.zeros((len(dayList2), 2), numpy.int)
numVerticesEdgesArray[:, 0] = [len(sgl) for sgl in subgraphIndicesList2]
numVerticesEdgesArray[:, 1] = [sGraph.subgraph(sgl).getNumEdges() for sgl in subgraphIndicesList2]
binWidths = numpy.arange(0, 0.50, 0.05)
eigVectorDists = numpy.zeros((len(dayList2), binWidths.shape[0]-1), numpy.int)
femaleSums = numpy.zeros(len(dayList2))
maleSums = numpy.zeros(len(dayList2))
heteroSums = numpy.zeros(len(dayList2))
biSums = numpy.zeros(len(dayList2))
contactSums = numpy.zeros(len(dayList2))
nonContactSums = numpy.zeros(len(dayList2))
donorSums = numpy.zeros(len(dayList2))
randomTestSums = numpy.zeros(len(dayList2))
stdSums = numpy.zeros(len(dayList2))
prisonerSums = numpy.zeros(len(dayList2))
recommendSums = numpy.zeros(len(dayList2))
meanAges = numpy.zeros(len(dayList2))
degrees = numpy.zeros((len(dayList2), 20))
provinces = numpy.zeros((len(dayList2), 15))
havanaSums = numpy.zeros(len(dayList2))
villaClaraSums = numpy.zeros(len(dayList2))
pinarSums = numpy.zeros(len(dayList2))
holguinSums = numpy.zeros(len(dayList2))
habanaSums = numpy.zeros(len(dayList2))
sanctiSums = numpy.zeros(len(dayList2))
meanDegrees = numpy.zeros(len(dayList2))
stdDegrees = numpy.zeros(len(dayList2))
#Note that death has a lot of missing values
for j in range(len(dayList2)):
dateStr = (str(DateUtils.getDateStrFromDay(dayList2[j], startYear)))
logging.info(dateStr)
statsDict = statsDictList[j]
configStatsDict = meanConfigStatsDictList[j]
degreeDist = statsDict["outDegreeDist"]
degreeDist = degreeDist/float(numpy.sum(degreeDist))
#Note that degree distribution for configuration graph will be identical
eigenDist = statsDict["eigenDist"]
eigenDist = numpy.log(eigenDist[eigenDist>=10**-1])
#configEigenDist = configStatsDict["eigenDist"]
#configEigenDist = numpy.log(configEigenDist[configEigenDist>=10**-1])
hopCount = statsDict["hopCount"]
hopCount = numpy.log10(hopCount)
hopPlotArray[j, 0:hopCount.shape[0]] = hopCount
configHopCount = configStatsDict["hopCount"]
configHopCount = numpy.log10(configHopCount)
#configHopPlotArray[j, 0:configHopCount.shape[0]] = configHopCount
triangleDist = statsDict["triangleDist"]
#triangleDist = numpy.array(triangleDist, numpy.float64)/numpy.sum(triangleDist)
triangleDist = numpy.array(triangleDist, numpy.float64)
triangleDistArray[j, 0:triangleDist.shape[0]] = triangleDist
configTriangleDist = configStatsDict["triangleDist"]
configTriangleDist = numpy.array(configTriangleDist, numpy.float64)/numpy.sum(configTriangleDist)
configTriangleDistArray[j, 0:configTriangleDist.shape[0]] = configTriangleDist
maxEigVector = statsDict["maxEigVector"]
eigenvectorInds = numpy.flipud(numpy.argsort(numpy.abs(maxEigVector)))
        top10eigenvectorInds = eigenvectorInds[0:int(numpy.round(eigenvectorInds.shape[0]/10.0))]
maxEigVector = numpy.abs(maxEigVector[eigenvectorInds])
#print(maxEigVector)
eigVectorDists[j, :] = numpy.histogram(maxEigVector, binWidths)[0]
componentsDist = statsDict["componentsDist"]
componentsDist = numpy.array(componentsDist, numpy.float64)/numpy.sum(componentsDist)
componentsDistArray[j, 0:componentsDist.shape[0]] = componentsDist
configComponentsDist = configStatsDict["componentsDist"]
configComponentsDist = numpy.array(configComponentsDist, numpy.float64)/numpy.sum(configComponentsDist)
configComponentsDistArray[j, 0:configComponentsDist.shape[0]] = configComponentsDist
plotInd2 = plotInd
plt.figure(plotInd2)
plt.plot(numpy.arange(degreeDist.shape[0]), degreeDist, plotStyles2[j], label=dateStr)
plt.xlabel("Degree")
plt.ylabel("Probability")
plt.ylim((0, 0.5))
plt.savefig(figureDir + "DegreeDist" + ".eps")
plt.legend()
plotInd2 += 1
"""
plt.figure(plotInd2)
plt.plot(numpy.arange(eigenDist.shape[0]), eigenDist, label=dateStr)
plt.xlabel("Eigenvalue rank")
plt.ylabel("log(Eigenvalue)")
plt.savefig(figureDir + "EigenDist" + ".eps")
plt.legend()
plotInd2 += 1
"""
        #How does Kleinberg do the hop plots?
plt.figure(plotInd2)
plt.plot(numpy.arange(hopCount.shape[0]), hopCount, plotStyles[j], label=dateStr)
plt.xlabel("k")
plt.ylabel("log10(pairs)")
plt.ylim( (2.5, 7) )
plt.legend(loc="lower right")
plt.savefig(figureDir + "HopCount" + ".eps")
plotInd2 += 1
plt.figure(plotInd2)
plt.plot(numpy.arange(maxEigVector.shape[0]), maxEigVector, plotStyles2[j], label=dateStr)
plt.xlabel("Rank")
plt.ylabel("log(eigenvector coefficient)")
plt.savefig(figureDir + "MaxEigVector" + ".eps")
plt.legend()
plotInd2 += 1
#Compute some information the 10% most central vertices
subgraphIndices = numpy.nonzero(detections <= dayList2[j])[0]
subgraph = sGraph.subgraph(subgraphIndices)
subgraphVertexArray = subgraph.getVertexList().getVertices()
femaleSums[j] = numpy.sum(subgraphVertexArray[top10eigenvectorInds, genderIndex]==1)
maleSums[j] = numpy.sum(subgraphVertexArray[top10eigenvectorInds, genderIndex]==0)
heteroSums[j] = numpy.sum(subgraphVertexArray[top10eigenvectorInds, orientationIndex]==0)
biSums[j] = numpy.sum(subgraphVertexArray[top10eigenvectorInds, orientationIndex]==1)
contactSums[j] = numpy.sum(subgraphVertexArray[top10eigenvectorInds, contactIndex])
donorSums[j] = numpy.sum(subgraphVertexArray[top10eigenvectorInds, donorIndex])
randomTestSums[j] = numpy.sum(subgraphVertexArray[top10eigenvectorInds, randomTestIndex])
stdSums[j] = numpy.sum(subgraphVertexArray[top10eigenvectorInds, stdIndex])
prisonerSums[j] = numpy.sum(subgraphVertexArray[top10eigenvectorInds, prisonerIndex])
recommendSums[j] = numpy.sum(subgraphVertexArray[top10eigenvectorInds, doctorIndex])
meanAges[j] = numpy.mean(subgraphVertexArray[top10eigenvectorInds, detectionIndex] - subgraphVertexArray[top10eigenvectorInds, dobIndex])/daysInYear
havanaSums[j] = numpy.sum(subgraphVertexArray[top10eigenvectorInds, havanaIndex])
villaClaraSums[j] = numpy.sum(subgraphVertexArray[top10eigenvectorInds, villaClaraIndex])
pinarSums[j] = numpy.sum(subgraphVertexArray[top10eigenvectorInds, pinarIndex])
holguinSums[j] = numpy.sum(subgraphVertexArray[top10eigenvectorInds, holguinIndex])
habanaSums[j] = numpy.sum(subgraphVertexArray[top10eigenvectorInds, habanaIndex])
sanctiSums[j] = numpy.sum(subgraphVertexArray[top10eigenvectorInds, sanctiIndex])
provinces[j, :] = numpy.sum(subgraphVertexArray[top10eigenvectorInds, 22:37], 0)
ddist = numpy.bincount(subgraph.outDegreeSequence()[top10eigenvectorInds])
degrees[j, 0:ddist.shape[0]] = numpy.array(ddist, numpy.float)/numpy.sum(ddist)
meanDegrees[j] = numpy.mean(subgraph.outDegreeSequence()[top10eigenvectorInds])
stdDegrees[j] = numpy.std(subgraph.outDegreeSequence()[top10eigenvectorInds])
plt.figure(plotInd2)
plt.plot(numpy.arange(degrees[j, :].shape[0]), degrees[j, :], plotStyles2[j], label=dateStr)
plt.xlabel("Degree")
plt.ylabel("Probability")
#plt.ylim((0, 0.5))
plt.savefig(figureDir + "DegreeDistCentral" + ".eps")
plt.legend()
plotInd2 += 1
precision = 4
dateStrList = [DateUtils.getDateStrFromDay(day, startYear) for day in dayList2]
print("Hop counts")
print(Latex.listToRow(dateStrList))
print(Latex.array2DToRows(hopPlotArray.T))
print("\nHop counts for configuration graphs")
print(Latex.listToRow(dateStrList))
print(Latex.array2DToRows(configHopPlotArray.T))
print("\n\nEdges and vertices")
print((Latex.listToRow(dateStrList)))
print((Latex.array2DToRows(numVerticesEdgesArray.T, precision)))
print("\n\nEigenvector distribution")
print((Latex.array1DToRow(binWidths[1:]) + "\\\\"))
print((Latex.array2DToRows(eigVectorDists)))
print("\n\nDistribution of component sizes")
componentsDistArray = componentsDistArray[:, 0:componentsDist.shape[0]]
nonZeroCols = numpy.sum(componentsDistArray, 0)!=0
componentsDistArray = numpy.r_[numpy.array([numpy.arange(componentsDistArray.shape[1])[nonZeroCols]]), componentsDistArray[:, nonZeroCols]]
print((Latex.listToRow(dateStrList)))
print((Latex.array2DToRows(componentsDistArray.T, precision)))
print("\n\nDistribution of component sizes in configuration graphs")
configComponentsDistArray = configComponentsDistArray[:, 0:configComponentsDist.shape[0]]
nonZeroCols = numpy.sum(configComponentsDistArray, 0)!=0
configComponentsDistArray = numpy.r_[numpy.array([numpy.arange(configComponentsDistArray.shape[1])[nonZeroCols]]), configComponentsDistArray[:, nonZeroCols]]
print((Latex.listToRow(dateStrList)))
print((Latex.array2DToRows(configComponentsDistArray.T, precision)))
print("\n\nDistribution of triangle participations")
triangleDistArray = triangleDistArray[:, 0:triangleDist.shape[0]]
nonZeroCols = numpy.sum(triangleDistArray, 0)!=0
triangleDistArray = numpy.r_[numpy.array([numpy.arange(triangleDistArray.shape[1])[nonZeroCols]])/2, triangleDistArray[:, nonZeroCols]]
print((Latex.listToRow(dateStrList)))
print((Latex.array2DToRows(triangleDistArray.T, precision)))
configTriangleDistArray = configTriangleDistArray[:, 0:configTriangleDist.shape[0]]
nonZeroCols = numpy.sum(configTriangleDistArray, 0)!=0
configTriangleDistArray = numpy.r_[numpy.array([numpy.arange(configTriangleDistArray.shape[1])[nonZeroCols]])/2, configTriangleDistArray[:, nonZeroCols]]
configTriangleDistArray = numpy.c_[configTriangleDistArray, numpy.zeros((configTriangleDistArray.shape[0], triangleDistArray.shape[1]-configTriangleDistArray.shape[1]))]
print("\n\nDistribution of central vertices")
print((Latex.listToRow(dateStrList)))
subgraphSizes = numpy.array(maleSums + femaleSums, numpy.float)
print("Female & " + Latex.array1DToRow(femaleSums*100/subgraphSizes, 1) + "\\\\")
print("Male & " + Latex.array1DToRow(maleSums*100/subgraphSizes, 1) + "\\\\")
print("\hline")
print("Heterosexual & " + Latex.array1DToRow(heteroSums*100/subgraphSizes, 1) + "\\\\")
print("Bisexual & " + Latex.array1DToRow(biSums*100/subgraphSizes, 1) + "\\\\")
print("\hline")
print("Contact traced & " + Latex.array1DToRow(contactSums*100/subgraphSizes, 1) + "\\\\")
print("Blood donor & " + Latex.array1DToRow(donorSums*100/subgraphSizes, 1) + "\\\\")
print("RandomTest & " + Latex.array1DToRow(randomTestSums*100/subgraphSizes, 1) + "\\\\")
print("STD & " + Latex.array1DToRow(stdSums*100/subgraphSizes, 1) + "\\\\")
print("Prisoner & " + Latex.array1DToRow(prisonerSums*100/subgraphSizes, 1) + "\\\\")
print("Doctor recommendation & " + Latex.array1DToRow(recommendSums*100/subgraphSizes, 1) + "\\\\")
print("\hline")
print("Mean ages (years) & " + Latex.array1DToRow(meanAges, 2) + "\\\\")
print("\hline")
print("Holguin & " + Latex.array1DToRow(holguinSums*100/subgraphSizes, 1) + "\\\\")
print("La Habana & " + Latex.array1DToRow(habanaSums*100/subgraphSizes, 1) + "\\\\")
print("Havana City & " + Latex.array1DToRow(havanaSums*100/subgraphSizes, 1) + "\\\\")
print("Pinar del Rio & " + Latex.array1DToRow(pinarSums*100/subgraphSizes, 1) + "\\\\")
print("Sancti Spiritus & " + Latex.array1DToRow(sanctiSums*100/subgraphSizes, 1) + "\\\\")
print("Villa Clara & " + Latex.array1DToRow(villaClaraSums*100/subgraphSizes, 1) + "\\\\")
print("\hline")
print("Mean degrees & " + Latex.array1DToRow(meanDegrees, 2) + "\\\\")
print("Std degrees & " + Latex.array1DToRow(stdDegrees, 2) + "\\\\")
print("\n\nProvinces")
print(Latex.array2DToRows(provinces))
print("\n\nDegree distribution")
print(Latex.array2DToRows(degrees))
def plotOtherStats():
#Let's look at geodesic distances in subgraphs and communities
logging.info("Computing other stats")
resultsFileName = resultsDir + "ContactGrowthOtherStats.pkl"
hivGraphStats = HIVGraphStatistics(fInds)
if saveResults:
statsArray = hivGraphStats.sequenceScalarStats(sGraph, subgraphIndicesList)
#statsArray["dayList"] = absDayList
Util.savePickle(statsArray, resultsFileName, True)
else:
statsArray = Util.loadPickle(resultsFileName)
#Just load the harmonic geodesic distances of the full graph
resultsFileName = resultsDir + "ContactGrowthScalarStats.pkl"
statsArray2 = Util.loadPickle(resultsFileName)
global plotInd
msmGeodesic = statsArray[:, hivGraphStats.msmGeodesicIndex]
msmGeodesic[msmGeodesic < 0] = 0
msmGeodesic[msmGeodesic == float('inf')] = 0
#Output all the results into plots
plt.figure(plotInd)
plt.plot(absDayList, msmGeodesic, 'k-', absDayList, statsArray[:, hivGraphStats.mostConnectedGeodesicIndex], 'k--')
plt.xticks(locs, labels)
#plt.ylim([0, 0.1])
plt.xlabel("Year")
plt.ylabel("Mean harmonic geodesic distance")
plt.legend(("MSM individuals", "Top 10% degree"), loc="upper right")
plt.savefig(figureDir + "MSM10Geodesic" + ".eps")
plotInd += 1
plt.figure(plotInd)
plt.plot(absDayList, statsArray2[:, graphStats.harmonicGeoDistanceIndex], 'k-', absDayList, statsArray[:, hivGraphStats.menSubgraphGeodesicIndex], 'k--')
plt.xticks(locs, labels)
plt.ylim([0, 200.0])
plt.xlabel("Year")
plt.ylabel("Mean harmonic geodesic distance")
plt.legend(("All individuals", "Men subgraph"), loc="upper right")
plt.savefig(figureDir + "MenSubgraphGeodesic" + ".eps")
plotInd += 1
#plotVertexStats()
plotScalarStats()
#plotVectorStats()
#plotOtherStats()
plt.show()
#computeConfigScalarStats()
#computeConfigVectorStats()
"""
Probability of adding node based on degree - try to find how we can generate data.
Mean Time between first and last infection for each person
"""
|
gpl-3.0
|
pigeonflight/strider-plone
|
docker/appengine/lib/django-1.2/django/contrib/gis/geos/prototypes/__init__.py
|
79
|
1355
|
"""
This module contains all of the GEOS ctypes function prototypes. Each
prototype handles the interaction between the GEOS library and Python
via ctypes.
"""
# Coordinate sequence routines.
from django.contrib.gis.geos.prototypes.coordseq import create_cs, get_cs, \
cs_clone, cs_getordinate, cs_setordinate, cs_getx, cs_gety, cs_getz, \
cs_setx, cs_sety, cs_setz, cs_getsize, cs_getdims
# Geometry routines.
from django.contrib.gis.geos.prototypes.geom import from_hex, from_wkb, from_wkt, \
create_point, create_linestring, create_linearring, create_polygon, create_collection, \
destroy_geom, get_extring, get_intring, get_nrings, get_geomn, geom_clone, \
geos_normalize, geos_type, geos_typeid, geos_get_srid, geos_set_srid, \
get_dims, get_num_coords, get_num_geoms, \
to_hex, to_wkb, to_wkt
# Miscellaneous routines.
from django.contrib.gis.geos.prototypes.misc import geos_area, geos_distance, geos_length
# Predicates
from django.contrib.gis.geos.prototypes.predicates import geos_hasz, geos_isempty, \
geos_isring, geos_issimple, geos_isvalid, geos_contains, geos_crosses, \
    geos_disjoint, geos_equals, geos_equalsexact, geos_intersects, \
    geos_overlaps, geos_relatepattern, geos_touches, geos_within
# Topology routines
from django.contrib.gis.geos.prototypes.topology import *
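# Illustrative sketch (an assumption, not this module's internals): the
# general ctypes prototyping pattern the submodules above apply -- declare
# argtypes and restype so a bad call fails loudly in Python rather than
# corrupting memory.
#
#     import ctypes
#
#     def prototype(func, argtypes, restype):
#         func.argtypes = argtypes
#         func.restype = restype
#         return func
#
#     # e.g. against the interpreter's own C API:
#     py_get_version = prototype(ctypes.pythonapi.Py_GetVersion,
#                                [], ctypes.c_char_p)
#     print(py_get_version())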
|
mit
|
blooparksystems/odoo
|
addons/mrp_repair/wizard/make_invoice.py
|
45
|
2203
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from openerp.osv import fields, osv
class make_invoice(osv.osv_memory):
_name = 'mrp.repair.make_invoice'
_description = 'Make Invoice'
_columns = {
'group': fields.boolean('Group by partner invoice address'),
}
def make_invoices(self, cr, uid, ids, context=None):
""" Generates invoice(s) of selected records.
@param self: The object pointer.
@param cr: A database cursor
@param uid: ID of the user currently logged in
@param ids: List of IDs selected
@param context: A standard dictionary
@return: Loads the view of new invoice(s).
"""
if context is None:
context = {}
inv = self.browse(cr, uid, ids[0], context=context)
order_obj = self.pool.get('mrp.repair')
mod_obj = self.pool.get('ir.model.data')
newinv = order_obj.action_invoice_create(cr, uid, context['active_ids'],
                                                group=inv.group, context=context)
# We have to trigger the workflow of the given repairs, otherwise they remain 'to be invoiced'.
# Note that the signal 'action_invoice_create' will trigger another call to the method 'action_invoice_create',
# but that second call will not do anything, since the repairs are already invoiced.
order_obj.signal_workflow(cr, uid, context['active_ids'], 'action_invoice_create')
form_res = mod_obj.get_object_reference(cr, uid, 'account', 'invoice_form')
form_id = form_res and form_res[1] or False
tree_res = mod_obj.get_object_reference(cr, uid, 'account', 'invoice_tree')
tree_id = tree_res and tree_res[1] or False
return {
'domain': [('id','in', newinv.values())],
'name': 'Invoices',
'view_type': 'form',
'view_mode': 'tree,form',
'res_model': 'account.invoice',
'view_id': False,
            'views': [(tree_id, 'tree'), (form_id, 'form')],
'context': "{'type':'out_invoice'}",
'type': 'ir.actions.act_window'
}
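# Usage sketch (server-side, old osv API; `cr`, `uid` and `repair_ids` are
# assumed to come from the calling environment): create the wizard and fire
# it for a set of repair orders.
#
#     wizard_obj = self.pool.get('mrp.repair.make_invoice')
#     wizard_id = wizard_obj.create(cr, uid, {'group': True})
#     action = wizard_obj.make_invoices(
#         cr, uid, [wizard_id], context={'active_ids': repair_ids})
#     # `action` is an ir.actions.act_window dict opening the new invoices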
|
gpl-3.0
|
pywinauto/pywinauto
|
pywinauto/controls/hwndwrapper.py
|
1
|
67694
|
# GUI Application automation and testing library
# Copyright (C) 2006-2018 Mark Mc Mahon and Contributors
# https://github.com/pywinauto/pywinauto/graphs/contributors
# http://pywinauto.readthedocs.io/en/latest/credits.html
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of pywinauto nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Basic wrapping of Windows controls"""
from __future__ import unicode_literals
from __future__ import print_function
# pylint: disable-msg=W0611
# import sys
import copy
import time
import re
import ctypes
import win32api
import win32gui
import win32con
import win32process
import win32event
import six
import pywintypes
import warnings
# the wrappers may be used in an environment that does not need
# the actions - as such I don't want to require sendkeys - so
# the following makes the import optional.
from ..windows import win32defines, win32functions, win32structures
from .. import controlproperties
from ..actionlogger import ActionLogger
from .. import keyboard
from .. import mouse
from ..timings import Timings
from .. import timings
from .. import handleprops
from ..windows.win32_element_info import HwndElementInfo
from .. import backend
from .. import WindowNotFoundError # noqa #E402
# I leave this optional because PIL is a large dependency
try:
from PIL import ImageGrab
except ImportError:
ImageGrab = None
# also import MenuItemNotEnabled so that it is
# accessible from HwndWrapper module
from .menuwrapper import Menu #, MenuItemNotEnabled
from .win_base_wrapper import WinBaseWrapper
from ..base_wrapper import BaseMeta
from .. import deprecated
#====================================================================
class ControlNotEnabled(RuntimeError):
"""Raised when a control is not enabled"""
pass
#====================================================================
class ControlNotVisible(RuntimeError):
"""Raised when a control is not visible"""
pass
#====================================================================
class InvalidWindowHandle(RuntimeError):
"""Raised when an invalid handle is passed to HwndWrapper"""
def __init__(self, hwnd):
"""Initialise the RuntimError parent with the mesage"""
RuntimeError.__init__(self,
"Handle {0} is not a vaild window handle".format(hwnd))
#=========================================================================
class HwndMeta(BaseMeta):
"""Metaclass for HwndWrapper objects"""
re_wrappers = {}
str_wrappers = {}
def __init__(cls, name, bases, attrs):
"""
Register the class names
        Both the regular expressions and the literal class names are registered.
"""
BaseMeta.__init__(cls, name, bases, attrs)
for win_class in cls.windowclasses:
HwndMeta.re_wrappers[re.compile(win_class)] = cls
HwndMeta.str_wrappers[win_class] = cls
@staticmethod
def find_wrapper(element):
"""Find the correct wrapper for this native element"""
if isinstance(element, six.integer_types):
element = HwndElementInfo(element)
class_name = element.class_name
try:
return HwndMeta.str_wrappers[class_name]
except KeyError:
wrapper_match = None
for regex, wrapper in HwndMeta.re_wrappers.items():
if regex.match(class_name):
wrapper_match = wrapper
HwndMeta.str_wrappers[class_name] = wrapper
return wrapper
# if it is a dialog then override the wrapper we found
# and make it a DialogWrapper
if handleprops.is_toplevel_window(element.handle):
wrapper_match = DialogWrapper
if wrapper_match is None:
wrapper_match = HwndWrapper
return wrapper_match
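    # Registration sketch (illustrative; the class and regex are hypothetical):
    # subclasses opt in simply by declaring windowclasses --
    #
    #     class ButtonWrapper(HwndWrapper):
    #         windowclasses = ["Button", r"WindowsForms\d*\.BUTTON\..*"]
    #
    # find_wrapper() then resolves "Button" through the exact-name cache and
    # the WindowsForms variant through the regex table, falling back to the
    # dialog/HwndWrapper defaults only when nothing matches.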
#====================================================================
@six.add_metaclass(HwndMeta)
class HwndWrapper(WinBaseWrapper):
"""
Default wrapper for controls.
All other wrappers are derived from this.
This class wraps a lot of functionality of underlying windows API
features for working with windows.
Most of the methods apply to every single window type. For example
you can click() on any window.
Most of the methods of this class are simple wrappers around
    API calls and as such they try to do the simplest thing possible.
An HwndWrapper object can be passed directly to a ctypes wrapped
C function - and it will get converted to a Long with the value of
    its handle (see ctypes, _as_parameter_).
"""
handle = None
# -----------------------------------------------------------
def __new__(cls, element):
"""Construct the control wrapper"""
return super(HwndWrapper, cls)._create_wrapper(cls, element, HwndWrapper)
# -----------------------------------------------------------
def __init__(self, element_info):
"""
Initialize the control
* **element_info** is either a valid HwndElementInfo or it can be an
instance or subclass of HwndWrapper.
If the handle is not valid then an InvalidWindowHandle error
is raised.
"""
if isinstance(element_info, six.integer_types):
element_info = HwndElementInfo(element_info)
if hasattr(element_info, "element_info"):
element_info = element_info.element_info
WinBaseWrapper.__init__(self, element_info, backend.registry.backends['win32'])
# verify that we have been passed in a valid windows handle
if not handleprops.iswindow(self.handle):
raise InvalidWindowHandle(self.handle)
# make it so that ctypes conversion happens correctly
self._as_parameter_ = self.handle
@property
def writable_props(self):
"""Extend default properties list."""
props = super(HwndWrapper, self).writable_props
props.extend(['style',
'exstyle',
'user_data',
'context_help_id',
'fonts',
'client_rects',
'is_unicode',
'menu_items',
'automation_id',
])
return props
# -----------------------------------------------------------
def style(self):
"""
        Returns the style of the window
        Return value is a long.
        Combination of WS_* and control-specific styles.
See HwndWrapper.has_style() to easily check if the window has a
particular style.
"""
return handleprops.style(self)
# Non PEP-8 alias
Style = deprecated(style)
# -----------------------------------------------------------
def exstyle(self):
"""
        Returns the extended style of the window
        Return value is a long.
        Combination of WS_EX_* and control-specific extended styles.
See HwndWrapper.has_style() to easily check if the window has a
particular style.
"""
return handleprops.exstyle(self)
# Non PEP-8 alias
ExStyle = deprecated(exstyle, deprecated_name='ExStyle')
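    # Usage sketch (assumes `wrapper` is an HwndWrapper; the win32defines
    # import path may vary by pywinauto version):
    #
    #   if wrapper.style() & win32defines.WS_VISIBLE:
    #       pass  # same check as wrapper.has_style(win32defines.WS_VISIBLE)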
#------------------------------------------------------------
def automation_id(self):
"""Return the .NET name of the control"""
return self.element_info.auto_id
#------------------------------------------------------------
def control_type(self):
"""Return the .NET type of the control"""
return self.element_info.control_type
#------------------------------------------------------------
def full_control_type(self):
"""Return the .NET type of the control (full, uncut)"""
return self.element_info.full_control_type
# -----------------------------------------------------------
def user_data(self):
"""
        Extra data associated with the window
This value is a long value that has been associated with the window
and rarely has useful data (or at least data that you know the use
of).
"""
return handleprops.userdata(self)
# Non PEP-8 alias
UserData = deprecated(user_data)
# -----------------------------------------------------------
def context_help_id(self):
"""Return the Context Help ID of the window"""
return handleprops.contexthelpid(self)
# Non PEP-8 alias
ContextHelpID = deprecated(context_help_id, deprecated_name='ContextHelpID')
# -----------------------------------------------------------
def is_active(self):
"""Whether the window is active or not"""
return self.top_level_parent() == self.get_active()
# Non PEP-8 alias
IsActive = deprecated(is_active)
# -----------------------------------------------------------
def is_unicode(self):
"""
Whether the window is unicode or not
A window is Unicode if it was registered by the Wide char version
of RegisterClass(Ex).
"""
return handleprops.isunicode(self)
# Non PEP-8 alias
IsUnicode = deprecated(is_unicode)
# -----------------------------------------------------------
def client_rect(self):
"""
Returns the client rectangle of window
The client rectangle is the window rectangle minus any borders that
are not available to the control for drawing.
Both top and left are always 0 for this method.
        This method returns a RECT structure, which has attributes top,
        left, right and bottom, and methods width() and height().
See win32structures.RECT for more information.
"""
return handleprops.clientrect(self)
# Non PEP-8 alias
ClientRect = deprecated(client_rect)
# -----------------------------------------------------------
#def client_to_screen(self, client_point):
# """Maps point from client to screen coordinates"""
# point = win32structures.POINT()
# if isinstance(client_point, win32structures.POINT):
# point.x = client_point.x
# point.y = client_point.y
# else:
# point.x = client_point[0]
# point.y = client_point[1]
# win32functions.client_to_screen(self, ctypes.byref(point))
#
# # return tuple in any case because
# # coords param is always expected to be tuple
# return point.x, point.y
# -----------------------------------------------------------
def font(self):
"""
Return the font of the window
The font of the window is used to draw the text of that window.
It is a structure which has attributes for font name, height, width
etc.
See win32structures.LOGFONTW for more information.
"""
return handleprops.font(self)
# Non PEP-8 alias
Font = deprecated(font)
# -----------------------------------------------------------
def has_style(self, style):
"""Return True if the control has the specified style"""
return handleprops.has_style(self, style)
# Non PEP-8 alias
HasStyle = deprecated(has_style)
# -----------------------------------------------------------
def has_exstyle(self, exstyle):
"""Return True if the control has the specified extended style"""
return handleprops.has_exstyle(self, exstyle)
# Non PEP-8 alias
HasExStyle = deprecated(has_exstyle, deprecated_name='HasExStyle')
# -----------------------------------------------------------
def is_dialog(self):
"""Return true if the control is a top level window"""
if not ("isdialog" in self._cache.keys()):
self._cache['isdialog'] = handleprops.is_toplevel_window(self)
return self._cache['isdialog']
# -----------------------------------------------------------
def client_rects(self):
"""
Return the client rect for each item in this control
        It is a list of rectangles for the control. It is frequently overridden
        to extract all rectangles from a control with multiple items.
        It is always a list with one or more rectangles:
        * First element is the client rectangle of the control
* Subsequent elements contain the client rectangle of any items of
the control (e.g. items in a listbox/combobox, tabs in a
tabcontrol)
"""
return [self.client_rect(), ]
# Non PEP-8 alias
ClientRects = deprecated(client_rects)
# -----------------------------------------------------------
def fonts(self):
"""
Return the font for each item in this control
        It is a list of fonts for the control. It is frequently overridden
        to extract all fonts from a control with multiple items.
        It is always a list with one or more fonts:
        * First element is the control font
* Subsequent elements contain the font of any items of
the control (e.g. items in a listbox/combobox, tabs in a
tabcontrol)
"""
return [self.font(), ]
# Non PEP-8 alias
Fonts = deprecated(fonts)
# -----------------------------------------------------------
    def send_command(self, commandID):
        """Send a WM_COMMAND message with the given command ID to the control"""
        return self.send_message(win32defines.WM_COMMAND, commandID)
# Non PEP-8 alias
SendCommand = deprecated(send_command)
# -----------------------------------------------------------
    def post_command(self, commandID):
        """Post a WM_COMMAND message with the given command ID to the control"""
        return self.post_message(win32defines.WM_COMMAND, commandID)
# Non PEP-8 alias
PostCommand = deprecated(post_command)
# -----------------------------------------------------------
#def notify(self, code):
# "Send a notification to the parent (not tested yet)"
# # now we need to notify the parent that the state has changed
# nmhdr = win32structures.NMHDR()
# nmhdr.hwndFrom = self.handle
# nmhdr.idFrom = self.control_id()
# nmhdr.code = code
# from ..windows.remote_memory_block import RemoteMemoryBlock
# remote_mem = RemoteMemoryBlock(self, size=ctypes.sizeof(nmhdr))
# remote_mem.Write(nmhdr, size=ctypes.sizeof(nmhdr))
# retval = self.parent().send_message(
# win32defines.WM_NOTIFY,
# self.handle,
# remote_mem)
# #if retval != win32defines.TRUE:
# # print('retval = ' + str(retval))
# # raise ctypes.WinError()
# del remote_mem
# return retval
# Non PEP-8 alias
#Notify = deprecated(notify)
# -----------------------------------------------------------
def _ensure_enough_privileges(self, message_name):
"""Ensure the Python process has enough rights to send some window messages"""
pid = handleprops.processid(self.handle)
if not handleprops.has_enough_privileges(pid):
raise RuntimeError('Not enough rights to use {} message/function for target process ' \
'(to resolve it run the script as Administrator)'.format(message_name))
# -----------------------------------------------------------
def send_message(self, message, wparam = 0, lparam = 0):
"""Send a message to the control and wait for it to return"""
wParamAddress = wparam
if hasattr(wparam, 'mem_address'):
wParamAddress = wparam.mem_address
lParamAddress = lparam
if hasattr(lparam, 'mem_address'):
lParamAddress = lparam.mem_address
CArgObject = type(ctypes.byref(ctypes.c_int(0)))
if isinstance(wparam, CArgObject):
wParamAddress = ctypes.addressof(wparam._obj)
if isinstance(lparam, CArgObject):
lParamAddress = ctypes.addressof(lparam._obj)
return win32gui.SendMessage(self.handle, message, wParamAddress, lParamAddress)
# Non PEP-8 alias
SendMessage = deprecated(send_message)
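    # Usage sketch: query the text length of a control without activating it
    # (the `wrapper` name is hypothetical):
    #
    #   length = wrapper.send_message(win32defines.WM_GETTEXTLENGTH)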
# -----------------------------------------------------------
def send_chars(self,
chars,
with_spaces=True,
with_tabs=True,
with_newlines=True):
"""
Silently send a character string to the control in an inactive window
If a virtual key with no corresponding character is encountered
(e.g. VK_LEFT, VK_DELETE), a KeySequenceError is raised. Consider using
the method send_keystrokes for such input.
"""
input_locale_id = win32functions.GetKeyboardLayout(0)
keys = keyboard.parse_keys(chars, with_spaces, with_tabs, with_newlines)
for key in keys:
key_info = key.get_key_info()
flags = key_info[2]
unicode_char = (
flags & keyboard.KEYEVENTF_UNICODE ==
keyboard.KEYEVENTF_UNICODE)
if unicode_char:
_, char = key_info[:2]
vk = win32functions.VkKeyScanExW(chr(char), input_locale_id) & 0xFF
scan = win32functions.MapVirtualKeyW(vk, 0)
else:
vk, scan = key_info[:2]
char = win32functions.MapVirtualKeyW(vk, 2)
if char > 0:
lparam = 1 << 0 | scan << 16 | (flags & 1) << 24
win32api.SendMessage(self.handle, win32con.WM_CHAR, char, lparam)
else:
raise keyboard.KeySequenceError(
'no WM_CHAR code for {key}, use method send_keystrokes instead'.
format(key=key))
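    # Usage sketch (the `edit` control is hypothetical):
    #
    #   edit.send_chars('Hello, world!')  # typed while the window stays inactive
    #   # virtual keys without a WM_CHAR code, e.g. '{LEFT}', raise
    #   # KeySequenceError - use send_keystrokes() for those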
# -----------------------------------------------------------
def send_keystrokes(self,
keystrokes,
with_spaces=True,
with_tabs=True,
with_newlines=True):
"""
Silently send keystrokes to the control in an inactive window
        It parses modifiers Shift(+), Control(^), Menu(%) and sequences like
        "{TAB}" or "{ENTER}". For more information about sequences and
        modifiers navigate to the `keyboard`_ module.
.. _`keyboard`: pywinauto.keyboard.html
Due to the fact that each application handles input differently and this method
is meant to be used on inactive windows, it may work only partially depending
on the target app. If the window being inactive is not essential, use the robust
`type_keys`_ method.
.. _`type_keys`: pywinauto.base_wrapper.html#pywinauto.base_wrapper.BaseWrapper.type_keys
"""
PBYTE256 = ctypes.c_ubyte * 256
win32gui.SendMessage(self.handle, win32con.WM_ACTIVATE,
win32con.WA_ACTIVE, 0)
target_thread_id = win32functions.GetWindowThreadProcessId(self.handle, None)
current_thread_id = win32functions.GetCurrentThreadId()
attach_success = win32functions.AttachThreadInput(target_thread_id, current_thread_id, True) != 0
if not attach_success:
warnings.warn('Failed to attach app\'s thread to the current thread\'s message queue',
UserWarning, stacklevel=2)
keyboard_state_stack = [PBYTE256()]
win32functions.GetKeyboardState(keyboard_state_stack[-1])
input_locale_id = win32functions.GetKeyboardLayout(0)
context_code = 0
keys = keyboard.parse_keys(keystrokes, with_spaces, with_tabs, with_newlines)
key_combos_present = any([isinstance(k, keyboard.EscapedKeyAction) for k in keys])
if key_combos_present:
warnings.warn('Key combinations may or may not work depending on the target app',
UserWarning, stacklevel=2)
try:
for key in keys:
vk, scan, flags = key.get_key_info()
if vk == keyboard.VK_MENU or context_code == 1:
down_msg, up_msg = win32con.WM_SYSKEYDOWN, win32con.WM_SYSKEYUP
else:
down_msg, up_msg = win32con.WM_KEYDOWN, win32con.WM_KEYUP
repeat = 1
shift_state = 0
unicode_codepoint = flags & keyboard.KEYEVENTF_UNICODE != 0
if unicode_codepoint:
char = chr(scan)
vk_with_flags = win32functions.VkKeyScanExW(char, input_locale_id)
vk = vk_with_flags & 0xFF
shift_state = (vk_with_flags & 0xFF00) >> 8
scan = win32functions.MapVirtualKeyW(vk, 0)
if key.down and vk > 0:
new_keyboard_state = copy.deepcopy(keyboard_state_stack[-1])
new_keyboard_state[vk] |= 128
if shift_state & 1 == 1:
new_keyboard_state[keyboard.VK_SHIFT] |= 128
# NOTE: if there are characters with CTRL or ALT in the shift
# state, make sure to add these keys to new_keyboard_state
keyboard_state_stack.append(new_keyboard_state)
lparam = (
repeat << 0 |
scan << 16 |
(flags & 1) << 24 |
context_code << 29 |
0 << 31)
win32functions.SetKeyboardState(keyboard_state_stack[-1])
win32functions.PostMessage(self.handle, down_msg, vk, lparam)
if vk == keyboard.VK_MENU:
context_code = 1
# a delay for keyboard state to take effect
time.sleep(0.01)
if key.up and vk > 0:
keyboard_state_stack.pop()
lparam = (
repeat << 0 |
scan << 16 |
(flags & 1) << 24 |
context_code << 29 |
1 << 30 |
1 << 31)
win32functions.PostMessage(self.handle, up_msg, vk, lparam)
win32functions.SetKeyboardState(keyboard_state_stack[-1])
if vk == keyboard.VK_MENU:
context_code = 0
# a delay for keyboard state to take effect
time.sleep(0.01)
except pywintypes.error as e:
if e.winerror == 1400:
warnings.warn('Application exited before the end of keystrokes',
UserWarning, stacklevel=2)
else:
warnings.warn(e.strerror, UserWarning, stacklevel=2)
win32functions.SetKeyboardState(keyboard_state_stack[0])
if attach_success:
win32functions.AttachThreadInput(target_thread_id, current_thread_id, False)
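    # Usage sketch (the `edit` control is hypothetical):
    #
    #   edit.send_keystrokes('^a{DEL}')  # Ctrl+A then Delete, window may be inactive
    #   # key combinations may work only partially depending on the target app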
# -----------------------------------------------------------
def send_message_timeout(
self,
message,
wparam = 0,
lparam = 0,
timeout = None,
timeoutflags = win32defines.SMTO_NORMAL):
"""
Send a message to the control and wait for it to return or to timeout
        If no timeout is given then Timings.sendmessagetimeout_timeout
        (0.01 of a second by default) will be used.
"""
if timeout is None:
timeout = Timings.sendmessagetimeout_timeout
result = -1
try:
(_, result) = win32gui.SendMessageTimeout(
int(self.handle),
message,
wparam,
lparam,
timeoutflags,
int(timeout * 1000)
)
except Exception as exc:
#import traceback
#print('____________________________________________________________')
#print('self.handle =', int(self.handle), ', message =', message,
# ', wparam =', wparam, ', lparam =', lparam, ', timeout =', timeout)
#print('Exception: ', exc)
#print(traceback.format_exc())
result = str(exc)
return result #result.value
# Non PEP-8 alias
SendMessageTimeout = deprecated(send_message_timeout)
# -----------------------------------------------------------
def post_message(self, message, wparam = 0, lparam = 0):
"""Post a message to the control message queue and return"""
return win32functions.PostMessage(self, message, wparam, lparam)
# Non PEP-8 alias
PostMessage = deprecated(post_message)
# # -----------------------------------------------------------
# def notify_menu_select(self, menu_id):
# """Notify the dialog that one of it's menu items was selected
#
# **This method is Deprecated**
# """
#
# import warnings
# warning_msg = "HwndWrapper.NotifyMenuSelect() is deprecated - " \
# "equivalent functionality is being moved to the MenuWrapper class."
# warnings.warn(warning_msg, DeprecationWarning)
#
# self.set_focus()
#
# msg = win32defines.WM_COMMAND
# return self.send_message_timeout(
# msg,
# win32functions.MakeLong(0, menu_id), #wparam
# )
# Non PEP-8 alias
# NotifyMenuSelect = deprecated(notify_menu_select)
# -----------------------------------------------------------
def notify_parent(self, message, controlID = None):
"""Send the notification message to parent of this control"""
if controlID is None:
controlID = self.control_id()
if controlID is None:
# probably parent doesn't exist any more, no need to notify
return win32defines.TRUE
return self.parent().post_message(
win32defines.WM_COMMAND,
win32functions.MakeLong(message, controlID),
self)
# Non PEP-8 alias
NotifyParent = deprecated(notify_parent)
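    # Usage sketch: tell the parent a button was clicked (the `button` control
    # is hypothetical; BN_CLICKED travels in the high word of WM_COMMAND's wParam):
    #
    #   button.notify_parent(win32defines.BN_CLICKED)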
#-----------------------------------------------------------
def wait_for_idle(self):
"""Backend specific function to wait for idle state of a thread or a window"""
win32functions.WaitGuiThreadIdle(self.handle)
# -----------------------------------------------------------
def click(self, button="left", pressed="", coords=(0, 0),
double=False, absolute=False):
"""
Simulates a mouse click on the control
        This method sends WM_* messages to the control. To do a more
        'realistic' mouse click, use click_input(), which uses the
        mouse_event() API to perform the click.
This method does not require that the control be visible on the screen
(i.e. it can be hidden beneath another window and it will still work).
"""
self.verify_actionable()
self._ensure_enough_privileges('WM_*BUTTONDOWN/UP')
_perform_click(self, button, pressed, coords, double, absolute=absolute)
return self
# Non PEP-8 alias
Click = deprecated(click)
# -----------------------------------------------------------
def close_click(
self, button = "left", pressed = "", coords = (0, 0), double = False):
"""
Perform a click action that should make the window go away
The only difference from click is that there are extra delays
before and after the click action.
"""
time.sleep(Timings.before_closeclick_wait)
_perform_click(self, button, pressed, coords, double)
def has_closed():
closed = not (
handleprops.iswindow(self) or
handleprops.iswindow(self.parent()))
if not closed:
# try closing again
try:
_perform_click(self, button, pressed, coords, double)
except Exception:
return True # already closed
return closed
        # Keep waiting until both this control and its parent
# are no longer valid controls
timings.wait_until(
Timings.closeclick_dialog_close_wait,
Timings.closeclick_retry,
has_closed
)
time.sleep(Timings.after_closeclick_wait)
return self
# Non PEP-8 alias
CloseClick = deprecated(close_click)
# -----------------------------------------------------------
def close_alt_f4(self):
"""Close the window by pressing Alt+F4 keys."""
time.sleep(Timings.before_closeclick_wait)
self.type_keys('%{F4}')
time.sleep(Timings.after_closeclick_wait)
return self
# Non PEP-8 alias
CloseAltF4 = deprecated(close_alt_f4)
# -----------------------------------------------------------
def double_click(
self, button = "left", pressed = "", coords = (0, 0)):
"""Perform a double click action"""
_perform_click(self, button, pressed, coords, double = True)
return self
# Non PEP-8 alias
DoubleClick = deprecated(double_click)
# -----------------------------------------------------------
def right_click(
self, pressed = "", coords = (0, 0)):
"""Perform a right click action"""
_perform_click(
self, "right", "right " + pressed, coords, button_up = False)
_perform_click(self, "right", pressed, coords, button_down = False)
return self
# Non PEP-8 alias
RightClick = deprecated(right_click)
# -----------------------------------------------------------
def press_mouse(self, button ="left", coords = (0, 0), pressed =""):
"""Press the mouse button"""
#flags, click_point = _calc_flags_and_coords(pressed, coords)
_perform_click(self, button, pressed, coords, button_down=True, button_up=False)
return self
# Non PEP-8 alias
PressMouse = deprecated(press_mouse)
# -----------------------------------------------------------
def release_mouse(self, button ="left", coords = (0, 0), pressed =""):
"""Release the mouse button"""
#flags, click_point = _calc_flags_and_coords(pressed, coords)
_perform_click(self, button, pressed, coords, button_down=False, button_up=True)
return self
# Non PEP-8 alias
ReleaseMouse = deprecated(release_mouse)
# -----------------------------------------------------------
def move_mouse(self, coords = (0, 0), pressed ="", absolute = False):
"""Move the mouse by WM_MOUSEMOVE"""
if not absolute:
self.actions.log('Moving mouse to relative (client) coordinates ' + str(coords).replace('\n', ', '))
_perform_click(self, button='move', coords=coords, absolute=absolute, pressed=pressed)
win32functions.WaitGuiThreadIdle(self.handle)
return self
# Non PEP-8 alias
MoveMouse = deprecated(move_mouse)
# -----------------------------------------------------------
def drag_mouse(self, button ="left",
press_coords = (0, 0),
release_coords = (0, 0),
pressed = ""):
"""Drag the mouse"""
if isinstance(press_coords, win32structures.POINT):
press_coords = (press_coords.x, press_coords.y)
if isinstance(release_coords, win32structures.POINT):
release_coords = (release_coords.x, release_coords.y)
_pressed = pressed
if not _pressed:
_pressed = "left"
self.press_mouse(button, press_coords, pressed=pressed)
for i in range(5):
self.move_mouse((press_coords[0] + i, press_coords[1]), pressed=_pressed)
time.sleep(Timings.drag_n_drop_move_mouse_wait)
self.move_mouse(release_coords, pressed=_pressed)
time.sleep(Timings.before_drop_wait)
self.release_mouse(button, release_coords, pressed=pressed)
time.sleep(Timings.after_drag_n_drop_wait)
return self
# Non PEP-8 alias
DragMouse = deprecated(drag_mouse)
# -----------------------------------------------------------
def set_window_text(self, text, append = False):
"""Set the text of the window"""
self.verify_actionable()
if append:
text = self.window_text() + text
text = ctypes.c_wchar_p(six.text_type(text))
self.post_message(win32defines.WM_SETTEXT, 0, text)
win32functions.WaitGuiThreadIdle(self.handle)
self.actions.log('Set text to the ' + self.friendly_class_name() + ': ' + str(text))
return self
# Non PEP-8 alias
SetWindowText = deprecated(set_window_text)
# -----------------------------------------------------------
def debug_message(self, text):
"""Write some debug text over the window"""
# don't draw if dialog is not visible
dc = win32functions.CreateDC("DISPLAY", None, None, None)
if not dc:
raise ctypes.WinError()
rect = self.rectangle()
#ret = win32functions.TextOut(
# dc, rect.left, rect.top, six.text_type(text), len(text))
ret = win32functions.DrawText(
dc,
six.text_type(text),
len(text),
ctypes.byref(rect),
win32defines.DT_SINGLELINE)
# delete the Display context that we created
win32functions.DeleteDC(dc)
if not ret:
raise ctypes.WinError()
return self
# Non PEP-8 alias
DebugMessage = deprecated(debug_message)
# -----------------------------------------------------------
def set_transparency(self, alpha = 120):
"""Set the window transparency from 0 to 255 by alpha attribute"""
if not (0 <= alpha <= 255):
raise ValueError('alpha should be in [0, 255] interval!')
# TODO: implement SetExStyle method
win32gui.SetWindowLong(self.handle, win32defines.GWL_EXSTYLE, self.exstyle() | win32con.WS_EX_LAYERED)
win32gui.SetLayeredWindowAttributes(self.handle, win32api.RGB(0,0,0), alpha, win32con.LWA_ALPHA)
# Non PEP-8 alias
SetTransparency = deprecated(set_transparency)
# -----------------------------------------------------------
def popup_window(self):
"""Return owned enabled Popup window wrapper if shown.
        If there are no enabled popups at that time, it returns **self**.
See MSDN reference:
https://msdn.microsoft.com/en-us/library/windows/desktop/ms633515.aspx
Please do not use in production code yet - not tested fully
"""
popup = win32functions.GetWindow(self, win32defines.GW_ENABLEDPOPUP)
return popup
# Non PEP-8 alias
PopupWindow = deprecated(popup_window)
# -----------------------------------------------------------
def owner(self):
"""Return the owner window for the window if it exists
Returns None if there is no owner.
"""
owner = win32functions.GetWindow(self, win32defines.GW_OWNER)
if owner:
return HwndWrapper(owner)
else:
return None
# Non PEP-8 alias
Owner = deprecated(owner)
# -----------------------------------------------------------
# def context_menu_select(self, path, x = None, y = None):
# "TODO context_menu_select Not Implemented"
# pass
# #raise NotImplementedError(
# # "HwndWrapper.ContextMenuSelect not implemented yet")
# # Non PEP-8 alias
# ContextMenuSelect = deprecated(context_menu_select)
# -----------------------------------------------------------
def _menu_handle(self):
"""Simple overridable method to get the menu handle"""
hMenu = win32gui.GetMenu(self.handle)
is_main_menu = True
if not hMenu:
self._ensure_enough_privileges('MN_GETHMENU')
            hMenu = self.send_message(win32defines.MN_GETHMENU)
is_main_menu = False
return (hMenu, is_main_menu)
# -----------------------------------------------------------
def menu(self):
"""Return the menu of the control"""
hMenu, is_main_menu = self._menu_handle()
if hMenu: # and win32functions.IsMenu(menu_hwnd):
return Menu(self, hMenu, is_main_menu=is_main_menu)
return None
# Non PEP-8 alias
Menu = deprecated(menu)
# -----------------------------------------------------------
def menu_item(self, path, exact = False):
"""Return the menu item specified by path
Path can be a string in the form "MenuItem->MenuItem->MenuItem..."
where each MenuItem is the text of an item at that level of the menu.
E.g. ::
File->Export->ExportAsPNG
spaces are not important so you could also have written... ::
File -> Export -> Export As PNG
"""
if self.appdata is not None:
menu_appdata = self.appdata['menu_items']
else:
menu_appdata = None
menu = self.menu()
if menu:
return self.menu().get_menu_path(path, appdata = menu_appdata, exact=exact)[-1]
raise RuntimeError("There is no menu.")
# Non PEP-8 alias
MenuItem = deprecated(menu_item)
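    # Usage sketch, matching the path syntax documented above (the `dlg`
    # wrapper and menu texts are hypothetical):
    #
    #   dlg.menu_item('File -> Export -> Export As PNG').select()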
# -----------------------------------------------------------
def menu_items(self):
"""Return the menu items for the dialog
If there are no menu items then return an empty list
"""
if self.is_dialog() and self.menu():
#menu_handle = win32functions.GetMenu(self)
#self.send_message(win32defines.WM_INITMENU, menu_handle)
return self.menu().get_properties()['menu_items']
#self.send_message(win32defines.WM_INITMENU, menu_handle)
#return _GetMenuItems(menu_handle, self)
else:
return []
# Non PEP-8 alias
MenuItems = deprecated(menu_items)
# # -----------------------------------------------------------
# def menu_click(self, path):
# "Select the MenuItem specifed in path"
#
# self.verify_actionable()
#
# self.set_focus()
#
# menu = Menu(self, self._menu_handle())
#
# path_items = menu.get_menu_path(path)
#
# for menu_item in path_items:
# if not menu_item.is_enabled():
# raise MenuItemNotEnabled(
# "MenuItem '%s' is disabled"% menu_item.text())
#
# menu_item.click()
#
# return self
# # Non PEP-8 alias
# MenuClick = deprecated(menu_click)
# -----------------------------------------------------------
def menu_select(self, path, exact=False, ):
"""Find a menu item specified by the path
The full path syntax is specified in:
:py:meth:`.controls.menuwrapper.Menu.get_menu_path`
"""
self.verify_actionable()
self.menu_item(path, exact=exact).select()
# Non PEP-8 alias
MenuSelect = deprecated(menu_select)
# -----------------------------------------------------------
def move_window(self, x=None, y=None, width=None, height=None):
"""Move the window to the new coordinates
* **x** Specifies the new left position of the window.
Defaults to the current left position of the window.
* **y** Specifies the new top position of the window.
Defaults to the current top position of the window.
* **width** Specifies the new width of the window. Defaults to the
current width of the window.
* **height** Specifies the new height of the window. Default to the
current height of the window.
"""
cur_rect = self.rectangle()
# if no X is specified - so use current coordinate
if x is None:
x = cur_rect.left
else:
try:
y = x.top
width = x.width()
height = x.height()
x = x.left
except AttributeError:
pass
# if no Y is specified - so use current coordinate
if y is None:
y = cur_rect.top
# if no width is specified - so use current width
if width is None:
width = cur_rect.width()
# if no height is specified - so use current height
if height is None:
height = cur_rect.height()
# ask for the window to be moved
ret = win32functions.MoveWindow(self, x, y, width, height, True)
# check that it worked correctly
if not ret:
raise ctypes.WinError()
win32functions.WaitGuiThreadIdle(self.handle)
time.sleep(Timings.after_movewindow_wait)
# Non PEP-8 alias
MoveWindow = deprecated(move_window)
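    # Usage sketch: explicit coordinates, or pass a RECT-like object as `x`
    # (the AttributeError fallback above makes both forms work; `dlg` and
    # `other_dlg` are hypothetical):
    #
    #   dlg.move_window(x=100, y=100, width=800, height=600)
    #   dlg.move_window(other_dlg.rectangle())  # adopt another window's rectangle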
# -----------------------------------------------------------
def close(self, wait_time = 0):
"""Close the window
Code modified from http://msdn.microsoft.com/msdnmag/issues/02/08/CQA/
"""
window_text = self.window_text()
# tell the window it must close
self.post_message(win32defines.WM_CLOSE)
#unused var: start = time.time()
# Keeps trying while
# we have not timed out and
# window is still a valid handle and
# window is still visible
# any one of these conditions evaluates to false means the window is
# closed or we have timed out
def has_closed():
return not (handleprops.iswindow(self) and self.is_visible())
if not wait_time:
wait_time = Timings.closeclick_dialog_close_wait
        # Keep waiting until both this control and its parent
# are no longer valid controls
try:
timings.wait_until(
wait_time,
Timings.closeclick_retry,
has_closed
)
except timings.TimeoutError:
raise WindowNotFoundError
self.actions.log('Closed window "{0}"'.format(window_text))
# Non PEP-8 alias
Close = deprecated(close)
# -----------------------------------------------------------
def maximize(self):
"""Maximize the window"""
win32functions.ShowWindow(self, win32defines.SW_MAXIMIZE)
self.actions.log('Maximized window "{0}"'.format(self.window_text()))
return self
# Non PEP-8 alias
Maximize = deprecated(maximize)
# -----------------------------------------------------------
def minimize(self):
"""Minimize the window"""
win32functions.ShowWindow(self, win32defines.SW_MINIMIZE)
# TODO: wait while window is minimized
self.actions.log('Minimized window "{0}"'.format(self.window_text()))
return self
# Non PEP-8 alias
Minimize = deprecated(minimize)
# -----------------------------------------------------------
def restore(self):
"""Restore the window to its previous state (normal or maximized)"""
win32functions.ShowWindow(self, win32defines.SW_RESTORE)
self.actions.log('Restored window "{0}"'.format(self.window_text()))
return self
# Non PEP-8 alias
Restore = deprecated(restore)
# -----------------------------------------------------------
def get_show_state(self):
"""Get the show state and Maximized/minimzed/restored state
Returns a value that is a union of the following
* SW_HIDE the window is hidden.
* SW_MAXIMIZE the window is maximized
* SW_MINIMIZE the window is minimized
* SW_RESTORE the window is in the 'restored'
          state (neither minimized nor maximized)
* SW_SHOW The window is not hidden
"""
wp = win32structures.WINDOWPLACEMENT()
        wp.length = ctypes.sizeof(wp)
ret = win32functions.GetWindowPlacement(self, ctypes.byref(wp))
if not ret:
raise ctypes.WinError()
return wp.showCmd
# Non PEP-8 alias
GetShowState = deprecated(get_show_state)
# -----------------------------------------------------------
def is_minimized(self):
"""Indicate whether the window is minimized or not"""
return self.get_show_state() == win32defines.SW_SHOWMINIMIZED
# -----------------------------------------------------------
def is_maximized(self):
"""Indicate whether the window is maximized or not"""
return self.get_show_state() == win32defines.SW_SHOWMAXIMIZED
# -----------------------------------------------------------
def is_normal(self):
"""Indicate whether the window is normal (i.e. not minimized and not maximized)"""
return self.get_show_state() == win32defines.SW_SHOWNORMAL
# -----------------------------------------------------------
# Non PEP-8 alias
#GetActive = deprecated(get_active)
# -----------------------------------------------------------
def get_focus(self):
"""Return the control in the process of this window that has the Focus
"""
gui_info = win32structures.GUITHREADINFO()
gui_info.cbSize = ctypes.sizeof(gui_info)
window_thread_id = win32functions.GetWindowThreadProcessId(self.handle, None)
ret = win32functions.GetGUIThreadInfo(
window_thread_id,
ctypes.byref(gui_info))
if not ret:
return None
return HwndWrapper(gui_info.hwndFocus)
# Non PEP-8 alias
GetFocus = deprecated(get_focus)
# -----------------------------------------------------------
def set_focus(self):
"""
Set the focus to this control.
Bring the window to the foreground first.
The system restricts which processes can set the foreground window
(https://msdn.microsoft.com/en-us/library/windows/desktop/ms633539(v=vs.85).aspx)
so the mouse cursor is removed from the screen to prevent any side effects.
"""
# "steal the focus" if there is another active window
        # otherwise it is already in the foreground and no action is required
if not self.has_focus():
# Notice that we need to move the mouse out of the screen
# but we don't use the built-in methods of the class:
# self.mouse_move doesn't do the job well even with absolute=True
# self.move_mouse_input can't be used as it calls click_input->set_focus
mouse.move(coords=(-10000, 500)) # move the mouse out of screen to the left
# change active window
if self.is_minimized():
if self.was_maximized():
self.maximize()
else:
self.restore()
else:
win32gui.ShowWindow(self.handle, win32con.SW_SHOW)
win32gui.SetForegroundWindow(self.handle)
# make sure that we are idle before returning
win32functions.WaitGuiThreadIdle(self.handle)
# only sleep if we had to change something!
time.sleep(Timings.after_setfocus_wait)
return self
# Non PEP-8 alias
SetFocus = deprecated(set_focus)
def has_focus(self):
"""Check the window is in focus (foreground)"""
return self.handle == win32gui.GetForegroundWindow()
def has_keyboard_focus(self):
"""Check the keyboard focus on this control."""
control_thread = win32functions.GetWindowThreadProcessId(self.handle, None)
win32process.AttachThreadInput(control_thread, win32api.GetCurrentThreadId(), 1)
focused = win32gui.GetFocus()
win32process.AttachThreadInput(control_thread, win32api.GetCurrentThreadId(), 0)
win32functions.WaitGuiThreadIdle(self.handle)
return self.handle == focused
def set_keyboard_focus(self):
"""Set the keyboard focus to this control."""
control_thread = win32functions.GetWindowThreadProcessId(self.handle, None)
win32process.AttachThreadInput(control_thread, win32api.GetCurrentThreadId(), 1)
win32functions.SetFocus(self.handle)
win32process.AttachThreadInput(control_thread, win32api.GetCurrentThreadId(), 0)
win32functions.WaitGuiThreadIdle(self.handle)
time.sleep(Timings.after_setfocus_wait)
return self
# -----------------------------------------------------------
def set_application_data(self, appdata):
"""Application data is data from a previous run of the software
        It is essential for running scripts written for one spoken language
        against a different spoken language
"""
self.appdata = appdata
_scroll_types = {"left": {
"line" : win32defines.SB_LINELEFT,
"page" : win32defines.SB_PAGELEFT,
"end" : win32defines.SB_LEFT,
},
"right": {
"line" : win32defines.SB_LINERIGHT,
"page" : win32defines.SB_PAGERIGHT,
"end" : win32defines.SB_RIGHT,
},
"up": {
"line" : win32defines.SB_LINEUP,
"page" : win32defines.SB_PAGEUP,
"end" : win32defines.SB_TOP,
},
"down": {
"line" : win32defines.SB_LINEDOWN,
"page" : win32defines.SB_PAGEDOWN,
"end" : win32defines.SB_BOTTOM,
},
}
# Non PEP-8 alias
SetApplicationData = deprecated(set_application_data)
# -----------------------------------------------------------
def scroll(self, direction, amount, count = 1, retry_interval = None):
"""Ask the control to scroll itself
**direction** can be any of "up", "down", "left", "right"
**amount** can be one of "line", "page", "end"
**count** (optional) the number of times to scroll
"""
self._ensure_enough_privileges('WM_HSCROLL/WM_VSCROLL')
# check which message we want to send
if direction.lower() in ("left", "right"):
message = win32defines.WM_HSCROLL
elif direction.lower() in ("up", "down"):
message = win32defines.WM_VSCROLL
# the constant that matches direction, and how much
try:
scroll_type = \
self._scroll_types[direction.lower()][amount.lower()]
except KeyError:
raise ValueError("""Wrong arguments:
direction can be any of "up", "down", "left", "right"
amount can be any of "line", "page", "end"
""")
# Scroll as often as we have been asked to
if retry_interval is None:
retry_interval = Timings.scroll_step_wait
while count > 0:
self.send_message(message, scroll_type)
time.sleep(retry_interval)
count -= 1
return self
# Non PEP-8 alias
Scroll = deprecated(scroll)
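    # Usage sketch (the `listview` control is hypothetical):
    #
    #   listview.scroll('down', 'page', count=3)  # three WM_VSCROLL page-downs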
# -----------------------------------------------------------
def get_toolbar(self):
"""Get the first child toolbar if it exists"""
for child in self.children():
if child.__class__.__name__ == 'ToolbarWrapper':
return child
return None
# Non PEP-8 alias
GetToolbar = deprecated(get_toolbar)
# Non PEP-8 aliases for Win32Wrapper methods
# We keep them for the backward compatibility in legacy scripts
ClickInput = deprecated(WinBaseWrapper.click_input)
DoubleClickInput = deprecated(WinBaseWrapper.double_click_input)
RightClickInput = deprecated(WinBaseWrapper.right_click_input)
VerifyVisible = deprecated(WinBaseWrapper.verify_visible)
_NeedsImageProp = deprecated(WinBaseWrapper._needs_image_prop, deprecated_name='_NeedsImageProp')
FriendlyClassName = deprecated(WinBaseWrapper.friendly_class_name)
Class = deprecated(WinBaseWrapper.class_name, deprecated_name='Class')
WindowText = deprecated(WinBaseWrapper.window_text)
ControlID = deprecated(WinBaseWrapper.control_id, deprecated_name='ControlID')
IsVisible = deprecated(WinBaseWrapper.is_visible)
IsEnabled = deprecated(WinBaseWrapper.is_enabled)
Rectangle = deprecated(WinBaseWrapper.rectangle)
ClientToScreen = deprecated(WinBaseWrapper.client_to_screen)
ProcessID = deprecated(WinBaseWrapper.process_id, deprecated_name='ProcessID')
IsDialog = deprecated(WinBaseWrapper.is_dialog)
Parent = deprecated(WinBaseWrapper.parent)
TopLevelParent = deprecated(WinBaseWrapper.top_level_parent)
Texts = deprecated(WinBaseWrapper.texts)
Children = deprecated(WinBaseWrapper.children)
CaptureAsImage = deprecated(WinBaseWrapper.capture_as_image)
GetProperties = deprecated(WinBaseWrapper.get_properties)
DrawOutline = deprecated(WinBaseWrapper.draw_outline)
IsChild = deprecated(WinBaseWrapper.is_child)
VerifyActionable = deprecated(WinBaseWrapper.verify_actionable)
VerifyEnabled = deprecated(WinBaseWrapper.verify_enabled)
PressMouseInput = deprecated(WinBaseWrapper.press_mouse_input)
ReleaseMouseInput = deprecated(WinBaseWrapper.release_mouse_input)
MoveMouseInput = deprecated(WinBaseWrapper.move_mouse_input)
DragMouseInput = deprecated(WinBaseWrapper.drag_mouse_input)
WheelMouseInput = deprecated(WinBaseWrapper.wheel_mouse_input)
TypeKeys = deprecated(WinBaseWrapper.type_keys)
#====================================================================
# the main reason for this is just to make sure that
# a Dialog is a known class - and we don't need to take
# an image of it (as an unknown control class)
class DialogWrapper(HwndWrapper):
"""Wrap a dialog"""
friendlyclassname = "Dialog"
#windowclasses = ["#32770", ]
can_be_label = True
#-----------------------------------------------------------
def __init__(self, hwnd):
"""Initialize the DialogWrapper
The only extra functionality here is to modify self.friendlyclassname
to make it "Dialog" if the class is "#32770" otherwise to leave it
the same as the window class.
"""
HwndWrapper.__init__(self, hwnd)
if self.class_name() == "#32770":
self.friendlyclassname = "Dialog"
else:
self.friendlyclassname = self.class_name()
#-----------------------------------------------------------
def run_tests(self, tests_to_run = None, ref_controls = None):
"""Run the tests on dialog"""
# the tests package is imported only when running unittests
from .. import tests
# get all the controls
controls = [self] + self.children()
# add the reference controls
if ref_controls is not None:
matched_flags = controlproperties.SetReferenceControls(
controls, ref_controls)
# todo: allow some checking of how well the controls matched
# matched_flags says how well they matched
# 1 = same number of controls
# 2 = ID's matched
# 4 = control classes matched
# i.e. 1 + 2 + 4 = perfect match
return tests.run_tests(controls, tests_to_run)
# Non PEP-8 alias
RunTests = deprecated(run_tests)
#-----------------------------------------------------------
def write_to_xml(self, filename):
"""Write the dialog an XML file (requires elementtree)"""
controls = [self] + self.children()
props = [ctrl.get_properties() for ctrl in controls]
from .. import xml_helpers
xml_helpers.WriteDialogToFile(filename, props)
# Non PEP-8 alias
WriteToXML = deprecated(write_to_xml)
#-----------------------------------------------------------
def client_area_rect(self):
"""Return the client area rectangle
From MSDN:
The client area of a control is the bounds of the control, minus the
nonclient elements such as scroll bars, borders, title bars, and
menus.
"""
rect = win32structures.RECT(self.rectangle())
self.send_message(win32defines.WM_NCCALCSIZE, 0, ctypes.byref(rect))
return rect
# Non PEP-8 alias
ClientAreaRect = deprecated(client_area_rect)
#-----------------------------------------------------------
def hide_from_taskbar(self):
"""Hide the dialog from the Windows taskbar"""
win32functions.ShowWindow(self, win32defines.SW_HIDE)
win32functions.SetWindowLongPtr(self, win32defines.GWL_EXSTYLE, self.exstyle() | win32defines.WS_EX_TOOLWINDOW)
win32functions.ShowWindow(self, win32defines.SW_SHOW)
# Non PEP-8 alias
HideFromTaskbar = deprecated(hide_from_taskbar)
#-----------------------------------------------------------
def show_in_taskbar(self):
"""Show the dialog in the Windows taskbar"""
win32functions.ShowWindow(self, win32defines.SW_HIDE)
win32functions.SetWindowLongPtr(self, win32defines.GWL_EXSTYLE,
self.exstyle() | win32defines.WS_EX_APPWINDOW)
win32functions.ShowWindow(self, win32defines.SW_SHOW)
# Non PEP-8 alias
ShowInTaskbar = deprecated(show_in_taskbar)
#-----------------------------------------------------------
def is_in_taskbar(self):
"""Check whether the dialog is shown in the Windows taskbar
Thanks to David Heffernan for the idea:
http://stackoverflow.com/questions/30933219/hide-window-from-taskbar-without-using-ws-ex-toolwindow
A window is represented in the taskbar if:
It has no owner and it does not have the WS_EX_TOOLWINDOW extended style,
or it has the WS_EX_APPWINDOW extended style.
"""
return self.has_exstyle(win32defines.WS_EX_APPWINDOW) or \
(self.owner() is None and not self.has_exstyle(win32defines.WS_EX_TOOLWINDOW))
# Non PEP-8 alias
IsInTaskbar = deprecated(is_in_taskbar)
#-----------------------------------------------------------
def force_close(self):
"""Close the dialog forcefully using WM_QUERYENDSESSION and return the result
Window has let us know that it doesn't want to die - so we abort
this means that the app is not hung - but knows it doesn't want
to close yet - e.g. it is asking the user if they want to save.
"""
self.send_message_timeout(
win32defines.WM_QUERYENDSESSION,
timeout = .5,
timeoutflags = (win32defines.SMTO_ABORTIFHUNG)) # |
#win32defines.SMTO_NOTIMEOUTIFNOTHUNG)) # |
#win32defines.SMTO_BLOCK)
# get a handle we can wait on
pid = ctypes.c_ulong()
win32functions.GetWindowThreadProcessId(self.handle, ctypes.byref(pid))
try:
process_wait_handle = win32api.OpenProcess(
win32con.SYNCHRONIZE | win32con.PROCESS_TERMINATE,
0,
pid.value)
except win32gui.error:
return True # already closed
result = win32event.WaitForSingleObject(
process_wait_handle,
int(Timings.after_windowclose_timeout * 1000))
return result != win32con.WAIT_TIMEOUT
# #-----------------------------------------------------------
# def read_controls_from_xml(self, filename):
# from pywinauto import xml_helpers
# [controlproperties.ControlProps(ctrl) for
# ctrl in xml_helpers.ReadPropertiesFromFile(handle)]
# # Non PEP-8 alias
# ReadControlsFromXML = deprecated(read_controls_from_xml)
# #-----------------------------------------------------------
# def add_reference(self, reference):
#
# if len(self.children() != len(reference)):
# raise "different number of reference controls"
#
# for i, ctrl in enumerate(reference):
# # loop over each of the controls
# # and set the reference
# if isinstance(ctrl, dict):
# ctrl = CtrlProps(ctrl)
#
# self.
# if ctrl.class_name() != self.children()[i+1].class_name():
# print "different classes"
# # Non PEP-8 alias
# AddReference = deprecated(add_reference)
#====================================================================
def _perform_click(
ctrl,
button = "left",
pressed = "",
coords = (0, 0),
double = False,
button_down = True,
button_up = True,
absolute = False,
):
"""Low level method for performing click operations"""
if ctrl is None:
ctrl = HwndWrapper(win32functions.GetDesktopWindow())
ctrl.verify_actionable()
ctrl_text = ctrl.window_text()
    if ctrl_text is None:
        # ensure the logged text is a string even for windows with no text
        ctrl_text = six.text_type(ctrl_text)
ctrl_friendly_class_name = ctrl.friendly_class_name()
if isinstance(coords, win32structures.RECT):
coords = coords.mid_point()
# allow points objects to be passed as the coords
elif isinstance(coords, win32structures.POINT):
coords = [coords.x, coords.y]
else:
coords = list(coords)
if absolute:
coords = ctrl.client_to_screen(coords)
# figure out the messages for click/press
msgs = []
if not double:
if button.lower() == 'left':
if button_down:
msgs.append(win32defines.WM_LBUTTONDOWN)
if button_up:
msgs.append(win32defines.WM_LBUTTONUP)
elif button.lower() == 'middle':
if button_down:
msgs.append(win32defines.WM_MBUTTONDOWN)
if button_up:
msgs.append(win32defines.WM_MBUTTONUP)
elif button.lower() == 'right':
if button_down:
msgs.append(win32defines.WM_RBUTTONDOWN)
if button_up:
msgs.append(win32defines.WM_RBUTTONUP)
elif button.lower() == 'move':
msgs.append(win32defines.WM_MOUSEMOVE)
# figure out the messages for double clicking
else:
if button.lower() == 'left':
msgs = (
win32defines.WM_LBUTTONDOWN,
win32defines.WM_LBUTTONUP,
win32defines.WM_LBUTTONDBLCLK,
win32defines.WM_LBUTTONUP)
elif button.lower() == 'middle':
msgs = (
win32defines.WM_MBUTTONDOWN,
win32defines.WM_MBUTTONUP,
win32defines.WM_MBUTTONDBLCLK,
win32defines.WM_MBUTTONUP)
elif button.lower() == 'right':
msgs = (
win32defines.WM_RBUTTONDOWN,
win32defines.WM_RBUTTONUP,
win32defines.WM_RBUTTONDBLCLK,
win32defines.WM_RBUTTONUP)
elif button.lower() == 'move':
msgs.append(win32defines.WM_MOUSEMOVE)
# figure out the flags and pack coordinates
flags, click_point = _calc_flags_and_coords(pressed, coords)
#control_thread = win32functions.GetWindowThreadProcessId(ctrl, None)
#win32functions.AttachThreadInput(win32functions.GetCurrentThreadId(), control_thread, win32defines.TRUE)
# TODO: check return value of AttachThreadInput properly
# send each message
for msg in msgs:
win32functions.PostMessage(ctrl, msg, win32structures.WPARAM(flags), win32structures.LPARAM(click_point))
#ctrl.post_message(msg, flags, click_point)
#flags = 0
time.sleep(Timings.sendmessagetimeout_timeout)
# wait until the thread can accept another message
win32functions.WaitGuiThreadIdle(ctrl.handle)
# detach the Python process with the process that self is in
#win32functions.AttachThreadInput(win32functions.GetCurrentThreadId(), control_thread, win32defines.FALSE)
# TODO: check return value of AttachThreadInput properly
# wait a certain(short) time after the click
time.sleep(Timings.after_click_wait)
if button.lower() == 'move':
message = 'Moved mouse over ' + ctrl_friendly_class_name + ' "' + ctrl_text + \
'" to screen point ' + str(tuple(coords)) + ' by WM_MOUSEMOVE'
else:
message = 'Clicked ' + ctrl_friendly_class_name + ' "' + ctrl_text + \
'" by ' + str(button) + ' button event ' + str(tuple(coords))
if double:
message = 'Double-c' + message[1:]
ActionLogger().log(message)
_mouse_flags = {
"left": win32defines.MK_LBUTTON,
"right": win32defines.MK_RBUTTON,
"middle": win32defines.MK_MBUTTON,
"shift": win32defines.MK_SHIFT,
"control": win32defines.MK_CONTROL,
}
#====================================================================
def _calc_flags_and_coords(pressed, coords):
"""Calculate the flags to use and the coordinates for mouse actions"""
flags = 0
for key in pressed.split():
flags |= _mouse_flags[key.lower()]
click_point = win32functions.MakeLong(coords[1], coords[0])
return flags, click_point
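# Packing sketch: "shift control" at client coords (10, 20) yields
# flags == MK_SHIFT | MK_CONTROL and, assuming MakeLong(high, low) packs its
# first argument into the high word, click_point == (20 << 16) | 10 - that is,
# y in the high word and x in the low word, as WM_*BUTTON* lParams expect.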
#====================================================================
class _DummyControl(dict):
"""A subclass of dict so that we can assign attributes"""
pass
#====================================================================
def get_dialog_props_from_handle(hwnd):
"""Get the properties of all the controls as a list of dictionaries"""
# wrap the dialog handle and start a new list for the
# controls on the dialog
try:
controls = [hwnd, ]
controls.extend(hwnd.children())
except AttributeError:
controls = [HwndWrapper(hwnd), ]
# add all the children of the dialog
controls.extend(controls[0].children())
props = []
# Add each control to the properties for this dialog
for ctrl in controls:
# Get properties for each control and wrap them in
# _DummyControl so that we can assign handle
ctrl_props = _DummyControl(ctrl.get_properties())
# assign the handle
ctrl_props.handle = ctrl.handle
# offset the rectangle from the dialog rectangle
ctrl_props['rectangle'] -= controls[0].rectangle()
props.append(ctrl_props)
return props
# Non PEP-8 alias
GetDialogPropsFromHandle = deprecated(get_dialog_props_from_handle)
backend.register('win32', HwndElementInfo, HwndWrapper)
backend.registry.backends['win32'].dialog_class = DialogWrapper
backend.activate('win32') # default
|
bsd-3-clause
|
drayanaindra/shoop
|
shoop_tests/simple_cms/test_date_logic.py
|
7
|
1781
|
# This file is part of Shoop.
#
# Copyright (c) 2012-2015, Shoop Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
import datetime
from django.utils.timezone import now
import pytest
from shoop.simple_cms.models import Page
from shoop_tests.simple_cms.utils import create_page
@pytest.mark.django_db
def test_none_dates_page_not_visible():
    # create a page with no availability dates - it should not be visible
page = create_page()
assert not Page.objects.visible().filter(pk=page.pk).exists()
assert not page.is_visible()
@pytest.mark.django_db
def test_past_page_not_visible():
today = now()
page = create_page(
available_from=(today - datetime.timedelta(days=2)),
available_to=(today - datetime.timedelta(days=1)),
)
assert not Page.objects.visible().filter(pk=page.pk).exists()
assert not page.is_visible()
@pytest.mark.django_db
def test_future_page_not_visible():
today = now()
page = create_page(
available_from=(today + datetime.timedelta(days=1)),
available_to=(today + datetime.timedelta(days=2)),
)
assert not Page.objects.visible().filter(pk=page.pk).exists()
assert not page.is_visible()
@pytest.mark.django_db
def test_current_page_is_visible():
today = now()
page = create_page(available_from=today, available_to=today)
assert Page.objects.visible(today).filter(pk=page.pk).exists()
assert page.is_visible(today)
@pytest.mark.django_db
def test_page_without_visibility_end_is_visible():
today = now()
page = create_page(available_from=today, available_to=None)
assert Page.objects.visible(today).filter(pk=page.pk).exists()
assert page.is_visible(today)
|
agpl-3.0
|
econchick/heroku-buildpack-python
|
vendor/pip-1.2.1/setup.py
|
10
|
2142
|
import sys
import os
from setuptools import setup
# If you change this version, change it also in docs/conf.py
version = "1.1"
doc_dir = os.path.join(os.path.abspath(os.path.dirname(__file__)), "docs")
index_filename = os.path.join(doc_dir, "index.txt")
news_filename = os.path.join(doc_dir, "news.txt")
long_description = """
The main website for pip is `www.pip-installer.org
<http://www.pip-installer.org>`_. You can also install
the `in-development version <https://github.com/pypa/pip/tarball/develop#egg=pip-dev>`_
of pip with ``easy_install pip==dev``.
"""
f = open(index_filename)
# remove the toctree from sphinx index, as it breaks long_description
parts = f.read().split("split here", 2)
long_description = parts[0] + long_description + parts[2]
f.close()
f = open(news_filename)
long_description += "\n\n" + f.read()
f.close()
setup(name="pip",
version=version,
description="pip installs packages. Python packages. An easy_install replacement",
long_description=long_description,
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Topic :: Software Development :: Build Tools',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.4',
'Programming Language :: Python :: 2.5',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.1',
'Programming Language :: Python :: 3.2',
],
keywords='easy_install distutils setuptools egg virtualenv',
author='The pip developers',
author_email='[email protected]',
url='http://www.pip-installer.org',
license='MIT',
packages=['pip', 'pip.commands', 'pip.vcs'],
entry_points=dict(console_scripts=['pip=pip:main', 'pip-%s=pip:main' % sys.version[:3]]),
test_suite='nose.collector',
tests_require=['nose', 'virtualenv>=1.7', 'scripttest>=1.1.1', 'mock'],
zip_safe=False)
|
mit
|
sigmavirus24/pip
|
pip/baseparser.py
|
2
|
7460
|
"""Base option parser setup"""
from __future__ import absolute_import
import sys
import optparse
import textwrap
from distutils.util import strtobool
from pip._vendor.six import string_types
from pip.configuration import Configuration
from pip.utils import get_terminal_size
class PrettyHelpFormatter(optparse.IndentedHelpFormatter):
"""A prettier/less verbose help formatter for optparse."""
def __init__(self, *args, **kwargs):
# help position must be aligned with __init__.parseopts.description
kwargs['max_help_position'] = 30
kwargs['indent_increment'] = 1
kwargs['width'] = get_terminal_size()[0] - 2
optparse.IndentedHelpFormatter.__init__(self, *args, **kwargs)
def format_option_strings(self, option):
return self._format_option_strings(option, ' <%s>', ', ')
def _format_option_strings(self, option, mvarfmt=' <%s>', optsep=', '):
"""
Return a comma-separated list of option strings and metavars.
:param option: tuple of (short opt, long opt), e.g: ('-f', '--format')
:param mvarfmt: metavar format string - evaluated as mvarfmt % metavar
:param optsep: separator
"""
opts = []
if option._short_opts:
opts.append(option._short_opts[0])
if option._long_opts:
opts.append(option._long_opts[0])
if len(opts) > 1:
opts.insert(1, optsep)
if option.takes_value():
metavar = option.metavar or option.dest.lower()
opts.append(mvarfmt % metavar.lower())
return ''.join(opts)
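    # Output sketch: an option with short opt '-f', long opt '--format' and
    # metavar 'FMT' renders as '-f, --format <fmt>' with the defaults above.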
def format_heading(self, heading):
if heading == 'Options':
return ''
return heading + ':\n'
def format_usage(self, usage):
"""
Ensure there is only one newline between usage and the first heading
if there is no description.
"""
msg = '\nUsage: %s\n' % self.indent_lines(textwrap.dedent(usage), " ")
return msg
def format_description(self, description):
# leave full control over description to us
if description:
if hasattr(self.parser, 'main'):
label = 'Commands'
else:
label = 'Description'
# some doc strings have initial newlines, some don't
description = description.lstrip('\n')
# some doc strings have final newlines and spaces, some don't
description = description.rstrip()
# dedent, then reindent
description = self.indent_lines(textwrap.dedent(description), " ")
description = '%s:\n%s\n' % (label, description)
return description
else:
return ''
def format_epilog(self, epilog):
# leave full control over epilog to us
if epilog:
return epilog
else:
return ''
def indent_lines(self, text, indent):
new_lines = [indent + line for line in text.split('\n')]
return "\n".join(new_lines)
class UpdatingDefaultsHelpFormatter(PrettyHelpFormatter):
"""Custom help formatter for use in ConfigOptionParser.
    This updates the defaults before expanding them, allowing
them to show up correctly in the help listing.
"""
def expand_default(self, option):
if self.parser is not None:
self.parser._update_defaults(self.parser.defaults)
return optparse.IndentedHelpFormatter.expand_default(self, option)
class CustomOptionParser(optparse.OptionParser):
def insert_option_group(self, idx, *args, **kwargs):
"""Insert an OptionGroup at a given position."""
group = self.add_option_group(*args, **kwargs)
self.option_groups.pop()
self.option_groups.insert(idx, group)
return group
@property
def option_list_all(self):
"""Get a list of all options, including those in option groups."""
res = self.option_list[:]
for i in self.option_groups:
res.extend(i.option_list)
return res
class ConfigOptionParser(CustomOptionParser):
"""Custom option parser which updates its defaults by checking the
    configuration files and environment variables"""
def __init__(self, *args, **kwargs):
self.name = kwargs.pop('name')
isolated = kwargs.pop("isolated", False)
self.config = Configuration(isolated)
assert self.name
optparse.OptionParser.__init__(self, *args, **kwargs)
def check_default(self, option, key, val):
try:
return option.check_value(key, val)
except optparse.OptionValueError as exc:
print("An error occurred during configuration: %s" % exc)
sys.exit(3)
def _update_defaults(self, defaults):
"""Updates the given defaults with values from the config files and
the environ. Does a little special handling for certain types of
options (lists)."""
# Load the configuration
self.config.load(self.name)
# Accumulate complex default state.
self.values = optparse.Values(self.defaults)
late_eval = set()
# Then set the options with those values
for key, val in self.config.items():
# ignore empty values
if not val:
continue
# '--' because configuration supports only long names
option = self.get_option('--' + key)
# Ignore options not present in this parser. E.g. non-globals put
# in [global] by users that want them to apply to all applicable
# commands.
if option is None:
continue
if option.action in ('store_true', 'store_false', 'count'):
val = strtobool(val)
elif option.action == 'append':
val = val.split()
val = [self.check_default(option, key, v) for v in val]
elif option.action == 'callback':
late_eval.add(option.dest)
opt_str = option.get_opt_string()
val = option.convert_value(opt_str, val)
# From take_action
args = option.callback_args or ()
kwargs = option.callback_kwargs or {}
option.callback(option, opt_str, val, self, *args, **kwargs)
else:
val = self.check_default(option, key, val)
defaults[option.dest] = val
for key in late_eval:
defaults[key] = getattr(self.values, key)
self.values = None
return defaults
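    # Worked example (illustrative): for an 'append' option such as
    # '--find-links', a config value of "url1 url2" is split on whitespace
    # and checked element-wise, so the default becomes ['url1', 'url2'].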
def get_default_values(self):
"""Overriding to make updating the defaults after instantiation of
the option parser possible, _update_defaults() does the dirty work."""
if not self.process_default_values:
# Old, pre-Optik 1.5 behaviour.
return optparse.Values(self.defaults)
defaults = self._update_defaults(self.defaults.copy()) # ours
for option in self._get_all_options():
default = defaults.get(option.dest)
if isinstance(default, string_types):
opt_str = option.get_opt_string()
defaults[option.dest] = option.check_value(opt_str, default)
return optparse.Values(defaults)
def error(self, msg):
self.print_usage(sys.stderr)
self.exit(2, "%s\n" % msg)
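# Minimal usage sketch (hypothetical, for illustration only):
#
#     parser = ConfigOptionParser(name='install', usage='%prog [options]')
#     parser.add_option('--timeout', dest='timeout', type='float', default=15.0)
#     options, args = parser.parse_args(['--timeout', '30'])
#
# Defaults flow from the configuration files and environment variables via
# _update_defaults() before the command line is parsed.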
|
mit
|
fyffyt/scikit-learn
|
sklearn/preprocessing/tests/test_data.py
|
71
|
38516
|
import warnings
import numpy as np
import numpy.linalg as la
from scipy import sparse
from distutils.version import LooseVersion
from sklearn.utils.testing import assert_almost_equal, clean_warning_registry
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_less_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.sparsefuncs import mean_variance_axis
from sklearn.preprocessing.data import _transform_selected
from sklearn.preprocessing.data import Binarizer
from sklearn.preprocessing.data import KernelCenterer
from sklearn.preprocessing.data import Normalizer
from sklearn.preprocessing.data import normalize
from sklearn.preprocessing.data import OneHotEncoder
from sklearn.preprocessing.data import StandardScaler
from sklearn.preprocessing.data import scale
from sklearn.preprocessing.data import MinMaxScaler
from sklearn.preprocessing.data import minmax_scale
from sklearn.preprocessing.data import MaxAbsScaler
from sklearn.preprocessing.data import maxabs_scale
from sklearn.preprocessing.data import RobustScaler
from sklearn.preprocessing.data import robust_scale
from sklearn.preprocessing.data import add_dummy_feature
from sklearn.preprocessing.data import PolynomialFeatures
from sklearn.utils.validation import DataConversionWarning
from sklearn import datasets
iris = datasets.load_iris()
def toarray(a):
if hasattr(a, "toarray"):
a = a.toarray()
return a
def test_polynomial_features():
# Test Polynomial Features
X1 = np.arange(6)[:, np.newaxis]
P1 = np.hstack([np.ones_like(X1),
X1, X1 ** 2, X1 ** 3])
deg1 = 3
X2 = np.arange(6).reshape((3, 2))
x1 = X2[:, :1]
x2 = X2[:, 1:]
P2 = np.hstack([x1 ** 0 * x2 ** 0,
x1 ** 1 * x2 ** 0,
x1 ** 0 * x2 ** 1,
x1 ** 2 * x2 ** 0,
x1 ** 1 * x2 ** 1,
x1 ** 0 * x2 ** 2])
deg2 = 2
for (deg, X, P) in [(deg1, X1, P1), (deg2, X2, P2)]:
P_test = PolynomialFeatures(deg, include_bias=True).fit_transform(X)
assert_array_almost_equal(P_test, P)
P_test = PolynomialFeatures(deg, include_bias=False).fit_transform(X)
assert_array_almost_equal(P_test, P[:, 1:])
interact = PolynomialFeatures(2, interaction_only=True, include_bias=True)
X_poly = interact.fit_transform(X)
assert_array_almost_equal(X_poly, P2[:, [0, 1, 2, 4]])
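    # Sanity note (illustrative): with n_features=2 and degree=2 the full
    # polynomial basis has binom(2 + 2, 2) = 6 columns, matching P2 above.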
@ignore_warnings
def test_scaler_1d():
# Test scaling of dataset along single axis
rng = np.random.RandomState(0)
X = rng.randn(5)
X_orig_copy = X.copy()
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=False)
assert_array_almost_equal(X_scaled.mean(axis=0), 0.0)
assert_array_almost_equal(X_scaled.std(axis=0), 1.0)
# check inverse transform
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_array_almost_equal(X_scaled_back, X_orig_copy)
# Test with 1D list
X = [0., 1., 2, 0.4, 1.]
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=False)
assert_array_almost_equal(X_scaled.mean(axis=0), 0.0)
assert_array_almost_equal(X_scaled.std(axis=0), 1.0)
X_scaled = scale(X)
assert_array_almost_equal(X_scaled.mean(axis=0), 0.0)
assert_array_almost_equal(X_scaled.std(axis=0), 1.0)
X = np.ones(5)
assert_array_equal(scale(X, with_mean=False), X)
def test_standard_scaler_numerical_stability():
"""Test numerical stability of scaling"""
# np.log(1e-5) is taken because of its floating point representation
# was empirically found to cause numerical problems with np.mean & np.std.
x = np.zeros(8, dtype=np.float64) + np.log(1e-5, dtype=np.float64)
if LooseVersion(np.__version__) >= LooseVersion('1.9'):
# This does not raise a warning as the number of samples is too low
# to trigger the problem in recent numpy
x_scaled = assert_no_warnings(scale, x)
assert_array_almost_equal(scale(x), np.zeros(8))
else:
w = "standard deviation of the data is probably very close to 0"
x_scaled = assert_warns_message(UserWarning, w, scale, x)
assert_array_almost_equal(x_scaled, np.zeros(8))
# with 2 more samples, the std computation run into numerical issues:
x = np.zeros(10, dtype=np.float64) + np.log(1e-5, dtype=np.float64)
w = "standard deviation of the data is probably very close to 0"
x_scaled = assert_warns_message(UserWarning, w, scale, x)
assert_array_almost_equal(x_scaled, np.zeros(10))
x = np.ones(10, dtype=np.float64) * 1e-100
x_small_scaled = assert_no_warnings(scale, x)
assert_array_almost_equal(x_small_scaled, np.zeros(10))
# Large values can cause (often recoverable) numerical stability issues:
x_big = np.ones(10, dtype=np.float64) * 1e100
w = "Dataset may contain too large values"
x_big_scaled = assert_warns_message(UserWarning, w, scale, x_big)
assert_array_almost_equal(x_big_scaled, np.zeros(10))
assert_array_almost_equal(x_big_scaled, x_small_scaled)
x_big_centered = assert_warns_message(UserWarning, w, scale, x_big,
with_std=False)
assert_array_almost_equal(x_big_centered, np.zeros(10))
assert_array_almost_equal(x_big_centered, x_small_scaled)
def test_scaler_2d_arrays():
# Test scaling of 2d array along first axis
rng = np.random.RandomState(0)
X = rng.randn(4, 5)
    X[:, 0] = 0.0  # first feature is always zero
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=True)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=0), 5 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has been copied
assert_true(X_scaled is not X)
# check inverse transform
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_true(X_scaled_back is not X)
assert_true(X_scaled_back is not X_scaled)
assert_array_almost_equal(X_scaled_back, X)
X_scaled = scale(X, axis=1, with_std=False)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=1), 4 * [0.0])
X_scaled = scale(X, axis=1, with_std=True)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=1), 4 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=1), 4 * [1.0])
# Check that the data hasn't been modified
assert_true(X_scaled is not X)
X_scaled = scaler.fit(X).transform(X, copy=False)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=0), 5 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has not been copied
assert_true(X_scaled is X)
X = rng.randn(4, 5)
    X[:, 0] = 1.0  # first feature is a constant, non-zero feature
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=True)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=0), 5 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has not been copied
assert_true(X_scaled is not X)
def test_min_max_scaler_iris():
X = iris.data
scaler = MinMaxScaler()
# default params
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(X_trans.min(axis=0), 0)
assert_array_almost_equal(X_trans.max(axis=0), 1)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# not default params: min=1, max=2
scaler = MinMaxScaler(feature_range=(1, 2))
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(X_trans.min(axis=0), 1)
assert_array_almost_equal(X_trans.max(axis=0), 2)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# min=-.5, max=.6
scaler = MinMaxScaler(feature_range=(-.5, .6))
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(X_trans.min(axis=0), -.5)
assert_array_almost_equal(X_trans.max(axis=0), .6)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# raises on invalid range
scaler = MinMaxScaler(feature_range=(2, 1))
assert_raises(ValueError, scaler.fit, X)
def test_min_max_scaler_zero_variance_features():
# Check min max scaler on toy data with zero variance features
X = [[0., 1., +0.5],
[0., 1., -0.1],
[0., 1., +1.1]]
X_new = [[+0., 2., 0.5],
[-1., 1., 0.0],
[+0., 1., 1.5]]
# default params
scaler = MinMaxScaler()
X_trans = scaler.fit_transform(X)
X_expected_0_1 = [[0., 0., 0.5],
[0., 0., 0.0],
[0., 0., 1.0]]
assert_array_almost_equal(X_trans, X_expected_0_1)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
X_trans_new = scaler.transform(X_new)
X_expected_0_1_new = [[+0., 1., 0.500],
[-1., 0., 0.083],
[+0., 0., 1.333]]
assert_array_almost_equal(X_trans_new, X_expected_0_1_new, decimal=2)
# not default params
scaler = MinMaxScaler(feature_range=(1, 2))
X_trans = scaler.fit_transform(X)
X_expected_1_2 = [[1., 1., 1.5],
[1., 1., 1.0],
[1., 1., 2.0]]
assert_array_almost_equal(X_trans, X_expected_1_2)
# function interface
X_trans = minmax_scale(X)
assert_array_almost_equal(X_trans, X_expected_0_1)
X_trans = minmax_scale(X, feature_range=(1, 2))
assert_array_almost_equal(X_trans, X_expected_1_2)
def test_minmax_scale_axis1():
X = iris.data
X_trans = minmax_scale(X, axis=1)
assert_array_almost_equal(np.min(X_trans, axis=1), 0)
assert_array_almost_equal(np.max(X_trans, axis=1), 1)
@ignore_warnings
def test_min_max_scaler_1d():
# Test scaling of dataset along single axis
rng = np.random.RandomState(0)
X = rng.randn(5)
X_orig_copy = X.copy()
scaler = MinMaxScaler()
X_scaled = scaler.fit(X).transform(X)
assert_array_almost_equal(X_scaled.min(axis=0), 0.0)
assert_array_almost_equal(X_scaled.max(axis=0), 1.0)
# check inverse transform
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_array_almost_equal(X_scaled_back, X_orig_copy)
# Test with 1D list
X = [0., 1., 2, 0.4, 1.]
scaler = MinMaxScaler()
X_scaled = scaler.fit(X).transform(X)
assert_array_almost_equal(X_scaled.min(axis=0), 0.0)
assert_array_almost_equal(X_scaled.max(axis=0), 1.0)
# Constant feature.
X = np.zeros(5)
scaler = MinMaxScaler()
X_scaled = scaler.fit(X).transform(X)
assert_greater_equal(X_scaled.min(), 0.)
assert_less_equal(X_scaled.max(), 1.)
def test_scaler_without_centering():
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
    X[:, 0] = 0.0  # first feature is always zero
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
assert_raises(ValueError, StandardScaler().fit, X_csr)
null_transform = StandardScaler(with_mean=False, with_std=False, copy=True)
X_null = null_transform.fit_transform(X_csr)
assert_array_equal(X_null.data, X_csr.data)
X_orig = null_transform.inverse_transform(X_null)
assert_array_equal(X_orig.data, X_csr.data)
scaler = StandardScaler(with_mean=False).fit(X)
X_scaled = scaler.transform(X, copy=True)
assert_false(np.any(np.isnan(X_scaled)))
scaler_csr = StandardScaler(with_mean=False).fit(X_csr)
X_csr_scaled = scaler_csr.transform(X_csr, copy=True)
assert_false(np.any(np.isnan(X_csr_scaled.data)))
scaler_csc = StandardScaler(with_mean=False).fit(X_csc)
X_csc_scaled = scaler_csr.transform(X_csc, copy=True)
assert_false(np.any(np.isnan(X_csc_scaled.data)))
assert_equal(scaler.mean_, scaler_csr.mean_)
assert_array_almost_equal(scaler.std_, scaler_csr.std_)
assert_equal(scaler.mean_, scaler_csc.mean_)
assert_array_almost_equal(scaler.std_, scaler_csc.std_)
assert_array_almost_equal(
X_scaled.mean(axis=0), [0., -0.01, 2.24, -0.35, -0.78], 2)
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis(X_csr_scaled, 0)
assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0))
assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0))
# Check that X has not been modified (copy)
assert_true(X_scaled is not X)
assert_true(X_csr_scaled is not X_csr)
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_true(X_scaled_back is not X)
assert_true(X_scaled_back is not X_scaled)
assert_array_almost_equal(X_scaled_back, X)
X_csr_scaled_back = scaler_csr.inverse_transform(X_csr_scaled)
assert_true(X_csr_scaled_back is not X_csr)
assert_true(X_csr_scaled_back is not X_csr_scaled)
assert_array_almost_equal(X_csr_scaled_back.toarray(), X)
X_csc_scaled_back = scaler_csr.inverse_transform(X_csc_scaled.tocsc())
assert_true(X_csc_scaled_back is not X_csc)
assert_true(X_csc_scaled_back is not X_csc_scaled)
assert_array_almost_equal(X_csc_scaled_back.toarray(), X)
def test_scaler_int():
# test that scaler converts integer input to floating
# for both sparse and dense matrices
rng = np.random.RandomState(42)
X = rng.randint(20, size=(4, 5))
    X[:, 0] = 0  # first feature is always zero
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
null_transform = StandardScaler(with_mean=False, with_std=False, copy=True)
clean_warning_registry()
with warnings.catch_warnings(record=True):
X_null = null_transform.fit_transform(X_csr)
assert_array_equal(X_null.data, X_csr.data)
X_orig = null_transform.inverse_transform(X_null)
assert_array_equal(X_orig.data, X_csr.data)
clean_warning_registry()
with warnings.catch_warnings(record=True):
scaler = StandardScaler(with_mean=False).fit(X)
X_scaled = scaler.transform(X, copy=True)
assert_false(np.any(np.isnan(X_scaled)))
clean_warning_registry()
with warnings.catch_warnings(record=True):
scaler_csr = StandardScaler(with_mean=False).fit(X_csr)
X_csr_scaled = scaler_csr.transform(X_csr, copy=True)
assert_false(np.any(np.isnan(X_csr_scaled.data)))
clean_warning_registry()
with warnings.catch_warnings(record=True):
scaler_csc = StandardScaler(with_mean=False).fit(X_csc)
X_csc_scaled = scaler_csr.transform(X_csc, copy=True)
assert_false(np.any(np.isnan(X_csc_scaled.data)))
assert_equal(scaler.mean_, scaler_csr.mean_)
assert_array_almost_equal(scaler.std_, scaler_csr.std_)
assert_equal(scaler.mean_, scaler_csc.mean_)
assert_array_almost_equal(scaler.std_, scaler_csc.std_)
assert_array_almost_equal(
X_scaled.mean(axis=0),
[0., 1.109, 1.856, 21., 1.559], 2)
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis(
X_csr_scaled.astype(np.float), 0)
assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0))
assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0))
# Check that X has not been modified (copy)
assert_true(X_scaled is not X)
assert_true(X_csr_scaled is not X_csr)
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_true(X_scaled_back is not X)
assert_true(X_scaled_back is not X_scaled)
assert_array_almost_equal(X_scaled_back, X)
X_csr_scaled_back = scaler_csr.inverse_transform(X_csr_scaled)
assert_true(X_csr_scaled_back is not X_csr)
assert_true(X_csr_scaled_back is not X_csr_scaled)
assert_array_almost_equal(X_csr_scaled_back.toarray(), X)
X_csc_scaled_back = scaler_csr.inverse_transform(X_csc_scaled.tocsc())
assert_true(X_csc_scaled_back is not X_csc)
assert_true(X_csc_scaled_back is not X_csc_scaled)
assert_array_almost_equal(X_csc_scaled_back.toarray(), X)
def test_scaler_without_copy():
# Check that StandardScaler.fit does not change input
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
    X[:, 0] = 0.0  # first feature is always zero
X_csr = sparse.csr_matrix(X)
X_copy = X.copy()
StandardScaler(copy=False).fit(X)
assert_array_equal(X, X_copy)
X_csr_copy = X_csr.copy()
StandardScaler(with_mean=False, copy=False).fit(X_csr)
assert_array_equal(X_csr.toarray(), X_csr_copy.toarray())
def test_scale_sparse_with_mean_raise_exception():
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
X_csr = sparse.csr_matrix(X)
# check scaling and fit with direct calls on sparse data
assert_raises(ValueError, scale, X_csr, with_mean=True)
assert_raises(ValueError, StandardScaler(with_mean=True).fit, X_csr)
# check transform and inverse_transform after a fit on a dense array
scaler = StandardScaler(with_mean=True).fit(X)
assert_raises(ValueError, scaler.transform, X_csr)
X_transformed_csr = sparse.csr_matrix(scaler.transform(X))
assert_raises(ValueError, scaler.inverse_transform, X_transformed_csr)
def test_scale_input_finiteness_validation():
# Check if non finite inputs raise ValueError
X = [np.nan, 5, 6, 7, 8]
assert_raises_regex(ValueError,
"Input contains NaN, infinity or a value too large",
scale, X)
X = [np.inf, 5, 6, 7, 8]
assert_raises_regex(ValueError,
"Input contains NaN, infinity or a value too large",
scale, X)
def test_scale_function_without_centering():
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
    X[:, 0] = 0.0  # first feature is always zero
X_csr = sparse.csr_matrix(X)
X_scaled = scale(X, with_mean=False)
assert_false(np.any(np.isnan(X_scaled)))
X_csr_scaled = scale(X_csr, with_mean=False)
assert_false(np.any(np.isnan(X_csr_scaled.data)))
# test csc has same outcome
X_csc_scaled = scale(X_csr.tocsc(), with_mean=False)
assert_array_almost_equal(X_scaled, X_csc_scaled.toarray())
# raises value error on axis != 0
assert_raises(ValueError, scale, X_csr, with_mean=False, axis=1)
assert_array_almost_equal(X_scaled.mean(axis=0),
[0., -0.01, 2.24, -0.35, -0.78], 2)
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has not been copied
assert_true(X_scaled is not X)
X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis(X_csr_scaled, 0)
assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0))
assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0))
def test_robust_scaler_2d_arrays():
"""Test robust scaling of 2d array along first axis"""
rng = np.random.RandomState(0)
X = rng.randn(4, 5)
    X[:, 0] = 0.0  # first feature is always zero
scaler = RobustScaler()
X_scaled = scaler.fit(X).transform(X)
assert_array_almost_equal(np.median(X_scaled, axis=0), 5 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0)[0], 0)
def test_robust_scaler_iris():
X = iris.data
scaler = RobustScaler()
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(np.median(X_trans, axis=0), 0)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
q = np.percentile(X_trans, q=(25, 75), axis=0)
iqr = q[1] - q[0]
assert_array_almost_equal(iqr, 1)
def test_robust_scale_axis1():
X = iris.data
X_trans = robust_scale(X, axis=1)
assert_array_almost_equal(np.median(X_trans, axis=1), 0)
q = np.percentile(X_trans, q=(25, 75), axis=1)
iqr = q[1] - q[0]
assert_array_almost_equal(iqr, 1)
def test_robust_scaler_zero_variance_features():
"""Check RobustScaler on toy data with zero variance features"""
X = [[0., 1., +0.5],
[0., 1., -0.1],
[0., 1., +1.1]]
scaler = RobustScaler()
X_trans = scaler.fit_transform(X)
# NOTE: for such a small sample size, what we expect in the third column
# depends HEAVILY on the method used to calculate quantiles. The values
    # here were calculated to fit the quantiles produced by np.percentile
    # using numpy 1.9. Calculating quantiles with
# scipy.stats.mstats.scoreatquantile or scipy.stats.mstats.mquantiles
# would yield very different results!
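    # Worked example for the third column (assuming numpy's default linear
    # interpolation): np.percentile([0.5, -0.1, 1.1], (25, 50, 75)) gives
    # (0.2, 0.5, 0.8), so (x - 0.5) / (0.8 - 0.2) maps the column onto
    # [0., -1., 1.] as expected below.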
X_expected = [[0., 0., +0.0],
[0., 0., -1.0],
[0., 0., +1.0]]
assert_array_almost_equal(X_trans, X_expected)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# make sure new data gets transformed correctly
X_new = [[+0., 2., 0.5],
[-1., 1., 0.0],
[+0., 1., 1.5]]
X_trans_new = scaler.transform(X_new)
X_expected_new = [[+0., 1., +0.],
[-1., 0., -0.83333],
[+0., 0., +1.66667]]
assert_array_almost_equal(X_trans_new, X_expected_new, decimal=3)
def test_maxabs_scaler_zero_variance_features():
"""Check MaxAbsScaler on toy data with zero variance features"""
X = [[0., 1., +0.5],
[0., 1., -0.3],
[0., 1., +1.5],
[0., 0., +0.0]]
scaler = MaxAbsScaler()
X_trans = scaler.fit_transform(X)
X_expected = [[0., 1., 1.0 / 3.0],
[0., 1., -0.2],
[0., 1., 1.0],
[0., 0., 0.0]]
assert_array_almost_equal(X_trans, X_expected)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# make sure new data gets transformed correctly
X_new = [[+0., 2., 0.5],
[-1., 1., 0.0],
[+0., 1., 1.5]]
X_trans_new = scaler.transform(X_new)
X_expected_new = [[+0., 2.0, 1.0 / 3.0],
[-1., 1.0, 0.0],
[+0., 1.0, 1.0]]
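    # Each column of X_new is divided by the max absolute value seen during
    # fit (1 for the all-zero column, then 1 and 1.5), which is why 0.5 maps
    # to 1/3 here.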
assert_array_almost_equal(X_trans_new, X_expected_new, decimal=2)
# sparse data
X_csr = sparse.csr_matrix(X)
X_trans = scaler.fit_transform(X_csr)
X_expected = [[0., 1., 1.0 / 3.0],
[0., 1., -0.2],
[0., 1., 1.0],
[0., 0., 0.0]]
assert_array_almost_equal(X_trans.A, X_expected)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv.A)
def test_maxabs_scaler_large_negative_value():
"""Check MaxAbsScaler on toy data with a large negative value"""
X = [[0., 1., +0.5, -1.0],
[0., 1., -0.3, -0.5],
[0., 1., -100.0, 0.0],
[0., 0., +0.0, -2.0]]
scaler = MaxAbsScaler()
X_trans = scaler.fit_transform(X)
X_expected = [[0., 1., 0.005, -0.5],
[0., 1., -0.003, -0.25],
[0., 1., -1.0, 0.0],
[0., 0., 0.0, -1.0]]
assert_array_almost_equal(X_trans, X_expected)
def test_warning_scaling_integers():
# Check warning when scaling integer data
X = np.array([[1, 2, 0],
[0, 0, 0]], dtype=np.uint8)
w = "Data with input dtype uint8 was converted to float64"
clean_warning_registry()
assert_warns_message(DataConversionWarning, w, scale, X)
assert_warns_message(DataConversionWarning, w, StandardScaler().fit, X)
assert_warns_message(DataConversionWarning, w, MinMaxScaler().fit, X)
def test_normalizer_l1():
rng = np.random.RandomState(0)
X_dense = rng.randn(4, 5)
X_sparse_unpruned = sparse.csr_matrix(X_dense)
# set the row number 3 to zero
X_dense[3, :] = 0.0
# set the row number 3 to zero without pruning (can happen in real life)
indptr_3 = X_sparse_unpruned.indptr[3]
indptr_4 = X_sparse_unpruned.indptr[4]
X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0
# build the pruned variant using the regular constructor
X_sparse_pruned = sparse.csr_matrix(X_dense)
# check inputs that support the no-copy optim
for X in (X_dense, X_sparse_pruned, X_sparse_unpruned):
normalizer = Normalizer(norm='l1', copy=True)
X_norm = normalizer.transform(X)
assert_true(X_norm is not X)
X_norm1 = toarray(X_norm)
normalizer = Normalizer(norm='l1', copy=False)
X_norm = normalizer.transform(X)
assert_true(X_norm is X)
X_norm2 = toarray(X_norm)
for X_norm in (X_norm1, X_norm2):
row_sums = np.abs(X_norm).sum(axis=1)
for i in range(3):
assert_almost_equal(row_sums[i], 1.0)
assert_almost_equal(row_sums[3], 0.0)
# check input for which copy=False won't prevent a copy
    for init in (sparse.coo_matrix, sparse.csc_matrix, sparse.lil_matrix):
        X = init(X_dense)
        X_norm = Normalizer(norm='l1', copy=False).transform(X)
        assert_true(X_norm is not X)
        assert_true(isinstance(X_norm, sparse.csr_matrix))
        X_norm = toarray(X_norm)
        # recompute the row sums on the transformed data before asserting
        row_sums = np.abs(X_norm).sum(axis=1)
        for i in range(3):
            assert_almost_equal(row_sums[i], 1.0)
        assert_almost_equal(row_sums[3], 0.0)
def test_normalizer_l2():
rng = np.random.RandomState(0)
X_dense = rng.randn(4, 5)
X_sparse_unpruned = sparse.csr_matrix(X_dense)
# set the row number 3 to zero
X_dense[3, :] = 0.0
# set the row number 3 to zero without pruning (can happen in real life)
indptr_3 = X_sparse_unpruned.indptr[3]
indptr_4 = X_sparse_unpruned.indptr[4]
X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0
# build the pruned variant using the regular constructor
X_sparse_pruned = sparse.csr_matrix(X_dense)
# check inputs that support the no-copy optim
for X in (X_dense, X_sparse_pruned, X_sparse_unpruned):
normalizer = Normalizer(norm='l2', copy=True)
X_norm1 = normalizer.transform(X)
assert_true(X_norm1 is not X)
X_norm1 = toarray(X_norm1)
normalizer = Normalizer(norm='l2', copy=False)
X_norm2 = normalizer.transform(X)
assert_true(X_norm2 is X)
X_norm2 = toarray(X_norm2)
for X_norm in (X_norm1, X_norm2):
for i in range(3):
assert_almost_equal(la.norm(X_norm[i]), 1.0)
assert_almost_equal(la.norm(X_norm[3]), 0.0)
# check input for which copy=False won't prevent a copy
for init in (sparse.coo_matrix, sparse.csc_matrix, sparse.lil_matrix):
X = init(X_dense)
        X_norm = Normalizer(norm='l2', copy=False).transform(X)
assert_true(X_norm is not X)
assert_true(isinstance(X_norm, sparse.csr_matrix))
X_norm = toarray(X_norm)
for i in range(3):
assert_almost_equal(la.norm(X_norm[i]), 1.0)
assert_almost_equal(la.norm(X_norm[3]), 0.0)
def test_normalizer_max():
rng = np.random.RandomState(0)
X_dense = rng.randn(4, 5)
X_sparse_unpruned = sparse.csr_matrix(X_dense)
# set the row number 3 to zero
X_dense[3, :] = 0.0
# set the row number 3 to zero without pruning (can happen in real life)
indptr_3 = X_sparse_unpruned.indptr[3]
indptr_4 = X_sparse_unpruned.indptr[4]
X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0
# build the pruned variant using the regular constructor
X_sparse_pruned = sparse.csr_matrix(X_dense)
# check inputs that support the no-copy optim
for X in (X_dense, X_sparse_pruned, X_sparse_unpruned):
normalizer = Normalizer(norm='max', copy=True)
X_norm1 = normalizer.transform(X)
assert_true(X_norm1 is not X)
X_norm1 = toarray(X_norm1)
normalizer = Normalizer(norm='max', copy=False)
X_norm2 = normalizer.transform(X)
assert_true(X_norm2 is X)
X_norm2 = toarray(X_norm2)
for X_norm in (X_norm1, X_norm2):
row_maxs = X_norm.max(axis=1)
for i in range(3):
assert_almost_equal(row_maxs[i], 1.0)
assert_almost_equal(row_maxs[3], 0.0)
# check input for which copy=False won't prevent a copy
    for init in (sparse.coo_matrix, sparse.csc_matrix, sparse.lil_matrix):
        X = init(X_dense)
        X_norm = Normalizer(norm='max', copy=False).transform(X)
        assert_true(X_norm is not X)
        assert_true(isinstance(X_norm, sparse.csr_matrix))
        X_norm = toarray(X_norm)
        # recompute the row maxima on the transformed data before asserting
        row_maxs = X_norm.max(axis=1)
        for i in range(3):
            assert_almost_equal(row_maxs[i], 1.0)
        assert_almost_equal(la.norm(X_norm[3]), 0.0)
def test_normalize():
# Test normalize function
# Only tests functionality not used by the tests for Normalizer.
X = np.random.RandomState(37).randn(3, 2)
assert_array_equal(normalize(X, copy=False),
normalize(X.T, axis=0, copy=False).T)
assert_raises(ValueError, normalize, [[0]], axis=2)
assert_raises(ValueError, normalize, [[0]], norm='l3')
def test_binarizer():
X_ = np.array([[1, 0, 5], [2, 3, -1]])
for init in (np.array, list, sparse.csr_matrix, sparse.csc_matrix):
X = init(X_.copy())
binarizer = Binarizer(threshold=2.0, copy=True)
X_bin = toarray(binarizer.transform(X))
assert_equal(np.sum(X_bin == 0), 4)
assert_equal(np.sum(X_bin == 1), 2)
X_bin = binarizer.transform(X)
assert_equal(sparse.issparse(X), sparse.issparse(X_bin))
binarizer = Binarizer(copy=True).fit(X)
X_bin = toarray(binarizer.transform(X))
assert_true(X_bin is not X)
assert_equal(np.sum(X_bin == 0), 2)
assert_equal(np.sum(X_bin == 1), 4)
binarizer = Binarizer(copy=True)
X_bin = binarizer.transform(X)
assert_true(X_bin is not X)
X_bin = toarray(X_bin)
assert_equal(np.sum(X_bin == 0), 2)
assert_equal(np.sum(X_bin == 1), 4)
binarizer = Binarizer(copy=False)
X_bin = binarizer.transform(X)
if init is not list:
assert_true(X_bin is X)
X_bin = toarray(X_bin)
assert_equal(np.sum(X_bin == 0), 2)
assert_equal(np.sum(X_bin == 1), 4)
binarizer = Binarizer(threshold=-0.5, copy=True)
for init in (np.array, list):
X = init(X_.copy())
X_bin = toarray(binarizer.transform(X))
assert_equal(np.sum(X_bin == 0), 1)
assert_equal(np.sum(X_bin == 1), 5)
X_bin = binarizer.transform(X)
# Cannot use threshold < 0 for sparse
assert_raises(ValueError, binarizer.transform, sparse.csc_matrix(X))
def test_center_kernel():
# Test that KernelCenterer is equivalent to StandardScaler
# in feature space
rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))
scaler = StandardScaler(with_std=False)
scaler.fit(X_fit)
X_fit_centered = scaler.transform(X_fit)
K_fit = np.dot(X_fit, X_fit.T)
# center fit time matrix
centerer = KernelCenterer()
K_fit_centered = np.dot(X_fit_centered, X_fit_centered.T)
K_fit_centered2 = centerer.fit_transform(K_fit)
assert_array_almost_equal(K_fit_centered, K_fit_centered2)
# center predict time matrix
X_pred = rng.random_sample((2, 4))
K_pred = np.dot(X_pred, X_fit.T)
X_pred_centered = scaler.transform(X_pred)
K_pred_centered = np.dot(X_pred_centered, X_fit_centered.T)
K_pred_centered2 = centerer.transform(K_pred)
assert_array_almost_equal(K_pred_centered, K_pred_centered2)
def test_fit_transform():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
for obj in ((StandardScaler(), Normalizer(), Binarizer())):
X_transformed = obj.fit(X).transform(X)
X_transformed2 = obj.fit_transform(X)
assert_array_equal(X_transformed, X_transformed2)
def test_add_dummy_feature():
X = [[1, 0], [0, 1], [0, 1]]
X = add_dummy_feature(X)
assert_array_equal(X, [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_add_dummy_feature_coo():
X = sparse.coo_matrix([[1, 0], [0, 1], [0, 1]])
X = add_dummy_feature(X)
assert_true(sparse.isspmatrix_coo(X), X)
assert_array_equal(X.toarray(), [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_add_dummy_feature_csc():
X = sparse.csc_matrix([[1, 0], [0, 1], [0, 1]])
X = add_dummy_feature(X)
assert_true(sparse.isspmatrix_csc(X), X)
assert_array_equal(X.toarray(), [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_add_dummy_feature_csr():
X = sparse.csr_matrix([[1, 0], [0, 1], [0, 1]])
X = add_dummy_feature(X)
assert_true(sparse.isspmatrix_csr(X), X)
assert_array_equal(X.toarray(), [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_one_hot_encoder_sparse():
# Test OneHotEncoder's fit and transform.
X = [[3, 2, 1], [0, 1, 1]]
enc = OneHotEncoder()
# discover max values automatically
X_trans = enc.fit_transform(X).toarray()
assert_equal(X_trans.shape, (2, 5))
assert_array_equal(enc.active_features_,
np.where([1, 0, 0, 1, 0, 1, 1, 0, 1])[0])
assert_array_equal(enc.feature_indices_, [0, 4, 7, 9])
# check outcome
assert_array_equal(X_trans,
[[0., 1., 0., 1., 1.],
[1., 0., 1., 0., 1.]])
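    # Why shape (2, 5) (illustrative): n_values_ is inferred as [4, 3, 2]
    # (max + 1 per column, hence feature_indices_ == [0, 4, 7, 9]); of the 9
    # indicator columns only the 5 matching values actually seen in fit stay
    # active.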
# max value given as 3
enc = OneHotEncoder(n_values=4)
X_trans = enc.fit_transform(X)
assert_equal(X_trans.shape, (2, 4 * 3))
assert_array_equal(enc.feature_indices_, [0, 4, 8, 12])
# max value given per feature
enc = OneHotEncoder(n_values=[3, 2, 2])
X = [[1, 0, 1], [0, 1, 1]]
X_trans = enc.fit_transform(X)
assert_equal(X_trans.shape, (2, 3 + 2 + 2))
assert_array_equal(enc.n_values_, [3, 2, 2])
# check that testing with larger feature works:
X = np.array([[2, 0, 1], [0, 1, 1]])
enc.transform(X)
# test that an error is raised when out of bounds:
X_too_large = [[0, 2, 1], [0, 1, 1]]
assert_raises(ValueError, enc.transform, X_too_large)
assert_raises(ValueError, OneHotEncoder(n_values=2).fit_transform, X)
# test that error is raised when wrong number of features
assert_raises(ValueError, enc.transform, X[:, :-1])
# test that error is raised when wrong number of features in fit
# with prespecified n_values
assert_raises(ValueError, enc.fit, X[:, :-1])
# test exception on wrong init param
assert_raises(TypeError, OneHotEncoder(n_values=np.int).fit, X)
enc = OneHotEncoder()
# test negative input to fit
assert_raises(ValueError, enc.fit, [[0], [-1]])
# test negative input to transform
enc.fit([[0], [1]])
assert_raises(ValueError, enc.transform, [[0], [-1]])
def test_one_hot_encoder_dense():
# check for sparse=False
X = [[3, 2, 1], [0, 1, 1]]
enc = OneHotEncoder(sparse=False)
# discover max values automatically
X_trans = enc.fit_transform(X)
assert_equal(X_trans.shape, (2, 5))
assert_array_equal(enc.active_features_,
np.where([1, 0, 0, 1, 0, 1, 1, 0, 1])[0])
assert_array_equal(enc.feature_indices_, [0, 4, 7, 9])
# check outcome
assert_array_equal(X_trans,
np.array([[0., 1., 0., 1., 1.],
[1., 0., 1., 0., 1.]]))
def _check_transform_selected(X, X_expected, sel):
for M in (X, sparse.csr_matrix(X)):
Xtr = _transform_selected(M, Binarizer().transform, sel)
assert_array_equal(toarray(Xtr), X_expected)
def test_transform_selected():
X = [[3, 2, 1], [0, 1, 1]]
X_expected = [[1, 2, 1], [0, 1, 1]]
_check_transform_selected(X, X_expected, [0])
_check_transform_selected(X, X_expected, [True, False, False])
X_expected = [[1, 1, 1], [0, 1, 1]]
_check_transform_selected(X, X_expected, [0, 1, 2])
_check_transform_selected(X, X_expected, [True, True, True])
_check_transform_selected(X, X_expected, "all")
_check_transform_selected(X, X, [])
_check_transform_selected(X, X, [False, False, False])
def _run_one_hot(X, X2, cat):
enc = OneHotEncoder(categorical_features=cat)
Xtr = enc.fit_transform(X)
X2tr = enc.transform(X2)
return Xtr, X2tr
def _check_one_hot(X, X2, cat, n_features):
ind = np.where(cat)[0]
# With mask
A, B = _run_one_hot(X, X2, cat)
# With indices
C, D = _run_one_hot(X, X2, ind)
# Check shape
assert_equal(A.shape, (2, n_features))
assert_equal(B.shape, (1, n_features))
assert_equal(C.shape, (2, n_features))
assert_equal(D.shape, (1, n_features))
# Check that mask and indices give the same results
assert_array_equal(toarray(A), toarray(C))
assert_array_equal(toarray(B), toarray(D))
def test_one_hot_encoder_categorical_features():
X = np.array([[3, 2, 1], [0, 1, 1]])
X2 = np.array([[1, 1, 1]])
cat = [True, False, False]
_check_one_hot(X, X2, cat, 4)
# Edge case: all non-categorical
cat = [False, False, False]
_check_one_hot(X, X2, cat, 3)
# Edge case: all categorical
cat = [True, True, True]
_check_one_hot(X, X2, cat, 5)
def test_one_hot_encoder_unknown_transform():
X = np.array([[0, 2, 1], [1, 0, 3], [1, 0, 2]])
y = np.array([[4, 1, 1]])
# Test that one hot encoder raises error for unknown features
# present during transform.
oh = OneHotEncoder(handle_unknown='error')
oh.fit(X)
assert_raises(ValueError, oh.transform, y)
# Test the ignore option, ignores unknown features.
oh = OneHotEncoder(handle_unknown='ignore')
oh.fit(X)
assert_array_equal(
oh.transform(y).toarray(),
np.array([[0., 0., 0., 0., 1., 0., 0.]])
)
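    # Worked example (illustrative): fit sees 2 + 2 + 3 distinct values
    # across the three columns (7 output columns); in y the unseen values 4
    # and 1 in the first two columns become all-zero blocks, while the known
    # value 1 in the last column is one-hot encoded.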
# Raise error if handle_unknown is neither ignore or error.
oh = OneHotEncoder(handle_unknown='42')
oh.fit(X)
assert_raises(ValueError, oh.transform, y)
|
bsd-3-clause
|
txm/potato
|
django/utils/simplejson/__init__.py
|
324
|
14306
|
r"""JSON (JavaScript Object Notation) <http://json.org> is a subset of
JavaScript syntax (ECMA-262 3rd edition) used as a lightweight data
interchange format.
:mod:`simplejson` exposes an API familiar to users of the standard library
:mod:`marshal` and :mod:`pickle` modules. It is the externally maintained
version of the :mod:`json` library contained in Python 2.6, but maintains
compatibility with Python 2.4 and Python 2.5 and (currently) has
significant performance advantages, even without using the optional C
extension for speedups.
Encoding basic Python object hierarchies::
>>> import simplejson as json
>>> json.dumps(['foo', {'bar': ('baz', None, 1.0, 2)}])
'["foo", {"bar": ["baz", null, 1.0, 2]}]'
>>> print json.dumps("\"foo\bar")
"\"foo\bar"
>>> print json.dumps(u'\u1234')
"\u1234"
>>> print json.dumps('\\')
"\\"
>>> print json.dumps({"c": 0, "b": 0, "a": 0}, sort_keys=True)
{"a": 0, "b": 0, "c": 0}
>>> from StringIO import StringIO
>>> io = StringIO()
>>> json.dump(['streaming API'], io)
>>> io.getvalue()
'["streaming API"]'
Compact encoding::
>>> import simplejson as json
>>> json.dumps([1,2,3,{'4': 5, '6': 7}], separators=(',',':'))
'[1,2,3,{"4":5,"6":7}]'
Pretty printing::
>>> import simplejson as json
>>> s = json.dumps({'4': 5, '6': 7}, sort_keys=True, indent=4)
>>> print '\n'.join([l.rstrip() for l in s.splitlines()])
{
"4": 5,
"6": 7
}
Decoding JSON::
>>> import simplejson as json
>>> obj = [u'foo', {u'bar': [u'baz', None, 1.0, 2]}]
>>> json.loads('["foo", {"bar":["baz", null, 1.0, 2]}]') == obj
True
>>> json.loads('"\\"foo\\bar"') == u'"foo\x08ar'
True
>>> from StringIO import StringIO
>>> io = StringIO('["streaming API"]')
>>> json.load(io)[0] == 'streaming API'
True
Specializing JSON object decoding::
>>> import simplejson as json
>>> def as_complex(dct):
... if '__complex__' in dct:
... return complex(dct['real'], dct['imag'])
... return dct
...
>>> json.loads('{"__complex__": true, "real": 1, "imag": 2}',
... object_hook=as_complex)
(1+2j)
>>> import decimal
>>> json.loads('1.1', parse_float=decimal.Decimal) == decimal.Decimal('1.1')
True
Specializing JSON object encoding::
>>> import simplejson as json
>>> def encode_complex(obj):
... if isinstance(obj, complex):
... return [obj.real, obj.imag]
... raise TypeError("%r is not JSON serializable" % (obj,))
...
>>> json.dumps(2 + 1j, default=encode_complex)
'[2.0, 1.0]'
>>> json.JSONEncoder(default=encode_complex).encode(2 + 1j)
'[2.0, 1.0]'
>>> ''.join(json.JSONEncoder(default=encode_complex).iterencode(2 + 1j))
'[2.0, 1.0]'
Using simplejson.tool from the shell to validate and pretty-print::
$ echo '{"json":"obj"}' | python -msimplejson.tool
{
"json": "obj"
}
$ echo '{ 1.2:3.4}' | python -msimplejson.tool
Expecting property name: line 1 column 2 (char 2)
"""
# Django modification: try to use the system version first, providing it's
# either a later version or has the C speedups in place. Otherwise, fall
# back to our local copy.
__version__ = '2.0.7'
use_system_version = False
try:
# The system-installed version has priority providing it is either not an
# earlier version or it contains the C speedups.
import simplejson
if (simplejson.__version__.split('.') >= __version__.split('.') or
hasattr(simplejson, '_speedups')):
from simplejson import *
use_system_version = True
except ImportError:
pass
if not use_system_version:
try:
from json import * # Python 2.6 preferred over local copy.
# There is a "json" package around that is not Python's "json", so we
# check for something that is only in the namespace of the version we
# want.
JSONDecoder
use_system_version = True
except (ImportError, NameError):
pass
# If all else fails, we have a bundled version that can be used.
if not use_system_version:
__all__ = [
'dump', 'dumps', 'load', 'loads',
'JSONDecoder', 'JSONEncoder',
]
from django.utils.simplejson.decoder import JSONDecoder
from django.utils.simplejson.encoder import JSONEncoder
_default_encoder = JSONEncoder(
skipkeys=False,
ensure_ascii=True,
check_circular=True,
allow_nan=True,
indent=None,
separators=None,
encoding='utf-8',
default=None,
)
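    # Descriptive note: this module-level encoder is reused by dump() and
    # dumps() below whenever every keyword argument keeps its default value,
    # avoiding the construction of a new JSONEncoder on each call.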
def dump(obj, fp, skipkeys=False, ensure_ascii=True, check_circular=True,
allow_nan=True, cls=None, indent=None, separators=None,
encoding='utf-8', default=None, **kw):
"""Serialize ``obj`` as a JSON formatted stream to ``fp`` (a
``.write()``-supporting file-like object).
If ``skipkeys`` is ``True`` then ``dict`` keys that are not basic types
(``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``)
will be skipped instead of raising a ``TypeError``.
    If ``ensure_ascii`` is ``False``, then some chunks written to ``fp``
may be ``unicode`` instances, subject to normal Python ``str`` to
``unicode`` coercion rules. Unless ``fp.write()`` explicitly
understands ``unicode`` (as in ``codecs.getwriter()``) this is likely
to cause an error.
If ``check_circular`` is ``False``, then the circular reference check
for container types will be skipped and a circular reference will
result in an ``OverflowError`` (or worse).
If ``allow_nan`` is ``False``, then it will be a ``ValueError`` to
serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``)
in strict compliance of the JSON specification, instead of using the
JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``).
If ``indent`` is a non-negative integer, then JSON array elements and object
members will be pretty-printed with that indent level. An indent level
of 0 will only insert newlines. ``None`` is the most compact representation.
If ``separators`` is an ``(item_separator, dict_separator)`` tuple
then it will be used instead of the default ``(', ', ': ')`` separators.
``(',', ':')`` is the most compact JSON representation.
``encoding`` is the character encoding for str instances, default is UTF-8.
``default(obj)`` is a function that should return a serializable version
of obj or raise TypeError. The default simply raises TypeError.
To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the
``.default()`` method to serialize additional types), specify it with
the ``cls`` kwarg.
"""
# cached encoder
if (skipkeys is False and ensure_ascii is True and
check_circular is True and allow_nan is True and
cls is None and indent is None and separators is None and
encoding == 'utf-8' and default is None and not kw):
iterable = _default_encoder.iterencode(obj)
else:
if cls is None:
cls = JSONEncoder
iterable = cls(skipkeys=skipkeys, ensure_ascii=ensure_ascii,
check_circular=check_circular, allow_nan=allow_nan, indent=indent,
separators=separators, encoding=encoding,
default=default, **kw).iterencode(obj)
# could accelerate with writelines in some versions of Python, at
# a debuggability cost
for chunk in iterable:
fp.write(chunk)
def dumps(obj, skipkeys=False, ensure_ascii=True, check_circular=True,
allow_nan=True, cls=None, indent=None, separators=None,
encoding='utf-8', default=None, **kw):
"""Serialize ``obj`` to a JSON formatted ``str``.
If ``skipkeys`` is ``True`` then ``dict`` keys that are not basic types
(``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``)
will be skipped instead of raising a ``TypeError``.
If ``ensure_ascii`` is ``False``, then the return value will be a
``unicode`` instance subject to normal Python ``str`` to ``unicode``
coercion rules instead of being escaped to an ASCII ``str``.
If ``check_circular`` is ``False``, then the circular reference check
for container types will be skipped and a circular reference will
result in an ``OverflowError`` (or worse).
If ``allow_nan`` is ``False``, then it will be a ``ValueError`` to
serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``) in
strict compliance of the JSON specification, instead of using the
JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``).
If ``indent`` is a non-negative integer, then JSON array elements and
object members will be pretty-printed with that indent level. An indent
level of 0 will only insert newlines. ``None`` is the most compact
representation.
If ``separators`` is an ``(item_separator, dict_separator)`` tuple
then it will be used instead of the default ``(', ', ': ')`` separators.
``(',', ':')`` is the most compact JSON representation.
``encoding`` is the character encoding for str instances, default is UTF-8.
``default(obj)`` is a function that should return a serializable version
of obj or raise TypeError. The default simply raises TypeError.
To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the
``.default()`` method to serialize additional types), specify it with
the ``cls`` kwarg.
"""
# cached encoder
if (skipkeys is False and ensure_ascii is True and
check_circular is True and allow_nan is True and
cls is None and indent is None and separators is None and
encoding == 'utf-8' and default is None and not kw):
return _default_encoder.encode(obj)
if cls is None:
cls = JSONEncoder
return cls(
skipkeys=skipkeys, ensure_ascii=ensure_ascii,
check_circular=check_circular, allow_nan=allow_nan, indent=indent,
separators=separators, encoding=encoding, default=default,
**kw).encode(obj)
_default_decoder = JSONDecoder(encoding=None, object_hook=None)
def load(fp, encoding=None, cls=None, object_hook=None, parse_float=None,
parse_int=None, parse_constant=None, **kw):
"""Deserialize ``fp`` (a ``.read()``-supporting file-like object containing
a JSON document) to a Python object.
    If the contents of ``fp`` are encoded with an ASCII based encoding other
than utf-8 (e.g. latin-1), then an appropriate ``encoding`` name must
be specified. Encodings that are not ASCII based (such as UCS-2) are
not allowed, and should be wrapped with
    ``codecs.getreader(encoding)(fp)``, or simply decoded to a ``unicode``
object and passed to ``loads()``
``object_hook`` is an optional function that will be called with the
result of any object literal decode (a ``dict``). The return value of
``object_hook`` will be used instead of the ``dict``. This feature
can be used to implement custom decoders (e.g. JSON-RPC class hinting).
To use a custom ``JSONDecoder`` subclass, specify it with the ``cls``
kwarg.
"""
return loads(fp.read(),
encoding=encoding, cls=cls, object_hook=object_hook,
parse_float=parse_float, parse_int=parse_int,
parse_constant=parse_constant, **kw)
def loads(s, encoding=None, cls=None, object_hook=None, parse_float=None,
parse_int=None, parse_constant=None, **kw):
"""Deserialize ``s`` (a ``str`` or ``unicode`` instance containing a JSON
document) to a Python object.
If ``s`` is a ``str`` instance and is encoded with an ASCII based encoding
other than utf-8 (e.g. latin-1) then an appropriate ``encoding`` name
must be specified. Encodings that are not ASCII based (such as UCS-2)
are not allowed and should be decoded to ``unicode`` first.
``object_hook`` is an optional function that will be called with the
result of any object literal decode (a ``dict``). The return value of
``object_hook`` will be used instead of the ``dict``. This feature
can be used to implement custom decoders (e.g. JSON-RPC class hinting).
``parse_float``, if specified, will be called with the string
of every JSON float to be decoded. By default this is equivalent to
float(num_str). This can be used to use another datatype or parser
for JSON floats (e.g. decimal.Decimal).
``parse_int``, if specified, will be called with the string
of every JSON int to be decoded. By default this is equivalent to
int(num_str). This can be used to use another datatype or parser
for JSON integers (e.g. float).
``parse_constant``, if specified, will be called with one of the
following strings: -Infinity, Infinity, NaN, null, true, false.
This can be used to raise an exception if invalid JSON numbers
are encountered.
To use a custom ``JSONDecoder`` subclass, specify it with the ``cls``
kwarg.
"""
if (cls is None and encoding is None and object_hook is None and
parse_int is None and parse_float is None and
parse_constant is None and not kw):
return _default_decoder.decode(s)
if cls is None:
cls = JSONDecoder
if object_hook is not None:
kw['object_hook'] = object_hook
if parse_float is not None:
kw['parse_float'] = parse_float
if parse_int is not None:
kw['parse_int'] = parse_int
if parse_constant is not None:
kw['parse_constant'] = parse_constant
return cls(encoding=encoding, **kw).decode(s)
|
bsd-3-clause
|
arthru/OpenUpgrade
|
addons/website_event/controllers/main.py
|
22
|
11582
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013-Today OpenERP SA (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
import time
from datetime import datetime, timedelta
from dateutil.relativedelta import relativedelta
import werkzeug.urls
from openerp import SUPERUSER_ID
from openerp import http
from openerp import tools
from openerp.http import request
from openerp.tools.translate import _
from openerp.addons.website.models.website import slug
_logger = logging.getLogger(__name__)
try:
import GeoIP
except ImportError:
GeoIP = None
_logger.warn("Please install GeoIP python module to use events localisation.")
class website_event(http.Controller):
@http.route(['/event', '/event/page/<int:page>'], type='http', auth="public", website=True)
def events(self, page=1, **searches):
cr, uid, context = request.cr, request.uid, request.context
event_obj = request.registry['event.event']
type_obj = request.registry['event.type']
country_obj = request.registry['res.country']
searches.setdefault('date', 'all')
searches.setdefault('type', 'all')
searches.setdefault('country', 'all')
domain_search = {}
def sdn(date):
return date.strftime('%Y-%m-%d 23:59:59')
def sd(date):
return date.strftime(tools.DEFAULT_SERVER_DATETIME_FORMAT)
today = datetime.today()
dates = [
['all', _('Next Events'), [("date_end", ">", sd(today))], 0],
['today', _('Today'), [
("date_end", ">", sd(today)),
("date_begin", "<", sdn(today))],
0],
['week', _('This Week'), [
("date_end", ">=", sd(today + relativedelta(days=-today.weekday()))),
("date_begin", "<", sdn(today + relativedelta(days=6-today.weekday())))],
0],
['nextweek', _('Next Week'), [
("date_end", ">=", sd(today + relativedelta(days=7-today.weekday()))),
("date_begin", "<", sdn(today + relativedelta(days=13-today.weekday())))],
0],
['month', _('This month'), [
("date_end", ">=", sd(today.replace(day=1))),
("date_begin", "<", (today.replace(day=1) + relativedelta(months=1)).strftime('%Y-%m-%d 00:00:00'))],
0],
['nextmonth', _('Next month'), [
("date_end", ">=", sd(today.replace(day=1) + relativedelta(months=1))),
("date_begin", "<", (today.replace(day=1) + relativedelta(months=2)).strftime('%Y-%m-%d 00:00:00'))],
0],
['old', _('Old Events'), [
("date_end", "<", today.strftime('%Y-%m-%d 00:00:00'))],
0],
]
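        # Each entry above is [key, label, search domain, count]; the count
        # slots are filled in by the per-domain searches further below.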
# search domains
current_date = None
current_type = None
current_country = None
for date in dates:
if searches["date"] == date[0]:
domain_search["date"] = date[2]
if date[0] != 'all':
current_date = date[1]
if searches["type"] != 'all':
current_type = type_obj.browse(cr, uid, int(searches['type']), context=context)
domain_search["type"] = [("type", "=", int(searches["type"]))]
if searches["country"] != 'all':
current_country = country_obj.browse(cr, uid, int(searches['country']), context=context)
domain_search["country"] = [("country_id", "=", int(searches["country"]))]
def dom_without(without):
domain = [('state', "in", ['draft','confirm','done'])]
for key, search in domain_search.items():
if key != without:
domain += search
return domain
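        # e.g. dom_without('type') keeps the state filter plus the date and
        # country terms, so every facet is counted while ignoring its own
        # current selection.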
# count by domains without self search
for date in dates:
            if date[0] != 'old':
date[3] = event_obj.search(
request.cr, request.uid, dom_without('date') + date[2],
count=True, context=request.context)
domain = dom_without('type')
types = event_obj.read_group(
request.cr, request.uid, domain, ["id", "type"], groupby="type",
orderby="type", context=request.context)
type_count = event_obj.search(request.cr, request.uid, domain,
count=True, context=request.context)
types.insert(0, {
'type_count': type_count,
'type': ("all", _("All Categories"))
})
domain = dom_without('country')
countries = event_obj.read_group(
request.cr, request.uid, domain, ["id", "country_id"],
groupby="country_id", orderby="country_id", context=request.context)
country_id_count = event_obj.search(request.cr, request.uid, domain,
count=True, context=request.context)
countries.insert(0, {
'country_id_count': country_id_count,
'country_id': ("all", _("All Countries"))
})
step = 10 # Number of events per page
event_count = event_obj.search(
request.cr, request.uid, dom_without("none"), count=True,
context=request.context)
pager = request.website.pager(url="/event", total=event_count, page=page, step=step, scope=5)
order = 'website_published desc, date_begin'
if searches.get('date','all') == 'old':
order = 'website_published desc, date_begin desc'
obj_ids = event_obj.search(
request.cr, request.uid, dom_without("none"), limit=step,
offset=pager['offset'], order=order, context=request.context)
events_ids = event_obj.browse(request.cr, request.uid, obj_ids,
context=request.context)
values = {
'current_date': current_date,
'current_country': current_country,
'current_type': current_type,
'event_ids': events_ids,
'dates': dates,
'types': types,
'countries': countries,
'pager': pager,
'searches': searches,
'search_path': "?%s" % werkzeug.url_encode(searches),
}
return request.website.render("website_event.index", values)
@http.route(['/event/<model("event.event"):event>/page/<path:page>'], type='http', auth="public", website=True)
def event_page(self, event, page, **post):
values = {
'event': event,
'main_object': event
}
return request.website.render(page, values)
@http.route(['/event/<model("event.event"):event>'], type='http', auth="public", website=True)
def event(self, event, **post):
if event.menu_id and event.menu_id.child_id:
target_url = event.menu_id.child_id[0].url
else:
target_url = '/event/%s/register' % str(event.id)
if post.get('enable_editor') == '1':
target_url += '?enable_editor=1'
        return request.redirect(target_url)
@http.route(['/event/<model("event.event"):event>/register'], type='http', auth="public", website=True)
def event_register(self, event, **post):
values = {
'event': event,
'main_object': event,
'range': range,
}
return request.website.render("website_event.event_description_full", values)
@http.route('/event/add_event', type='http', auth="user", methods=['POST'], website=True)
def add_event(self, event_name="New Event", **kwargs):
return self._add_event(event_name, request.context, **kwargs)
def _add_event(self, event_name=None, context={}, **kwargs):
if not event_name:
event_name = _("New Event")
Event = request.registry.get('event.event')
date_begin = datetime.today() + timedelta(days=(14))
vals = {
'name': event_name,
'date_begin': date_begin.strftime('%Y-%m-%d'),
'date_end': (date_begin + timedelta(days=(1))).strftime('%Y-%m-%d'),
}
event_id = Event.create(request.cr, request.uid, vals, context=context)
event = Event.browse(request.cr, request.uid, event_id, context=context)
return request.redirect("/event/%s/register?enable_editor=1" % slug(event))
def get_visitors_country(self):
        GI = GeoIP.open('/usr/share/GeoIP/GeoIP.dat', 0)
        return {
            'country_code': GI.country_code_by_addr(request.httprequest.remote_addr),
            'country_name': GI.country_name_by_addr(request.httprequest.remote_addr),
        }
def get_formated_date(self, event):
start_date = datetime.strptime(event.date_begin, tools.DEFAULT_SERVER_DATETIME_FORMAT).date()
end_date = datetime.strptime(event.date_end, tools.DEFAULT_SERVER_DATETIME_FORMAT).date()
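        # Illustrative example: an event from 2014-03-05 to 2014-03-07
        # renders roughly as 'Mar  5- 7' ('%e' space-pads one-digit days;
        # exact output is locale-dependent).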
        return '%s %s%s' % (
            start_date.strftime("%b"), start_date.strftime("%e"),
            end_date != start_date and "-" + end_date.strftime("%e") or "")
@http.route('/event/get_country_event_list', type='http', auth='public', website=True)
    def get_country_events(self, **post):
if not GeoIP:
return ""
country_obj = request.registry['res.country']
event_obj = request.registry['event.event']
        cr, uid, context, event_ids = request.cr, request.uid, request.context, []
country_code = self.get_visitors_country()['country_code']
result = {'events':[],'country':False}
if country_code:
country_ids = country_obj.search(request.cr, request.uid, [('code', '=', country_code)], context=request.context)
            event_ids = event_obj.search(
                request.cr, request.uid,
                ['|', ('address_id', '=', None), ('country_id.code', '=', country_code),
                 ('date_begin', '>=', time.strftime('%Y-%m-%d 00:00:00')),
                 ('state', '=', 'confirm')],
                order="date_begin", context=request.context)
if not event_ids:
event_ids = event_obj.search(request.cr, request.uid, [('date_begin','>=', time.strftime('%Y-%m-%d 00:00:00')),('state', '=', 'confirm')], order="date_begin", context=request.context)
for event in event_obj.browse(request.cr, request.uid, event_ids, context=request.context)[:6]:
if country_code and event.country_id.code == country_code:
result['country'] = country_obj.browse(request.cr, request.uid, country_ids[0], context=request.context)
result['events'].append({
"date": self.get_formated_date(event),
"event": event,
"url": event.website_url})
return request.website.render("website_event.country_events_list",result)
|
agpl-3.0
|
sunlibin/incubator-eagle
|
eagle-external/eagle-ambari/lib/EAGLE/package/scripts/eagle_userprofile_scheduler.py
|
21
|
3209
|
#!/usr/bin/python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from resource_management import *
from resource_management.libraries.script.script import Script
from resource_management.libraries.functions import get_unique_id_and_date
from resource_management.libraries.functions.version import compare_versions, format_hdp_stack_version
from resource_management.libraries.functions.security_commons import build_expectations, \
cached_kinit_executor, get_params_from_filesystem, validate_security_config_properties, \
FILE_TYPE_JAAS_CONF
from resource_management.core.shell import call
from resource_management.core.logger import Logger
from resource_management.core.resources.system import Execute
from resource_management.libraries.functions.check_process_status import check_process_status
from resource_management.libraries.functions.format import format
from resource_management.libraries.functions.validate import call_and_match_output
from actions import *
class EagleUserProfileScheduler(Script):
# def get_stack_to_component(self):
# return {"HDP": "EAGLE-TOPOLOGY"}
def install(self, env):
Logger.info('Install Eagle UserProfile Scheduler')
# self.install_packages(env)
import params
env.set_params(params)
self.configure(env)
# eagle_topology_exec(action = 'init')
def configure(self,env):
Logger.info("Configure Eagle UserProfile Scheduler")
import params
env.set_params(params)
def pre_rolling_restart(self,env):
Logger.info("Executing rolling pre-restart Eagle UserProfile Scheduler")
import params
env.set_params(params)
# if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
# Execute(format("hdp-select set eagle-topology {version}"))
def stop(self, env):
Logger.info('Stopping Eagle UserProfile Scheduler')
import params
env.set_params(params)
self.configure(env)
eagle_userprofile_scheduler_exec(action = 'stop')
def start(self, env):
Logger.info('Starting Eagle UserProfile Scheduler')
import params
env.set_params(params)
self.configure(env)
eagle_userprofile_scheduler_exec(action = 'start')
def status(self, env):
Logger.info('Checking Eagle UserProfile Scheduler')
import params
env.set_params(params)
self.configure(env)
eagle_userprofile_scheduler_exec(action = 'status')
if __name__ == "__main__":
EagleUserProfileScheduler().execute()
|
apache-2.0
|
OCA/stock-logistics-barcode
|
stock_barcodes/tests/test_stock_barcodes_picking.py
|
1
|
9539
|
# Copyright 2018-2019 Sergio Teruel <[email protected]>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from odoo.addons.stock_barcodes.tests.test_stock_barcodes import\
TestStockBarcodes
class TestStockBarcodesPicking(TestStockBarcodes):
def setUp(self):
super().setUp()
self.ScanReadPicking = self.env['wiz.stock.barcodes.read.picking']
self.stock_picking_model = self.env.ref('stock.model_stock_picking')
# Model Data
self.partner_agrolite = self.env.ref('base.res_partner_2')
self.picking_type_in = self.env.ref('stock.picking_type_in')
self.picking_type_out = self.env.ref('stock.picking_type_out')
self.supplier_location = self.env.ref('stock.stock_location_suppliers')
self.customer_location = self.env.ref('stock.stock_location_customers')
self.stock_location = self.env.ref('stock.stock_location_stock')
self.categ_unit = self.env.ref('uom.product_uom_categ_unit')
self.categ_kgm = self.env.ref('uom.product_uom_categ_kgm')
self.picking_out_01 = self.env['stock.picking'].with_context(
planned_picking=True
).create({
'location_id': self.stock_location.id,
'location_dest_id': self.customer_location.id,
'partner_id': self.partner_agrolite.id,
'picking_type_id': self.picking_type_out.id,
'move_lines': [
(0, 0, {
'name': self.product_tracking.name,
'product_id': self.product_tracking.id,
'product_uom_qty': 3,
'product_uom': self.product_tracking.uom_id.id,
'location_id': self.stock_location.id,
'location_dest_id': self.customer_location.id,
}),
]
})
self.picking_out_02 = self.picking_out_01.copy()
self.picking_in_01 = self.env['stock.picking'].with_context(
planned_picking=True
).create({
'location_id': self.supplier_location.id,
'location_dest_id': self.stock_location.id,
'partner_id': self.partner_agrolite.id,
'picking_type_id': self.picking_type_in.id,
'move_lines': [
(0, 0, {
'name': self.product_wo_tracking.name,
'product_id': self.product_wo_tracking.id,
'product_uom_qty': 3,
'product_uom': self.product_wo_tracking.uom_id.id,
'location_id': self.supplier_location.id,
'location_dest_id': self.stock_location.id,
}),
(0, 0, {
'name': self.product_wo_tracking.name,
'product_id': self.product_wo_tracking.id,
'product_uom_qty': 5,
'product_uom': self.product_wo_tracking.uom_id.id,
'location_id': self.supplier_location.id,
'location_dest_id': self.stock_location.id,
}),
(0, 0, {
'name': self.product_tracking.name,
'product_id': self.product_tracking.id,
'product_uom_qty': 3,
'product_uom': self.product_tracking.uom_id.id,
'location_id': self.supplier_location.id,
'location_dest_id': self.stock_location.id,
}),
(0, 0, {
'name': self.product_tracking.name,
'product_id': self.product_tracking.id,
'product_uom_qty': 5,
'product_uom': self.product_tracking.uom_id.id,
'location_id': self.supplier_location.id,
'location_dest_id': self.stock_location.id,
}),
]
})
self.picking_in_01.action_confirm()
vals = self.picking_in_01.action_barcode_scan()
self.wiz_scan_picking = self.ScanReadPicking.with_context(
vals['context']
).create({})
def test_wiz_picking_values(self):
self.assertEqual(self.wiz_scan_picking.location_id,
self.picking_in_01.location_dest_id)
self.assertEqual(self.wiz_scan_picking.res_model_id,
self.stock_picking_model)
self.assertEqual(self.wiz_scan_picking.res_id,
self.picking_in_01.id)
self.assertIn(
"Barcode reader - %s - " % (self.picking_in_01.name),
self.wiz_scan_picking.display_name,
)
def test_picking_wizard_scan_product(self):
self.action_barcode_scanned(self.wiz_scan_picking, '8480000723208')
self.assertEqual(
self.wiz_scan_picking.product_id, self.product_wo_tracking)
sml = self.picking_in_01.move_line_ids.filtered(
lambda x: x.product_id == self.product_wo_tracking)
self.assertEqual(sml.qty_done, 1.0)
        # Scan product with tracking lot enabled
self.action_barcode_scanned(self.wiz_scan_picking, '8433281006850')
sml = self.picking_in_01.move_line_ids.filtered(
lambda x: x.product_id == self.product_tracking)
self.assertEqual(sml.qty_done, 0.0)
self.assertEqual(self.wiz_scan_picking.message,
'Barcode: 8433281006850 (Waiting for input lot)')
        # Scan a lot. Quantities increment when the product or another lot of
        # this product is scanned
self.action_barcode_scanned(self.wiz_scan_picking, '8411822222568')
sml = self.picking_in_01.move_line_ids.filtered(
lambda x: x.product_id == self.product_tracking and x.lot_id)
self.assertEqual(sml.lot_id, self.lot_1)
self.assertEqual(sml.qty_done, 1.0)
self.action_barcode_scanned(self.wiz_scan_picking, '8433281006850')
self.assertEqual(sml.qty_done, 2.0)
self.action_barcode_scanned(self.wiz_scan_picking, '8411822222568')
self.assertEqual(sml.qty_done, 3.0)
self.assertEqual(self.wiz_scan_picking.message,
'Barcode: 8411822222568 (Barcode read correctly)')
# Scan a package
self.action_barcode_scanned(self.wiz_scan_picking, '5420008510489')
        # Package of 5 product units; three units already exist
self.assertEqual(sml.qty_done, 8.0)
def test_picking_wizard_scan_product_manual_entry(self):
self.wiz_scan_picking.manual_entry = True
self.action_barcode_scanned(self.wiz_scan_picking, '8480000723208')
self.assertEqual(self.wiz_scan_picking.product_id,
self.product_wo_tracking)
sml = self.picking_in_01.move_line_ids.filtered(
lambda x: x.product_id == self.product_wo_tracking)
self.assertEqual(self.wiz_scan_picking.product_qty, 0.0)
self.wiz_scan_picking.product_qty = 12.0
self.wiz_scan_picking.action_manual_entry()
self.assertEqual(sml.qty_done, 8.0)
self.assertEqual(sml.move_id.quantity_done, 12.0)
def test_picking_wizard_remove_last_scan(self):
self.action_barcode_scanned(self.wiz_scan_picking, '8480000723208')
self.assertEqual(self.wiz_scan_picking.product_id,
self.product_wo_tracking)
sml = self.picking_in_01.move_line_ids.filtered(
lambda x: x.product_id == self.product_wo_tracking)
self.assertEqual(sml.qty_done, 1.0)
self.wiz_scan_picking.action_undo_last_scan()
self.assertEqual(sml.qty_done, 0.0)
self.assertEqual(self.wiz_scan_picking.picking_product_qty, 0.0)
def test_barcode_from_operation(self):
picking_out_3 = self.picking_out_01.copy()
self.picking_out_01.action_assign()
self.picking_out_02.action_assign()
vals = self.picking_type_out.action_barcode_scan()
self.wiz_scan_picking = self.ScanReadPicking.with_context(
vals['context']
).create({})
self.wiz_scan_picking.manual_entry = True
self.wiz_scan_picking.product_id = self.product_tracking
self.wiz_scan_picking.lot_id = self.lot_1
self.wiz_scan_picking.product_qty = 2
self.wiz_scan_picking.action_manual_entry()
self.assertEqual(len(self.wiz_scan_picking.candidate_picking_ids), 2)
# Lock first picking
candidate = self.wiz_scan_picking.candidate_picking_ids.filtered(
lambda c: c.picking_id == self.picking_out_01)
candidate_wiz = candidate.with_context(
wiz_barcode_id=self.wiz_scan_picking.id,
picking_id=self.picking_out_01.id,
)
candidate_wiz.action_lock_picking()
self.assertEqual(self.picking_out_01.move_lines.quantity_done, 2)
self.wiz_scan_picking.action_manual_entry()
self.assertEqual(self.picking_out_01.move_lines.quantity_done, 4)
        # Picking out 3 is in confirmed state, so until its confirmed moves
        # have been activated the candidate picking count stays at 2
picking_out_3.action_confirm()
candidate_wiz.action_unlock_picking()
self.wiz_scan_picking.action_manual_entry()
self.assertEqual(len(self.wiz_scan_picking.candidate_picking_ids), 2)
self.wiz_scan_picking.confirmed_moves = True
candidate_wiz.action_unlock_picking()
self.wiz_scan_picking.action_manual_entry()
self.assertEqual(len(self.wiz_scan_picking.candidate_picking_ids), 3)
|
agpl-3.0
|
grue/django-allauth
|
allauth/socialaccount/providers/weibo/views.py
|
80
|
1132
|
import requests
from allauth.socialaccount.providers.oauth2.views import (OAuth2Adapter,
OAuth2LoginView,
OAuth2CallbackView)
from .provider import WeiboProvider
class WeiboOAuth2Adapter(OAuth2Adapter):
provider_id = WeiboProvider.id
access_token_url = 'https://api.weibo.com/oauth2/access_token'
authorize_url = 'https://api.weibo.com/oauth2/authorize'
profile_url = 'https://api.weibo.com/2/users/show.json'
def complete_login(self, request, app, token, **kwargs):
uid = kwargs.get('response', {}).get('uid')
resp = requests.get(self.profile_url,
params={'access_token': token.token,
'uid': uid})
extra_data = resp.json()
return self.get_provider().sociallogin_from_response(request,
extra_data)
oauth2_login = OAuth2LoginView.adapter_view(WeiboOAuth2Adapter)
oauth2_callback = OAuth2CallbackView.adapter_view(WeiboOAuth2Adapter)
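# Minimal wiring sketch (standard django-allauth conventions, assumed rather
# than shown in this file):
#   INSTALLED_APPS += ['allauth.socialaccount.providers.weibo']
#   # then register a SocialApp with the Weibo app key/secret; the
#   # oauth2_login/oauth2_callback views above handle redirect and callback.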
|
mit
|
musicmetric/mmpy
|
src/releasegroup.py
|
1
|
2354
|
from entity import *
class Releasegroup(Entity):
"""
wraps the ReleaseGroup entity type as described at http://developer.musicmetric.com
    all timeseries are attributes of the form self.<type>_<source>; each resolves to a dict when the API has data
"""
summary_attrs = ("name", "id", "description", "artist")
def __init__(self, relgrpUUID, **kwargs):
"""
        creates a releasegroup instance. UUID required (or an equivalent 3rd party
        id with prefix; see http://developer.musicmetric.com/identification.html
        for details).
        any keyword attributes listed in summary_attrs may be passed and are set
        on the instance
"""
self.entity_type = 'releasegroup'
self.entity_id = relgrpUUID
self.fetched_summary = False # prevent the summary from being fetched multiple times
for key, val in kwargs.items():
if key in Releasegroup.summary_attrs:
setattr(self, key, val)
else:
raise KeyError("unexpected creation attribute")
def __getattr__(self, attr):
if attr in Releasegroup.summary_attrs and not self.fetched_summary:
self.fetch_summary()
return getattr(self, attr)
if len(attr.split('_')) > 1:
try:
result = self._construct_timeseries(attr.replace('_', '/'))
setattr(self, attr, result) # don't want to call the api over and over
return result
except ValueError:
pass #call failed, probably not an endpoint
raise AttributeError("Unknown attribute name: {0}".format(attr))
        # return getattr(super(Releasegroup, self), attr)
def fetch_summary(self):
"""
        grabs the summary info and sets the corresponding attributes.
        Note: overrides existing attribute values for these attributes
"""
self.response_from()
        # should really clean up the triple nest
        for k, v in self.response.items():
            if not isinstance(v, dict):
                setattr(self, k, v)
            else:
                for subk, subv in v.items():
                    if not isinstance(subv, dict):
                        setattr(self, subk, subv)
                    else:
                        for ssk, ssv in subv.items():
                            setattr(self, ssk, ssv)
self.fetched_summary = True
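# Usage sketch (the UUID and the "fans_lastfm" timeseries name below are
# placeholders, not verified endpoints):
#   rg = Releasegroup('a-releasegroup-uuid')
#   rg.name          # lazily triggers fetch_summary() via __getattr__
#   rg.fans_lastfm   # resolved as the "fans/lastfm" timeseries endpoint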
|
isc
|
xwolf12/django
|
tests/m2m_and_m2o/tests.py
|
383
|
2711
|
from django.db.models import Q
from django.test import TestCase
from .models import Issue, UnicodeReferenceModel, User
class RelatedObjectTests(TestCase):
def test_related_objects_have_name_attribute(self):
for field_name in ('test_issue_client', 'test_issue_cc'):
obj = User._meta.get_field(field_name)
self.assertEqual(field_name, obj.field.related_query_name())
def test_m2m_and_m2o(self):
r = User.objects.create(username="russell")
g = User.objects.create(username="gustav")
i1 = Issue(num=1)
i1.client = r
i1.save()
i2 = Issue(num=2)
i2.client = r
i2.save()
i2.cc.add(r)
i3 = Issue(num=3)
i3.client = g
i3.save()
i3.cc.add(r)
self.assertQuerysetEqual(
Issue.objects.filter(client=r.id), [
1,
2,
],
lambda i: i.num
)
self.assertQuerysetEqual(
Issue.objects.filter(client=g.id), [
3,
],
lambda i: i.num
)
self.assertQuerysetEqual(
Issue.objects.filter(cc__id__exact=g.id), []
)
self.assertQuerysetEqual(
Issue.objects.filter(cc__id__exact=r.id), [
2,
3,
],
lambda i: i.num
)
# These queries combine results from the m2m and the m2o relationships.
# They're three ways of saying the same thing.
self.assertQuerysetEqual(
Issue.objects.filter(Q(cc__id__exact=r.id) | Q(client=r.id)), [
1,
2,
3,
],
lambda i: i.num
)
self.assertQuerysetEqual(
Issue.objects.filter(cc__id__exact=r.id) | Issue.objects.filter(client=r.id), [
1,
2,
3,
],
lambda i: i.num
)
self.assertQuerysetEqual(
Issue.objects.filter(Q(client=r.id) | Q(cc__id__exact=r.id)), [
1,
2,
3,
],
lambda i: i.num
)
class RelatedObjectUnicodeTests(TestCase):
def test_m2m_with_unicode_reference(self):
"""
Regression test for #6045: references to other models can be unicode
strings, providing they are directly convertible to ASCII.
"""
m1 = UnicodeReferenceModel.objects.create()
m2 = UnicodeReferenceModel.objects.create()
m2.others.add(m1) # used to cause an error (see ticket #6045)
m2.save()
list(m2.others.all()) # Force retrieval.
|
bsd-3-clause
|
popazerty/test-1
|
lib/python/Plugins/Extensions/DVDBurn/DVDToolbox.py
|
51
|
8243
|
from Screens.Screen import Screen
from Screens.MessageBox import MessageBox
from Screens.HelpMenu import HelpableScreen
from Components.ActionMap import HelpableActionMap, ActionMap
from Components.Sources.List import List
from Components.Sources.StaticText import StaticText
from Components.Sources.Progress import Progress
from Components.Task import Task, Job, job_manager, Condition
from Components.ScrollLabel import ScrollLabel
from Components.Harddisk import harddiskmanager
from Components.Console import Console
from Plugins.SystemPlugins.Hotplug.plugin import hotplugNotifier
class DVDToolbox(Screen):
skin = """
<screen name="DVDToolbox" position="center,center" size="560,445" title="DVD media toolbox" >
<ePixmap pixmap="skin_default/buttons/red.png" position="0,0" size="140,40" alphatest="on" />
<ePixmap pixmap="skin_default/buttons/green.png" position="140,0" size="140,40" alphatest="on" />
<ePixmap pixmap="skin_default/buttons/yellow.png" position="280,0" size="140,40" alphatest="on" />
<widget source="key_red" render="Label" position="0,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#9f1313" transparent="1" />
<widget source="key_green" render="Label" position="140,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#1f771f" transparent="1" />
<widget source="key_yellow" render="Label" position="280,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#a08500" transparent="1" />
<widget source="info" render="Label" position="20,60" size="520,100" font="Regular;20" />
<widget name="details" position="20,200" size="520,200" font="Regular;16" />
<widget source="space_bar" render="Progress" position="10,410" size="540,26" borderWidth="1" backgroundColor="#254f7497" />
<widget source="space_label" render="Label" position="20,414" size="520,22" zPosition="2" font="Regular;18" halign="center" transparent="1" foregroundColor="#000000" />
</screen>"""
def __init__(self, session):
Screen.__init__(self, session)
self["key_red"] = StaticText(_("Exit"))
self["key_green"] = StaticText(_("Update"))
self["key_yellow"] = StaticText()
self["space_label"] = StaticText()
self["space_bar"] = Progress()
self.mediuminfo = [ ]
self.formattable = False
self["details"] = ScrollLabel()
self["info"] = StaticText()
self["toolboxactions"] = ActionMap(["ColorActions", "DVDToolbox", "OkCancelActions"],
{
"red": self.exit,
"green": self.update,
"yellow": self.format,
"cancel": self.exit,
"pageUp": self.pageUp,
"pageDown": self.pageDown
})
self.update()
hotplugNotifier.append(self.update)
self.onLayoutFinish.append(self.layoutFinished)
def layoutFinished(self):
self.setTitle(_("DVD media toolbox"))
def pageUp(self):
self["details"].pageUp()
def pageDown(self):
self["details"].pageDown()
def update(self, dev="", action=""):
self["space_label"].text = _("Please wait... Loading list...")
self["info"].text = ""
self["details"].setText("")
self.Console = Console()
cmd = "dvd+rw-mediainfo /dev/" + harddiskmanager.getCD()
self.Console.ePopen(cmd, self.mediainfoCB)
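    # mediainfoCB below parses dvd+rw-mediainfo output lines such as
    # "Mounted Media:  1Ah, DVD+RW" or "READ CAPACITY:  2295104*2048=4700372992"
    # (sample values assumed for illustration).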
def format(self):
if self.formattable:
job = DVDformatJob(self)
job_manager.AddJob(job)
from Screens.TaskView import JobView
self.session.openWithCallback(self.formatCB, JobView, job)
def formatCB(self, in_background):
self.update()
def mediainfoCB(self, mediuminfo, retval, extra_args):
formatted_capacity = 0
read_capacity = 0
capacity = 0
used = 0
infotext = ""
mediatype = ""
for line in mediuminfo.splitlines():
if line.find("Mounted Media:") > -1:
mediatype = line.rsplit(',',1)[1][1:]
if mediatype.find("RW") > 0 or mediatype.find("RAM") > 0:
self.formattable = True
else:
self.formattable = False
elif line.find("Legacy lead-out at:") > -1:
used = int(line.rsplit('=',1)[1]) / 1048576.0
print "[dvd+rw-mediainfo] lead out used =", used
elif line.find("formatted:") > -1:
formatted_capacity = int(line.rsplit('=',1)[1]) / 1048576.0
print "[dvd+rw-mediainfo] formatted capacity =", formatted_capacity
elif formatted_capacity == 0 and line.find("READ CAPACITY:") > -1:
read_capacity = int(line.rsplit('=',1)[1]) / 1048576.0
print "[dvd+rw-mediainfo] READ CAPACITY =", read_capacity
for line in mediuminfo.splitlines():
if line.find("Free Blocks:") > -1:
try:
size = eval(line[14:].replace("KB","*1024"))
except:
size = 0
if size > 0:
capacity = size / 1048576
if used:
used = capacity-used
print "[dvd+rw-mediainfo] free blocks capacity=%d, used=%d" % (capacity, used)
elif line.find("Disc status:") > -1:
if line.find("blank") > -1:
print "[dvd+rw-mediainfo] Disc status blank capacity=%d, used=0" % (capacity)
capacity = used
used = 0
elif line.find("complete") > -1 and formatted_capacity == 0:
print "[dvd+rw-mediainfo] Disc status complete capacity=0, used=%d" % (capacity)
used = read_capacity
capacity = 1
else:
capacity = formatted_capacity
infotext += line+'\n'
if capacity and used > capacity:
used = read_capacity or capacity
capacity = formatted_capacity or capacity
self["details"].setText(infotext)
if self.formattable:
self["key_yellow"].text = _("Format")
else:
self["key_yellow"].text = ""
percent = 100 * used / (capacity or 1)
if capacity > 4600:
self["space_label"].text = "%d / %d MB" % (used, capacity) + " (%.2f%% " % percent + _("of a DUAL layer medium used.") + ")"
self["space_bar"].value = int(percent)
elif capacity > 1:
self["space_label"].text = "%d / %d MB" % (used, capacity) + " (%.2f%% " % percent + _("of a SINGLE layer medium used.") + ")"
self["space_bar"].value = int(percent)
elif capacity == 1 and used > 0:
self["space_label"].text = "%d MB " % (used) + _("on READ ONLY medium.")
self["space_bar"].value = int(percent)
else:
self["space_label"].text = _("Medium is not a writeable DVD!")
self["space_bar"].value = 0
free = capacity-used
if free < 2:
free = 0
self["info"].text = "Media-Type:\t\t%s\nFree capacity:\t\t%d MB" % (mediatype or "NO DVD", free)
def exit(self):
del self.Console
hotplugNotifier.remove(self.update)
self.close()
class DVDformatJob(Job):
def __init__(self, toolbox):
Job.__init__(self, _("DVD media toolbox"))
self.toolbox = toolbox
DVDformatTask(self)
def retry(self):
self.tasks[0].args += self.tasks[0].retryargs
Job.retry(self)
class DVDformatTaskPostcondition(Condition):
RECOVERABLE = True
def check(self, task):
return task.error is None
def getErrorMessage(self, task):
return {
task.ERROR_ALREADYFORMATTED: _("This DVD RW medium is already formatted - reformatting will erase all content on the disc."),
task.ERROR_NOTWRITEABLE: _("Medium is not a writeable DVD!"),
task.ERROR_UNKNOWN: _("An unknown error occurred!")
}[task.error]
class DVDformatTask(Task):
ERROR_ALREADYFORMATTED, ERROR_NOTWRITEABLE, ERROR_UNKNOWN = range(3)
def __init__(self, job, extra_args=[]):
Task.__init__(self, job, ("RW medium format"))
self.toolbox = job.toolbox
self.postconditions.append(DVDformatTaskPostcondition())
self.setTool("dvd+rw-format")
self.args += [ "/dev/" + harddiskmanager.getCD() ]
self.end = 1100
self.retryargs = [ ]
def prepare(self):
self.error = None
def processOutputLine(self, line):
if line.startswith("- media is already formatted"):
self.error = self.ERROR_ALREADYFORMATTED
self.retryargs = [ "-force" ]
if line.startswith("- media is not blank") or line.startswith(" -format=full to perform full (lengthy) reformat;"):
self.error = self.ERROR_ALREADYFORMATTED
self.retryargs = [ "-blank" ]
if line.startswith(":-( mounted media doesn't appear to be"):
self.error = self.ERROR_NOTWRITEABLE
def processOutput(self, data):
print "[DVDformatTask processOutput] ", data
if data.endswith('%'):
data= data.replace('\x08','')
self.progress = int(float(data[:-1])*10)
else:
Task.processOutput(self, data)
|
gpl-2.0
|
chartjes/liesitoldmykids
|
defensio/__init__.py
|
1
|
4092
|
import sys
def is_python3():
return sys.version_info[0] == 3
if is_python3():
import urllib.parse
import http.client
else:
import urllib
import httplib
import simplejson as json
API_VERSION = '2.0'
API_HOST = 'api.defensio.com'
LIB_VERSION = '0.9.1'
ROOT_NODE = 'defensio-result'
FORMAT = 'json'
USER_AGENT = "Defensio-Python %(LIB_VERSION)s"%locals()
CLIENT = "Defensio-Python | %(LIB_VERSION)s | Camilo Lopez | [email protected]"%locals()
class Defensio(object):
"""Small, but full featured Defensio client class"""
def __init__(self, api_key, client=CLIENT):
"""Constructor
api_key -- A defensio api key you can get one from http://defensio.com/
        client -- Client signature for your application; for details see http://defensio.com/api
"""
self.client = client
self.api_key = api_key
def get_user(self):
""" Get information about the api key """
return self._call('GET', self._generate_url_path())
def post_document(self, data):
"""
Create and analyze a new document
data -- A Dictionary representing the new document
"""
data.update({ 'client' : CLIENT })
return self._call('POST', self._generate_url_path('documents'), data)
def get_document(self, signature):
"""
Get the status of an existing document
signature -- The signature of the desired document
"""
return self._call('GET', self._generate_url_path('documents', signature))
def put_document(self, signature, data):
"""
Modify the properties of an existing document
signature -- The signature for the desired document
        data -- A Dictionary with the new allowed value, e.g. {'allow': false}
"""
return self._call('PUT', self._generate_url_path('documents', signature), data)
def get_basic_stats(self):
""" Get basic statistics for the current user """
return self._call('GET', self._generate_url_path('basic-stats'))
def get_extended_stats(self, data):
"""
Get more exhaustive statistics for the current user
data -- A dictionary with the range of dates you want the stats for {'from': '2010/01/01', 'to': '2010/01/10'}
"""
return self._call('GET', self._generate_url_path('extended-stats') + '?' + self._urlencode(data))
def post_profanity_filter(self, data):
"""
Filter a set of values based on a pre-defined dictionary
data -- Fields to be filtered
"""
return self._call('POST', self._generate_url_path('profanity-filter'), data)
def handle_post_document_async_callback(self, request):
"""
Takes the request string of an async request callback and returns a Dictionary
request -- String posted by Defensio as a callback
"""
return self._parse_body(request)
def _call(self, method, path, data=None):
""" Do the actual HTTP request """
if is_python3():
conn = http.client.HTTPConnection(API_HOST)
else:
conn = httplib.HTTPConnection(API_HOST)
headers = {'User-Agent' : USER_AGENT}
if data:
headers.update( {'Content-type': 'application/x-www-form-urlencoded'} )
conn.request(method, path, self._urlencode(data), headers)
else:
conn.request(method, path, None, headers)
response = conn.getresponse()
result = [response.status, self._parse_body(response.read())]
conn.close()
return result
def _generate_url_path(self, action=None, id=None):
url = '/' + API_VERSION + '/users/' + self.api_key
if action: url = url + '/' + action
if id: url = url + '/' + id
url = url + '.' + FORMAT
return url
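        # e.g. _generate_url_path('documents', 'abc123') with an api_key of
        # 'KEY' yields '/2.0/users/KEY/documents/abc123.json' ('abc123' is a
        # made-up signature for illustration)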
def _parse_body(self, body):
""" For just call a deserializer for FORMAT"""
if is_python3():
return json.loads(body.decode('UTF-8'))
else:
return json.loads(body)
def _urlencode(self, url):
if is_python3():
return urllib.parse.urlencode(url)
else:
return urllib.urlencode(url)
def handle_post_document_async_callback(request):
    """ Shortcut function to handle callbacks """
    return Defensio(None).handle_post_document_async_callback(request)
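# Usage sketch (API key and document fields are placeholders; see
# http://defensio.com/api for the actual required fields):
#   client = Defensio('your-api-key')
#   status, result = client.post_document({'type': 'comment',
#                                          'content': 'some user comment'})
#   status, result = client.get_document(result[ROOT_NODE]['signature'])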
|
mit
|
ecordell/pymacaroons
|
tests/functional_tests/serialization_tests.py
|
1
|
4067
|
from nose.tools import *
from pymacaroons import Macaroon, Verifier, MACAROON_V1, MACAROON_V2
from pymacaroons.serializers import JsonSerializer
class TestSerializationCompatibility(object):
def setup(self):
pass
def test_from_go_macaroon_json_v2(self):
# The following macaroon have been generated with
# https://github.com/go-macaroon/macaroon
# to test the deserialization.
json_v1 = '{"caveats":[{"cid":"fp caveat"},{"cid":"tp caveat",' \
'"vid":"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAp_MgxHrfLnfvNuYDo' \
'zNKWTlRPPx6VemasWnPpJdAWE6FWmOuFX4sB4-a1oAURDp",' \
'"cl":"tp location"}],"location":"my location",' \
'"identifier":"my identifier",' \
'"signature":"483b3881c9990e5099cb6695da3164daa64da60417b' \
'caf9e9dc4c0a9968f6636"}'
json_v1_discharge = '{"caveats":[],"location":"tp location",' \
'"identifier":"tp caveat",' \
'"signature":"8506007f69ae3e6a654e0b9769f20dd9da5' \
'd2af7860070d6776c15989fb7dea6"}'
m = Macaroon.deserialize(json_v1, serializer=JsonSerializer())
discharge = Macaroon.deserialize(json_v1_discharge,
serializer=JsonSerializer())
assert_macaroon(m, discharge, MACAROON_V1)
binary_v1 = 'MDAxOWxvY2F0aW9uIG15IGxvY2F0aW9uCjAwMWRpZGVudGlmaWVyIG1' \
'5IGlkZW50aWZpZXIKMDAxMmNpZCBmcCBjYXZlYXQKMDAxMmNpZCB0cC' \
'BjYXZlYXQKMDA1MXZpZCAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACn' \
'8yDEet8ud+825gOjM0pZOVE8/HpV6Zqxac+kl0BYToVaY64VfiwHj5r' \
'WgBREOkKMDAxM2NsIHRwIGxvY2F0aW9uCjAwMmZzaWduYXR1cmUgSDs' \
'4gcmZDlCZy2aV2jFk2qZNpgQXvK+encTAqZaPZjYK'
binary_v1_discharge = 'MDAxOWxvY2F0aW9uIHRwIGxvY2F0aW9uCjAwMTlpZGVud' \
'GlmaWVyIHRwIGNhdmVhdAowMDJmc2lnbmF0dXJlIIUGAH' \
'9prj5qZU4Ll2nyDdnaXSr3hgBw1ndsFZift96mCg'
m = Macaroon.deserialize(binary_v1)
discharge = Macaroon.deserialize(binary_v1_discharge)
assert_macaroon(m, discharge, MACAROON_V1)
json_v2 = '{"c":[{"i":"fp caveat"},{"i":"tp caveat",' \
'"v64":"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAp_MgxHrfLnfvNuYDoz' \
'NKWTlRPPx6VemasWnPpJdAWE6FWmOuFX4sB4-a1oAURDp",' \
'"l":"tp location"}],"l":"my location","i":"my identifier",' \
'"s64":"SDs4gcmZDlCZy2aV2jFk2qZNpgQXvK-encTAqZaPZjY"}'
json_v2_discharge = '{"l":"tp location","i":"tp caveat","s64":"hQYAf2' \
'muPmplTguXafIN2dpdKveGAHDWd2wVmJ-33qY"}'
m = Macaroon.deserialize(json_v2, serializer=JsonSerializer())
discharge = Macaroon.deserialize(json_v2_discharge,
serializer=JsonSerializer())
assert_macaroon(m, discharge, MACAROON_V2)
binary_v2 = 'AgELbXkgbG9jYXRpb24CDW15IGlkZW50aWZpZXIAAglmcCBjYXZlYXQ' \
'AAQt0cCBsb2NhdGlvbgIJdHAgY2F2ZWF0BEgAAAAAAAAAAAAAAAAAAA' \
'AAAAAAAAAAAAACn8yDEet8ud+825gOjM0pZOVE8/HpV6Zqxac+kl0BY' \
'ToVaY64VfiwHj5rWgBREOkAAAYgSDs4gcmZDlCZy2aV2jFk2qZNpgQX' \
'vK+encTAqZaPZjY'
binary_v2_discharge = 'AgELdHAgbG9jYXRpb24CCXRwIGNhdmVhdAAABiCFBgB/a' \
'a4+amVOC5dp8g3Z2l0q94YAcNZ3bBWYn7fepg'
m = Macaroon.deserialize(binary_v2)
discharge = Macaroon.deserialize(binary_v2_discharge)
assert_macaroon(m, discharge, MACAROON_V2)
def assert_macaroon(m, discharge, version):
assert_equal(m.location, 'my location')
assert_equal(m.version, version)
assert_equal(m.identifier_bytes, b'my identifier')
v = Verifier(discharge_macaroons=[discharge])
v.satisfy_exact('fp caveat')
verified = v.verify(
m,
"my secret key",
)
assert_true(verified)
|
mit
|
OptiPop/external_chromium_org
|
tools/site_compare/scrapers/chrome/chromebase.py
|
189
|
5358
|
#!/usr/bin/env python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Does scraping for all currently-known versions of Chrome"""
import sys
import pywintypes
import types
from drivers import keyboard
from drivers import mouse
from drivers import windowing
# TODO: this has moved, use some logic to find it. For now,
# expects a subst k:.
DEFAULT_PATH = r"k:\chrome.exe"
def InvokeBrowser(path):
"""Invoke the Chrome browser.
Args:
path: full path to browser
Returns:
A tuple of (main window, process handle, address bar, render pane)
"""
# Reuse an existing instance of the browser if we can find one. This
# may not work correctly, especially if the window is behind other windows.
# TODO(jhaas): make this work with Vista
wnds = windowing.FindChildWindows(0, "Chrome_XPFrame")
if len(wnds):
wnd = wnds[0]
proc = None
else:
# Invoke Chrome
(proc, wnd) = windowing.InvokeAndWait(path)
# Get windows we'll need
address_bar = windowing.FindChildWindow(wnd, "Chrome_AutocompleteEdit")
render_pane = GetChromeRenderPane(wnd)
return (wnd, proc, address_bar, render_pane)
def Scrape(urls, outdir, size, pos, timeout, kwargs):
"""Invoke a browser, send it to a series of URLs, and save its output.
Args:
urls: list of URLs to scrape
outdir: directory to place output
size: size of browser window to use
pos: position of browser window
timeout: amount of time to wait for page to load
kwargs: miscellaneous keyword args
Returns:
None if success, else an error string
"""
if "path" in kwargs and kwargs["path"]: path = kwargs["path"]
else: path = DEFAULT_PATH
(wnd, proc, address_bar, render_pane) = InvokeBrowser(path)
# Resize and reposition the frame
windowing.MoveAndSizeWindow(wnd, pos, size, render_pane)
# Visit each URL we're given
if type(urls) in types.StringTypes: urls = [urls]
timedout = False
for url in urls:
# Double-click in the address bar, type the name, and press Enter
mouse.ClickInWindow(address_bar)
keyboard.TypeString(url, 0.1)
keyboard.TypeString("\n")
# Wait for the page to finish loading
load_time = windowing.WaitForThrobber(wnd, (20, 16, 36, 32), timeout)
timedout = load_time < 0
if timedout:
break
# Scrape the page
image = windowing.ScrapeWindow(render_pane)
# Save to disk
if "filename" in kwargs:
if callable(kwargs["filename"]):
filename = kwargs["filename"](url)
else:
filename = kwargs["filename"]
else:
filename = windowing.URLtoFilename(url, outdir, ".bmp")
image.save(filename)
if proc:
windowing.SetForegroundWindow(wnd)
# Send Alt-F4, then wait for process to end
keyboard.TypeString(r"{\4}", use_modifiers=True)
if not windowing.WaitForProcessExit(proc, timeout):
windowing.EndProcess(proc)
return "crashed"
if timedout:
return "timeout"
return None
def Time(urls, size, timeout, kwargs):
"""Measure how long it takes to load each of a series of URLs
Args:
urls: list of URLs to time
size: size of browser window to use
timeout: amount of time to wait for page to load
kwargs: miscellaneous keyword args
Returns:
A list of tuples (url, time). "time" can be "crashed" or "timeout"
"""
if "path" in kwargs and kwargs["path"]: path = kwargs["path"]
else: path = DEFAULT_PATH
proc = None
# Visit each URL we're given
if type(urls) in types.StringTypes: urls = [urls]
ret = []
for url in urls:
try:
# Invoke the browser if necessary
if not proc:
(wnd, proc, address_bar, render_pane) = InvokeBrowser(path)
# Resize and reposition the frame
windowing.MoveAndSizeWindow(wnd, (0,0), size, render_pane)
# Double-click in the address bar, type the name, and press Enter
mouse.ClickInWindow(address_bar)
keyboard.TypeString(url, 0.1)
keyboard.TypeString("\n")
# Wait for the page to finish loading
load_time = windowing.WaitForThrobber(wnd, (20, 16, 36, 32), timeout)
timedout = load_time < 0
if timedout:
load_time = "timeout"
# Send an alt-F4 to make the browser close; if this times out,
# we've probably got a crash
windowing.SetForegroundWindow(wnd)
keyboard.TypeString(r"{\4}", use_modifiers=True)
if not windowing.WaitForProcessExit(proc, timeout):
windowing.EndProcess(proc)
load_time = "crashed"
proc = None
except pywintypes.error:
proc = None
load_time = "crashed"
ret.append( (url, load_time) )
if proc:
windowing.SetForegroundWindow(wnd)
keyboard.TypeString(r"{\4}", use_modifiers=True)
if not windowing.WaitForProcessExit(proc, timeout):
windowing.EndProcess(proc)
return ret
def main():
# We're being invoked rather than imported, so run some tests
path = r"c:\sitecompare\scrapes\chrome\0.1.97.0"
windowing.PreparePath(path)
# Scrape three sites and save the results
Scrape([
"http://www.microsoft.com",
"http://www.google.com",
"http://www.sun.com"],
    path, (1024, 768), (0, 0), 30, {})  # Scrape requires timeout and kwargs; 30s assumed
return 0
if __name__ == "__main__":
sys.exit(main())
|
bsd-3-clause
|
MostlyOpen/odoo_addons_jcafb
|
myo_address_cst/__openerp__.py
|
1
|
1808
|
# -*- coding: utf-8 -*-
###############################################################################
#
# Copyright (C) 2013-Today Carlos Eduardo Vercelino - CLVsol
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
{
'name': 'Address (customizations for CLVhealth-JCAFB Solution)',
'summary': 'Address Module customizations for CLVhealth-JCAFB Solution.',
'version': '2.0.0',
'author': 'Carlos Eduardo Vercelino - CLVsol',
'category': 'Generic Modules/Others',
'license': 'AGPL-3',
'website': 'http://clvsol.com',
'depends': [
'myo_address_l10n_br',
],
'data': [
'views/address_view.xml',
'data/address_seq.xml',
'wizard/address_document_wizard_view.xml',
'wizard/address_responsible_wizard_view.xml',
'wizard/address_event_wizard_view.xml',
'wizard/address_summary_wizard_view.xml',
'wizard/address_update_wizard_view.xml',
],
'demo': [],
    'test': [],
    'init_xml': [],
    'update_xml': [],
'installable': True,
'application': False,
'active': False,
'css': [],
}
|
agpl-3.0
|
nuobit/odoo-addons
|
connector_ambugest/models/res_partner/binding.py
|
1
|
1728
|
# Copyright NuoBiT Solutions, S.L. (<https://www.nuobit.com>)
# Eric Antones <[email protected]>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl)
from odoo import models, fields
from odoo.addons.component.core import Component
from odoo.addons.queue_job.job import job
class ResPartner(models.Model):
_inherit = 'res.partner'
ambugest_bind_ids = fields.One2many(
comodel_name='ambugest.res.partner',
inverse_name='odoo_id',
string='Ambugest Bindings',
)
class ResPartnerBinding(models.Model):
_name = 'ambugest.res.partner'
_inherit = 'ambugest.binding'
_inherits = {'res.partner': 'odoo_id'}
odoo_id = fields.Many2one(comodel_name='res.partner',
string='Partner',
required=True,
ondelete='cascade')
## composed id
ambugest_empresa = fields.Integer(string="Empresa on Ambugest", required=True)
ambugest_codiup = fields.Integer(string="CodiUP on Ambugest", required=True)
_sql_constraints = [
('ambugest_res_partner', 'unique(ambugest_empresa, ambugest_codiup)',
'Partner with same ID on Ambugest already exists.'),
]
@job(default_channel='root.ambugest')
def import_customers_since(self, backend_record=None, since_date=None):
""" Prepare the import of partners modified on Ambugest """
filters = {
'EMPRESA': backend_record.ambugest_company_id,
}
now_fmt = fields.Datetime.now()
self.env['ambugest.res.partner'].import_batch(
backend=backend_record, filters=filters)
backend_record.import_customers_since_date = now_fmt
return True
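# Usage sketch (the 'ambugest.backend' model name is assumed from the naming
# conventions above; queue_job's with_delay() enqueues the @job method):
#   backend = env['ambugest.backend'].browse(backend_id)
#   env['ambugest.res.partner'].with_delay().import_customers_since(
#       backend_record=backend)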
|
agpl-3.0
|
jasonbot/django
|
django/utils/jslex.py
|
251
|
7779
|
"""JsLex: a lexer for Javascript"""
# Originally from https://bitbucket.org/ned/jslex
from __future__ import unicode_literals
import re
class Tok(object):
"""
A specification for a token class.
"""
num = 0
def __init__(self, name, regex, next=None):
self.id = Tok.num
Tok.num += 1
self.name = name
self.regex = regex
self.next = next
def literals(choices, prefix="", suffix=""):
"""
Create a regex from a space-separated list of literal `choices`.
If provided, `prefix` and `suffix` will be attached to each choice
individually.
"""
return "|".join(prefix + re.escape(c) + suffix for c in choices.split())
class Lexer(object):
"""
A generic multi-state regex-based lexer.
"""
def __init__(self, states, first):
self.regexes = {}
self.toks = {}
for state, rules in states.items():
parts = []
for tok in rules:
groupid = "t%d" % tok.id
self.toks[groupid] = tok
parts.append("(?P<%s>%s)" % (groupid, tok.regex))
self.regexes[state] = re.compile("|".join(parts), re.MULTILINE | re.VERBOSE)
self.state = first
def lex(self, text):
"""
Lexically analyze `text`.
Yields pairs (`name`, `tokentext`).
"""
end = len(text)
state = self.state
regexes = self.regexes
toks = self.toks
start = 0
while start < end:
for match in regexes[state].finditer(text, start):
name = match.lastgroup
tok = toks[name]
toktext = match.group(name)
start += len(toktext)
yield (tok.name, toktext)
if tok.next:
state = tok.next
break
self.state = state
class JsLexer(Lexer):
"""
A Javascript lexer
>>> lexer = JsLexer()
>>> list(lexer.lex("a = 1"))
[('id', 'a'), ('ws', ' '), ('punct', '='), ('ws', ' '), ('dnum', '1')]
This doesn't properly handle non-ASCII characters in the Javascript source.
"""
# Because these tokens are matched as alternatives in a regex, longer
# possibilities must appear in the list before shorter ones, for example,
# '>>' before '>'.
#
# Note that we don't have to detect malformed Javascript, only properly
# lex correct Javascript, so much of this is simplified.
# Details of Javascript lexical structure are taken from
# http://www.ecma-international.org/publications/files/ECMA-ST/ECMA-262.pdf
# A useful explanation of automatic semicolon insertion is at
# http://inimino.org/~inimino/blog/javascript_semicolons
both_before = [
Tok("comment", r"/\*(.|\n)*?\*/"),
Tok("linecomment", r"//.*?$"),
Tok("ws", r"\s+"),
Tok("keyword", literals("""
break case catch class const continue debugger
default delete do else enum export extends
finally for function if import in instanceof
new return super switch this throw try typeof
var void while with
""", suffix=r"\b"), next='reg'),
Tok("reserved", literals("null true false", suffix=r"\b"), next='div'),
Tok("id", r"""
            ([a-zA-Z_$ ]|\\u[0-9a-fA-F]{4})    # first char
([a-zA-Z_$0-9]|\\u[0-9a-fA-F]{4})* # rest chars
""", next='div'),
Tok("hnum", r"0[xX][0-9a-fA-F]+", next='div'),
Tok("onum", r"0[0-7]+"),
Tok("dnum", r"""
( (0|[1-9][0-9]*) # DecimalIntegerLiteral
\. # dot
[0-9]* # DecimalDigits-opt
([eE][-+]?[0-9]+)? # ExponentPart-opt
|
\. # dot
[0-9]+ # DecimalDigits
([eE][-+]?[0-9]+)? # ExponentPart-opt
|
(0|[1-9][0-9]*) # DecimalIntegerLiteral
([eE][-+]?[0-9]+)? # ExponentPart-opt
)
""", next='div'),
Tok("punct", literals("""
>>>= === !== >>> <<= >>= <= >= == != << >> &&
|| += -= *= %= &= |= ^=
"""), next="reg"),
Tok("punct", literals("++ -- ) ]"), next='div'),
Tok("punct", literals("{ } ( [ . ; , < > + - * % & | ^ ! ~ ? : ="), next='reg'),
Tok("string", r'"([^"\\]|(\\(.|\n)))*?"', next='div'),
Tok("string", r"'([^'\\]|(\\(.|\n)))*?'", next='div'),
]
both_after = [
Tok("other", r"."),
]
states = {
# slash will mean division
'div': both_before + [
Tok("punct", literals("/= /"), next='reg'),
] + both_after,
# slash will mean regex
'reg': both_before + [
Tok("regex",
r"""
/ # opening slash
# First character is..
( [^*\\/[] # anything but * \ / or [
| \\. # or an escape sequence
| \[ # or a class, which has
( [^\]\\] # anything but \ or ]
| \\. # or an escape sequence
)* # many times
\]
)
# Following characters are same, except for excluding a star
( [^\\/[] # anything but \ / or [
| \\. # or an escape sequence
| \[ # or a class, which has
( [^\]\\] # anything but \ or ]
| \\. # or an escape sequence
)* # many times
\]
)* # many times
/ # closing slash
[a-zA-Z0-9]* # trailing flags
""", next='div'),
] + both_after,
}
def __init__(self):
super(JsLexer, self).__init__(self.states, 'reg')
def prepare_js_for_gettext(js):
"""
Convert the Javascript source `js` into something resembling C for
xgettext.
What actually happens is that all the regex literals are replaced with
"REGEX".
"""
def escape_quotes(m):
"""Used in a regex to properly escape double quotes."""
s = m.group(0)
if s == '"':
return r'\"'
else:
return s
lexer = JsLexer()
c = []
for name, tok in lexer.lex(js):
if name == 'regex':
# C doesn't grok regexes, and they aren't needed for gettext,
# so just output a string instead.
tok = '"REGEX"'
elif name == 'string':
# C doesn't have single-quoted strings, so make all strings
# double-quoted.
if tok.startswith("'"):
guts = re.sub(r"\\.|.", escape_quotes, tok[1:-1])
tok = '"' + guts + '"'
elif name == 'id':
# C can't deal with Unicode escapes in identifiers. We don't
# need them for gettext anyway, so replace them with something
# innocuous
tok = tok.replace("\\", "U")
c.append(tok)
return ''.join(c)
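# Worked example (illustrative input):
#   prepare_js_for_gettext("x = /abc/; s = 'hi';")
# returns 'x = "REGEX"; s = "hi";' -- regex literals collapse to "REGEX" and
# single-quoted strings become double-quoted for xgettext.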
|
bsd-3-clause
|
hyiltiz/youtube-dl
|
youtube_dl/extractor/tvp.py
|
132
|
4801
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import re
from .common import InfoExtractor
class TvpIE(InfoExtractor):
IE_NAME = 'tvp.pl'
_VALID_URL = r'https?://(?:vod|www)\.tvp\.pl/.*/(?P<id>\d+)$'
_TESTS = [{
'url': 'http://vod.tvp.pl/filmy-fabularne/filmy-za-darmo/ogniem-i-mieczem/wideo/odc-2/4278035',
'md5': 'cdd98303338b8a7f7abab5cd14092bf2',
'info_dict': {
'id': '4278035',
'ext': 'wmv',
'title': 'Ogniem i mieczem, odc. 2',
},
}, {
'url': 'http://vod.tvp.pl/seriale/obyczajowe/czas-honoru/sezon-1-1-13/i-seria-odc-13/194536',
'md5': '8aa518c15e5cc32dfe8db400dc921fbb',
'info_dict': {
'id': '194536',
'ext': 'mp4',
'title': 'Czas honoru, I seria – odc. 13',
},
}, {
'url': 'http://www.tvp.pl/there-can-be-anything-so-i-shortened-it/17916176',
'md5': 'c3b15ed1af288131115ff17a17c19dda',
'info_dict': {
'id': '17916176',
'ext': 'mp4',
'title': 'TVP Gorzów pokaże filmy studentów z podroży dookoła świata',
},
}, {
'url': 'http://vod.tvp.pl/seriale/obyczajowe/na-sygnale/sezon-2-27-/odc-39/17834272',
'md5': 'c3b15ed1af288131115ff17a17c19dda',
'info_dict': {
'id': '17834272',
'ext': 'mp4',
'title': 'Na sygnale, odc. 39',
},
}]
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(
'http://www.tvp.pl/sess/tvplayer.php?object_id=%s' % video_id, video_id)
title = self._search_regex(
r'name\s*:\s*([\'"])Title\1\s*,\s*value\s*:\s*\1(?P<title>.+?)\1',
webpage, 'title', group='title')
series_title = self._search_regex(
r'name\s*:\s*([\'"])SeriesTitle\1\s*,\s*value\s*:\s*\1(?P<series>.+?)\1',
webpage, 'series', group='series', default=None)
if series_title:
title = '%s, %s' % (series_title, title)
thumbnail = self._search_regex(
r"poster\s*:\s*'([^']+)'", webpage, 'thumbnail', default=None)
video_url = self._search_regex(
r'0:{src:([\'"])(?P<url>.*?)\1', webpage, 'formats', group='url', default=None)
if not video_url:
video_url = self._download_json(
'http://www.tvp.pl/pub/stat/videofileinfo?video_id=%s' % video_id,
video_id)['video_url']
ext = video_url.rsplit('.', 1)[-1]
if ext != 'ism/manifest':
if '/' in ext:
ext = 'mp4'
formats = [{
'format_id': 'direct',
'url': video_url,
'ext': ext,
}]
else:
m3u8_url = re.sub('([^/]*)\.ism/manifest', r'\1.ism/\1.m3u8', video_url)
formats = self._extract_m3u8_formats(m3u8_url, video_id, 'mp4')
self._sort_formats(formats)
return {
'id': video_id,
'title': title,
'thumbnail': thumbnail,
'formats': formats,
}
class TvpSeriesIE(InfoExtractor):
IE_NAME = 'tvp.pl:Series'
_VALID_URL = r'https?://vod\.tvp\.pl/(?:[^/]+/){2}(?P<id>[^/]+)/?$'
_TESTS = [{
'url': 'http://vod.tvp.pl/filmy-fabularne/filmy-za-darmo/ogniem-i-mieczem',
'info_dict': {
'title': 'Ogniem i mieczem',
'id': '4278026',
},
'playlist_count': 4,
}, {
'url': 'http://vod.tvp.pl/audycje/podroze/boso-przez-swiat',
'info_dict': {
'title': 'Boso przez świat',
'id': '9329207',
},
'playlist_count': 86,
}]
def _real_extract(self, url):
display_id = self._match_id(url)
webpage = self._download_webpage(url, display_id, tries=5)
title = self._html_search_regex(
r'(?s) id=[\'"]path[\'"]>(?:.*? / ){2}(.*?)</span>', webpage, 'series')
playlist_id = self._search_regex(r'nodeId:\s*(\d+)', webpage, 'playlist id')
playlist = self._download_webpage(
'http://vod.tvp.pl/vod/seriesAjax?type=series&nodeId=%s&recommend'
'edId=0&sort=&page=0&pageSize=10000' % playlist_id, display_id, tries=5,
note='Downloading playlist')
videos_paths = re.findall(
'(?s)class="shortTitle">.*?href="(/[^"]+)', playlist)
entries = [
self.url_result('http://vod.tvp.pl%s' % v_path, ie=TvpIE.ie_key())
for v_path in videos_paths]
return {
'_type': 'playlist',
'id': playlist_id,
'display_id': display_id,
'title': title,
'entries': entries,
}
|
unlicense
|
compiteing/flask-ponypermission
|
venv/lib/python2.7/site-packages/pony/orm/tests/test_db_session.py
|
1
|
12359
|
from __future__ import absolute_import, print_function, division
import unittest
from datetime import date
from decimal import Decimal
from itertools import count
from pony.orm.core import *
from pony.orm.tests.testutils import *
class TestDBSession(unittest.TestCase):
def setUp(self):
self.db = Database('sqlite', ':memory:')
class X(self.db.Entity):
a = Required(int)
b = Optional(int)
self.X = X
self.db.generate_mapping(create_tables=True)
with db_session:
x1 = X(a=1, b=1)
x2 = X(a=2, b=2)
@raises_exception(TypeError, "Pass only keyword arguments to db_session or use db_session as decorator")
def test_db_session_1(self):
db_session(1, 2, 3)
@raises_exception(TypeError, "Pass only keyword arguments to db_session or use db_session as decorator")
def test_db_session_2(self):
db_session(1, 2, 3, a=10, b=20)
def test_db_session_3(self):
self.assertTrue(db_session is db_session())
def test_db_session_4(self):
with db_session:
with db_session:
self.X(a=3, b=3)
with db_session:
self.assertEqual(count(x for x in self.X), 3)
def test_db_session_decorator_1(self):
@db_session
def test():
self.X(a=3, b=3)
test()
with db_session:
self.assertEqual(count(x for x in self.X), 3)
def test_db_session_decorator_2(self):
@db_session
def test():
self.X(a=3, b=3)
1/0
try:
test()
except ZeroDivisionError:
with db_session:
self.assertEqual(count(x for x in self.X), 2)
else:
self.fail()
def test_db_session_decorator_3(self):
@db_session(allowed_exceptions=[TypeError])
def test():
self.X(a=3, b=3)
1/0
try:
test()
except ZeroDivisionError:
with db_session:
self.assertEqual(count(x for x in self.X), 2)
else:
self.fail()
def test_db_session_decorator_4(self):
@db_session(allowed_exceptions=[ZeroDivisionError])
def test():
self.X(a=3, b=3)
1/0
try:
test()
except ZeroDivisionError:
with db_session:
self.assertEqual(count(x for x in self.X), 3)
else:
self.fail()
@raises_exception(TypeError, "'retry' parameter of db_session must be of integer type. Got: %r" % str)
def test_db_session_decorator_5(self):
@db_session(retry='foobar')
def test():
pass
@raises_exception(TypeError, "'retry' parameter of db_session must not be negative. Got: -1")
def test_db_session_decorator_6(self):
@db_session(retry=-1)
def test():
pass
def test_db_session_decorator_7(self):
counter = count()
@db_session(retry_exceptions=[ZeroDivisionError])
def test():
next(counter)
self.X(a=3, b=3)
1/0
try:
test()
except ZeroDivisionError:
self.assertEqual(next(counter), 1)
with db_session:
self.assertEqual(count(x for x in self.X), 2)
else:
self.fail()
def test_db_session_decorator_8(self):
counter = count()
@db_session(retry=1, retry_exceptions=[ZeroDivisionError])
def test():
next(counter)
self.X(a=3, b=3)
1/0
try:
test()
except ZeroDivisionError:
self.assertEqual(next(counter), 2)
with db_session:
self.assertEqual(count(x for x in self.X), 2)
else:
self.fail()
def test_db_session_decorator_9(self):
counter = count()
@db_session(retry=5, retry_exceptions=[ZeroDivisionError])
def test():
next(counter)
self.X(a=3, b=3)
1/0
try:
test()
except ZeroDivisionError:
self.assertEqual(next(counter), 6)
with db_session:
self.assertEqual(count(x for x in self.X), 2)
else:
self.fail()
def test_db_session_decorator_10(self):
counter = count()
@db_session(retry=3, retry_exceptions=[TypeError])
def test():
next(counter)
self.X(a=3, b=3)
1/0
try:
test()
except ZeroDivisionError:
self.assertEqual(next(counter), 1)
with db_session:
self.assertEqual(count(x for x in self.X), 2)
else:
self.fail()
def test_db_session_decorator_11(self):
counter = count()
@db_session(retry=5, retry_exceptions=[ZeroDivisionError])
def test():
i = next(counter)
self.X(a=3, b=3)
if i < 2: 1/0
try:
test()
except ZeroDivisionError:
self.fail()
else:
self.assertEqual(next(counter), 3)
with db_session:
self.assertEqual(count(x for x in self.X), 3)
@raises_exception(TypeError, "The same exception ZeroDivisionError cannot be specified "
"in both allowed and retry exception lists simultaneously")
def test_db_session_decorator_12(self):
@db_session(retry=3, retry_exceptions=[ZeroDivisionError],
allowed_exceptions=[ZeroDivisionError])
def test():
pass
def test_db_session_decorator_13(self):
@db_session(allowed_exceptions=lambda e: isinstance(e, ZeroDivisionError))
def test():
self.X(a=3, b=3)
1/0
try:
test()
except ZeroDivisionError:
with db_session:
self.assertEqual(count(x for x in self.X), 3)
else:
self.fail()
def test_db_session_decorator_14(self):
@db_session(allowed_exceptions=lambda e: isinstance(e, TypeError))
def test():
self.X(a=3, b=3)
1/0
try:
test()
except ZeroDivisionError:
with db_session:
self.assertEqual(count(x for x in self.X), 2)
else:
self.fail()
def test_db_session_decorator_15(self):
counter = count()
@db_session(retry=3, retry_exceptions=lambda e: isinstance(e, ZeroDivisionError))
def test():
i = next(counter)
self.X(a=3, b=3)
1/0
try:
test()
except ZeroDivisionError:
self.assertEqual(next(counter), 4)
with db_session:
self.assertEqual(count(x for x in self.X), 2)
else:
self.fail()
def test_db_session_manager_1(self):
with db_session:
self.X(a=3, b=3)
with db_session:
self.assertEqual(count(x for x in self.X), 3)
@raises_exception(TypeError, "@db_session can accept 'retry' parameter "
"only when used as decorator and not as context manager")
def test_db_session_manager_2(self):
with db_session(retry=3):
self.X(a=3, b=3)
def test_db_session_manager_3(self):
try:
with db_session(allowed_exceptions=[TypeError]):
self.X(a=3, b=3)
1/0
except ZeroDivisionError:
with db_session:
self.assertEqual(count(x for x in self.X), 2)
else:
self.fail()
def test_db_session_manager_4(self):
try:
with db_session(allowed_exceptions=[ZeroDivisionError]):
self.X(a=3, b=3)
1/0
except ZeroDivisionError:
with db_session:
self.assertEqual(count(x for x in self.X), 3)
else:
self.fail()
@raises_exception(TypeError, "@db_session can accept 'ddl' parameter "
"only when used as decorator and not as context manager")
def test_db_session_ddl_1(self):
with db_session(ddl=True):
pass
@raises_exception(TransactionError, "test() cannot be called inside of db_session")
def test_db_session_ddl_2(self):
@db_session(ddl=True)
def test():
pass
with db_session:
test()
def test_db_session_ddl_3(self):
@db_session(ddl=True)
def test():
pass
test()
db = Database('sqlite', ':memory:')
class Group(db.Entity):
id = PrimaryKey(int)
major = Required(unicode)
students = Set('Student')
class Student(db.Entity):
name = Required(unicode)
picture = Optional(buffer, lazy=True)
group = Required('Group')
db.generate_mapping(create_tables=True)
with db_session:
g1 = Group(id=1, major='Math')
g2 = Group(id=2, major='Physics')
s1 = Student(id=1, name='S1', group=g1)
s2 = Student(id=2, name='S2', group=g1)
s3 = Student(id=3, name='S3', group=g2)
class TestDBSessionScope(unittest.TestCase):
def setUp(self):
rollback()
def tearDown(self):
rollback()
def test1(self):
with db_session:
s1 = Student[1]
name = s1.name
@raises_exception(DatabaseSessionIsOver, 'Cannot load attribute Student[1].picture: the database session is over')
def test2(self):
with db_session:
s1 = Student[1]
picture = s1.picture
@raises_exception(DatabaseSessionIsOver, 'Cannot load attribute Group[1].major: the database session is over')
def test3(self):
with db_session:
s1 = Student[1]
group_id = s1.group.id
major = s1.group.major
@raises_exception(DatabaseSessionIsOver, 'Cannot assign new value to attribute Student[1].name: the database session is over')
def test4(self):
with db_session:
s1 = Student[1]
s1.name = 'New name'
def test5(self):
with db_session:
g1 = Group[1]
self.assertEqual(str(g1.students), 'StudentSet([...])')
@raises_exception(DatabaseSessionIsOver, 'Cannot load collection Group[1].students: the database session is over')
def test6(self):
with db_session:
g1 = Group[1]
l = len(g1.students)
@raises_exception(DatabaseSessionIsOver, 'Cannot change collection Group[1].Group.students: the database session is over')
def test7(self):
with db_session:
s1 = Student[1]
g1 = Group[1]
g1.students.remove(s1)
@raises_exception(DatabaseSessionIsOver, 'Cannot change collection Group[1].Group.students: the database session is over')
def test8(self):
with db_session:
g2_students = Group[2].students
g1 = Group[1]
g1.students = g2_students
@raises_exception(DatabaseSessionIsOver, 'Cannot change collection Group[1].Group.students: the database session is over')
def test9(self):
with db_session:
s3 = Student[3]
g1 = Group[1]
g1.students.add(s3)
@raises_exception(DatabaseSessionIsOver, 'Cannot change collection Group[1].Group.students: the database session is over')
def test10(self):
with db_session:
g1 = Group[1]
g1.students.clear()
@raises_exception(DatabaseSessionIsOver, 'Cannot delete object Student[1]: the database session is over')
def test11(self):
with db_session:
s1 = Student[1]
s1.delete()
@raises_exception(DatabaseSessionIsOver, 'Cannot change object Student[1]: the database session is over')
def test12(self):
with db_session:
s1 = Student[1]
s1.set(name='New name')
if __name__ == '__main__':
unittest.main()
|
mit
|
michalkurka/h2o-3
|
h2o-py/tests/testdir_algos/gam/pyunit_PUBDEV_7709_gam_error_messages.py
|
2
|
2064
|
from __future__ import division
from __future__ import print_function
from past.utils import old_div
import sys
sys.path.insert(1, "../../../")
import h2o
from tests import pyunit_utils
from h2o.estimators.gam import H2OGeneralizedAdditiveEstimator
# In this test, we want to make sure the correct error message is returned when a gam column has too
# few values to be divided into bins and when the knot locations are not increasing in value.
def test_gam_knots_key():
# bad gam_column with not enough values is chosen to be gam_column
h2o_data = h2o.import_file(path=pyunit_utils.locate("smalldata/prostate/prostate_complete.csv.zip"))
h2o_data.head()
myY = "CAPSULE"
myX = ["ID","AGE","RACE","GLEASON","DCAPS","PSA","VOL","DPROS"]
h2o_data[myY] = h2o_data[myY].asfactor()
try:
h2o_model = H2OGeneralizedAdditiveEstimator(family='binomial', gam_columns=["GLEASON"])
h2o_model.train(x=myX, y=myY, training_frame=h2o_data)
assert False, "Should have throw exception due to bad gam_column choice"
except Exception as ex:
print(ex)
temp = str(ex)
assert "does have not enough values to generate well-defined knots" in temp, "wrong error message received."
    # knots not chosen in ascending order; verify the corresponding error message
knots1 = [-0.98143075, -1.99905699, 0.02599159, 1.00770987, 1.99942290]
frameKnots1 = h2o.H2OFrame(python_obj=knots1)
try:
h2o_model = H2OGeneralizedAdditiveEstimator(family='binomial', gam_columns=["GLEASON"],
knot_ids = [frameKnots1.key])
h2o_model.train(x=myX, y=myY, training_frame=h2o_data)
assert False, "Should have throw exception due to bad knot location choices"
except Exception as ex:
print(ex)
temp = str(ex)
assert "knots not sorted in ascending order for gam_column" in temp, "wrong error message received."
if __name__ == "__main__":
pyunit_utils.standalone_test(test_gam_knots_key)
else:
test_gam_knots_key()
|
apache-2.0
|
slongwang/selenium
|
py/test/selenium/webdriver/support/event_firing_webdriver_tests.py
|
65
|
8213
|
# Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
try:
from io import BytesIO
except ImportError:
from cStringIO import StringIO as BytesIO
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.common.by import By
from selenium.webdriver.support.events import EventFiringWebDriver, \
AbstractEventListener
class EventFiringWebDriverTests(unittest.TestCase):
def setup_method(self, method):
self.log = BytesIO()
def test_should_fire_navigation_events(self):
log = self.log
class TestListener(AbstractEventListener):
def before_navigate_to(self, url, driver):
log.write(("before_navigate_to %s" % url.split("/")[-1]).encode())
def after_navigate_to(self, url, driver):
log.write(("after_navigate_to %s" % url.split("/")[-1]).encode())
def before_navigate_back(self, driver):
log.write(b"before_navigate_back")
def after_navigate_back(self, driver):
log.write(b"after_navigate_back")
def before_navigate_forward(self, driver):
log.write(b"before_navigate_forward")
def after_navigate_forward(self, driver):
log.write(b"after_navigate_forward")
ef_driver = EventFiringWebDriver(self.driver, TestListener())
ef_driver.get(self._pageURL("formPage"))
ef_driver.find_element(by=By.ID, value="imageButton").submit()
self.assertEqual(ef_driver.title, "We Arrive Here")
ef_driver.back()
self.assertEqual(ef_driver.title, "We Leave From Here")
ef_driver.forward()
self.assertEqual(ef_driver.title, "We Arrive Here")
self.assertEqual(b"before_navigate_to formPage.html" \
+ b"after_navigate_to formPage.html" \
+ b"before_navigate_back" \
+ b"after_navigate_back" \
+ b"before_navigate_forward" \
+ b"after_navigate_forward", log.getvalue())
def test_should_fire_click_event(self):
log = self.log
class TestListener(AbstractEventListener):
def before_click(self, element, driver):
log.write(b"before_click")
def after_click(self, element, driver):
log.write(b"after_click")
ef_driver = EventFiringWebDriver(self.driver, TestListener())
ef_driver.get(self._pageURL("clicks"))
ef_driver.find_element(By.ID, "overflowLink").click()
self.assertEqual(ef_driver.title, "XHTML Test Page")
self.assertEqual(b"before_click" + b"after_click", log.getvalue())
def test_should_fire_change_value_event(self):
log = self.log
class TestListener(AbstractEventListener):
def before_change_value_of(self, element, driver):
log.write(b"before_change_value_of")
def after_change_value_of(self, element, driver):
log.write(b"after_change_value_of")
ef_driver = EventFiringWebDriver(self.driver, TestListener())
ef_driver.get(self._pageURL("readOnlyPage"))
element = ef_driver.find_element_by_id("writableTextInput")
element.clear()
self.assertEqual("", element.get_attribute("value"))
ef_driver.get(self._pageURL("javascriptPage"))
keyReporter = ef_driver.find_element(by=By.ID, value="keyReporter")
keyReporter.send_keys("abc def")
self.assertEqual(keyReporter.get_attribute("value"), "abc def")
self.assertEqual(b"before_change_value_of" \
+ b"after_change_value_of" \
+ b"before_change_value_of" \
+ b"after_change_value_of", log.getvalue())
def test_should_fire_find_event(self):
log = self.log
class TestListener(AbstractEventListener):
def before_find(self, by, value, driver):
log.write(("before_find by %s %s" % (by, value)).encode())
def after_find(self, by, value, driver):
log.write(("after_find by %s %s" % (by, value)).encode())
ef_driver = EventFiringWebDriver(self.driver, TestListener())
ef_driver.get(self._pageURL("simpleTest"))
e = ef_driver.find_element_by_id("oneline")
self.assertEqual("A single line of text", e.text)
e = ef_driver.find_element_by_xpath("/html/body/p[1]")
self.assertEqual("A single line of text", e.text)
ef_driver.get(self._pageURL("frameset"))
elements = ef_driver.find_elements_by_css_selector("frame#sixth")
self.assertEqual(1, len(elements))
self.assertEqual("frame", elements[0].tag_name.lower())
self.assertEqual("sixth", elements[0].get_attribute("id"))
self.assertEqual(b"before_find by id oneline" \
+ b"after_find by id oneline" \
+ b"before_find by xpath /html/body/p[1]" \
+ b"after_find by xpath /html/body/p[1]" \
+ b"before_find by css selector frame#sixth" \
+ b"after_find by css selector frame#sixth" , log.getvalue())
def test_should_call_listener_when_an_exception_is_thrown(self):
log = self.log
class TestListener(AbstractEventListener):
def on_exception(self, exception, driver):
if isinstance(exception, NoSuchElementException):
log.write(b"NoSuchElementException is thrown")
ef_driver = EventFiringWebDriver(self.driver, TestListener())
ef_driver.get(self._pageURL("simpleTest"))
try:
ef_driver.find_element(By.ID, "foo")
self.fail("Expected exception to be propagated")
except NoSuchElementException:
pass
self.assertEqual(b"NoSuchElementException is thrown", log.getvalue())
def test_should_unwrap_element_args_when_calling_scripts(self):
ef_driver = EventFiringWebDriver(self.driver, AbstractEventListener())
ef_driver.get(self._pageURL("javascriptPage"))
button = ef_driver.find_element_by_id("plainButton")
value = ef_driver.execute_script(
"arguments[0]['flibble'] = arguments[0].getAttribute('id'); return arguments[0]['flibble']",
button)
self.assertEqual("plainButton", value)
def test_should_unwrap_element_args_when_switching_frames(self):
ef_driver = EventFiringWebDriver(self.driver, AbstractEventListener())
ef_driver.get(self._pageURL("iframes"))
frame = ef_driver.find_element_by_id("iframe1")
ef_driver.switch_to.frame(frame)
self.assertEqual("click me!", ef_driver.find_element_by_id("imageButton").get_attribute("alt"))
def test_should_be_able_to_access_wrapped_instance_from_event_calls(self):
driver = self.driver
class TestListener(AbstractEventListener):
def before_navigate_to(self, url, d):
assert driver is d
ef_driver = EventFiringWebDriver(driver, TestListener())
wrapped_driver = ef_driver.wrapped_driver
assert driver is wrapped_driver
ef_driver.get(self._pageURL("simpleTest"))
def teardown_method(self, method):
self.log.close()
def _pageURL(self, name):
return self.webserver.where_is(name + '.html')
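# A minimal, hypothetical sketch of wiring a listener outside this test class;
# "driver" stands for any concrete WebDriver instance supplied by the caller.
class _PrintingListener(AbstractEventListener):
    def before_navigate_to(self, url, driver):
        print("about to navigate to %s" % url)

def wrap_with_events(driver):
    return EventFiringWebDriver(driver, _PrintingListener())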
|
apache-2.0
|
xavierwu/scikit-learn
|
sklearn/metrics/base.py
|
231
|
4378
|
"""
Common code for all metrics
"""
# Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Olivier Grisel <[email protected]>
# Arnaud Joly <[email protected]>
# Jochen Wersdorfer <[email protected]>
# Lars Buitinck <[email protected]>
# Joel Nothman <[email protected]>
# Noel Dawe <[email protected]>
# License: BSD 3 clause
from __future__ import division
import numpy as np
from ..utils import check_array, check_consistent_length
from ..utils.multiclass import type_of_target
class UndefinedMetricWarning(UserWarning):
pass
def _average_binary_score(binary_metric, y_true, y_score, average,
sample_weight=None):
"""Average a binary metric for multilabel classification
Parameters
----------
y_true : array, shape = [n_samples] or [n_samples, n_classes]
True binary labels in binary label indicators.
y_score : array, shape = [n_samples] or [n_samples, n_classes]
Target scores, can either be probability estimates of the positive
class, confidence values, or binary decisions.
average : string, [None, 'micro', 'macro' (default), 'samples', 'weighted']
If ``None``, the scores for each class are returned. Otherwise,
this determines the type of averaging performed on the data:
``'micro'``:
Calculate metrics globally by considering each element of the label
indicator matrix as a label.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label).
``'samples'``:
Calculate metrics for each instance, and find their average.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
binary_metric : callable, returns shape [n_classes]
The binary metric function to use.
Returns
-------
score : float or array of shape [n_classes]
        If not ``None``, average the score, else return the score for each
        class.
"""
average_options = (None, 'micro', 'macro', 'weighted', 'samples')
if average not in average_options:
raise ValueError('average has to be one of {0}'
''.format(average_options))
y_type = type_of_target(y_true)
if y_type not in ("binary", "multilabel-indicator"):
raise ValueError("{0} format is not supported".format(y_type))
if y_type == "binary":
return binary_metric(y_true, y_score, sample_weight=sample_weight)
check_consistent_length(y_true, y_score, sample_weight)
y_true = check_array(y_true)
y_score = check_array(y_score)
not_average_axis = 1
score_weight = sample_weight
average_weight = None
if average == "micro":
if score_weight is not None:
score_weight = np.repeat(score_weight, y_true.shape[1])
y_true = y_true.ravel()
y_score = y_score.ravel()
elif average == 'weighted':
if score_weight is not None:
average_weight = np.sum(np.multiply(
y_true, np.reshape(score_weight, (-1, 1))), axis=0)
else:
average_weight = np.sum(y_true, axis=0)
if average_weight.sum() == 0:
return 0
elif average == 'samples':
# swap average_weight <-> score_weight
average_weight = score_weight
score_weight = None
not_average_axis = 0
if y_true.ndim == 1:
y_true = y_true.reshape((-1, 1))
if y_score.ndim == 1:
y_score = y_score.reshape((-1, 1))
n_classes = y_score.shape[not_average_axis]
score = np.zeros((n_classes,))
for c in range(n_classes):
y_true_c = y_true.take([c], axis=not_average_axis).ravel()
y_score_c = y_score.take([c], axis=not_average_axis).ravel()
score[c] = binary_metric(y_true_c, y_score_c,
sample_weight=score_weight)
# Average the results
if average is not None:
return np.average(score, weights=average_weight)
else:
return score
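# A hedged sketch (the helper and toy metric below are hypothetical, not part
# of scikit-learn's public API) of the calling convention: the binary metric
# receives one label column at a time, plus the per-column sample weights.
def _demo_average_binary_score():
    y_true = np.array([[1, 0], [0, 1], [1, 1]])
    y_score = np.array([[0.9, 0.2], [0.3, 0.8], [0.7, 0.6]])
    def toy_accuracy(yt, ys, sample_weight=None):
        return np.mean((ys > 0.5) == yt)
    # 'macro' gives each label column equal weight in the final average.
    return _average_binary_score(toy_accuracy, y_true, y_score, 'macro')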
|
bsd-3-clause
|
ammarkhann/FinalSeniorCode
|
lib/python2.7/site-packages/nose/ext/dtcompat.py
|
96
|
88063
|
# Module doctest.
# Released to the public domain 16-Jan-2001, by Tim Peters ([email protected]).
# Major enhancements and refactoring by:
# Jim Fulton
# Edward Loper
# Provided as-is; use at your own risk; no warranty; no promises; enjoy!
#
# Modified for inclusion in nose to provide support for DocFileTest in
# python 2.3:
#
# - all doctests removed from module (they fail under 2.3 and 2.5)
# - now handles the $py.class extension when run under Jython
r"""Module doctest -- a framework for running examples in docstrings.
In simplest use, end each module M to be tested with:
def _test():
import doctest
doctest.testmod()
if __name__ == "__main__":
_test()
Then running the module as a script will cause the examples in the
docstrings to get executed and verified:
python M.py
This won't display anything unless an example fails, in which case the
failing example(s) and the cause(s) of the failure(s) are printed to stdout
(why not stderr? because stderr is a lame hack <0.2 wink>), and the final
line of output is "Test failed.".
Run it with the -v switch instead:
python M.py -v
and a detailed report of all examples tried is printed to stdout, along
with assorted summaries at the end.
You can force verbose mode by passing "verbose=True" to testmod, or prohibit
it by passing "verbose=False". In either of those cases, sys.argv is not
examined by testmod.
There are a variety of other ways to run doctests, including integration
with the unittest framework, and support for running non-Python text
files containing doctests. There are also many ways to override parts
of doctest's default behaviors. See the Library Reference Manual for
details.
"""
__docformat__ = 'reStructuredText en'
__all__ = [
    # 0. Option Flags
'register_optionflag',
'DONT_ACCEPT_TRUE_FOR_1',
'DONT_ACCEPT_BLANKLINE',
'NORMALIZE_WHITESPACE',
'ELLIPSIS',
'IGNORE_EXCEPTION_DETAIL',
'COMPARISON_FLAGS',
'REPORT_UDIFF',
'REPORT_CDIFF',
'REPORT_NDIFF',
'REPORT_ONLY_FIRST_FAILURE',
'REPORTING_FLAGS',
# 1. Utility Functions
'is_private',
# 2. Example & DocTest
'Example',
'DocTest',
# 3. Doctest Parser
'DocTestParser',
# 4. Doctest Finder
'DocTestFinder',
# 5. Doctest Runner
'DocTestRunner',
'OutputChecker',
'DocTestFailure',
'UnexpectedException',
'DebugRunner',
# 6. Test Functions
'testmod',
'testfile',
'run_docstring_examples',
# 7. Tester
'Tester',
# 8. Unittest Support
'DocTestSuite',
'DocFileSuite',
'set_unittest_reportflags',
# 9. Debugging Support
'script_from_examples',
'testsource',
'debug_src',
'debug',
]
import __future__
import sys, traceback, inspect, linecache, os, re
import unittest, difflib, pdb, tempfile
import warnings
from StringIO import StringIO
# Don't whine about the deprecated is_private function in this
# module's tests.
warnings.filterwarnings("ignore", "is_private", DeprecationWarning,
__name__, 0)
# There are 4 basic classes:
# - Example: a <source, want> pair, plus an intra-docstring line number.
# - DocTest: a collection of examples, parsed from a docstring, plus
# info about where the docstring came from (name, filename, lineno).
# - DocTestFinder: extracts DocTests from a given object's docstring and
# its contained objects' docstrings.
# - DocTestRunner: runs DocTest cases, and accumulates statistics.
#
# So the basic picture is:
#
# list of:
# +------+ +---------+ +-------+
# |object| --DocTestFinder-> | DocTest | --DocTestRunner-> |results|
# +------+ +---------+ +-------+
# | Example |
# | ... |
# | Example |
# +---------+
# Option constants.
OPTIONFLAGS_BY_NAME = {}
def register_optionflag(name):
# Create a new flag unless `name` is already known.
return OPTIONFLAGS_BY_NAME.setdefault(name, 1 << len(OPTIONFLAGS_BY_NAME))
DONT_ACCEPT_TRUE_FOR_1 = register_optionflag('DONT_ACCEPT_TRUE_FOR_1')
DONT_ACCEPT_BLANKLINE = register_optionflag('DONT_ACCEPT_BLANKLINE')
NORMALIZE_WHITESPACE = register_optionflag('NORMALIZE_WHITESPACE')
ELLIPSIS = register_optionflag('ELLIPSIS')
IGNORE_EXCEPTION_DETAIL = register_optionflag('IGNORE_EXCEPTION_DETAIL')
COMPARISON_FLAGS = (DONT_ACCEPT_TRUE_FOR_1 |
DONT_ACCEPT_BLANKLINE |
NORMALIZE_WHITESPACE |
ELLIPSIS |
IGNORE_EXCEPTION_DETAIL)
REPORT_UDIFF = register_optionflag('REPORT_UDIFF')
REPORT_CDIFF = register_optionflag('REPORT_CDIFF')
REPORT_NDIFF = register_optionflag('REPORT_NDIFF')
REPORT_ONLY_FIRST_FAILURE = register_optionflag('REPORT_ONLY_FIRST_FAILURE')
REPORTING_FLAGS = (REPORT_UDIFF |
REPORT_CDIFF |
REPORT_NDIFF |
REPORT_ONLY_FIRST_FAILURE)
# Special string markers for use in `want` strings:
BLANKLINE_MARKER = '<BLANKLINE>'
ELLIPSIS_MARKER = '...'
######################################################################
## Table of Contents
######################################################################
# 1. Utility Functions
# 2. Example & DocTest -- store test cases
# 3. DocTest Parser -- extracts examples from strings
# 4. DocTest Finder -- extracts test cases from objects
# 5. DocTest Runner -- runs test cases
# 6. Test Functions -- convenient wrappers for testing
# 7. Tester Class -- for backwards compatibility
# 8. Unittest Support
# 9. Debugging Support
# 10. Example Usage
######################################################################
## 1. Utility Functions
######################################################################
def is_private(prefix, base):
"""prefix, base -> true iff name prefix + "." + base is "private".
Prefix may be an empty string, and base does not contain a period.
Prefix is ignored (although functions you write conforming to this
protocol may make use of it).
Return true iff base begins with an (at least one) underscore, but
does not both begin and end with (at least) two underscores.
"""
warnings.warn("is_private is deprecated; it wasn't useful; "
"examine DocTestFinder.find() lists instead",
DeprecationWarning, stacklevel=2)
return base[:1] == "_" and not base[:2] == "__" == base[-2:]
def _extract_future_flags(globs):
"""
Return the compiler-flags associated with the future features that
have been imported into the given namespace (globs).
"""
flags = 0
for fname in __future__.all_feature_names:
feature = globs.get(fname, None)
if feature is getattr(__future__, fname):
flags |= feature.compiler_flag
return flags
def _normalize_module(module, depth=2):
"""
Return the module specified by `module`. In particular:
- If `module` is a module, then return module.
- If `module` is a string, then import and return the
module with that name.
- If `module` is None, then return the calling module.
The calling module is assumed to be the module of
the stack frame at the given depth in the call stack.
"""
if inspect.ismodule(module):
return module
elif isinstance(module, (str, unicode)):
return __import__(module, globals(), locals(), ["*"])
elif module is None:
return sys.modules[sys._getframe(depth).f_globals['__name__']]
else:
raise TypeError("Expected a module, string, or None")
def _indent(s, indent=4):
"""
    Add the given number of space characters to the beginning of every
    non-blank line in `s`, and return the result.
"""
# This regexp matches the start of non-blank lines:
return re.sub('(?m)^(?!$)', indent*' ', s)
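# Illustrative check (hypothetical, never invoked): _indent prefixes every
# non-blank line and leaves blank lines untouched.
def _demo_indent():
    assert _indent('a\n\nb\n') == '    a\n\n    b\n'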
def _exception_traceback(exc_info):
"""
Return a string containing a traceback message for the given
exc_info tuple (as returned by sys.exc_info()).
"""
# Get a traceback message.
excout = StringIO()
exc_type, exc_val, exc_tb = exc_info
traceback.print_exception(exc_type, exc_val, exc_tb, file=excout)
return excout.getvalue()
# Override some StringIO methods.
class _SpoofOut(StringIO):
def getvalue(self):
result = StringIO.getvalue(self)
# If anything at all was written, make sure there's a trailing
# newline. There's no way for the expected output to indicate
# that a trailing newline is missing.
if result and not result.endswith("\n"):
result += "\n"
# Prevent softspace from screwing up the next test case, in
# case they used print with a trailing comma in an example.
if hasattr(self, "softspace"):
del self.softspace
return result
def truncate(self, size=None):
StringIO.truncate(self, size)
if hasattr(self, "softspace"):
del self.softspace
# Worst-case linear-time ellipsis matching.
def _ellipsis_match(want, got):
if ELLIPSIS_MARKER not in want:
return want == got
# Find "the real" strings.
ws = want.split(ELLIPSIS_MARKER)
assert len(ws) >= 2
# Deal with exact matches possibly needed at one or both ends.
startpos, endpos = 0, len(got)
w = ws[0]
if w: # starts with exact match
if got.startswith(w):
startpos = len(w)
del ws[0]
else:
return False
w = ws[-1]
if w: # ends with exact match
if got.endswith(w):
endpos -= len(w)
del ws[-1]
else:
return False
if startpos > endpos:
# Exact end matches required more characters than we have, as in
# _ellipsis_match('aa...aa', 'aaa')
return False
# For the rest, we only need to find the leftmost non-overlapping
# match for each piece. If there's no overall match that way alone,
# there's no overall match period.
for w in ws:
# w may be '' at times, if there are consecutive ellipses, or
# due to an ellipsis at the start or end of `want`. That's OK.
# Search for an empty string succeeds, and doesn't change startpos.
startpos = got.find(w, startpos, endpos)
if startpos < 0:
return False
startpos += len(w)
return True
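# Illustrative checks (hypothetical, never invoked): literal text around the
# "..." marker must match exactly, while the marker absorbs anything between.
def _demo_ellipsis_match():
    assert _ellipsis_match('a...c', 'a lot of text c')
    assert not _ellipsis_match('a...z', 'abc')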
def _comment_line(line):
"Return a commented form of the given line"
line = line.rstrip()
if line:
return '# '+line
else:
return '#'
class _OutputRedirectingPdb(pdb.Pdb):
"""
A specialized version of the python debugger that redirects stdout
to a given stream when interacting with the user. Stdout is *not*
redirected when traced code is executed.
"""
def __init__(self, out):
self.__out = out
pdb.Pdb.__init__(self)
def trace_dispatch(self, *args):
# Redirect stdout to the given stream.
save_stdout = sys.stdout
sys.stdout = self.__out
# Call Pdb's trace dispatch method.
try:
return pdb.Pdb.trace_dispatch(self, *args)
finally:
sys.stdout = save_stdout
# [XX] Normalize with respect to os.path.pardir?
def _module_relative_path(module, path):
if not inspect.ismodule(module):
raise TypeError, 'Expected a module: %r' % module
if path.startswith('/'):
raise ValueError, 'Module-relative files may not have absolute paths'
# Find the base directory for the path.
if hasattr(module, '__file__'):
# A normal module/package
basedir = os.path.split(module.__file__)[0]
elif module.__name__ == '__main__':
# An interactive session.
if len(sys.argv)>0 and sys.argv[0] != '':
basedir = os.path.split(sys.argv[0])[0]
else:
basedir = os.curdir
else:
# A module w/o __file__ (this includes builtins)
raise ValueError("Can't resolve paths relative to the module " +
module + " (it has no __file__)")
# Combine the base directory and the path.
return os.path.join(basedir, *(path.split('/')))
######################################################################
## 2. Example & DocTest
######################################################################
## - An "example" is a <source, want> pair, where "source" is a
## fragment of source code, and "want" is the expected output for
## "source." The Example class also includes information about
## where the example was extracted from.
##
## - A "doctest" is a collection of examples, typically extracted from
## a string (such as an object's docstring). The DocTest class also
## includes information about where the string was extracted from.
class Example:
"""
A single doctest example, consisting of source code and expected
output. `Example` defines the following attributes:
- source: A single Python statement, always ending with a newline.
The constructor adds a newline if needed.
- want: The expected output from running the source code (either
from stdout, or a traceback in case of exception). `want` ends
with a newline unless it's empty, in which case it's an empty
string. The constructor adds a newline if needed.
- exc_msg: The exception message generated by the example, if
the example is expected to generate an exception; or `None` if
it is not expected to generate an exception. This exception
message is compared against the return value of
`traceback.format_exception_only()`. `exc_msg` ends with a
newline unless it's `None`. The constructor adds a newline
if needed.
- lineno: The line number within the DocTest string containing
this Example where the Example begins. This line number is
zero-based, with respect to the beginning of the DocTest.
- indent: The example's indentation in the DocTest string.
      I.e., the number of space characters that precede the
example's first prompt.
- options: A dictionary mapping from option flags to True or
False, which is used to override default options for this
example. Any option flags not contained in this dictionary
are left at their default value (as specified by the
DocTestRunner's optionflags). By default, no options are set.
"""
def __init__(self, source, want, exc_msg=None, lineno=0, indent=0,
options=None):
# Normalize inputs.
if not source.endswith('\n'):
source += '\n'
if want and not want.endswith('\n'):
want += '\n'
if exc_msg is not None and not exc_msg.endswith('\n'):
exc_msg += '\n'
# Store properties.
self.source = source
self.want = want
self.lineno = lineno
self.indent = indent
if options is None: options = {}
self.options = options
self.exc_msg = exc_msg
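# Illustrative construction (hypothetical, never invoked): the constructor
# normalizes both `source` and `want` to end with a newline.
def _demo_example():
    e = Example('2 + 2', '4')
    assert e.source == '2 + 2\n' and e.want == '4\n'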
class DocTest:
"""
A collection of doctest examples that should be run in a single
namespace. Each `DocTest` defines the following attributes:
- examples: the list of examples.
- globs: The namespace (aka globals) that the examples should
be run in.
- name: A name identifying the DocTest (typically, the name of
the object whose docstring this DocTest was extracted from).
- filename: The name of the file that this DocTest was extracted
from, or `None` if the filename is unknown.
- lineno: The line number within filename where this DocTest
begins, or `None` if the line number is unavailable. This
line number is zero-based, with respect to the beginning of
the file.
- docstring: The string that the examples were extracted from,
or `None` if the string is unavailable.
"""
def __init__(self, examples, globs, name, filename, lineno, docstring):
"""
Create a new DocTest containing the given examples. The
DocTest's globals are initialized with a copy of `globs`.
"""
assert not isinstance(examples, basestring), \
"DocTest no longer accepts str; use DocTestParser instead"
self.examples = examples
self.docstring = docstring
self.globs = globs.copy()
self.name = name
self.filename = filename
self.lineno = lineno
def __repr__(self):
if len(self.examples) == 0:
examples = 'no examples'
elif len(self.examples) == 1:
examples = '1 example'
else:
examples = '%d examples' % len(self.examples)
return ('<DocTest %s from %s:%s (%s)>' %
(self.name, self.filename, self.lineno, examples))
# This lets us sort tests by name:
def __cmp__(self, other):
if not isinstance(other, DocTest):
return -1
return cmp((self.name, self.filename, self.lineno, id(self)),
(other.name, other.filename, other.lineno, id(other)))
######################################################################
## 3. DocTestParser
######################################################################
class DocTestParser:
"""
A class used to parse strings containing doctest examples.
"""
# This regular expression is used to find doctest examples in a
# string. It defines three groups: `source` is the source code
# (including leading indentation and prompts); `indent` is the
# indentation of the first (PS1) line of the source code; and
# `want` is the expected output (including leading indentation).
_EXAMPLE_RE = re.compile(r'''
# Source consists of a PS1 line followed by zero or more PS2 lines.
(?P<source>
(?:^(?P<indent> [ ]*) >>> .*) # PS1 line
(?:\n [ ]* \.\.\. .*)*) # PS2 lines
\n?
# Want consists of any non-blank lines that do not start with PS1.
(?P<want> (?:(?![ ]*$) # Not a blank line
(?![ ]*>>>) # Not a line starting with PS1
.*$\n? # But any other line
)*)
''', re.MULTILINE | re.VERBOSE)
# A regular expression for handling `want` strings that contain
# expected exceptions. It divides `want` into three pieces:
# - the traceback header line (`hdr`)
# - the traceback stack (`stack`)
# - the exception message (`msg`), as generated by
# traceback.format_exception_only()
# `msg` may have multiple lines. We assume/require that the
# exception message is the first non-indented line starting with a word
# character following the traceback header line.
_EXCEPTION_RE = re.compile(r"""
# Grab the traceback header. Different versions of Python have
# said different things on the first traceback line.
^(?P<hdr> Traceback\ \(
(?: most\ recent\ call\ last
| innermost\ last
) \) :
)
\s* $ # toss trailing whitespace on the header.
(?P<stack> .*?) # don't blink: absorb stuff until...
^ (?P<msg> \w+ .*) # a line *starts* with alphanum.
""", re.VERBOSE | re.MULTILINE | re.DOTALL)
# A callable returning a true value iff its argument is a blank line
# or contains a single comment.
_IS_BLANK_OR_COMMENT = re.compile(r'^[ ]*(#.*)?$').match
def parse(self, string, name='<string>'):
"""
Divide the given string into examples and intervening text,
and return them as a list of alternating Examples and strings.
Line numbers for the Examples are 0-based. The optional
argument `name` is a name identifying this string, and is only
used for error messages.
"""
string = string.expandtabs()
# If all lines begin with the same indentation, then strip it.
min_indent = self._min_indent(string)
if min_indent > 0:
string = '\n'.join([l[min_indent:] for l in string.split('\n')])
output = []
charno, lineno = 0, 0
# Find all doctest examples in the string:
for m in self._EXAMPLE_RE.finditer(string):
# Add the pre-example text to `output`.
output.append(string[charno:m.start()])
# Update lineno (lines before this example)
lineno += string.count('\n', charno, m.start())
# Extract info from the regexp match.
(source, options, want, exc_msg) = \
self._parse_example(m, name, lineno)
# Create an Example, and add it to the list.
if not self._IS_BLANK_OR_COMMENT(source):
output.append( Example(source, want, exc_msg,
lineno=lineno,
indent=min_indent+len(m.group('indent')),
options=options) )
# Update lineno (lines inside this example)
lineno += string.count('\n', m.start(), m.end())
# Update charno.
charno = m.end()
# Add any remaining post-example text to `output`.
output.append(string[charno:])
return output
def get_doctest(self, string, globs, name, filename, lineno):
"""
Extract all doctest examples from the given string, and
collect them into a `DocTest` object.
`globs`, `name`, `filename`, and `lineno` are attributes for
the new `DocTest` object. See the documentation for `DocTest`
for more information.
"""
return DocTest(self.get_examples(string, name), globs,
name, filename, lineno, string)
def get_examples(self, string, name='<string>'):
"""
Extract all doctest examples from the given string, and return
them as a list of `Example` objects. Line numbers are
0-based, because it's most common in doctests that nothing
interesting appears on the same line as opening triple-quote,
and so the first interesting line is called \"line 1\" then.
The optional argument `name` is a name identifying this
string, and is only used for error messages.
"""
return [x for x in self.parse(string, name)
if isinstance(x, Example)]
def _parse_example(self, m, name, lineno):
"""
Given a regular expression match from `_EXAMPLE_RE` (`m`),
return a pair `(source, want)`, where `source` is the matched
example's source code (with prompts and indentation stripped);
and `want` is the example's expected output (with indentation
stripped).
`name` is the string's name, and `lineno` is the line number
where the example starts; both are used for error messages.
"""
# Get the example's indentation level.
indent = len(m.group('indent'))
# Divide source into lines; check that they're properly
# indented; and then strip their indentation & prompts.
source_lines = m.group('source').split('\n')
self._check_prompt_blank(source_lines, indent, name, lineno)
self._check_prefix(source_lines[1:], ' '*indent + '.', name, lineno)
source = '\n'.join([sl[indent+4:] for sl in source_lines])
# Divide want into lines; check that it's properly indented; and
# then strip the indentation. Spaces before the last newline should
# be preserved, so plain rstrip() isn't good enough.
want = m.group('want')
want_lines = want.split('\n')
if len(want_lines) > 1 and re.match(r' *$', want_lines[-1]):
del want_lines[-1] # forget final newline & spaces after it
self._check_prefix(want_lines, ' '*indent, name,
lineno + len(source_lines))
want = '\n'.join([wl[indent:] for wl in want_lines])
# If `want` contains a traceback message, then extract it.
m = self._EXCEPTION_RE.match(want)
if m:
exc_msg = m.group('msg')
else:
exc_msg = None
# Extract options from the source.
options = self._find_options(source, name, lineno)
return source, options, want, exc_msg
# This regular expression looks for option directives in the
# source code of an example. Option directives are comments
# starting with "doctest:". Warning: this may give false
# positives for string-literals that contain the string
# "#doctest:". Eliminating these false positives would require
# actually parsing the string; but we limit them by ignoring any
# line containing "#doctest:" that is *followed* by a quote mark.
_OPTION_DIRECTIVE_RE = re.compile(r'#\s*doctest:\s*([^\n\'"]*)$',
re.MULTILINE)
def _find_options(self, source, name, lineno):
"""
Return a dictionary containing option overrides extracted from
option directives in the given source string.
`name` is the string's name, and `lineno` is the line number
where the example starts; both are used for error messages.
"""
options = {}
# (note: with the current regexp, this will match at most once:)
for m in self._OPTION_DIRECTIVE_RE.finditer(source):
option_strings = m.group(1).replace(',', ' ').split()
for option in option_strings:
if (option[0] not in '+-' or
option[1:] not in OPTIONFLAGS_BY_NAME):
raise ValueError('line %r of the doctest for %s '
'has an invalid option: %r' %
(lineno+1, name, option))
flag = OPTIONFLAGS_BY_NAME[option[1:]]
options[flag] = (option[0] == '+')
if options and self._IS_BLANK_OR_COMMENT(source):
raise ValueError('line %r of the doctest for %s has an option '
'directive on a line with no example: %r' %
(lineno, name, source))
return options
# This regular expression finds the indentation of every non-blank
# line in a string.
_INDENT_RE = re.compile('^([ ]*)(?=\S)', re.MULTILINE)
def _min_indent(self, s):
"Return the minimum indentation of any non-blank line in `s`"
indents = [len(indent) for indent in self._INDENT_RE.findall(s)]
if len(indents) > 0:
return min(indents)
else:
return 0
def _check_prompt_blank(self, lines, indent, name, lineno):
"""
Given the lines of a source string (including prompts and
leading indentation), check to make sure that every prompt is
followed by a space character. If any line is not followed by
a space character, then raise ValueError.
"""
for i, line in enumerate(lines):
if len(line) >= indent+4 and line[indent+3] != ' ':
raise ValueError('line %r of the docstring for %s '
'lacks blank after %s: %r' %
(lineno+i+1, name,
line[indent:indent+3], line))
def _check_prefix(self, lines, prefix, name, lineno):
"""
Check that every line in the given list starts with the given
prefix; if any line does not, then raise a ValueError.
"""
for i, line in enumerate(lines):
if line and not line.startswith(prefix):
raise ValueError('line %r of the docstring for %s has '
'inconsistent leading whitespace: %r' %
(lineno+i+1, name, line))
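# Illustrative usage (hypothetical, never invoked): a one-example docstring
# parses into a single Example with prompt and indentation stripped.
def _demo_doctest_parser():
    examples = DocTestParser().get_examples('>>> 2 + 2\n4\n')
    assert len(examples) == 1
    assert examples[0].source == '2 + 2\n' and examples[0].want == '4\n'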
######################################################################
## 4. DocTest Finder
######################################################################
class DocTestFinder:
"""
A class used to extract the DocTests that are relevant to a given
object, from its docstring and the docstrings of its contained
objects. Doctests can currently be extracted from the following
object types: modules, functions, classes, methods, staticmethods,
classmethods, and properties.
"""
def __init__(self, verbose=False, parser=DocTestParser(),
recurse=True, _namefilter=None, exclude_empty=True):
"""
Create a new doctest finder.
The optional argument `parser` specifies a class or
function that should be used to create new DocTest objects (or
objects that implement the same interface as DocTest). The
signature for this factory function should match the signature
of the DocTest constructor.
If the optional argument `recurse` is false, then `find` will
only examine the given object, and not any contained objects.
If the optional argument `exclude_empty` is false, then `find`
will include tests for objects with empty docstrings.
"""
self._parser = parser
self._verbose = verbose
self._recurse = recurse
self._exclude_empty = exclude_empty
# _namefilter is undocumented, and exists only for temporary backward-
# compatibility support of testmod's deprecated isprivate mess.
self._namefilter = _namefilter
def find(self, obj, name=None, module=None, globs=None,
extraglobs=None):
"""
Return a list of the DocTests that are defined by the given
object's docstring, or by any of its contained objects'
docstrings.
The optional parameter `module` is the module that contains
the given object. If the module is not specified or is None, then
the test finder will attempt to automatically determine the
correct module. The object's module is used:
- As a default namespace, if `globs` is not specified.
- To prevent the DocTestFinder from extracting DocTests
from objects that are imported from other modules.
- To find the name of the file containing the object.
- To help find the line number of the object within its
file.
Contained objects whose module does not match `module` are ignored.
If `module` is False, no attempt to find the module will be made.
This is obscure, of use mostly in tests: if `module` is False, or
is None but cannot be found automatically, then all objects are
considered to belong to the (non-existent) module, so all contained
objects will (recursively) be searched for doctests.
The globals for each DocTest is formed by combining `globs`
and `extraglobs` (bindings in `extraglobs` override bindings
in `globs`). A new copy of the globals dictionary is created
for each DocTest. If `globs` is not specified, then it
defaults to the module's `__dict__`, if specified, or {}
otherwise. If `extraglobs` is not specified, then it defaults
to {}.
"""
# If name was not specified, then extract it from the object.
if name is None:
name = getattr(obj, '__name__', None)
if name is None:
raise ValueError("DocTestFinder.find: name must be given "
"when obj.__name__ doesn't exist: %r" %
(type(obj),))
# Find the module that contains the given object (if obj is
# a module, then module=obj.). Note: this may fail, in which
# case module will be None.
if module is False:
module = None
elif module is None:
module = inspect.getmodule(obj)
# Read the module's source code. This is used by
# DocTestFinder._find_lineno to find the line number for a
# given object's docstring.
try:
file = inspect.getsourcefile(obj) or inspect.getfile(obj)
source_lines = linecache.getlines(file)
if not source_lines:
source_lines = None
except TypeError:
source_lines = None
# Initialize globals, and merge in extraglobs.
if globs is None:
if module is None:
globs = {}
else:
globs = module.__dict__.copy()
else:
globs = globs.copy()
if extraglobs is not None:
globs.update(extraglobs)
        # Recursively explore `obj`, extracting DocTests.
tests = []
self._find(tests, obj, name, module, source_lines, globs, {})
# Sort the tests by alpha order of names, for consistency in
# verbose-mode output. This was a feature of doctest in Pythons
# <= 2.3 that got lost by accident in 2.4. It was repaired in
# 2.4.4 and 2.5.
tests.sort()
return tests
def _filter(self, obj, prefix, base):
"""
Return true if the given object should not be examined.
"""
return (self._namefilter is not None and
self._namefilter(prefix, base))
def _from_module(self, module, object):
"""
Return true if the given object is defined in the given
module.
"""
if module is None:
return True
elif inspect.isfunction(object):
return module.__dict__ is object.func_globals
elif inspect.isclass(object):
# Some jython classes don't set __module__
return module.__name__ == getattr(object, '__module__', None)
elif inspect.getmodule(object) is not None:
return module is inspect.getmodule(object)
elif hasattr(object, '__module__'):
return module.__name__ == object.__module__
elif isinstance(object, property):
            return True  # [XX] no way to be sure.
else:
raise ValueError("object must be a class or function")
def _find(self, tests, obj, name, module, source_lines, globs, seen):
"""
Find tests for the given object and any contained objects, and
add them to `tests`.
"""
if self._verbose:
print 'Finding tests in %s' % name
# If we've already processed this object, then ignore it.
if id(obj) in seen:
return
seen[id(obj)] = 1
# Find a test for this object, and add it to the list of tests.
test = self._get_test(obj, name, module, globs, source_lines)
if test is not None:
tests.append(test)
# Look for tests in a module's contained objects.
if inspect.ismodule(obj) and self._recurse:
for valname, val in obj.__dict__.items():
# Check if this contained object should be ignored.
if self._filter(val, name, valname):
continue
valname = '%s.%s' % (name, valname)
# Recurse to functions & classes.
if ((inspect.isfunction(val) or inspect.isclass(val)) and
self._from_module(module, val)):
self._find(tests, val, valname, module, source_lines,
globs, seen)
# Look for tests in a module's __test__ dictionary.
if inspect.ismodule(obj) and self._recurse:
for valname, val in getattr(obj, '__test__', {}).items():
if not isinstance(valname, basestring):
raise ValueError("DocTestFinder.find: __test__ keys "
"must be strings: %r" %
(type(valname),))
if not (inspect.isfunction(val) or inspect.isclass(val) or
inspect.ismethod(val) or inspect.ismodule(val) or
isinstance(val, basestring)):
raise ValueError("DocTestFinder.find: __test__ values "
"must be strings, functions, methods, "
"classes, or modules: %r" %
(type(val),))
valname = '%s.__test__.%s' % (name, valname)
self._find(tests, val, valname, module, source_lines,
globs, seen)
# Look for tests in a class's contained objects.
if inspect.isclass(obj) and self._recurse:
for valname, val in obj.__dict__.items():
# Check if this contained object should be ignored.
if self._filter(val, name, valname):
continue
# Special handling for staticmethod/classmethod.
if isinstance(val, staticmethod):
val = getattr(obj, valname)
if isinstance(val, classmethod):
val = getattr(obj, valname).im_func
# Recurse to methods, properties, and nested classes.
if ((inspect.isfunction(val) or inspect.isclass(val) or
isinstance(val, property)) and
self._from_module(module, val)):
valname = '%s.%s' % (name, valname)
self._find(tests, val, valname, module, source_lines,
globs, seen)
def _get_test(self, obj, name, module, globs, source_lines):
"""
Return a DocTest for the given object, if it defines a docstring;
otherwise, return None.
"""
# Extract the object's docstring. If it doesn't have one,
# then return None (no test for this object).
if isinstance(obj, basestring):
docstring = obj
else:
try:
if obj.__doc__ is None:
docstring = ''
else:
docstring = obj.__doc__
if not isinstance(docstring, basestring):
docstring = str(docstring)
except (TypeError, AttributeError):
docstring = ''
# Find the docstring's location in the file.
lineno = self._find_lineno(obj, source_lines)
# Don't bother if the docstring is empty.
if self._exclude_empty and not docstring:
return None
# Return a DocTest for this object.
if module is None:
filename = None
else:
filename = getattr(module, '__file__', module.__name__)
if filename[-4:] in (".pyc", ".pyo"):
filename = filename[:-1]
elif sys.platform.startswith('java') and \
filename.endswith('$py.class'):
filename = '%s.py' % filename[:-9]
return self._parser.get_doctest(docstring, globs, name,
filename, lineno)
def _find_lineno(self, obj, source_lines):
"""
Return a line number of the given object's docstring. Note:
this method assumes that the object has a docstring.
"""
lineno = None
# Find the line number for modules.
if inspect.ismodule(obj):
lineno = 0
# Find the line number for classes.
# Note: this could be fooled if a class is defined multiple
# times in a single file.
if inspect.isclass(obj):
if source_lines is None:
return None
pat = re.compile(r'^\s*class\s*%s\b' %
getattr(obj, '__name__', '-'))
for i, line in enumerate(source_lines):
if pat.match(line):
lineno = i
break
# Find the line number for functions & methods.
if inspect.ismethod(obj): obj = obj.im_func
if inspect.isfunction(obj): obj = obj.func_code
if inspect.istraceback(obj): obj = obj.tb_frame
if inspect.isframe(obj): obj = obj.f_code
if inspect.iscode(obj):
lineno = getattr(obj, 'co_firstlineno', None)-1
# Find the line number where the docstring starts. Assume
# that it's the first line that begins with a quote mark.
# Note: this could be fooled by a multiline function
# signature, where a continuation line begins with a quote
# mark.
if lineno is not None:
if source_lines is None:
return lineno+1
pat = re.compile('(^|.*:)\s*\w*("|\')')
for lineno in range(lineno, len(source_lines)):
if pat.match(source_lines[lineno]):
return lineno
# We couldn't find the line number.
return None
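# Illustrative usage (hypothetical, never invoked): collect the dotted names
# of every DocTest reachable from a module object.
def _demo_doctest_finder(module):
    return [test.name for test in DocTestFinder().find(module)]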
######################################################################
## 5. DocTest Runner
######################################################################
class DocTestRunner:
# This divider string is used to separate failure messages, and to
# separate sections of the summary.
DIVIDER = "*" * 70
def __init__(self, checker=None, verbose=None, optionflags=0):
"""
Create a new test runner.
Optional keyword arg `checker` is the `OutputChecker` that
should be used to compare the expected outputs and actual
outputs of doctest examples.
Optional keyword arg 'verbose' prints lots of stuff if true,
only failures if false; by default, it's true iff '-v' is in
sys.argv.
Optional argument `optionflags` can be used to control how the
test runner compares expected output to actual output, and how
it displays failures. See the documentation for `testmod` for
more information.
"""
self._checker = checker or OutputChecker()
if verbose is None:
verbose = '-v' in sys.argv
self._verbose = verbose
self.optionflags = optionflags
self.original_optionflags = optionflags
# Keep track of the examples we've run.
self.tries = 0
self.failures = 0
self._name2ft = {}
# Create a fake output target for capturing doctest output.
self._fakeout = _SpoofOut()
#/////////////////////////////////////////////////////////////////
# Reporting methods
#/////////////////////////////////////////////////////////////////
def report_start(self, out, test, example):
"""
Report that the test runner is about to process the given
example. (Only displays a message if verbose=True)
"""
if self._verbose:
if example.want:
out('Trying:\n' + _indent(example.source) +
'Expecting:\n' + _indent(example.want))
else:
out('Trying:\n' + _indent(example.source) +
'Expecting nothing\n')
def report_success(self, out, test, example, got):
"""
Report that the given example ran successfully. (Only
displays a message if verbose=True)
"""
if self._verbose:
out("ok\n")
def report_failure(self, out, test, example, got):
"""
Report that the given example failed.
"""
out(self._failure_header(test, example) +
self._checker.output_difference(example, got, self.optionflags))
def report_unexpected_exception(self, out, test, example, exc_info):
"""
Report that the given example raised an unexpected exception.
"""
out(self._failure_header(test, example) +
'Exception raised:\n' + _indent(_exception_traceback(exc_info)))
def _failure_header(self, test, example):
out = [self.DIVIDER]
if test.filename:
if test.lineno is not None and example.lineno is not None:
lineno = test.lineno + example.lineno + 1
else:
lineno = '?'
out.append('File "%s", line %s, in %s' %
(test.filename, lineno, test.name))
else:
out.append('Line %s, in %s' % (example.lineno+1, test.name))
out.append('Failed example:')
source = example.source
out.append(_indent(source))
return '\n'.join(out)
#/////////////////////////////////////////////////////////////////
# DocTest Running
#/////////////////////////////////////////////////////////////////
def __run(self, test, compileflags, out):
"""
Run the examples in `test`. Write the outcome of each example
with one of the `DocTestRunner.report_*` methods, using the
writer function `out`. `compileflags` is the set of compiler
flags that should be used to execute examples. Return a tuple
`(f, t)`, where `t` is the number of examples tried, and `f`
is the number of examples that failed. The examples are run
in the namespace `test.globs`.
"""
# Keep track of the number of failures and tries.
failures = tries = 0
# Save the option flags (since option directives can be used
# to modify them).
original_optionflags = self.optionflags
SUCCESS, FAILURE, BOOM = range(3) # `outcome` state
check = self._checker.check_output
# Process each example.
for examplenum, example in enumerate(test.examples):
            # If REPORT_ONLY_FIRST_FAILURE is set, then suppress
            # reporting after the first failure.
quiet = (self.optionflags & REPORT_ONLY_FIRST_FAILURE and
failures > 0)
# Merge in the example's options.
self.optionflags = original_optionflags
if example.options:
for (optionflag, val) in example.options.items():
if val:
self.optionflags |= optionflag
else:
self.optionflags &= ~optionflag
# Record that we started this example.
tries += 1
if not quiet:
self.report_start(out, test, example)
# Use a special filename for compile(), so we can retrieve
# the source code during interactive debugging (see
# __patched_linecache_getlines).
filename = '<doctest %s[%d]>' % (test.name, examplenum)
# Run the example in the given context (globs), and record
# any exception that gets raised. (But don't intercept
# keyboard interrupts.)
try:
# Don't blink! This is where the user's code gets run.
exec compile(example.source, filename, "single",
compileflags, 1) in test.globs
self.debugger.set_continue() # ==== Example Finished ====
exception = None
except KeyboardInterrupt:
raise
except:
exception = sys.exc_info()
self.debugger.set_continue() # ==== Example Finished ====
got = self._fakeout.getvalue() # the actual output
self._fakeout.truncate(0)
outcome = FAILURE # guilty until proved innocent or insane
# If the example executed without raising any exceptions,
# verify its output.
if exception is None:
if check(example.want, got, self.optionflags):
outcome = SUCCESS
# The example raised an exception: check if it was expected.
else:
exc_info = sys.exc_info()
exc_msg = traceback.format_exception_only(*exc_info[:2])[-1]
if not quiet:
got += _exception_traceback(exc_info)
# If `example.exc_msg` is None, then we weren't expecting
# an exception.
if example.exc_msg is None:
outcome = BOOM
# We expected an exception: see whether it matches.
elif check(example.exc_msg, exc_msg, self.optionflags):
outcome = SUCCESS
# Another chance if they didn't care about the detail.
elif self.optionflags & IGNORE_EXCEPTION_DETAIL:
m1 = re.match(r'[^:]*:', example.exc_msg)
m2 = re.match(r'[^:]*:', exc_msg)
if m1 and m2 and check(m1.group(0), m2.group(0),
self.optionflags):
outcome = SUCCESS
# Report the outcome.
if outcome is SUCCESS:
if not quiet:
self.report_success(out, test, example, got)
elif outcome is FAILURE:
if not quiet:
self.report_failure(out, test, example, got)
failures += 1
elif outcome is BOOM:
if not quiet:
self.report_unexpected_exception(out, test, example,
exc_info)
failures += 1
else:
assert False, ("unknown outcome", outcome)
# Restore the option flags (in case they were modified)
self.optionflags = original_optionflags
# Record and return the number of failures and tries.
self.__record_outcome(test, failures, tries)
return failures, tries
def __record_outcome(self, test, f, t):
"""
Record the fact that the given DocTest (`test`) generated `f`
failures out of `t` tried examples.
"""
f2, t2 = self._name2ft.get(test.name, (0,0))
self._name2ft[test.name] = (f+f2, t+t2)
self.failures += f
self.tries += t
__LINECACHE_FILENAME_RE = re.compile(r'<doctest '
r'(?P<name>[\w\.]+)'
r'\[(?P<examplenum>\d+)\]>$')
def __patched_linecache_getlines(self, filename):
m = self.__LINECACHE_FILENAME_RE.match(filename)
if m and m.group('name') == self.test.name:
example = self.test.examples[int(m.group('examplenum'))]
return example.source.splitlines(True)
else:
return self.save_linecache_getlines(filename)
def run(self, test, compileflags=None, out=None, clear_globs=True):
"""
Run the examples in `test`, and display the results using the
writer function `out`.
The examples are run in the namespace `test.globs`. If
`clear_globs` is true (the default), then this namespace will
be cleared after the test runs, to help with garbage
collection. If you would like to examine the namespace after
the test completes, then use `clear_globs=False`.
`compileflags` gives the set of flags that should be used by
the Python compiler when running the examples. If not
specified, then it will default to the set of future-import
flags that apply to `globs`.
The output of each example is checked using
`DocTestRunner.check_output`, and the results are formatted by
the `DocTestRunner.report_*` methods.
"""
self.test = test
if compileflags is None:
compileflags = _extract_future_flags(test.globs)
save_stdout = sys.stdout
if out is None:
out = save_stdout.write
sys.stdout = self._fakeout
# Patch pdb.set_trace to restore sys.stdout during interactive
# debugging (so it's not still redirected to self._fakeout).
# Note that the interactive output will go to *our*
# save_stdout, even if that's not the real sys.stdout; this
# allows us to write test cases for the set_trace behavior.
save_set_trace = pdb.set_trace
self.debugger = _OutputRedirectingPdb(save_stdout)
self.debugger.reset()
pdb.set_trace = self.debugger.set_trace
# Patch linecache.getlines, so we can see the example's source
# when we're inside the debugger.
self.save_linecache_getlines = linecache.getlines
linecache.getlines = self.__patched_linecache_getlines
try:
return self.__run(test, compileflags, out)
finally:
sys.stdout = save_stdout
pdb.set_trace = save_set_trace
linecache.getlines = self.save_linecache_getlines
if clear_globs:
test.globs.clear()
#/////////////////////////////////////////////////////////////////
# Summarization
#/////////////////////////////////////////////////////////////////
def summarize(self, verbose=None):
"""
Print a summary of all the test cases that have been run by
this DocTestRunner, and return a tuple `(f, t)`, where `f` is
the total number of failed examples, and `t` is the total
number of tried examples.
The optional `verbose` argument controls how detailed the
summary is. If the verbosity is not specified, then the
DocTestRunner's verbosity is used.
"""
if verbose is None:
verbose = self._verbose
notests = []
passed = []
failed = []
totalt = totalf = 0
for x in self._name2ft.items():
name, (f, t) = x
assert f <= t
totalt += t
totalf += f
if t == 0:
notests.append(name)
elif f == 0:
passed.append( (name, t) )
else:
failed.append(x)
if verbose:
if notests:
print len(notests), "items had no tests:"
notests.sort()
for thing in notests:
print " ", thing
if passed:
print len(passed), "items passed all tests:"
passed.sort()
for thing, count in passed:
print " %3d tests in %s" % (count, thing)
if failed:
print self.DIVIDER
print len(failed), "items had failures:"
failed.sort()
for thing, (f, t) in failed:
print " %3d of %3d in %s" % (f, t, thing)
if verbose:
print totalt, "tests in", len(self._name2ft), "items."
print totalt - totalf, "passed and", totalf, "failed."
if totalf:
print "***Test Failed***", totalf, "failures."
elif verbose:
print "Test passed."
return totalf, totalt
#/////////////////////////////////////////////////////////////////
# Backward compatibility cruft to maintain doctest.master.
#/////////////////////////////////////////////////////////////////
def merge(self, other):
d = self._name2ft
for name, (f, t) in other._name2ft.items():
if name in d:
print "*** DocTestRunner.merge: '" + name + "' in both" \
" testers; summing outcomes."
f2, t2 = d[name]
f = f + f2
t = t + t2
d[name] = f, t
class OutputChecker:
"""
A class used to check whether the actual output from a doctest
example matches the expected output. `OutputChecker` defines two
methods: `check_output`, which compares a given pair of outputs,
and returns true if they match; and `output_difference`, which
returns a string describing the differences between two outputs.
"""
def check_output(self, want, got, optionflags):
"""
Return True iff the actual output from an example (`got`)
matches the expected output (`want`). These strings are
always considered to match if they are identical; but
depending on what option flags the test runner is using,
several non-exact match types are also possible. See the
documentation for `DocTestRunner` for more information about
option flags.
"""
# Handle the common case first, for efficiency:
# if they're string-identical, always return true.
if got == want:
return True
# The values True and False replaced 1 and 0 as the return
# value for boolean comparisons in Python 2.3.
if not (optionflags & DONT_ACCEPT_TRUE_FOR_1):
if (got,want) == ("True\n", "1\n"):
return True
if (got,want) == ("False\n", "0\n"):
return True
# <BLANKLINE> can be used as a special sequence to signify a
# blank line, unless the DONT_ACCEPT_BLANKLINE flag is used.
if not (optionflags & DONT_ACCEPT_BLANKLINE):
# Replace <BLANKLINE> in want with a blank line.
want = re.sub('(?m)^%s\s*?$' % re.escape(BLANKLINE_MARKER),
'', want)
# If a line in got contains only spaces, then remove the
# spaces.
got = re.sub('(?m)^\s*?$', '', got)
if got == want:
return True
# This flag causes doctest to ignore any differences in the
# contents of whitespace strings. Note that this can be used
# in conjunction with the ELLIPSIS flag.
if optionflags & NORMALIZE_WHITESPACE:
got = ' '.join(got.split())
want = ' '.join(want.split())
if got == want:
return True
# The ELLIPSIS flag says to let the sequence "..." in `want`
# match any substring in `got`.
if optionflags & ELLIPSIS:
if _ellipsis_match(want, got):
return True
# We didn't find any match; return false.
return False
# Should we do a fancy diff?
def _do_a_fancy_diff(self, want, got, optionflags):
# Not unless they asked for a fancy diff.
if not optionflags & (REPORT_UDIFF |
REPORT_CDIFF |
REPORT_NDIFF):
return False
# If expected output uses ellipsis, a meaningful fancy diff is
# too hard ... or maybe not. In two real-life failures Tim saw,
# a diff was a major help anyway, so this is commented out.
# [todo] _ellipsis_match() knows which pieces do and don't match,
# and could be the basis for a kick-ass diff in this case.
##if optionflags & ELLIPSIS and ELLIPSIS_MARKER in want:
## return False
# ndiff does intraline difference marking, so can be useful even
# for 1-line differences.
if optionflags & REPORT_NDIFF:
return True
# The other diff types need at least a few lines to be helpful.
return want.count('\n') > 2 and got.count('\n') > 2
def output_difference(self, example, got, optionflags):
"""
Return a string describing the differences between the
expected output for a given example (`example`) and the actual
output (`got`). `optionflags` is the set of option flags used
to compare `want` and `got`.
"""
want = example.want
# If <BLANKLINE>s are being used, then replace blank lines
# with <BLANKLINE> in the actual output string.
if not (optionflags & DONT_ACCEPT_BLANKLINE):
got = re.sub('(?m)^[ ]*(?=\n)', BLANKLINE_MARKER, got)
# Check if we should use diff.
if self._do_a_fancy_diff(want, got, optionflags):
# Split want & got into lines.
want_lines = want.splitlines(True) # True == keep line ends
got_lines = got.splitlines(True)
# Use difflib to find their differences.
if optionflags & REPORT_UDIFF:
diff = difflib.unified_diff(want_lines, got_lines, n=2)
diff = list(diff)[2:] # strip the diff header
kind = 'unified diff with -expected +actual'
elif optionflags & REPORT_CDIFF:
diff = difflib.context_diff(want_lines, got_lines, n=2)
diff = list(diff)[2:] # strip the diff header
kind = 'context diff with expected followed by actual'
elif optionflags & REPORT_NDIFF:
engine = difflib.Differ(charjunk=difflib.IS_CHARACTER_JUNK)
diff = list(engine.compare(want_lines, got_lines))
kind = 'ndiff with -expected +actual'
else:
assert 0, 'Bad diff option'
# Remove trailing whitespace on diff output.
diff = [line.rstrip() + '\n' for line in diff]
return 'Differences (%s):\n' % kind + _indent(''.join(diff))
# If we're not using diff, then simply list the expected
# output followed by the actual output.
if want and got:
return 'Expected:\n%sGot:\n%s' % (_indent(want), _indent(got))
elif want:
return 'Expected:\n%sGot nothing\n' % _indent(want)
elif got:
return 'Expected nothing\nGot:\n%s' % _indent(got)
else:
return 'Expected nothing\nGot nothing\n'
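# Editorial sketch, not part of the original module: a quick exercise of
# OutputChecker.check_output under the option flags documented above.
# All values here are illustrative.
def _demo_output_checker():
    checker = OutputChecker()
    # Identical strings always match.
    assert checker.check_output("4\n", "4\n", 0)
    # ELLIPSIS lets "..." in the expected output match any substring.
    assert checker.check_output("<object at ...>\n",
                                "<object at 0xdeadbeef>\n", ELLIPSIS)
    # NORMALIZE_WHITESPACE compares whitespace-normalized strings.
    assert checker.check_output("1  2\n3\n", "1 2 3\n",
                                NORMALIZE_WHITESPACE)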
class DocTestFailure(Exception):
"""A DocTest example has failed in debugging mode.
The exception instance has variables:
- test: the DocTest object being run
- example: the Example object that failed
- got: the actual output
"""
def __init__(self, test, example, got):
self.test = test
self.example = example
self.got = got
def __str__(self):
return str(self.test)
class UnexpectedException(Exception):
"""A DocTest example has encountered an unexpected exception
The exception instance has variables:
- test: the DocTest object being run
- excample: the Example object that failed
- exc_info: the exception info
"""
def __init__(self, test, example, exc_info):
self.test = test
self.example = example
self.exc_info = exc_info
def __str__(self):
return str(self.test)
class DebugRunner(DocTestRunner):
def run(self, test, compileflags=None, out=None, clear_globs=True):
r = DocTestRunner.run(self, test, compileflags, out, False)
if clear_globs:
test.globs.clear()
return r
def report_unexpected_exception(self, out, test, example, exc_info):
raise UnexpectedException(test, example, exc_info)
def report_failure(self, out, test, example, got):
raise DocTestFailure(test, example, got)
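# Editorial sketch, not part of the original module: DebugRunner raises on
# the first failure instead of recording it, which makes the failing
# example available for inspection.  The source string is illustrative.
def _demo_debug_runner():
    src = '>>> 1 + 1\n3\n'
    test = DocTestParser().get_doctest(src, {}, 'demo', None, 0)
    try:
        DebugRunner(verbose=False).run(test)
    except DocTestFailure, e:
        print 'expected %r, got %r' % (e.example.want, e.got)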
######################################################################
## 6. Test Functions
######################################################################
# These should be backwards compatible.
# For backward compatibility, a global instance of a DocTestRunner
# class, updated by testmod.
master = None
def testmod(m=None, name=None, globs=None, verbose=None, isprivate=None,
report=True, optionflags=0, extraglobs=None,
raise_on_error=False, exclude_empty=False):
"""m=None, name=None, globs=None, verbose=None, isprivate=None,
report=True, optionflags=0, extraglobs=None, raise_on_error=False,
exclude_empty=False
Test examples in docstrings in functions and classes reachable
from module m (or the current module if m is not supplied), starting
with m.__doc__. Unless isprivate is specified, private names
are not skipped.
Also test examples reachable from dict m.__test__ if it exists and is
not None. m.__test__ maps names to functions, classes and strings;
function and class docstrings are tested even if the name is private;
strings are tested directly, as if they were docstrings.
Return (#failures, #tests).
See doctest.__doc__ for an overview.
Optional keyword arg "name" gives the name of the module; by default
use m.__name__.
Optional keyword arg "globs" gives a dict to be used as the globals
when executing examples; by default, use m.__dict__. A copy of this
dict is actually used for each docstring, so that each docstring's
examples start with a clean slate.
Optional keyword arg "extraglobs" gives a dictionary that should be
merged into the globals that are used to execute examples. By
default, no extra globals are used. This is new in 2.4.
Optional keyword arg "verbose" prints lots of stuff if true, prints
only failures if false; by default, it's true iff "-v" is in sys.argv.
Optional keyword arg "report" prints a summary at the end when true,
else prints nothing at the end. In verbose mode, the summary is
detailed, else very brief (in fact, empty if all tests passed).
Optional keyword arg "optionflags" or's together module constants,
and defaults to 0. This is new in 2.3. Possible values (see the
docs for details):
DONT_ACCEPT_TRUE_FOR_1
DONT_ACCEPT_BLANKLINE
NORMALIZE_WHITESPACE
ELLIPSIS
IGNORE_EXCEPTION_DETAIL
REPORT_UDIFF
REPORT_CDIFF
REPORT_NDIFF
REPORT_ONLY_FIRST_FAILURE
Optional keyword arg "raise_on_error" raises an exception on the
first unexpected exception or failure. This allows failures to be
post-mortem debugged.
Deprecated in Python 2.4:
Optional keyword arg "isprivate" specifies a function used to
determine whether a name is private. The default function is to
treat all functions as public. Optionally, "isprivate" can be
set to doctest.is_private to skip over functions marked as private
using the underscore naming convention; see its docs for details.
Advanced tomfoolery: testmod runs methods of a local instance of
class doctest.Tester, then merges the results into (or creates)
global Tester instance doctest.master. Methods of doctest.master
can be called directly too, if you want to do something unusual.
Passing report=0 to testmod is especially useful then, to delay
displaying a summary. Invoke doctest.master.summarize(verbose)
when you're done fiddling.
"""
global master
if isprivate is not None:
warnings.warn("the isprivate argument is deprecated; "
"examine DocTestFinder.find() lists instead",
DeprecationWarning)
# If no module was given, then use __main__.
if m is None:
# DWA - m will still be None if this wasn't invoked from the command
# line, in which case the following TypeError is about as good an error
# as we should expect
m = sys.modules.get('__main__')
# Check that we were actually given a module.
if not inspect.ismodule(m):
raise TypeError("testmod: module required; %r" % (m,))
# If no name was given, then use the module's name.
if name is None:
name = m.__name__
# Find, parse, and run all tests in the given module.
finder = DocTestFinder(_namefilter=isprivate, exclude_empty=exclude_empty)
if raise_on_error:
runner = DebugRunner(verbose=verbose, optionflags=optionflags)
else:
runner = DocTestRunner(verbose=verbose, optionflags=optionflags)
for test in finder.find(m, name, globs=globs, extraglobs=extraglobs):
runner.run(test)
if report:
runner.summarize()
if master is None:
master = runner
else:
master.merge(runner)
return runner.failures, runner.tries
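# Editorial sketch, not part of the original module: the usual way to
# invoke testmod on a module object and inspect the aggregate counts.
# `module` is a hypothetical argument.
def _demo_testmod(module):
    failures, tries = testmod(module, report=False)
    print '%d of %d examples failed in %s' % (failures, tries,
                                              module.__name__)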
def testfile(filename, module_relative=True, name=None, package=None,
globs=None, verbose=None, report=True, optionflags=0,
extraglobs=None, raise_on_error=False, parser=DocTestParser()):
"""
Test examples in the given file. Return (#failures, #tests).
Optional keyword arg "module_relative" specifies how filenames
should be interpreted:
- If "module_relative" is True (the default), then "filename"
specifies a module-relative path. By default, this path is
relative to the calling module's directory; but if the
"package" argument is specified, then it is relative to that
package. To ensure os-independence, "filename" should use
"/" characters to separate path segments, and should not
be an absolute path (i.e., it may not begin with "/").
- If "module_relative" is False, then "filename" specifies an
os-specific path. The path may be absolute or relative (to
the current working directory).
Optional keyword arg "name" gives the name of the test; by default
use the file's basename.
Optional keyword argument "package" is a Python package or the
name of a Python package whose directory should be used as the
base directory for a module relative filename. If no package is
specified, then the calling module's directory is used as the base
directory for module relative filenames. It is an error to
specify "package" if "module_relative" is False.
Optional keyword arg "globs" gives a dict to be used as the globals
when executing examples; by default, use {}. A copy of this dict
is actually used for each docstring, so that each docstring's
examples start with a clean slate.
Optional keyword arg "extraglobs" gives a dictionary that should be
merged into the globals that are used to execute examples. By
default, no extra globals are used.
Optional keyword arg "verbose" prints lots of stuff if true, prints
only failures if false; by default, it's true iff "-v" is in sys.argv.
Optional keyword arg "report" prints a summary at the end when true,
else prints nothing at the end. In verbose mode, the summary is
detailed, else very brief (in fact, empty if all tests passed).
Optional keyword arg "optionflags" or's together module constants,
and defaults to 0. Possible values (see the docs for details):
DONT_ACCEPT_TRUE_FOR_1
DONT_ACCEPT_BLANKLINE
NORMALIZE_WHITESPACE
ELLIPSIS
IGNORE_EXCEPTION_DETAIL
REPORT_UDIFF
REPORT_CDIFF
REPORT_NDIFF
REPORT_ONLY_FIRST_FAILURE
Optional keyword arg "raise_on_error" raises an exception on the
first unexpected exception or failure. This allows failures to be
post-mortem debugged.
Optional keyword arg "parser" specifies a DocTestParser (or
subclass) that should be used to extract tests from the files.
Advanced tomfoolery: testmod runs methods of a local instance of
class doctest.Tester, then merges the results into (or creates)
global Tester instance doctest.master. Methods of doctest.master
can be called directly too, if you want to do something unusual.
Passing report=0 to testmod is especially useful then, to delay
displaying a summary. Invoke doctest.master.summarize(verbose)
when you're done fiddling.
"""
global master
if package and not module_relative:
raise ValueError("Package may only be specified for module-"
"relative paths.")
# Relativize the path
if module_relative:
package = _normalize_module(package)
filename = _module_relative_path(package, filename)
# If no name was given, then use the file's name.
if name is None:
name = os.path.basename(filename)
# Assemble the globals.
if globs is None:
globs = {}
else:
globs = globs.copy()
if extraglobs is not None:
globs.update(extraglobs)
if raise_on_error:
runner = DebugRunner(verbose=verbose, optionflags=optionflags)
else:
runner = DocTestRunner(verbose=verbose, optionflags=optionflags)
# Read the file, convert it to a test, and run it.
s = open(filename).read()
test = parser.get_doctest(s, globs, name, filename, 0)
runner.run(test)
if report:
runner.summarize()
if master is None:
master = runner
else:
master.merge(runner)
return runner.failures, runner.tries
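# Editorial sketch, not part of the original module: running a file of
# doctests with a couple of comparison flags.  The path is hypothetical
# and, by default, resolved relative to the calling module.
def _demo_testfile():
    return testfile('example_tests.txt',
                    optionflags=ELLIPSIS | NORMALIZE_WHITESPACE)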
def run_docstring_examples(f, globs, verbose=False, name="NoName",
compileflags=None, optionflags=0):
"""
Test examples in the given object's docstring (`f`), using `globs`
as globals. Optional argument `name` is used in failure messages.
If the optional argument `verbose` is true, then generate output
even if there are no failures.
`compileflags` gives the set of flags that should be used by the
Python compiler when running the examples. If not specified, then
it will default to the set of future-import flags that apply to
`globs`.
Optional keyword arg `optionflags` specifies options for the
testing and output. See the documentation for `testmod` for more
information.
"""
# Find, parse, and run all tests in the given module.
finder = DocTestFinder(verbose=verbose, recurse=False)
runner = DocTestRunner(verbose=verbose, optionflags=optionflags)
for test in finder.find(f, name, globs=globs):
runner.run(test, compileflags=compileflags)
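# Editorial sketch, not part of the original module: checking one
# object's docstring in isolation; `globs` must contain any names the
# examples call.
def _demo_run_docstring_examples():
    def square(x):
        """Return x squared.
        >>> square(3)
        9
        """
        return x * x
    run_docstring_examples(square, {'square': square}, name='square')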
######################################################################
## 7. Tester
######################################################################
# This is provided only for backwards compatibility. It's not
# actually used in any way.
class Tester:
def __init__(self, mod=None, globs=None, verbose=None,
isprivate=None, optionflags=0):
warnings.warn("class Tester is deprecated; "
"use class doctest.DocTestRunner instead",
DeprecationWarning, stacklevel=2)
if mod is None and globs is None:
raise TypeError("Tester.__init__: must specify mod or globs")
if mod is not None and not inspect.ismodule(mod):
raise TypeError("Tester.__init__: mod must be a module; %r" %
(mod,))
if globs is None:
globs = mod.__dict__
self.globs = globs
self.verbose = verbose
self.isprivate = isprivate
self.optionflags = optionflags
self.testfinder = DocTestFinder(_namefilter=isprivate)
self.testrunner = DocTestRunner(verbose=verbose,
optionflags=optionflags)
def runstring(self, s, name):
test = DocTestParser().get_doctest(s, self.globs, name, None, None)
if self.verbose:
print "Running string", name
(f,t) = self.testrunner.run(test)
if self.verbose:
print f, "of", t, "examples failed in string", name
return (f,t)
def rundoc(self, object, name=None, module=None):
f = t = 0
tests = self.testfinder.find(object, name, module=module,
globs=self.globs)
for test in tests:
(f2, t2) = self.testrunner.run(test)
(f,t) = (f+f2, t+t2)
return (f,t)
def rundict(self, d, name, module=None):
import new
m = new.module(name)
m.__dict__.update(d)
if module is None:
module = False
return self.rundoc(m, name, module)
def run__test__(self, d, name):
import new
m = new.module(name)
m.__test__ = d
return self.rundoc(m, name)
def summarize(self, verbose=None):
return self.testrunner.summarize(verbose)
def merge(self, other):
self.testrunner.merge(other.testrunner)
######################################################################
## 8. Unittest Support
######################################################################
_unittest_reportflags = 0
def set_unittest_reportflags(flags):
global _unittest_reportflags
if (flags & REPORTING_FLAGS) != flags:
raise ValueError("Only reporting flags allowed", flags)
old = _unittest_reportflags
_unittest_reportflags = flags
return old
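# Editorial sketch, not part of the original module: installing default
# reporting flags for unittest-driven doctests and restoring the old
# value afterwards.
def _demo_set_unittest_reportflags():
    old = set_unittest_reportflags(REPORT_ONLY_FIRST_FAILURE)
    try:
        pass  # build and run a DocTestSuite here (hypothetical)
    finally:
        set_unittest_reportflags(old)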
class DocTestCase(unittest.TestCase):
def __init__(self, test, optionflags=0, setUp=None, tearDown=None,
checker=None):
unittest.TestCase.__init__(self)
self._dt_optionflags = optionflags
self._dt_checker = checker
self._dt_test = test
self._dt_setUp = setUp
self._dt_tearDown = tearDown
def setUp(self):
test = self._dt_test
if self._dt_setUp is not None:
self._dt_setUp(test)
def tearDown(self):
test = self._dt_test
if self._dt_tearDown is not None:
self._dt_tearDown(test)
test.globs.clear()
def runTest(self):
test = self._dt_test
old = sys.stdout
new = StringIO()
optionflags = self._dt_optionflags
if not (optionflags & REPORTING_FLAGS):
# The option flags don't include any reporting flags,
# so add the default reporting flags
optionflags |= _unittest_reportflags
runner = DocTestRunner(optionflags=optionflags,
checker=self._dt_checker, verbose=False)
try:
runner.DIVIDER = "-"*70
failures, tries = runner.run(
test, out=new.write, clear_globs=False)
finally:
sys.stdout = old
if failures:
raise self.failureException(self.format_failure(new.getvalue()))
def format_failure(self, err):
test = self._dt_test
if test.lineno is None:
lineno = 'unknown line number'
else:
lineno = '%s' % test.lineno
lname = '.'.join(test.name.split('.')[-1:])
return ('Failed doctest test for %s\n'
' File "%s", line %s, in %s\n\n%s'
% (test.name, test.filename, lineno, lname, err)
)
def debug(self):
self.setUp()
runner = DebugRunner(optionflags=self._dt_optionflags,
checker=self._dt_checker, verbose=False)
runner.run(self._dt_test)
self.tearDown()
def id(self):
return self._dt_test.name
def __repr__(self):
name = self._dt_test.name.split('.')
return "%s (%s)" % (name[-1], '.'.join(name[:-1]))
__str__ = __repr__
def shortDescription(self):
return "Doctest: " + self._dt_test.name
def DocTestSuite(module=None, globs=None, extraglobs=None, test_finder=None,
**options):
"""
Convert doctest tests for a module to a unittest test suite.
This converts each documentation string in a module that
contains doctest tests to a unittest test case. If any of the
tests in a doc string fail, then the test case fails. An exception
is raised showing the name of the file containing the test and a
(sometimes approximate) line number.
The `module` argument provides the module to be tested. The argument
can be either a module or a module name.
If no argument is given, the calling module is used.
A number of options may be provided as keyword arguments:
setUp
A set-up function. This is called before running the
tests in each file. The setUp function will be passed a DocTest
object. The setUp function can access the test globals as the
globs attribute of the test passed.
tearDown
A tear-down function. This is called after running the
tests in each file. The tearDown function will be passed a DocTest
object. The tearDown function can access the test globals as the
globs attribute of the test passed.
globs
A dictionary containing initial global variables for the tests.
optionflags
A set of doctest option flags expressed as an integer.
"""
if test_finder is None:
test_finder = DocTestFinder()
module = _normalize_module(module)
tests = test_finder.find(module, globs=globs, extraglobs=extraglobs)
if globs is None:
globs = module.__dict__
if not tests:
# Why do we want to do this? Because it reveals a bug that might
# otherwise be hidden.
raise ValueError(module, "has no tests")
tests.sort()
suite = unittest.TestSuite()
for test in tests:
if len(test.examples) == 0:
continue
if not test.filename:
filename = module.__file__
if filename[-4:] in (".pyc", ".pyo"):
filename = filename[:-1]
elif sys.platform.startswith('java') and \
filename.endswith('$py.class'):
filename = '%s.py' % filename[:-9]
test.filename = filename
suite.addTest(DocTestCase(test, **options))
return suite
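# Editorial sketch, not part of the original module: wrapping a module's
# doctests as a unittest suite.  'some_module' is a hypothetical name.
def _demo_doctest_suite():
    suite = DocTestSuite('some_module', optionflags=ELLIPSIS)
    unittest.TextTestRunner(verbosity=1).run(suite)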
class DocFileCase(DocTestCase):
def id(self):
return '_'.join(self._dt_test.name.split('.'))
def __repr__(self):
return self._dt_test.filename
__str__ = __repr__
def format_failure(self, err):
return ('Failed doctest test for %s\n File "%s", line 0\n\n%s'
% (self._dt_test.name, self._dt_test.filename, err)
)
def DocFileTest(path, module_relative=True, package=None,
globs=None, parser=DocTestParser(), **options):
if globs is None:
globs = {}
if package and not module_relative:
raise ValueError("Package may only be specified for module-"
"relative paths.")
# Relativize the path.
if module_relative:
package = _normalize_module(package)
path = _module_relative_path(package, path)
# Find the file and read it.
name = os.path.basename(path)
doc = open(path).read()
# Convert it to a test, and wrap it in a DocFileCase.
test = parser.get_doctest(doc, globs, name, path, 0)
return DocFileCase(test, **options)
def DocFileSuite(*paths, **kw):
"""A unittest suite for one or more doctest files.
The path to each doctest file is given as a string; the
interpretation of that string depends on the keyword argument
"module_relative".
A number of options may be provided as keyword arguments:
module_relative
If "module_relative" is True, then the given file paths are
interpreted as os-independent module-relative paths. By
default, these paths are relative to the calling module's
directory; but if the "package" argument is specified, then
they are relative to that package. To ensure os-independence,
"filename" should use "/" characters to separate path
segments, and may not be an absolute path (i.e., it may not
begin with "/").
If "module_relative" is False, then the given file paths are
interpreted as os-specific paths. These paths may be absolute
or relative (to the current working directory).
package
A Python package or the name of a Python package whose directory
should be used as the base directory for module relative paths.
If "package" is not specified, then the calling module's
directory is used as the base directory for module relative
filenames. It is an error to specify "package" if
"module_relative" is False.
setUp
A set-up function. This is called before running the
tests in each file. The setUp function will be passed a DocTest
object. The setUp function can access the test globals as the
globs attribute of the test passed.
tearDown
A tear-down function. This is called after running the
tests in each file. The tearDown function will be passed a DocTest
object. The tearDown function can access the test globals as the
globs attribute of the test passed.
globs
A dictionary containing initial global variables for the tests.
optionflags
A set of doctest option flags expressed as an integer.
parser
A DocTestParser (or subclass) that should be used to extract
tests from the files.
"""
suite = unittest.TestSuite()
# We do this here so that _normalize_module is called at the right
# level. If it were called in DocFileTest, then this function
# would be the caller and we might guess the package incorrectly.
if kw.get('module_relative', True):
kw['package'] = _normalize_module(kw.get('package'))
for path in paths:
suite.addTest(DocFileTest(path, **kw))
return suite
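# Editorial sketch, not part of the original module: collecting several
# doctest files into one suite.  The paths are hypothetical and
# module-relative by default.
def _demo_docfile_suite():
    suite = DocFileSuite('tests/basics.txt', 'tests/advanced.txt',
                         optionflags=NORMALIZE_WHITESPACE)
    unittest.TextTestRunner().run(suite)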
######################################################################
## 9. Debugging Support
######################################################################
def script_from_examples(s):
output = []
for piece in DocTestParser().parse(s):
if isinstance(piece, Example):
# Add the example's source code (strip trailing NL)
output.append(piece.source[:-1])
# Add the expected output:
want = piece.want
if want:
output.append('# Expected:')
output += ['## '+l for l in want.split('\n')[:-1]]
else:
# Add non-example text.
output += [_comment_line(l)
for l in piece.split('\n')[:-1]]
# Trim junk on both ends.
while output and output[-1] == '#':
output.pop()
while output and output[0] == '#':
output.pop(0)
# Combine the output, and return it.
# Add a courtesy newline to prevent exec from choking (see bug #1172785)
return '\n'.join(output) + '\n'
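# Editorial sketch, not part of the original module: converting a doctest
# session into a plain script; expected output becomes '##' comments.
def _demo_script_from_examples():
    text = '''
    Some explanatory text.
    >>> x = 41
    >>> x + 1
    42
    '''
    print script_from_examples(text)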
def testsource(module, name):
"""Extract the test sources from a doctest docstring as a script.
Provide the module (or dotted name of the module) containing the
test to be debugged and the name (within the module) of the object
with the doc string with tests to be debugged.
"""
module = _normalize_module(module)
tests = DocTestFinder().find(module)
test = [t for t in tests if t.name == name]
if not test:
raise ValueError(name, "not found in tests")
test = test[0]
testsrc = script_from_examples(test.docstring)
return testsrc
def debug_src(src, pm=False, globs=None):
"""Debug a single doctest docstring, in argument `src`'"""
testsrc = script_from_examples(src)
debug_script(testsrc, pm, globs)
def debug_script(src, pm=False, globs=None):
"Debug a test script. `src` is the script, as a string."
import pdb
# Note that tempfile.NamedTemporaryFile() cannot be used. As the
# docs say, a file so created cannot be opened by name a second time
# on modern Windows boxes, and execfile() needs to open it.
srcfilename = tempfile.mktemp(".py", "doctestdebug")
f = open(srcfilename, 'w')
f.write(src)
f.close()
try:
if globs:
globs = globs.copy()
else:
globs = {}
if pm:
try:
execfile(srcfilename, globs, globs)
except:
print sys.exc_info()[1]
pdb.post_mortem(sys.exc_info()[2])
else:
# Note that %r is vital here. '%s' instead can, e.g., cause
# backslashes to get treated as metacharacters on Windows.
pdb.run("execfile(%r)" % srcfilename, globs, globs)
finally:
os.remove(srcfilename)
def debug(module, name, pm=False):
"""Debug a single doctest docstring.
Provide the module (or dotted name of the module) containing the
test to be debugged and the name (within the module) of the object
with the docstring with tests to be debugged.
"""
module = _normalize_module(module)
testsrc = testsource(module, name)
debug_script(testsrc, pm, module.__dict__)
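# Editorial sketch, not part of the original module: debugging one
# docstring's examples under pdb.  Both names are hypothetical; pm=True
# drops into post-mortem debugging on the first error.
def _demo_debug():
    debug('some_module', 'some_module.some_function', pm=True)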
__test__ = {}
|
mit
|
UNR-AERIAL/scikit-learn
|
examples/linear_model/plot_iris_logistic.py
|
283
|
1678
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Logistic Regression 3-class Classifier
=========================================================
Shown below are the decision boundaries of a logistic-regression classifier on the
`iris <http://en.wikipedia.org/wiki/Iris_flower_data_set>`_ dataset. The
datapoints are colored according to their labels.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model, datasets
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features.
Y = iris.target
h = .02 # step size in the mesh
logreg = linear_model.LogisticRegression(C=1e5)
# we create an instance of the Logistic Regression classifier and fit the data.
logreg.fit(X, Y)
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
Z = logreg.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure(1, figsize=(4, 3))
plt.pcolormesh(xx, yy, Z, cmap=plt.cm.Paired)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=Y, edgecolors='k', cmap=plt.cm.Paired)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.xticks(())
plt.yticks(())
plt.show()
|
bsd-3-clause
|
saketkc/statsmodels
|
examples/incomplete/dates.py
|
29
|
1251
|
"""
Using dates with timeseries models
"""
import statsmodels.api as sm
import pandas as pd
# Getting started
# ---------------
data = sm.datasets.sunspots.load()
# Right now an annual date series must be datetimes at the end of the year.
dates = sm.tsa.datetools.dates_from_range('1700', length=len(data.endog))
# Using Pandas
# ------------
# Make a pandas TimeSeries or DataFrame
endog = pd.TimeSeries(data.endog, index=dates)
# and instantiate the model
ar_model = sm.tsa.AR(endog, freq='A')
pandas_ar_res = ar_model.fit(maxlag=9, method='mle', disp=-1)
# Let's do some out-of-sample prediction
pred = pandas_ar_res.predict(start='2005', end='2015')
print(pred)
# Using explicit dates
# --------------------
ar_model = sm.tsa.AR(data.endog, dates=dates, freq='A')
ar_res = ar_model.fit(maxlag=9, method='mle', disp=-1)
pred = ar_res.predict(start='2005', end='2015')
print(pred)
# This just returns a regular array, but since the model has date information
# attached, you can get the prediction dates in a roundabout way.
print(ar_res.data.predict_dates)
# This attribute only exists if predict has been called. It holds the dates
# associated with the last call to predict.
#..TODO: should this be attached to the results instance?
|
bsd-3-clause
|
joelbitar/rfinder
|
requests/compat.py
|
87
|
2567
|
# -*- coding: utf-8 -*-
"""
pythoncompat
"""
from .packages import charade as chardet
import sys
# -------
# Pythons
# -------
# Syntax sugar.
_ver = sys.version_info
#: Python 2.x?
is_py2 = (_ver[0] == 2)
#: Python 3.x?
is_py3 = (_ver[0] == 3)
#: Python 3.0.x
is_py30 = (is_py3 and _ver[1] == 0)
#: Python 3.1.x
is_py31 = (is_py3 and _ver[1] == 1)
#: Python 3.2.x
is_py32 = (is_py3 and _ver[1] == 2)
#: Python 3.3.x
is_py33 = (is_py3 and _ver[1] == 3)
#: Python 3.4.x
is_py34 = (is_py3 and _ver[1] == 4)
#: Python 2.7.x
is_py27 = (is_py2 and _ver[1] == 7)
#: Python 2.6.x
is_py26 = (is_py2 and _ver[1] == 6)
#: Python 2.5.x
is_py25 = (is_py2 and _ver[1] == 5)
#: Python 2.4.x
is_py24 = (is_py2 and _ver[1] == 4) # I'm assuming this is not by choice.
# ---------
# Platforms
# ---------
# Syntax sugar.
_ver = sys.version.lower()
is_pypy = ('pypy' in _ver)
is_jython = ('jython' in _ver)
is_ironpython = ('iron' in _ver)
# Assume CPython, if nothing else.
is_cpython = not any((is_pypy, is_jython, is_ironpython))
# Windows-based system.
is_windows = 'win32' in str(sys.platform).lower()
# Standard Linux 2+ system.
is_linux = ('linux' in str(sys.platform).lower())
is_osx = ('darwin' in str(sys.platform).lower())
is_hpux = ('hpux' in str(sys.platform).lower()) # Complete guess.
is_solaris = ('sunos' in str(sys.platform).lower()) # Complete guess.
try:
import simplejson as json
except ImportError:
import json
# ---------
# Specifics
# ---------
if is_py2:
from urllib import quote, unquote, quote_plus, unquote_plus, urlencode, getproxies, proxy_bypass
from urlparse import urlparse, urlunparse, urljoin, urlsplit, urldefrag
from urllib2 import parse_http_list
import cookielib
from Cookie import Morsel
from StringIO import StringIO
from .packages.urllib3.packages.ordered_dict import OrderedDict
from httplib import IncompleteRead
builtin_str = str
bytes = str
str = unicode
basestring = basestring
numeric_types = (int, long, float)
elif is_py3:
from urllib.parse import urlparse, urlunparse, urljoin, urlsplit, urlencode, quote, unquote, quote_plus, unquote_plus, urldefrag
from urllib.request import parse_http_list, getproxies, proxy_bypass
from http import cookiejar as cookielib
from http.cookies import Morsel
from io import StringIO
from collections import OrderedDict
from http.client import IncompleteRead
builtin_str = str
str = str
bytes = bytes
basestring = (str, bytes)
numeric_types = (int, float)
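# Editorial sketch, not part of the original module: downstream code uses
# the re-exported names uniformly on Python 2 and 3.  Values are
# illustrative.
def _demo_compat_usage():
    parts = urlparse('http://example.com/path?q=1')
    assert parts.netloc == 'example.com'
    assert isinstance('text', basestring)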
|
lgpl-3.0
|
40223226/2015cd_midterm2
|
static/Brython3.1.1-20150328-091302/Lib/pydoc.py
|
637
|
102017
|
#!/usr/bin/env python3
"""Generate Python documentation in HTML or text for interactive use.
In the Python interpreter, do "from pydoc import help" to provide
help. Calling help(thing) on a Python object documents the object.
Or, at the shell command line outside of Python:
Run "pydoc <name>" to show documentation on something. <name> may be
the name of a function, module, package, or a dotted reference to a
class or function within a module or module in a package. If the
argument contains a path segment delimiter (e.g. slash on Unix,
backslash on Windows) it is treated as the path to a Python source file.
Run "pydoc -k <keyword>" to search for a keyword in the synopsis lines
of all available modules.
Run "pydoc -p <port>" to start an HTTP server on the given port on the
local machine. Port number 0 can be used to get an arbitrary unused port.
Run "pydoc -b" to start an HTTP server on an arbitrary unused port and
open a Web browser to interactively browse documentation. The -p option
can be used with the -b option to explicitly specify the server port.
Run "pydoc -w <name>" to write out the HTML documentation for a module
to a file named "<name>.html".
Module docs for core modules are assumed to be in
http://docs.python.org/X.Y/library/
This can be overridden by setting the PYTHONDOCS environment variable
to a different URL or to a local directory containing the Library
Reference Manual pages.
"""
__all__ = ['help']
__author__ = "Ka-Ping Yee <[email protected]>"
__date__ = "26 February 2001"
__credits__ = """Guido van Rossum, for an excellent programming language.
Tommy Burnette, the original creator of manpy.
Paul Prescod, for all his work on onlinehelp.
Richard Chamberlain, for the first implementation of textdoc.
"""
# Known bugs that can't be fixed here:
# - imp.load_module() cannot be prevented from clobbering existing
# loaded modules, so calling synopsis() on a binary module file
# changes the contents of any existing module with the same name.
# - If the __file__ attribute on a module is a relative path and
# the current directory is changed with os.chdir(), an incorrect
# path will be displayed.
import builtins
import imp
import importlib.machinery
#brython fix me
import inspect
import io
import os
#brython fix me
#import pkgutil
import platform
import re
import sys
import time
import tokenize
import warnings
from collections import deque
from reprlib import Repr
#fix me brython
#from traceback import extract_tb, format_exception_only
# --------------------------------------------------------- common routines
def pathdirs():
"""Convert sys.path into a list of absolute, existing, unique paths."""
dirs = []
normdirs = []
for dir in sys.path:
dir = os.path.abspath(dir or '.')
normdir = os.path.normcase(dir)
if normdir not in normdirs and os.path.isdir(dir):
dirs.append(dir)
normdirs.append(normdir)
return dirs
def getdoc(object):
"""Get the doc string or comments for an object."""
result = inspect.getdoc(object) or inspect.getcomments(object)
return result and re.sub('^ *\n', '', result.rstrip()) or ''
def splitdoc(doc):
"""Split a doc string into a synopsis line (if any) and the rest."""
lines = doc.strip().split('\n')
if len(lines) == 1:
return lines[0], ''
elif len(lines) >= 2 and not lines[1].rstrip():
return lines[0], '\n'.join(lines[2:])
return '', '\n'.join(lines)
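# Editorial sketch, not part of the original module: the synopsis is the
# first line when it is followed by a blank line.
def _demo_splitdoc():
    assert splitdoc('line one\n\nrest') == ('line one', 'rest')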
def classname(object, modname):
"""Get a class name and qualify it with a module name if necessary."""
name = object.__name__
if object.__module__ != modname:
name = object.__module__ + '.' + name
return name
def isdata(object):
"""Check if an object is of a type that probably means it's data."""
return not (inspect.ismodule(object) or inspect.isclass(object) or
inspect.isroutine(object) or inspect.isframe(object) or
inspect.istraceback(object) or inspect.iscode(object))
def replace(text, *pairs):
"""Do a series of global replacements on a string."""
while pairs:
text = pairs[1].join(text.split(pairs[0]))
pairs = pairs[2:]
return text
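# Editorial sketch, not part of the original module: replace() applies
# its (old, new) pairs in sequence.
def _demo_replace():
    assert replace('a<b', '<', '&lt;', 'a', 'x') == 'x&lt;b'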
def cram(text, maxlen):
"""Omit part of a string if needed to make it fit in a maximum length."""
if len(text) > maxlen:
pre = max(0, (maxlen-3)//2)
post = max(0, maxlen-3-pre)
return text[:pre] + '...' + text[len(text)-post:]
return text
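# Editorial sketch, not part of the original module: cram() keeps both
# ends of the string and elides the middle.
def _demo_cram():
    assert cram('abcdefghij', 8) == 'ab...hij'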
_re_stripid = re.compile(r' at 0x[0-9a-f]{6,16}(>+)$', re.IGNORECASE)
def stripid(text):
"""Remove the hexadecimal id from a Python object representation."""
# The behaviour of %p is implementation-dependent in terms of case.
#fix me brython
#return _re_stripid.sub(r'\1', text)
return text
def _is_some_method(obj):
return (inspect.isfunction(obj) or
inspect.ismethod(obj) or
inspect.isbuiltin(obj) or
inspect.ismethoddescriptor(obj))
def allmethods(cl):
methods = {}
for key, value in inspect.getmembers(cl, _is_some_method):
methods[key] = 1
for base in cl.__bases__:
methods.update(allmethods(base)) # all your base are belong to us
for key in methods.keys():
methods[key] = getattr(cl, key)
return methods
def _split_list(s, predicate):
"""Split sequence s via predicate, and return pair ([true], [false]).
The return value is a 2-tuple of lists,
([x for x in s if predicate(x)],
[x for x in s if not predicate(x)])
"""
yes = []
no = []
for x in s:
if predicate(x):
yes.append(x)
else:
no.append(x)
return yes, no
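# Editorial sketch, not part of the original module: partitioning a
# sequence with _split_list.
def _demo_split_list():
    evens, odds = _split_list([1, 2, 3, 4], lambda n: n % 2 == 0)
    assert (evens, odds) == ([2, 4], [1, 3])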
def visiblename(name, all=None, obj=None):
"""Decide whether to show documentation on a variable."""
# Certain special names are redundant or internal.
if name in {'__author__', '__builtins__', '__cached__', '__credits__',
'__date__', '__doc__', '__file__', '__initializing__',
'__loader__', '__module__', '__name__', '__package__',
'__path__', '__qualname__', '__slots__', '__version__'}:
return 0
# Private names are hidden, but special names are displayed.
if name.startswith('__') and name.endswith('__'): return 1
# Namedtuples have public fields and methods with a single leading underscore
if name.startswith('_') and hasattr(obj, '_fields'):
return True
if all is not None:
# only document that which the programmer exported in __all__
return name in all
else:
return not name.startswith('_')
def classify_class_attrs(object):
"""Wrap inspect.classify_class_attrs, with fixup for data descriptors."""
results = []
for (name, kind, cls, value) in inspect.classify_class_attrs(object):
if inspect.isdatadescriptor(value):
kind = 'data descriptor'
results.append((name, kind, cls, value))
return results
# ----------------------------------------------------- module manipulation
def ispackage(path):
"""Guess whether a path refers to a package directory."""
if os.path.isdir(path):
for ext in ('.py', '.pyc', '.pyo'):
if os.path.isfile(os.path.join(path, '__init__' + ext)):
return True
return False
def source_synopsis(file):
line = file.readline()
while line[:1] == '#' or not line.strip():
line = file.readline()
if not line: break
line = line.strip()
if line[:4] == 'r"""': line = line[1:]
if line[:3] == '"""':
line = line[3:]
if line[-1:] == '\\': line = line[:-1]
while not line.strip():
line = file.readline()
if not line: break
result = line.split('"""')[0].strip()
else: result = None
return result
def synopsis(filename, cache={}):
"""Get the one-line summary out of a module file."""
mtime = os.stat(filename).st_mtime
lastupdate, result = cache.get(filename, (None, None))
if lastupdate is None or lastupdate < mtime:
try:
file = tokenize.open(filename)
except IOError:
# module can't be opened, so skip it
return None
binary_suffixes = importlib.machinery.BYTECODE_SUFFIXES[:]
binary_suffixes += importlib.machinery.EXTENSION_SUFFIXES[:]
if any(filename.endswith(x) for x in binary_suffixes):
# binary modules have to be imported
file.close()
if any(filename.endswith(x) for x in
importlib.machinery.BYTECODE_SUFFIXES):
loader = importlib.machinery.SourcelessFileLoader('__temp__',
filename)
else:
loader = importlib.machinery.ExtensionFileLoader('__temp__',
filename)
try:
module = loader.load_module('__temp__')
except:
return None
result = (module.__doc__ or '').splitlines()[0]
del sys.modules['__temp__']
else:
# text modules can be directly examined
result = source_synopsis(file)
file.close()
cache[filename] = (mtime, result)
return result
class ErrorDuringImport(Exception):
"""Errors that occurred while trying to import something to document it."""
def __init__(self, filename, exc_info):
self.filename = filename
self.exc, self.value, self.tb = exc_info
def __str__(self):
exc = self.exc.__name__
return 'problem in %s - %s: %s' % (self.filename, exc, self.value)
def importfile(path):
"""Import a Python source file or compiled file given its path."""
magic = imp.get_magic()
with open(path, 'rb') as file:
if file.read(len(magic)) == magic:
kind = imp.PY_COMPILED
else:
kind = imp.PY_SOURCE
file.seek(0)
filename = os.path.basename(path)
name, ext = os.path.splitext(filename)
try:
module = imp.load_module(name, file, path, (ext, 'r', kind))
except:
raise ErrorDuringImport(path, sys.exc_info())
return module
def safeimport(path, forceload=0, cache={}):
"""Import a module; handle errors; return None if the module isn't found.
If the module *is* found but an exception occurs, it's wrapped in an
ErrorDuringImport exception and reraised. Unlike __import__, if a
package path is specified, the module at the end of the path is returned,
not the package at the beginning. If the optional 'forceload' argument
is 1, we reload the module from disk (unless it's a dynamic extension)."""
try:
# If forceload is 1 and the module has been previously loaded from
# disk, we always have to reload the module. Checking the file's
# mtime isn't good enough (e.g. the module could contain a class
# that inherits from another module that has changed).
if forceload and path in sys.modules:
if path not in sys.builtin_module_names:
# Remove the module from sys.modules and re-import to try
# and avoid problems with partially loaded modules.
# Also remove any submodules because they won't appear
# in the newly loaded module's namespace if they're already
# in sys.modules.
subs = [m for m in sys.modules if m.startswith(path + '.')]
for key in [path] + subs:
# Prevent garbage collection.
cache[key] = sys.modules[key]
del sys.modules[key]
module = __import__(path)
except:
# Did the error occur before or after the module was found?
(exc, value, tb) = info = sys.exc_info()
if path in sys.modules:
# An error occurred while executing the imported module.
raise ErrorDuringImport(sys.modules[path].__file__, info)
elif exc is SyntaxError:
# A SyntaxError occurred before we could execute the module.
raise ErrorDuringImport(value.filename, info)
#fix me brython
#elif exc is ImportError and value.name == path:
elif exc is ImportError and str(value) == str(path):
# No such module in the path.
return None
else:
# Some other error occurred during the importing process.
raise ErrorDuringImport(path, sys.exc_info())
for part in path.split('.')[1:]:
try: module = getattr(module, part)
except AttributeError: return None
return module
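# Editorial sketch, not part of the original module: safeimport returns
# the leaf module of a dotted path, not the top-level package.
def _demo_safeimport():
    mod = safeimport('os.path')
    assert mod is not None and hasattr(mod, 'join')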
# ---------------------------------------------------- formatter base class
class Doc:
PYTHONDOCS = os.environ.get("PYTHONDOCS",
"http://docs.python.org/%d.%d/library"
% sys.version_info[:2])
def document(self, object, name=None, *args):
"""Generate documentation for an object."""
args = (object, name) + args
# 'try' clause is to attempt to handle the possibility that inspect
# identifies something in a way that pydoc itself has issues handling;
# think 'super' and how it is a descriptor (which raises the exception
# by lacking a __name__ attribute) and an instance.
if inspect.isgetsetdescriptor(object): return self.docdata(*args)
if inspect.ismemberdescriptor(object): return self.docdata(*args)
try:
if inspect.ismodule(object): return self.docmodule(*args)
if inspect.isclass(object): return self.docclass(*args)
if inspect.isroutine(object): return self.docroutine(*args)
except AttributeError:
pass
if isinstance(object, property): return self.docproperty(*args)
return self.docother(*args)
def fail(self, object, name=None, *args):
"""Raise an exception for unimplemented types."""
message = "don't know how to document object%s of type %s" % (
name and ' ' + repr(name), type(object).__name__)
raise TypeError(message)
docmodule = docclass = docroutine = docother = docproperty = docdata = fail
def getdocloc(self, object):
"""Return the location of module docs or None"""
try:
file = inspect.getabsfile(object)
except TypeError:
file = '(built-in)'
docloc = os.environ.get("PYTHONDOCS", self.PYTHONDOCS)
basedir = os.path.join(sys.base_exec_prefix, "lib",
"python%d.%d" % sys.version_info[:2])
if (isinstance(object, type(os)) and
(object.__name__ in ('errno', 'exceptions', 'gc', 'imp',
'marshal', 'posix', 'signal', 'sys',
'_thread', 'zipimport') or
(file.startswith(basedir) and
not file.startswith(os.path.join(basedir, 'site-packages')))) and
object.__name__ not in ('xml.etree', 'test.pydoc_mod')):
if docloc.startswith("http://"):
docloc = "%s/%s" % (docloc.rstrip("/"), object.__name__)
else:
docloc = os.path.join(docloc, object.__name__ + ".html")
else:
docloc = None
return docloc
# -------------------------------------------- HTML documentation generator
class HTMLRepr(Repr):
"""Class for safely making an HTML representation of a Python object."""
def __init__(self):
Repr.__init__(self)
self.maxlist = self.maxtuple = 20
self.maxdict = 10
self.maxstring = self.maxother = 100
def escape(self, text):
return replace(text, '&', '&', '<', '<', '>', '>')
def repr(self, object):
return Repr.repr(self, object)
def repr1(self, x, level):
if hasattr(type(x), '__name__'):
methodname = 'repr_' + '_'.join(type(x).__name__.split())
if hasattr(self, methodname):
return getattr(self, methodname)(x, level)
return self.escape(cram(stripid(repr(x)), self.maxother))
def repr_string(self, x, level):
test = cram(x, self.maxstring)
testrepr = repr(test)
if '\\' in test and '\\' not in replace(testrepr, r'\\', ''):
# Backslashes are only literal in the string and are never
# needed to make any special characters, so show a raw string.
return 'r' + testrepr[0] + self.escape(test) + testrepr[0]
return re.sub(r'((\\[\\abfnrtv\'"]|\\[0-9]..|\\x..|\\u....)+)',
r'<font color="#c040c0">\1</font>',
self.escape(testrepr))
repr_str = repr_string
def repr_instance(self, x, level):
try:
return self.escape(cram(stripid(repr(x)), self.maxstring))
except:
return self.escape('<%s instance>' % x.__class__.__name__)
repr_unicode = repr_string
class HTMLDoc(Doc):
"""Formatter class for HTML documentation."""
# ------------------------------------------- HTML formatting utilities
_repr_instance = HTMLRepr()
repr = _repr_instance.repr
escape = _repr_instance.escape
def page(self, title, contents):
"""Format an HTML page."""
return '''\
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
<html><head><title>Python: %s</title>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
</head><body bgcolor="#f0f0f8">
%s
</body></html>''' % (title, contents)
def heading(self, title, fgcol, bgcol, extras=''):
"""Format a page heading."""
return '''
<table width="100%%" cellspacing=0 cellpadding=2 border=0 summary="heading">
<tr bgcolor="%s">
<td valign=bottom> <br>
<font color="%s" face="helvetica, arial"> <br>%s</font></td
><td align=right valign=bottom
><font color="%s" face="helvetica, arial">%s</font></td></tr></table>
''' % (bgcol, fgcol, title, fgcol, extras or ' ')
def section(self, title, fgcol, bgcol, contents, width=6,
prelude='', marginalia=None, gap=' '):
"""Format a section with a heading."""
if marginalia is None:
marginalia = '<tt>' + ' ' * width + '</tt>'
result = '''<p>
<table width="100%%" cellspacing=0 cellpadding=2 border=0 summary="section">
<tr bgcolor="%s">
<td colspan=3 valign=bottom> <br>
<font color="%s" face="helvetica, arial">%s</font></td></tr>
''' % (bgcol, fgcol, title)
if prelude:
result = result + '''
<tr bgcolor="%s"><td rowspan=2>%s</td>
<td colspan=2>%s</td></tr>
<tr><td>%s</td>''' % (bgcol, marginalia, prelude, gap)
else:
result = result + '''
<tr><td bgcolor="%s">%s</td><td>%s</td>''' % (bgcol, marginalia, gap)
return result + '\n<td width="100%%">%s</td></tr></table>' % contents
def bigsection(self, title, *args):
"""Format a section with a big heading."""
title = '<big><strong>%s</strong></big>' % title
return self.section(title, *args)
def preformat(self, text):
"""Format literal preformatted text."""
text = self.escape(text.expandtabs())
return replace(text, '\n\n', '\n \n', '\n\n', '\n \n',
' ', ' ', '\n', '<br>\n')
def multicolumn(self, list, format, cols=4):
"""Format a list of items into a multi-column list."""
result = ''
rows = (len(list)+cols-1)//cols
for col in range(cols):
result = result + '<td width="%d%%" valign=top>' % (100//cols)
for i in range(rows*col, rows*col+rows):
if i < len(list):
result = result + format(list[i]) + '<br>\n'
result = result + '</td>'
return '<table width="100%%" summary="list"><tr>%s</tr></table>' % result
def grey(self, text): return '<font color="#909090">%s</font>' % text
def namelink(self, name, *dicts):
"""Make a link for an identifier, given name-to-URL mappings."""
for dict in dicts:
if name in dict:
return '<a href="%s">%s</a>' % (dict[name], name)
return name
def classlink(self, object, modname):
"""Make a link for a class."""
name, module = object.__name__, sys.modules.get(object.__module__)
if hasattr(module, name) and getattr(module, name) is object:
return '<a href="%s.html#%s">%s</a>' % (
module.__name__, name, classname(object, modname))
return classname(object, modname)
def modulelink(self, object):
"""Make a link for a module."""
return '<a href="%s.html">%s</a>' % (object.__name__, object.__name__)
def modpkglink(self, modpkginfo):
"""Make a link for a module or package to display in an index."""
name, path, ispackage, shadowed = modpkginfo
if shadowed:
return self.grey(name)
if path:
url = '%s.%s.html' % (path, name)
else:
url = '%s.html' % name
if ispackage:
text = '<strong>%s</strong> (package)' % name
else:
text = name
return '<a href="%s">%s</a>' % (url, text)
def filelink(self, url, path):
"""Make a link to source file."""
return '<a href="file:%s">%s</a>' % (url, path)
def markup(self, text, escape=None, funcs={}, classes={}, methods={}):
"""Mark up some plain text, given a context of symbols to look for.
Each context dictionary maps object names to anchor names."""
escape = escape or self.escape
results = []
here = 0
pattern = re.compile(r'\b((http|ftp)://\S+[\w/]|'
r'RFC[- ]?(\d+)|'
r'PEP[- ]?(\d+)|'
r'(self\.)?(\w+))')
while True:
match = pattern.search(text, here)
if not match: break
start, end = match.span()
results.append(escape(text[here:start]))
all, scheme, rfc, pep, selfdot, name = match.groups()
if scheme:
url = escape(all).replace('"', '"')
results.append('<a href="%s">%s</a>' % (url, url))
elif rfc:
url = 'http://www.rfc-editor.org/rfc/rfc%d.txt' % int(rfc)
results.append('<a href="%s">%s</a>' % (url, escape(all)))
elif pep:
url = 'http://www.python.org/dev/peps/pep-%04d/' % int(pep)
results.append('<a href="%s">%s</a>' % (url, escape(all)))
elif text[end:end+1] == '(':
results.append(self.namelink(name, methods, funcs, classes))
elif selfdot:
results.append('self.<strong>%s</strong>' % name)
else:
results.append(self.namelink(name, classes))
here = end
results.append(escape(text[here:]))
return ''.join(results)
# ---------------------------------------------- type-specific routines
def formattree(self, tree, modname, parent=None):
"""Produce HTML for a class tree as given by inspect.getclasstree()."""
result = ''
for entry in tree:
if type(entry) is type(()):
c, bases = entry
result = result + '<dt><font face="helvetica, arial">'
result = result + self.classlink(c, modname)
if bases and bases != (parent,):
parents = []
for base in bases:
parents.append(self.classlink(base, modname))
result = result + '(' + ', '.join(parents) + ')'
result = result + '\n</font></dt>'
elif type(entry) is type([]):
result = result + '<dd>\n%s</dd>\n' % self.formattree(
entry, modname, c)
return '<dl>\n%s</dl>\n' % result
def docmodule(self, object, name=None, mod=None, *ignored):
"""Produce HTML documentation for a module object."""
name = object.__name__ # ignore the passed-in name
try:
all = object.__all__
except AttributeError:
all = None
parts = name.split('.')
links = []
for i in range(len(parts)-1):
links.append(
'<a href="%s.html"><font color="#ffffff">%s</font></a>' %
('.'.join(parts[:i+1]), parts[i]))
linkedname = '.'.join(links + parts[-1:])
head = '<big><big><strong>%s</strong></big></big>' % linkedname
try:
path = inspect.getabsfile(object)
url = path
if sys.platform == 'win32':
import nturl2path
url = nturl2path.pathname2url(path)
filelink = self.filelink(url, path)
except TypeError:
filelink = '(built-in)'
info = []
if hasattr(object, '__version__'):
version = str(object.__version__)
if version[:11] == '$' + 'Revision: ' and version[-1:] == '$':
version = version[11:-1].strip()
info.append('version %s' % self.escape(version))
if hasattr(object, '__date__'):
info.append(self.escape(str(object.__date__)))
if info:
head = head + ' (%s)' % ', '.join(info)
docloc = self.getdocloc(object)
if docloc is not None:
docloc = '<br><a href="%(docloc)s">Module Reference</a>' % locals()
else:
docloc = ''
result = self.heading(
head, '#ffffff', '#7799ee',
'<a href=".">index</a><br>' + filelink + docloc)
modules = inspect.getmembers(object, inspect.ismodule)
classes, cdict = [], {}
for key, value in inspect.getmembers(object, inspect.isclass):
# if __all__ exists, believe it. Otherwise use old heuristic.
if (all is not None or
(inspect.getmodule(value) or object) is object):
if visiblename(key, all, object):
classes.append((key, value))
cdict[key] = cdict[value] = '#' + key
for key, value in classes:
for base in value.__bases__:
key, modname = base.__name__, base.__module__
module = sys.modules.get(modname)
if modname != name and module and hasattr(module, key):
if getattr(module, key) is base:
if not key in cdict:
cdict[key] = cdict[base] = modname + '.html#' + key
funcs, fdict = [], {}
for key, value in inspect.getmembers(object, inspect.isroutine):
# if __all__ exists, believe it. Otherwise use old heuristic.
if (all is not None or
inspect.isbuiltin(value) or inspect.getmodule(value) is object):
if visiblename(key, all, object):
funcs.append((key, value))
fdict[key] = '#-' + key
if inspect.isfunction(value): fdict[value] = fdict[key]
data = []
for key, value in inspect.getmembers(object, isdata):
if visiblename(key, all, object):
data.append((key, value))
doc = self.markup(getdoc(object), self.preformat, fdict, cdict)
doc = doc and '<tt>%s</tt>' % doc
result = result + '<p>%s</p>\n' % doc
if hasattr(object, '__path__'):
modpkgs = []
for importer, modname, ispkg in pkgutil.iter_modules(object.__path__):
modpkgs.append((modname, name, ispkg, 0))
modpkgs.sort()
contents = self.multicolumn(modpkgs, self.modpkglink)
result = result + self.bigsection(
'Package Contents', '#ffffff', '#aa55cc', contents)
elif modules:
contents = self.multicolumn(
modules, lambda t: self.modulelink(t[1]))
result = result + self.bigsection(
'Modules', '#ffffff', '#aa55cc', contents)
if classes:
classlist = [value for (key, value) in classes]
contents = [
self.formattree(inspect.getclasstree(classlist, 1), name)]
for key, value in classes:
contents.append(self.document(value, key, name, fdict, cdict))
result = result + self.bigsection(
'Classes', '#ffffff', '#ee77aa', ' '.join(contents))
if funcs:
contents = []
for key, value in funcs:
contents.append(self.document(value, key, name, fdict, cdict))
result = result + self.bigsection(
'Functions', '#ffffff', '#eeaa77', ' '.join(contents))
if data:
contents = []
for key, value in data:
contents.append(self.document(value, key))
result = result + self.bigsection(
'Data', '#ffffff', '#55aa55', '<br>\n'.join(contents))
if hasattr(object, '__author__'):
contents = self.markup(str(object.__author__), self.preformat)
result = result + self.bigsection(
'Author', '#ffffff', '#7799ee', contents)
if hasattr(object, '__credits__'):
contents = self.markup(str(object.__credits__), self.preformat)
result = result + self.bigsection(
'Credits', '#ffffff', '#7799ee', contents)
return result
def docclass(self, object, name=None, mod=None, funcs={}, classes={},
*ignored):
"""Produce HTML documentation for a class object."""
realname = object.__name__
name = name or realname
bases = object.__bases__
contents = []
push = contents.append
# Cute little class to pump out a horizontal rule between sections.
class HorizontalRule:
def __init__(self):
self.needone = 0
def maybe(self):
if self.needone:
push('<hr>\n')
self.needone = 1
hr = HorizontalRule()
# List the mro, if non-trivial.
mro = deque(inspect.getmro(object))
if len(mro) > 2:
hr.maybe()
push('<dl><dt>Method resolution order:</dt>\n')
for base in mro:
push('<dd>%s</dd>\n' % self.classlink(base,
object.__module__))
push('</dl>\n')
def spill(msg, attrs, predicate):
ok, attrs = _split_list(attrs, predicate)
if ok:
hr.maybe()
push(msg)
for name, kind, homecls, value in ok:
try:
value = getattr(object, name)
except Exception:
# Some descriptors may meet a failure in their __get__.
# (bug #1785)
push(self._docdescriptor(name, value, mod))
else:
push(self.document(value, name, mod,
funcs, classes, mdict, object))
push('\n')
return attrs
def spilldescriptors(msg, attrs, predicate):
ok, attrs = _split_list(attrs, predicate)
if ok:
hr.maybe()
push(msg)
for name, kind, homecls, value in ok:
push(self._docdescriptor(name, value, mod))
return attrs
def spilldata(msg, attrs, predicate):
ok, attrs = _split_list(attrs, predicate)
if ok:
hr.maybe()
push(msg)
for name, kind, homecls, value in ok:
base = self.docother(getattr(object, name), name, mod)
if callable(value) or inspect.isdatadescriptor(value):
doc = getattr(value, "__doc__", None)
else:
doc = None
if doc is None:
push('<dl><dt>%s</dl>\n' % base)
else:
doc = self.markup(getdoc(value), self.preformat,
funcs, classes, mdict)
doc = '<dd><tt>%s</tt>' % doc
push('<dl><dt>%s%s</dl>\n' % (base, doc))
push('\n')
return attrs
attrs = [(name, kind, cls, value)
for name, kind, cls, value in classify_class_attrs(object)
if visiblename(name, obj=object)]
mdict = {}
for key, kind, homecls, value in attrs:
mdict[key] = anchor = '#' + name + '-' + key
try:
value = getattr(object, key)
except Exception:
# Some descriptors may meet a failure in their __get__.
# (bug #1785)
pass
try:
# The value may not be hashable (e.g., a data attr with
# a dict or list value).
mdict[value] = anchor
except TypeError:
pass
while attrs:
if mro:
thisclass = mro.popleft()
else:
thisclass = attrs[0][2]
attrs, inherited = _split_list(attrs, lambda t: t[2] is thisclass)
if thisclass is builtins.object:
attrs = inherited
continue
elif thisclass is object:
tag = 'defined here'
else:
tag = 'inherited from %s' % self.classlink(thisclass,
object.__module__)
tag += ':<br>\n'
# Sort attrs by name.
attrs.sort(key=lambda t: t[0])
# Pump out the attrs, segregated by kind.
attrs = spill('Methods %s' % tag, attrs,
lambda t: t[1] == 'method')
attrs = spill('Class methods %s' % tag, attrs,
lambda t: t[1] == 'class method')
attrs = spill('Static methods %s' % tag, attrs,
lambda t: t[1] == 'static method')
attrs = spilldescriptors('Data descriptors %s' % tag, attrs,
lambda t: t[1] == 'data descriptor')
attrs = spilldata('Data and other attributes %s' % tag, attrs,
lambda t: t[1] == 'data')
assert attrs == []
attrs = inherited
contents = ''.join(contents)
if name == realname:
title = '<a name="%s">class <strong>%s</strong></a>' % (
name, realname)
else:
title = '<strong>%s</strong> = <a name="%s">class %s</a>' % (
name, name, realname)
if bases:
parents = []
for base in bases:
parents.append(self.classlink(base, object.__module__))
title = title + '(%s)' % ', '.join(parents)
doc = self.markup(getdoc(object), self.preformat, funcs, classes, mdict)
doc = doc and '<tt>%s<br> </tt>' % doc
return self.section(title, '#000000', '#ffc8d8', contents, 3, doc)
def formatvalue(self, object):
"""Format an argument default value as text."""
return self.grey('=' + self.repr(object))
def docroutine(self, object, name=None, mod=None,
funcs={}, classes={}, methods={}, cl=None):
"""Produce HTML documentation for a function or method object."""
realname = object.__name__
name = name or realname
anchor = (cl and cl.__name__ or '') + '-' + name
note = ''
skipdocs = 0
if inspect.ismethod(object):
imclass = object.__self__.__class__
if cl:
if imclass is not cl:
note = ' from ' + self.classlink(imclass, mod)
else:
if object.__self__ is not None:
note = ' method of %s instance' % self.classlink(
object.__self__.__class__, mod)
else:
note = ' unbound %s method' % self.classlink(imclass,mod)
object = object.__func__
if name == realname:
title = '<a name="%s"><strong>%s</strong></a>' % (anchor, realname)
else:
if (cl and realname in cl.__dict__ and
cl.__dict__[realname] is object):
reallink = '<a href="#%s">%s</a>' % (
cl.__name__ + '-' + realname, realname)
skipdocs = 1
else:
reallink = realname
title = '<a name="%s"><strong>%s</strong></a> = %s' % (
anchor, name, reallink)
if inspect.isfunction(object):
args, varargs, varkw, defaults, kwonlyargs, kwdefaults, ann = \
    inspect.getfullargspec(object)
argspec = inspect.formatargspec(
    args, varargs, varkw, defaults, kwonlyargs, kwdefaults, ann,
formatvalue=self.formatvalue,
formatannotation=inspect.formatannotationrelativeto(object))
if realname == '<lambda>':
title = '<strong>%s</strong> <em>lambda</em> ' % name
# XXX lambdas won't usually have func_annotations['return']
# since the syntax doesn't support it, but it is possible.
# So removing parentheses isn't truly safe.
argspec = argspec[1:-1] # remove parentheses
else:
argspec = '(...)'
decl = title + argspec + (note and self.grey(
'<font face="helvetica, arial">%s</font>' % note))
if skipdocs:
return '<dl><dt>%s</dt></dl>\n' % decl
else:
doc = self.markup(
getdoc(object), self.preformat, funcs, classes, methods)
doc = doc and '<dd><tt>%s</tt></dd>' % doc
return '<dl><dt>%s</dt>%s</dl>\n' % (decl, doc)
def _docdescriptor(self, name, value, mod):
results = []
push = results.append
if name:
push('<dl><dt><strong>%s</strong></dt>\n' % name)
if value.__doc__ is not None:
doc = self.markup(getdoc(value), self.preformat)
push('<dd><tt>%s</tt></dd>\n' % doc)
push('</dl>\n')
return ''.join(results)
def docproperty(self, object, name=None, mod=None, cl=None):
"""Produce html documentation for a property."""
return self._docdescriptor(name, object, mod)
def docother(self, object, name=None, mod=None, *ignored):
"""Produce HTML documentation for a data object."""
lhs = name and '<strong>%s</strong> = ' % name or ''
return lhs + self.repr(object)
def docdata(self, object, name=None, mod=None, cl=None):
"""Produce html documentation for a data descriptor."""
return self._docdescriptor(name, object, mod)
def index(self, dir, shadowed=None):
"""Generate an HTML index for a directory of modules."""
modpkgs = []
if shadowed is None: shadowed = {}
for importer, name, ispkg in pkgutil.iter_modules([dir]):
if any((0xD800 <= ord(ch) <= 0xDFFF) for ch in name):
# ignore a module if its name contains a surrogate character
continue
modpkgs.append((name, '', ispkg, name in shadowed))
shadowed[name] = 1
modpkgs.sort()
contents = self.multicolumn(modpkgs, self.modpkglink)
return self.bigsection(dir, '#ffffff', '#ee77aa', contents)
# -------------------------------------------- text documentation generator
class TextRepr(Repr):
"""Class for safely making a text representation of a Python object."""
def __init__(self):
Repr.__init__(self)
self.maxlist = self.maxtuple = 20
self.maxdict = 10
self.maxstring = self.maxother = 100
#def repr1(self, x, level):
# if hasattr(type(x), '__name__'):
# methodname = 'repr_' + '_'.join(type(x).__name__.split())
# if hasattr(self, methodname):
# return getattr(self, methodname)(x, level)
# return cram(stripid(repr(x)), self.maxother)
def repr_string(self, x, level):
test = cram(x, self.maxstring)
testrepr = repr(test)
if '\\' in test and '\\' not in replace(testrepr, r'\\', ''):
# Backslashes are only literal in the string and are never
# needed to make any special characters, so show a raw string.
return 'r' + testrepr[0] + test + testrepr[0]
return testrepr
repr_str = repr_string
def repr_instance(self, x, level):
try:
return cram(stripid(repr(x)), self.maxstring)
except:
return '<%s instance>' % x.__class__.__name__
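# --- Illustrative example (not part of the original module) ---
# A minimal sketch of TextRepr in action, assuming the class above and the
# cram() helper defined earlier in this module: oversized reprs are chopped
# down with an ellipsis rather than printed in full.
def _demo_textrepr_truncation():
    shortened = TextRepr().repr('x' * 500)
    # cram() keeps the head and tail of the text and splices in '...'
    assert '...' in shortened and len(shortened) < 500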
class TextDoc(Doc):
"""Formatter class for text documentation."""
# ------------------------------------------- text formatting utilities
_repr_instance = TextRepr()
repr = _repr_instance.repr
def bold(self, text):
"""Format a string in bold by overstriking."""
return ''.join(ch + '\b' + ch for ch in text)
def indent(self, text, prefix=' '):
"""Indent text by prepending a given prefix to each line."""
if not text: return ''
lines = [prefix + line for line in text.split('\n')]
if lines: lines[-1] = lines[-1].rstrip()
return '\n'.join(lines)
def section(self, title, contents):
"""Format a section with a given heading."""
clean_contents = self.indent(contents).rstrip()
return self.bold(title) + '\n' + clean_contents + '\n\n'
# ---------------------------------------------- type-specific routines
def formattree(self, tree, modname, parent=None, prefix=''):
"""Render in text a class tree as returned by inspect.getclasstree()."""
result = ''
for entry in tree:
if type(entry) is type(()):
c, bases = entry
result = result + prefix + classname(c, modname)
if bases and bases != (parent,):
parents = (classname(c, modname) for c in bases)
result = result + '(%s)' % ', '.join(parents)
result = result + '\n'
elif type(entry) is type([]):
result = result + self.formattree(
entry, modname, c, prefix + ' ')
return result
def docmodule(self, object, name=None, mod=None):
"""Produce text documentation for a given module object."""
name = object.__name__ # ignore the passed-in name
synop, desc = splitdoc(getdoc(object))
result = self.section('NAME', name + (synop and ' - ' + synop))
all = getattr(object, '__all__', None)
docloc = self.getdocloc(object)
if docloc is not None:
result = result + self.section('MODULE REFERENCE', docloc + """
The following documentation is automatically generated from the Python
source files. It may be incomplete, incorrect or include features that
are considered implementation detail and may vary between Python
implementations. When in doubt, consult the module reference at the
location listed above.
""")
if desc:
result = result + self.section('DESCRIPTION', desc)
classes = []
for key, value in inspect.getmembers(object, inspect.isclass):
# if __all__ exists, believe it. Otherwise use old heuristic.
if (all is not None
or (inspect.getmodule(value) or object) is object):
if visiblename(key, all, object):
classes.append((key, value))
funcs = []
for key, value in inspect.getmembers(object, inspect.isroutine):
# if __all__ exists, believe it. Otherwise use old heuristic.
if (all is not None or
inspect.isbuiltin(value) or inspect.getmodule(value) is object):
if visiblename(key, all, object):
funcs.append((key, value))
data = []
for key, value in inspect.getmembers(object, isdata):
if visiblename(key, all, object):
data.append((key, value))
modpkgs = []
modpkgs_names = set()
if hasattr(object, '__path__'):
for importer, modname, ispkg in pkgutil.iter_modules(object.__path__):
modpkgs_names.add(modname)
if ispkg:
modpkgs.append(modname + ' (package)')
else:
modpkgs.append(modname)
modpkgs.sort()
result = result + self.section(
'PACKAGE CONTENTS', '\n'.join(modpkgs))
# Detect submodules as sometimes created by C extensions
submodules = []
for key, value in inspect.getmembers(object, inspect.ismodule):
if value.__name__.startswith(name + '.') and key not in modpkgs_names:
submodules.append(key)
if submodules:
submodules.sort()
result = result + self.section(
'SUBMODULES', '\n'.join(submodules))
if classes:
classlist = [value for key, value in classes]
contents = [self.formattree(
inspect.getclasstree(classlist, 1), name)]
for key, value in classes:
contents.append(self.document(value, key, name))
result = result + self.section('CLASSES', '\n'.join(contents))
if funcs:
contents = []
for key, value in funcs:
contents.append(self.document(value, key, name))
result = result + self.section('FUNCTIONS', '\n'.join(contents))
if data:
contents = []
for key, value in data:
contents.append(self.docother(value, key, name, maxlen=70))
result = result + self.section('DATA', '\n'.join(contents))
if hasattr(object, '__version__'):
version = str(object.__version__)
if version[:11] == '$' + 'Revision: ' and version[-1:] == '$':
version = version[11:-1].strip()
result = result + self.section('VERSION', version)
if hasattr(object, '__date__'):
result = result + self.section('DATE', str(object.__date__))
if hasattr(object, '__author__'):
result = result + self.section('AUTHOR', str(object.__author__))
if hasattr(object, '__credits__'):
result = result + self.section('CREDITS', str(object.__credits__))
try:
file = inspect.getabsfile(object)
except TypeError:
file = '(built-in)'
result = result + self.section('FILE', file)
return result
def docclass(self, object, name=None, mod=None, *ignored):
"""Produce text documentation for a given class object."""
realname = object.__name__
name = name or realname
bases = object.__bases__
def makename(c, m=object.__module__):
return classname(c, m)
if name == realname:
title = 'class ' + self.bold(realname)
else:
title = self.bold(name) + ' = class ' + realname
if bases:
parents = map(makename, bases)
title = title + '(%s)' % ', '.join(parents)
doc = getdoc(object)
contents = doc and [doc + '\n'] or []
push = contents.append
# List the mro, if non-trivial.
mro = deque(inspect.getmro(object))
if len(mro) > 2:
push("Method resolution order:")
for base in mro:
push(' ' + makename(base))
push('')
# Cute little class to pump out a horizontal rule between sections.
class HorizontalRule:
def __init__(self):
self.needone = 0
def maybe(self):
if self.needone:
push('-' * 70)
self.needone = 1
hr = HorizontalRule()
def spill(msg, attrs, predicate):
ok, attrs = _split_list(attrs, predicate)
if ok:
hr.maybe()
push(msg)
for name, kind, homecls, value in ok:
try:
value = getattr(object, name)
except Exception:
# Some descriptors may meet a failure in their __get__.
# (bug #1785)
push(self._docdescriptor(name, value, mod))
else:
push(self.document(value,
name, mod, object))
return attrs
def spilldescriptors(msg, attrs, predicate):
ok, attrs = _split_list(attrs, predicate)
if ok:
hr.maybe()
push(msg)
for name, kind, homecls, value in ok:
push(self._docdescriptor(name, value, mod))
return attrs
def spilldata(msg, attrs, predicate):
ok, attrs = _split_list(attrs, predicate)
if ok:
hr.maybe()
push(msg)
for name, kind, homecls, value in ok:
if callable(value) or inspect.isdatadescriptor(value):
doc = getdoc(value)
else:
doc = None
push(self.docother(getattr(object, name),
name, mod, maxlen=70, doc=doc) + '\n')
return attrs
attrs = [(name, kind, cls, value)
for name, kind, cls, value in classify_class_attrs(object)
if visiblename(name, obj=object)]
while attrs:
if mro:
thisclass = mro.popleft()
else:
thisclass = attrs[0][2]
attrs, inherited = _split_list(attrs, lambda t: t[2] is thisclass)
if thisclass is builtins.object:
attrs = inherited
continue
elif thisclass is object:
tag = "defined here"
else:
tag = "inherited from %s" % classname(thisclass,
object.__module__)
# Sort attrs by name.
attrs.sort()
# Pump out the attrs, segregated by kind.
attrs = spill("Methods %s:\n" % tag, attrs,
lambda t: t[1] == 'method')
attrs = spill("Class methods %s:\n" % tag, attrs,
lambda t: t[1] == 'class method')
attrs = spill("Static methods %s:\n" % tag, attrs,
lambda t: t[1] == 'static method')
attrs = spilldescriptors("Data descriptors %s:\n" % tag, attrs,
lambda t: t[1] == 'data descriptor')
attrs = spilldata("Data and other attributes %s:\n" % tag, attrs,
lambda t: t[1] == 'data')
assert attrs == []
attrs = inherited
contents = '\n'.join(contents)
if not contents:
return title + '\n'
return title + '\n' + self.indent(contents.rstrip(), ' | ') + '\n'
def formatvalue(self, object):
"""Format an argument default value as text."""
return '=' + self.repr(object)
def docroutine(self, object, name=None, mod=None, cl=None):
"""Produce text documentation for a function or method object."""
realname = object.__name__
name = name or realname
note = ''
skipdocs = 0
if inspect.ismethod(object):
imclass = object.__self__.__class__
if cl:
if imclass is not cl:
note = ' from ' + classname(imclass, mod)
else:
if object.__self__ is not None:
note = ' method of %s instance' % classname(
object.__self__.__class__, mod)
else:
note = ' unbound %s method' % classname(imclass,mod)
object = object.__func__
if name == realname:
title = self.bold(realname)
else:
if (cl and realname in cl.__dict__ and
cl.__dict__[realname] is object):
skipdocs = 1
title = self.bold(name) + ' = ' + realname
if inspect.isfunction(object):
args, varargs, varkw, defaults, kwonlyargs, kwdefaults, ann = \
inspect.getfullargspec(object)
argspec = inspect.formatargspec(
args, varargs, varkw, defaults, kwonlyargs, kwdefaults, ann,
formatvalue=self.formatvalue,
formatannotation=inspect.formatannotationrelativeto(object))
if realname == '<lambda>':
title = self.bold(name) + ' lambda '
# XXX lambdas won't usually have func_annotations['return']
# since the syntax doesn't support it, but it is possible.
# So removing parentheses isn't truly safe.
argspec = argspec[1:-1] # remove parentheses
else:
argspec = '(...)'
decl = title + argspec + note
if skipdocs:
return decl + '\n'
else:
doc = getdoc(object) or ''
return decl + '\n' + (doc and self.indent(doc).rstrip() + '\n')
def _docdescriptor(self, name, value, mod):
results = []
push = results.append
if name:
push(self.bold(name))
push('\n')
doc = getdoc(value) or ''
if doc:
push(self.indent(doc))
push('\n')
return ''.join(results)
def docproperty(self, object, name=None, mod=None, cl=None):
"""Produce text documentation for a property."""
return self._docdescriptor(name, object, mod)
def docdata(self, object, name=None, mod=None, cl=None):
"""Produce text documentation for a data descriptor."""
return self._docdescriptor(name, object, mod)
def docother(self, object, name=None, mod=None, parent=None, maxlen=None, doc=None):
"""Produce text documentation for a data object."""
repr = self.repr(object)
if maxlen:
line = (name and name + ' = ' or '') + repr
chop = maxlen - len(line)
if chop < 0: repr = repr[:chop] + '...'
line = (name and self.bold(name) + ' = ' or '') + repr
if doc is not None:
line += '\n' + self.indent(str(doc))
return line
class _PlainTextDoc(TextDoc):
"""Subclass of TextDoc which overrides string styling"""
def bold(self, text):
return text
# --------------------------------------------------------- user interfaces
def pager(text):
"""The first time this is called, determine what kind of pager to use."""
global pager
pager = getpager()
pager(text)
def getpager():
"""Decide what method to use for paging through text."""
if not hasattr(sys.stdout, "isatty"):
return plainpager
if not sys.stdin.isatty() or not sys.stdout.isatty():
return plainpager
if 'PAGER' in os.environ:
if sys.platform == 'win32': # pipes completely broken in Windows
return lambda text: tempfilepager(plain(text), os.environ['PAGER'])
elif os.environ.get('TERM') in ('dumb', 'emacs'):
return lambda text: pipepager(plain(text), os.environ['PAGER'])
else:
return lambda text: pipepager(text, os.environ['PAGER'])
if os.environ.get('TERM') in ('dumb', 'emacs'):
return plainpager
if sys.platform == 'win32' or sys.platform.startswith('os2'):
return lambda text: tempfilepager(plain(text), 'more <')
if hasattr(os, 'system') and os.system('(less) 2>/dev/null') == 0:
return lambda text: pipepager(text, 'less')
import tempfile
(fd, filename) = tempfile.mkstemp()
os.close(fd)
try:
if hasattr(os, 'system') and os.system('more "%s"' % filename) == 0:
return lambda text: pipepager(text, 'more')
else:
return ttypager
finally:
os.unlink(filename)
def plain(text):
"""Remove boldface formatting from text."""
return re.sub('.\b', '', text)
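# --- Illustrative example (not part of the original module) ---
# A small round-trip sketch: TextDoc.bold() emulates boldface by overstriking
# each character with a backspace, and plain() strips that formatting again.
def _demo_plain_roundtrip():
    bolded = TextDoc().bold('abc')   # 'a\x08ab\x08bc\x08c'
    assert plain(bolded) == 'abc'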
def pipepager(text, cmd):
"""Page through text by feeding it to another program."""
pipe = os.popen(cmd, 'w')
try:
pipe.write(text)
pipe.close()
except IOError:
pass # Ignore broken pipes caused by quitting the pager program.
def tempfilepager(text, cmd):
"""Page through text by invoking a program on a temporary file."""
import tempfile
filename = tempfile.mktemp()
file = open(filename, 'w')
file.write(text)
file.close()
try:
os.system(cmd + ' "' + filename + '"')
finally:
os.unlink(filename)
def ttypager(text):
"""Page through text on a text terminal."""
lines = plain(text).split('\n')
try:
import tty
fd = sys.stdin.fileno()
old = tty.tcgetattr(fd)
tty.setcbreak(fd)
getchar = lambda: sys.stdin.read(1)
except (ImportError, AttributeError):
tty = None
getchar = lambda: sys.stdin.readline()[:-1][:1]
try:
r = inc = int(os.environ.get('LINES', 25)) - 1
sys.stdout.write('\n'.join(lines[:inc]) + '\n')
while lines[r:]:
sys.stdout.write('-- more --')
sys.stdout.flush()
c = getchar()
if c in ('q', 'Q'):
sys.stdout.write('\r \r')
break
elif c in ('\r', '\n'):
sys.stdout.write('\r \r' + lines[r] + '\n')
r = r + 1
continue
if c in ('b', 'B', '\x1b'):
r = r - inc - inc
if r < 0: r = 0
sys.stdout.write('\n' + '\n'.join(lines[r:r+inc]) + '\n')
r = r + inc
finally:
if tty:
tty.tcsetattr(fd, tty.TCSAFLUSH, old)
def plainpager(text):
"""Simply print unformatted text. This is the ultimate fallback."""
sys.stdout.write(plain(text))
def describe(thing):
"""Produce a short description of the given thing."""
if inspect.ismodule(thing):
if thing.__name__ in sys.builtin_module_names:
return 'built-in module ' + thing.__name__
if hasattr(thing, '__path__'):
return 'package ' + thing.__name__
else:
return 'module ' + thing.__name__
if inspect.isbuiltin(thing):
return 'built-in function ' + thing.__name__
if inspect.isgetsetdescriptor(thing):
return 'getset descriptor %s.%s.%s' % (
thing.__objclass__.__module__, thing.__objclass__.__name__,
thing.__name__)
if inspect.ismemberdescriptor(thing):
return 'member descriptor %s.%s.%s' % (
thing.__objclass__.__module__, thing.__objclass__.__name__,
thing.__name__)
if inspect.isclass(thing):
return 'class ' + thing.__name__
if inspect.isfunction(thing):
return 'function ' + thing.__name__
if inspect.ismethod(thing):
return 'method ' + thing.__name__
return type(thing).__name__
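# --- Illustrative example (not part of the original module) ---
# describe() produces the short labels used in page titles; a couple of
# representative cases:
def _demo_describe():
    assert describe(inspect) == 'module inspect'
    assert describe(inspect.isfunction) == 'function isfunction'
    assert describe(Helper) == 'class Helper'   # Helper is defined below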
def locate(path, forceload=0):
"""Locate an object by name or dotted path, importing as necessary."""
parts = [part for part in path.split('.') if part]
module, n = None, 0
while n < len(parts):
nextmodule = safeimport('.'.join(parts[:n+1]), forceload)
if nextmodule: module, n = nextmodule, n + 1
else: break
if module:
object = module
else:
object = builtins
for part in parts[n:]:
try:
object = getattr(object, part)
except AttributeError:
return None
return object
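# --- Illustrative example (not part of the original module) ---
# locate() imports as much of the dotted path as it can, then walks the rest
# with getattr(); bare names fall back to the builtins module.
def _demo_locate():
    import os
    assert locate('os.path.join') is os.path.join
    assert locate('len') is len              # resolved via builtins
    assert locate('no.such.module') is None  # unresolvable paths yield None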
# --------------------------------------- interactive interpreter interface
text = TextDoc()
plaintext = _PlainTextDoc()
html = HTMLDoc()
def resolve(thing, forceload=0):
"""Given an object or a path to an object, get the object and its name."""
if isinstance(thing, str):
object = locate(thing, forceload)
if not object:
raise ImportError('no Python documentation found for %r' % thing)
return object, thing
else:
name = getattr(thing, '__name__', None)
return thing, name if isinstance(name, str) else None
def render_doc(thing, title='Python Library Documentation: %s', forceload=0,
renderer=None):
"""Render text documentation, given an object or a path to an object."""
if renderer is None:
renderer = text
object, name = resolve(thing, forceload)
desc = describe(object)
module = inspect.getmodule(object)
if name and '.' in name:
desc += ' in ' + name[:name.rfind('.')]
elif module and module is not object:
desc += ' in module ' + module.__name__
if not (inspect.ismodule(object) or
inspect.isclass(object) or
inspect.isroutine(object) or
inspect.isgetsetdescriptor(object) or
inspect.ismemberdescriptor(object) or
isinstance(object, property)):
# If the passed object is a piece of data or an instance,
# document its available methods instead of its value.
object = type(object)
desc += ' object'
return title % desc + '\n\n' + renderer.document(object, name)
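# --- Illustrative example (not part of the original module) ---
# render_doc() resolves its argument and returns the finished page as a
# string; with the plain-text renderer the output carries no overstrikes.
def _demo_render_doc():
    page = render_doc('str', renderer=plaintext)
    assert page.startswith('Python Library Documentation: class str')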
def doc(thing, title='Python Library Documentation: %s', forceload=0,
output=None):
"""Display text documentation, given an object or a path to an object."""
try:
if output is None:
pager(render_doc(thing, title, forceload))
else:
output.write(render_doc(thing, title, forceload, plaintext))
except (ImportError, ErrorDuringImport) as value:
print(value)
def writedoc(thing, forceload=0):
"""Write HTML documentation to a file in the current directory."""
try:
object, name = resolve(thing, forceload)
page = html.page(describe(object), html.document(object, name))
file = open(name + '.html', 'w', encoding='utf-8')
file.write(page)
file.close()
print('wrote', name + '.html')
except (ImportError, ErrorDuringImport) as value:
print(value)
def writedocs(dir, pkgpath='', done=None):
"""Write out HTML documentation for all modules in a directory tree."""
if done is None: done = {}
for importer, modname, ispkg in pkgutil.walk_packages([dir], pkgpath):
writedoc(modname)
return
class Helper:
# These dictionaries map a topic name to either an alias, or a tuple
# (label, seealso-items). The "label" is the label of the corresponding
# section in the .rst file under Doc/ and an index into the dictionary
# in pydoc_data/topics.py.
#
# CAUTION: if you change one of these dictionaries, be sure to adapt the
# list of needed labels in Doc/tools/sphinxext/pyspecific.py and
# regenerate the pydoc_data/topics.py file by running
# make pydoc-topics
# in Doc/ and copying the output file into the Lib/ directory.
keywords = {
'False': '',
'None': '',
'True': '',
'and': 'BOOLEAN',
'as': 'with',
'assert': ('assert', ''),
'break': ('break', 'while for'),
'class': ('class', 'CLASSES SPECIALMETHODS'),
'continue': ('continue', 'while for'),
'def': ('function', ''),
'del': ('del', 'BASICMETHODS'),
'elif': 'if',
'else': ('else', 'while for'),
'except': 'try',
'finally': 'try',
'for': ('for', 'break continue while'),
'from': 'import',
'global': ('global', 'nonlocal NAMESPACES'),
'if': ('if', 'TRUTHVALUE'),
'import': ('import', 'MODULES'),
'in': ('in', 'SEQUENCEMETHODS'),
'is': 'COMPARISON',
'lambda': ('lambda', 'FUNCTIONS'),
'nonlocal': ('nonlocal', 'global NAMESPACES'),
'not': 'BOOLEAN',
'or': 'BOOLEAN',
'pass': ('pass', ''),
'raise': ('raise', 'EXCEPTIONS'),
'return': ('return', 'FUNCTIONS'),
'try': ('try', 'EXCEPTIONS'),
'while': ('while', 'break continue if TRUTHVALUE'),
'with': ('with', 'CONTEXTMANAGERS EXCEPTIONS yield'),
'yield': ('yield', ''),
}
# Either add symbols to this dictionary or to the symbols dictionary
# directly: Whichever is easier. They are merged later.
_symbols_inverse = {
'STRINGS' : ("'", "'''", "r'", "b'", '"""', '"', 'r"', 'b"'),
'OPERATORS' : ('+', '-', '*', '**', '/', '//', '%', '<<', '>>', '&',
'|', '^', '~', '<', '>', '<=', '>=', '==', '!=', '<>'),
'COMPARISON' : ('<', '>', '<=', '>=', '==', '!=', '<>'),
'UNARY' : ('-', '~'),
'AUGMENTEDASSIGNMENT' : ('+=', '-=', '*=', '/=', '%=', '&=', '|=',
'^=', '<<=', '>>=', '**=', '//='),
'BITWISE' : ('<<', '>>', '&', '|', '^', '~'),
'COMPLEX' : ('j', 'J')
}
symbols = {
'%': 'OPERATORS FORMATTING',
'**': 'POWER',
',': 'TUPLES LISTS FUNCTIONS',
'.': 'ATTRIBUTES FLOAT MODULES OBJECTS',
'...': 'ELLIPSIS',
':': 'SLICINGS DICTIONARYLITERALS',
'@': 'def class',
'\\': 'STRINGS',
'_': 'PRIVATENAMES',
'__': 'PRIVATENAMES SPECIALMETHODS',
'`': 'BACKQUOTES',
'(': 'TUPLES FUNCTIONS CALLS',
')': 'TUPLES FUNCTIONS CALLS',
'[': 'LISTS SUBSCRIPTS SLICINGS',
']': 'LISTS SUBSCRIPTS SLICINGS'
}
for topic, symbols_ in _symbols_inverse.items():
for symbol in symbols_:
topics = symbols.get(symbol, topic)
if topic not in topics:
topics = topics + ' ' + topic
symbols[symbol] = topics
topics = {
'TYPES': ('types', 'STRINGS UNICODE NUMBERS SEQUENCES MAPPINGS '
'FUNCTIONS CLASSES MODULES FILES inspect'),
'STRINGS': ('strings', 'str UNICODE SEQUENCES STRINGMETHODS '
'FORMATTING TYPES'),
'STRINGMETHODS': ('string-methods', 'STRINGS FORMATTING'),
'FORMATTING': ('formatstrings', 'OPERATORS'),
'UNICODE': ('strings', 'encodings unicode SEQUENCES STRINGMETHODS '
'FORMATTING TYPES'),
'NUMBERS': ('numbers', 'INTEGER FLOAT COMPLEX TYPES'),
'INTEGER': ('integers', 'int range'),
'FLOAT': ('floating', 'float math'),
'COMPLEX': ('imaginary', 'complex cmath'),
'SEQUENCES': ('typesseq', 'STRINGMETHODS FORMATTING range LISTS'),
'MAPPINGS': 'DICTIONARIES',
'FUNCTIONS': ('typesfunctions', 'def TYPES'),
'METHODS': ('typesmethods', 'class def CLASSES TYPES'),
'CODEOBJECTS': ('bltin-code-objects', 'compile FUNCTIONS TYPES'),
'TYPEOBJECTS': ('bltin-type-objects', 'types TYPES'),
'FRAMEOBJECTS': 'TYPES',
'TRACEBACKS': 'TYPES',
'NONE': ('bltin-null-object', ''),
'ELLIPSIS': ('bltin-ellipsis-object', 'SLICINGS'),
'FILES': ('bltin-file-objects', ''),
'SPECIALATTRIBUTES': ('specialattrs', ''),
'CLASSES': ('types', 'class SPECIALMETHODS PRIVATENAMES'),
'MODULES': ('typesmodules', 'import'),
'PACKAGES': 'import',
'EXPRESSIONS': ('operator-summary', 'lambda or and not in is BOOLEAN '
'COMPARISON BITWISE SHIFTING BINARY FORMATTING POWER '
'UNARY ATTRIBUTES SUBSCRIPTS SLICINGS CALLS TUPLES '
'LISTS DICTIONARIES'),
'OPERATORS': 'EXPRESSIONS',
'PRECEDENCE': 'EXPRESSIONS',
'OBJECTS': ('objects', 'TYPES'),
'SPECIALMETHODS': ('specialnames', 'BASICMETHODS ATTRIBUTEMETHODS '
'CALLABLEMETHODS SEQUENCEMETHODS MAPPINGMETHODS '
'NUMBERMETHODS CLASSES'),
'BASICMETHODS': ('customization', 'hash repr str SPECIALMETHODS'),
'ATTRIBUTEMETHODS': ('attribute-access', 'ATTRIBUTES SPECIALMETHODS'),
'CALLABLEMETHODS': ('callable-types', 'CALLS SPECIALMETHODS'),
'SEQUENCEMETHODS': ('sequence-types', 'SEQUENCES SEQUENCEMETHODS '
'SPECIALMETHODS'),
'MAPPINGMETHODS': ('sequence-types', 'MAPPINGS SPECIALMETHODS'),
'NUMBERMETHODS': ('numeric-types', 'NUMBERS AUGMENTEDASSIGNMENT '
'SPECIALMETHODS'),
'EXECUTION': ('execmodel', 'NAMESPACES DYNAMICFEATURES EXCEPTIONS'),
'NAMESPACES': ('naming', 'global nonlocal ASSIGNMENT DELETION DYNAMICFEATURES'),
'DYNAMICFEATURES': ('dynamic-features', ''),
'SCOPING': 'NAMESPACES',
'FRAMES': 'NAMESPACES',
'EXCEPTIONS': ('exceptions', 'try except finally raise'),
'CONVERSIONS': ('conversions', ''),
'IDENTIFIERS': ('identifiers', 'keywords SPECIALIDENTIFIERS'),
'SPECIALIDENTIFIERS': ('id-classes', ''),
'PRIVATENAMES': ('atom-identifiers', ''),
'LITERALS': ('atom-literals', 'STRINGS NUMBERS TUPLELITERALS '
'LISTLITERALS DICTIONARYLITERALS'),
'TUPLES': 'SEQUENCES',
'TUPLELITERALS': ('exprlists', 'TUPLES LITERALS'),
'LISTS': ('typesseq-mutable', 'LISTLITERALS'),
'LISTLITERALS': ('lists', 'LISTS LITERALS'),
'DICTIONARIES': ('typesmapping', 'DICTIONARYLITERALS'),
'DICTIONARYLITERALS': ('dict', 'DICTIONARIES LITERALS'),
'ATTRIBUTES': ('attribute-references', 'getattr hasattr setattr ATTRIBUTEMETHODS'),
'SUBSCRIPTS': ('subscriptions', 'SEQUENCEMETHODS'),
'SLICINGS': ('slicings', 'SEQUENCEMETHODS'),
'CALLS': ('calls', 'EXPRESSIONS'),
'POWER': ('power', 'EXPRESSIONS'),
'UNARY': ('unary', 'EXPRESSIONS'),
'BINARY': ('binary', 'EXPRESSIONS'),
'SHIFTING': ('shifting', 'EXPRESSIONS'),
'BITWISE': ('bitwise', 'EXPRESSIONS'),
'COMPARISON': ('comparisons', 'EXPRESSIONS BASICMETHODS'),
'BOOLEAN': ('booleans', 'EXPRESSIONS TRUTHVALUE'),
'ASSERTION': 'assert',
'ASSIGNMENT': ('assignment', 'AUGMENTEDASSIGNMENT'),
'AUGMENTEDASSIGNMENT': ('augassign', 'NUMBERMETHODS'),
'DELETION': 'del',
'RETURNING': 'return',
'IMPORTING': 'import',
'CONDITIONAL': 'if',
'LOOPING': ('compound', 'for while break continue'),
'TRUTHVALUE': ('truth', 'if while and or not BASICMETHODS'),
'DEBUGGING': ('debugger', 'pdb'),
'CONTEXTMANAGERS': ('context-managers', 'with'),
}
def __init__(self, input=None, output=None):
self._input = input
self._output = output
#fix me brython
self.input = self._input or sys.stdin
self.output = self._output or sys.stdout
#fix me brython
#input = property(lambda self: self._input or sys.stdin)
#output = property(lambda self: self._output or sys.stdout)
def __repr__(self):
if inspect.stack()[1][3] == '?':
self()
return ''
return '<pydoc.Helper instance>'
_GoInteractive = object()
def __call__(self, request=_GoInteractive):
if request is not self._GoInteractive:
self.help(request)
else:
self.intro()
self.interact()
self.output.write('''
You are now leaving help and returning to the Python interpreter.
If you want to ask for help on a particular object directly from the
interpreter, you can type "help(object)". Executing "help('string')"
has the same effect as typing a particular string at the help> prompt.
''')
def interact(self):
self.output.write('\n')
while True:
try:
request = self.getline('help> ')
if not request: break
except (KeyboardInterrupt, EOFError):
break
request = replace(request, '"', '', "'", '').strip()
if request.lower() in ('q', 'quit'): break
self.help(request)
def getline(self, prompt):
"""Read one line, using input() when appropriate."""
if self.input is sys.stdin:
return input(prompt)
else:
self.output.write(prompt)
self.output.flush()
return self.input.readline()
def help(self, request):
if type(request) is type(''):
request = request.strip()
if request == 'help': self.intro()
elif request == 'keywords': self.listkeywords()
elif request == 'symbols': self.listsymbols()
elif request == 'topics': self.listtopics()
elif request == 'modules': self.listmodules()
elif request[:8] == 'modules ':
self.listmodules(request.split()[1])
elif request in self.symbols: self.showsymbol(request)
elif request in ['True', 'False', 'None']:
# special case these keywords since they are objects too
doc(eval(request), 'Help on %s:')
elif request in self.keywords: self.showtopic(request)
elif request in self.topics: self.showtopic(request)
elif request: doc(request, 'Help on %s:', output=self._output)
elif isinstance(request, Helper): self()
else: doc(request, 'Help on %s:', output=self._output)
self.output.write('\n')
def intro(self):
self.output.write('''
Welcome to Python %s! This is the interactive help utility.
If this is your first time using Python, you should definitely check out
the tutorial on the Internet at http://docs.python.org/%s/tutorial/.
Enter the name of any module, keyword, or topic to get help on writing
Python programs and using Python modules. To quit this help utility and
return to the interpreter, just type "quit".
To get a list of available modules, keywords, or topics, type "modules",
"keywords", or "topics". Each module also comes with a one-line summary
of what it does; to list the modules whose summaries contain a given word
such as "spam", type "modules spam".
''' % tuple([sys.version[:3]]*2))
def list(self, items, columns=4, width=80):
items = list(sorted(items))
colw = width // columns
rows = (len(items) + columns - 1) // columns
for row in range(rows):
for col in range(columns):
i = col * rows + row
if i < len(items):
self.output.write(items[i])
if col < columns - 1:
self.output.write(' ' + ' ' * (colw - 1 - len(items[i])))
self.output.write('\n')
def listkeywords(self):
self.output.write('''
Here is a list of the Python keywords. Enter any keyword to get more help.
''')
self.list(self.keywords.keys())
def listsymbols(self):
self.output.write('''
Here is a list of the punctuation symbols which Python assigns special meaning
to. Enter any symbol to get more help.
''')
self.list(self.symbols.keys())
def listtopics(self):
self.output.write('''
Here is a list of available topics. Enter any topic name to get more help.
''')
self.list(self.topics.keys())
def showtopic(self, topic, more_xrefs=''):
try:
import pydoc_data.topics
except ImportError:
self.output.write('''
Sorry, topic and keyword documentation is not available because the
module "pydoc_data.topics" could not be found.
''')
return
target = self.topics.get(topic, self.keywords.get(topic))
if not target:
self.output.write('no documentation found for %s\n' % repr(topic))
return
if type(target) is type(''):
return self.showtopic(target, more_xrefs)
label, xrefs = target
try:
doc = pydoc_data.topics.topics[label]
except KeyError:
self.output.write('no documentation found for %s\n' % repr(topic))
return
pager(doc.strip() + '\n')
if more_xrefs:
xrefs = (xrefs or '') + ' ' + more_xrefs
if xrefs:
import formatter
buffer = io.StringIO()
formatter.DumbWriter(buffer).send_flowing_data(
'Related help topics: ' + ', '.join(xrefs.split()) + '\n')
self.output.write('\n%s\n' % buffer.getvalue())
def _gettopic(self, topic, more_xrefs=''):
"""Return unbuffered tuple of (topic, xrefs).
If an error occurs here, the exception is caught and displayed by
the url handler.
This function duplicates the showtopic method but returns its
result directly so it can be formatted for display in an html page.
"""
try:
import pydoc_data.topics
except ImportError:
return('''
Sorry, topic and keyword documentation is not available because the
module "pydoc_data.topics" could not be found.
''' , '')
target = self.topics.get(topic, self.keywords.get(topic))
if not target:
raise ValueError('could not find topic')
if isinstance(target, str):
return self._gettopic(target, more_xrefs)
label, xrefs = target
doc = pydoc_data.topics.topics[label]
if more_xrefs:
xrefs = (xrefs or '') + ' ' + more_xrefs
return doc, xrefs
def showsymbol(self, symbol):
target = self.symbols[symbol]
topic, _, xrefs = target.partition(' ')
self.showtopic(topic, xrefs)
def listmodules(self, key=''):
if key:
self.output.write('''
Here is a list of matching modules. Enter any module name to get more help.
''')
apropos(key)
else:
self.output.write('''
Please wait a moment while I gather a list of all available modules...
''')
modules = {}
def callback(path, modname, desc, modules=modules):
if modname and modname[-9:] == '.__init__':
modname = modname[:-9] + ' (package)'
if modname.find('.') < 0:
modules[modname] = 1
def onerror(modname):
callback(None, modname, None)
ModuleScanner().run(callback, onerror=onerror)
self.list(modules.keys())
self.output.write('''
Enter any module name to get more help. Or, type "modules spam" to search
for modules whose descriptions contain the word "spam".
''')
help = Helper()
class Scanner:
"""A generic tree iterator."""
def __init__(self, roots, children, descendp):
self.roots = roots[:]
self.state = []
self.children = children
self.descendp = descendp
def next(self):
if not self.state:
if not self.roots:
return None
root = self.roots.pop(0)
self.state = [(root, self.children(root))]
node, children = self.state[-1]
if not children:
self.state.pop()
return self.next()
child = children.pop(0)
if self.descendp(child):
self.state.append((child, self.children(child)))
return child
class ModuleScanner:
"""An interruptible scanner that searches module synopses."""
def run(self, callback, key=None, completer=None, onerror=None):
if key: key = key.lower()
self.quit = False
seen = {}
for modname in sys.builtin_module_names:
if modname != '__main__':
seen[modname] = 1
if key is None:
callback(None, modname, '')
else:
name = __import__(modname).__doc__ or ''
desc = name.split('\n')[0]
name = modname + ' - ' + desc
if name.lower().find(key) >= 0:
callback(None, modname, desc)
for importer, modname, ispkg in pkgutil.walk_packages(onerror=onerror):
if self.quit:
break
if key is None:
callback(None, modname, '')
else:
try:
loader = importer.find_module(modname)
except SyntaxError:
# raised by tests for bad coding cookies or BOM
continue
if hasattr(loader, 'get_source'):
try:
source = loader.get_source(modname)
except Exception:
if onerror:
onerror(modname)
continue
desc = source_synopsis(io.StringIO(source)) or ''
if hasattr(loader, 'get_filename'):
path = loader.get_filename(modname)
else:
path = None
else:
try:
module = loader.load_module(modname)
except ImportError:
if onerror:
onerror(modname)
continue
desc = (module.__doc__ or '').split('\n')[0]
path = getattr(module,'__file__',None)
name = modname + ' - ' + desc
if name.lower().find(key) >= 0:
callback(path, modname, desc)
if completer:
completer()
def apropos(key):
"""Print all the one-line module summaries that contain a substring."""
def callback(path, modname, desc):
if modname[-9:] == '.__init__':
modname = modname[:-9] + ' (package)'
print(modname, desc and '- ' + desc)
def onerror(modname):
pass
with warnings.catch_warnings():
warnings.filterwarnings('ignore') # ignore problems during import
ModuleScanner().run(callback, key, onerror=onerror)
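# --- Illustrative example (not part of the original module) ---
# apropos() prints one line per matching module; for instance, searching
# for 'json' would print the json module with its one-line synopsis,
# along with any other modules mentioning the key.
def _demo_apropos():
    apropos('json')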
# --------------------------------------- enhanced Web browser interface
def _start_server(urlhandler, port):
"""Start an HTTP server thread on a specific port.
Start an HTML/text server thread, so HTML or text documents can be
browsed dynamically and interactively with a Web browser. Example use:
>>> import time
>>> import pydoc
Define a URL handler. To determine what the client is asking
for, check the URL and content_type.
Then get or generate some text or HTML code and return it.
>>> def my_url_handler(url, content_type):
... text = 'the URL sent was: (%s, %s)' % (url, content_type)
... return text
Start server thread on port 0.
If you use port 0, the server will pick a random port number.
You can then use serverthread.port to get the port number.
>>> port = 0
>>> serverthread = pydoc._start_server(my_url_handler, port)
Check that the server is really started. If it is, open browser
and get first page. Use serverthread.url as the starting page.
>>> if serverthread.serving:
... import webbrowser
The next two lines are commented out so a browser doesn't open if
doctest is run on this module.
#... webbrowser.open(serverthread.url)
#True
Let the server do its thing. We just need to monitor its status.
Use time.sleep so the loop doesn't hog the CPU.
>>> starttime = time.time()
>>> timeout = 1 #seconds
This is a short timeout for testing purposes.
>>> while serverthread.serving:
... time.sleep(.01)
... if serverthread.serving and time.time() - starttime > timeout:
... serverthread.stop()
... break
Print any errors that may have occurred.
>>> print(serverthread.error)
None
"""
import http.server
import email.message
import select
import threading
class DocHandler(http.server.BaseHTTPRequestHandler):
def do_GET(self):
"""Process a request from an HTML browser.
The URL received is in self.path.
Get an HTML page from self.urlhandler and send it.
"""
if self.path.endswith('.css'):
content_type = 'text/css'
else:
content_type = 'text/html'
self.send_response(200)
self.send_header('Content-Type', '%s; charset=UTF-8' % content_type)
self.end_headers()
self.wfile.write(self.urlhandler(
self.path, content_type).encode('utf-8'))
def log_message(self, *args):
# Don't log messages.
pass
class DocServer(http.server.HTTPServer):
def __init__(self, port, callback):
self.host = (sys.platform == 'mac') and '127.0.0.1' or 'localhost'
self.address = ('', port)
self.callback = callback
self.base.__init__(self, self.address, self.handler)
self.quit = False
def serve_until_quit(self):
while not self.quit:
rd, wr, ex = select.select([self.socket.fileno()], [], [], 1)
if rd:
self.handle_request()
self.server_close()
def server_activate(self):
self.base.server_activate(self)
if self.callback:
self.callback(self)
class ServerThread(threading.Thread):
def __init__(self, urlhandler, port):
self.urlhandler = urlhandler
self.port = int(port)
threading.Thread.__init__(self)
self.serving = False
self.error = None
def run(self):
"""Start the server."""
try:
DocServer.base = http.server.HTTPServer
DocServer.handler = DocHandler
DocHandler.MessageClass = email.message.Message
DocHandler.urlhandler = staticmethod(self.urlhandler)
docsvr = DocServer(self.port, self.ready)
self.docserver = docsvr
docsvr.serve_until_quit()
except Exception as e:
self.error = e
def ready(self, server):
self.serving = True
self.host = server.host
self.port = server.server_port
self.url = 'http://%s:%d/' % (self.host, self.port)
def stop(self):
"""Stop the server and this thread nicely"""
self.docserver.quit = True
self.serving = False
self.url = None
thread = ServerThread(urlhandler, port)
thread.start()
# Wait until thread.serving is True to make sure we are
# really up before returning.
while not thread.error and not thread.serving:
time.sleep(.01)
return thread
def _url_handler(url, content_type="text/html"):
"""The pydoc url handler for use with the pydoc server.
If the content_type is 'text/css', the _pydoc.css style
sheet is read and returned if it exists.
If the content_type is 'text/html', then the result of
get_html_page(url) is returned.
"""
class _HTMLDoc(HTMLDoc):
def page(self, title, contents):
"""Format an HTML page."""
css_path = "pydoc_data/_pydoc.css"
css_link = (
'<link rel="stylesheet" type="text/css" href="%s">' %
css_path)
return '''\
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
<html><head><title>Pydoc: %s</title>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
%s</head><body bgcolor="#f0f0f8">%s<div style="clear:both;padding-top:.5em;">%s</div>
</body></html>''' % (title, css_link, html_navbar(), contents)
def filelink(self, url, path):
return '<a href="getfile?key=%s">%s</a>' % (url, path)
html = _HTMLDoc()
def html_navbar():
version = html.escape("%s [%s, %s]" % (platform.python_version(),
platform.python_build()[0],
platform.python_compiler()))
return """
<div style='float:left'>
Python %s<br>%s
</div>
<div style='float:right'>
<div style='text-align:center'>
<a href="index.html">Module Index</a>
: <a href="topics.html">Topics</a>
: <a href="keywords.html">Keywords</a>
</div>
<div>
<form action="get" style='display:inline;'>
<input type=text name=key size=15>
<input type=submit value="Get">
</form>
<form action="search" style='display:inline;'>
<input type=text name=key size=15>
<input type=submit value="Search">
</form>
</div>
</div>
""" % (version, html.escape(platform.platform(terse=True)))
def html_index():
"""Module Index page."""
def bltinlink(name):
return '<a href="%s.html">%s</a>' % (name, name)
heading = html.heading(
'<big><big><strong>Index of Modules</strong></big></big>',
'#ffffff', '#7799ee')
names = [name for name in sys.builtin_module_names
if name != '__main__']
contents = html.multicolumn(names, bltinlink)
contents = [heading, '<p>' + html.bigsection(
'Built-in Modules', '#ffffff', '#ee77aa', contents)]
seen = {}
for dir in sys.path:
contents.append(html.index(dir, seen))
contents.append(
'<p align=right><font color="#909090" face="helvetica,'
'arial"><strong>pydoc</strong> by Ka-Ping Yee'
'<[email protected]></font>')
return 'Index of Modules', ''.join(contents)
def html_search(key):
"""Search results page."""
# scan for modules
search_result = []
def callback(path, modname, desc):
if modname[-9:] == '.__init__':
modname = modname[:-9] + ' (package)'
search_result.append((modname, desc and '- ' + desc))
with warnings.catch_warnings():
warnings.filterwarnings('ignore') # ignore problems during import
ModuleScanner().run(callback, key)
# format page
def bltinlink(name):
return '<a href="%s.html">%s</a>' % (name, name)
results = []
heading = html.heading(
'<big><big><strong>Search Results</strong></big></big>',
'#ffffff', '#7799ee')
for name, desc in search_result:
results.append(bltinlink(name) + desc)
contents = heading + html.bigsection(
'key = %s' % key, '#ffffff', '#ee77aa', '<br>'.join(results))
return 'Search Results', contents
def html_getfile(path):
"""Get and display a source file listing safely."""
path = path.replace('%20', ' ')
with tokenize.open(path) as fp:
lines = html.escape(fp.read())
body = '<pre>%s</pre>' % lines
heading = html.heading(
'<big><big><strong>File Listing</strong></big></big>',
'#ffffff', '#7799ee')
contents = heading + html.bigsection(
'File: %s' % path, '#ffffff', '#ee77aa', body)
return 'getfile %s' % path, contents
def html_topics():
"""Index of topic texts available."""
def bltinlink(name):
return '<a href="topic?key=%s">%s</a>' % (name, name)
heading = html.heading(
'<big><big><strong>INDEX</strong></big></big>',
'#ffffff', '#7799ee')
names = sorted(Helper.topics.keys())
contents = html.multicolumn(names, bltinlink)
contents = heading + html.bigsection(
'Topics', '#ffffff', '#ee77aa', contents)
return 'Topics', contents
def html_keywords():
"""Index of keywords."""
heading = html.heading(
'<big><big><strong>INDEX</strong></big></big>',
'#ffffff', '#7799ee')
names = sorted(Helper.keywords.keys())
def bltinlink(name):
return '<a href="topic?key=%s">%s</a>' % (name, name)
contents = html.multicolumn(names, bltinlink)
contents = heading + html.bigsection(
'Keywords', '#ffffff', '#ee77aa', contents)
return 'Keywords', contents
def html_topicpage(topic):
"""Topic or keyword help page."""
buf = io.StringIO()
htmlhelp = Helper(buf, buf)
contents, xrefs = htmlhelp._gettopic(topic)
if topic in htmlhelp.keywords:
title = 'KEYWORD'
else:
title = 'TOPIC'
heading = html.heading(
'<big><big><strong>%s</strong></big></big>' % title,
'#ffffff', '#7799ee')
contents = '<pre>%s</pre>' % html.markup(contents)
contents = html.bigsection(topic , '#ffffff','#ee77aa', contents)
if xrefs:
xrefs = sorted(xrefs.split())
def bltinlink(name):
return '<a href="topic?key=%s">%s</a>' % (name, name)
xrefs = html.multicolumn(xrefs, bltinlink)
xrefs = html.section('Related help topics: ',
'#ffffff', '#ee77aa', xrefs)
return ('%s %s' % (title, topic),
''.join((heading, contents, xrefs)))
def html_getobj(url):
obj = locate(url, forceload=1)
if obj is None and url != 'None':
raise ValueError('could not find object')
title = describe(obj)
content = html.document(obj, url)
return title, content
def html_error(url, exc):
heading = html.heading(
'<big><big><strong>Error</strong></big></big>',
'#ffffff', '#7799ee')
contents = '<br>'.join(html.escape(line) for line in
format_exception_only(type(exc), exc))
contents = heading + html.bigsection(url, '#ffffff', '#bb0000',
contents)
return "Error - %s" % url, contents
def get_html_page(url):
"""Generate an HTML page for url."""
complete_url = url
if url.endswith('.html'):
url = url[:-5]
try:
if url in ("", "index"):
title, content = html_index()
elif url == "topics":
title, content = html_topics()
elif url == "keywords":
title, content = html_keywords()
elif '=' in url:
op, _, url = url.partition('=')
if op == "search?key":
title, content = html_search(url)
elif op == "getfile?key":
title, content = html_getfile(url)
elif op == "topic?key":
# try topics first, then objects.
try:
title, content = html_topicpage(url)
except ValueError:
title, content = html_getobj(url)
elif op == "get?key":
# try objects first, then topics.
if url in ("", "index"):
title, content = html_index()
else:
try:
title, content = html_getobj(url)
except ValueError:
title, content = html_topicpage(url)
else:
raise ValueError('bad pydoc url')
else:
title, content = html_getobj(url)
except Exception as exc:
# Catch any errors and display them in an error page.
title, content = html_error(complete_url, exc)
return html.page(title, content)
if url.startswith('/'):
url = url[1:]
if content_type == 'text/css':
path_here = os.path.dirname(os.path.realpath(__file__))
css_path = os.path.join(path_here, url)
with open(css_path) as fp:
return ''.join(fp.readlines())
elif content_type == 'text/html':
return get_html_page(url)
# Errors outside the url handler are caught by the server.
raise TypeError('unknown content type %r for url %s' % (content_type, url))
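# --- Illustrative example (not part of the original module) ---
# The URL handler maps paths to generated pages; asking for 'keywords.html'
# yields the keyword index without touching the network or the filesystem.
def _demo_url_handler():
    page = _url_handler('keywords.html', 'text/html')
    assert 'Keywords' in page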
def browse(port=0, *, open_browser=True):
"""Start the enhanced pydoc Web server and open a Web browser.
Use port '0' to start the server on an arbitrary port.
Set open_browser to False to suppress opening a browser.
"""
import webbrowser
serverthread = _start_server(_url_handler, port)
if serverthread.error:
print(serverthread.error)
return
if serverthread.serving:
server_help_msg = 'Server commands: [b]rowser, [q]uit'
if open_browser:
webbrowser.open(serverthread.url)
try:
print('Server ready at', serverthread.url)
print(server_help_msg)
while serverthread.serving:
cmd = input('server> ')
cmd = cmd.lower()
if cmd == 'q':
break
elif cmd == 'b':
webbrowser.open(serverthread.url)
else:
print(server_help_msg)
except (KeyboardInterrupt, EOFError):
print()
finally:
if serverthread.serving:
serverthread.stop()
print('Server stopped')
# -------------------------------------------------- command-line interface
def ispath(x):
return isinstance(x, str) and x.find(os.sep) >= 0
def cli():
"""Command-line interface (looks at sys.argv to decide what to do)."""
import getopt
class BadUsage(Exception): pass
# Scripts don't get the current directory in their path by default
# unless they are run with the '-m' switch
if '' not in sys.path:
scriptdir = os.path.dirname(sys.argv[0])
if scriptdir in sys.path:
sys.path.remove(scriptdir)
sys.path.insert(0, '.')
try:
opts, args = getopt.getopt(sys.argv[1:], 'bk:p:w')
writing = False
start_server = False
open_browser = False
port = None
for opt, val in opts:
if opt == '-b':
start_server = True
open_browser = True
if opt == '-k':
apropos(val)
return
if opt == '-p':
start_server = True
port = val
if opt == '-w':
writing = True
if start_server:
if port is None:
port = 0
browse(port, open_browser=open_browser)
return
if not args: raise BadUsage
for arg in args:
if ispath(arg) and not os.path.exists(arg):
print('file %r does not exist' % arg)
break
try:
if ispath(arg) and os.path.isfile(arg):
arg = importfile(arg)
if writing:
if ispath(arg) and os.path.isdir(arg):
writedocs(arg)
else:
writedoc(arg)
else:
help.help(arg)
except ErrorDuringImport as value:
print(value)
except (getopt.error, BadUsage):
cmd = os.path.splitext(os.path.basename(sys.argv[0]))[0]
print("""pydoc - the Python documentation tool
{cmd} <name> ...
Show text documentation on something. <name> may be the name of a
Python keyword, topic, function, module, or package, or a dotted
reference to a class or function within a module or module in a
package. If <name> contains a '{sep}', it is used as the path to a
Python source file to document. If name is 'keywords', 'topics',
or 'modules', a listing of these things is displayed.
{cmd} -k <keyword>
Search for a keyword in the synopsis lines of all available modules.
{cmd} -p <port>
Start an HTTP server on the given port on the local machine. Port
number 0 can be used to get an arbitrary unused port.
{cmd} -b
Start an HTTP server on an arbitrary unused port and open a Web browser
to interactively browse documentation. The -p option can be used with
the -b option to explicitly specify the server port.
{cmd} -w <name> ...
Write out the HTML documentation for a module to a file in the current
directory. If <name> contains a '{sep}', it is treated as a filename; if
it names a directory, documentation is written for all the contents.
""".format(cmd=cmd, sep=os.sep))
if __name__ == '__main__':
cli()
|
gpl-3.0
|
indrajitr/ansible
|
lib/ansible/parsing/utils/addresses.py
|
241
|
8167
|
# Copyright 2015 Abhijit Menon-Sen <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import re
from ansible.errors import AnsibleParserError, AnsibleError
# Components that match a numeric or alphanumeric begin:end or begin:end:step
# range expression inside square brackets.
numeric_range = r'''
\[
(?:[0-9]+:[0-9]+) # numeric begin:end
(?::[0-9]+)? # numeric :step (optional)
\]
'''
hexadecimal_range = r'''
\[
(?:[0-9a-f]+:[0-9a-f]+) # hexadecimal begin:end
(?::[0-9]+)? # numeric :step (optional)
\]
'''
alphanumeric_range = r'''
\[
(?:
[a-z]:[a-z]| # one-char alphabetic range
[0-9]+:[0-9]+ # ...or a numeric one
)
(?::[0-9]+)? # numeric :step (optional)
\]
'''
# Components that match a 16-bit portion of an IPv6 address in hexadecimal
# notation (0..ffff) or an 8-bit portion of an IPv4 address in decimal notation
# (0..255) or an [x:y(:z)] numeric range.
ipv6_component = r'''
(?:
[0-9a-f]{{1,4}}| # 0..ffff
{range} # or a numeric range
)
'''.format(range=hexadecimal_range)
ipv4_component = r'''
(?:
[01]?[0-9]{{1,2}}| # 0..199
2[0-4][0-9]| # 200..249
25[0-5]| # 250..255
{range} # or a numeric range
)
'''.format(range=numeric_range)
# A hostname label, e.g. 'foo' in 'foo.example.com'. Consists of alphanumeric
# characters plus dashes (and underscores) or valid ranges. The label may not
# start or end with a hyphen or an underscore. This is interpolated into the
# hostname pattern below. We don't try to enforce the 63-char length limit.
label = r'''
(?:[\w]|{range}) # Starts with an alphanumeric or a range
(?:[\w_-]|{range})* # Then zero or more of the same or [_-]
(?<![_-]) # ...as long as it didn't end with [_-]
'''.format(range=alphanumeric_range)
patterns = {
# This matches a square-bracketed expression with a port specification. What
# is inside the square brackets is validated later.
'bracketed_hostport': re.compile(
r'''^
\[(.+)\] # [host identifier]
:([0-9]+) # :port number
$
''', re.X
),
# This matches a bare IPv4 address or hostname (or host pattern including
# [x:y(:z)] ranges) with a port specification.
'hostport': re.compile(
r'''^
((?: # We want to match:
[^:\[\]] # (a non-range character
| # ...or...
\[[^\]]*\] # a complete bracketed expression)
)*) # repeated as many times as possible
:([0-9]+) # followed by a port number
$
''', re.X
),
# This matches an IPv4 address, but also permits range expressions.
'ipv4': re.compile(
r'''^
(?:{i4}\.){{3}}{i4} # Three parts followed by dots plus one
$
'''.format(i4=ipv4_component), re.X | re.I
),
# This matches an IPv6 address, but also permits range expressions.
#
# This expression looks complex, but it really only spells out the various
# combinations in which the basic unit of an IPv6 address (0..ffff) can be
# written, from :: to 1:2:3:4:5:6:7:8, plus the IPv4-in-IPv6 variants such
# as ::ffff:192.0.2.3.
#
# Note that we can't just use ipaddress.ip_address() because we also have to
# accept ranges in place of each component.
'ipv6': re.compile(
r'''^
(?:{0}:){{7}}{0}| # uncompressed: 1:2:3:4:5:6:7:8
(?:{0}:){{1,6}}:| # compressed variants, which are all
(?:{0}:)(?::{0}){{1,6}}| # a::b for various lengths of a,b
(?:{0}:){{2}}(?::{0}){{1,5}}|
(?:{0}:){{3}}(?::{0}){{1,4}}|
(?:{0}:){{4}}(?::{0}){{1,3}}|
(?:{0}:){{5}}(?::{0}){{1,2}}|
(?:{0}:){{6}}(?::{0})| # ...all with 2 <= a+b <= 7
:(?::{0}){{1,6}}| # ::ffff(:ffff...)
{0}?::| # ffff::, ::
# ipv4-in-ipv6 variants
(?:0:){{6}}(?:{0}\.){{3}}{0}|
::(?:ffff:)?(?:{0}\.){{3}}{0}|
(?:0:){{5}}ffff:(?:{0}\.){{3}}{0}
$
'''.format(ipv6_component), re.X | re.I
),
# This matches a hostname or host pattern including [x:y(:z)] ranges.
#
# We roughly follow DNS rules here, but also allow ranges (and underscores).
# In the past, no systematic rules were enforced about inventory hostnames,
# but the parsing context (e.g. shlex.split(), fnmatch.fnmatch()) excluded
# various metacharacters anyway.
#
# We don't enforce DNS length restrictions here (63 characters per label,
# 253 characters total) or make any attempt to process IDNs.
'hostname': re.compile(
r'''^
{label} # We must have at least one label
(?:\.{label})* # Followed by zero or more .labels
$
'''.format(label=label), re.X | re.I | re.UNICODE
),
}
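# For example (an illustrative sketch), each of these matches because a
# component may be replaced by a bracketed range:
#
#     patterns['ipv4'].match('192.0.2.[1:3]')         # octet as a range
#     patterns['ipv6'].match('fe80::[0:ffff]')        # 16-bit group as a range
#     patterns['hostname'].match('db-[a:c].internal') # label with a range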
def parse_address(address, allow_ranges=False):
"""
Takes a string and returns a (host, port) tuple. If the host is None, then
the string could not be parsed as a host identifier with an optional port
specification. If the port is None, then no port was specified.
The host identifier may be a hostname (qualified or not), an IPv4 address,
or an IPv6 address. If allow_ranges is True, then any of those may contain
[x:y] range specifications, e.g. foo[1:3] or foo[0:5]-bar[x:z].
The port number is an optional :NN suffix on an IPv4 address or host name,
or a mandatory :NN suffix on any square-bracketed expression: IPv6 address,
IPv4 address, or host name. (This means the only way to specify a port for
an IPv6 address is to enclose it in square brackets.)
"""
# First, we extract the port number if one is specified.
port = None
for matching in ['bracketed_hostport', 'hostport']:
m = patterns[matching].match(address)
if m:
(address, port) = m.groups()
port = int(port)
continue
# What we're left with now must be an IPv4 or IPv6 address, possibly with
# numeric ranges, or a hostname with alphanumeric ranges.
host = None
for matching in ['ipv4', 'ipv6', 'hostname']:
m = patterns[matching].match(address)
if m:
host = address
continue
# If it isn't any of the above, we don't understand it.
if not host:
raise AnsibleError("Not a valid network hostname: %s" % address)
# If we get to this point, we know that any included ranges are valid.
# If the caller is prepared to handle them, all is well.
# Otherwise we treat it as a parse failure.
if not allow_ranges and '[' in host:
raise AnsibleParserError("Detected range in host but was asked to ignore ranges")
return (host, port)
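# Illustrative usage sketch (not part of the original module):
#
#     parse_address('192.0.2.1:22')      # -> ('192.0.2.1', 22)
#     parse_address('[2001:db8::1]:22')  # -> ('2001:db8::1', 22)
#     # Range expressions are accepted only when the caller opts in:
#     parse_address('web[1:4].example.com', allow_ranges=True)
#                                        # -> ('web[1:4].example.com', None)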
|
gpl-3.0
|
YoungKwonJo/mlxtend
|
tests/tests_evaluate/test_learning_curves.py
|
1
|
2212
|
from mlxtend.evaluate import plot_learning_curves
from sklearn import datasets
from sklearn.cross_validation import train_test_split
from sklearn.tree import DecisionTreeClassifier
import numpy as np
def test_training_size():
iris = datasets.load_iris()
X = iris.data
y = iris.target
X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.6, random_state=2)
clf = DecisionTreeClassifier(max_depth=1, random_state=1)
training_errors, test_errors = plot_learning_curves(X_train, y_train, X_test, y_test, clf, kind='training_size', suppress_plot=True)
desired1 = [0.32, 0.33, 0.32, 0.33, 0.30, 0.31, 0.31, 0.22, 0.22, 0.22]
desired2 = [0.35, 0.35, 0.35, 0.35, 0.43, 0.45, 0.35, 0.35, 0.45, 0.45]
np.testing.assert_almost_equal(training_errors, desired1, decimal=2)
np.testing.assert_almost_equal(test_errors, desired2, decimal=2)
def test_scikit_metrics():
iris = datasets.load_iris()
X = iris.data
y = iris.target
X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.6, random_state=2)
clf = DecisionTreeClassifier(max_depth=1, random_state=1)
training_errors, test_errors = plot_learning_curves(X_train, y_train, X_test, y_test, clf, kind='training_size', suppress_plot=True, scoring='accuracy')
desired1 = [0.68, 0.67, 0.68, 0.67, 0.7, 0.69, 0.69, 0.78, 0.78, 0.78]
desired2 = [0.65, 0.65, 0.65, 0.65, 0.57, 0.55, 0.65, 0.65, 0.55, 0.55]
np.testing.assert_almost_equal(training_errors, desired1, decimal=2)
np.testing.assert_almost_equal(test_errors, desired2, decimal=2)
def test_n_features():
iris = datasets.load_iris()
X = iris.data
y = iris.target
X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.6, random_state=2)
clf = DecisionTreeClassifier(max_depth=1, random_state=1)
training_errors, test_errors = plot_learning_curves(X_train, y_train, X_test, y_test, clf, kind='n_features', suppress_plot=True)
desired1 = [0.40, 0.40, 0.32, 0.32]
desired2 = [0.42, 0.42, 0.35, 0.35]
np.testing.assert_almost_equal(training_errors, desired1, decimal=2)
np.testing.assert_almost_equal(test_errors, desired2, decimal=2)
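# Illustrative sketch (same API as the calls above): omitting suppress_plot
# renders the learning curves instead of only returning the error lists:
#
#     plot_learning_curves(X_train, y_train, X_test, y_test, clf,
#                          kind='training_size')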
|
bsd-3-clause
|
ojake/django
|
tests/admin_views/admin.py
|
98
|
30354
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
import tempfile
from wsgiref.util import FileWrapper
from django import forms
from django.conf.urls import url
from django.contrib import admin
from django.contrib.admin import BooleanFieldListFilter
from django.contrib.admin.views.main import ChangeList
from django.contrib.auth.admin import GroupAdmin, UserAdmin
from django.contrib.auth.models import Group, User
from django.core.exceptions import ValidationError
from django.core.files.storage import FileSystemStorage
from django.core.mail import EmailMessage
from django.db import models
from django.forms.models import BaseModelFormSet
from django.http import HttpResponse, StreamingHttpResponse
from django.utils.safestring import mark_safe
from django.utils.six import StringIO
from .models import (
Actor, AdminOrderedAdminMethod, AdminOrderedCallable, AdminOrderedField,
AdminOrderedModelMethod, Album, Answer, Article, BarAccount, Book,
Category, Chapter, ChapterXtra1, Child, ChildOfReferer, Choice, City,
Collector, Color, Color2, ComplexSortedPerson, CoverLetter, CustomArticle,
CyclicOne, CyclicTwo, DependentChild, DooHickey, EmptyModel,
EmptyModelHidden, EmptyModelMixin, EmptyModelVisible, ExplicitlyProvidedPK,
ExternalSubscriber, Fabric, FancyDoodad, FieldOverridePost,
FilteredManager, FooAccount, FoodDelivery, FunkyTag, Gadget, Gallery,
Grommet, ImplicitlyGeneratedPK, Ingredient, InlineReference, InlineReferer,
Inquisition, Language, Link, MainPrepopulated, ModelWithStringPrimaryKey,
NotReferenced, OldSubscriber, OtherStory, Paper, Parent,
ParentWithDependentChildren, Person, Persona, Picture, Pizza, Plot,
PlotDetails, PluggableSearchPerson, Podcast, Post, PrePopulatedPost,
PrePopulatedPostLargeSlug, PrePopulatedSubPost, Promo, Question, Recipe,
Recommendation, Recommender, ReferencedByInline, ReferencedByParent,
RelatedPrepopulated, Report, Reservation, Restaurant,
RowLevelChangePermissionModel, Section, ShortMessage, Simple, Sketch,
State, Story, StumpJoke, Subscriber, SuperVillain, Telegram, Thing,
Topping, UnchangeableObject, UndeletableObject, UnorderedObject,
UserMessenger, Villain, Vodcast, Whatsit, Widget, Worker, WorkHour,
)
def callable_year(dt_value):
try:
return dt_value.year
except AttributeError:
return None
callable_year.admin_order_field = 'date'
class ArticleInline(admin.TabularInline):
model = Article
fk_name = 'section'
prepopulated_fields = {
'title': ('content',)
}
fieldsets = (
('Some fields', {
'classes': ('collapse',),
'fields': ('title', 'content')
}),
('Some other fields', {
'classes': ('wide',),
'fields': ('date', 'section')
})
)
class ChapterInline(admin.TabularInline):
model = Chapter
class ChapterXtra1Admin(admin.ModelAdmin):
list_filter = ('chap',
'chap__title',
'chap__book',
'chap__book__name',
'chap__book__promo',
'chap__book__promo__name',)
class ArticleAdmin(admin.ModelAdmin):
list_display = ('content', 'date', callable_year, 'model_year',
'modeladmin_year', 'model_year_reversed')
list_filter = ('date', 'section')
view_on_site = False
fieldsets = (
('Some fields', {
'classes': ('collapse',),
'fields': ('title', 'content')
}),
('Some other fields', {
'classes': ('wide',),
'fields': ('date', 'section', 'sub_section')
})
)
def changelist_view(self, request):
"Test that extra_context works"
return super(ArticleAdmin, self).changelist_view(
request, extra_context={
'extra_var': 'Hello!'
}
)
def modeladmin_year(self, obj):
return obj.date.year
modeladmin_year.admin_order_field = 'date'
modeladmin_year.short_description = None
def delete_model(self, request, obj):
EmailMessage(
'Greetings from a deleted object',
'I hereby inform you that some user deleted me',
'[email protected]',
['[email protected]']
).send()
return super(ArticleAdmin, self).delete_model(request, obj)
def save_model(self, request, obj, form, change=True):
EmailMessage(
'Greetings from a created object',
'I hereby inform you that some user created me',
'[email protected]',
['[email protected]']
).send()
return super(ArticleAdmin, self).save_model(request, obj, form, change)
class ArticleAdmin2(admin.ModelAdmin):
def has_module_permission(self, request):
return False
class RowLevelChangePermissionModelAdmin(admin.ModelAdmin):
def has_change_permission(self, request, obj=None):
""" Only allow changing objects with even id number """
return request.user.is_staff and (obj is not None) and (obj.id % 2 == 0)
class CustomArticleAdmin(admin.ModelAdmin):
"""
Tests various hooks for using custom templates and contexts.
"""
change_list_template = 'custom_admin/change_list.html'
change_form_template = 'custom_admin/change_form.html'
add_form_template = 'custom_admin/add_form.html'
object_history_template = 'custom_admin/object_history.html'
delete_confirmation_template = 'custom_admin/delete_confirmation.html'
delete_selected_confirmation_template = 'custom_admin/delete_selected_confirmation.html'
def changelist_view(self, request):
"Test that extra_context works"
return super(CustomArticleAdmin, self).changelist_view(
request, extra_context={
'extra_var': 'Hello!'
}
)
class ThingAdmin(admin.ModelAdmin):
list_filter = ('color__warm', 'color__value', 'pub_date',)
class InquisitionAdmin(admin.ModelAdmin):
list_display = ('leader', 'country', 'expected')
class SketchAdmin(admin.ModelAdmin):
raw_id_fields = ('inquisition', 'defendant0', 'defendant1')
class FabricAdmin(admin.ModelAdmin):
list_display = ('surface',)
list_filter = ('surface',)
class BasePersonModelFormSet(BaseModelFormSet):
def clean(self):
for person_dict in self.cleaned_data:
person = person_dict.get('id')
alive = person_dict.get('alive')
if person and alive and person.name == "Grace Hopper":
raise forms.ValidationError("Grace is not a Zombie")
class PersonAdmin(admin.ModelAdmin):
list_display = ('name', 'gender', 'alive')
list_editable = ('gender', 'alive')
list_filter = ('gender',)
search_fields = ('^name',)
save_as = True
def get_changelist_formset(self, request, **kwargs):
return super(PersonAdmin, self).get_changelist_formset(request,
formset=BasePersonModelFormSet, **kwargs)
def get_queryset(self, request):
# Order by a field that isn't in list display, to be able to test
# whether ordering is preserved.
return super(PersonAdmin, self).get_queryset(request).order_by('age')
class FooAccountAdmin(admin.StackedInline):
model = FooAccount
extra = 1
class BarAccountAdmin(admin.StackedInline):
model = BarAccount
extra = 1
class PersonaAdmin(admin.ModelAdmin):
inlines = (
FooAccountAdmin,
BarAccountAdmin
)
class SubscriberAdmin(admin.ModelAdmin):
actions = ['mail_admin']
def mail_admin(self, request, selected):
EmailMessage(
'Greetings from a ModelAdmin action',
'This is the test email from an admin action',
'[email protected]',
['[email protected]']
).send()
def external_mail(modeladmin, request, selected):
EmailMessage(
'Greetings from a function action',
'This is the test email from a function action',
'[email protected]',
['[email protected]']
).send()
external_mail.short_description = 'External mail (Another awesome action)'
def redirect_to(modeladmin, request, selected):
from django.http import HttpResponseRedirect
return HttpResponseRedirect('/some-where-else/')
redirect_to.short_description = 'Redirect to (Awesome action)'
def download(modeladmin, request, selected):
buf = StringIO('This is the content of the file')
return StreamingHttpResponse(FileWrapper(buf))
download.short_description = 'Download subscription'
def no_perm(modeladmin, request, selected):
return HttpResponse(content='No permission to perform this action',
status=403)
no_perm.short_description = 'No permission to run'
class ExternalSubscriberAdmin(admin.ModelAdmin):
actions = [redirect_to, external_mail, download, no_perm]
class PodcastAdmin(admin.ModelAdmin):
list_display = ('name', 'release_date')
list_editable = ('release_date',)
date_hierarchy = 'release_date'
ordering = ('name',)
class VodcastAdmin(admin.ModelAdmin):
list_display = ('name', 'released')
list_editable = ('released',)
ordering = ('name',)
class ChildInline(admin.StackedInline):
model = Child
class ParentAdmin(admin.ModelAdmin):
model = Parent
inlines = [ChildInline]
save_as = True
list_editable = ('name',)
def save_related(self, request, form, formsets, change):
super(ParentAdmin, self).save_related(request, form, formsets, change)
first_name, last_name = form.instance.name.split()
for child in form.instance.child_set.all():
if len(child.name.split()) < 2:
child.name = child.name + ' ' + last_name
child.save()
class EmptyModelAdmin(admin.ModelAdmin):
def get_queryset(self, request):
return super(EmptyModelAdmin, self).get_queryset(request).filter(pk__gt=1)
class OldSubscriberAdmin(admin.ModelAdmin):
actions = None
temp_storage = FileSystemStorage(tempfile.mkdtemp())
UPLOAD_TO = os.path.join(temp_storage.location, 'test_upload')
class PictureInline(admin.TabularInline):
model = Picture
extra = 1
class GalleryAdmin(admin.ModelAdmin):
inlines = [PictureInline]
class PictureAdmin(admin.ModelAdmin):
pass
class LanguageAdmin(admin.ModelAdmin):
list_display = ['iso', 'shortlist', 'english_name', 'name']
list_editable = ['shortlist']
class RecommendationAdmin(admin.ModelAdmin):
show_full_result_count = False
search_fields = ('=titletranslation__text', '=recommender__titletranslation__text',)
class WidgetInline(admin.StackedInline):
model = Widget
class DooHickeyInline(admin.StackedInline):
model = DooHickey
class GrommetInline(admin.StackedInline):
model = Grommet
class WhatsitInline(admin.StackedInline):
model = Whatsit
class FancyDoodadInline(admin.StackedInline):
model = FancyDoodad
class CategoryAdmin(admin.ModelAdmin):
list_display = ('id', 'collector', 'order')
list_editable = ('order',)
class CategoryInline(admin.StackedInline):
model = Category
class CollectorAdmin(admin.ModelAdmin):
inlines = [
WidgetInline, DooHickeyInline, GrommetInline, WhatsitInline,
FancyDoodadInline, CategoryInline
]
class LinkInline(admin.TabularInline):
model = Link
extra = 1
readonly_fields = ("posted", "multiline")
def multiline(self, instance):
return "InlineMultiline\ntest\nstring"
class SubPostInline(admin.TabularInline):
model = PrePopulatedSubPost
prepopulated_fields = {
'subslug': ('subtitle',)
}
def get_readonly_fields(self, request, obj=None):
if obj and obj.published:
return ('subslug',)
return self.readonly_fields
def get_prepopulated_fields(self, request, obj=None):
if obj and obj.published:
return {}
return self.prepopulated_fields
class PrePopulatedPostAdmin(admin.ModelAdmin):
list_display = ['title', 'slug']
prepopulated_fields = {
'slug': ('title',)
}
inlines = [SubPostInline]
def get_readonly_fields(self, request, obj=None):
if obj and obj.published:
return ('slug',)
return self.readonly_fields
def get_prepopulated_fields(self, request, obj=None):
if obj and obj.published:
return {}
return self.prepopulated_fields
class PostAdmin(admin.ModelAdmin):
list_display = ['title', 'public']
readonly_fields = (
'posted', 'awesomeness_level', 'coolness', 'value',
'multiline', 'multiline_html', lambda obj: "foo"
)
inlines = [
LinkInline
]
def coolness(self, instance):
if instance.pk:
return "%d amount of cool." % instance.pk
else:
return "Unknown coolness."
def value(self, instance):
return 1000
def multiline(self, instance):
return "Multiline\ntest\nstring"
def multiline_html(self, instance):
return mark_safe("Multiline<br>\nhtml<br>\ncontent")
multiline_html.allow_tags = True
value.short_description = 'Value in $US'
class FieldOverridePostForm(forms.ModelForm):
model = FieldOverridePost
class Meta:
help_texts = {
'posted': 'Overridden help text for the date',
}
labels = {
'public': 'Overridden public label',
}
class FieldOverridePostAdmin(PostAdmin):
form = FieldOverridePostForm
class CustomChangeList(ChangeList):
def get_queryset(self, request):
return self.root_queryset.filter(pk=9999) # Does not exist
class GadgetAdmin(admin.ModelAdmin):
def get_changelist(self, request, **kwargs):
return CustomChangeList
class ToppingAdmin(admin.ModelAdmin):
readonly_fields = ('pizzas',)
class PizzaAdmin(admin.ModelAdmin):
readonly_fields = ('toppings',)
class WorkHourAdmin(admin.ModelAdmin):
list_display = ('datum', 'employee')
list_filter = ('employee',)
class FoodDeliveryAdmin(admin.ModelAdmin):
list_display = ('reference', 'driver', 'restaurant')
list_editable = ('driver', 'restaurant')
class CoverLetterAdmin(admin.ModelAdmin):
"""
A ModelAdmin with a custom get_queryset() method that uses defer(), to test
verbose_name display in messages shown after adding/editing CoverLetter
instances.
Note that the CoverLetter model defines a __unicode__ method.
For testing fix for ticket #14529.
"""
def get_queryset(self, request):
return super(CoverLetterAdmin, self).get_queryset(request).defer('date_written')
class PaperAdmin(admin.ModelAdmin):
"""
A ModelAdmin with a custom get_queryset() method that uses only(), to test
verbose_name display in messages shown after adding/editing Paper
instances.
For testing fix for ticket #14529.
"""
def get_queryset(self, request):
return super(PaperAdmin, self).get_queryset(request).only('title')
class ShortMessageAdmin(admin.ModelAdmin):
"""
A ModelAdmin with a custom get_queryset() method that uses defer(), to test
verbose_name display in messages shown after adding/editing ShortMessage
instances.
For testing fix for ticket #14529.
"""
def get_queryset(self, request):
return super(ShortMessageAdmin, self).get_queryset(request).defer('timestamp')
class TelegramAdmin(admin.ModelAdmin):
"""
A ModelAdmin with a custom get_queryset() method that uses only(), to test
verbose_name display in messages shown after adding/editing Telegram
instances.
Note that the Telegram model defines a __unicode__ method.
For testing fix for ticket #14529.
"""
def get_queryset(self, request):
return super(TelegramAdmin, self).get_queryset(request).only('title')
class StoryForm(forms.ModelForm):
class Meta:
widgets = {'title': forms.HiddenInput}
class StoryAdmin(admin.ModelAdmin):
list_display = ('id', 'title', 'content')
list_display_links = ('title',) # 'id' not in list_display_links
list_editable = ('content', )
form = StoryForm
ordering = ["-pk"]
class OtherStoryAdmin(admin.ModelAdmin):
list_display = ('id', 'title', 'content')
list_display_links = ('title', 'id') # 'id' in list_display_links
list_editable = ('content', )
ordering = ["-pk"]
class ComplexSortedPersonAdmin(admin.ModelAdmin):
list_display = ('name', 'age', 'is_employee', 'colored_name')
ordering = ('name',)
def colored_name(self, obj):
return '<span style="color: #%s;">%s</span>' % ('ff00ff', obj.name)
colored_name.allow_tags = True
colored_name.admin_order_field = 'name'
class PluggableSearchPersonAdmin(admin.ModelAdmin):
list_display = ('name', 'age')
search_fields = ('name',)
def get_search_results(self, request, queryset, search_term):
queryset, use_distinct = super(PluggableSearchPersonAdmin, self).get_search_results(request, queryset, search_term)
try:
search_term_as_int = int(search_term)
queryset |= self.model.objects.filter(age=search_term_as_int)
except ValueError:
pass
return queryset, use_distinct
class AlbumAdmin(admin.ModelAdmin):
list_filter = ['title']
class PrePopulatedPostLargeSlugAdmin(admin.ModelAdmin):
prepopulated_fields = {
'slug': ('title',)
}
class AdminOrderedFieldAdmin(admin.ModelAdmin):
ordering = ('order',)
list_display = ('stuff', 'order')
class AdminOrderedModelMethodAdmin(admin.ModelAdmin):
ordering = ('order',)
list_display = ('stuff', 'some_order')
class AdminOrderedAdminMethodAdmin(admin.ModelAdmin):
def some_admin_order(self, obj):
return obj.order
some_admin_order.admin_order_field = 'order'
ordering = ('order',)
list_display = ('stuff', 'some_admin_order')
def admin_ordered_callable(obj):
return obj.order
admin_ordered_callable.admin_order_field = 'order'
class AdminOrderedCallableAdmin(admin.ModelAdmin):
ordering = ('order',)
list_display = ('stuff', admin_ordered_callable)
class ReportAdmin(admin.ModelAdmin):
def extra(self, request):
return HttpResponse()
def get_urls(self):
# Corner case: Don't call parent implementation
return [
url(r'^extra/$',
self.extra,
name='cable_extra'),
]
class CustomTemplateBooleanFieldListFilter(BooleanFieldListFilter):
template = 'custom_filter_template.html'
class CustomTemplateFilterColorAdmin(admin.ModelAdmin):
list_filter = (('warm', CustomTemplateBooleanFieldListFilter),)
# For Selenium Prepopulated tests -------------------------------------
class RelatedPrepopulatedInline1(admin.StackedInline):
fieldsets = (
(None, {
'fields': (('pubdate', 'status'), ('name', 'slug1', 'slug2',),)
}),
)
formfield_overrides = {models.CharField: {'strip': False}}
model = RelatedPrepopulated
extra = 1
prepopulated_fields = {'slug1': ['name', 'pubdate'],
'slug2': ['status', 'name']}
class RelatedPrepopulatedInline2(admin.TabularInline):
model = RelatedPrepopulated
extra = 1
prepopulated_fields = {'slug1': ['name', 'pubdate'],
'slug2': ['status', 'name']}
class MainPrepopulatedAdmin(admin.ModelAdmin):
inlines = [RelatedPrepopulatedInline1, RelatedPrepopulatedInline2]
fieldsets = (
(None, {
'fields': (('pubdate', 'status'), ('name', 'slug1', 'slug2', 'slug3'))
}),
)
formfield_overrides = {models.CharField: {'strip': False}}
prepopulated_fields = {
'slug1': ['name', 'pubdate'],
'slug2': ['status', 'name'],
'slug3': ['name'],
}
class UnorderedObjectAdmin(admin.ModelAdmin):
list_display = ['name']
list_editable = ['name']
list_per_page = 2
class UndeletableObjectAdmin(admin.ModelAdmin):
def change_view(self, *args, **kwargs):
kwargs['extra_context'] = {'show_delete': False}
return super(UndeletableObjectAdmin, self).change_view(*args, **kwargs)
class UnchangeableObjectAdmin(admin.ModelAdmin):
def get_urls(self):
# Disable change_view, but leave other urls untouched
urlpatterns = super(UnchangeableObjectAdmin, self).get_urls()
return [p for p in urlpatterns if p.name and not p.name.endswith("_change")]
def callable_on_unknown(obj):
return obj.unknown
class AttributeErrorRaisingAdmin(admin.ModelAdmin):
list_display = [callable_on_unknown, ]
class CustomManagerAdmin(admin.ModelAdmin):
def get_queryset(self, request):
return FilteredManager.objects
class MessageTestingAdmin(admin.ModelAdmin):
actions = ["message_debug", "message_info", "message_success",
"message_warning", "message_error", "message_extra_tags"]
def message_debug(self, request, selected):
self.message_user(request, "Test debug", level="debug")
def message_info(self, request, selected):
self.message_user(request, "Test info", level="info")
def message_success(self, request, selected):
self.message_user(request, "Test success", level="success")
def message_warning(self, request, selected):
self.message_user(request, "Test warning", level="warning")
def message_error(self, request, selected):
self.message_user(request, "Test error", level="error")
def message_extra_tags(self, request, selected):
self.message_user(request, "Test tags", extra_tags="extra_tag")
class ChoiceList(admin.ModelAdmin):
list_display = ['choice']
readonly_fields = ['choice']
fields = ['choice']
class DependentChildAdminForm(forms.ModelForm):
"""
Issue #20522
Form to test child dependency on parent object's validation
"""
def clean(self):
parent = self.cleaned_data.get('parent')
if parent.family_name and parent.family_name != self.cleaned_data.get('family_name'):
raise ValidationError("Children must share a family name with their parents " +
"in this contrived test case")
return super(DependentChildAdminForm, self).clean()
class DependentChildInline(admin.TabularInline):
model = DependentChild
form = DependentChildAdminForm
class ParentWithDependentChildrenAdmin(admin.ModelAdmin):
inlines = [DependentChildInline]
# Tests for ticket 11277 ----------------------------------
class FormWithoutHiddenField(forms.ModelForm):
first = forms.CharField()
second = forms.CharField()
class FormWithoutVisibleField(forms.ModelForm):
first = forms.CharField(widget=forms.HiddenInput)
second = forms.CharField(widget=forms.HiddenInput)
class FormWithVisibleAndHiddenField(forms.ModelForm):
first = forms.CharField(widget=forms.HiddenInput)
second = forms.CharField()
class EmptyModelVisibleAdmin(admin.ModelAdmin):
form = FormWithoutHiddenField
fieldsets = (
(None, {
'fields': (('first', 'second'),),
}),
)
class EmptyModelHiddenAdmin(admin.ModelAdmin):
form = FormWithoutVisibleField
fieldsets = EmptyModelVisibleAdmin.fieldsets
class EmptyModelMixinAdmin(admin.ModelAdmin):
form = FormWithVisibleAndHiddenField
fieldsets = EmptyModelVisibleAdmin.fieldsets
class CityInlineAdmin(admin.TabularInline):
model = City
view_on_site = False
class StateAdmin(admin.ModelAdmin):
inlines = [CityInlineAdmin]
class RestaurantInlineAdmin(admin.TabularInline):
model = Restaurant
view_on_site = True
class CityAdmin(admin.ModelAdmin):
inlines = [RestaurantInlineAdmin]
view_on_site = True
class WorkerAdmin(admin.ModelAdmin):
def view_on_site(self, obj):
return '/worker/%s/%s/' % (obj.surname, obj.name)
class WorkerInlineAdmin(admin.TabularInline):
model = Worker
def view_on_site(self, obj):
return '/worker_inline/%s/%s/' % (obj.surname, obj.name)
class RestaurantAdmin(admin.ModelAdmin):
inlines = [WorkerInlineAdmin]
view_on_site = False
def get_changeform_initial_data(self, request):
return {'name': 'overridden_value'}
class FunkyTagAdmin(admin.ModelAdmin):
list_display = ('name', 'content_object')
class InlineReferenceInline(admin.TabularInline):
model = InlineReference
class InlineRefererAdmin(admin.ModelAdmin):
inlines = [InlineReferenceInline]
class GetFormsetsArgumentCheckingAdmin(admin.ModelAdmin):
fields = ['name']
def add_view(self, request, *args, **kwargs):
request.is_add_view = True
return super(GetFormsetsArgumentCheckingAdmin, self).add_view(request, *args, **kwargs)
def change_view(self, request, *args, **kwargs):
request.is_add_view = False
return super(GetFormsetsArgumentCheckingAdmin, self).change_view(request, *args, **kwargs)
def get_formsets_with_inlines(self, request, obj=None):
if request.is_add_view and obj is not None:
raise Exception("'obj' passed to get_formsets_with_inlines wasn't None during add_view")
if not request.is_add_view and obj is None:
raise Exception("'obj' passed to get_formsets_with_inlines was None during change_view")
return super(GetFormsetsArgumentCheckingAdmin, self).get_formsets_with_inlines(request, obj)
site = admin.AdminSite(name="admin")
site.site_url = '/my-site-url/'
site.register(Article, ArticleAdmin)
site.register(CustomArticle, CustomArticleAdmin)
site.register(Section, save_as=True, inlines=[ArticleInline], readonly_fields=['name_property'])
site.register(ModelWithStringPrimaryKey)
site.register(Color)
site.register(Thing, ThingAdmin)
site.register(Actor)
site.register(Inquisition, InquisitionAdmin)
site.register(Sketch, SketchAdmin)
site.register(Person, PersonAdmin)
site.register(Persona, PersonaAdmin)
site.register(Subscriber, SubscriberAdmin)
site.register(ExternalSubscriber, ExternalSubscriberAdmin)
site.register(OldSubscriber, OldSubscriberAdmin)
site.register(Podcast, PodcastAdmin)
site.register(Vodcast, VodcastAdmin)
site.register(Parent, ParentAdmin)
site.register(EmptyModel, EmptyModelAdmin)
site.register(Fabric, FabricAdmin)
site.register(Gallery, GalleryAdmin)
site.register(Picture, PictureAdmin)
site.register(Language, LanguageAdmin)
site.register(Recommendation, RecommendationAdmin)
site.register(Recommender)
site.register(Collector, CollectorAdmin)
site.register(Category, CategoryAdmin)
site.register(Post, PostAdmin)
site.register(FieldOverridePost, FieldOverridePostAdmin)
site.register(Gadget, GadgetAdmin)
site.register(Villain)
site.register(SuperVillain)
site.register(Plot)
site.register(PlotDetails)
site.register(CyclicOne)
site.register(CyclicTwo)
site.register(WorkHour, WorkHourAdmin)
site.register(Reservation)
site.register(FoodDelivery, FoodDeliveryAdmin)
site.register(RowLevelChangePermissionModel, RowLevelChangePermissionModelAdmin)
site.register(Paper, PaperAdmin)
site.register(CoverLetter, CoverLetterAdmin)
site.register(ShortMessage, ShortMessageAdmin)
site.register(Telegram, TelegramAdmin)
site.register(Story, StoryAdmin)
site.register(OtherStory, OtherStoryAdmin)
site.register(Report, ReportAdmin)
site.register(MainPrepopulated, MainPrepopulatedAdmin)
site.register(UnorderedObject, UnorderedObjectAdmin)
site.register(UndeletableObject, UndeletableObjectAdmin)
site.register(UnchangeableObject, UnchangeableObjectAdmin)
site.register(State, StateAdmin)
site.register(City, CityAdmin)
site.register(Restaurant, RestaurantAdmin)
site.register(Worker, WorkerAdmin)
site.register(FunkyTag, FunkyTagAdmin)
site.register(ReferencedByParent)
site.register(ChildOfReferer)
site.register(ReferencedByInline)
site.register(InlineReferer, InlineRefererAdmin)
# We intentionally register Promo and ChapterXtra1 but not Chapter nor ChapterXtra2.
# That way we cover all four cases:
# related ForeignKey object registered in admin
# related ForeignKey object not registered in admin
# related OneToOne object registered in admin
# related OneToOne object not registered in admin
# when deleting Book, so as to exercise all four troublesome (w.r.t. escaping
# and calling force_text to avoid problems on Python 2.3) paths through
# contrib.admin.utils's get_deleted_objects function.
site.register(Book, inlines=[ChapterInline])
site.register(Promo)
site.register(ChapterXtra1, ChapterXtra1Admin)
site.register(Pizza, PizzaAdmin)
site.register(Topping, ToppingAdmin)
site.register(Album, AlbumAdmin)
site.register(Question)
site.register(Answer)
site.register(PrePopulatedPost, PrePopulatedPostAdmin)
site.register(ComplexSortedPerson, ComplexSortedPersonAdmin)
site.register(FilteredManager, CustomManagerAdmin)
site.register(PluggableSearchPerson, PluggableSearchPersonAdmin)
site.register(PrePopulatedPostLargeSlug, PrePopulatedPostLargeSlugAdmin)
site.register(AdminOrderedField, AdminOrderedFieldAdmin)
site.register(AdminOrderedModelMethod, AdminOrderedModelMethodAdmin)
site.register(AdminOrderedAdminMethod, AdminOrderedAdminMethodAdmin)
site.register(AdminOrderedCallable, AdminOrderedCallableAdmin)
site.register(Color2, CustomTemplateFilterColorAdmin)
site.register(Simple, AttributeErrorRaisingAdmin)
site.register(UserMessenger, MessageTestingAdmin)
site.register(Choice, ChoiceList)
site.register(ParentWithDependentChildren, ParentWithDependentChildrenAdmin)
site.register(EmptyModelHidden, EmptyModelHiddenAdmin)
site.register(EmptyModelVisible, EmptyModelVisibleAdmin)
site.register(EmptyModelMixin, EmptyModelMixinAdmin)
site.register(StumpJoke)
site.register(Recipe)
site.register(Ingredient)
site.register(NotReferenced)
site.register(ExplicitlyProvidedPK, GetFormsetsArgumentCheckingAdmin)
site.register(ImplicitlyGeneratedPK, GetFormsetsArgumentCheckingAdmin)
# Register core models we need in our tests
site.register(User, UserAdmin)
site.register(Group, GroupAdmin)
# Used to test URL namespaces
site2 = admin.AdminSite(name="namespaced_admin")
site2.register(User, UserAdmin)
site2.register(Group, GroupAdmin)
site7 = admin.AdminSite(name="admin7")
site7.register(Article, ArticleAdmin2)
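# Illustrative sketch (not part of this fixture module): an AdminSite
# instance is mounted in a URLconf like any other view collection, e.g.:
#
#     from django.conf.urls import include, url
#     urlpatterns = [
#         url(r'^test_admin/admin/', include(site.urls)),
#         url(r'^test_admin/namespaced_admin/', include(site2.urls)),
#     ]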
|
bsd-3-clause
|
manashmndl/kivy
|
kivy/input/providers/__init__.py
|
52
|
1879
|
# pylint: disable=W0611
'''
Providers
=========
'''
import os
from kivy.utils import platform as core_platform
from kivy.logger import Logger
import kivy.input.providers.tuio
import kivy.input.providers.mouse
platform = core_platform
if platform == 'win' or 'KIVY_DOC' in os.environ:
try:
import kivy.input.providers.wm_touch
import kivy.input.providers.wm_pen
except Exception:
err = 'Input: WM_Touch/WM_Pen not supported by your version of Windows'
Logger.warning(err)
if platform == 'macosx' or 'KIVY_DOC' in os.environ:
try:
import kivy.input.providers.mactouch
except Exception:
err = 'Input: MacMultitouchSupport is not supported by your system'
Logger.exception(err)
if platform == 'linux' or 'KIVY_DOC' in os.environ:
try:
import kivy.input.providers.probesysfs
except Exception:
err = 'Input: ProbeSysfs is not supported by your version of linux'
Logger.exception(err)
try:
import kivy.input.providers.mtdev
except Exception:
err = 'Input: MTDev is not supported by your version of linux'
Logger.exception(err)
try:
import kivy.input.providers.hidinput
except Exception:
err = 'Input: HIDInput is not supported by your version of linux'
Logger.exception(err)
try:
import kivy.input.providers.linuxwacom
except Exception:
err = 'Input: LinuxWacom is not supported by your version of linux'
Logger.exception(err)
if platform == 'android' or 'KIVY_DOC' in os.environ:
try:
import kivy.input.providers.androidjoystick
except Exception:
err = 'Input: AndroidJoystick is not supported by your version ' \
'of Android'
Logger.exception(err)
try:
import kivy.input.providers.leapfinger # NOQA
except Exception:
err = 'Input: LeapFinger is not available on your system'
Logger.exception(err)
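# Illustrative sketch: which of the providers imported above actually start
# is driven by the [input] section of the Kivy config file
# (~/.kivy/config.ini), for example:
#
#     [input]
#     mouse = mouse
#     %(name)s = probesysfs,provider=hidinput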
|
mit
|
alvarolopez/nova
|
nova/db/sqlalchemy/migrate_repo/versions/275_add_keypair_type.py
|
79
|
1459
|
# Copyright (c) 2015 Cloudbase Solutions SRL
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import MetaData, Column, Table
from sqlalchemy import Enum
from nova.objects import keypair
def upgrade(migrate_engine):
"""Function adds key_pairs type field."""
meta = MetaData(bind=migrate_engine)
key_pairs = Table('key_pairs', meta, autoload=True)
shadow_key_pairs = Table('shadow_key_pairs', meta, autoload=True)
enum = Enum('ssh', 'x509', metadata=meta, name='keypair_types')
enum.create()
keypair_type = Column('type', enum, nullable=False,
server_default=keypair.KEYPAIR_TYPE_SSH)
if hasattr(key_pairs.c, 'type'):
key_pairs.c.type.drop()
if hasattr(shadow_key_pairs.c, 'type'):
shadow_key_pairs.c.type.drop()
key_pairs.create_column(keypair_type)
shadow_key_pairs.create_column(keypair_type.copy())
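# Roughly equivalent DDL (an illustrative sketch; the exact statements depend
# on the backend, PostgreSQL shown):
#
#     CREATE TYPE keypair_types AS ENUM ('ssh', 'x509');
#     ALTER TABLE key_pairs
#         ADD COLUMN type keypair_types NOT NULL DEFAULT 'ssh';
#     -- ...and likewise for shadow_key_pairs.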
|
apache-2.0
|
bigdataelephants/scikit-learn
|
examples/manifold/plot_swissroll.py
|
330
|
1446
|
"""
===================================
Swiss Roll reduction with LLE
===================================
An illustration of Swiss Roll reduction
with locally linear embedding
"""
# Author: Fabian Pedregosa -- <[email protected]>
# License: BSD 3 clause (C) INRIA 2011
print(__doc__)
import matplotlib.pyplot as plt
# This import is needed to modify the way figure behaves
from mpl_toolkits.mplot3d import Axes3D
Axes3D
#----------------------------------------------------------------------
# Locally linear embedding of the swiss roll
from sklearn import manifold, datasets
X, color = datasets.samples_generator.make_swiss_roll(n_samples=1500)
print("Computing LLE embedding")
X_r, err = manifold.locally_linear_embedding(X, n_neighbors=12,
n_components=2)
print("Done. Reconstruction error: %g" % err)
#----------------------------------------------------------------------
# Plot result
fig = plt.figure()
try:
# compatibility matplotlib < 1.0
ax = fig.add_subplot(211, projection='3d')
ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=color, cmap=plt.cm.Spectral)
except Exception:
ax = fig.add_subplot(211)
ax.scatter(X[:, 0], X[:, 2], c=color, cmap=plt.cm.Spectral)
ax.set_title("Original data")
ax = fig.add_subplot(212)
ax.scatter(X_r[:, 0], X_r[:, 1], c=color, cmap=plt.cm.Spectral)
plt.axis('tight')
plt.xticks([]), plt.yticks([])
plt.title('Projected data')
plt.show()
|
bsd-3-clause
|
chirilo/kuma
|
vendor/packages/babel/messages/frontend.py
|
63
|
52212
|
# -*- coding: utf-8 -*-
"""
babel.messages.frontend
~~~~~~~~~~~~~~~~~~~~~~~
Frontends for the message extraction functionality.
:copyright: (c) 2013 by the Babel Team.
:license: BSD, see LICENSE for more details.
"""
try:
from ConfigParser import RawConfigParser
except ImportError:
from configparser import RawConfigParser
from datetime import datetime
from distutils import log
from distutils.cmd import Command
from distutils.errors import DistutilsOptionError, DistutilsSetupError
from locale import getpreferredencoding
import logging
from optparse import OptionParser
import os
import re
import shutil
import sys
import tempfile
from babel import __version__ as VERSION
from babel import Locale, localedata
from babel.core import UnknownLocaleError
from babel.messages.catalog import Catalog
from babel.messages.extract import extract_from_dir, DEFAULT_KEYWORDS, \
DEFAULT_MAPPING
from babel.messages.mofile import write_mo
from babel.messages.pofile import read_po, write_po
from babel.util import odict, LOCALTZ
from babel._compat import string_types, BytesIO, PY2
class compile_catalog(Command):
"""Catalog compilation command for use in ``setup.py`` scripts.
If correctly installed, this command is available to Setuptools-using
setup scripts automatically. For projects using plain old ``distutils``,
the command needs to be registered explicitly in ``setup.py``::
from babel.messages.frontend import compile_catalog
setup(
...
cmdclass = {'compile_catalog': compile_catalog}
)
.. versionadded:: 0.9
"""
description = 'compile message catalogs to binary MO files'
user_options = [
('domain=', 'D',
"domain of PO file (default 'messages')"),
('directory=', 'd',
'path to base directory containing the catalogs'),
('input-file=', 'i',
'name of the input file'),
('output-file=', 'o',
"name of the output file (default "
"'<output_dir>/<locale>/LC_MESSAGES/<domain>.mo')"),
('locale=', 'l',
'locale of the catalog to compile'),
('use-fuzzy', 'f',
'also include fuzzy translations'),
('statistics', None,
'print statistics about translations')
]
boolean_options = ['use-fuzzy', 'statistics']
def initialize_options(self):
self.domain = 'messages'
self.directory = None
self.input_file = None
self.output_file = None
self.locale = None
self.use_fuzzy = False
self.statistics = False
def finalize_options(self):
if not self.input_file and not self.directory:
raise DistutilsOptionError('you must specify either the input file '
'or the base directory')
if not self.output_file and not self.directory:
raise DistutilsOptionError('you must specify either the output file '
'or the base directory')
def run(self):
po_files = []
mo_files = []
if not self.input_file:
if self.locale:
po_files.append((self.locale,
os.path.join(self.directory, self.locale,
'LC_MESSAGES',
self.domain + '.po')))
mo_files.append(os.path.join(self.directory, self.locale,
'LC_MESSAGES',
self.domain + '.mo'))
else:
for locale in os.listdir(self.directory):
po_file = os.path.join(self.directory, locale,
'LC_MESSAGES', self.domain + '.po')
if os.path.exists(po_file):
po_files.append((locale, po_file))
mo_files.append(os.path.join(self.directory, locale,
'LC_MESSAGES',
self.domain + '.mo'))
else:
po_files.append((self.locale, self.input_file))
if self.output_file:
mo_files.append(self.output_file)
else:
mo_files.append(os.path.join(self.directory, self.locale,
'LC_MESSAGES',
self.domain + '.mo'))
if not po_files:
raise DistutilsOptionError('no message catalogs found')
for idx, (locale, po_file) in enumerate(po_files):
mo_file = mo_files[idx]
infile = open(po_file, 'rb')
try:
catalog = read_po(infile, locale)
finally:
infile.close()
if self.statistics:
translated = 0
for message in list(catalog)[1:]:
if message.string:
translated += 1
percentage = 0
if len(catalog):
percentage = translated * 100 // len(catalog)
log.info('%d of %d messages (%d%%) translated in %r',
translated, len(catalog), percentage, po_file)
if catalog.fuzzy and not self.use_fuzzy:
log.warn('catalog %r is marked as fuzzy, skipping', po_file)
continue
for message, errors in catalog.check():
for error in errors:
log.error('error: %s:%d: %s', po_file, message.lineno,
error)
log.info('compiling catalog %r to %r', po_file, mo_file)
outfile = open(mo_file, 'wb')
try:
write_mo(outfile, catalog, use_fuzzy=self.use_fuzzy)
finally:
outfile.close()
class extract_messages(Command):
"""Message extraction command for use in ``setup.py`` scripts.
If correctly installed, this command is available to Setuptools-using
setup scripts automatically. For projects using plain old ``distutils``,
the command needs to be registered explicitly in ``setup.py``::
from babel.messages.frontend import extract_messages
setup(
...
cmdclass = {'extract_messages': extract_messages}
)
"""
description = 'extract localizable strings from the project code'
user_options = [
('charset=', None,
'charset to use in the output file'),
('keywords=', 'k',
'space-separated list of keywords to look for in addition to the '
'defaults'),
('no-default-keywords', None,
'do not include the default keywords'),
('mapping-file=', 'F',
'path to the mapping configuration file'),
('no-location', None,
'do not include location comments with filename and line number'),
('omit-header', None,
'do not include msgid "" entry in header'),
('output-file=', 'o',
'name of the output file'),
('width=', 'w',
'set output line width (default 76)'),
('no-wrap', None,
'do not break long message lines, longer than the output line width, '
'into several lines'),
('sort-output', None,
'generate sorted output (default False)'),
('sort-by-file', None,
'sort output by file location (default False)'),
('msgid-bugs-address=', None,
'set report address for msgid'),
('copyright-holder=', None,
'set copyright holder in output'),
('add-comments=', 'c',
'place comment block with TAG (or those preceding keyword lines) in '
'output file. Separate multiple TAGs with commas(,)'),
('strip-comments', None,
'strip the comment TAGs from the comments.'),
('input-dirs=', None,
'directories that should be scanned for messages. Separate multiple '
'directories with commas(,)'),
]
boolean_options = [
'no-default-keywords', 'no-location', 'omit-header', 'no-wrap',
'sort-output', 'sort-by-file', 'strip-comments'
]
def initialize_options(self):
self.charset = 'utf-8'
self.keywords = ''
self._keywords = DEFAULT_KEYWORDS.copy()
self.no_default_keywords = False
self.mapping_file = None
self.no_location = False
self.omit_header = False
self.output_file = None
self.input_dirs = None
self.width = None
self.no_wrap = False
self.sort_output = False
self.sort_by_file = False
self.msgid_bugs_address = None
self.copyright_holder = None
self.add_comments = None
self._add_comments = []
self.strip_comments = False
def finalize_options(self):
if self.no_default_keywords and not self.keywords:
raise DistutilsOptionError('you must specify new keywords if you '
'disable the default ones')
if self.no_default_keywords:
self._keywords = {}
if self.keywords:
self._keywords.update(parse_keywords(self.keywords.split()))
if not self.output_file:
raise DistutilsOptionError('no output file specified')
if self.no_wrap and self.width:
raise DistutilsOptionError("'--no-wrap' and '--width' are mutually "
"exclusive")
if not self.no_wrap and not self.width:
self.width = 76
elif self.width is not None:
self.width = int(self.width)
if self.sort_output and self.sort_by_file:
raise DistutilsOptionError("'--sort-output' and '--sort-by-file' "
"are mutually exclusive")
if self.input_dirs:
self.input_dirs = re.split(r',\s*', self.input_dirs)
else:
self.input_dirs = dict.fromkeys([k.split('.', 1)[0]
for k in self.distribution.packages
]).keys()
if self.add_comments:
self._add_comments = self.add_comments.split(',')
def run(self):
mappings = self._get_mappings()
outfile = open(self.output_file, 'wb')
try:
catalog = Catalog(project=self.distribution.get_name(),
version=self.distribution.get_version(),
msgid_bugs_address=self.msgid_bugs_address,
copyright_holder=self.copyright_holder,
charset=self.charset)
for dirname, (method_map, options_map) in mappings.items():
def callback(filename, method, options):
if method == 'ignore':
return
filepath = os.path.normpath(os.path.join(dirname, filename))
optstr = ''
if options:
optstr = ' (%s)' % ', '.join(['%s="%s"' % (k, v) for
k, v in options.items()])
log.info('extracting messages from %s%s', filepath, optstr)
extracted = extract_from_dir(dirname, method_map, options_map,
keywords=self._keywords,
comment_tags=self._add_comments,
callback=callback,
strip_comment_tags=
self.strip_comments)
for filename, lineno, message, comments, context in extracted:
filepath = os.path.normpath(os.path.join(dirname, filename))
catalog.add(message, None, [(filepath, lineno)],
auto_comments=comments, context=context)
log.info('writing PO template file to %s' % self.output_file)
write_po(outfile, catalog, width=self.width,
no_location=self.no_location,
omit_header=self.omit_header,
sort_output=self.sort_output,
sort_by_file=self.sort_by_file)
finally:
outfile.close()
def _get_mappings(self):
mappings = {}
if self.mapping_file:
fileobj = open(self.mapping_file, 'U')
try:
method_map, options_map = parse_mapping(fileobj)
for dirname in self.input_dirs:
mappings[dirname] = method_map, options_map
finally:
fileobj.close()
elif getattr(self.distribution, 'message_extractors', None):
message_extractors = self.distribution.message_extractors
for dirname, mapping in message_extractors.items():
if isinstance(mapping, string_types):
method_map, options_map = parse_mapping(BytesIO(mapping))
else:
method_map, options_map = [], {}
for pattern, method, options in mapping:
method_map.append((pattern, method))
options_map[pattern] = options or {}
mappings[dirname] = method_map, options_map
else:
for dirname in self.input_dirs:
mappings[dirname] = DEFAULT_MAPPING, {}
return mappings
def check_message_extractors(dist, name, value):
"""Validate the ``message_extractors`` keyword argument to ``setup()``.
:param dist: the distutils/setuptools ``Distribution`` object
:param name: the name of the keyword argument (should always be
"message_extractors")
:param value: the value of the keyword argument
:raise `DistutilsSetupError`: if the value is not valid
"""
assert name == 'message_extractors'
if not isinstance(value, dict):
raise DistutilsSetupError('the value of the "message_extractors" '
'parameter must be a dictionary')
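# An illustrative sketch of a value this validator accepts (per-directory
# extraction method mappings, as described in the Babel documentation):
#
#     setup(
#         ...
#         message_extractors = {
#             'mypkg': [
#                 ('**.py', 'python', None),
#                 ('**/templates/**.html', 'genshi', None),
#             ],
#         },
#     )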
class init_catalog(Command):
"""New catalog initialization command for use in ``setup.py`` scripts.
If correctly installed, this command is available to Setuptools-using
setup scripts automatically. For projects using plain old ``distutils``,
the command needs to be registered explicitly in ``setup.py``::
from babel.messages.frontend import init_catalog
setup(
...
cmdclass = {'init_catalog': init_catalog}
)
"""
description = 'create a new catalog based on a POT file'
user_options = [
('domain=', 'D',
"domain of PO file (default 'messages')"),
('input-file=', 'i',
'name of the input file'),
('output-dir=', 'd',
'path to output directory'),
('output-file=', 'o',
"name of the output file (default "
"'<output_dir>/<locale>/LC_MESSAGES/<domain>.po')"),
('locale=', 'l',
'locale for the new localized catalog'),
('width=', 'w',
'set output line width (default 76)'),
('no-wrap', None,
'do not break long message lines, longer than the output line width, '
'into several lines'),
]
boolean_options = ['no-wrap']
def initialize_options(self):
self.output_dir = None
self.output_file = None
self.input_file = None
self.locale = None
self.domain = 'messages'
self.no_wrap = False
self.width = None
def finalize_options(self):
if not self.input_file:
raise DistutilsOptionError('you must specify the input file')
if not self.locale:
raise DistutilsOptionError('you must provide a locale for the '
'new catalog')
try:
self._locale = Locale.parse(self.locale)
except UnknownLocaleError as e:
raise DistutilsOptionError(e)
if not self.output_file and not self.output_dir:
raise DistutilsOptionError('you must specify the output directory')
if not self.output_file:
self.output_file = os.path.join(self.output_dir, self.locale,
'LC_MESSAGES', self.domain + '.po')
if not os.path.exists(os.path.dirname(self.output_file)):
os.makedirs(os.path.dirname(self.output_file))
if self.no_wrap and self.width:
raise DistutilsOptionError("'--no-wrap' and '--width' are mutually "
"exclusive")
if not self.no_wrap and not self.width:
self.width = 76
elif self.width is not None:
self.width = int(self.width)
def run(self):
log.info('creating catalog %r based on %r', self.output_file,
self.input_file)
infile = open(self.input_file, 'rb')
try:
# Although reading from the catalog template, read_po must be fed
# the locale in order to correctly calculate plurals
catalog = read_po(infile, locale=self.locale)
finally:
infile.close()
catalog.locale = self._locale
catalog.revision_date = datetime.now(LOCALTZ)
catalog.fuzzy = False
outfile = open(self.output_file, 'wb')
try:
write_po(outfile, catalog, width=self.width)
finally:
outfile.close()
class update_catalog(Command):
"""Catalog merging command for use in ``setup.py`` scripts.
If correctly installed, this command is available to Setuptools-using
setup scripts automatically. For projects using plain old ``distutils``,
the command needs to be registered explicitly in ``setup.py``::
from babel.messages.frontend import update_catalog
setup(
...
cmdclass = {'update_catalog': update_catalog}
)
.. versionadded:: 0.9
"""
description = 'update message catalogs from a POT file'
user_options = [
('domain=', 'D',
"domain of PO file (default 'messages')"),
('input-file=', 'i',
'name of the input file'),
('output-dir=', 'd',
'path to base directory containing the catalogs'),
('output-file=', 'o',
"name of the output file (default "
"'<output_dir>/<locale>/LC_MESSAGES/<domain>.po')"),
('locale=', 'l',
'locale of the catalog to compile'),
('width=', 'w',
'set output line width (default 76)'),
('no-wrap', None,
'do not break long message lines, longer than the output line width, '
'into several lines'),
('ignore-obsolete', None,
'whether to omit obsolete messages from the output'),
('no-fuzzy-matching', 'N',
'do not use fuzzy matching'),
('previous', None,
'keep previous msgids of translated messages')
]
boolean_options = ['ignore-obsolete', 'no-fuzzy-matching', 'previous']
def initialize_options(self):
self.domain = 'messages'
self.input_file = None
self.output_dir = None
self.output_file = None
self.locale = None
self.width = None
self.no_wrap = False
self.ignore_obsolete = False
self.no_fuzzy_matching = False
self.previous = False
def finalize_options(self):
if not self.input_file:
raise DistutilsOptionError('you must specify the input file')
if not self.output_file and not self.output_dir:
raise DistutilsOptionError('you must specify the output file or '
'directory')
if self.output_file and not self.locale:
raise DistutilsOptionError('you must specify the locale')
if self.no_wrap and self.width:
raise DistutilsOptionError("'--no-wrap' and '--width' are mutually "
"exclusive")
if not self.no_wrap and not self.width:
self.width = 76
elif self.width is not None:
self.width = int(self.width)
if self.no_fuzzy_matching and self.previous:
self.previous = False
def run(self):
po_files = []
if not self.output_file:
if self.locale:
po_files.append((self.locale,
os.path.join(self.output_dir, self.locale,
'LC_MESSAGES',
self.domain + '.po')))
else:
for locale in os.listdir(self.output_dir):
po_file = os.path.join(self.output_dir, locale,
'LC_MESSAGES',
self.domain + '.po')
if os.path.exists(po_file):
po_files.append((locale, po_file))
else:
po_files.append((self.locale, self.output_file))
domain = self.domain
if not domain:
domain = os.path.splitext(os.path.basename(self.input_file))[0]
infile = open(self.input_file, 'rb')
try:
template = read_po(infile)
finally:
infile.close()
if not po_files:
raise DistutilsOptionError('no message catalogs found')
for locale, filename in po_files:
log.info('updating catalog %r based on %r', filename,
self.input_file)
infile = open(filename, 'rb')
try:
catalog = read_po(infile, locale=locale, domain=domain)
finally:
infile.close()
catalog.update(template, self.no_fuzzy_matching)
tmpname = os.path.join(os.path.dirname(filename),
tempfile.gettempprefix() +
os.path.basename(filename))
tmpfile = open(tmpname, 'wb')
try:
try:
write_po(tmpfile, catalog,
ignore_obsolete=self.ignore_obsolete,
include_previous=self.previous, width=self.width)
finally:
tmpfile.close()
except:
os.remove(tmpname)
raise
try:
os.rename(tmpname, filename)
except OSError:
# We're probably on Windows, which doesn't support atomic
# renames, at least not through Python
# If the error is in fact due to a permissions problem, that
# same error is going to be raised from one of the following
# operations
os.remove(filename)
shutil.copy(tmpname, filename)
os.remove(tmpname)
class CommandLineInterface(object):
"""Command-line interface.
This class provides a simple command-line interface to the message
extraction and PO file generation functionality.
"""
usage = '%%prog %s [options] %s'
version = '%%prog %s' % VERSION
commands = {
'compile': 'compile message catalogs to MO files',
'extract': 'extract messages from source files and generate a POT file',
'init': 'create new message catalogs from a POT file',
'update': 'update existing message catalogs from a POT file'
}
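# Typical invocations through the ``pybabel`` console script (sketch):
#
#     pybabel extract -F babel.cfg -o messages.pot .
#     pybabel init -i messages.pot -d translations -l de
#     pybabel compile -d translations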
def run(self, argv=sys.argv):
"""Main entry point of the command-line interface.
:param argv: list of arguments passed on the command-line
"""
self.parser = OptionParser(usage=self.usage % ('command', '[args]'),
version=self.version)
self.parser.disable_interspersed_args()
self.parser.print_help = self._help
self.parser.add_option('--list-locales', dest='list_locales',
action='store_true',
help="print all known locales and exit")
self.parser.add_option('-v', '--verbose', action='store_const',
dest='loglevel', const=logging.DEBUG,
help='print as much as possible')
self.parser.add_option('-q', '--quiet', action='store_const',
dest='loglevel', const=logging.ERROR,
help='print as little as possible')
self.parser.set_defaults(list_locales=False, loglevel=logging.INFO)
options, args = self.parser.parse_args(argv[1:])
self._configure_logging(options.loglevel)
if options.list_locales:
identifiers = localedata.locale_identifiers()
longest = max([len(identifier) for identifier in identifiers])
identifiers.sort()
format = u'%%-%ds %%s' % (longest + 1)
for identifier in identifiers:
locale = Locale.parse(identifier)
output = format % (identifier, locale.english_name)
print(output.encode(sys.stdout.encoding or
getpreferredencoding() or
'ascii', 'replace'))
return 0
if not args:
self.parser.error('no valid command or option passed. '
'Try the -h/--help option for more information.')
cmdname = args[0]
if cmdname not in self.commands:
self.parser.error('unknown command "%s"' % cmdname)
return getattr(self, cmdname)(args[1:])
def _configure_logging(self, loglevel):
self.log = logging.getLogger('babel')
self.log.setLevel(loglevel)
        # Don't add a new handler for every instance initialization (#227);
        # that would cause duplicated output when the CommandLineInterface is
        # used as a normal Python class.
if self.log.handlers:
handler = self.log.handlers[0]
else:
handler = logging.StreamHandler()
self.log.addHandler(handler)
handler.setLevel(loglevel)
formatter = logging.Formatter('%(message)s')
handler.setFormatter(formatter)
def _help(self):
print(self.parser.format_help())
print("commands:")
longest = max([len(command) for command in self.commands])
format = " %%-%ds %%s" % max(8, longest + 1)
commands = sorted(self.commands.items())
for name, description in commands:
print(format % (name, description))
def compile(self, argv):
"""Subcommand for compiling a message catalog to a MO file.
:param argv: the command arguments
:since: version 0.9
"""
parser = OptionParser(usage=self.usage % ('compile', ''),
description=self.commands['compile'])
parser.add_option('--domain', '-D', dest='domain',
help="domain of MO and PO files (default '%default')")
parser.add_option('--directory', '-d', dest='directory',
metavar='DIR', help='base directory of catalog files')
parser.add_option('--locale', '-l', dest='locale', metavar='LOCALE',
help='locale of the catalog')
parser.add_option('--input-file', '-i', dest='input_file',
metavar='FILE', help='name of the input file')
parser.add_option('--output-file', '-o', dest='output_file',
metavar='FILE',
help="name of the output file (default "
"'<output_dir>/<locale>/LC_MESSAGES/"
"<domain>.mo')")
parser.add_option('--use-fuzzy', '-f', dest='use_fuzzy',
action='store_true',
help='also include fuzzy translations (default '
'%default)')
parser.add_option('--statistics', dest='statistics',
action='store_true',
help='print statistics about translations')
parser.set_defaults(domain='messages', use_fuzzy=False,
compile_all=False, statistics=False)
options, args = parser.parse_args(argv)
po_files = []
mo_files = []
if not options.input_file:
if not options.directory:
parser.error('you must specify either the input file or the '
'base directory')
if options.locale:
po_files.append((options.locale,
os.path.join(options.directory,
options.locale, 'LC_MESSAGES',
options.domain + '.po')))
mo_files.append(os.path.join(options.directory, options.locale,
'LC_MESSAGES',
options.domain + '.mo'))
else:
for locale in os.listdir(options.directory):
po_file = os.path.join(options.directory, locale,
'LC_MESSAGES', options.domain + '.po')
if os.path.exists(po_file):
po_files.append((locale, po_file))
mo_files.append(os.path.join(options.directory, locale,
'LC_MESSAGES',
options.domain + '.mo'))
else:
po_files.append((options.locale, options.input_file))
if options.output_file:
mo_files.append(options.output_file)
else:
if not options.directory:
parser.error('you must specify either the output file or '
'the base directory')
mo_files.append(os.path.join(options.directory, options.locale,
'LC_MESSAGES',
options.domain + '.mo'))
if not po_files:
parser.error('no message catalogs found')
for idx, (locale, po_file) in enumerate(po_files):
mo_file = mo_files[idx]
infile = open(po_file, 'rb')
try:
catalog = read_po(infile, locale)
finally:
infile.close()
if options.statistics:
translated = 0
for message in list(catalog)[1:]:
if message.string:
                        translated += 1
percentage = 0
if len(catalog):
percentage = translated * 100 // len(catalog)
self.log.info("%d of %d messages (%d%%) translated in %r",
translated, len(catalog), percentage, po_file)
if catalog.fuzzy and not options.use_fuzzy:
self.log.warning('catalog %r is marked as fuzzy, skipping',
po_file)
continue
for message, errors in catalog.check():
for error in errors:
self.log.error('error: %s:%d: %s', po_file, message.lineno,
error)
self.log.info('compiling catalog %r to %r', po_file, mo_file)
outfile = open(mo_file, 'wb')
try:
write_mo(outfile, catalog, use_fuzzy=options.use_fuzzy)
finally:
outfile.close()
def extract(self, argv):
"""Subcommand for extracting messages from source files and generating
a POT file.
:param argv: the command arguments
"""
parser = OptionParser(usage=self.usage % ('extract', 'dir1 <dir2> ...'),
description=self.commands['extract'])
parser.add_option('--charset', dest='charset',
help='charset to use in the output (default '
'"%default")')
parser.add_option('-k', '--keyword', dest='keywords', action='append',
help='keywords to look for in addition to the '
'defaults. You can specify multiple -k flags on '
'the command line.')
parser.add_option('--no-default-keywords', dest='no_default_keywords',
action='store_true',
help="do not include the default keywords")
parser.add_option('--mapping', '-F', dest='mapping_file',
help='path to the extraction mapping file')
parser.add_option('--no-location', dest='no_location',
action='store_true',
help='do not include location comments with filename '
'and line number')
parser.add_option('--omit-header', dest='omit_header',
action='store_true',
help='do not include msgid "" entry in header')
parser.add_option('-o', '--output', dest='output',
help='path to the output POT file')
parser.add_option('-w', '--width', dest='width', type='int',
help="set output line width (default 76)")
parser.add_option('--no-wrap', dest='no_wrap', action='store_true',
help='do not break long message lines, longer than '
'the output line width, into several lines')
parser.add_option('--sort-output', dest='sort_output',
action='store_true',
help='generate sorted output (default False)')
parser.add_option('--sort-by-file', dest='sort_by_file',
action='store_true',
help='sort output by file location (default False)')
parser.add_option('--msgid-bugs-address', dest='msgid_bugs_address',
metavar='EMAIL@ADDRESS',
help='set report address for msgid')
parser.add_option('--copyright-holder', dest='copyright_holder',
help='set copyright holder in output')
parser.add_option('--project', dest='project',
help='set project name in output')
parser.add_option('--version', dest='version',
help='set project version in output')
parser.add_option('--add-comments', '-c', dest='comment_tags',
metavar='TAG', action='append',
help='place comment block with TAG (or those '
'preceding keyword lines) in output file. One '
'TAG per argument call')
parser.add_option('--strip-comment-tags', '-s',
dest='strip_comment_tags', action='store_true',
help='Strip the comment tags from the comments.')
parser.set_defaults(charset='utf-8', keywords=[],
no_default_keywords=False, no_location=False,
                            omit_header=False, width=None, no_wrap=False,
sort_output=False, sort_by_file=False,
comment_tags=[], strip_comment_tags=False)
options, args = parser.parse_args(argv)
if not args:
parser.error('incorrect number of arguments')
keywords = DEFAULT_KEYWORDS.copy()
if options.no_default_keywords:
if not options.keywords:
parser.error('you must specify new keywords if you disable the '
'default ones')
keywords = {}
if options.keywords:
keywords.update(parse_keywords(options.keywords))
if options.mapping_file:
fileobj = open(options.mapping_file, 'U')
try:
method_map, options_map = parse_mapping(fileobj)
finally:
fileobj.close()
else:
method_map = DEFAULT_MAPPING
options_map = {}
if options.width and options.no_wrap:
parser.error("'--no-wrap' and '--width' are mutually exclusive.")
elif not options.width and not options.no_wrap:
options.width = 76
if options.sort_output and options.sort_by_file:
parser.error("'--sort-output' and '--sort-by-file' are mutually "
"exclusive")
catalog = Catalog(project=options.project,
version=options.version,
msgid_bugs_address=options.msgid_bugs_address,
copyright_holder=options.copyright_holder,
charset=options.charset)
for dirname in args:
if not os.path.isdir(dirname):
parser.error('%r is not a directory' % dirname)
def callback(filename, method, options):
if method == 'ignore':
return
filepath = os.path.normpath(os.path.join(dirname, filename))
optstr = ''
if options:
optstr = ' (%s)' % ', '.join(['%s="%s"' % (k, v) for
k, v in options.items()])
self.log.info('extracting messages from %s%s', filepath,
optstr)
extracted = extract_from_dir(dirname, method_map, options_map,
keywords, options.comment_tags,
callback=callback,
strip_comment_tags=
options.strip_comment_tags)
for filename, lineno, message, comments, context in extracted:
filepath = os.path.normpath(os.path.join(dirname, filename))
catalog.add(message, None, [(filepath, lineno)],
auto_comments=comments, context=context)
catalog_charset = catalog.charset
if options.output not in (None, '-'):
self.log.info('writing PO template file to %s' % options.output)
outfile = open(options.output, 'wb')
close_output = True
else:
outfile = sys.stdout
# This is a bit of a hack on Python 3. stdout is a text stream so
# we need to find the underlying file when we write the PO. In
# later versions of Babel we want the write_po function to accept
# text or binary streams and automatically adjust the encoding.
if not PY2 and hasattr(outfile, 'buffer'):
catalog.charset = outfile.encoding
outfile = outfile.buffer.raw
close_output = False
try:
write_po(outfile, catalog, width=options.width,
no_location=options.no_location,
omit_header=options.omit_header,
sort_output=options.sort_output,
sort_by_file=options.sort_by_file)
finally:
if close_output:
outfile.close()
catalog.charset = catalog_charset
def init(self, argv):
"""Subcommand for creating new message catalogs from a template.
:param argv: the command arguments
"""
parser = OptionParser(usage=self.usage % ('init', ''),
description=self.commands['init'])
parser.add_option('--domain', '-D', dest='domain',
help="domain of PO file (default '%default')")
parser.add_option('--input-file', '-i', dest='input_file',
metavar='FILE', help='name of the input file')
parser.add_option('--output-dir', '-d', dest='output_dir',
metavar='DIR', help='path to output directory')
parser.add_option('--output-file', '-o', dest='output_file',
metavar='FILE',
help="name of the output file (default "
"'<output_dir>/<locale>/LC_MESSAGES/"
"<domain>.po')")
parser.add_option('--locale', '-l', dest='locale', metavar='LOCALE',
help='locale for the new localized catalog')
parser.add_option('-w', '--width', dest='width', type='int',
help="set output line width (default 76)")
parser.add_option('--no-wrap', dest='no_wrap', action='store_true',
help='do not break long message lines, longer than '
'the output line width, into several lines')
parser.set_defaults(domain='messages')
options, args = parser.parse_args(argv)
if not options.locale:
parser.error('you must provide a locale for the new catalog')
try:
locale = Locale.parse(options.locale)
except UnknownLocaleError as e:
parser.error(e)
if not options.input_file:
parser.error('you must specify the input file')
if not options.output_file and not options.output_dir:
parser.error('you must specify the output file or directory')
if not options.output_file:
options.output_file = os.path.join(options.output_dir,
options.locale, 'LC_MESSAGES',
options.domain + '.po')
if not os.path.exists(os.path.dirname(options.output_file)):
os.makedirs(os.path.dirname(options.output_file))
if options.width and options.no_wrap:
parser.error("'--no-wrap' and '--width' are mutually exclusive.")
elif not options.width and not options.no_wrap:
options.width = 76
infile = open(options.input_file, 'r')
try:
# Although reading from the catalog template, read_po must be fed
# the locale in order to correctly calculate plurals
catalog = read_po(infile, locale=options.locale)
finally:
infile.close()
catalog.locale = locale
catalog.revision_date = datetime.now(LOCALTZ)
self.log.info('creating catalog %r based on %r', options.output_file,
options.input_file)
outfile = open(options.output_file, 'wb')
try:
write_po(outfile, catalog, width=options.width)
finally:
outfile.close()
def update(self, argv):
"""Subcommand for updating existing message catalogs from a template.
:param argv: the command arguments
:since: version 0.9
"""
parser = OptionParser(usage=self.usage % ('update', ''),
description=self.commands['update'])
parser.add_option('--domain', '-D', dest='domain',
help="domain of PO file (default '%default')")
parser.add_option('--input-file', '-i', dest='input_file',
metavar='FILE', help='name of the input file')
parser.add_option('--output-dir', '-d', dest='output_dir',
metavar='DIR', help='path to output directory')
parser.add_option('--output-file', '-o', dest='output_file',
metavar='FILE',
help="name of the output file (default "
"'<output_dir>/<locale>/LC_MESSAGES/"
"<domain>.po')")
parser.add_option('--locale', '-l', dest='locale', metavar='LOCALE',
help='locale of the translations catalog')
parser.add_option('-w', '--width', dest='width', type='int',
help="set output line width (default 76)")
        parser.add_option('--no-wrap', dest='no_wrap', action='store_true',
help='do not break long message lines, longer than '
'the output line width, into several lines')
parser.add_option('--ignore-obsolete', dest='ignore_obsolete',
action='store_true',
help='do not include obsolete messages in the output '
'(default %default)')
parser.add_option('--no-fuzzy-matching', '-N', dest='no_fuzzy_matching',
action='store_true',
help='do not use fuzzy matching (default %default)')
parser.add_option('--previous', dest='previous', action='store_true',
help='keep previous msgids of translated messages '
'(default %default)')
parser.set_defaults(domain='messages', ignore_obsolete=False,
no_fuzzy_matching=False, previous=False)
options, args = parser.parse_args(argv)
if not options.input_file:
parser.error('you must specify the input file')
if not options.output_file and not options.output_dir:
parser.error('you must specify the output file or directory')
if options.output_file and not options.locale:
parser.error('you must specify the locale')
if options.no_fuzzy_matching and options.previous:
options.previous = False
po_files = []
if not options.output_file:
if options.locale:
po_files.append((options.locale,
os.path.join(options.output_dir,
options.locale, 'LC_MESSAGES',
options.domain + '.po')))
else:
for locale in os.listdir(options.output_dir):
po_file = os.path.join(options.output_dir, locale,
'LC_MESSAGES',
options.domain + '.po')
if os.path.exists(po_file):
po_files.append((locale, po_file))
else:
po_files.append((options.locale, options.output_file))
domain = options.domain
if not domain:
domain = os.path.splitext(os.path.basename(options.input_file))[0]
infile = open(options.input_file, 'U')
try:
template = read_po(infile)
finally:
infile.close()
if not po_files:
parser.error('no message catalogs found')
if options.width and options.no_wrap:
parser.error("'--no-wrap' and '--width' are mutually exclusive.")
elif not options.width and not options.no_wrap:
options.width = 76
for locale, filename in po_files:
self.log.info('updating catalog %r based on %r', filename,
options.input_file)
infile = open(filename, 'U')
try:
catalog = read_po(infile, locale=locale, domain=domain)
finally:
infile.close()
catalog.update(template, options.no_fuzzy_matching)
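            # Write the updated catalog to a temporary file first, then move
            # it over the original, so a failed write cannot clobber the
            # existing catalog.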
tmpname = os.path.join(os.path.dirname(filename),
tempfile.gettempprefix() +
os.path.basename(filename))
tmpfile = open(tmpname, 'wb')
try:
try:
write_po(tmpfile, catalog,
ignore_obsolete=options.ignore_obsolete,
include_previous=options.previous,
width=options.width)
finally:
tmpfile.close()
except:
os.remove(tmpname)
raise
try:
os.rename(tmpname, filename)
except OSError:
                # We're probably on Windows, which doesn't support atomic
                # renames, at least not through Python. If the error is in
                # fact due to a permissions problem, that same error is
                # going to be raised from one of the following operations.
os.remove(filename)
shutil.copy(tmpname, filename)
os.remove(tmpname)
def main():
return CommandLineInterface().run(sys.argv)
def parse_mapping(fileobj, filename=None):
"""Parse an extraction method mapping from a file-like object.
>>> buf = BytesIO(b'''
... [extractors]
... custom = mypackage.module:myfunc
...
... # Python source files
... [python: **.py]
...
... # Genshi templates
... [genshi: **/templates/**.html]
... include_attrs =
... [genshi: **/templates/**.txt]
... template_class = genshi.template:TextTemplate
... encoding = latin-1
...
... # Some custom extractor
... [custom: **/custom/*.*]
... ''')
>>> method_map, options_map = parse_mapping(buf)
>>> len(method_map)
4
>>> method_map[0]
('**.py', 'python')
>>> options_map['**.py']
{}
>>> method_map[1]
('**/templates/**.html', 'genshi')
>>> options_map['**/templates/**.html']['include_attrs']
''
>>> method_map[2]
('**/templates/**.txt', 'genshi')
>>> options_map['**/templates/**.txt']['template_class']
'genshi.template:TextTemplate'
>>> options_map['**/templates/**.txt']['encoding']
'latin-1'
>>> method_map[3]
('**/custom/*.*', 'mypackage.module:myfunc')
>>> options_map['**/custom/*.*']
{}
:param fileobj: a readable file-like object containing the configuration
text to parse
:see: `extract_from_directory`
"""
extractors = {}
method_map = []
options_map = {}
parser = RawConfigParser()
parser._sections = odict(parser._sections) # We need ordered sections
parser.readfp(fileobj, filename)
for section in parser.sections():
if section == 'extractors':
extractors = dict(parser.items(section))
else:
method, pattern = [part.strip() for part in section.split(':', 1)]
method_map.append((pattern, method))
options_map[pattern] = dict(parser.items(section))
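    # Resolve extractor aliases declared in the [extractors] section to
    # their actual ``module:funcname`` specifications.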
if extractors:
for idx, (pattern, method) in enumerate(method_map):
if method in extractors:
method = extractors[method]
method_map[idx] = (pattern, method)
return (method_map, options_map)
def parse_keywords(strings=[]):
"""Parse keywords specifications from the given list of strings.
    >>> kw = sorted(parse_keywords(['_', 'dgettext:2', 'dngettext:2,3', 'pgettext:1c,2']).items())
    >>> for keyword, indices in kw:
    ...     print((keyword, indices))
    ('_', None)
    ('dgettext', (2,))
    ('dngettext', (2, 3))
    ('pgettext', ((1, 'c'), 2))
"""
keywords = {}
for string in strings:
if ':' in string:
funcname, indices = string.split(':')
else:
funcname, indices = string, None
if funcname not in keywords:
if indices:
inds = []
for x in indices.split(','):
if x[-1] == 'c':
inds.append((int(x[:-1]), 'c'))
else:
inds.append(int(x))
indices = tuple(inds)
keywords[funcname] = indices
return keywords
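# Illustrative sketch (not part of Babel): how an extractor would typically
# consume a keyword spec returned by parse_keywords. A plain integer is a
# 1-based argument position holding a translatable string, while an
# (index, 'c') tuple marks the argument holding the message context.
def _demo_apply_keyword_spec(spec, call_args):
    context = None
    messages = []
    if spec is None:
        # No index spec (e.g. '_'): the first argument is the message.
        messages.append(call_args[0])
    else:
        for index in spec:
            if isinstance(index, tuple):
                context = call_args[index[0] - 1]
            else:
                messages.append(call_args[index - 1])
    return context, messages
# Example: _demo_apply_keyword_spec(((1, 'c'), 2), ('menu', 'Open'))
# returns ('menu', ['Open']).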
if __name__ == '__main__':
main()
|
mpl-2.0
|
tomkralidis/QGIS
|
python/plugins/db_manager/table_viewer.py
|
67
|
3659
|
# -*- coding: utf-8 -*-
"""
/***************************************************************************
Name : DB Manager
Description : Database manager plugin for QGIS
Date : May 23, 2011
copyright : (C) 2011 by Giuseppe Sucameli
email : [email protected]
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
from qgis.PyQt.QtCore import Qt
from qgis.PyQt.QtWidgets import QTableView, QAbstractItemView, QApplication, QAction
from qgis.PyQt.QtGui import QKeySequence, QCursor, QClipboard
from qgis.utils import OverrideCursor
from .db_plugins.plugin import DbError, Table
from .dlg_db_error import DlgDbError
class TableViewer(QTableView):
def __init__(self, parent=None):
QTableView.__init__(self, parent)
self.setSelectionBehavior(QAbstractItemView.SelectRows)
self.setSelectionMode(QAbstractItemView.ExtendedSelection)
self.item = None
self.dirty = False
# allow copying results
copyAction = QAction(QApplication.translate("DBManagerPlugin", "Copy"), self)
self.addAction(copyAction)
copyAction.setShortcuts(QKeySequence.Copy)
copyAction.triggered.connect(self.copySelectedResults)
self._clear()
def refresh(self):
self.dirty = True
self.loadData(self.item)
def loadData(self, item):
if item == self.item and not self.dirty:
return
self._clear()
if item is None:
return
if isinstance(item, Table):
self._loadTableData(item)
else:
return
self.item = item
self.item.aboutToChange.connect(self.setDirty)
def setDirty(self, val=True):
self.dirty = val
def _clear(self):
if self.item is not None:
try:
self.item.aboutToChange.disconnect(self.setDirty)
except:
# do not raise any error if self.item was deleted
pass
self.item = None
self.dirty = False
# delete the old model
model = self.model()
self.setModel(None)
if model:
model.deleteLater()
def _loadTableData(self, table):
with OverrideCursor(Qt.WaitCursor):
try:
# set the new model
self.setModel(table.tableDataModel(self))
except DbError as e:
DlgDbError.showError(e, self)
else:
self.update()
def copySelectedResults(self):
if len(self.selectedIndexes()) <= 0:
return
model = self.model()
# convert to string using tab as separator
text = model.headerToString("\t")
for idx in self.selectionModel().selectedRows():
text += "\n" + model.rowToString(idx.row(), "\t")
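        # Set both the X11 primary selection and the regular clipboard so
        # that middle-click paste and Ctrl+V both work.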
QApplication.clipboard().setText(text, QClipboard.Selection)
QApplication.clipboard().setText(text, QClipboard.Clipboard)
|
gpl-2.0
|
kytvi2p/Sigil
|
3rdparty/python/Lib/encodings/cp862.py
|
272
|
33370
|
""" Python Character Mapping Codec generated from 'VENDORS/MICSFT/PC/CP862.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_map)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_map)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='cp862',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x0080: 0x05d0, # HEBREW LETTER ALEF
0x0081: 0x05d1, # HEBREW LETTER BET
0x0082: 0x05d2, # HEBREW LETTER GIMEL
0x0083: 0x05d3, # HEBREW LETTER DALET
0x0084: 0x05d4, # HEBREW LETTER HE
0x0085: 0x05d5, # HEBREW LETTER VAV
0x0086: 0x05d6, # HEBREW LETTER ZAYIN
0x0087: 0x05d7, # HEBREW LETTER HET
0x0088: 0x05d8, # HEBREW LETTER TET
0x0089: 0x05d9, # HEBREW LETTER YOD
0x008a: 0x05da, # HEBREW LETTER FINAL KAF
0x008b: 0x05db, # HEBREW LETTER KAF
0x008c: 0x05dc, # HEBREW LETTER LAMED
0x008d: 0x05dd, # HEBREW LETTER FINAL MEM
0x008e: 0x05de, # HEBREW LETTER MEM
0x008f: 0x05df, # HEBREW LETTER FINAL NUN
0x0090: 0x05e0, # HEBREW LETTER NUN
0x0091: 0x05e1, # HEBREW LETTER SAMEKH
0x0092: 0x05e2, # HEBREW LETTER AYIN
0x0093: 0x05e3, # HEBREW LETTER FINAL PE
0x0094: 0x05e4, # HEBREW LETTER PE
0x0095: 0x05e5, # HEBREW LETTER FINAL TSADI
0x0096: 0x05e6, # HEBREW LETTER TSADI
0x0097: 0x05e7, # HEBREW LETTER QOF
0x0098: 0x05e8, # HEBREW LETTER RESH
0x0099: 0x05e9, # HEBREW LETTER SHIN
0x009a: 0x05ea, # HEBREW LETTER TAV
0x009b: 0x00a2, # CENT SIGN
0x009c: 0x00a3, # POUND SIGN
0x009d: 0x00a5, # YEN SIGN
0x009e: 0x20a7, # PESETA SIGN
0x009f: 0x0192, # LATIN SMALL LETTER F WITH HOOK
0x00a0: 0x00e1, # LATIN SMALL LETTER A WITH ACUTE
0x00a1: 0x00ed, # LATIN SMALL LETTER I WITH ACUTE
0x00a2: 0x00f3, # LATIN SMALL LETTER O WITH ACUTE
0x00a3: 0x00fa, # LATIN SMALL LETTER U WITH ACUTE
0x00a4: 0x00f1, # LATIN SMALL LETTER N WITH TILDE
0x00a5: 0x00d1, # LATIN CAPITAL LETTER N WITH TILDE
0x00a6: 0x00aa, # FEMININE ORDINAL INDICATOR
0x00a7: 0x00ba, # MASCULINE ORDINAL INDICATOR
0x00a8: 0x00bf, # INVERTED QUESTION MARK
0x00a9: 0x2310, # REVERSED NOT SIGN
0x00aa: 0x00ac, # NOT SIGN
0x00ab: 0x00bd, # VULGAR FRACTION ONE HALF
0x00ac: 0x00bc, # VULGAR FRACTION ONE QUARTER
0x00ad: 0x00a1, # INVERTED EXCLAMATION MARK
0x00ae: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00af: 0x00bb, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00b0: 0x2591, # LIGHT SHADE
0x00b1: 0x2592, # MEDIUM SHADE
0x00b2: 0x2593, # DARK SHADE
0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL
0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x00b5: 0x2561, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
0x00b6: 0x2562, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
0x00b7: 0x2556, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
0x00b8: 0x2555, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL
0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT
0x00bd: 0x255c, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
0x00be: 0x255b, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT
0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL
0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x00c6: 0x255e, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
0x00c7: 0x255f, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL
0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x00cf: 0x2567, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
0x00d0: 0x2568, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
0x00d1: 0x2564, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
0x00d2: 0x2565, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
0x00d3: 0x2559, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
0x00d4: 0x2558, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
0x00d5: 0x2552, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
0x00d6: 0x2553, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
0x00d7: 0x256b, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
0x00d8: 0x256a, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT
0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x00db: 0x2588, # FULL BLOCK
0x00dc: 0x2584, # LOWER HALF BLOCK
0x00dd: 0x258c, # LEFT HALF BLOCK
0x00de: 0x2590, # RIGHT HALF BLOCK
0x00df: 0x2580, # UPPER HALF BLOCK
0x00e0: 0x03b1, # GREEK SMALL LETTER ALPHA
0x00e1: 0x00df, # LATIN SMALL LETTER SHARP S (GERMAN)
0x00e2: 0x0393, # GREEK CAPITAL LETTER GAMMA
0x00e3: 0x03c0, # GREEK SMALL LETTER PI
0x00e4: 0x03a3, # GREEK CAPITAL LETTER SIGMA
0x00e5: 0x03c3, # GREEK SMALL LETTER SIGMA
0x00e6: 0x00b5, # MICRO SIGN
0x00e7: 0x03c4, # GREEK SMALL LETTER TAU
0x00e8: 0x03a6, # GREEK CAPITAL LETTER PHI
0x00e9: 0x0398, # GREEK CAPITAL LETTER THETA
0x00ea: 0x03a9, # GREEK CAPITAL LETTER OMEGA
0x00eb: 0x03b4, # GREEK SMALL LETTER DELTA
0x00ec: 0x221e, # INFINITY
0x00ed: 0x03c6, # GREEK SMALL LETTER PHI
0x00ee: 0x03b5, # GREEK SMALL LETTER EPSILON
0x00ef: 0x2229, # INTERSECTION
0x00f0: 0x2261, # IDENTICAL TO
0x00f1: 0x00b1, # PLUS-MINUS SIGN
0x00f2: 0x2265, # GREATER-THAN OR EQUAL TO
0x00f3: 0x2264, # LESS-THAN OR EQUAL TO
0x00f4: 0x2320, # TOP HALF INTEGRAL
0x00f5: 0x2321, # BOTTOM HALF INTEGRAL
0x00f6: 0x00f7, # DIVISION SIGN
0x00f7: 0x2248, # ALMOST EQUAL TO
0x00f8: 0x00b0, # DEGREE SIGN
0x00f9: 0x2219, # BULLET OPERATOR
0x00fa: 0x00b7, # MIDDLE DOT
0x00fb: 0x221a, # SQUARE ROOT
0x00fc: 0x207f, # SUPERSCRIPT LATIN SMALL LETTER N
0x00fd: 0x00b2, # SUPERSCRIPT TWO
0x00fe: 0x25a0, # BLACK SQUARE
0x00ff: 0x00a0, # NO-BREAK SPACE
})
### Decoding Table
decoding_table = (
'\x00' # 0x0000 -> NULL
'\x01' # 0x0001 -> START OF HEADING
'\x02' # 0x0002 -> START OF TEXT
'\x03' # 0x0003 -> END OF TEXT
'\x04' # 0x0004 -> END OF TRANSMISSION
'\x05' # 0x0005 -> ENQUIRY
'\x06' # 0x0006 -> ACKNOWLEDGE
'\x07' # 0x0007 -> BELL
'\x08' # 0x0008 -> BACKSPACE
'\t' # 0x0009 -> HORIZONTAL TABULATION
'\n' # 0x000a -> LINE FEED
'\x0b' # 0x000b -> VERTICAL TABULATION
'\x0c' # 0x000c -> FORM FEED
'\r' # 0x000d -> CARRIAGE RETURN
'\x0e' # 0x000e -> SHIFT OUT
'\x0f' # 0x000f -> SHIFT IN
'\x10' # 0x0010 -> DATA LINK ESCAPE
'\x11' # 0x0011 -> DEVICE CONTROL ONE
'\x12' # 0x0012 -> DEVICE CONTROL TWO
'\x13' # 0x0013 -> DEVICE CONTROL THREE
'\x14' # 0x0014 -> DEVICE CONTROL FOUR
'\x15' # 0x0015 -> NEGATIVE ACKNOWLEDGE
'\x16' # 0x0016 -> SYNCHRONOUS IDLE
'\x17' # 0x0017 -> END OF TRANSMISSION BLOCK
'\x18' # 0x0018 -> CANCEL
'\x19' # 0x0019 -> END OF MEDIUM
'\x1a' # 0x001a -> SUBSTITUTE
'\x1b' # 0x001b -> ESCAPE
'\x1c' # 0x001c -> FILE SEPARATOR
'\x1d' # 0x001d -> GROUP SEPARATOR
'\x1e' # 0x001e -> RECORD SEPARATOR
'\x1f' # 0x001f -> UNIT SEPARATOR
' ' # 0x0020 -> SPACE
'!' # 0x0021 -> EXCLAMATION MARK
'"' # 0x0022 -> QUOTATION MARK
'#' # 0x0023 -> NUMBER SIGN
'$' # 0x0024 -> DOLLAR SIGN
'%' # 0x0025 -> PERCENT SIGN
'&' # 0x0026 -> AMPERSAND
"'" # 0x0027 -> APOSTROPHE
'(' # 0x0028 -> LEFT PARENTHESIS
')' # 0x0029 -> RIGHT PARENTHESIS
'*' # 0x002a -> ASTERISK
'+' # 0x002b -> PLUS SIGN
',' # 0x002c -> COMMA
'-' # 0x002d -> HYPHEN-MINUS
'.' # 0x002e -> FULL STOP
'/' # 0x002f -> SOLIDUS
'0' # 0x0030 -> DIGIT ZERO
'1' # 0x0031 -> DIGIT ONE
'2' # 0x0032 -> DIGIT TWO
'3' # 0x0033 -> DIGIT THREE
'4' # 0x0034 -> DIGIT FOUR
'5' # 0x0035 -> DIGIT FIVE
'6' # 0x0036 -> DIGIT SIX
'7' # 0x0037 -> DIGIT SEVEN
'8' # 0x0038 -> DIGIT EIGHT
'9' # 0x0039 -> DIGIT NINE
':' # 0x003a -> COLON
';' # 0x003b -> SEMICOLON
'<' # 0x003c -> LESS-THAN SIGN
'=' # 0x003d -> EQUALS SIGN
'>' # 0x003e -> GREATER-THAN SIGN
'?' # 0x003f -> QUESTION MARK
'@' # 0x0040 -> COMMERCIAL AT
'A' # 0x0041 -> LATIN CAPITAL LETTER A
'B' # 0x0042 -> LATIN CAPITAL LETTER B
'C' # 0x0043 -> LATIN CAPITAL LETTER C
'D' # 0x0044 -> LATIN CAPITAL LETTER D
'E' # 0x0045 -> LATIN CAPITAL LETTER E
'F' # 0x0046 -> LATIN CAPITAL LETTER F
'G' # 0x0047 -> LATIN CAPITAL LETTER G
'H' # 0x0048 -> LATIN CAPITAL LETTER H
'I' # 0x0049 -> LATIN CAPITAL LETTER I
'J' # 0x004a -> LATIN CAPITAL LETTER J
'K' # 0x004b -> LATIN CAPITAL LETTER K
'L' # 0x004c -> LATIN CAPITAL LETTER L
'M' # 0x004d -> LATIN CAPITAL LETTER M
'N' # 0x004e -> LATIN CAPITAL LETTER N
'O' # 0x004f -> LATIN CAPITAL LETTER O
'P' # 0x0050 -> LATIN CAPITAL LETTER P
'Q' # 0x0051 -> LATIN CAPITAL LETTER Q
'R' # 0x0052 -> LATIN CAPITAL LETTER R
'S' # 0x0053 -> LATIN CAPITAL LETTER S
'T' # 0x0054 -> LATIN CAPITAL LETTER T
'U' # 0x0055 -> LATIN CAPITAL LETTER U
'V' # 0x0056 -> LATIN CAPITAL LETTER V
'W' # 0x0057 -> LATIN CAPITAL LETTER W
'X' # 0x0058 -> LATIN CAPITAL LETTER X
'Y' # 0x0059 -> LATIN CAPITAL LETTER Y
'Z' # 0x005a -> LATIN CAPITAL LETTER Z
'[' # 0x005b -> LEFT SQUARE BRACKET
'\\' # 0x005c -> REVERSE SOLIDUS
']' # 0x005d -> RIGHT SQUARE BRACKET
'^' # 0x005e -> CIRCUMFLEX ACCENT
'_' # 0x005f -> LOW LINE
'`' # 0x0060 -> GRAVE ACCENT
'a' # 0x0061 -> LATIN SMALL LETTER A
'b' # 0x0062 -> LATIN SMALL LETTER B
'c' # 0x0063 -> LATIN SMALL LETTER C
'd' # 0x0064 -> LATIN SMALL LETTER D
'e' # 0x0065 -> LATIN SMALL LETTER E
'f' # 0x0066 -> LATIN SMALL LETTER F
'g' # 0x0067 -> LATIN SMALL LETTER G
'h' # 0x0068 -> LATIN SMALL LETTER H
'i' # 0x0069 -> LATIN SMALL LETTER I
'j' # 0x006a -> LATIN SMALL LETTER J
'k' # 0x006b -> LATIN SMALL LETTER K
'l' # 0x006c -> LATIN SMALL LETTER L
'm' # 0x006d -> LATIN SMALL LETTER M
'n' # 0x006e -> LATIN SMALL LETTER N
'o' # 0x006f -> LATIN SMALL LETTER O
'p' # 0x0070 -> LATIN SMALL LETTER P
'q' # 0x0071 -> LATIN SMALL LETTER Q
'r' # 0x0072 -> LATIN SMALL LETTER R
's' # 0x0073 -> LATIN SMALL LETTER S
't' # 0x0074 -> LATIN SMALL LETTER T
'u' # 0x0075 -> LATIN SMALL LETTER U
'v' # 0x0076 -> LATIN SMALL LETTER V
'w' # 0x0077 -> LATIN SMALL LETTER W
'x' # 0x0078 -> LATIN SMALL LETTER X
'y' # 0x0079 -> LATIN SMALL LETTER Y
'z' # 0x007a -> LATIN SMALL LETTER Z
'{' # 0x007b -> LEFT CURLY BRACKET
'|' # 0x007c -> VERTICAL LINE
'}' # 0x007d -> RIGHT CURLY BRACKET
'~' # 0x007e -> TILDE
'\x7f' # 0x007f -> DELETE
'\u05d0' # 0x0080 -> HEBREW LETTER ALEF
'\u05d1' # 0x0081 -> HEBREW LETTER BET
'\u05d2' # 0x0082 -> HEBREW LETTER GIMEL
'\u05d3' # 0x0083 -> HEBREW LETTER DALET
'\u05d4' # 0x0084 -> HEBREW LETTER HE
'\u05d5' # 0x0085 -> HEBREW LETTER VAV
'\u05d6' # 0x0086 -> HEBREW LETTER ZAYIN
'\u05d7' # 0x0087 -> HEBREW LETTER HET
'\u05d8' # 0x0088 -> HEBREW LETTER TET
'\u05d9' # 0x0089 -> HEBREW LETTER YOD
'\u05da' # 0x008a -> HEBREW LETTER FINAL KAF
'\u05db' # 0x008b -> HEBREW LETTER KAF
'\u05dc' # 0x008c -> HEBREW LETTER LAMED
'\u05dd' # 0x008d -> HEBREW LETTER FINAL MEM
'\u05de' # 0x008e -> HEBREW LETTER MEM
'\u05df' # 0x008f -> HEBREW LETTER FINAL NUN
'\u05e0' # 0x0090 -> HEBREW LETTER NUN
'\u05e1' # 0x0091 -> HEBREW LETTER SAMEKH
'\u05e2' # 0x0092 -> HEBREW LETTER AYIN
'\u05e3' # 0x0093 -> HEBREW LETTER FINAL PE
'\u05e4' # 0x0094 -> HEBREW LETTER PE
'\u05e5' # 0x0095 -> HEBREW LETTER FINAL TSADI
'\u05e6' # 0x0096 -> HEBREW LETTER TSADI
'\u05e7' # 0x0097 -> HEBREW LETTER QOF
'\u05e8' # 0x0098 -> HEBREW LETTER RESH
'\u05e9' # 0x0099 -> HEBREW LETTER SHIN
'\u05ea' # 0x009a -> HEBREW LETTER TAV
'\xa2' # 0x009b -> CENT SIGN
'\xa3' # 0x009c -> POUND SIGN
'\xa5' # 0x009d -> YEN SIGN
'\u20a7' # 0x009e -> PESETA SIGN
'\u0192' # 0x009f -> LATIN SMALL LETTER F WITH HOOK
'\xe1' # 0x00a0 -> LATIN SMALL LETTER A WITH ACUTE
'\xed' # 0x00a1 -> LATIN SMALL LETTER I WITH ACUTE
'\xf3' # 0x00a2 -> LATIN SMALL LETTER O WITH ACUTE
'\xfa' # 0x00a3 -> LATIN SMALL LETTER U WITH ACUTE
'\xf1' # 0x00a4 -> LATIN SMALL LETTER N WITH TILDE
'\xd1' # 0x00a5 -> LATIN CAPITAL LETTER N WITH TILDE
'\xaa' # 0x00a6 -> FEMININE ORDINAL INDICATOR
'\xba' # 0x00a7 -> MASCULINE ORDINAL INDICATOR
'\xbf' # 0x00a8 -> INVERTED QUESTION MARK
'\u2310' # 0x00a9 -> REVERSED NOT SIGN
'\xac' # 0x00aa -> NOT SIGN
'\xbd' # 0x00ab -> VULGAR FRACTION ONE HALF
'\xbc' # 0x00ac -> VULGAR FRACTION ONE QUARTER
'\xa1' # 0x00ad -> INVERTED EXCLAMATION MARK
'\xab' # 0x00ae -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
'\xbb' # 0x00af -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
'\u2591' # 0x00b0 -> LIGHT SHADE
'\u2592' # 0x00b1 -> MEDIUM SHADE
'\u2593' # 0x00b2 -> DARK SHADE
'\u2502' # 0x00b3 -> BOX DRAWINGS LIGHT VERTICAL
'\u2524' # 0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT
'\u2561' # 0x00b5 -> BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
'\u2562' # 0x00b6 -> BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
'\u2556' # 0x00b7 -> BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
'\u2555' # 0x00b8 -> BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
'\u2563' # 0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT
'\u2551' # 0x00ba -> BOX DRAWINGS DOUBLE VERTICAL
'\u2557' # 0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT
'\u255d' # 0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT
'\u255c' # 0x00bd -> BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
'\u255b' # 0x00be -> BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
'\u2510' # 0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT
'\u2514' # 0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT
'\u2534' # 0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL
'\u252c' # 0x00c2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
'\u251c' # 0x00c3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT
'\u2500' # 0x00c4 -> BOX DRAWINGS LIGHT HORIZONTAL
'\u253c' # 0x00c5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
'\u255e' # 0x00c6 -> BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
'\u255f' # 0x00c7 -> BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
'\u255a' # 0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT
'\u2554' # 0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT
'\u2569' # 0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL
'\u2566' # 0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
'\u2560' # 0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
'\u2550' # 0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL
'\u256c' # 0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
'\u2567' # 0x00cf -> BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
'\u2568' # 0x00d0 -> BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
'\u2564' # 0x00d1 -> BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
'\u2565' # 0x00d2 -> BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
'\u2559' # 0x00d3 -> BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
'\u2558' # 0x00d4 -> BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
'\u2552' # 0x00d5 -> BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
'\u2553' # 0x00d6 -> BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
'\u256b' # 0x00d7 -> BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
'\u256a' # 0x00d8 -> BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
'\u2518' # 0x00d9 -> BOX DRAWINGS LIGHT UP AND LEFT
'\u250c' # 0x00da -> BOX DRAWINGS LIGHT DOWN AND RIGHT
'\u2588' # 0x00db -> FULL BLOCK
'\u2584' # 0x00dc -> LOWER HALF BLOCK
'\u258c' # 0x00dd -> LEFT HALF BLOCK
'\u2590' # 0x00de -> RIGHT HALF BLOCK
'\u2580' # 0x00df -> UPPER HALF BLOCK
'\u03b1' # 0x00e0 -> GREEK SMALL LETTER ALPHA
'\xdf' # 0x00e1 -> LATIN SMALL LETTER SHARP S (GERMAN)
'\u0393' # 0x00e2 -> GREEK CAPITAL LETTER GAMMA
'\u03c0' # 0x00e3 -> GREEK SMALL LETTER PI
'\u03a3' # 0x00e4 -> GREEK CAPITAL LETTER SIGMA
'\u03c3' # 0x00e5 -> GREEK SMALL LETTER SIGMA
'\xb5' # 0x00e6 -> MICRO SIGN
'\u03c4' # 0x00e7 -> GREEK SMALL LETTER TAU
'\u03a6' # 0x00e8 -> GREEK CAPITAL LETTER PHI
'\u0398' # 0x00e9 -> GREEK CAPITAL LETTER THETA
'\u03a9' # 0x00ea -> GREEK CAPITAL LETTER OMEGA
'\u03b4' # 0x00eb -> GREEK SMALL LETTER DELTA
'\u221e' # 0x00ec -> INFINITY
'\u03c6' # 0x00ed -> GREEK SMALL LETTER PHI
'\u03b5' # 0x00ee -> GREEK SMALL LETTER EPSILON
'\u2229' # 0x00ef -> INTERSECTION
'\u2261' # 0x00f0 -> IDENTICAL TO
'\xb1' # 0x00f1 -> PLUS-MINUS SIGN
'\u2265' # 0x00f2 -> GREATER-THAN OR EQUAL TO
'\u2264' # 0x00f3 -> LESS-THAN OR EQUAL TO
'\u2320' # 0x00f4 -> TOP HALF INTEGRAL
'\u2321' # 0x00f5 -> BOTTOM HALF INTEGRAL
'\xf7' # 0x00f6 -> DIVISION SIGN
'\u2248' # 0x00f7 -> ALMOST EQUAL TO
'\xb0' # 0x00f8 -> DEGREE SIGN
'\u2219' # 0x00f9 -> BULLET OPERATOR
'\xb7' # 0x00fa -> MIDDLE DOT
'\u221a' # 0x00fb -> SQUARE ROOT
'\u207f' # 0x00fc -> SUPERSCRIPT LATIN SMALL LETTER N
'\xb2' # 0x00fd -> SUPERSCRIPT TWO
'\u25a0' # 0x00fe -> BLACK SQUARE
'\xa0' # 0x00ff -> NO-BREAK SPACE
)
### Encoding Map
encoding_map = {
0x0000: 0x0000, # NULL
0x0001: 0x0001, # START OF HEADING
0x0002: 0x0002, # START OF TEXT
0x0003: 0x0003, # END OF TEXT
0x0004: 0x0004, # END OF TRANSMISSION
0x0005: 0x0005, # ENQUIRY
0x0006: 0x0006, # ACKNOWLEDGE
0x0007: 0x0007, # BELL
0x0008: 0x0008, # BACKSPACE
0x0009: 0x0009, # HORIZONTAL TABULATION
0x000a: 0x000a, # LINE FEED
0x000b: 0x000b, # VERTICAL TABULATION
0x000c: 0x000c, # FORM FEED
0x000d: 0x000d, # CARRIAGE RETURN
0x000e: 0x000e, # SHIFT OUT
0x000f: 0x000f, # SHIFT IN
0x0010: 0x0010, # DATA LINK ESCAPE
0x0011: 0x0011, # DEVICE CONTROL ONE
0x0012: 0x0012, # DEVICE CONTROL TWO
0x0013: 0x0013, # DEVICE CONTROL THREE
0x0014: 0x0014, # DEVICE CONTROL FOUR
0x0015: 0x0015, # NEGATIVE ACKNOWLEDGE
0x0016: 0x0016, # SYNCHRONOUS IDLE
0x0017: 0x0017, # END OF TRANSMISSION BLOCK
0x0018: 0x0018, # CANCEL
0x0019: 0x0019, # END OF MEDIUM
0x001a: 0x001a, # SUBSTITUTE
0x001b: 0x001b, # ESCAPE
0x001c: 0x001c, # FILE SEPARATOR
0x001d: 0x001d, # GROUP SEPARATOR
0x001e: 0x001e, # RECORD SEPARATOR
0x001f: 0x001f, # UNIT SEPARATOR
0x0020: 0x0020, # SPACE
0x0021: 0x0021, # EXCLAMATION MARK
0x0022: 0x0022, # QUOTATION MARK
0x0023: 0x0023, # NUMBER SIGN
0x0024: 0x0024, # DOLLAR SIGN
0x0025: 0x0025, # PERCENT SIGN
0x0026: 0x0026, # AMPERSAND
0x0027: 0x0027, # APOSTROPHE
0x0028: 0x0028, # LEFT PARENTHESIS
0x0029: 0x0029, # RIGHT PARENTHESIS
0x002a: 0x002a, # ASTERISK
0x002b: 0x002b, # PLUS SIGN
0x002c: 0x002c, # COMMA
0x002d: 0x002d, # HYPHEN-MINUS
0x002e: 0x002e, # FULL STOP
0x002f: 0x002f, # SOLIDUS
0x0030: 0x0030, # DIGIT ZERO
0x0031: 0x0031, # DIGIT ONE
0x0032: 0x0032, # DIGIT TWO
0x0033: 0x0033, # DIGIT THREE
0x0034: 0x0034, # DIGIT FOUR
0x0035: 0x0035, # DIGIT FIVE
0x0036: 0x0036, # DIGIT SIX
0x0037: 0x0037, # DIGIT SEVEN
0x0038: 0x0038, # DIGIT EIGHT
0x0039: 0x0039, # DIGIT NINE
0x003a: 0x003a, # COLON
0x003b: 0x003b, # SEMICOLON
0x003c: 0x003c, # LESS-THAN SIGN
0x003d: 0x003d, # EQUALS SIGN
0x003e: 0x003e, # GREATER-THAN SIGN
0x003f: 0x003f, # QUESTION MARK
0x0040: 0x0040, # COMMERCIAL AT
0x0041: 0x0041, # LATIN CAPITAL LETTER A
0x0042: 0x0042, # LATIN CAPITAL LETTER B
0x0043: 0x0043, # LATIN CAPITAL LETTER C
0x0044: 0x0044, # LATIN CAPITAL LETTER D
0x0045: 0x0045, # LATIN CAPITAL LETTER E
0x0046: 0x0046, # LATIN CAPITAL LETTER F
0x0047: 0x0047, # LATIN CAPITAL LETTER G
0x0048: 0x0048, # LATIN CAPITAL LETTER H
0x0049: 0x0049, # LATIN CAPITAL LETTER I
0x004a: 0x004a, # LATIN CAPITAL LETTER J
0x004b: 0x004b, # LATIN CAPITAL LETTER K
0x004c: 0x004c, # LATIN CAPITAL LETTER L
0x004d: 0x004d, # LATIN CAPITAL LETTER M
0x004e: 0x004e, # LATIN CAPITAL LETTER N
0x004f: 0x004f, # LATIN CAPITAL LETTER O
0x0050: 0x0050, # LATIN CAPITAL LETTER P
0x0051: 0x0051, # LATIN CAPITAL LETTER Q
0x0052: 0x0052, # LATIN CAPITAL LETTER R
0x0053: 0x0053, # LATIN CAPITAL LETTER S
0x0054: 0x0054, # LATIN CAPITAL LETTER T
0x0055: 0x0055, # LATIN CAPITAL LETTER U
0x0056: 0x0056, # LATIN CAPITAL LETTER V
0x0057: 0x0057, # LATIN CAPITAL LETTER W
0x0058: 0x0058, # LATIN CAPITAL LETTER X
0x0059: 0x0059, # LATIN CAPITAL LETTER Y
0x005a: 0x005a, # LATIN CAPITAL LETTER Z
0x005b: 0x005b, # LEFT SQUARE BRACKET
0x005c: 0x005c, # REVERSE SOLIDUS
0x005d: 0x005d, # RIGHT SQUARE BRACKET
0x005e: 0x005e, # CIRCUMFLEX ACCENT
0x005f: 0x005f, # LOW LINE
0x0060: 0x0060, # GRAVE ACCENT
0x0061: 0x0061, # LATIN SMALL LETTER A
0x0062: 0x0062, # LATIN SMALL LETTER B
0x0063: 0x0063, # LATIN SMALL LETTER C
0x0064: 0x0064, # LATIN SMALL LETTER D
0x0065: 0x0065, # LATIN SMALL LETTER E
0x0066: 0x0066, # LATIN SMALL LETTER F
0x0067: 0x0067, # LATIN SMALL LETTER G
0x0068: 0x0068, # LATIN SMALL LETTER H
0x0069: 0x0069, # LATIN SMALL LETTER I
0x006a: 0x006a, # LATIN SMALL LETTER J
0x006b: 0x006b, # LATIN SMALL LETTER K
0x006c: 0x006c, # LATIN SMALL LETTER L
0x006d: 0x006d, # LATIN SMALL LETTER M
0x006e: 0x006e, # LATIN SMALL LETTER N
0x006f: 0x006f, # LATIN SMALL LETTER O
0x0070: 0x0070, # LATIN SMALL LETTER P
0x0071: 0x0071, # LATIN SMALL LETTER Q
0x0072: 0x0072, # LATIN SMALL LETTER R
0x0073: 0x0073, # LATIN SMALL LETTER S
0x0074: 0x0074, # LATIN SMALL LETTER T
0x0075: 0x0075, # LATIN SMALL LETTER U
0x0076: 0x0076, # LATIN SMALL LETTER V
0x0077: 0x0077, # LATIN SMALL LETTER W
0x0078: 0x0078, # LATIN SMALL LETTER X
0x0079: 0x0079, # LATIN SMALL LETTER Y
0x007a: 0x007a, # LATIN SMALL LETTER Z
0x007b: 0x007b, # LEFT CURLY BRACKET
0x007c: 0x007c, # VERTICAL LINE
0x007d: 0x007d, # RIGHT CURLY BRACKET
0x007e: 0x007e, # TILDE
0x007f: 0x007f, # DELETE
0x00a0: 0x00ff, # NO-BREAK SPACE
0x00a1: 0x00ad, # INVERTED EXCLAMATION MARK
0x00a2: 0x009b, # CENT SIGN
0x00a3: 0x009c, # POUND SIGN
0x00a5: 0x009d, # YEN SIGN
0x00aa: 0x00a6, # FEMININE ORDINAL INDICATOR
0x00ab: 0x00ae, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00ac: 0x00aa, # NOT SIGN
0x00b0: 0x00f8, # DEGREE SIGN
0x00b1: 0x00f1, # PLUS-MINUS SIGN
0x00b2: 0x00fd, # SUPERSCRIPT TWO
0x00b5: 0x00e6, # MICRO SIGN
0x00b7: 0x00fa, # MIDDLE DOT
0x00ba: 0x00a7, # MASCULINE ORDINAL INDICATOR
0x00bb: 0x00af, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00bc: 0x00ac, # VULGAR FRACTION ONE QUARTER
0x00bd: 0x00ab, # VULGAR FRACTION ONE HALF
0x00bf: 0x00a8, # INVERTED QUESTION MARK
0x00d1: 0x00a5, # LATIN CAPITAL LETTER N WITH TILDE
0x00df: 0x00e1, # LATIN SMALL LETTER SHARP S (GERMAN)
0x00e1: 0x00a0, # LATIN SMALL LETTER A WITH ACUTE
0x00ed: 0x00a1, # LATIN SMALL LETTER I WITH ACUTE
0x00f1: 0x00a4, # LATIN SMALL LETTER N WITH TILDE
0x00f3: 0x00a2, # LATIN SMALL LETTER O WITH ACUTE
0x00f7: 0x00f6, # DIVISION SIGN
0x00fa: 0x00a3, # LATIN SMALL LETTER U WITH ACUTE
0x0192: 0x009f, # LATIN SMALL LETTER F WITH HOOK
0x0393: 0x00e2, # GREEK CAPITAL LETTER GAMMA
0x0398: 0x00e9, # GREEK CAPITAL LETTER THETA
0x03a3: 0x00e4, # GREEK CAPITAL LETTER SIGMA
0x03a6: 0x00e8, # GREEK CAPITAL LETTER PHI
0x03a9: 0x00ea, # GREEK CAPITAL LETTER OMEGA
0x03b1: 0x00e0, # GREEK SMALL LETTER ALPHA
0x03b4: 0x00eb, # GREEK SMALL LETTER DELTA
0x03b5: 0x00ee, # GREEK SMALL LETTER EPSILON
0x03c0: 0x00e3, # GREEK SMALL LETTER PI
0x03c3: 0x00e5, # GREEK SMALL LETTER SIGMA
0x03c4: 0x00e7, # GREEK SMALL LETTER TAU
0x03c6: 0x00ed, # GREEK SMALL LETTER PHI
0x05d0: 0x0080, # HEBREW LETTER ALEF
0x05d1: 0x0081, # HEBREW LETTER BET
0x05d2: 0x0082, # HEBREW LETTER GIMEL
0x05d3: 0x0083, # HEBREW LETTER DALET
0x05d4: 0x0084, # HEBREW LETTER HE
0x05d5: 0x0085, # HEBREW LETTER VAV
0x05d6: 0x0086, # HEBREW LETTER ZAYIN
0x05d7: 0x0087, # HEBREW LETTER HET
0x05d8: 0x0088, # HEBREW LETTER TET
0x05d9: 0x0089, # HEBREW LETTER YOD
0x05da: 0x008a, # HEBREW LETTER FINAL KAF
0x05db: 0x008b, # HEBREW LETTER KAF
0x05dc: 0x008c, # HEBREW LETTER LAMED
0x05dd: 0x008d, # HEBREW LETTER FINAL MEM
0x05de: 0x008e, # HEBREW LETTER MEM
0x05df: 0x008f, # HEBREW LETTER FINAL NUN
0x05e0: 0x0090, # HEBREW LETTER NUN
0x05e1: 0x0091, # HEBREW LETTER SAMEKH
0x05e2: 0x0092, # HEBREW LETTER AYIN
0x05e3: 0x0093, # HEBREW LETTER FINAL PE
0x05e4: 0x0094, # HEBREW LETTER PE
0x05e5: 0x0095, # HEBREW LETTER FINAL TSADI
0x05e6: 0x0096, # HEBREW LETTER TSADI
0x05e7: 0x0097, # HEBREW LETTER QOF
0x05e8: 0x0098, # HEBREW LETTER RESH
0x05e9: 0x0099, # HEBREW LETTER SHIN
0x05ea: 0x009a, # HEBREW LETTER TAV
0x207f: 0x00fc, # SUPERSCRIPT LATIN SMALL LETTER N
0x20a7: 0x009e, # PESETA SIGN
0x2219: 0x00f9, # BULLET OPERATOR
0x221a: 0x00fb, # SQUARE ROOT
0x221e: 0x00ec, # INFINITY
0x2229: 0x00ef, # INTERSECTION
0x2248: 0x00f7, # ALMOST EQUAL TO
0x2261: 0x00f0, # IDENTICAL TO
0x2264: 0x00f3, # LESS-THAN OR EQUAL TO
0x2265: 0x00f2, # GREATER-THAN OR EQUAL TO
0x2310: 0x00a9, # REVERSED NOT SIGN
0x2320: 0x00f4, # TOP HALF INTEGRAL
0x2321: 0x00f5, # BOTTOM HALF INTEGRAL
0x2500: 0x00c4, # BOX DRAWINGS LIGHT HORIZONTAL
0x2502: 0x00b3, # BOX DRAWINGS LIGHT VERTICAL
0x250c: 0x00da, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x2510: 0x00bf, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x2514: 0x00c0, # BOX DRAWINGS LIGHT UP AND RIGHT
0x2518: 0x00d9, # BOX DRAWINGS LIGHT UP AND LEFT
0x251c: 0x00c3, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x2524: 0x00b4, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x252c: 0x00c2, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x2534: 0x00c1, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x253c: 0x00c5, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x2550: 0x00cd, # BOX DRAWINGS DOUBLE HORIZONTAL
0x2551: 0x00ba, # BOX DRAWINGS DOUBLE VERTICAL
0x2552: 0x00d5, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
0x2553: 0x00d6, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
0x2554: 0x00c9, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x2555: 0x00b8, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
0x2556: 0x00b7, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
0x2557: 0x00bb, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x2558: 0x00d4, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
0x2559: 0x00d3, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
0x255a: 0x00c8, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x255b: 0x00be, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
0x255c: 0x00bd, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
0x255d: 0x00bc, # BOX DRAWINGS DOUBLE UP AND LEFT
0x255e: 0x00c6, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
0x255f: 0x00c7, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
0x2560: 0x00cc, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x2561: 0x00b5, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
0x2562: 0x00b6, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
0x2563: 0x00b9, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x2564: 0x00d1, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
0x2565: 0x00d2, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
0x2566: 0x00cb, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x2567: 0x00cf, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
0x2568: 0x00d0, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
0x2569: 0x00ca, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x256a: 0x00d8, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
0x256b: 0x00d7, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
0x256c: 0x00ce, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x2580: 0x00df, # UPPER HALF BLOCK
0x2584: 0x00dc, # LOWER HALF BLOCK
0x2588: 0x00db, # FULL BLOCK
0x258c: 0x00dd, # LEFT HALF BLOCK
0x2590: 0x00de, # RIGHT HALF BLOCK
0x2591: 0x00b0, # LIGHT SHADE
0x2592: 0x00b1, # MEDIUM SHADE
0x2593: 0x00b2, # DARK SHADE
0x25a0: 0x00fe, # BLACK SQUARE
}
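# Illustrative usage sketch (not part of the generated codec): the two maps
# above are inverses of each other, so charmap_decode/charmap_encode give a
# byte <-> text round trip even without registering the codec.
if __name__ == '__main__':
    text, _ = codecs.charmap_decode(b'\x80\x99', 'strict', decoding_table)
    assert text == '\u05d0\u05e9'  # HEBREW LETTER ALEF, HEBREW LETTER SHIN
    raw, _ = codecs.charmap_encode(text, 'strict', encoding_map)
    assert raw == b'\x80\x99'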
|
gpl-3.0
|
stargaser/astropy
|
astropy/stats/info_theory.py
|
3
|
14921
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module contains simple functions for model selection.
"""
import numpy as np
__all__ = ['bayesian_info_criterion', 'bayesian_info_criterion_lsq',
'akaike_info_criterion', 'akaike_info_criterion_lsq']
__doctest_requires__ = {'bayesian_info_criterion_lsq': ['scipy'],
'akaike_info_criterion_lsq': ['scipy']}
def bayesian_info_criterion(log_likelihood, n_params, n_samples):
r""" Computes the Bayesian Information Criterion (BIC) given the log of the
likelihood function evaluated at the estimated (or analytically derived)
parameters, the number of parameters, and the number of samples.
    The BIC is usually applied to decide whether increasing the number of free
    parameters (hence, increasing the model complexity) yields a significantly
    better fit. The decision is in favor of the model with the lowest
    BIC.
BIC is given as
.. math::
\mathrm{BIC} = k \ln(n) - 2L,
in which :math:`n` is the sample size, :math:`k` is the number of free
parameters, and :math:`L` is the log likelihood function of the model
    evaluated at the maximum likelihood estimate (i.e., the parameters for
which L is maximized).
    When comparing two models, define
    :math:`\Delta \mathrm{BIC} = \mathrm{BIC}_h - \mathrm{BIC}_l`, in which
    :math:`\mathrm{BIC}_h` is the higher BIC, and :math:`\mathrm{BIC}_l` is
    the lower BIC. The higher :math:`\Delta \mathrm{BIC}` is, the stronger
    the evidence against the model with the higher BIC.
The general rule of thumb is:
    :math:`0 < \Delta\mathrm{BIC} \leq 2`: weak evidence that the model
    with the lower BIC is better
    :math:`2 < \Delta\mathrm{BIC} \leq 6`: moderate evidence that the model
    with the lower BIC is better
    :math:`6 < \Delta\mathrm{BIC} \leq 10`: strong evidence that the model
    with the lower BIC is better
    :math:`\Delta\mathrm{BIC} > 10`: very strong evidence that the model
    with the lower BIC is better
For a detailed explanation, see [1]_ - [5]_.
Parameters
----------
log_likelihood : float
Logarithm of the likelihood function of the model evaluated at the
point of maxima (with respect to the parameter space).
n_params : int
Number of free parameters of the model, i.e., dimension of the
parameter space.
n_samples : int
Number of observations.
Returns
-------
bic : float
Bayesian Information Criterion.
Examples
--------
The following example was originally presented in [1]_. Consider a
Gaussian model (mu, sigma) and a t-Student model (mu, sigma, delta).
    In addition, assume that the t model yields a higher likelihood.
    The question the BIC is proposed to answer is: "Is the increase in
    likelihood due to the larger number of parameters?"
>>> from astropy.stats.info_theory import bayesian_info_criterion
>>> lnL_g = -176.4
>>> lnL_t = -173.0
>>> n_params_g = 2
>>> n_params_t = 3
>>> n_samples = 100
>>> bic_g = bayesian_info_criterion(lnL_g, n_params_g, n_samples)
>>> bic_t = bayesian_info_criterion(lnL_t, n_params_t, n_samples)
>>> bic_g - bic_t # doctest: +FLOAT_CMP
2.1948298140119391
    Therefore, there is moderate evidence that the increase in likelihood
    for the t-Student model is due to its larger number of parameters.
References
----------
.. [1] Richards, D. Maximum Likelihood Estimation and the Bayesian
Information Criterion.
<https://hea-www.harvard.edu/astrostat/Stat310_0910/dr_20100323_mle.pdf>
.. [2] Wikipedia. Bayesian Information Criterion.
<https://en.wikipedia.org/wiki/Bayesian_information_criterion>
.. [3] Origin Lab. Comparing Two Fitting Functions.
<https://www.originlab.com/doc/Origin-Help/PostFit-CompareFitFunc>
.. [4] Liddle, A. R. Information Criteria for Astrophysical Model
Selection. 2008. <https://arxiv.org/pdf/astro-ph/0701113v2.pdf>
.. [5] Liddle, A. R. How many cosmological parameters? 2008.
<https://arxiv.org/pdf/astro-ph/0401198v3.pdf>
"""
return n_params*np.log(n_samples) - 2.0*log_likelihood
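# Hypothetical helper (illustrative, not part of astropy): map a Delta-BIC
# value onto the rule-of-thumb evidence categories quoted in the docstring
# above. E.g. the Gaussian vs. t-Student example gives ~2.19 -> 'moderate'.
def _demo_bic_evidence(delta_bic):
    if delta_bic <= 2:
        return 'weak'
    elif delta_bic <= 6:
        return 'moderate'
    elif delta_bic <= 10:
        return 'strong'
    return 'very strong'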
def bayesian_info_criterion_lsq(ssr, n_params, n_samples):
r"""
Computes the Bayesian Information Criterion (BIC) assuming that the
observations come from a Gaussian distribution.
In this case, BIC is given as
.. math::
\mathrm{BIC} = n\ln\left(\dfrac{\mathrm{SSR}}{n}\right) + k\ln(n)
in which :math:`n` is the sample size, :math:`k` is the number of free
parameters and :math:`\mathrm{SSR}` stands for the sum of squared residuals
between model and data.
This is applicable, for instance, when the parameters of a model are
estimated using the least squares statistic. See [1]_ and [2]_.
Parameters
----------
ssr : float
Sum of squared residuals (SSR) between model and data.
n_params : int
Number of free parameters of the model, i.e., dimension of the
parameter space.
n_samples : int
Number of observations.
Returns
-------
bic : float
Examples
--------
Consider the simple 1-D fitting example presented in the Astropy
modeling webpage [3]_. There, two models (Box and Gaussian) were fitted to
    a source flux using the least squares statistic. However, the fits
    themselves do not tell much about which model better represents this
    hypothetical source. Therefore, we are going to apply the BIC in order
    to decide in favor of a model.
>>> import numpy as np
>>> from astropy.modeling import models, fitting
>>> from astropy.stats.info_theory import bayesian_info_criterion_lsq
>>> # Generate fake data
>>> np.random.seed(0)
>>> x = np.linspace(-5., 5., 200)
>>> y = 3 * np.exp(-0.5 * (x - 1.3)**2 / 0.8**2)
>>> y += np.random.normal(0., 0.2, x.shape)
>>> # Fit the data using a Box model.
>>> # Bounds are not really needed but included here to demonstrate usage.
>>> t_init = models.Trapezoid1D(amplitude=1., x_0=0., width=1., slope=0.5,
... bounds={"x_0": (-5., 5.)})
>>> fit_t = fitting.LevMarLSQFitter()
>>> t = fit_t(t_init, x, y)
>>> # Fit the data using a Gaussian
>>> g_init = models.Gaussian1D(amplitude=1., mean=0, stddev=1.)
>>> fit_g = fitting.LevMarLSQFitter()
>>> g = fit_g(g_init, x, y)
>>> # Compute the mean squared errors
>>> ssr_t = np.sum((t(x) - y)*(t(x) - y))
>>> ssr_g = np.sum((g(x) - y)*(g(x) - y))
>>> # Compute the bics
>>> bic_t = bayesian_info_criterion_lsq(ssr_t, 4, x.shape[0])
>>> bic_g = bayesian_info_criterion_lsq(ssr_g, 3, x.shape[0])
>>> bic_t - bic_g # doctest: +FLOAT_CMP
30.644474706065466
    Hence, there is very strong evidence that the Gaussian model represents
    the data significantly better than the Box model. This is, of course,
    expected since the true model is Gaussian.
References
----------
.. [1] Wikipedia. Bayesian Information Criterion.
<https://en.wikipedia.org/wiki/Bayesian_information_criterion>
.. [2] Origin Lab. Comparing Two Fitting Functions.
<https://www.originlab.com/doc/Origin-Help/PostFit-CompareFitFunc>
.. [3] Astropy Models and Fitting
<http://docs.astropy.org/en/stable/modeling>
"""
return bayesian_info_criterion(-0.5 * n_samples * np.log(ssr / n_samples),
n_params, n_samples)
def akaike_info_criterion(log_likelihood, n_params, n_samples):
r"""
Computes the Akaike Information Criterion (AIC).
Like the Bayesian Information Criterion, the AIC is a measure of
relative fitting quality which is used for fitting evaluation and model
selection. The decision is in favor of the model with the lowest AIC.
AIC is given as
.. math::
\mathrm{AIC} = 2(k - L)
in which :math:`n` is the sample size, :math:`k` is the number of free
parameters, and :math:`L` is the log likelihood function of the model
evaluated at the maximum likelihood estimate (i. e., the parameters for
which L is maximized).
In case that the sample size is not "large enough" a correction is
applied, i.e.
.. math::
\mathrm{AIC} = 2(k - L) + \dfrac{2k(k+1)}{n - k - 1}
Rule of thumb [1]_:
:math:`\Delta\mathrm{AIC}_i = \mathrm{AIC}_i - \mathrm{AIC}_{min}`
:math:`\Delta\mathrm{AIC}_i < 2`: substantial support for model i
:math:`3 < \Delta\mathrm{AIC}_i < 7`: considerably less support for model i
    :math:`\Delta\mathrm{AIC}_i > 10`: essentially no support for model i
    in which :math:`\mathrm{AIC}_{min}` stands for the lowest AIC among the
    models which are being compared.
For detailed explanations see [1]_-[6]_.
Parameters
----------
log_likelihood : float
Logarithm of the likelihood function of the model evaluated at the
point of maxima (with respect to the parameter space).
n_params : int
Number of free parameters of the model, i.e., dimension of the
parameter space.
n_samples : int
Number of observations.
Returns
-------
aic : float
Akaike Information Criterion.
Examples
--------
The following example was originally presented in [2]_. Basically, two
models are being compared. One with six parameters (model 1) and another
    with five parameters (model 2). Despite the fact that model 2 has a
lower AIC, we could decide in favor of model 1 since the difference (in
AIC) between them is only about 1.0.
>>> n_samples = 121
>>> lnL1 = -3.54
>>> n1_params = 6
>>> lnL2 = -4.17
>>> n2_params = 5
>>> aic1 = akaike_info_criterion(lnL1, n1_params, n_samples)
>>> aic2 = akaike_info_criterion(lnL2, n2_params, n_samples)
>>> aic1 - aic2 # doctest: +FLOAT_CMP
0.9551029748283746
    Therefore, model 1 can still be strongly supported, even though it has
    more free parameters.
References
----------
.. [1] Cavanaugh, J. E. Model Selection Lecture II: The Akaike
Information Criterion.
<http://machinelearning102.pbworks.com/w/file/fetch/47699383/ms_lec_2_ho.pdf>
.. [2] Mazerolle, M. J. Making sense out of Akaike's Information
Criterion (AIC): its use and interpretation in model selection and
inference from ecological data.
<https://corpus.ulaval.ca/jspui/handle/20.500.11794/17461>
.. [3] Wikipedia. Akaike Information Criterion.
<https://en.wikipedia.org/wiki/Akaike_information_criterion>
.. [4] Origin Lab. Comparing Two Fitting Functions.
<https://www.originlab.com/doc/Origin-Help/PostFit-CompareFitFunc>
.. [5] Liddle, A. R. Information Criteria for Astrophysical Model
Selection. 2008. <https://arxiv.org/pdf/astro-ph/0701113v2.pdf>
.. [6] Liddle, A. R. How many cosmological parameters? 2008.
<https://arxiv.org/pdf/astro-ph/0401198v3.pdf>
"""
# Correction in case of small number of observations
if n_samples/float(n_params) >= 40.0:
aic = 2.0 * (n_params - log_likelihood)
else:
aic = (2.0 * (n_params - log_likelihood) +
2.0 * n_params * (n_params + 1.0) /
(n_samples - n_params - 1.0))
return aic
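# Worked illustration of the branch above (comment only, added for clarity):
# for the docstring example with n_samples=121 and n_params=6,
# n_samples/n_params ~= 20.2 < 40, so the corrected (AICc) form is used;
# its correction term is 2*6*7/(121 - 6 - 1) ~= 0.74.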
def akaike_info_criterion_lsq(ssr, n_params, n_samples):
r"""
Computes the Akaike Information Criterion assuming that the observations
are Gaussian distributed.
In this case, AIC is given as
.. math::
\mathrm{AIC} = n\ln\left(\dfrac{\mathrm{SSR}}{n}\right) + 2k
In case that the sample size is not "large enough", a correction is
applied, i.e.
.. math::
\mathrm{AIC} = n\ln\left(\dfrac{\mathrm{SSR}}{n}\right) + 2k +
\dfrac{2k(k+1)}{n-k-1}
in which :math:`n` is the sample size, :math:`k` is the number of free
parameters and :math:`\mathrm{SSR}` stands for the sum of squared residuals
between model and data.
This is applicable, for instance, when the parameters of a model are
estimated using the least squares statistic.
Parameters
----------
ssr : float
Sum of squared residuals (SSR) between model and data.
n_params : int
Number of free parameters of the model, i.e., the dimension of the
parameter space.
n_samples : int
Number of observations.
Returns
-------
aic : float
Akaike Information Criterion.
Examples
--------
    This example is based on the Astropy Modeling webpage, Compound models
    section.
>>> import numpy as np
>>> from astropy.modeling import models, fitting
>>> from astropy.stats.info_theory import akaike_info_criterion_lsq
>>> np.random.seed(42)
>>> # Generate fake data
>>> g1 = models.Gaussian1D(.1, 0, 0.2) # changed this to noise level
>>> g2 = models.Gaussian1D(.1, 0.3, 0.2) # and added another Gaussian
>>> g3 = models.Gaussian1D(2.5, 0.5, 0.1)
>>> x = np.linspace(-1, 1, 200)
>>> y = g1(x) + g2(x) + g3(x) + np.random.normal(0., 0.2, x.shape)
>>> # Fit with three Gaussians
>>> g3_init = (models.Gaussian1D(.1, 0, 0.1)
... + models.Gaussian1D(.1, 0.2, 0.15)
... + models.Gaussian1D(2.4, .4, 0.1))
>>> fitter = fitting.LevMarLSQFitter()
>>> g3_fit = fitter(g3_init, x, y)
>>> # Fit with two Gaussians
>>> g2_init = (models.Gaussian1D(.1, 0, 0.1) +
... models.Gaussian1D(2, 0.5, 0.1))
>>> g2_fit = fitter(g2_init, x, y)
>>> # Fit with only one Gaussian
>>> g1_init = models.Gaussian1D(amplitude=2., mean=0.3, stddev=.5)
>>> g1_fit = fitter(g1_init, x, y)
    >>> # Compute the sums of squared residuals
>>> ssr_g3 = np.sum((g3_fit(x) - y)**2.0)
>>> ssr_g2 = np.sum((g2_fit(x) - y)**2.0)
>>> ssr_g1 = np.sum((g1_fit(x) - y)**2.0)
>>> akaike_info_criterion_lsq(ssr_g3, 9, x.shape[0]) # doctest: +FLOAT_CMP
-634.5257517810961
>>> akaike_info_criterion_lsq(ssr_g2, 6, x.shape[0]) # doctest: +FLOAT_CMP
-662.83834510232043
>>> akaike_info_criterion_lsq(ssr_g1, 3, x.shape[0]) # doctest: +FLOAT_CMP
-647.47312032659499
    Hence, from the AIC values, we would prefer to choose the model g2_fit.
    Both g1_fit and g3_fit have :math:`\Delta\mathrm{AIC} > 10` relative to
    g2_fit, so they find essentially no support from these data.
References
----------
.. [1] Akaike Information Criterion.
<https://en.wikipedia.org/wiki/Akaike_information_criterion>
.. [2] Origin Lab. Comparing Two Fitting Functions.
<https://www.originlab.com/doc/Origin-Help/PostFit-CompareFitFunc>
"""
return akaike_info_criterion(-0.5 * n_samples * np.log(ssr / n_samples),
n_params, n_samples)
|
bsd-3-clause
|
richardnpaul/FWL-Website
|
lib/python2.7/site-packages/django/contrib/staticfiles/utils.py
|
322
|
1973
|
import os
import fnmatch
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
def matches_patterns(path, patterns=None):
"""
Return True or False depending on whether the ``path`` should be
    ignored (i.e., whether it matches any pattern in ``patterns``).
"""
if patterns is None:
patterns = []
for pattern in patterns:
if fnmatch.fnmatchcase(path, pattern):
return True
return False
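# Usage sketch (illustrative comments, not part of Django): matching uses
# case-sensitive fnmatch rules, and '*' also matches path separators.
#
#   matches_patterns('css/app.css', ['*.css'])   # -> True
#   matches_patterns('css/app.css', ['CSS/*'])   # -> False (case-sensitive)
#   matches_patterns('css/app.css')              # -> False (no patterns)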
def get_files(storage, ignore_patterns=None, location=''):
"""
Recursively walk the storage directories yielding the paths
of all files that should be copied.
"""
if ignore_patterns is None:
ignore_patterns = []
directories, files = storage.listdir(location)
for fn in files:
if matches_patterns(fn, ignore_patterns):
continue
if location:
fn = os.path.join(location, fn)
yield fn
for dir in directories:
if matches_patterns(dir, ignore_patterns):
continue
if location:
dir = os.path.join(location, dir)
for fn in get_files(storage, ignore_patterns, dir):
yield fn
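# Usage sketch (illustrative, not part of Django; the location path and
# ignore patterns are assumptions for demonstration):
#
#   from django.core.files.storage import FileSystemStorage
#   storage = FileSystemStorage(location='/srv/app/static')
#   paths = list(get_files(storage, ignore_patterns=['.*', '*~']))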
def check_settings(base_url=None):
"""
Checks if the staticfiles settings have sane values.
"""
if base_url is None:
base_url = settings.STATIC_URL
if not base_url:
raise ImproperlyConfigured(
"You're using the staticfiles app "
"without having set the required STATIC_URL setting.")
if settings.MEDIA_URL == base_url:
raise ImproperlyConfigured("The MEDIA_URL and STATIC_URL "
"settings must have different values")
if ((settings.MEDIA_ROOT and settings.STATIC_ROOT) and
(settings.MEDIA_ROOT == settings.STATIC_ROOT)):
raise ImproperlyConfigured("The MEDIA_ROOT and STATIC_ROOT "
"settings must have different values")
|
gpl-3.0
|
FFMG/myoddweb.piger
|
monitor/api/python/Python-3.7.2/Lib/ctypes/test/test_unaligned_structures.py
|
282
|
1215
|
import sys, unittest
from ctypes import *
structures = []
byteswapped_structures = []
if sys.byteorder == "little":
SwappedStructure = BigEndianStructure
else:
SwappedStructure = LittleEndianStructure
for typ in [c_short, c_int, c_long, c_longlong,
c_float, c_double,
c_ushort, c_uint, c_ulong, c_ulonglong]:
class X(Structure):
_pack_ = 1
_fields_ = [("pad", c_byte),
("value", typ)]
class Y(SwappedStructure):
_pack_ = 1
_fields_ = [("pad", c_byte),
("value", typ)]
structures.append(X)
byteswapped_structures.append(Y)
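# Explanatory note (added comment, not in the original test): _pack_ = 1
# suppresses the padding normally inserted after the leading c_byte, so each
# "value" field sits at byte offset 1 -- deliberately misaligned for every
# tested scalar type, which is what the offset assertions below verify.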
class TestStructures(unittest.TestCase):
def test_native(self):
for typ in structures:
## print typ.value
self.assertEqual(typ.value.offset, 1)
o = typ()
o.value = 4
self.assertEqual(o.value, 4)
def test_swapped(self):
for typ in byteswapped_structures:
## print >> sys.stderr, typ.value
self.assertEqual(typ.value.offset, 1)
o = typ()
o.value = 4
self.assertEqual(o.value, 4)
if __name__ == '__main__':
unittest.main()
|
gpl-2.0
|
Aakash1312/appleseed
|
src/appleseed.python/__init__.py
|
3
|
1430
|
#
# This source file is part of appleseed.
# Visit http://appleseedhq.net/ for additional information and resources.
#
# This software is released under the MIT license.
#
# Copyright (c) 2012-2013 Esteban Tovagliari, Jupiter Jazz Limited
# Copyright (c) 2014-2017 Esteban Tovagliari, The appleseedhq Organization
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
from _appleseedpython import *
from logtarget import *
|
mit
|
vitaly-krugl/nupic
|
tests/integration/nupic/opf/opf_checkpoint_test/experiments/backwards_compatibility/base.py
|
20
|
15012
|
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2012-2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Template file used by the OPF Experiment Generator to generate the actual
description.py file by replacing $XXXXXXXX tokens with desired values.
This description.py file was generated by:
'/Users/ronmarianetti/nupic/eng/lib/python2.6/site-packages/nupic/frameworks/opf/expGenerator/ExpGenerator.pyc'
"""
import os
from nupic.frameworks.opf.exp_description_api import ExperimentDescriptionAPI
from nupic.frameworks.opf.exp_description_helpers import (
updateConfigFromSubConfig,
applyValueGettersToContainer,
DeferredDictLookup)
from nupic.frameworks.opf.htm_prediction_model_callbacks import *
from nupic.frameworks.opf.metrics import MetricSpec
from nupic.frameworks.opf.opf_utils import (InferenceType,
InferenceElement)
from nupic.support import aggregationDivide
from nupic.frameworks.opf.opf_task_driver import (
IterationPhaseSpecLearnOnly,
IterationPhaseSpecInferOnly,
IterationPhaseSpecLearnAndInfer)
VERBOSITY = 0
# Model Configuration Dictionary:
#
# Define the model parameters and adjust for any modifications if imported
# from a sub-experiment.
#
# These fields might be modified by a sub-experiment; this dict is passed
# between the sub-experiment and base experiment
#
#
# NOTE: Use of DEFERRED VALUE-GETTERs: dictionary fields and list elements
# within the config dictionary may be assigned futures derived from the
# ValueGetterBase class, such as DeferredDictLookup.
# This facility is particularly handy for enabling substitution of values in
# the config dictionary from other values in the config dictionary, which is
# needed by permutation.py-based experiments. These values will be resolved
# during the call to applyValueGettersToContainer(),
# which we call after the base experiment's config dictionary is updated from
# the sub-experiment. See ValueGetterBase and
# DeferredDictLookup for more details about value-getters.
#
# For each custom encoder parameter to be exposed to the sub-experiment/
# permutation overrides, define a variable in this section, using key names
# beginning with a single underscore character to avoid collisions with
# pre-defined keys (e.g., _dsEncoderFieldName2_N).
#
# Example:
# config = dict(
# _dsEncoderFieldName2_N = 70,
# _dsEncoderFieldName2_W = 5,
# dsEncoderSchema = [
# base=dict(
# fieldname='Name2', type='ScalarEncoder',
# name='Name2', minval=0, maxval=270, clipInput=True,
# n=DeferredDictLookup('_dsEncoderFieldName2_N'),
# w=DeferredDictLookup('_dsEncoderFieldName2_W')),
# ],
# )
# updateConfigFromSubConfig(config)
# applyValueGettersToContainer(config)
config = {
# Type of model that the rest of these parameters apply to.
'model': "HTMPrediction",
# Version that specifies the format of the config.
'version': 1,
# Intermediate variables used to compute fields in modelParams and also
# referenced from the control section.
'aggregationInfo': {
'days': 0,
'hours': 0,
'microseconds': 0,
'milliseconds': 0,
'minutes': 0,
'months': 0,
'seconds': 0,
'weeks': 0,
'years': 0,
'fields': [(u'c1', 'first'), (u'c0', 'first')],
},
'predictAheadTime': None,
# Model parameter dictionary.
'modelParams': {
# The type of inference that this model will perform
'inferenceType': 'TemporalMultiStep',
'sensorParams': {
# Sensor diagnostic output verbosity control;
# if > 0: sensor region will print out on screen what it's sensing
# at each step 0: silent; >=1: some info; >=2: more info;
# >=3: even more info (see compute() in py/regions/RecordSensor.py)
'verbosity' : VERBOSITY,
# Example:
# dsEncoderSchema = [
# DeferredDictLookup('__field_name_encoder'),
# ],
#
# (value generated from DS_ENCODER_SCHEMA)
'encoders': {
u'c0_timeOfDay': { 'fieldname': u'c0',
'name': u'c0_timeOfDay',
'timeOfDay': (21, 1),
'type': 'DateEncoder'},
u'c0_dayOfWeek': { 'dayOfWeek': (21, 1),
'fieldname': u'c0',
'name': u'c0_dayOfWeek',
'type': 'DateEncoder'},
u'c0_weekend': { 'fieldname': u'c0',
'name': u'c0_weekend',
'type': 'DateEncoder',
'weekend': 21},
u'c1': { 'clipInput': True,
'fieldname': u'c1',
'n': 100,
'name': u'c1',
'type': 'AdaptiveScalarEncoder',
'w': 21},
},
# A dictionary specifying the period for automatically-generated
# resets from a RecordSensor;
#
# None = disable automatically-generated resets (also disabled if
# all of the specified values evaluate to 0).
      # Valid keys are any desired combination of the following:
# days, hours, minutes, seconds, milliseconds, microseconds, weeks
#
# Example for 1.5 days: sensorAutoReset = dict(days=1,hours=12),
#
# (value generated from SENSOR_AUTO_RESET)
'sensorAutoReset' : None,
},
'spEnable': True,
'spParams': {
# SP diagnostic output verbosity control;
# 0: silent; >=1: some info; >=2: more info;
'spVerbosity' : VERBOSITY,
'globalInhibition': 1,
# Number of cell columns in the cortical region (same number for
# SP and TM)
# (see also tpNCellsPerCol)
'columnCount': 2048,
'inputWidth': 0,
# SP inhibition control (absolute value);
# Maximum number of active columns in the SP region's output (when
# there are more, the weaker ones are suppressed)
'numActiveColumnsPerInhArea': 40,
'seed': 1956,
# potentialPct
# What percent of the columns's receptive field is available
# for potential synapses. At initialization time, we will
# choose potentialPct * (2*potentialRadius+1)^2
'potentialPct': 0.5,
# The default connected threshold. Any synapse whose
# permanence value is above the connected threshold is
# a "connected synapse", meaning it can contribute to the
# cell's firing. Typical value is 0.10. Cells whose activity
# level before inhibition falls below minDutyCycleBeforeInh
# will have their own internal synPermConnectedCell
# threshold set below this default value.
# (This concept applies to both SP and TM and so 'cells'
# is correct here as opposed to 'columns')
'synPermConnected': 0.1,
'synPermActiveInc': 0.1,
'synPermInactiveDec': 0.01,
},
# Controls whether TM is enabled or disabled;
# TM is necessary for making temporal predictions, such as predicting
# the next inputs. Without TM, the model is only capable of
# reconstructing missing sensor inputs (via SP).
'tmEnable' : True,
'tmParams': {
# TM diagnostic output verbosity control;
# 0: silent; [1..6]: increasing levels of verbosity
# (see verbosity in nupic/trunk/py/nupic/research/backtracking_tm.py and backtracking_tm_cpp.py)
'verbosity': 0,
# Number of cell columns in the cortical region (same number for
# SP and TM)
# (see also tpNCellsPerCol)
'columnCount': 2048,
# The number of cells (i.e., states), allocated per column.
'cellsPerColumn': 32,
'inputWidth': 2048,
'seed': 1960,
# Temporal Pooler implementation selector (see _getTPClass in
# CLARegion.py).
'temporalImp': 'cpp',
# New Synapse formation count
# NOTE: If None, use spNumActivePerInhArea
#
# TODO: need better explanation
'newSynapseCount': 20,
# Maximum number of synapses per segment
# > 0 for fixed-size CLA
# -1 for non-fixed-size CLA
#
# TODO: for Ron: once the appropriate value is placed in TM
# constructor, see if we should eliminate this parameter from
# description.py.
'maxSynapsesPerSegment': 32,
# Maximum number of segments per cell
# > 0 for fixed-size CLA
# -1 for non-fixed-size CLA
#
# TODO: for Ron: once the appropriate value is placed in TM
# constructor, see if we should eliminate this parameter from
# description.py.
'maxSegmentsPerCell': 128,
# Initial Permanence
# TODO: need better explanation
'initialPerm': 0.21,
# Permanence Increment
'permanenceInc': 0.1,
# Permanence Decrement
# If set to None, will automatically default to tpPermanenceInc
# value.
'permanenceDec' : 0.1,
'globalDecay': 0.0,
'maxAge': 0,
# Minimum number of active synapses for a segment to be considered
# during search for the best-matching segments.
# None=use default
# Replaces: tpMinThreshold
'minThreshold': 12,
# Segment activation threshold.
# A segment is active if it has >= tpSegmentActivationThreshold
# connected synapses that are active due to infActiveState
# None=use default
# Replaces: tpActivationThreshold
'activationThreshold': 16,
'outputType': 'normal',
# "Pay Attention Mode" length. This tells the TM how many new
# elements to append to the end of a learned sequence at a time.
# Smaller values are better for datasets with short sequences,
# higher values are better for datasets with long sequences.
'pamLength': 1,
},
'clParams': {
'regionName' : 'SDRClassifierRegion',
# Classifier diagnostic output verbosity control;
# 0: silent; [1..6]: increasing levels of verbosity
'verbosity' : VERBOSITY,
# This controls how fast the classifier learns/forgets. Higher values
# make it adapt faster and forget older patterns faster.
'alpha': 0.001,
# This is set after the call to updateConfigFromSubConfig and is
# computed from the aggregationInfo and predictAheadTime.
'steps': '24',
},
'trainSPNetOnlyIfRequested': False,
},
}
# end of config dictionary
# Adjust base config dictionary for any modifications if imported from a
# sub-experiment
updateConfigFromSubConfig(config)
# Compute predictionSteps based on the predictAheadTime and the aggregation
# period, which may be permuted over.
if config['predictAheadTime'] is not None:
predictionSteps = int(round(aggregationDivide(
config['predictAheadTime'], config['aggregationInfo'])))
assert (predictionSteps >= 1)
config['modelParams']['clParams']['steps'] = str(predictionSteps)
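# Worked example (illustrative, not from the original file): with a
# predictAheadTime of 24 hours and a 1-hour aggregation period,
# aggregationDivide() yields 24.0, so 'steps' becomes '24' -- matching the
# default set in clParams above.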
# Adjust config by applying ValueGetterBase-derived
# futures. NOTE: this MUST be called after updateConfigFromSubConfig() in order
# to support value-getter-based substitutions from the sub-experiment (if any)
applyValueGettersToContainer(config)
dataPath = os.path.abspath(os.path.join(os.path.dirname(__file__),
'data.csv'))
control = {
# The environment that the current model is being run in
"environment": 'nupic',
# Input stream specification per py/nupicengine/cluster/database/StreamDef.json.
#
'dataset' : { 'aggregation': config['aggregationInfo'],
u'info': u'82b42f21-7f86-47b3-bab4-3738703bf612',
u'streams': [ { u'columns': [u'c0', u'c1'],
u'info': u'82b42f21-7f86-47b3-bab4-3738703bf612',
u'source': 'file://%s' % (dataPath),
u'first_record': config['firstRecord'],
u'last_record': config['lastRecord'],
u'types': [u'datetime', u'float']}],
u'timeField': u'c0',
u'version': 1},
# Iteration count: maximum number of iterations. Each iteration corresponds
# to one record from the (possibly aggregated) dataset. The task is
# terminated when either number of iterations reaches iterationCount or
# all records in the (possibly aggregated) database have been processed,
# whichever occurs first.
#
# iterationCount of -1 = iterate over the entire dataset
'iterationCount' : -1,
# A dictionary containing all the supplementary parameters for inference
"inferenceArgs":{u'predictedField': u'c1', u'predictionSteps': [24]},
# Metrics: A list of MetricSpecs that instantiate the metrics that are
# computed for this experiment
'metrics':[
MetricSpec(field=u'c1', metric='multiStep', inferenceElement='multiStepBestPredictions', params={'window': 1000, 'steps': [24], 'errorMetric': 'altMAPE'}),
],
# Logged Metrics: A sequence of regular expressions that specify which of
# the metrics from the Inference Specifications section MUST be logged for
# every prediction. The regex's correspond to the automatically generated
# metric labels. This is similar to the way the optimization metric is
# specified in permutations.py.
'loggedMetrics': ['.*'],
}
descriptionInterface = ExperimentDescriptionAPI(modelConfig=config,
control=control)
|
agpl-3.0
|
magicrub/MissionPlanner
|
Lib/encodings/cp865.py
|
93
|
35572
|
""" Python Character Mapping Codec generated from 'VENDORS/MICSFT/PC/CP865.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_map)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_map)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='cp865',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
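# Usage sketch (illustrative comments, not part of the generated module):
# once the encodings package registers this codec it resolves by name, and,
# per the tables below, u'\xe6'.encode('cp865') == '\x91' while
# '\x91'.decode('cp865') == u'\xe6'.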
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x0080: 0x00c7, # LATIN CAPITAL LETTER C WITH CEDILLA
0x0081: 0x00fc, # LATIN SMALL LETTER U WITH DIAERESIS
0x0082: 0x00e9, # LATIN SMALL LETTER E WITH ACUTE
0x0083: 0x00e2, # LATIN SMALL LETTER A WITH CIRCUMFLEX
0x0084: 0x00e4, # LATIN SMALL LETTER A WITH DIAERESIS
0x0085: 0x00e0, # LATIN SMALL LETTER A WITH GRAVE
0x0086: 0x00e5, # LATIN SMALL LETTER A WITH RING ABOVE
0x0087: 0x00e7, # LATIN SMALL LETTER C WITH CEDILLA
0x0088: 0x00ea, # LATIN SMALL LETTER E WITH CIRCUMFLEX
0x0089: 0x00eb, # LATIN SMALL LETTER E WITH DIAERESIS
0x008a: 0x00e8, # LATIN SMALL LETTER E WITH GRAVE
0x008b: 0x00ef, # LATIN SMALL LETTER I WITH DIAERESIS
0x008c: 0x00ee, # LATIN SMALL LETTER I WITH CIRCUMFLEX
0x008d: 0x00ec, # LATIN SMALL LETTER I WITH GRAVE
0x008e: 0x00c4, # LATIN CAPITAL LETTER A WITH DIAERESIS
0x008f: 0x00c5, # LATIN CAPITAL LETTER A WITH RING ABOVE
0x0090: 0x00c9, # LATIN CAPITAL LETTER E WITH ACUTE
0x0091: 0x00e6, # LATIN SMALL LIGATURE AE
0x0092: 0x00c6, # LATIN CAPITAL LIGATURE AE
0x0093: 0x00f4, # LATIN SMALL LETTER O WITH CIRCUMFLEX
0x0094: 0x00f6, # LATIN SMALL LETTER O WITH DIAERESIS
0x0095: 0x00f2, # LATIN SMALL LETTER O WITH GRAVE
0x0096: 0x00fb, # LATIN SMALL LETTER U WITH CIRCUMFLEX
0x0097: 0x00f9, # LATIN SMALL LETTER U WITH GRAVE
0x0098: 0x00ff, # LATIN SMALL LETTER Y WITH DIAERESIS
0x0099: 0x00d6, # LATIN CAPITAL LETTER O WITH DIAERESIS
0x009a: 0x00dc, # LATIN CAPITAL LETTER U WITH DIAERESIS
0x009b: 0x00f8, # LATIN SMALL LETTER O WITH STROKE
0x009c: 0x00a3, # POUND SIGN
0x009d: 0x00d8, # LATIN CAPITAL LETTER O WITH STROKE
0x009e: 0x20a7, # PESETA SIGN
0x009f: 0x0192, # LATIN SMALL LETTER F WITH HOOK
0x00a0: 0x00e1, # LATIN SMALL LETTER A WITH ACUTE
0x00a1: 0x00ed, # LATIN SMALL LETTER I WITH ACUTE
0x00a2: 0x00f3, # LATIN SMALL LETTER O WITH ACUTE
0x00a3: 0x00fa, # LATIN SMALL LETTER U WITH ACUTE
0x00a4: 0x00f1, # LATIN SMALL LETTER N WITH TILDE
0x00a5: 0x00d1, # LATIN CAPITAL LETTER N WITH TILDE
0x00a6: 0x00aa, # FEMININE ORDINAL INDICATOR
0x00a7: 0x00ba, # MASCULINE ORDINAL INDICATOR
0x00a8: 0x00bf, # INVERTED QUESTION MARK
0x00a9: 0x2310, # REVERSED NOT SIGN
0x00aa: 0x00ac, # NOT SIGN
0x00ab: 0x00bd, # VULGAR FRACTION ONE HALF
0x00ac: 0x00bc, # VULGAR FRACTION ONE QUARTER
0x00ad: 0x00a1, # INVERTED EXCLAMATION MARK
0x00ae: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00af: 0x00a4, # CURRENCY SIGN
0x00b0: 0x2591, # LIGHT SHADE
0x00b1: 0x2592, # MEDIUM SHADE
0x00b2: 0x2593, # DARK SHADE
0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL
0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x00b5: 0x2561, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
0x00b6: 0x2562, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
0x00b7: 0x2556, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
0x00b8: 0x2555, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL
0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT
0x00bd: 0x255c, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
0x00be: 0x255b, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT
0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL
0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x00c6: 0x255e, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
0x00c7: 0x255f, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL
0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x00cf: 0x2567, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
0x00d0: 0x2568, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
0x00d1: 0x2564, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
0x00d2: 0x2565, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
0x00d3: 0x2559, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
0x00d4: 0x2558, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
0x00d5: 0x2552, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
0x00d6: 0x2553, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
0x00d7: 0x256b, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
0x00d8: 0x256a, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT
0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x00db: 0x2588, # FULL BLOCK
0x00dc: 0x2584, # LOWER HALF BLOCK
0x00dd: 0x258c, # LEFT HALF BLOCK
0x00de: 0x2590, # RIGHT HALF BLOCK
0x00df: 0x2580, # UPPER HALF BLOCK
0x00e0: 0x03b1, # GREEK SMALL LETTER ALPHA
0x00e1: 0x00df, # LATIN SMALL LETTER SHARP S
0x00e2: 0x0393, # GREEK CAPITAL LETTER GAMMA
0x00e3: 0x03c0, # GREEK SMALL LETTER PI
0x00e4: 0x03a3, # GREEK CAPITAL LETTER SIGMA
0x00e5: 0x03c3, # GREEK SMALL LETTER SIGMA
0x00e6: 0x00b5, # MICRO SIGN
0x00e7: 0x03c4, # GREEK SMALL LETTER TAU
0x00e8: 0x03a6, # GREEK CAPITAL LETTER PHI
0x00e9: 0x0398, # GREEK CAPITAL LETTER THETA
0x00ea: 0x03a9, # GREEK CAPITAL LETTER OMEGA
0x00eb: 0x03b4, # GREEK SMALL LETTER DELTA
0x00ec: 0x221e, # INFINITY
0x00ed: 0x03c6, # GREEK SMALL LETTER PHI
0x00ee: 0x03b5, # GREEK SMALL LETTER EPSILON
0x00ef: 0x2229, # INTERSECTION
0x00f0: 0x2261, # IDENTICAL TO
0x00f1: 0x00b1, # PLUS-MINUS SIGN
0x00f2: 0x2265, # GREATER-THAN OR EQUAL TO
0x00f3: 0x2264, # LESS-THAN OR EQUAL TO
0x00f4: 0x2320, # TOP HALF INTEGRAL
0x00f5: 0x2321, # BOTTOM HALF INTEGRAL
0x00f6: 0x00f7, # DIVISION SIGN
0x00f7: 0x2248, # ALMOST EQUAL TO
0x00f8: 0x00b0, # DEGREE SIGN
0x00f9: 0x2219, # BULLET OPERATOR
0x00fa: 0x00b7, # MIDDLE DOT
0x00fb: 0x221a, # SQUARE ROOT
0x00fc: 0x207f, # SUPERSCRIPT LATIN SMALL LETTER N
0x00fd: 0x00b2, # SUPERSCRIPT TWO
0x00fe: 0x25a0, # BLACK SQUARE
0x00ff: 0x00a0, # NO-BREAK SPACE
})
### Decoding Table
decoding_table = (
u'\x00' # 0x0000 -> NULL
u'\x01' # 0x0001 -> START OF HEADING
u'\x02' # 0x0002 -> START OF TEXT
u'\x03' # 0x0003 -> END OF TEXT
u'\x04' # 0x0004 -> END OF TRANSMISSION
u'\x05' # 0x0005 -> ENQUIRY
u'\x06' # 0x0006 -> ACKNOWLEDGE
u'\x07' # 0x0007 -> BELL
u'\x08' # 0x0008 -> BACKSPACE
u'\t' # 0x0009 -> HORIZONTAL TABULATION
u'\n' # 0x000a -> LINE FEED
u'\x0b' # 0x000b -> VERTICAL TABULATION
u'\x0c' # 0x000c -> FORM FEED
u'\r' # 0x000d -> CARRIAGE RETURN
u'\x0e' # 0x000e -> SHIFT OUT
u'\x0f' # 0x000f -> SHIFT IN
u'\x10' # 0x0010 -> DATA LINK ESCAPE
u'\x11' # 0x0011 -> DEVICE CONTROL ONE
u'\x12' # 0x0012 -> DEVICE CONTROL TWO
u'\x13' # 0x0013 -> DEVICE CONTROL THREE
u'\x14' # 0x0014 -> DEVICE CONTROL FOUR
u'\x15' # 0x0015 -> NEGATIVE ACKNOWLEDGE
u'\x16' # 0x0016 -> SYNCHRONOUS IDLE
u'\x17' # 0x0017 -> END OF TRANSMISSION BLOCK
u'\x18' # 0x0018 -> CANCEL
u'\x19' # 0x0019 -> END OF MEDIUM
u'\x1a' # 0x001a -> SUBSTITUTE
u'\x1b' # 0x001b -> ESCAPE
u'\x1c' # 0x001c -> FILE SEPARATOR
u'\x1d' # 0x001d -> GROUP SEPARATOR
u'\x1e' # 0x001e -> RECORD SEPARATOR
u'\x1f' # 0x001f -> UNIT SEPARATOR
u' ' # 0x0020 -> SPACE
u'!' # 0x0021 -> EXCLAMATION MARK
u'"' # 0x0022 -> QUOTATION MARK
u'#' # 0x0023 -> NUMBER SIGN
u'$' # 0x0024 -> DOLLAR SIGN
u'%' # 0x0025 -> PERCENT SIGN
u'&' # 0x0026 -> AMPERSAND
u"'" # 0x0027 -> APOSTROPHE
u'(' # 0x0028 -> LEFT PARENTHESIS
u')' # 0x0029 -> RIGHT PARENTHESIS
u'*' # 0x002a -> ASTERISK
u'+' # 0x002b -> PLUS SIGN
u',' # 0x002c -> COMMA
u'-' # 0x002d -> HYPHEN-MINUS
u'.' # 0x002e -> FULL STOP
u'/' # 0x002f -> SOLIDUS
u'0' # 0x0030 -> DIGIT ZERO
u'1' # 0x0031 -> DIGIT ONE
u'2' # 0x0032 -> DIGIT TWO
u'3' # 0x0033 -> DIGIT THREE
u'4' # 0x0034 -> DIGIT FOUR
u'5' # 0x0035 -> DIGIT FIVE
u'6' # 0x0036 -> DIGIT SIX
u'7' # 0x0037 -> DIGIT SEVEN
u'8' # 0x0038 -> DIGIT EIGHT
u'9' # 0x0039 -> DIGIT NINE
u':' # 0x003a -> COLON
u';' # 0x003b -> SEMICOLON
u'<' # 0x003c -> LESS-THAN SIGN
u'=' # 0x003d -> EQUALS SIGN
u'>' # 0x003e -> GREATER-THAN SIGN
u'?' # 0x003f -> QUESTION MARK
u'@' # 0x0040 -> COMMERCIAL AT
u'A' # 0x0041 -> LATIN CAPITAL LETTER A
u'B' # 0x0042 -> LATIN CAPITAL LETTER B
u'C' # 0x0043 -> LATIN CAPITAL LETTER C
u'D' # 0x0044 -> LATIN CAPITAL LETTER D
u'E' # 0x0045 -> LATIN CAPITAL LETTER E
u'F' # 0x0046 -> LATIN CAPITAL LETTER F
u'G' # 0x0047 -> LATIN CAPITAL LETTER G
u'H' # 0x0048 -> LATIN CAPITAL LETTER H
u'I' # 0x0049 -> LATIN CAPITAL LETTER I
u'J' # 0x004a -> LATIN CAPITAL LETTER J
u'K' # 0x004b -> LATIN CAPITAL LETTER K
u'L' # 0x004c -> LATIN CAPITAL LETTER L
u'M' # 0x004d -> LATIN CAPITAL LETTER M
u'N' # 0x004e -> LATIN CAPITAL LETTER N
u'O' # 0x004f -> LATIN CAPITAL LETTER O
u'P' # 0x0050 -> LATIN CAPITAL LETTER P
u'Q' # 0x0051 -> LATIN CAPITAL LETTER Q
u'R' # 0x0052 -> LATIN CAPITAL LETTER R
u'S' # 0x0053 -> LATIN CAPITAL LETTER S
u'T' # 0x0054 -> LATIN CAPITAL LETTER T
u'U' # 0x0055 -> LATIN CAPITAL LETTER U
u'V' # 0x0056 -> LATIN CAPITAL LETTER V
u'W' # 0x0057 -> LATIN CAPITAL LETTER W
u'X' # 0x0058 -> LATIN CAPITAL LETTER X
u'Y' # 0x0059 -> LATIN CAPITAL LETTER Y
u'Z' # 0x005a -> LATIN CAPITAL LETTER Z
u'[' # 0x005b -> LEFT SQUARE BRACKET
u'\\' # 0x005c -> REVERSE SOLIDUS
u']' # 0x005d -> RIGHT SQUARE BRACKET
u'^' # 0x005e -> CIRCUMFLEX ACCENT
u'_' # 0x005f -> LOW LINE
u'`' # 0x0060 -> GRAVE ACCENT
u'a' # 0x0061 -> LATIN SMALL LETTER A
u'b' # 0x0062 -> LATIN SMALL LETTER B
u'c' # 0x0063 -> LATIN SMALL LETTER C
u'd' # 0x0064 -> LATIN SMALL LETTER D
u'e' # 0x0065 -> LATIN SMALL LETTER E
u'f' # 0x0066 -> LATIN SMALL LETTER F
u'g' # 0x0067 -> LATIN SMALL LETTER G
u'h' # 0x0068 -> LATIN SMALL LETTER H
u'i' # 0x0069 -> LATIN SMALL LETTER I
u'j' # 0x006a -> LATIN SMALL LETTER J
u'k' # 0x006b -> LATIN SMALL LETTER K
u'l' # 0x006c -> LATIN SMALL LETTER L
u'm' # 0x006d -> LATIN SMALL LETTER M
u'n' # 0x006e -> LATIN SMALL LETTER N
u'o' # 0x006f -> LATIN SMALL LETTER O
u'p' # 0x0070 -> LATIN SMALL LETTER P
u'q' # 0x0071 -> LATIN SMALL LETTER Q
u'r' # 0x0072 -> LATIN SMALL LETTER R
u's' # 0x0073 -> LATIN SMALL LETTER S
u't' # 0x0074 -> LATIN SMALL LETTER T
u'u' # 0x0075 -> LATIN SMALL LETTER U
u'v' # 0x0076 -> LATIN SMALL LETTER V
u'w' # 0x0077 -> LATIN SMALL LETTER W
u'x' # 0x0078 -> LATIN SMALL LETTER X
u'y' # 0x0079 -> LATIN SMALL LETTER Y
u'z' # 0x007a -> LATIN SMALL LETTER Z
u'{' # 0x007b -> LEFT CURLY BRACKET
u'|' # 0x007c -> VERTICAL LINE
u'}' # 0x007d -> RIGHT CURLY BRACKET
u'~' # 0x007e -> TILDE
u'\x7f' # 0x007f -> DELETE
u'\xc7' # 0x0080 -> LATIN CAPITAL LETTER C WITH CEDILLA
u'\xfc' # 0x0081 -> LATIN SMALL LETTER U WITH DIAERESIS
u'\xe9' # 0x0082 -> LATIN SMALL LETTER E WITH ACUTE
u'\xe2' # 0x0083 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
u'\xe4' # 0x0084 -> LATIN SMALL LETTER A WITH DIAERESIS
u'\xe0' # 0x0085 -> LATIN SMALL LETTER A WITH GRAVE
u'\xe5' # 0x0086 -> LATIN SMALL LETTER A WITH RING ABOVE
u'\xe7' # 0x0087 -> LATIN SMALL LETTER C WITH CEDILLA
u'\xea' # 0x0088 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
u'\xeb' # 0x0089 -> LATIN SMALL LETTER E WITH DIAERESIS
u'\xe8' # 0x008a -> LATIN SMALL LETTER E WITH GRAVE
u'\xef' # 0x008b -> LATIN SMALL LETTER I WITH DIAERESIS
u'\xee' # 0x008c -> LATIN SMALL LETTER I WITH CIRCUMFLEX
u'\xec' # 0x008d -> LATIN SMALL LETTER I WITH GRAVE
u'\xc4' # 0x008e -> LATIN CAPITAL LETTER A WITH DIAERESIS
u'\xc5' # 0x008f -> LATIN CAPITAL LETTER A WITH RING ABOVE
u'\xc9' # 0x0090 -> LATIN CAPITAL LETTER E WITH ACUTE
u'\xe6' # 0x0091 -> LATIN SMALL LIGATURE AE
u'\xc6' # 0x0092 -> LATIN CAPITAL LIGATURE AE
u'\xf4' # 0x0093 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
u'\xf6' # 0x0094 -> LATIN SMALL LETTER O WITH DIAERESIS
u'\xf2' # 0x0095 -> LATIN SMALL LETTER O WITH GRAVE
u'\xfb' # 0x0096 -> LATIN SMALL LETTER U WITH CIRCUMFLEX
u'\xf9' # 0x0097 -> LATIN SMALL LETTER U WITH GRAVE
u'\xff' # 0x0098 -> LATIN SMALL LETTER Y WITH DIAERESIS
u'\xd6' # 0x0099 -> LATIN CAPITAL LETTER O WITH DIAERESIS
u'\xdc' # 0x009a -> LATIN CAPITAL LETTER U WITH DIAERESIS
u'\xf8' # 0x009b -> LATIN SMALL LETTER O WITH STROKE
u'\xa3' # 0x009c -> POUND SIGN
u'\xd8' # 0x009d -> LATIN CAPITAL LETTER O WITH STROKE
u'\u20a7' # 0x009e -> PESETA SIGN
u'\u0192' # 0x009f -> LATIN SMALL LETTER F WITH HOOK
u'\xe1' # 0x00a0 -> LATIN SMALL LETTER A WITH ACUTE
u'\xed' # 0x00a1 -> LATIN SMALL LETTER I WITH ACUTE
u'\xf3' # 0x00a2 -> LATIN SMALL LETTER O WITH ACUTE
u'\xfa' # 0x00a3 -> LATIN SMALL LETTER U WITH ACUTE
u'\xf1' # 0x00a4 -> LATIN SMALL LETTER N WITH TILDE
u'\xd1' # 0x00a5 -> LATIN CAPITAL LETTER N WITH TILDE
u'\xaa' # 0x00a6 -> FEMININE ORDINAL INDICATOR
u'\xba' # 0x00a7 -> MASCULINE ORDINAL INDICATOR
u'\xbf' # 0x00a8 -> INVERTED QUESTION MARK
u'\u2310' # 0x00a9 -> REVERSED NOT SIGN
u'\xac' # 0x00aa -> NOT SIGN
u'\xbd' # 0x00ab -> VULGAR FRACTION ONE HALF
u'\xbc' # 0x00ac -> VULGAR FRACTION ONE QUARTER
u'\xa1' # 0x00ad -> INVERTED EXCLAMATION MARK
u'\xab' # 0x00ae -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xa4' # 0x00af -> CURRENCY SIGN
u'\u2591' # 0x00b0 -> LIGHT SHADE
u'\u2592' # 0x00b1 -> MEDIUM SHADE
u'\u2593' # 0x00b2 -> DARK SHADE
u'\u2502' # 0x00b3 -> BOX DRAWINGS LIGHT VERTICAL
u'\u2524' # 0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT
u'\u2561' # 0x00b5 -> BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
u'\u2562' # 0x00b6 -> BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
u'\u2556' # 0x00b7 -> BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
u'\u2555' # 0x00b8 -> BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
u'\u2563' # 0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT
u'\u2551' # 0x00ba -> BOX DRAWINGS DOUBLE VERTICAL
u'\u2557' # 0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT
u'\u255d' # 0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT
u'\u255c' # 0x00bd -> BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
u'\u255b' # 0x00be -> BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
u'\u2510' # 0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT
u'\u2514' # 0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT
u'\u2534' # 0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL
u'\u252c' # 0x00c2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
u'\u251c' # 0x00c3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT
u'\u2500' # 0x00c4 -> BOX DRAWINGS LIGHT HORIZONTAL
u'\u253c' # 0x00c5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
u'\u255e' # 0x00c6 -> BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
u'\u255f' # 0x00c7 -> BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
u'\u255a' # 0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT
u'\u2554' # 0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT
u'\u2569' # 0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL
u'\u2566' # 0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
u'\u2560' # 0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
u'\u2550' # 0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL
u'\u256c' # 0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
u'\u2567' # 0x00cf -> BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
u'\u2568' # 0x00d0 -> BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
u'\u2564' # 0x00d1 -> BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
u'\u2565' # 0x00d2 -> BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
u'\u2559' # 0x00d3 -> BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
u'\u2558' # 0x00d4 -> BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
u'\u2552' # 0x00d5 -> BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
u'\u2553' # 0x00d6 -> BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
u'\u256b' # 0x00d7 -> BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
u'\u256a' # 0x00d8 -> BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
u'\u2518' # 0x00d9 -> BOX DRAWINGS LIGHT UP AND LEFT
u'\u250c' # 0x00da -> BOX DRAWINGS LIGHT DOWN AND RIGHT
u'\u2588' # 0x00db -> FULL BLOCK
u'\u2584' # 0x00dc -> LOWER HALF BLOCK
u'\u258c' # 0x00dd -> LEFT HALF BLOCK
u'\u2590' # 0x00de -> RIGHT HALF BLOCK
u'\u2580' # 0x00df -> UPPER HALF BLOCK
u'\u03b1' # 0x00e0 -> GREEK SMALL LETTER ALPHA
u'\xdf' # 0x00e1 -> LATIN SMALL LETTER SHARP S
u'\u0393' # 0x00e2 -> GREEK CAPITAL LETTER GAMMA
u'\u03c0' # 0x00e3 -> GREEK SMALL LETTER PI
u'\u03a3' # 0x00e4 -> GREEK CAPITAL LETTER SIGMA
u'\u03c3' # 0x00e5 -> GREEK SMALL LETTER SIGMA
u'\xb5' # 0x00e6 -> MICRO SIGN
u'\u03c4' # 0x00e7 -> GREEK SMALL LETTER TAU
u'\u03a6' # 0x00e8 -> GREEK CAPITAL LETTER PHI
u'\u0398' # 0x00e9 -> GREEK CAPITAL LETTER THETA
u'\u03a9' # 0x00ea -> GREEK CAPITAL LETTER OMEGA
u'\u03b4' # 0x00eb -> GREEK SMALL LETTER DELTA
u'\u221e' # 0x00ec -> INFINITY
u'\u03c6' # 0x00ed -> GREEK SMALL LETTER PHI
u'\u03b5' # 0x00ee -> GREEK SMALL LETTER EPSILON
u'\u2229' # 0x00ef -> INTERSECTION
u'\u2261' # 0x00f0 -> IDENTICAL TO
u'\xb1' # 0x00f1 -> PLUS-MINUS SIGN
u'\u2265' # 0x00f2 -> GREATER-THAN OR EQUAL TO
u'\u2264' # 0x00f3 -> LESS-THAN OR EQUAL TO
u'\u2320' # 0x00f4 -> TOP HALF INTEGRAL
u'\u2321' # 0x00f5 -> BOTTOM HALF INTEGRAL
u'\xf7' # 0x00f6 -> DIVISION SIGN
u'\u2248' # 0x00f7 -> ALMOST EQUAL TO
u'\xb0' # 0x00f8 -> DEGREE SIGN
u'\u2219' # 0x00f9 -> BULLET OPERATOR
u'\xb7' # 0x00fa -> MIDDLE DOT
u'\u221a' # 0x00fb -> SQUARE ROOT
u'\u207f' # 0x00fc -> SUPERSCRIPT LATIN SMALL LETTER N
u'\xb2' # 0x00fd -> SUPERSCRIPT TWO
u'\u25a0' # 0x00fe -> BLACK SQUARE
u'\xa0' # 0x00ff -> NO-BREAK SPACE
)
### Encoding Map
encoding_map = {
0x0000: 0x0000, # NULL
0x0001: 0x0001, # START OF HEADING
0x0002: 0x0002, # START OF TEXT
0x0003: 0x0003, # END OF TEXT
0x0004: 0x0004, # END OF TRANSMISSION
0x0005: 0x0005, # ENQUIRY
0x0006: 0x0006, # ACKNOWLEDGE
0x0007: 0x0007, # BELL
0x0008: 0x0008, # BACKSPACE
0x0009: 0x0009, # HORIZONTAL TABULATION
0x000a: 0x000a, # LINE FEED
0x000b: 0x000b, # VERTICAL TABULATION
0x000c: 0x000c, # FORM FEED
0x000d: 0x000d, # CARRIAGE RETURN
0x000e: 0x000e, # SHIFT OUT
0x000f: 0x000f, # SHIFT IN
0x0010: 0x0010, # DATA LINK ESCAPE
0x0011: 0x0011, # DEVICE CONTROL ONE
0x0012: 0x0012, # DEVICE CONTROL TWO
0x0013: 0x0013, # DEVICE CONTROL THREE
0x0014: 0x0014, # DEVICE CONTROL FOUR
0x0015: 0x0015, # NEGATIVE ACKNOWLEDGE
0x0016: 0x0016, # SYNCHRONOUS IDLE
0x0017: 0x0017, # END OF TRANSMISSION BLOCK
0x0018: 0x0018, # CANCEL
0x0019: 0x0019, # END OF MEDIUM
0x001a: 0x001a, # SUBSTITUTE
0x001b: 0x001b, # ESCAPE
0x001c: 0x001c, # FILE SEPARATOR
0x001d: 0x001d, # GROUP SEPARATOR
0x001e: 0x001e, # RECORD SEPARATOR
0x001f: 0x001f, # UNIT SEPARATOR
0x0020: 0x0020, # SPACE
0x0021: 0x0021, # EXCLAMATION MARK
0x0022: 0x0022, # QUOTATION MARK
0x0023: 0x0023, # NUMBER SIGN
0x0024: 0x0024, # DOLLAR SIGN
0x0025: 0x0025, # PERCENT SIGN
0x0026: 0x0026, # AMPERSAND
0x0027: 0x0027, # APOSTROPHE
0x0028: 0x0028, # LEFT PARENTHESIS
0x0029: 0x0029, # RIGHT PARENTHESIS
0x002a: 0x002a, # ASTERISK
0x002b: 0x002b, # PLUS SIGN
0x002c: 0x002c, # COMMA
0x002d: 0x002d, # HYPHEN-MINUS
0x002e: 0x002e, # FULL STOP
0x002f: 0x002f, # SOLIDUS
0x0030: 0x0030, # DIGIT ZERO
0x0031: 0x0031, # DIGIT ONE
0x0032: 0x0032, # DIGIT TWO
0x0033: 0x0033, # DIGIT THREE
0x0034: 0x0034, # DIGIT FOUR
0x0035: 0x0035, # DIGIT FIVE
0x0036: 0x0036, # DIGIT SIX
0x0037: 0x0037, # DIGIT SEVEN
0x0038: 0x0038, # DIGIT EIGHT
0x0039: 0x0039, # DIGIT NINE
0x003a: 0x003a, # COLON
0x003b: 0x003b, # SEMICOLON
0x003c: 0x003c, # LESS-THAN SIGN
0x003d: 0x003d, # EQUALS SIGN
0x003e: 0x003e, # GREATER-THAN SIGN
0x003f: 0x003f, # QUESTION MARK
0x0040: 0x0040, # COMMERCIAL AT
0x0041: 0x0041, # LATIN CAPITAL LETTER A
0x0042: 0x0042, # LATIN CAPITAL LETTER B
0x0043: 0x0043, # LATIN CAPITAL LETTER C
0x0044: 0x0044, # LATIN CAPITAL LETTER D
0x0045: 0x0045, # LATIN CAPITAL LETTER E
0x0046: 0x0046, # LATIN CAPITAL LETTER F
0x0047: 0x0047, # LATIN CAPITAL LETTER G
0x0048: 0x0048, # LATIN CAPITAL LETTER H
0x0049: 0x0049, # LATIN CAPITAL LETTER I
0x004a: 0x004a, # LATIN CAPITAL LETTER J
0x004b: 0x004b, # LATIN CAPITAL LETTER K
0x004c: 0x004c, # LATIN CAPITAL LETTER L
0x004d: 0x004d, # LATIN CAPITAL LETTER M
0x004e: 0x004e, # LATIN CAPITAL LETTER N
0x004f: 0x004f, # LATIN CAPITAL LETTER O
0x0050: 0x0050, # LATIN CAPITAL LETTER P
0x0051: 0x0051, # LATIN CAPITAL LETTER Q
0x0052: 0x0052, # LATIN CAPITAL LETTER R
0x0053: 0x0053, # LATIN CAPITAL LETTER S
0x0054: 0x0054, # LATIN CAPITAL LETTER T
0x0055: 0x0055, # LATIN CAPITAL LETTER U
0x0056: 0x0056, # LATIN CAPITAL LETTER V
0x0057: 0x0057, # LATIN CAPITAL LETTER W
0x0058: 0x0058, # LATIN CAPITAL LETTER X
0x0059: 0x0059, # LATIN CAPITAL LETTER Y
0x005a: 0x005a, # LATIN CAPITAL LETTER Z
0x005b: 0x005b, # LEFT SQUARE BRACKET
0x005c: 0x005c, # REVERSE SOLIDUS
0x005d: 0x005d, # RIGHT SQUARE BRACKET
0x005e: 0x005e, # CIRCUMFLEX ACCENT
0x005f: 0x005f, # LOW LINE
0x0060: 0x0060, # GRAVE ACCENT
0x0061: 0x0061, # LATIN SMALL LETTER A
0x0062: 0x0062, # LATIN SMALL LETTER B
0x0063: 0x0063, # LATIN SMALL LETTER C
0x0064: 0x0064, # LATIN SMALL LETTER D
0x0065: 0x0065, # LATIN SMALL LETTER E
0x0066: 0x0066, # LATIN SMALL LETTER F
0x0067: 0x0067, # LATIN SMALL LETTER G
0x0068: 0x0068, # LATIN SMALL LETTER H
0x0069: 0x0069, # LATIN SMALL LETTER I
0x006a: 0x006a, # LATIN SMALL LETTER J
0x006b: 0x006b, # LATIN SMALL LETTER K
0x006c: 0x006c, # LATIN SMALL LETTER L
0x006d: 0x006d, # LATIN SMALL LETTER M
0x006e: 0x006e, # LATIN SMALL LETTER N
0x006f: 0x006f, # LATIN SMALL LETTER O
0x0070: 0x0070, # LATIN SMALL LETTER P
0x0071: 0x0071, # LATIN SMALL LETTER Q
0x0072: 0x0072, # LATIN SMALL LETTER R
0x0073: 0x0073, # LATIN SMALL LETTER S
0x0074: 0x0074, # LATIN SMALL LETTER T
0x0075: 0x0075, # LATIN SMALL LETTER U
0x0076: 0x0076, # LATIN SMALL LETTER V
0x0077: 0x0077, # LATIN SMALL LETTER W
0x0078: 0x0078, # LATIN SMALL LETTER X
0x0079: 0x0079, # LATIN SMALL LETTER Y
0x007a: 0x007a, # LATIN SMALL LETTER Z
0x007b: 0x007b, # LEFT CURLY BRACKET
0x007c: 0x007c, # VERTICAL LINE
0x007d: 0x007d, # RIGHT CURLY BRACKET
0x007e: 0x007e, # TILDE
0x007f: 0x007f, # DELETE
0x00a0: 0x00ff, # NO-BREAK SPACE
0x00a1: 0x00ad, # INVERTED EXCLAMATION MARK
0x00a3: 0x009c, # POUND SIGN
0x00a4: 0x00af, # CURRENCY SIGN
0x00aa: 0x00a6, # FEMININE ORDINAL INDICATOR
0x00ab: 0x00ae, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00ac: 0x00aa, # NOT SIGN
0x00b0: 0x00f8, # DEGREE SIGN
0x00b1: 0x00f1, # PLUS-MINUS SIGN
0x00b2: 0x00fd, # SUPERSCRIPT TWO
0x00b5: 0x00e6, # MICRO SIGN
0x00b7: 0x00fa, # MIDDLE DOT
0x00ba: 0x00a7, # MASCULINE ORDINAL INDICATOR
0x00bc: 0x00ac, # VULGAR FRACTION ONE QUARTER
0x00bd: 0x00ab, # VULGAR FRACTION ONE HALF
0x00bf: 0x00a8, # INVERTED QUESTION MARK
0x00c4: 0x008e, # LATIN CAPITAL LETTER A WITH DIAERESIS
0x00c5: 0x008f, # LATIN CAPITAL LETTER A WITH RING ABOVE
0x00c6: 0x0092, # LATIN CAPITAL LIGATURE AE
0x00c7: 0x0080, # LATIN CAPITAL LETTER C WITH CEDILLA
0x00c9: 0x0090, # LATIN CAPITAL LETTER E WITH ACUTE
0x00d1: 0x00a5, # LATIN CAPITAL LETTER N WITH TILDE
0x00d6: 0x0099, # LATIN CAPITAL LETTER O WITH DIAERESIS
0x00d8: 0x009d, # LATIN CAPITAL LETTER O WITH STROKE
0x00dc: 0x009a, # LATIN CAPITAL LETTER U WITH DIAERESIS
0x00df: 0x00e1, # LATIN SMALL LETTER SHARP S
0x00e0: 0x0085, # LATIN SMALL LETTER A WITH GRAVE
0x00e1: 0x00a0, # LATIN SMALL LETTER A WITH ACUTE
0x00e2: 0x0083, # LATIN SMALL LETTER A WITH CIRCUMFLEX
0x00e4: 0x0084, # LATIN SMALL LETTER A WITH DIAERESIS
0x00e5: 0x0086, # LATIN SMALL LETTER A WITH RING ABOVE
0x00e6: 0x0091, # LATIN SMALL LIGATURE AE
0x00e7: 0x0087, # LATIN SMALL LETTER C WITH CEDILLA
0x00e8: 0x008a, # LATIN SMALL LETTER E WITH GRAVE
0x00e9: 0x0082, # LATIN SMALL LETTER E WITH ACUTE
0x00ea: 0x0088, # LATIN SMALL LETTER E WITH CIRCUMFLEX
0x00eb: 0x0089, # LATIN SMALL LETTER E WITH DIAERESIS
0x00ec: 0x008d, # LATIN SMALL LETTER I WITH GRAVE
0x00ed: 0x00a1, # LATIN SMALL LETTER I WITH ACUTE
0x00ee: 0x008c, # LATIN SMALL LETTER I WITH CIRCUMFLEX
0x00ef: 0x008b, # LATIN SMALL LETTER I WITH DIAERESIS
0x00f1: 0x00a4, # LATIN SMALL LETTER N WITH TILDE
0x00f2: 0x0095, # LATIN SMALL LETTER O WITH GRAVE
0x00f3: 0x00a2, # LATIN SMALL LETTER O WITH ACUTE
0x00f4: 0x0093, # LATIN SMALL LETTER O WITH CIRCUMFLEX
0x00f6: 0x0094, # LATIN SMALL LETTER O WITH DIAERESIS
0x00f7: 0x00f6, # DIVISION SIGN
0x00f8: 0x009b, # LATIN SMALL LETTER O WITH STROKE
0x00f9: 0x0097, # LATIN SMALL LETTER U WITH GRAVE
0x00fa: 0x00a3, # LATIN SMALL LETTER U WITH ACUTE
0x00fb: 0x0096, # LATIN SMALL LETTER U WITH CIRCUMFLEX
0x00fc: 0x0081, # LATIN SMALL LETTER U WITH DIAERESIS
0x00ff: 0x0098, # LATIN SMALL LETTER Y WITH DIAERESIS
0x0192: 0x009f, # LATIN SMALL LETTER F WITH HOOK
0x0393: 0x00e2, # GREEK CAPITAL LETTER GAMMA
0x0398: 0x00e9, # GREEK CAPITAL LETTER THETA
0x03a3: 0x00e4, # GREEK CAPITAL LETTER SIGMA
0x03a6: 0x00e8, # GREEK CAPITAL LETTER PHI
0x03a9: 0x00ea, # GREEK CAPITAL LETTER OMEGA
0x03b1: 0x00e0, # GREEK SMALL LETTER ALPHA
0x03b4: 0x00eb, # GREEK SMALL LETTER DELTA
0x03b5: 0x00ee, # GREEK SMALL LETTER EPSILON
0x03c0: 0x00e3, # GREEK SMALL LETTER PI
0x03c3: 0x00e5, # GREEK SMALL LETTER SIGMA
0x03c4: 0x00e7, # GREEK SMALL LETTER TAU
0x03c6: 0x00ed, # GREEK SMALL LETTER PHI
0x207f: 0x00fc, # SUPERSCRIPT LATIN SMALL LETTER N
0x20a7: 0x009e, # PESETA SIGN
0x2219: 0x00f9, # BULLET OPERATOR
0x221a: 0x00fb, # SQUARE ROOT
0x221e: 0x00ec, # INFINITY
0x2229: 0x00ef, # INTERSECTION
0x2248: 0x00f7, # ALMOST EQUAL TO
0x2261: 0x00f0, # IDENTICAL TO
0x2264: 0x00f3, # LESS-THAN OR EQUAL TO
0x2265: 0x00f2, # GREATER-THAN OR EQUAL TO
0x2310: 0x00a9, # REVERSED NOT SIGN
0x2320: 0x00f4, # TOP HALF INTEGRAL
0x2321: 0x00f5, # BOTTOM HALF INTEGRAL
0x2500: 0x00c4, # BOX DRAWINGS LIGHT HORIZONTAL
0x2502: 0x00b3, # BOX DRAWINGS LIGHT VERTICAL
0x250c: 0x00da, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x2510: 0x00bf, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x2514: 0x00c0, # BOX DRAWINGS LIGHT UP AND RIGHT
0x2518: 0x00d9, # BOX DRAWINGS LIGHT UP AND LEFT
0x251c: 0x00c3, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x2524: 0x00b4, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x252c: 0x00c2, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x2534: 0x00c1, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x253c: 0x00c5, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x2550: 0x00cd, # BOX DRAWINGS DOUBLE HORIZONTAL
0x2551: 0x00ba, # BOX DRAWINGS DOUBLE VERTICAL
0x2552: 0x00d5, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
0x2553: 0x00d6, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
0x2554: 0x00c9, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x2555: 0x00b8, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
0x2556: 0x00b7, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
0x2557: 0x00bb, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x2558: 0x00d4, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
0x2559: 0x00d3, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
0x255a: 0x00c8, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x255b: 0x00be, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
0x255c: 0x00bd, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
0x255d: 0x00bc, # BOX DRAWINGS DOUBLE UP AND LEFT
0x255e: 0x00c6, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
0x255f: 0x00c7, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
0x2560: 0x00cc, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x2561: 0x00b5, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
0x2562: 0x00b6, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
0x2563: 0x00b9, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x2564: 0x00d1, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
0x2565: 0x00d2, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
0x2566: 0x00cb, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x2567: 0x00cf, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
0x2568: 0x00d0, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
0x2569: 0x00ca, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x256a: 0x00d8, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
0x256b: 0x00d7, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
0x256c: 0x00ce, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x2580: 0x00df, # UPPER HALF BLOCK
0x2584: 0x00dc, # LOWER HALF BLOCK
0x2588: 0x00db, # FULL BLOCK
0x258c: 0x00dd, # LEFT HALF BLOCK
0x2590: 0x00de, # RIGHT HALF BLOCK
0x2591: 0x00b0, # LIGHT SHADE
0x2592: 0x00b1, # MEDIUM SHADE
0x2593: 0x00b2, # DARK SHADE
0x25a0: 0x00fe, # BLACK SQUARE
}
|
gpl-3.0
|
eric-stanley/youtube-dl
|
youtube_dl/extractor/r7.py
|
130
|
3674
|
# coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
js_to_json,
unescapeHTML,
int_or_none,
)
class R7IE(InfoExtractor):
_VALID_URL = r'''(?x)https?://
(?:
(?:[a-zA-Z]+)\.r7\.com(?:/[^/]+)+/idmedia/|
noticias\.r7\.com(?:/[^/]+)+/[^/]+-|
player\.r7\.com/video/i/
)
(?P<id>[\da-f]{24})
'''
_TESTS = [{
'url': 'http://videos.r7.com/policiais-humilham-suspeito-a-beira-da-morte-morre-com-dignidade-/idmedia/54e7050b0cf2ff57e0279389.html',
'md5': '403c4e393617e8e8ddc748978ee8efde',
'info_dict': {
'id': '54e7050b0cf2ff57e0279389',
'ext': 'mp4',
'title': 'Policiais humilham suspeito à beira da morte: "Morre com dignidade"',
'thumbnail': 're:^https?://.*\.jpg$',
'duration': 98,
'like_count': int,
'view_count': int,
},
}, {
'url': 'http://esportes.r7.com/videos/cigano-manda-recado-aos-fas/idmedia/4e176727b51a048ee6646a1b.html',
'only_matching': True,
}, {
'url': 'http://noticias.r7.com/record-news/video/representante-do-instituto-sou-da-paz-fala-sobre-fim-do-estatuto-do-desarmamento-5480fc580cf2285b117f438d/',
'only_matching': True,
}, {
'url': 'http://player.r7.com/video/i/54e7050b0cf2ff57e0279389?play=true&video=http://vsh.r7.com/54e7050b0cf2ff57e0279389/ER7_RE_BG_MORTE_JOVENS_570kbps_2015-02-2009f17818-cc82-4c8f-86dc-89a66934e633-ATOS_copy.mp4&linkCallback=http://videos.r7.com/policiais-humilham-suspeito-a-beira-da-morte-morre-com-dignidade-/idmedia/54e7050b0cf2ff57e0279389.html&thumbnail=http://vtb.r7.com/ER7_RE_BG_MORTE_JOVENS_570kbps_2015-02-2009f17818-cc82-4c8f-86dc-89a66934e633-thumb.jpg&idCategory=192&share=true&layout=full&full=true',
'only_matching': True,
}]
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(
'http://player.r7.com/video/i/%s' % video_id, video_id)
item = self._parse_json(js_to_json(self._search_regex(
r'(?s)var\s+item\s*=\s*({.+?});', webpage, 'player')), video_id)
title = unescapeHTML(item['title'])
thumbnail = item.get('init', {}).get('thumbUri')
duration = None
statistics = item.get('statistics', {})
like_count = int_or_none(statistics.get('likes'))
view_count = int_or_none(statistics.get('views'))
formats = []
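        # Explanatory comment (added, not in the original extractor): HDS
        # (.f4m) and HLS (.m3u8) variants get negative preference below, so
        # the direct progressive URLs rank first after _sort_formats().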
for format_key, format_dict in item['playlist'][0].items():
src = format_dict.get('src')
if not src:
continue
format_id = format_dict.get('format') or format_key
if duration is None:
duration = format_dict.get('duration')
if '.f4m' in src:
formats.extend(self._extract_f4m_formats(src, video_id, preference=-1))
elif src.endswith('.m3u8'):
formats.extend(self._extract_m3u8_formats(src, video_id, 'mp4', preference=-2))
else:
formats.append({
'url': src,
'format_id': format_id,
})
self._sort_formats(formats)
return {
'id': video_id,
'title': title,
'thumbnail': thumbnail,
'duration': duration,
'like_count': like_count,
'view_count': view_count,
'formats': formats,
}
|
unlicense
|
ravi-sharma/python-api-library
|
src/kayako/objects/ticket/ticket_enums.py
|
4
|
6936
|
# -*- coding: utf-8 -*-
#-----------------------------------------------------------------------------
# Copyright (c) 2011, Evan Leis
#
# Distributed under the terms of the Lesser GNU General Public License (LGPL)
#-----------------------------------------------------------------------------
'''
Created on Jun 8, 2011
@author: evan
'''
from lxml import etree
from kayako.core.object import KayakoObject
class TicketPriority(KayakoObject):
'''
Kayako TicketPriority API Object.
id
title
displayorder
frcolorcode
bgcolorcode
displayicon
type
uservisibilitycustom
usergroupid
'''
controller = '/Tickets/TicketPriority'
__parameters__ = [
'id',
'title',
'displayorder',
'frcolorcode',
'bgcolorcode',
'displayicon',
'type',
'uservisibilitycustom',
'usergroupid',
]
@classmethod
def _parse_ticket_priority(cls, ticket_priority_tree):
params = dict(
id=cls._get_int(ticket_priority_tree.find('id')),
title=cls._get_string(ticket_priority_tree.find('title')),
displayorder=cls._get_int(ticket_priority_tree.find('displayorder')),
frcolorcode=cls._get_string(ticket_priority_tree.find('frcolorcode')),
bgcolorcode=cls._get_string(ticket_priority_tree.find('bgcolorcode')),
displayicon=cls._get_string(ticket_priority_tree.find('displayicon')),
type=cls._get_string(ticket_priority_tree.find('type')),
uservisibilitycustom=cls._get_boolean(ticket_priority_tree.find('uservisibilitycustom')),
usergroupid=cls._get_int(ticket_priority_tree.find('usergroupid'), required=False),
)
return params
@classmethod
def get_all(cls, api):
response = api._request(cls.controller, 'GET')
tree = etree.parse(response)
return [TicketPriority(api, **cls._parse_ticket_priority(ticket_priority_tree)) for ticket_priority_tree in tree.findall('ticketpriority')]
@classmethod
def get(cls, api, id):
response = api._request('%s/%s/' % (cls.controller, id), 'GET')
tree = etree.parse(response)
node = tree.find('ticketpriority')
if node is None:
return None
        params = cls._parse_ticket_priority(node)
return TicketPriority(api, **params)
def __str__(self):
return '<TicketPriority (%s): %s>' % (self.id, self.title)
class TicketStatus(KayakoObject):
'''
Kayako TicketStatus API Object.
id
title
displayorder
departmentid
displayicon
type
displayinmainlist
markasresolved
displaycount
statuscolor
statusbgcolor
resetduetime
triggersurvey
staffvisibilitycustom
'''
controller = '/Tickets/TicketStatus'
__parameters__ = [
'id',
'title',
'displayorder',
'departmentid',
'displayicon',
'type',
'displayinmainlist',
'markasresolved',
'displaycount',
'statuscolor',
'statusbgcolor',
'resetduetime',
'triggersurvey',
'staffvisibilitycustom',
]
@classmethod
def _parse_ticket_status(cls, ticket_status_tree):
params = dict(
id=cls._get_int(ticket_status_tree.find('id')),
title=cls._get_string(ticket_status_tree.find('title')),
displayorder=cls._get_int(ticket_status_tree.find('displayorder')),
departmentid=cls._get_int(ticket_status_tree.find('departmentid')),
displayicon=cls._get_string(ticket_status_tree.find('displayicon')),
type=cls._get_string(ticket_status_tree.find('type')),
displayinmainlist=cls._get_boolean(ticket_status_tree.find('displayinmainlist')),
markasresolved=cls._get_boolean(ticket_status_tree.find('markasresolved')),
displaycount=cls._get_int(ticket_status_tree.find('displaycount')),
statuscolor=cls._get_string(ticket_status_tree.find('statuscolor')),
statusbgcolor=cls._get_string(ticket_status_tree.find('statusbgcolor')),
resetduetime=cls._get_boolean(ticket_status_tree.find('resetduetime')),
triggersurvey=cls._get_boolean(ticket_status_tree.find('triggersurvey')),
staffvisibilitycustom=cls._get_boolean(ticket_status_tree.find('staffvisibilitycustom')),
)
return params
@classmethod
def get_all(cls, api):
response = api._request(cls.controller, 'GET')
tree = etree.parse(response)
return [TicketStatus(api, **cls._parse_ticket_status(ticket_status_tree)) for ticket_status_tree in tree.findall('ticketstatus')]
@classmethod
def get(cls, api, id):
response = api._request('%s/%s/' % (cls.controller, id), 'GET')
tree = etree.parse(response)
node = tree.find('ticketstatus')
if node is None:
return None
params = cls._parse_ticket_status(node)
return TicketStatus(api, **params)
def __str__(self):
return '<TicketStatus (%s): %s>' % (self.id, self.title)
class TicketType(KayakoObject):
'''
Kayako TicketType API Object.
id
title
displayorder
departmentid
displayicon
type
uservisibilitycustom
'''
controller = '/Tickets/TicketType'
__parameters__ = [
'id',
'title',
'displayorder',
'departmentid',
'displayicon',
'type',
'uservisibilitycustom',
]
@classmethod
def _parse_ticket_type(cls, ticket_type_tree):
params = dict(
id=cls._get_int(ticket_type_tree.find('id')),
title=cls._get_string(ticket_type_tree.find('title')),
displayorder=cls._get_int(ticket_type_tree.find('displayorder')),
departmentid=cls._get_int(ticket_type_tree.find('departmentid')),
displayicon=cls._get_string(ticket_type_tree.find('displayicon')),
type=cls._get_string(ticket_type_tree.find('type')),
uservisibilitycustom=cls._get_boolean(ticket_type_tree.find('uservisibilitycustom')),
)
return params
@classmethod
def get_all(cls, api):
response = api._request(cls.controller, 'GET')
tree = etree.parse(response)
return [TicketType(api, **cls._parse_ticket_type(ticket_type_tree)) for ticket_type_tree in tree.findall('tickettype')]
@classmethod
def get(cls, api, id):
response = api._request('%s/%s/' % (cls.controller, id), 'GET')
tree = etree.parse(response)
node = tree.find('tickettype')
if node is None:
return None
params = cls._parse_ticket_type(node)
return TicketType(api, **params)
def __str__(self):
return '<TicketType (%s): %s>' % (self.id, self.title)
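# Illustrative usage sketch (assumption, not part of this module): `api` is a
# configured Kayako API client; its construction is not shown in this file.
#   for priority in TicketPriority.get_all(api):
#       print(priority)          # e.g. <TicketPriority (1): High>
#   status = TicketStatus.get(api, 1)
#   if status is not None:
#       print(status.title)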
|
bsd-2-clause
|
mehanig/scrapi
|
tests/test_helpers.py
|
5
|
7171
|
import vcr
import mock
import pytest
from scrapi import requests
from scrapi.base import helpers
@pytest.fixture(autouse=True)
def mock_maybe_load_response(monkeypatch):
mock_mlr = mock.Mock()
mock_mlr.return_value = None
mock_save = lambda x: x
monkeypatch.setattr(requests, '_maybe_load_response', mock_mlr)
monkeypatch.setattr(requests.HarvesterResponse, 'save', mock_save)
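# Note (illustrative): this autouse fixture bypasses the cached-response
# lookup (_maybe_load_response returns None) and makes HarvesterResponse.save
# a no-op stand-in, so each test below exercises the parsing helpers directly.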
class TestHelpers(object):
def test_format_one_tag(self):
single_tag = ' A single tag '
single_output = helpers.format_tags(single_tag)
assert single_output == ['a single tag']
assert isinstance(single_output, list)
def test_format_many_tags(self):
many_tags = [' A', 'Bunch', ' oftags ']
many_output = helpers.format_tags(many_tags)
assert set(many_output) == set(['a', 'bunch', 'oftags'])
def test_format_sep_tags(self):
sep_tags = ['These, we know', 'should be many']
sep_output = helpers.format_tags(sep_tags, sep=',')
assert set(sep_output) == set(['these', 'we know', 'should be many'])
def test_extract_dois(self):
identifiers = ['doi: THIS_IS_A_DOI!', 'http://dx.doi.org/andalsothis', 'doi:doi:thistoook']
valid_dois = helpers.oai_extract_dois(identifiers)
assert valid_dois == [
'http://dx.doi.org/THIS_IS_A_DOI!',
'http://dx.doi.org/andalsothis',
'http://dx.doi.org/thistoook'
]
def test_oai_extract_url_raises(self):
identifiers = ['I might be a url but rly I am naaaahhttt']
with pytest.raises(ValueError):
helpers.oai_extract_url(identifiers)
def test_extract_uris(self):
identifiers = ['doi:10.whateverwhatever', 'http://alloutofbubblegum.com',
'http://viewcontent.cgi/iamacoolpdf', 'http://GETTHETABLES.com',
'Vahedifard, F. et al. (2013). Géotechnique 63, No. 6, 451–462 [http://dx.doi.org/10.1680/geot.11.P.130] ',
'I am a bunch of text but I also have a doi:10.10.thisisarealdoi']
uri_dict = helpers.oai_process_uris(identifiers)
assert uri_dict == {
'canonicalUri': 'http://alloutofbubblegum.com',
'objectUris': ['http://dx.doi.org/10.whateverwhatever',
'http://dx.doi.org/10.1680/geot.11.P.130',
'http://dx.doi.org/10.10.thisisarealdoi',
'http://viewcontent.cgi/iamacoolpdf'],
'providerUris': ['http://alloutofbubblegum.com', 'http://GETTHETABLES.com']
}
def test_extract_uris_use_doi(self):
identifiers = ['doi:10.whateverwhatever', 'http://alloutofbubblegum.com',
'http://viewcontent.cgi/iamacoolpdf', 'http://GETTHETABLES.com',
'Vahedifard, F. et al. (2013). Géotechnique 63, No. 6, 451–462 [http://dx.doi.org/10.1680/geot.11.P.130] ',
'I am a bunch of text but I also have a doi:10.10.thisisarealdoi']
uri_dict = helpers.oai_process_uris(identifiers, use_doi=True)
assert uri_dict == {
'canonicalUri': 'http://dx.doi.org/10.10.thisisarealdoi',
'objectUris': ['http://dx.doi.org/10.whateverwhatever',
'http://dx.doi.org/10.1680/geot.11.P.130',
'http://dx.doi.org/10.10.thisisarealdoi',
'http://viewcontent.cgi/iamacoolpdf'],
'providerUris': ['http://alloutofbubblegum.com', 'http://GETTHETABLES.com']
}
def test_process_contributors(self):
args = ['Stardust Rhodes', 'Golddust Rhodes', 'Dusty Rhodes']
response = helpers.oai_process_contributors(args)
assert isinstance(response, list)
@vcr.use_cassette('tests/vcr/asu.yaml')
def test_oai_get_records_and_token(self):
url = 'http://repository.asu.edu/oai-pmh?verb=ListRecords&metadataPrefix=oai_dc&from=2015-03-10&until=2015-03-11'
force = False
verify = True
throttle = 0.5
namespaces = {
'dc': 'http://purl.org/dc/elements/1.1/',
'ns0': 'http://www.openarchives.org/OAI/2.0/',
'oai_dc': 'http://www.openarchives.org/OAI/2.0/',
}
records, token = helpers.oai_get_records_and_token(url, throttle, force, namespaces, verify)
assert records
assert token
assert len(records) == 50
def test_extract_doi_from_text(self):
text = ["""
Ryder, Z., & Dudley, B. R. (2014). Methods of WOO WOO WOO and D3 comming atcha by a
Continuous Flow Microreactor. Crystal Growth & Design, 14(9),
4759-4767. doi:10.1021/woowoowoo yep yep yep what he do"""]
extracted_doi = helpers.extract_doi_from_text(text)
assert extracted_doi == 'http://dx.doi.org/10.1021/woowoowoo'
def test_gather_identifiers(self):
identifiers = [['doi:10.whateverwhatever',
'http://viewcontent.cgi/iamacoolpdf'],
'451–462 [http://dx.doi.org/10.1680/geot.11.P.130]',
'I am a bunch of text but I also have a doi:10.10.thisisarealdoi',
['http://bubbaray.com', 'http://devon.net']]
gathered = helpers.gather_identifiers(identifiers)
assert gathered == ['doi:10.whateverwhatever',
'http://viewcontent.cgi/iamacoolpdf',
'451–462 [http://dx.doi.org/10.1680/geot.11.P.130]',
'I am a bunch of text but I also have a doi:10.10.thisisarealdoi',
'http://bubbaray.com',
'http://devon.net']
def test_gather_object_uris(self):
identifiers = ['doi:10.whateverwhatever',
'http://viewcontent.cgi/iamacoolpdf',
'451–462 [http://dx.doi.org/10.1680/geot.11.P.130]',
'I am a bunch of text but I also have a doi:10.10.thisisarealdoi',
'http://bubbaray.com',
'http://devon.net']
object_uris = helpers.gather_object_uris(identifiers)
assert object_uris == [
'http://dx.doi.org/10.whateverwhatever',
'http://dx.doi.org/10.1680/geot.11.P.130',
'http://dx.doi.org/10.10.thisisarealdoi'
]
def test_seperate_provider_object_uris(self):
identifiers = [
'http://dx.doi.org/10.whateverwhatever',
'http://cgi.viewcontent.apdf.pdf',
'http://get_the_tables.net'
]
provider_uris, object_uris = helpers.seperate_provider_object_uris(identifiers)
assert provider_uris == ['http://get_the_tables.net']
assert object_uris == ['http://dx.doi.org/10.whateverwhatever', 'http://cgi.viewcontent.apdf.pdf']
def test_format_doi_as_url(self):
doi1 = ' doi:10.dudleyzrule '
doi2 = 'DOI:10.getthetables '
assert helpers.format_doi_as_url(doi1) == 'http://dx.doi.org/10.dudleyzrule'
assert helpers.format_doi_as_url(doi2) == 'http://dx.doi.org/10.getthetables'
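# Hypothetical invocation for just these helpers, using pytest's -k filter:
#   py.test tests/test_helpers.py -k "doi or tags"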
|
apache-2.0
|
tdtrask/ansible
|
lib/ansible/modules/monitoring/icinga2_host.py
|
19
|
10096
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# This module is proudly sponsored by CGI (www.cgi.com) and
# KPN (www.kpn.com).
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: icinga2_host
short_description: Manage a host in Icinga2
description:
- "Add or remove a host to Icinga2 through the API."
- "See U(https://www.icinga.com/docs/icinga2/latest/doc/12-icinga2-api/)"
version_added: "2.5"
author: "Jurgen Brand (@t794104)"
options:
url:
description:
- HTTP, HTTPS, or FTP URL in the form (http|https|ftp)://[user[:pass]]@host.domain[:port]/path
required: true
use_proxy:
description:
- If C(no), it will not use a proxy, even if one is defined in
an environment variable on the target hosts.
default: 'yes'
type: bool
validate_certs:
description:
- If C(no), SSL certificates will not be validated. This should only be used
on personally controlled sites using self-signed certificates.
default: 'yes'
type: bool
url_username:
description:
- The username for use in HTTP basic authentication.
- This parameter can be used without C(url_password) for sites that allow empty passwords.
url_password:
description:
- The password for use in HTTP basic authentication.
- If the C(url_username) parameter is not specified, the C(url_password) parameter will not be used.
force_basic_auth:
description:
- httplib2, the library used by the uri module, only sends authentication information when a webservice
responds to an initial request with a 401 status. Since some basic auth services do not properly
send a 401, logins will fail. This option forces the sending of the Basic authentication header
upon initial request.
default: 'no'
type: bool
client_cert:
description:
- PEM formatted certificate chain file to be used for SSL client
authentication. This file can also include the key as well, and if
the key is included, C(client_key) is not required.
client_key:
description:
- PEM formatted file that contains your private key to be used for SSL
client authentication. If C(client_cert) contains both the certificate
and key, this option is not required.
state:
description:
- Whether the host object should be present or absent.
required: false
choices: [ "present", "absent" ]
default: present
name:
description:
- Name used to create / delete the host. This does not need to be the FQDN, but does need to be unique.
required: true
zone:
description:
- The zone from where this host should be polled.
template:
description:
- The template used to define the host.
- Template cannot be modified after object creation.
required: false
default: None
check_command:
description:
- The command used to check if the host is alive.
required: false
default: "hostalive"
display_name:
description:
- The name used to display the host.
required: false
default: if none is given, the value of the <name> parameter is used
ip:
description:
- The IP address of the host.
required: true
variables:
description:
- List of variables.
required: false
default: None
'''
EXAMPLES = '''
- name: Add host to icinga
icinga2_host:
url: "https://icinga2.example.co,m"
url_username: "ansible"
url_password: "a_secret"
state: present
name: "{{ ansible_fqdn }}"
ip: "{{ ansible_default_ipv4.address }}"
delegate_to: 127.0.0.1
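# Additional illustrative example (credentials assumed): removing a host.
- name: Remove host from icinga
icinga2_host:
url: "https://icinga2.example.com"
url_username: "ansible"
url_password: "a_secret"
state: absent
name: "{{ ansible_fqdn }}"
delegate_to: 127.0.0.1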
'''
RETURN = '''
name:
description: The name used to create, modify or delete the host
type: string
returned: always
data:
description: The data structure used for create, modify or delete of the host
type: dict
returned: always
'''
import json
import os
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.urls import fetch_url, url_argument_spec
# ===========================================
# Icinga2 API class
#
class icinga2_api:
module = None
def call_url(self, path, data='', method='GET'):
headers = {
'Accept': 'application/json',
'X-HTTP-Method-Override': method,
}
url = self.module.params.get("url") + "/" + path
rsp, info = fetch_url(module=self.module, url=url, data=data, headers=headers, method=method)
body = ''
if rsp:
body = json.loads(rsp.read())
if info['status'] >= 400:
body = info['body']
return {'code': info['status'], 'data': body}
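# Illustrative return shape, derived from the code above:
#   {'code': 200, 'data': {'results': [...]}}   on success
#   {'code': 404, 'data': '<error body>'}       on an HTTP error (status >= 400)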
def check_connection(self):
ret = self.call_url('v1/status')
if ret['code'] == 200:
return True
return False
def exists(self, hostname):
data = {
"filter": "match(\"" + hostname + "\", host.name)",
}
ret = self.call_url(
path="v1/objects/hosts",
data=self.module.jsonify(data)
)
if ret['code'] == 200:
if len(ret['data']['results']) == 1:
return True
return False
def create(self, hostname, data):
ret = self.call_url(
path="v1/objects/hosts/" + hostname,
data=self.module.jsonify(data),
method="PUT"
)
return ret
def delete(self, hostname):
data = {"cascade": 1}
ret = self.call_url(
path="v1/objects/hosts/" + hostname,
data=self.module.jsonify(data),
method="DELETE"
)
return ret
def modify(self, hostname, data):
ret = self.call_url(
path="v1/objects/hosts/" + hostname,
data=self.module.jsonify(data),
method="POST"
)
return ret
def diff(self, hostname, data):
ret = self.call_url(
path="v1/objects/hosts/" + hostname,
method="GET"
)
changed = False
ic_data = ret['data']['results'][0]
for key in data['attrs']:
if key not in ic_data['attrs'].keys():
changed = True
elif data['attrs'][key] != ic_data['attrs'][key]:
changed = True
return changed
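# Illustrative note: diff() reports a change as soon as any desired attribute
# is absent from, or differs in, the live object. For example, desired
#   {'attrs': {'address': '10.0.0.1'}}
# against a host whose current address is 10.0.0.2 yields True.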
# ===========================================
# Module execution.
#
def main():
# use the predefined argument spec for url
argument_spec = url_argument_spec()
# remove unnecessary argument 'force'
del argument_spec['force']
# add our own arguments
argument_spec.update(
state=dict(default="present", choices=["absent", "present"]),
name=dict(required=True, aliases=['host']),
zone=dict(),
template=dict(default=None),
check_command=dict(default="hostalive"),
display_name=dict(default=None),
ip=dict(required=True),
variables=dict(type='dict', default=None),
)
# Define the main module
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True
)
state = module.params["state"]
name = module.params["name"]
zone = module.params["zone"]
template = []
template.append(name)
if module.params["template"]:
template.append(module.params["template"])
check_command = module.params["check_command"]
ip = module.params["ip"]
display_name = module.params["display_name"]
if not display_name:
display_name = name
variables = module.params["variables"]
try:
icinga = icinga2_api()
icinga.module = module
icinga.check_connection()
except Exception as e:
module.fail_json(msg="unable to connect to Icinga. Exception message: %s" % (e))
data = {
'attrs': {
'address': ip,
'display_name': display_name,
'check_command': check_command,
'zone': zone,
'vars': {
'made_by': "ansible",
},
'templates': template,
}
}
if variables:
data['attrs']['vars'].update(variables)
changed = False
if icinga.exists(name):
if state == "absent":
if module.check_mode:
module.exit_json(changed=True, name=name, data=data)
else:
try:
ret = icinga.delete(name)
if ret['code'] == 200:
changed = True
else:
module.fail_json(msg="bad return code deleting host: %s" % (ret['data']))
except Exception as e:
module.fail_json(msg="exception deleting host: " + str(e))
elif icinga.diff(name, data):
if module.check_mode:
module.exit_json(changed=True, name=name, data=data)
# Template attribute is not allowed in modification
del data['attrs']['templates']
ret = icinga.modify(name, data)
if ret['code'] == 200:
changed = True
else:
module.fail_json(msg="bad return code modifying host: %s" % (ret['data']))
else:
if state == "present":
if module.check_mode:
changed = True
else:
try:
ret = icinga.create(name, data)
if ret['code'] == 200:
changed = True
else:
module.fail_json(msg="bad return code creating host: %s" % (ret['data']))
except Exception as e:
module.fail_json(msg="exception creating host: " + str(e))
module.exit_json(changed=changed, name=name, data=data)
# import module snippets
if __name__ == '__main__':
main()
|
gpl-3.0
|
sachinpro/sachinpro.github.io
|
tensorflow/python/framework/importer_test.py
|
6
|
28589
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.python.framework.importer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from google.protobuf import text_format
from tensorflow.core.framework import op_def_pb2
from tensorflow.python.framework import device
from tensorflow.python.framework import op_def_registry
_op_list = op_def_pb2.OpList()
text_format.Merge("""
op {
name: 'None'
}
op {
name: 'Oi'
output_arg { name: 'a' type: DT_INT32 }
}
op {
name: 'Or'
output_arg { name: 'a' type: DT_INT32 is_ref: true }
}
op {
name: 'Of'
output_arg { name: 'a' type: DT_FLOAT }
}
op {
name: 'Ii'
input_arg { name: 'a' type: DT_INT32 }
}
op {
name: 'If'
input_arg { name: 'a' type: DT_FLOAT }
}
op {
name: 'Oii'
output_arg { name: 'a' type: DT_INT32 }
output_arg { name: 'b' type: DT_INT32 }
}
op {
name: 'Oif'
output_arg { name: 'a' type: DT_INT32 }
output_arg { name: 'b' type: DT_FLOAT }
}
op {
name: 'Iii'
input_arg { name: 'a' type: DT_INT32 }
input_arg { name: 'b' type: DT_INT32 }
}
op {
name: 'Iff'
input_arg { name: 'a' type: DT_FLOAT }
input_arg { name: 'b' type: DT_FLOAT }
}
op {
name: 'Iif'
input_arg { name: 'a' type: DT_INT32 }
input_arg { name: 'b' type: DT_FLOAT }
}
op {
name: 'Iri'
input_arg { name: 'a' type: DT_INT32 is_ref: true }
input_arg { name: 'b' type: DT_INT32 }
}
op {
name: 'In'
input_arg { name: 'a' number_attr: 'N' type_attr: 'T' }
attr { name: 'N' type: 'int' minimum: 1 }
attr { name: 'T' type: 'type' }
}
op {
name: 'Otl'
output_arg { name: 'a' type_list_attr: 't' }
attr { name: 'T' type: 'list(type)' minimum: 1 }
}
op {
name: 'Unary'
input_arg { name: 'a' type_attr: 'T' }
output_arg { name: 'b' type_attr: 'T' }
attr { name: 'T' type: 'type' }
}
op {
name: 'OpWithDefaultAttr'
output_arg { name: 'a' type: DT_INT32 }
attr { name: 'default_float' type: 'float' default_value { f: 123.0 } }
}
op {
name: 'OpWithFutureDefaultAttr'
}
""", _op_list)
op_def_registry.register_op_list(_op_list)
# NOTE(mrry): Dummy shape registrations for ops used in the tests.
for op_def in _op_list.op:
tf.RegisterShape(op_def.name)(None)
class ImportGraphDefTest(tf.test.TestCase):
def _MakeGraphDef(self, text, producer=tf.GRAPH_DEF_VERSION,
min_consumer=tf.GRAPH_DEF_VERSION_MIN_CONSUMER):
text = "versions: { producer: %d min_consumer: %d };\n%s" % (
producer, min_consumer, text)
ret = tf.GraphDef()
text_format.Merge(text, ret)
return ret
def testBasic(self):
with tf.Graph().as_default():
a, b, c, d = tf.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'Oif' }
node { name: 'B' op: 'Otl'
attr { key: 't'
value { list { type: DT_INT32 type: DT_FLOAT } } } }
node { name: 'C' op: 'In'
attr { key: 'N' value { i: 2 } }
attr { key: 'T' value { type: DT_INT32 } }
input: 'A:0' input: 'B:0' }
node { name: 'D' op: 'In'
attr { key: 'N' value { i: 2 } }
attr { key: 'T' value { type: DT_FLOAT } }
input: 'A:1' input: 'B:1' }
"""),
return_elements=["A", "B", "C", "D"],
name="import")
# Assert that the import process creates distinct tensors.
self.assertNotEqual(a.outputs[0].name, a.outputs[1].name)
self.assertNotEqual(b.outputs[0].name, b.outputs[1].name)
self.assertNotEqual(a.outputs[0].name, b.outputs[0].name)
self.assertNotEqual(a.outputs[0].name, b.outputs[1].name)
self.assertNotEqual(a.outputs[1].name, b.outputs[0].name)
self.assertNotEqual(a.outputs[1].name, b.outputs[1].name)
# Assert that the ops are connected according to the GraphDef topology.
self.assertEqual(c.inputs[0], a.outputs[0])
self.assertEqual(c.inputs[1], b.outputs[0])
self.assertEqual(d.inputs[0], a.outputs[1])
self.assertEqual(d.inputs[1], b.outputs[1])
# Check the types of the returned ops and tensors.
self.assertEqual(a.type, "Oif")
self.assertEqual(b.type, "Otl")
self.assertEqual(c.type, "In")
self.assertEqual(d.type, "In")
self.assertEqual(a.outputs[0].dtype, tf.int32)
self.assertEqual(a.outputs[1].dtype, tf.float32)
self.assertEqual(b.outputs[0].dtype, tf.int32)
self.assertEqual(b.outputs[1].dtype, tf.float32)
# Check the names of the returned ops.
self.assertEqual(a.name, "import/A")
self.assertEqual(b.name, "import/B")
self.assertEqual(c.name, "import/C")
self.assertEqual(d.name, "import/D")
# Check that the op_def is still available.
self.assertNotEqual(None, a.op_def)
def testInputMap(self):
with tf.Graph().as_default():
feed_a_0 = tf.constant(0, dtype=tf.int32)
feed_b_1 = tf.constant(1, dtype=tf.int32)
a, b, c, d = tf.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'Oii' }
node { name: 'B' op: 'Oii' }
node { name: 'C' op: 'In'
attr { key: 'N' value { i: 2 } }
attr { key: 'T' value { type: DT_INT32 } }
input: 'A:0' input: 'B:0' }
node { name: 'D' op: 'In'
attr { key: 'N' value { i: 2 } }
attr { key: 'T' value { type: DT_INT32 } }
input: 'A:1' input: 'B:1' }
"""),
input_map={"A:0": feed_a_0, "B:1": feed_b_1},
return_elements=["A", "B", "C", "D"])
self.assertEqual(c.inputs[0], feed_a_0)
self.assertEqual(c.inputs[1], b.outputs[0])
self.assertEqual(d.inputs[0], a.outputs[1])
self.assertEqual(d.inputs[1], feed_b_1)
def testInputMapBytes(self):
with tf.Graph().as_default():
feed_a_0 = tf.constant(0, dtype=tf.int32)
feed_b_1 = tf.constant(1, dtype=tf.int32)
a, b, c, d = tf.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'Oii' }
node { name: 'B' op: 'Oii' }
node { name: 'C' op: 'In'
attr { key: 'N' value { i: 2 } }
attr { key: 'T' value { type: DT_INT32 } }
input: 'A:0' input: 'B:0' }
node { name: 'D' op: 'In'
attr { key: 'N' value { i: 2 } }
attr { key: 'T' value { type: DT_INT32 } }
input: 'A:1' input: 'B:1' }
"""),
input_map={b"A:0": feed_a_0, b"B:1": feed_b_1},
return_elements=[b"A", b"B", b"C", b"D"])
self.assertEqual(c.inputs[0], feed_a_0)
self.assertEqual(c.inputs[1], b.outputs[0])
self.assertEqual(d.inputs[0], a.outputs[1])
self.assertEqual(d.inputs[1], feed_b_1)
def testInputMapUnicode(self):
with tf.Graph().as_default():
feed_a_0 = tf.constant(0, dtype=tf.int32)
feed_b_1 = tf.constant(1, dtype=tf.int32)
a, b, c, d = tf.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'Oii' }
node { name: 'B' op: 'Oii' }
node { name: 'C' op: 'In'
attr { key: 'N' value { i: 2 } }
attr { key: 'T' value { type: DT_INT32 } }
input: 'A:0' input: 'B:0' }
node { name: 'D' op: 'In'
attr { key: 'N' value { i: 2 } }
attr { key: 'T' value { type: DT_INT32 } }
input: 'A:1' input: 'B:1' }
"""),
input_map={u"A:0": feed_a_0, u"B:1": feed_b_1},
return_elements=[u"A", u"B", u"C", u"D"])
self.assertEqual(c.inputs[0], feed_a_0)
self.assertEqual(c.inputs[1], b.outputs[0])
self.assertEqual(d.inputs[0], a.outputs[1])
self.assertEqual(d.inputs[1], feed_b_1)
def testImplicitZerothOutput(self):
with tf.Graph().as_default():
a, b = tf.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'Oii' }
node { name: 'B' op: 'Ii' input: 'A' }
"""),
return_elements=["A", "B"])
self.assertEqual(b.inputs[0], a.outputs[0])
def testInputMapImplicitZerothOutput(self):
with tf.Graph().as_default():
feed_a_0 = tf.constant(0, dtype=tf.int32)
b, = tf.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'Oii' }
node { name: 'B' op: 'Ii' input: 'A:0' }
"""),
input_map={"A": feed_a_0},
return_elements=["B"])
self.assertEqual(b.inputs[0], feed_a_0)
def testWithControlDependency(self):
with tf.Graph().as_default():
a, b = tf.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'None' }
node { name: 'B' op: 'None' input: '^A' }
"""),
return_elements=["A", "B"])
self.assertEqual(b.control_inputs, [a])
def testWithRefs(self):
with tf.Graph().as_default():
a, b, c, d = tf.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'Or' }
node { name: 'B' op: 'Oi' }
node { name: 'C' op: 'Iii' input: 'A:0' input: 'B:0' }
node { name: 'D' op: 'Iri' input: 'A:0' input: 'B:0' }
"""),
return_elements=["A", "B", "C", "D"])
self.assertEqual(c.inputs[0], a.outputs[0])
self.assertEqual(c.inputs[1], b.outputs[0])
self.assertEqual(d.inputs[0], a.outputs[0])
self.assertEqual(d.inputs[1], b.outputs[0])
self.assertEqual(a.outputs[0].dtype, tf.int32_ref)
self.assertEqual(c._input_dtypes, [tf.int32, tf.int32])
self.assertEqual(c.outputs, [])
self.assertEqual(d._input_dtypes,
[tf.int32_ref, tf.int32])
self.assertEqual(d.outputs, [])
def testCyclic(self):
with tf.Graph().as_default():
a, b = tf.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'Unary'
attr { key: 'T' value { type: DT_INT32 } } input: 'B:0' }
node { name: 'B' op: 'Unary'
attr { key: 'T' value { type: DT_INT32 } } input: 'A:0' }
"""),
return_elements=["A", "B"])
self.assertEqual(a.inputs[0], b.outputs[0])
self.assertEqual(b.inputs[0], a.outputs[0])
def testTypeMismatchInGraphDef(self):
with tf.Graph().as_default():
with self.assertRaises(ValueError) as e:
tf.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'Oi' }
node { name: 'B' op: 'If' input: 'A:0' }
"""))
self.assertTrue(
"Cannot convert a tensor of type int32 to an input of type float" in
str(e.exception))
def testInvalidSignatureTooManyInputsInGraphDef(self):
with tf.Graph().as_default():
with self.assertRaises(ValueError) as e:
tf.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'Oi' }
node { name: 'B' op: 'None' input: 'A:0' }
"""))
self.assertTrue("More inputs specified ('A:0') than the op expects" in
str(e.exception))
def testInvalidSignatureNotEnoughInputsInGraphDef(self):
with tf.Graph().as_default():
with self.assertRaises(ValueError) as e:
tf.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'Oi' }
node { name: 'B' op: 'Iif' input: 'A:0' }
"""))
self.assertTrue("Input types mismatch (expected 'int32, float32' but "
"got 'int32')" in str(e.exception))
def testMissingInputOpInGraphDef(self):
with tf.Graph().as_default():
with self.assertRaises(ValueError) as e:
tf.import_graph_def(
self._MakeGraphDef("""
node { name: 'B' op: 'If' input: 'A:0' }
"""))
self.assertTrue("Input tensor 'A:0' not found" in str(e.exception))
def testMissingInputOpInGraphDefButAppearsInInputMap(self):
with tf.Graph().as_default():
feed_a_0 = tf.constant(5.0)
b, = tf.import_graph_def(
self._MakeGraphDef("""
node { name: 'B' op: 'If' input: 'A:0' }
"""),
input_map={"A:0": feed_a_0},
return_elements=["B"])
self.assertEqual(b.inputs[0], feed_a_0)
def testMissingInputTensorInGraphDef(self):
with tf.Graph().as_default():
with self.assertRaises(ValueError) as e:
tf.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'Of' }
node { name: 'B' op: 'If' input: 'A:1' }
"""))
self.assertTrue("Input tensor 'A:1' not found" in str(e.exception))
def testMissingControlInputInGraphDef(self):
with tf.Graph().as_default():
with self.assertRaises(ValueError) as e:
tf.import_graph_def(
self._MakeGraphDef("""
node { name: 'B' op: 'None' input: '^A' }
"""))
self.assertTrue("Control input '^A' not found" in str(e.exception))
def testInvalidTensorNameOutputIndexInGraphDef(self):
with tf.Graph().as_default():
with self.assertRaises(ValueError) as e:
tf.import_graph_def(
self._MakeGraphDef("""
node { name: 'B' op: 'None' input: 'A:B' }
"""))
self.assertEqual("Cannot convert 'A:B' to a tensor name.",
str(e.exception))
def testInvalidTensorNameInGraphDef(self):
with tf.Graph().as_default():
with self.assertRaises(ValueError) as e:
tf.import_graph_def(
self._MakeGraphDef("""
node { name: 'B' op: 'None' input: 'A:B:0' }
"""))
self.assertEqual("Cannot convert 'A:B:0' to a tensor name.",
str(e.exception))
def testMissingReturnOperation(self):
with tf.Graph().as_default():
with self.assertRaises(ValueError) as e:
tf.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'None' }
"""),
return_elements=["B"])
self.assertTrue("return_element 'B' not found in graph_def." in
str(e.exception))
def testMissingReturnTensor(self):
with tf.Graph().as_default():
with self.assertRaises(ValueError) as e:
tf.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'Oi' }
"""),
return_elements=["A:1"])
self.assertTrue("return_element 'A:1' not found in graph_def." in
str(e.exception))
with self.assertRaises(ValueError) as e:
tf.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'Oi' }
"""),
return_elements=["B:0"])
self.assertTrue("return_element 'B:0' not found in graph_def." in
str(e.exception))
with self.assertRaises(ValueError) as e:
tf.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'Oi' }
"""),
return_elements=["A:B:0"])
self.assertTrue("return_element 'A:B:0' not found in graph_def." in
str(e.exception))
def testMissingInputMap(self):
with tf.Graph().as_default():
with self.assertRaises(ValueError) as e:
tf.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'None' }
"""),
input_map={"B:0": tf.constant(5.0)})
self.assertTrue("not found in graph_def: [B:0]" in str(e.exception))
def testInputMapTypeMismatch(self):
with tf.Graph().as_default():
with self.assertRaises(ValueError) as e:
tf.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'Oi' }
node { name: 'B' op: 'Ii' input: 'A:0' }
"""),
input_map={"A:0": tf.constant(5.0)})
self.assertTrue(
"Cannot convert a tensor of type float32 to an input of type int32."
in str(e.exception))
def testNoReturns(self):
with tf.Graph().as_default() as g:
ret = tf.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'None' }
"""))
self.assertEqual(ret, None)
a = g.get_operation_by_name("import/A")
self.assertEqual(a.type, "None")
def testOverrideNamePrefix(self):
with tf.Graph().as_default():
a, = tf.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'None' }
"""),
return_elements=["A"], name="imported_graph")
self.assertEqual(a.name, "imported_graph/A")
def testNamePrefixColocationAttrs(self):
original_graph_def = self._MakeGraphDef("""
node { name: 'A' op: 'None' }
node { name: 'B' op: 'None' attr {
key: '_class'
value { list { s: 'loc:@A' } }
} }""")
with tf.Graph().as_default():
b, = tf.import_graph_def(original_graph_def,
return_elements=["B"], name="imported_graph")
self.assertProtoEqualsVersion("""
node { name: 'imported_graph/A' op: 'None' }
node { name: 'imported_graph/B' op: 'None' attr {
key: '_class'
value { list { s: 'loc:@imported_graph/A' } }
} }""", b.graph.as_graph_def())
def testNamePrefixColocationAttrsMultipleImport(self):
original_graph_def = self._MakeGraphDef("""
node { name: 'A' op: 'None' }
node { name: 'B' op: 'None' attr {
key: '_class'
value { list { s: 'loc:@A' } }
} }""")
with tf.Graph().as_default():
b, = tf.import_graph_def(original_graph_def,
return_elements=["B"], name="")
_, = tf.import_graph_def(original_graph_def,
return_elements=["B"], name="")
self.assertProtoEqualsVersion("""
node { name: 'A' op: 'None' }
node { name: 'B' op: 'None' attr {
key: '_class'
value { list { s: 'loc:@A' } }
} }
node { name: 'A_1' op: 'None' }
node { name: 'B_1' op: 'None' attr {
key: '_class'
value { list { s: 'loc:@A_1' } }
} }""", b.graph.as_graph_def())
def testNamePrefixColocationAttrsNotFound(self):
original_graph_def = self._MakeGraphDef("""
node { name: 'B' op: 'None' attr {
key: '_class'
value { list { s: 'loc:@A' } }
} }""")
with tf.Graph().as_default():
with self.assertRaisesRegexp(ValueError, "does not exist during import"):
tf.import_graph_def(original_graph_def,
return_elements=["B"], name="imported_graph")
def testEmptyGraph(self):
with tf.Graph().as_default() as g:
init_version = g.version
tf.import_graph_def(self._MakeGraphDef(""))
self.assertEqual(init_version, g.version)
def testInvalidInputForGraphDef(self):
with tf.Graph().as_default():
with self.assertRaises(TypeError) as e:
tf.import_graph_def("")
self.assertEqual(
"graph_def must be a GraphDef proto.", str(e.exception))
def testInvalidInputForInputMap(self):
with tf.Graph().as_default():
with self.assertRaises(TypeError) as e:
tf.import_graph_def(self._MakeGraphDef(""),
input_map=[tf.constant(5.0)])
self.assertEqual("input_map must be a dictionary mapping strings to "
"Tensor objects.", str(e.exception))
def testInvalidInputForReturnOperations(self):
with tf.Graph().as_default():
with self.assertRaises(TypeError) as e:
tf.import_graph_def(self._MakeGraphDef(""), return_elements=[7])
self.assertEqual(
"return_elements must be a list of strings.", str(e.exception))
def testWithExtensionAndAttr(self):
with tf.Graph().as_default() as g:
c = tf.constant(5.0, dtype=tf.float32, name="c")
tf.pack([c, c], name="pack")
gdef = g.as_graph_def()
with self.test_session():
pack, = tf.import_graph_def(gdef, return_elements=["pack"])
self.assertAllEqual(pack.outputs[0].eval(), [5.0, 5.0])
def testWithDevice(self):
with tf.Graph().as_default() as g:
# No device.
a = tf.constant(3.0, name="a")
with tf.device("/cpu:0"):
b = tf.constant(4.0, name="b")
with tf.device("/job:worker"):
c = tf.constant(5.0, name="c")
gdef = g.as_graph_def()
with tf.Graph().as_default():
a2, b2, c2 = tf.import_graph_def(
gdef, return_elements=["a", "b", "c"])
self.assertEqual(a.device, a2.device)
self.assertEqual(b.device, b2.device)
self.assertEqual(c.device, c2.device)
with tf.Graph().as_default():
with tf.device(device.merge_device("/task:0")):
a3, b3, c3 = tf.import_graph_def(
gdef, return_elements=["a", "b", "c"])
self.assertEqual("/task:0", a3.device)
self.assertEqual("/task:0/device:CPU:0", b3.device) # canonicalized.
self.assertEqual(c.device + "/task:0", c3.device)
with tf.Graph().as_default():
with tf.device(device.merge_device("/job:ps")):
a4, b4, c4 = tf.import_graph_def(
gdef, return_elements=["a", "b", "c"])
self.assertEqual("/job:ps", a4.device)
self.assertEqual("/job:ps/device:CPU:0", b4.device) # canonicalized.
self.assertEqual(c.device, c4.device) # worker overrides ps.
with tf.Graph().as_default():
with tf.device(device.merge_device("/gpu:0")):
a5, b5, c5 = tf.import_graph_def(
gdef, return_elements=["a", "b", "c"])
self.assertEqual("/device:GPU:0", a5.device)
self.assertEqual("/device:CPU:0", b5.device) # cpu overrides gpu.
self.assertEqual(c.device + "/device:GPU:0", c5.device)
def testWithDeviceFunctionDependingOnInputs(self):
with tf.Graph().as_default() as g:
with tf.device("/job:ps"):
v = tf.Variable(1.0)
unused_assign_op = v.assign(2.0)
unused_assign_2_op = v.assign(3.0)
unused_add_t = v + v
gdef = g.as_graph_def()
# We'll use the following device function to observe ops that consume a
# reference-typed input.
ops_with_ref_inputs = []
def input_counter(op):
if any(in_t.dtype.is_ref_dtype for in_t in op.inputs):
ops_with_ref_inputs.append(op)
return ""
with tf.Graph().as_default() as g:
with tf.device(input_counter):
tf.import_graph_def(gdef)
# We expect to see the initializer, two assign operations, and the add op.
self.assertEqual(4, len(ops_with_ref_inputs))
def testGradient(self):
with tf.Graph().as_default() as g:
inputs = tf.placeholder(tf.float32, shape=[None, 100], name="input")
weights = tf.placeholder(tf.float32, shape=[100, 10], name="weights")
biases = tf.placeholder(tf.float32, shape=[10], name="biases")
activations = tf.nn.relu(tf.matmul(inputs, weights) + biases,
name="activations")
loss = tf.reduce_mean(activations, name="loss")
gdef = g.as_graph_def()
with tf.Graph().as_default() as g:
input_placeholder = tf.placeholder(tf.float32, shape=[32, 100])
weights_var = tf.Variable(tf.truncated_normal([100, 10]), name="weights")
biases_var = tf.Variable(tf.zeros([10]), name="biases")
activations, loss = tf.import_graph_def(
gdef,
input_map={"input:0": input_placeholder,
"weights:0": weights_var,
"biases:0": biases_var},
return_elements=["activations:0", "loss:0"])
self.assertEqual([32, 10], activations.get_shape())
self.assertEqual([], loss.get_shape())
weights_grad, biases_grad = tf.gradients(loss, [weights_var, biases_var])
self.assertEqual([100, 10], weights_grad.get_shape())
self.assertEqual([10], biases_grad.get_shape())
def testLargeGraph(self):
with self.test_session():
# The default message byte limit is 64M. Ours is 2G with a warning at 512M.
# Adding a 150M entries float32 tensor should blow through the warning,
# but not the hard limit.
input_shape = [150, 1024, 1024]
tensor_input = np.random.rand(*input_shape).astype(np.float32)
t = tf.constant(tensor_input, shape=input_shape)
g = tf.identity(t)
g.eval()
def testVersion(self):
v0 = tf.GRAPH_DEF_VERSION_MIN_CONSUMER
v2 = tf.GRAPH_DEF_VERSION
v1 = (v0 + v2) // 2
for producer in v0, v1, v2:
for min_consumer in v0, v1, v2:
with tf.Graph().as_default():
a, = tf.import_graph_def(
self._MakeGraphDef("node { name: 'A' op: 'Oii' }",
producer=producer, min_consumer=min_consumer),
return_elements=["A"])
self.assertEqual(a.graph.graph_def_versions.producer, producer)
self.assertEqual(a.graph.graph_def_versions.min_consumer,
min_consumer)
def testVersionLow(self):
with tf.Graph().as_default() as g:
pat = (r"GraphDef producer version -1 below min producer %d supported "
r"by TensorFlow \S+\. Please regenerate your graph.$" %
tf.GRAPH_DEF_VERSION_MIN_PRODUCER)
tf.import_graph_def(self._MakeGraphDef("", producer=-1))
x = tf.constant(7) # Need at least one op to get a C++ graph generated
with self.test_session(graph=g) as sess:
with self.assertRaisesRegexp(Exception, pat):
sess.run(x)
def testVersionHigh(self):
with tf.Graph().as_default() as g:
pat = (r"GraphDef min consumer version %d above current version %d "
r"for TensorFlow \S+\. Please upgrade TensorFlow\.$" %
(1 << 30, tf.GRAPH_DEF_VERSION))
tf.import_graph_def(self._MakeGraphDef("", min_consumer=1 << 30))
x = tf.constant(7) # Need at least one op to get a C++ graph generated
with self.test_session(graph=g) as sess:
with self.assertRaisesRegexp(Exception, pat):
sess.run(x)
def testDefaultAttrsAdded(self):
with tf.Graph().as_default():
a = tf.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'OpWithDefaultAttr' }
"""),
return_elements=["A"])
self.assertEqual(123.0, a[0].get_attr("default_float"))
def testDefaultAttrsRemoved(self):
producer_op_list = op_def_pb2.OpList()
text_format.Merge("""
op {
name: 'OpWithFutureDefaultAttr'
attr { name: 'default_int' type: 'int' default_value { i: 456 } }
}
""", producer_op_list)
# Attr only in producer_op_list with default value gets removed.
with tf.Graph().as_default():
a = tf.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'OpWithFutureDefaultAttr'
attr { key: 'default_int' value { i: 456 } } }
"""),
return_elements=["A"], producer_op_list=producer_op_list)
with self.assertRaisesRegexp(ValueError, "No attr named 'default_int'"):
a[0].get_attr("default_int")
# Attr only in producer_op_list with non-default value is preserved.
with tf.Graph().as_default():
a = tf.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'OpWithFutureDefaultAttr'
attr { key: 'default_int' value { i: 987 } } }
"""),
return_elements=["A"], producer_op_list=producer_op_list)
self.assertEqual(987, a[0].get_attr("default_int"))
if __name__ == "__main__":
tf.test.main()
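# Minimal illustrative usage outside the test harness (TF 0.x-era API as used
# in this file; the dummy 'None' op registered above is assumed):
#   gdef = tf.GraphDef()
#   text_format.Merge("versions: { producer: %d } node { name: 'A' op: 'None' }"
#                     % tf.GRAPH_DEF_VERSION, gdef)
#   with tf.Graph().as_default():
#       tf.import_graph_def(gdef, name="demo")   # creates op "demo/A"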
|
apache-2.0
|
temasek/android_external_chromium_org_third_party_WebKit
|
Tools/Scripts/webkitpy/tool/commands/queries.py
|
49
|
9746
|
# Copyright (c) 2009 Google Inc. All rights reserved.
# Copyright (c) 2009 Apple Inc. All rights reserved.
# Copyright (c) 2012 Intel Corporation. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import fnmatch
import logging
import re
from optparse import make_option
from webkitpy.common.system.crashlogs import CrashLogs
from webkitpy.tool.multicommandtool import AbstractDeclarativeCommand
from webkitpy.layout_tests.models.test_expectations import TestExpectations
from webkitpy.layout_tests.port import platform_options
_log = logging.getLogger(__name__)
class CrashLog(AbstractDeclarativeCommand):
name = "crash-log"
help_text = "Print the newest crash log for the given process"
show_in_main_help = True
long_help = """Finds the newest crash log matching the given process name
and PID and prints it to stdout."""
argument_names = "PROCESS_NAME [PID]"
def execute(self, options, args, tool):
crash_logs = CrashLogs(tool)
pid = None
if len(args) > 1:
pid = int(args[1])
print crash_logs.find_newest_log(args[0], pid)
class PrintExpectations(AbstractDeclarativeCommand):
name = 'print-expectations'
help_text = 'Print the expected result for the given test(s) on the given port(s)'
show_in_main_help = True
def __init__(self):
options = [
make_option('--all', action='store_true', default=False,
help='display the expectations for *all* tests'),
make_option('-x', '--exclude-keyword', action='append', default=[],
help='limit to tests not matching the given keyword (for example, "skip", "slow", or "crash". May specify multiple times'),
make_option('-i', '--include-keyword', action='append', default=[],
help='limit to tests with the given keyword (for example, "skip", "slow", or "crash". May specify multiple times'),
make_option('--csv', action='store_true', default=False,
help='Print a CSV-style report that includes the port name, bugs, specifiers, tests, and expectations'),
make_option('-f', '--full', action='store_true', default=False,
help='Print a full TestExpectations-style line for every match'),
make_option('--paths', action='store_true', default=False,
help='display the paths for all applicable expectation files'),
] + platform_options(use_globs=True)
AbstractDeclarativeCommand.__init__(self, options=options)
self._expectation_models = {}
def execute(self, options, args, tool):
if not options.paths and not args and not options.all:
print "You must either specify one or more test paths or --all."
return
if options.platform:
port_names = fnmatch.filter(tool.port_factory.all_port_names(), options.platform)
if not port_names:
default_port = tool.port_factory.get(options.platform)
if default_port:
port_names = [default_port.name()]
else:
print "No port names match '%s'" % options.platform
return
else:
default_port = tool.port_factory.get(port_names[0])
else:
default_port = tool.port_factory.get(options=options)
port_names = [default_port.name()]
if options.paths:
files = default_port.expectations_files()
layout_tests_dir = default_port.layout_tests_dir()
for file in files:
if file.startswith(layout_tests_dir):
file = file.replace(layout_tests_dir, 'LayoutTests')
print file
return
tests = set(default_port.tests(args))
for port_name in port_names:
model = self._model(options, port_name, tests)
tests_to_print = self._filter_tests(options, model, tests)
lines = [model.get_expectation_line(test) for test in sorted(tests_to_print)]
if port_name != port_names[0]:
print
print '\n'.join(self._format_lines(options, port_name, lines))
def _filter_tests(self, options, model, tests):
filtered_tests = set()
if options.include_keyword:
for keyword in options.include_keyword:
filtered_tests.update(model.get_test_set_for_keyword(keyword))
else:
filtered_tests = tests
for keyword in options.exclude_keyword:
filtered_tests.difference_update(model.get_test_set_for_keyword(keyword))
return filtered_tests
def _format_lines(self, options, port_name, lines):
output = []
if options.csv:
for line in lines:
output.append("%s,%s" % (port_name, line.to_csv()))
elif lines:
include_modifiers = options.full
include_expectations = options.full or len(options.include_keyword) != 1 or len(options.exclude_keyword)
output.append("// For %s" % port_name)
for line in lines:
output.append("%s" % line.to_string(None, include_modifiers, include_expectations, include_comment=False))
return output
def _model(self, options, port_name, tests):
port = self._tool.port_factory.get(port_name, options)
return TestExpectations(port, tests).model()
class PrintBaselines(AbstractDeclarativeCommand):
name = 'print-baselines'
help_text = 'Prints the baseline locations for given test(s) on the given port(s)'
show_in_main_help = True
def __init__(self):
options = [
make_option('--all', action='store_true', default=False,
help='display the baselines for *all* tests'),
make_option('--csv', action='store_true', default=False,
help='Print a CSV-style report that includes the port name, test_name, test platform, baseline type, baseline location, and baseline platform'),
make_option('--include-virtual-tests', action='store_true',
help='Include virtual tests'),
] + platform_options(use_globs=True)
AbstractDeclarativeCommand.__init__(self, options=options)
self._platform_regexp = re.compile(r'platform/([^/]+)/(.+)')
def execute(self, options, args, tool):
if not args and not options.all:
print "You must either specify one or more test paths or --all."
return
default_port = tool.port_factory.get()
if options.platform:
port_names = fnmatch.filter(tool.port_factory.all_port_names(), options.platform)
if not port_names:
print "No port names match '%s'" % options.platform
else:
port_names = [default_port.name()]
if options.include_virtual_tests:
tests = sorted(default_port.tests(args))
else:
# FIXME: make real_tests() a public method.
tests = sorted(default_port._real_tests(args))
for port_name in port_names:
if port_name != port_names[0]:
print
if not options.csv:
print "// For %s" % port_name
port = tool.port_factory.get(port_name)
for test_name in tests:
self._print_baselines(options, port_name, test_name, port.expected_baselines_by_extension(test_name))
def _print_baselines(self, options, port_name, test_name, baselines):
for extension in sorted(baselines.keys()):
baseline_location = baselines[extension]
if baseline_location:
if options.csv:
print "%s,%s,%s,%s,%s,%s" % (port_name, test_name, self._platform_for_path(test_name),
extension[1:], baseline_location, self._platform_for_path(baseline_location))
else:
print baseline_location
def _platform_for_path(self, relpath):
platform_matchobj = self._platform_regexp.match(relpath)
if platform_matchobj:
return platform_matchobj.group(1)
return None
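# Illustrative invocations (command names taken from the classes above; the
# webkit-patch entry point is an assumption):
#   webkit-patch print-expectations --platform mac* fast/dom
#   webkit-patch print-baselines --csv fast/dom/some-test.html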
|
bsd-3-clause
|
DooMLoRD/android_kernel_sony_msm8974ab
|
Documentation/target/tcm_mod_builder.py
|
4981
|
41422
|
#!/usr/bin/python
# The TCM v4 multi-protocol fabric module generation script for drivers/target/$NEW_MOD
#
# Copyright (c) 2010 Rising Tide Systems
# Copyright (c) 2010 Linux-iSCSI.org
#
# Author: [email protected]
#
import os, sys
import subprocess as sub
import string
import re
import optparse
tcm_dir = ""
fabric_ops = []
fabric_mod_dir = ""
fabric_mod_port = ""
fabric_mod_init_port = ""
def tcm_mod_err(msg):
print msg
sys.exit(1)
def tcm_mod_create_module_subdir(fabric_mod_dir_var):
if os.path.isdir(fabric_mod_dir_var) == True:
return 1
print "Creating fabric_mod_dir: " + fabric_mod_dir_var
ret = os.mkdir(fabric_mod_dir_var)
if ret:
tcm_mod_err("Unable to mkdir " + fabric_mod_dir_var)
return
def tcm_mod_build_FC_include(fabric_mod_dir_var, fabric_mod_name):
global fabric_mod_port
global fabric_mod_init_port
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
print "Writing file: " + f
p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_nacl {\n"
buf += " /* Binary World Wide unique Port Name for FC Initiator Nport */\n"
buf += " u64 nport_wwpn;\n"
buf += " /* ASCII formatted WWPN for FC Initiator Nport */\n"
buf += " char nport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
buf += " struct se_node_acl se_node_acl;\n"
buf += "};\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_tpg {\n"
buf += " /* FC lport target portal group tag for TCM */\n"
buf += " u16 lport_tpgt;\n"
buf += " /* Pointer back to " + fabric_mod_name + "_lport */\n"
buf += " struct " + fabric_mod_name + "_lport *lport;\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
buf += " struct se_portal_group se_tpg;\n"
buf += "};\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_lport {\n"
buf += " /* SCSI protocol the lport is providing */\n"
buf += " u8 lport_proto_id;\n"
buf += " /* Binary World Wide unique Port Name for FC Target Lport */\n"
buf += " u64 lport_wwpn;\n"
buf += " /* ASCII formatted WWPN for FC Target Lport */\n"
buf += " char lport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_lport() */\n"
buf += " struct se_wwn lport_wwn;\n"
buf += "};\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
fabric_mod_port = "lport"
fabric_mod_init_port = "nport"
return
def tcm_mod_build_SAS_include(fabric_mod_dir_var, fabric_mod_name):
global fabric_mod_port
global fabric_mod_init_port
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
print "Writing file: " + f
p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_nacl {\n"
buf += " /* Binary World Wide unique Port Name for SAS Initiator port */\n"
buf += " u64 iport_wwpn;\n"
buf += " /* ASCII formatted WWPN for Sas Initiator port */\n"
buf += " char iport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
buf += " struct se_node_acl se_node_acl;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tpg {\n"
buf += " /* SAS port target portal group tag for TCM */\n"
buf += " u16 tport_tpgt;\n"
buf += " /* Pointer back to " + fabric_mod_name + "_tport */\n"
buf += " struct " + fabric_mod_name + "_tport *tport;\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
buf += " struct se_portal_group se_tpg;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tport {\n"
buf += " /* SCSI protocol the tport is providing */\n"
buf += " u8 tport_proto_id;\n"
buf += " /* Binary World Wide unique Port Name for SAS Target port */\n"
buf += " u64 tport_wwpn;\n"
buf += " /* ASCII formatted WWPN for SAS Target port */\n"
buf += " char tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tport() */\n"
buf += " struct se_wwn tport_wwn;\n"
buf += "};\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
fabric_mod_port = "tport"
fabric_mod_init_port = "iport"
return
def tcm_mod_build_iSCSI_include(fabric_mod_dir_var, fabric_mod_name):
global fabric_mod_port
global fabric_mod_init_port
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
print "Writing file: " + f
p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_nacl {\n"
buf += " /* ASCII formatted InitiatorName */\n"
buf += " char iport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
buf += " struct se_node_acl se_node_acl;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tpg {\n"
buf += " /* iSCSI target portal group tag for TCM */\n"
buf += " u16 tport_tpgt;\n"
buf += " /* Pointer back to " + fabric_mod_name + "_tport */\n"
buf += " struct " + fabric_mod_name + "_tport *tport;\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
buf += " struct se_portal_group se_tpg;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tport {\n"
buf += " /* SCSI protocol the tport is providing */\n"
buf += " u8 tport_proto_id;\n"
buf += " /* ASCII formatted TargetName for IQN */\n"
buf += " char tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tport() */\n"
buf += " struct se_wwn tport_wwn;\n"
buf += "};\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
fabric_mod_port = "tport"
fabric_mod_init_port = "iport"
return
def tcm_mod_build_base_includes(proto_ident, fabric_mod_dir_val, fabric_mod_name):
if proto_ident == "FC":
tcm_mod_build_FC_include(fabric_mod_dir_val, fabric_mod_name)
elif proto_ident == "SAS":
tcm_mod_build_SAS_include(fabric_mod_dir_val, fabric_mod_name)
elif proto_ident == "iSCSI":
tcm_mod_build_iSCSI_include(fabric_mod_dir_val, fabric_mod_name)
else:
print "Unsupported proto_ident: " + proto_ident
sys.exit(1)
return
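# Illustrative note, derived from the writer functions above: for
# fabric_mod_name "tcm_demo" and proto_ident "iSCSI", the generated
# tcm_demo_base.h begins with
#   #define TCM_DEMO_VERSION "v0.1"
#   #define TCM_DEMO_NAMELEN 32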
def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_configfs.c"
print "Writing file: " + f
p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#include <linux/module.h>\n"
buf += "#include <linux/moduleparam.h>\n"
buf += "#include <linux/version.h>\n"
buf += "#include <generated/utsrelease.h>\n"
buf += "#include <linux/utsname.h>\n"
buf += "#include <linux/init.h>\n"
buf += "#include <linux/slab.h>\n"
buf += "#include <linux/kthread.h>\n"
buf += "#include <linux/types.h>\n"
buf += "#include <linux/string.h>\n"
buf += "#include <linux/configfs.h>\n"
buf += "#include <linux/ctype.h>\n"
buf += "#include <asm/unaligned.h>\n\n"
buf += "#include <target/target_core_base.h>\n"
buf += "#include <target/target_core_fabric.h>\n"
buf += "#include <target/target_core_fabric_configfs.h>\n"
buf += "#include <target/target_core_configfs.h>\n"
buf += "#include <target/configfs_macros.h>\n\n"
buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"
buf += "/* Local pointer to allocated TCM configfs fabric module */\n"
buf += "struct target_fabric_configfs *" + fabric_mod_name + "_fabric_configfs;\n\n"
buf += "static struct se_node_acl *" + fabric_mod_name + "_make_nodeacl(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct config_group *group,\n"
buf += " const char *name)\n"
buf += "{\n"
buf += " struct se_node_acl *se_nacl, *se_nacl_new;\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl;\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " u64 wwpn = 0;\n"
buf += " u32 nexus_depth;\n\n"
buf += " /* " + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n"
buf += " return ERR_PTR(-EINVAL); */\n"
buf += " se_nacl_new = " + fabric_mod_name + "_alloc_fabric_acl(se_tpg);\n"
buf += " if (!se_nacl_new)\n"
buf += " return ERR_PTR(-ENOMEM);\n"
buf += "//#warning FIXME: Hardcoded nexus depth in " + fabric_mod_name + "_make_nodeacl()\n"
buf += " nexus_depth = 1;\n"
buf += " /*\n"
buf += " * se_nacl_new may be released by core_tpg_add_initiator_node_acl()\n"
buf += " * when converting a NodeACL from demo mode -> explict\n"
buf += " */\n"
buf += " se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,\n"
buf += " name, nexus_depth);\n"
buf += " if (IS_ERR(se_nacl)) {\n"
buf += " " + fabric_mod_name + "_release_fabric_acl(se_tpg, se_nacl_new);\n"
buf += " return se_nacl;\n"
buf += " }\n"
buf += " /*\n"
buf += " * Locate our struct " + fabric_mod_name + "_nacl and set the FC Nport WWPN\n"
buf += " */\n"
buf += " nacl = container_of(se_nacl, struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " nacl->" + fabric_mod_init_port + "_wwpn = wwpn;\n"
buf += " /* " + fabric_mod_name + "_format_wwn(&nacl->" + fabric_mod_init_port + "_name[0], " + fabric_mod_name.upper() + "_NAMELEN, wwpn); */\n\n"
buf += " return se_nacl;\n"
buf += "}\n\n"
buf += "static void " + fabric_mod_name + "_drop_nodeacl(struct se_node_acl *se_acl)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl = container_of(se_acl,\n"
buf += " struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
buf += " core_tpg_del_initiator_node_acl(se_acl->se_tpg, se_acl, 1);\n"
buf += " kfree(nacl);\n"
buf += "}\n\n"
buf += "static struct se_portal_group *" + fabric_mod_name + "_make_tpg(\n"
buf += " struct se_wwn *wwn,\n"
buf += " struct config_group *group,\n"
buf += " const char *name)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + "*" + fabric_mod_port + " = container_of(wwn,\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg;\n"
buf += " unsigned long tpgt;\n"
buf += " int ret;\n\n"
buf += " if (strstr(name, \"tpgt_\") != name)\n"
buf += " return ERR_PTR(-EINVAL);\n"
buf += " if (strict_strtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX)\n"
buf += " return ERR_PTR(-EINVAL);\n\n"
buf += " tpg = kzalloc(sizeof(struct " + fabric_mod_name + "_tpg), GFP_KERNEL);\n"
buf += " if (!tpg) {\n"
buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_tpg\");\n"
buf += " return ERR_PTR(-ENOMEM);\n"
buf += " }\n"
buf += " tpg->" + fabric_mod_port + " = " + fabric_mod_port + ";\n"
buf += " tpg->" + fabric_mod_port + "_tpgt = tpgt;\n\n"
buf += " ret = core_tpg_register(&" + fabric_mod_name + "_fabric_configfs->tf_ops, wwn,\n"
buf += " &tpg->se_tpg, (void *)tpg,\n"
buf += " TRANSPORT_TPG_TYPE_NORMAL);\n"
buf += " if (ret < 0) {\n"
buf += " kfree(tpg);\n"
buf += " return NULL;\n"
buf += " }\n"
buf += " return &tpg->se_tpg;\n"
buf += "}\n\n"
buf += "static void " + fabric_mod_name + "_drop_tpg(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n\n"
buf += " core_tpg_deregister(se_tpg);\n"
buf += " kfree(tpg);\n"
buf += "}\n\n"
buf += "static struct se_wwn *" + fabric_mod_name + "_make_" + fabric_mod_port + "(\n"
buf += " struct target_fabric_configfs *tf,\n"
buf += " struct config_group *group,\n"
buf += " const char *name)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + ";\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " u64 wwpn = 0;\n\n"
buf += " /* if (" + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n"
buf += " return ERR_PTR(-EINVAL); */\n\n"
buf += " " + fabric_mod_port + " = kzalloc(sizeof(struct " + fabric_mod_name + "_" + fabric_mod_port + "), GFP_KERNEL);\n"
buf += " if (!" + fabric_mod_port + ") {\n"
buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_" + fabric_mod_port + "\");\n"
buf += " return ERR_PTR(-ENOMEM);\n"
buf += " }\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " " + fabric_mod_port + "->" + fabric_mod_port + "_wwpn = wwpn;\n"
buf += " /* " + fabric_mod_name + "_format_wwn(&" + fabric_mod_port + "->" + fabric_mod_port + "_name[0], " + fabric_mod_name.upper() + "_NAMELEN, wwpn); */\n\n"
buf += " return &" + fabric_mod_port + "->" + fabric_mod_port + "_wwn;\n"
buf += "}\n\n"
buf += "static void " + fabric_mod_name + "_drop_" + fabric_mod_port + "(struct se_wwn *wwn)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = container_of(wwn,\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n"
buf += " kfree(" + fabric_mod_port + ");\n"
buf += "}\n\n"
buf += "static ssize_t " + fabric_mod_name + "_wwn_show_attr_version(\n"
buf += " struct target_fabric_configfs *tf,\n"
buf += " char *page)\n"
buf += "{\n"
buf += " return sprintf(page, \"" + fabric_mod_name.upper() + " fabric module %s on %s/%s\"\n"
buf += " \"on \"UTS_RELEASE\"\\n\", " + fabric_mod_name.upper() + "_VERSION, utsname()->sysname,\n"
buf += " utsname()->machine);\n"
buf += "}\n\n"
buf += "TF_WWN_ATTR_RO(" + fabric_mod_name + ", version);\n\n"
buf += "static struct configfs_attribute *" + fabric_mod_name + "_wwn_attrs[] = {\n"
buf += " &" + fabric_mod_name + "_wwn_version.attr,\n"
buf += " NULL,\n"
buf += "};\n\n"
buf += "static struct target_core_fabric_ops " + fabric_mod_name + "_ops = {\n"
buf += " .get_fabric_name = " + fabric_mod_name + "_get_fabric_name,\n"
buf += " .get_fabric_proto_ident = " + fabric_mod_name + "_get_fabric_proto_ident,\n"
buf += " .tpg_get_wwn = " + fabric_mod_name + "_get_fabric_wwn,\n"
buf += " .tpg_get_tag = " + fabric_mod_name + "_get_tag,\n"
buf += " .tpg_get_default_depth = " + fabric_mod_name + "_get_default_depth,\n"
buf += " .tpg_get_pr_transport_id = " + fabric_mod_name + "_get_pr_transport_id,\n"
buf += " .tpg_get_pr_transport_id_len = " + fabric_mod_name + "_get_pr_transport_id_len,\n"
buf += " .tpg_parse_pr_out_transport_id = " + fabric_mod_name + "_parse_pr_out_transport_id,\n"
buf += " .tpg_check_demo_mode = " + fabric_mod_name + "_check_false,\n"
buf += " .tpg_check_demo_mode_cache = " + fabric_mod_name + "_check_true,\n"
buf += " .tpg_check_demo_mode_write_protect = " + fabric_mod_name + "_check_true,\n"
buf += " .tpg_check_prod_mode_write_protect = " + fabric_mod_name + "_check_false,\n"
buf += " .tpg_alloc_fabric_acl = " + fabric_mod_name + "_alloc_fabric_acl,\n"
buf += " .tpg_release_fabric_acl = " + fabric_mod_name + "_release_fabric_acl,\n"
buf += " .tpg_get_inst_index = " + fabric_mod_name + "_tpg_get_inst_index,\n"
buf += " .release_cmd = " + fabric_mod_name + "_release_cmd,\n"
buf += " .shutdown_session = " + fabric_mod_name + "_shutdown_session,\n"
buf += " .close_session = " + fabric_mod_name + "_close_session,\n"
buf += " .stop_session = " + fabric_mod_name + "_stop_session,\n"
buf += " .fall_back_to_erl0 = " + fabric_mod_name + "_reset_nexus,\n"
buf += " .sess_logged_in = " + fabric_mod_name + "_sess_logged_in,\n"
buf += " .sess_get_index = " + fabric_mod_name + "_sess_get_index,\n"
buf += " .sess_get_initiator_sid = NULL,\n"
buf += " .write_pending = " + fabric_mod_name + "_write_pending,\n"
buf += " .write_pending_status = " + fabric_mod_name + "_write_pending_status,\n"
buf += " .set_default_node_attributes = " + fabric_mod_name + "_set_default_node_attrs,\n"
buf += " .get_task_tag = " + fabric_mod_name + "_get_task_tag,\n"
buf += " .get_cmd_state = " + fabric_mod_name + "_get_cmd_state,\n"
buf += " .queue_data_in = " + fabric_mod_name + "_queue_data_in,\n"
buf += " .queue_status = " + fabric_mod_name + "_queue_status,\n"
buf += " .queue_tm_rsp = " + fabric_mod_name + "_queue_tm_rsp,\n"
buf += " .get_fabric_sense_len = " + fabric_mod_name + "_get_fabric_sense_len,\n"
buf += " .set_fabric_sense_len = " + fabric_mod_name + "_set_fabric_sense_len,\n"
buf += " .is_state_remove = " + fabric_mod_name + "_is_state_remove,\n"
buf += " /*\n"
buf += " * Setup function pointers for generic logic in target_core_fabric_configfs.c\n"
buf += " */\n"
buf += " .fabric_make_wwn = " + fabric_mod_name + "_make_" + fabric_mod_port + ",\n"
buf += " .fabric_drop_wwn = " + fabric_mod_name + "_drop_" + fabric_mod_port + ",\n"
buf += " .fabric_make_tpg = " + fabric_mod_name + "_make_tpg,\n"
buf += " .fabric_drop_tpg = " + fabric_mod_name + "_drop_tpg,\n"
buf += " .fabric_post_link = NULL,\n"
buf += " .fabric_pre_unlink = NULL,\n"
buf += " .fabric_make_np = NULL,\n"
buf += " .fabric_drop_np = NULL,\n"
buf += " .fabric_make_nodeacl = " + fabric_mod_name + "_make_nodeacl,\n"
buf += " .fabric_drop_nodeacl = " + fabric_mod_name + "_drop_nodeacl,\n"
buf += "};\n\n"
buf += "static int " + fabric_mod_name + "_register_configfs(void)\n"
buf += "{\n"
buf += " struct target_fabric_configfs *fabric;\n"
buf += " int ret;\n\n"
buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + " fabric module %s on %s/%s\"\n"
buf += " \" on \"UTS_RELEASE\"\\n\"," + fabric_mod_name.upper() + "_VERSION, utsname()->sysname,\n"
buf += " utsname()->machine);\n"
buf += " /*\n"
buf += " * Register the top level struct config_item_type with TCM core\n"
buf += " */\n"
buf += " fabric = target_fabric_configfs_init(THIS_MODULE, \"" + fabric_mod_name[4:] + "\");\n"
buf += " if (IS_ERR(fabric)) {\n"
buf += " printk(KERN_ERR \"target_fabric_configfs_init() failed\\n\");\n"
buf += " return PTR_ERR(fabric);\n"
buf += " }\n"
buf += " /*\n"
buf += " * Setup fabric->tf_ops from our local " + fabric_mod_name + "_ops\n"
buf += " */\n"
buf += " fabric->tf_ops = " + fabric_mod_name + "_ops;\n"
buf += " /*\n"
buf += " * Setup default attribute lists for various fabric->tf_cit_tmpl\n"
buf += " */\n"
buf += " TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = " + fabric_mod_name + "_wwn_attrs;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL;\n"
buf += " /*\n"
buf += " * Register the fabric for use within TCM\n"
buf += " */\n"
buf += " ret = target_fabric_configfs_register(fabric);\n"
buf += " if (ret < 0) {\n"
buf += " printk(KERN_ERR \"target_fabric_configfs_register() failed\"\n"
buf += " \" for " + fabric_mod_name.upper() + "\\n\");\n"
buf += " return ret;\n"
buf += " }\n"
buf += " /*\n"
buf += " * Setup our local pointer to *fabric\n"
buf += " */\n"
buf += " " + fabric_mod_name + "_fabric_configfs = fabric;\n"
buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + "[0] - Set fabric -> " + fabric_mod_name + "_fabric_configfs\\n\");\n"
buf += " return 0;\n"
buf += "};\n\n"
buf += "static void __exit " + fabric_mod_name + "_deregister_configfs(void)\n"
buf += "{\n"
buf += " if (!" + fabric_mod_name + "_fabric_configfs)\n"
buf += " return;\n\n"
buf += " target_fabric_configfs_deregister(" + fabric_mod_name + "_fabric_configfs);\n"
buf += " " + fabric_mod_name + "_fabric_configfs = NULL;\n"
buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + "[0] - Cleared " + fabric_mod_name + "_fabric_configfs\\n\");\n"
buf += "};\n\n"
buf += "static int __init " + fabric_mod_name + "_init(void)\n"
buf += "{\n"
buf += " int ret;\n\n"
buf += " ret = " + fabric_mod_name + "_register_configfs();\n"
buf += " if (ret < 0)\n"
buf += " return ret;\n\n"
buf += " return 0;\n"
buf += "};\n\n"
buf += "static void __exit " + fabric_mod_name + "_exit(void)\n"
buf += "{\n"
buf += " " + fabric_mod_name + "_deregister_configfs();\n"
buf += "};\n\n"
buf += "MODULE_DESCRIPTION(\"" + fabric_mod_name.upper() + " series fabric driver\");\n"
buf += "MODULE_LICENSE(\"GPL\");\n"
buf += "module_init(" + fabric_mod_name + "_init);\n"
buf += "module_exit(" + fabric_mod_name + "_exit);\n"
try:
p.write(buf)
except IOError:
tcm_mod_err("Unable to write f: " + f)
p.close()
return
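# For illustration: the generated <fabric_mod_name>_configfs.c above provides
# the complete configfs glue for a new fabric: make/drop callbacks for the
# WWN, TPG and NodeACL groups, the target_core_fabric_ops table, and module
# init/exit hooks that register and deregister the fabric with TCM.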
def tcm_mod_scan_fabric_ops(tcm_dir):
fabric_ops_api = tcm_dir + "include/target/target_core_fabric.h"
print "Using tcm_mod_scan_fabric_ops: " + fabric_ops_api
process_fo = 0
p = open(fabric_ops_api, 'r')
line = p.readline()
while line:
if process_fo == 0 and re.search('struct target_core_fabric_ops {', line):
line = p.readline()
continue
if process_fo == 0:
process_fo = 1
line = p.readline()
# Keep only function-pointer members, e.g. "int (*write_pending)(struct se_cmd *);"
if not re.search('\(\*', line):
continue
fabric_ops.append(line.rstrip())
p.close()
return
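# Illustrative sketch (not part of the generator): what the '\(\*' pattern
# above keeps. The sample lines are assumptions modelled on members of
# target_core_fabric_ops.
# >>> bool(re.search('\(\*', "int (*write_pending)(struct se_cmd *);"))
# True
# >>> bool(re.search('\(\*', "struct module *module;"))
# False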
def tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf = ""
bufi = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_fabric.c"
print "Writing file: " + f
try:
p = open(f, 'w')
except IOError:
tcm_mod_err("Unable to open file: " + f)
fi = fabric_mod_dir_var + "/" + fabric_mod_name + "_fabric.h"
print "Writing file: " + fi
try:
pi = open(fi, 'w')
except IOError:
tcm_mod_err("Unable to open file: " + fi)
buf = "#include <linux/slab.h>\n"
buf += "#include <linux/kthread.h>\n"
buf += "#include <linux/types.h>\n"
buf += "#include <linux/list.h>\n"
buf += "#include <linux/types.h>\n"
buf += "#include <linux/string.h>\n"
buf += "#include <linux/ctype.h>\n"
buf += "#include <asm/unaligned.h>\n"
buf += "#include <scsi/scsi.h>\n"
buf += "#include <scsi/scsi_host.h>\n"
buf += "#include <scsi/scsi_device.h>\n"
buf += "#include <scsi/scsi_cmnd.h>\n"
buf += "#include <scsi/libfc.h>\n\n"
buf += "#include <target/target_core_base.h>\n"
buf += "#include <target/target_core_fabric.h>\n"
buf += "#include <target/target_core_configfs.h>\n\n"
buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"
buf += "int " + fabric_mod_name + "_check_true(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 1;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_check_true(struct se_portal_group *);\n"
buf += "int " + fabric_mod_name + "_check_false(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_check_false(struct se_portal_group *);\n"
total_fabric_ops = len(fabric_ops)
i = 0
while i < total_fabric_ops:
fo = fabric_ops[i]
i += 1
# print "fabric_ops: " + fo
if re.search('get_fabric_name', fo):
buf += "char *" + fabric_mod_name + "_get_fabric_name(void)\n"
buf += "{\n"
buf += " return \"" + fabric_mod_name[4:] + "\";\n"
buf += "}\n\n"
bufi += "char *" + fabric_mod_name + "_get_fabric_name(void);\n"
continue
if re.search('get_fabric_proto_ident', fo):
buf += "u8 " + fabric_mod_name + "_get_fabric_proto_ident(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " u8 proto_id;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " proto_id = fc_get_fabric_proto_ident(se_tpg);\n"
buf += " break;\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " proto_id = sas_get_fabric_proto_ident(se_tpg);\n"
buf += " break;\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " proto_id = iscsi_get_fabric_proto_ident(se_tpg);\n"
buf += " break;\n"
buf += " }\n\n"
buf += " return proto_id;\n"
buf += "}\n\n"
bufi += "u8 " + fabric_mod_name + "_get_fabric_proto_ident(struct se_portal_group *);\n"
if re.search('get_wwn', fo):
buf += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n\n"
buf += " return &" + fabric_mod_port + "->" + fabric_mod_port + "_name[0];\n"
buf += "}\n\n"
bufi += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *);\n"
if re.search('get_tag', fo):
buf += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " return tpg->" + fabric_mod_port + "_tpgt;\n"
buf += "}\n\n"
bufi += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *);\n"
if re.search('get_default_depth', fo):
buf += "u32 " + fabric_mod_name + "_get_default_depth(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 1;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_default_depth(struct se_portal_group *);\n"
if re.search('get_pr_transport_id\)\(', fo):
buf += "u32 " + fabric_mod_name + "_get_pr_transport_id(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct se_node_acl *se_nacl,\n"
buf += " struct t10_pr_registration *pr_reg,\n"
buf += " int *format_code,\n"
buf += " unsigned char *buf)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " int ret = 0;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " ret = fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code, buf);\n"
buf += " break;\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " ret = sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code, buf);\n"
buf += " break;\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " ret = iscsi_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code, buf);\n"
buf += " break;\n"
buf += " }\n\n"
buf += " return ret;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_pr_transport_id(struct se_portal_group *,\n"
bufi += " struct se_node_acl *, struct t10_pr_registration *,\n"
bufi += " int *, unsigned char *);\n"
if re.search('get_pr_transport_id_len\)\(', fo):
buf += "u32 " + fabric_mod_name + "_get_pr_transport_id_len(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct se_node_acl *se_nacl,\n"
buf += " struct t10_pr_registration *pr_reg,\n"
buf += " int *format_code)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " int ret = 0;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " ret = fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code);\n"
buf += " break;\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " ret = sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code);\n"
buf += " break;\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " ret = iscsi_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code);\n"
buf += " break;\n"
buf += " }\n\n"
buf += " return ret;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_pr_transport_id_len(struct se_portal_group *,\n"
bufi += " struct se_node_acl *, struct t10_pr_registration *,\n"
bufi += " int *);\n"
if re.search('parse_pr_out_transport_id\)\(', fo):
buf += "char *" + fabric_mod_name + "_parse_pr_out_transport_id(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " const char *buf,\n"
buf += " u32 *out_tid_len,\n"
buf += " char **port_nexus_ptr)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " char *tid = NULL;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " tid = fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
buf += " port_nexus_ptr);\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " tid = sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
buf += " port_nexus_ptr);\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " tid = iscsi_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
buf += " port_nexus_ptr);\n"
buf += " }\n\n"
buf += " return tid;\n"
buf += "}\n\n"
bufi += "char *" + fabric_mod_name + "_parse_pr_out_transport_id(struct se_portal_group *,\n"
bufi += " const char *, u32 *, char **);\n"
if re.search('alloc_fabric_acl\)\(', fo):
buf += "struct se_node_acl *" + fabric_mod_name + "_alloc_fabric_acl(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl;\n\n"
buf += " nacl = kzalloc(sizeof(struct " + fabric_mod_name + "_nacl), GFP_KERNEL);\n"
buf += " if (!nacl) {\n"
buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_nacl\\n\");\n"
buf += " return NULL;\n"
buf += " }\n\n"
buf += " return &nacl->se_node_acl;\n"
buf += "}\n\n"
bufi += "struct se_node_acl *" + fabric_mod_name + "_alloc_fabric_acl(struct se_portal_group *);\n"
if re.search('release_fabric_acl\)\(', fo):
buf += "void " + fabric_mod_name + "_release_fabric_acl(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct se_node_acl *se_nacl)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl = container_of(se_nacl,\n"
buf += " struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
buf += " kfree(nacl);\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_release_fabric_acl(struct se_portal_group *,\n"
bufi += " struct se_node_acl *);\n"
if re.search('tpg_get_inst_index\)\(', fo):
buf += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 1;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *);\n"
if re.search('\*release_cmd\)\(', fo):
buf += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *);\n"
if re.search('shutdown_session\)\(', fo):
buf += "int " + fabric_mod_name + "_shutdown_session(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_shutdown_session(struct se_session *);\n"
if re.search('close_session\)\(', fo):
buf += "void " + fabric_mod_name + "_close_session(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_close_session(struct se_session *);\n"
if re.search('stop_session\)\(', fo):
buf += "void " + fabric_mod_name + "_stop_session(struct se_session *se_sess, int sess_sleep , int conn_sleep)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_stop_session(struct se_session *, int, int);\n"
if re.search('fall_back_to_erl0\)\(', fo):
buf += "void " + fabric_mod_name + "_reset_nexus(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_reset_nexus(struct se_session *);\n"
if re.search('sess_logged_in\)\(', fo):
buf += "int " + fabric_mod_name + "_sess_logged_in(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_sess_logged_in(struct se_session *);\n"
if re.search('sess_get_index\)\(', fo):
buf += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *);\n"
if re.search('write_pending\)\(', fo):
buf += "int " + fabric_mod_name + "_write_pending(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_write_pending(struct se_cmd *);\n"
if re.search('write_pending_status\)\(', fo):
buf += "int " + fabric_mod_name + "_write_pending_status(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_write_pending_status(struct se_cmd *);\n"
if re.search('set_default_node_attributes\)\(', fo):
buf += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *nacl)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *);\n"
if re.search('get_task_tag\)\(', fo):
buf += "u32 " + fabric_mod_name + "_get_task_tag(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_task_tag(struct se_cmd *);\n"
if re.search('get_cmd_state\)\(', fo):
buf += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *);\n"
if re.search('queue_data_in\)\(', fo):
buf += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *);\n"
if re.search('queue_status\)\(', fo):
buf += "int " + fabric_mod_name + "_queue_status(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_queue_status(struct se_cmd *);\n"
if re.search('queue_tm_rsp\)\(', fo):
buf += "int " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *);\n"
if re.search('get_fabric_sense_len\)\(', fo):
buf += "u16 " + fabric_mod_name + "_get_fabric_sense_len(void)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u16 " + fabric_mod_name + "_get_fabric_sense_len(void);\n"
if re.search('set_fabric_sense_len\)\(', fo):
buf += "u16 " + fabric_mod_name + "_set_fabric_sense_len(struct se_cmd *se_cmd, u32 sense_length)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u16 " + fabric_mod_name + "_set_fabric_sense_len(struct se_cmd *, u32);\n"
if re.search('is_state_remove\)\(', fo):
buf += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *);\n"
try:
p.write(buf)
except IOError:
tcm_mod_err("Unable to write f: " + f)
p.close()
try:
pi.write(bufi)
except IOError:
tcm_mod_err("Unable to write fi: " + fi)
pi.close()
return
def tcm_mod_build_kbuild(fabric_mod_dir_var, fabric_mod_name):
buf = ""
f = fabric_mod_dir_var + "/Makefile"
print "Writing file: " + f
try:
p = open(f, 'w')
except IOError:
tcm_mod_err("Unable to open file: " + f)
buf += fabric_mod_name + "-objs := " + fabric_mod_name + "_fabric.o \\\n"
buf += " " + fabric_mod_name + "_configfs.o\n"
buf += "obj-$(CONFIG_" + fabric_mod_name.upper() + ") += " + fabric_mod_name + ".o\n"
try:
p.write(buf)
except IOError:
tcm_mod_err("Unable to write f: " + f)
p.close()
return
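# For illustration, with a hypothetical module name "tcm_nab" the Makefile
# written above contains:
#
#   tcm_nab-objs := tcm_nab_fabric.o \
#                   tcm_nab_configfs.o
#   obj-$(CONFIG_TCM_NAB) += tcm_nab.o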
def tcm_mod_build_kconfig(fabric_mod_dir_var, fabric_mod_name):
buf = ""
f = fabric_mod_dir_var + "/Kconfig"
print "Writing file: " + f
try:
p = open(f, 'w')
except IOError:
tcm_mod_err("Unable to open file: " + f)
buf = "config " + fabric_mod_name.upper() + "\n"
buf += " tristate \"" + fabric_mod_name.upper() + " fabric module\"\n"
buf += " depends on TARGET_CORE && CONFIGFS_FS\n"
buf += " default n\n"
buf += " ---help---\n"
buf += " Say Y here to enable the " + fabric_mod_name.upper() + " fabric module\n"
try:
p.write(buf)
except IOError:
tcm_mod_err("Unable to write f: " + f)
p.close()
return
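# For illustration, the same hypothetical "tcm_nab" name yields this Kconfig:
#
#   config TCM_NAB
#   	tristate "TCM_NAB fabric module"
#   	depends on TARGET_CORE && CONFIGFS_FS
#   	default n
#   	---help---
#   	Say Y here to enable the TCM_NAB fabric module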
def tcm_mod_add_kbuild(tcm_dir, fabric_mod_name):
buf = "obj-$(CONFIG_" + fabric_mod_name.upper() + ") += " + fabric_mod_name.lower() + "/\n"
kbuild = tcm_dir + "/drivers/target/Makefile"
f = open(kbuild, 'a')
f.write(buf)
f.close()
return
def tcm_mod_add_kconfig(tcm_dir, fabric_mod_name):
buf = "source \"drivers/target/" + fabric_mod_name.lower() + "/Kconfig\"\n"
kconfig = tcm_dir + "/drivers/target/Kconfig"
f = open(kconfig, 'a')
f.write(buf)
f.close()
return
def main(modname, proto_ident):
# proto_ident = "FC"
# proto_ident = "SAS"
# proto_ident = "iSCSI"
tcm_dir = os.getcwd()
tcm_dir += "/../../"
print "tcm_dir: " + tcm_dir
fabric_mod_name = modname
fabric_mod_dir = tcm_dir + "drivers/target/" + fabric_mod_name
print "Set fabric_mod_name: " + fabric_mod_name
print "Set fabric_mod_dir: " + fabric_mod_dir
print "Using proto_ident: " + proto_ident
if proto_ident != "FC" and proto_ident != "SAS" and proto_ident != "iSCSI":
print "Unsupported proto_ident: " + proto_ident
sys.exit(1)
ret = tcm_mod_create_module_subdir(fabric_mod_dir)
if ret:
print "tcm_mod_create_module_subdir() failed because module already exists!"
sys.exit(1)
tcm_mod_build_base_includes(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_scan_fabric_ops(tcm_dir)
tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_build_configfs(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_build_kbuild(fabric_mod_dir, fabric_mod_name)
tcm_mod_build_kconfig(fabric_mod_dir, fabric_mod_name)
input = raw_input("Would you like to add " + fabric_mod_name + " to drivers/target/Makefile..? [yes,no]: ")
if input == "yes" or input == "y":
tcm_mod_add_kbuild(tcm_dir, fabric_mod_name)
input = raw_input("Would you like to add " + fabric_mod_name + " to drivers/target/Kconfig..? [yes,no]: ")
if input == "yes" or input == "y":
tcm_mod_add_kconfig(tcm_dir, fabric_mod_name)
return
parser = optparse.OptionParser()
parser.add_option('-m', '--modulename', help='Module name', dest='modname',
action='store', nargs=1, type='string')
parser.add_option('-p', '--protoident', help='Protocol Ident', dest='protoident',
action='store', nargs=1, type='string')
(opts, args) = parser.parse_args()
mandatories = ['modname', 'protoident']
for m in mandatories:
if not opts.__dict__[m]:
print "mandatory option is missing\n"
parser.print_help()
sys.exit(-1)
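# Example invocation (hypothetical module name). main() resolves the kernel
# tree as cwd + "/../../", so run the script from two levels below the tree
# root, e.g. Documentation/target/:
#   ./tcm_mod_builder.py -m tcm_nab -p FC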
if __name__ == "__main__":
main(str(opts.modname), opts.protoident)
|
gpl-2.0
|
jasonwzhy/django
|
tests/admin_scripts/tests.py
|
37
|
91484
|
# -*- coding: utf-8 -*-
"""
A series of tests to establish that the command-line management tools work as
advertised - especially with regards to the handling of the
DJANGO_SETTINGS_MODULE and default settings.py files.
"""
from __future__ import unicode_literals
import codecs
import os
import re
import shutil
import socket
import subprocess
import sys
import tempfile
import unittest
import django
from django import conf, get_version
from django.conf import settings
from django.core.management import (
BaseCommand, CommandError, call_command, color,
)
from django.db import ConnectionHandler
from django.db.migrations.exceptions import MigrationSchemaMissing
from django.db.migrations.recorder import MigrationRecorder
from django.test import (
LiveServerTestCase, SimpleTestCase, mock, override_settings,
)
from django.test.runner import DiscoverRunner
from django.utils._os import npath, upath
from django.utils.encoding import force_text
from django.utils.six import PY3, StringIO
test_dir = os.path.realpath(os.path.join(tempfile.gettempdir(), 'test_project'))
if not os.path.exists(test_dir):
os.mkdir(test_dir)
open(os.path.join(test_dir, '__init__.py'), 'w').close()
custom_templates_dir = os.path.join(os.path.dirname(upath(__file__)), 'custom_templates')
SYSTEM_CHECK_MSG = 'System check identified no issues'
class AdminScriptTestCase(unittest.TestCase):
def write_settings(self, filename, apps=None, is_dir=False, sdict=None, extra=None):
if is_dir:
settings_dir = os.path.join(test_dir, filename)
os.mkdir(settings_dir)
settings_file_path = os.path.join(settings_dir, '__init__.py')
else:
settings_file_path = os.path.join(test_dir, filename)
with open(settings_file_path, 'w') as settings_file:
settings_file.write('# -*- coding: utf-8 -*-\n')
settings_file.write('# Settings file automatically generated by admin_scripts test case\n')
if extra:
settings_file.write("%s\n" % extra)
exports = [
'DATABASES',
'ROOT_URLCONF',
'SECRET_KEY',
'TEST_RUNNER', # We need to include TEST_RUNNER, otherwise we get a compatibility warning.
'MIDDLEWARE_CLASSES', # We need to include MIDDLEWARE_CLASSES, otherwise we get a compatibility warning.
]
for s in exports:
if hasattr(settings, s):
o = getattr(settings, s)
if not isinstance(o, (dict, tuple, list)):
o = "'%s'" % o
settings_file.write("%s = %s\n" % (s, o))
if apps is None:
apps = ['django.contrib.auth', 'django.contrib.contenttypes', 'admin_scripts']
settings_file.write("INSTALLED_APPS = %s\n" % apps)
if sdict:
for k, v in sdict.items():
settings_file.write("%s = %s\n" % (k, v))
def remove_settings(self, filename, is_dir=False):
full_name = os.path.join(test_dir, filename)
if is_dir:
shutil.rmtree(full_name)
else:
os.remove(full_name)
# Also try to remove the compiled file; if it exists, it could
# mess up later tests that depend upon the .py file not existing
try:
if sys.platform.startswith('java'):
# Jython produces module$py.class files
os.remove(re.sub(r'\.py$', '$py.class', full_name))
else:
# CPython produces module.pyc files
os.remove(full_name + 'c')
except OSError:
pass
# Also remove a __pycache__ directory, if it exists
cache_name = os.path.join(test_dir, '__pycache__')
if os.path.isdir(cache_name):
shutil.rmtree(cache_name)
def _ext_backend_paths(self):
"""
Returns the paths for any external backend packages.
"""
paths = []
first_package_re = re.compile(r'(^[^\.]+)\.')
for backend in settings.DATABASES.values():
result = first_package_re.findall(backend['ENGINE'])
if result and result != ['django']:
backend_pkg = __import__(result[0])
backend_dir = os.path.dirname(backend_pkg.__file__)
paths.append(os.path.dirname(backend_dir))
return paths
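# Illustrative sketch with a hypothetical external engine: for
# ENGINE = 'mybackend.base', first_package_re yields ['mybackend'], so the
# directory containing the top-level 'mybackend' package is appended to
# paths; engines under 'django.*' are skipped.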
def run_test(self, script, args, settings_file=None, apps=None):
base_dir = os.path.dirname(test_dir)
# The base dir for Django's tests is one level up.
tests_dir = os.path.dirname(os.path.dirname(upath(__file__)))
# The base dir for Django is one level above the test dir. We don't use
# `import django` to figure that out, so we don't pick up a Django
# from site-packages or similar.
django_dir = os.path.dirname(tests_dir)
ext_backend_base_dirs = self._ext_backend_paths()
# Define a temporary environment for the subprocess
test_environ = os.environ.copy()
if sys.platform.startswith('java'):
python_path_var_name = 'JYTHONPATH'
else:
python_path_var_name = 'PYTHONPATH'
old_cwd = os.getcwd()
# Set the test environment
if settings_file:
test_environ['DJANGO_SETTINGS_MODULE'] = str(settings_file)
elif 'DJANGO_SETTINGS_MODULE' in test_environ:
del test_environ['DJANGO_SETTINGS_MODULE']
python_path = [base_dir, django_dir, tests_dir]
python_path.extend(ext_backend_base_dirs)
# Use native strings for better compatibility
test_environ[str(python_path_var_name)] = npath(os.pathsep.join(python_path))
test_environ[str('PYTHONWARNINGS')] = str('')
# Move to the test directory and run
os.chdir(test_dir)
out, err = subprocess.Popen([sys.executable, script] + args,
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
env=test_environ, universal_newlines=True).communicate()
# Move back to the old working directory
os.chdir(old_cwd)
return out, err
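# For illustration: run_test(script, ['check'], settings_file='test_project.settings')
# executes the script in a subprocess with DJANGO_SETTINGS_MODULE set to
# 'test_project.settings', PYTHONPATH (JYTHONPATH on Jython) covering the
# temp directory holding test_project, the Django checkout and its tests
# directory, and test_dir as the working directory.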
def run_django_admin(self, args, settings_file=None):
script_dir = os.path.abspath(os.path.join(os.path.dirname(upath(django.__file__)), 'bin'))
return self.run_test(os.path.join(script_dir, 'django-admin.py'), args, settings_file)
def run_manage(self, args, settings_file=None):
def safe_remove(path):
try:
os.remove(path)
except OSError:
pass
conf_dir = os.path.dirname(upath(conf.__file__))
template_manage_py = os.path.join(conf_dir, 'project_template', 'manage.py')
test_manage_py = os.path.join(test_dir, 'manage.py')
shutil.copyfile(template_manage_py, test_manage_py)
with open(test_manage_py, 'r') as fp:
manage_py_contents = fp.read()
manage_py_contents = manage_py_contents.replace(
"{{ project_name }}", "test_project")
with open(test_manage_py, 'w') as fp:
fp.write(manage_py_contents)
self.addCleanup(safe_remove, test_manage_py)
return self.run_test('./manage.py', args, settings_file)
def assertNoOutput(self, stream):
"Utility assertion: assert that the given stream is empty"
self.assertEqual(len(stream), 0, "Stream should be empty: actually contains '%s'" % stream)
def assertOutput(self, stream, msg, regex=False):
"Utility assertion: assert that the given message exists in the output"
stream = force_text(stream)
if regex:
self.assertIsNotNone(re.search(msg, stream),
"'%s' does not match actual output text '%s'" % (msg, stream))
else:
self.assertIn(msg, stream, "'%s' does not match actual output text '%s'" % (msg, stream))
def assertNotInOutput(self, stream, msg):
"Utility assertion: assert that the given message doesn't exist in the output"
stream = force_text(stream)
self.assertNotIn(msg, stream, "'%s' matches actual output text '%s'" % (msg, stream))
##########################################################################
# DJANGO ADMIN TESTS
# This first series of test classes checks the environment processing
# of the django-admin.py script
##########################################################################
class DjangoAdminNoSettings(AdminScriptTestCase):
"A series of tests for django-admin.py when there is no settings.py file."
def test_builtin_command(self):
"no settings: django-admin builtin commands fail with an error when no settings provided"
args = ['check', 'admin_scripts']
out, err = self.run_django_admin(args)
self.assertNoOutput(out)
self.assertOutput(err, 'settings are not configured')
def test_builtin_with_bad_settings(self):
"no settings: django-admin builtin commands fail if settings file (from argument) doesn't exist"
args = ['check', '--settings=bad_settings', 'admin_scripts']
out, err = self.run_django_admin(args)
self.assertNoOutput(out)
self.assertOutput(err, "No module named '?bad_settings'?", regex=True)
def test_builtin_with_bad_environment(self):
"no settings: django-admin builtin commands fail if settings file (from environment) doesn't exist"
args = ['check', 'admin_scripts']
out, err = self.run_django_admin(args, 'bad_settings')
self.assertNoOutput(out)
self.assertOutput(err, "No module named '?bad_settings'?", regex=True)
class DjangoAdminDefaultSettings(AdminScriptTestCase):
"""A series of tests for django-admin.py when using a settings.py file that
contains the test application.
"""
def setUp(self):
self.write_settings('settings.py')
def tearDown(self):
self.remove_settings('settings.py')
def test_builtin_command(self):
"default: django-admin builtin commands fail with an error when no settings provided"
args = ['check', 'admin_scripts']
out, err = self.run_django_admin(args)
self.assertNoOutput(out)
self.assertOutput(err, 'settings are not configured')
def test_builtin_with_settings(self):
"default: django-admin builtin commands succeed if settings are provided as argument"
args = ['check', '--settings=test_project.settings', 'admin_scripts']
out, err = self.run_django_admin(args)
self.assertNoOutput(err)
self.assertOutput(out, SYSTEM_CHECK_MSG)
def test_builtin_with_environment(self):
"default: django-admin builtin commands succeed if settings are provided in the environment"
args = ['check', 'admin_scripts']
out, err = self.run_django_admin(args, 'test_project.settings')
self.assertNoOutput(err)
self.assertOutput(out, SYSTEM_CHECK_MSG)
def test_builtin_with_bad_settings(self):
"default: django-admin builtin commands fail if settings file (from argument) doesn't exist"
args = ['check', '--settings=bad_settings', 'admin_scripts']
out, err = self.run_django_admin(args)
self.assertNoOutput(out)
self.assertOutput(err, "No module named '?bad_settings'?", regex=True)
def test_builtin_with_bad_environment(self):
"default: django-admin builtin commands fail if settings file (from environment) doesn't exist"
args = ['check', 'admin_scripts']
out, err = self.run_django_admin(args, 'bad_settings')
self.assertNoOutput(out)
self.assertOutput(err, "No module named '?bad_settings'?", regex=True)
def test_custom_command(self):
"default: django-admin can't execute user commands if it isn't provided settings"
args = ['noargs_command']
out, err = self.run_django_admin(args)
self.assertNoOutput(out)
self.assertOutput(err, "No Django settings specified")
self.assertOutput(err, "Unknown command: 'noargs_command'")
def test_custom_command_with_settings(self):
"default: django-admin can execute user commands if settings are provided as argument"
args = ['noargs_command', '--settings=test_project.settings']
out, err = self.run_django_admin(args)
self.assertNoOutput(err)
self.assertOutput(out, "EXECUTE: noargs_command")
def test_custom_command_with_environment(self):
"default: django-admin can execute user commands if settings are provided in environment"
args = ['noargs_command']
out, err = self.run_django_admin(args, 'test_project.settings')
self.assertNoOutput(err)
self.assertOutput(out, "EXECUTE: noargs_command")
class DjangoAdminFullPathDefaultSettings(AdminScriptTestCase):
"""A series of tests for django-admin.py when using a settings.py file that
contains the test application specified using a full path.
"""
def setUp(self):
self.write_settings('settings.py', ['django.contrib.auth', 'django.contrib.contenttypes',
'admin_scripts', 'admin_scripts.complex_app'])
def tearDown(self):
self.remove_settings('settings.py')
def test_builtin_command(self):
"fulldefault: django-admin builtin commands fail with an error when no settings provided"
args = ['check', 'admin_scripts']
out, err = self.run_django_admin(args)
self.assertNoOutput(out)
self.assertOutput(err, 'settings are not configured')
def test_builtin_with_settings(self):
"fulldefault: django-admin builtin commands succeed if a settings file is provided"
args = ['check', '--settings=test_project.settings', 'admin_scripts']
out, err = self.run_django_admin(args)
self.assertNoOutput(err)
self.assertOutput(out, SYSTEM_CHECK_MSG)
def test_builtin_with_environment(self):
"fulldefault: django-admin builtin commands succeed if the environment contains settings"
args = ['check', 'admin_scripts']
out, err = self.run_django_admin(args, 'test_project.settings')
self.assertNoOutput(err)
self.assertOutput(out, SYSTEM_CHECK_MSG)
def test_builtin_with_bad_settings(self):
"fulldefault: django-admin builtin commands fail if settings file (from argument) doesn't exist"
args = ['check', '--settings=bad_settings', 'admin_scripts']
out, err = self.run_django_admin(args)
self.assertNoOutput(out)
self.assertOutput(err, "No module named '?bad_settings'?", regex=True)
def test_builtin_with_bad_environment(self):
"fulldefault: django-admin builtin commands fail if settings file (from environment) doesn't exist"
args = ['check', 'admin_scripts']
out, err = self.run_django_admin(args, 'bad_settings')
self.assertNoOutput(out)
self.assertOutput(err, "No module named '?bad_settings'?", regex=True)
def test_custom_command(self):
"fulldefault: django-admin can't execute user commands unless settings are provided"
args = ['noargs_command']
out, err = self.run_django_admin(args)
self.assertNoOutput(out)
self.assertOutput(err, "No Django settings specified")
self.assertOutput(err, "Unknown command: 'noargs_command'")
def test_custom_command_with_settings(self):
"fulldefault: django-admin can execute user commands if settings are provided as argument"
args = ['noargs_command', '--settings=test_project.settings']
out, err = self.run_django_admin(args)
self.assertNoOutput(err)
self.assertOutput(out, "EXECUTE: noargs_command")
def test_custom_command_with_environment(self):
"fulldefault: django-admin can execute user commands if settings are provided in environment"
args = ['noargs_command']
out, err = self.run_django_admin(args, 'test_project.settings')
self.assertNoOutput(err)
self.assertOutput(out, "EXECUTE: noargs_command")
class DjangoAdminMinimalSettings(AdminScriptTestCase):
"""A series of tests for django-admin.py when using a settings.py file that
doesn't contain the test application.
"""
def setUp(self):
self.write_settings('settings.py', apps=['django.contrib.auth', 'django.contrib.contenttypes'])
def tearDown(self):
self.remove_settings('settings.py')
def test_builtin_command(self):
"minimal: django-admin builtin commands fail with an error when no settings provided"
args = ['check', 'admin_scripts']
out, err = self.run_django_admin(args)
self.assertNoOutput(out)
self.assertOutput(err, 'settings are not configured')
def test_builtin_with_settings(self):
"minimal: django-admin builtin commands fail if settings are provided as argument"
args = ['check', '--settings=test_project.settings', 'admin_scripts']
out, err = self.run_django_admin(args)
self.assertNoOutput(out)
self.assertOutput(err, "No installed app with label 'admin_scripts'.")
def test_builtin_with_environment(self):
"minimal: django-admin builtin commands fail if settings are provided in the environment"
args = ['check', 'admin_scripts']
out, err = self.run_django_admin(args, 'test_project.settings')
self.assertNoOutput(out)
self.assertOutput(err, "No installed app with label 'admin_scripts'.")
def test_builtin_with_bad_settings(self):
"minimal: django-admin builtin commands fail if settings file (from argument) doesn't exist"
args = ['check', '--settings=bad_settings', 'admin_scripts']
out, err = self.run_django_admin(args)
self.assertNoOutput(out)
self.assertOutput(err, "No module named '?bad_settings'?", regex=True)
def test_builtin_with_bad_environment(self):
"minimal: django-admin builtin commands fail if settings file (from environment) doesn't exist"
args = ['check', 'admin_scripts']
out, err = self.run_django_admin(args, 'bad_settings')
self.assertNoOutput(out)
self.assertOutput(err, "No module named '?bad_settings'?", regex=True)
def test_custom_command(self):
"minimal: django-admin can't execute user commands unless settings are provided"
args = ['noargs_command']
out, err = self.run_django_admin(args)
self.assertNoOutput(out)
self.assertOutput(err, "No Django settings specified")
self.assertOutput(err, "Unknown command: 'noargs_command'")
def test_custom_command_with_settings(self):
"minimal: django-admin can't execute user commands, even if settings are provided as argument"
args = ['noargs_command', '--settings=test_project.settings']
out, err = self.run_django_admin(args)
self.assertNoOutput(out)
self.assertOutput(err, "Unknown command: 'noargs_command'")
def test_custom_command_with_environment(self):
"minimal: django-admin can't execute user commands, even if settings are provided in environment"
args = ['noargs_command']
out, err = self.run_django_admin(args, 'test_project.settings')
self.assertNoOutput(out)
self.assertOutput(err, "Unknown command: 'noargs_command'")
class DjangoAdminAlternateSettings(AdminScriptTestCase):
"""A series of tests for django-admin.py when using a settings file
with a name other than 'settings.py'.
"""
def setUp(self):
self.write_settings('alternate_settings.py')
def tearDown(self):
self.remove_settings('alternate_settings.py')
def test_builtin_command(self):
"alternate: django-admin builtin commands fail with an error when no settings provided"
args = ['check', 'admin_scripts']
out, err = self.run_django_admin(args)
self.assertNoOutput(out)
self.assertOutput(err, 'settings are not configured')
def test_builtin_with_settings(self):
"alternate: django-admin builtin commands succeed if settings are provided as argument"
args = ['check', '--settings=test_project.alternate_settings', 'admin_scripts']
out, err = self.run_django_admin(args)
self.assertNoOutput(err)
self.assertOutput(out, SYSTEM_CHECK_MSG)
def test_builtin_with_environment(self):
"alternate: django-admin builtin commands succeed if settings are provided in the environment"
args = ['check', 'admin_scripts']
out, err = self.run_django_admin(args, 'test_project.alternate_settings')
self.assertNoOutput(err)
self.assertOutput(out, SYSTEM_CHECK_MSG)
def test_builtin_with_bad_settings(self):
"alternate: django-admin builtin commands fail if settings file (from argument) doesn't exist"
args = ['check', '--settings=bad_settings', 'admin_scripts']
out, err = self.run_django_admin(args)
self.assertNoOutput(out)
self.assertOutput(err, "No module named '?bad_settings'?", regex=True)
def test_builtin_with_bad_environment(self):
"alternate: django-admin builtin commands fail if settings file (from environment) doesn't exist"
args = ['check', 'admin_scripts']
out, err = self.run_django_admin(args, 'bad_settings')
self.assertNoOutput(out)
self.assertOutput(err, "No module named '?bad_settings'?", regex=True)
def test_custom_command(self):
"alternate: django-admin can't execute user commands unless settings are provided"
args = ['noargs_command']
out, err = self.run_django_admin(args)
self.assertNoOutput(out)
self.assertOutput(err, "No Django settings specified")
self.assertOutput(err, "Unknown command: 'noargs_command'")
def test_custom_command_with_settings(self):
"alternate: django-admin can execute user commands if settings are provided as argument"
args = ['noargs_command', '--settings=test_project.alternate_settings']
out, err = self.run_django_admin(args)
self.assertNoOutput(err)
self.assertOutput(out, "EXECUTE: noargs_command")
def test_custom_command_with_environment(self):
"alternate: django-admin can execute user commands if settings are provided in environment"
args = ['noargs_command']
out, err = self.run_django_admin(args, 'test_project.alternate_settings')
self.assertNoOutput(err)
self.assertOutput(out, "EXECUTE: noargs_command")
class DjangoAdminMultipleSettings(AdminScriptTestCase):
"""A series of tests for django-admin.py when multiple settings files
(including the default 'settings.py') are available. The default settings
file is insufficient for performing the operations described, so the
alternate settings must be used by the running script.
"""
def setUp(self):
self.write_settings('settings.py', apps=['django.contrib.auth', 'django.contrib.contenttypes'])
self.write_settings('alternate_settings.py')
def tearDown(self):
self.remove_settings('settings.py')
self.remove_settings('alternate_settings.py')
def test_builtin_command(self):
"alternate: django-admin builtin commands fail with an error when no settings provided"
args = ['check', 'admin_scripts']
out, err = self.run_django_admin(args)
self.assertNoOutput(out)
self.assertOutput(err, 'settings are not configured')
def test_builtin_with_settings(self):
"alternate: django-admin builtin commands succeed if settings are provided as argument"
args = ['check', '--settings=test_project.alternate_settings', 'admin_scripts']
out, err = self.run_django_admin(args)
self.assertNoOutput(err)
self.assertOutput(out, SYSTEM_CHECK_MSG)
def test_builtin_with_environment(self):
"alternate: django-admin builtin commands succeed if settings are provided in the environment"
args = ['check', 'admin_scripts']
out, err = self.run_django_admin(args, 'test_project.alternate_settings')
self.assertNoOutput(err)
self.assertOutput(out, SYSTEM_CHECK_MSG)
def test_builtin_with_bad_settings(self):
"alternate: django-admin builtin commands fail if settings file (from argument) doesn't exist"
args = ['check', '--settings=bad_settings', 'admin_scripts']
out, err = self.run_django_admin(args)
self.assertOutput(err, "No module named '?bad_settings'?", regex=True)
def test_builtin_with_bad_environment(self):
"alternate: django-admin builtin commands fail if settings file (from environment) doesn't exist"
args = ['check', 'admin_scripts']
out, err = self.run_django_admin(args, 'bad_settings')
self.assertNoOutput(out)
self.assertOutput(err, "No module named '?bad_settings'?", regex=True)
def test_custom_command(self):
"alternate: django-admin can't execute user commands unless settings are provided"
args = ['noargs_command']
out, err = self.run_django_admin(args)
self.assertNoOutput(out)
self.assertOutput(err, "No Django settings specified")
self.assertOutput(err, "Unknown command: 'noargs_command'")
def test_custom_command_with_settings(self):
"alternate: django-admin can execute user commands if settings are provided as argument"
args = ['noargs_command', '--settings=test_project.alternate_settings']
out, err = self.run_django_admin(args)
self.assertNoOutput(err)
self.assertOutput(out, "EXECUTE: noargs_command")
def test_custom_command_with_environment(self):
"alternate: django-admin can execute user commands if settings are provided in environment"
args = ['noargs_command']
out, err = self.run_django_admin(args, 'test_project.alternate_settings')
self.assertNoOutput(err)
self.assertOutput(out, "EXECUTE: noargs_command")
class DjangoAdminSettingsDirectory(AdminScriptTestCase):
"""
A series of tests for django-admin.py when the settings file is in a
directory. (see #9751).
"""
def setUp(self):
self.write_settings('settings', is_dir=True)
def tearDown(self):
self.remove_settings('settings', is_dir=True)
def test_setup_environ(self):
"directory: startapp creates the correct directory"
args = ['startapp', 'settings_test']
app_path = os.path.join(test_dir, 'settings_test')
out, err = self.run_django_admin(args, 'test_project.settings')
self.addCleanup(shutil.rmtree, app_path)
self.assertNoOutput(err)
self.assertTrue(os.path.exists(app_path))
with open(os.path.join(app_path, 'apps.py'), 'r') as f:
content = f.read()
self.assertIn("class SettingsTestConfig(AppConfig)", content)
self.assertIn("name = 'settings_test'", content)
with open(os.path.join(app_path, '__init__.py'), 'r') as f:
content = f.read()
expected_content = "default_app_config = 'settings_test.apps.SettingsTestConfig'"
self.assertIn(expected_content, content)
if not PY3:
with open(os.path.join(app_path, 'models.py'), 'r') as fp:
content = fp.read()
self.assertIn(
"from __future__ import unicode_literals\n",
content,
)
def test_setup_environ_custom_template(self):
"directory: startapp creates the correct directory with a custom template"
template_path = os.path.join(custom_templates_dir, 'app_template')
args = ['startapp', '--template', template_path, 'custom_settings_test']
app_path = os.path.join(test_dir, 'custom_settings_test')
out, err = self.run_django_admin(args, 'test_project.settings')
self.addCleanup(shutil.rmtree, app_path)
self.assertNoOutput(err)
self.assertTrue(os.path.exists(app_path))
self.assertTrue(os.path.exists(os.path.join(app_path, 'api.py')))
def test_builtin_command(self):
"directory: django-admin builtin commands fail with an error when no settings provided"
args = ['check', 'admin_scripts']
out, err = self.run_django_admin(args)
self.assertNoOutput(out)
self.assertOutput(err, 'settings are not configured')
def test_builtin_with_bad_settings(self):
"directory: django-admin builtin commands fail if settings file (from argument) doesn't exist"
args = ['check', '--settings=bad_settings', 'admin_scripts']
out, err = self.run_django_admin(args)
self.assertOutput(err, "No module named '?bad_settings'?", regex=True)
def test_builtin_with_bad_environment(self):
"directory: django-admin builtin commands fail if settings file (from environment) doesn't exist"
args = ['check', 'admin_scripts']
out, err = self.run_django_admin(args, 'bad_settings')
self.assertNoOutput(out)
self.assertOutput(err, "No module named '?bad_settings'?", regex=True)
def test_custom_command(self):
"directory: django-admin can't execute user commands unless settings are provided"
args = ['noargs_command']
out, err = self.run_django_admin(args)
self.assertNoOutput(out)
self.assertOutput(err, "No Django settings specified")
self.assertOutput(err, "Unknown command: 'noargs_command'")
def test_builtin_with_settings(self):
"directory: django-admin builtin commands succeed if settings are provided as argument"
args = ['check', '--settings=test_project.settings', 'admin_scripts']
out, err = self.run_django_admin(args)
self.assertNoOutput(err)
self.assertOutput(out, SYSTEM_CHECK_MSG)
def test_builtin_with_environment(self):
"directory: django-admin builtin commands succeed if settings are provided in the environment"
args = ['check', 'admin_scripts']
out, err = self.run_django_admin(args, 'test_project.settings')
self.assertNoOutput(err)
self.assertOutput(out, SYSTEM_CHECK_MSG)
##########################################################################
# MANAGE.PY TESTS
# This next series of test classes checks the environment processing
# of the generated manage.py script
##########################################################################
class ManageNoSettings(AdminScriptTestCase):
"A series of tests for manage.py when there is no settings.py file."
def test_builtin_command(self):
"no settings: manage.py builtin commands fail with an error when no settings provided"
args = ['check', 'admin_scripts']
out, err = self.run_manage(args)
self.assertNoOutput(out)
self.assertOutput(err, "No module named '?(test_project\.)?settings'?", regex=True)
def test_builtin_with_bad_settings(self):
"no settings: manage.py builtin commands fail if settings file (from argument) doesn't exist"
args = ['check', '--settings=bad_settings', 'admin_scripts']
out, err = self.run_manage(args)
self.assertNoOutput(out)
self.assertOutput(err, "No module named '?bad_settings'?", regex=True)
def test_builtin_with_bad_environment(self):
"no settings: manage.py builtin commands fail if settings file (from environment) doesn't exist"
args = ['check', 'admin_scripts']
out, err = self.run_manage(args, 'bad_settings')
self.assertNoOutput(out)
self.assertOutput(err, "No module named '?bad_settings'?", regex=True)
class ManageDefaultSettings(AdminScriptTestCase):
"""A series of tests for manage.py when using a settings.py file that
contains the test application.
"""
def setUp(self):
self.write_settings('settings.py')
def tearDown(self):
self.remove_settings('settings.py')
def test_builtin_command(self):
"default: manage.py builtin commands succeed when default settings are appropriate"
args = ['check', 'admin_scripts']
out, err = self.run_manage(args)
self.assertNoOutput(err)
self.assertOutput(out, SYSTEM_CHECK_MSG)
def test_builtin_with_settings(self):
"default: manage.py builtin commands succeed if settings are provided as argument"
args = ['check', '--settings=test_project.settings', 'admin_scripts']
out, err = self.run_manage(args)
self.assertNoOutput(err)
self.assertOutput(out, SYSTEM_CHECK_MSG)
def test_builtin_with_environment(self):
"default: manage.py builtin commands succeed if settings are provided in the environment"
args = ['check', 'admin_scripts']
out, err = self.run_manage(args, 'test_project.settings')
self.assertNoOutput(err)
self.assertOutput(out, SYSTEM_CHECK_MSG)
def test_builtin_with_bad_settings(self):
"default: manage.py builtin commands succeed if settings file (from argument) doesn't exist"
args = ['check', '--settings=bad_settings', 'admin_scripts']
out, err = self.run_manage(args)
self.assertNoOutput(out)
self.assertOutput(err, "No module named '?bad_settings'?", regex=True)
def test_builtin_with_bad_environment(self):
"default: manage.py builtin commands fail if settings file (from environment) doesn't exist"
args = ['check', 'admin_scripts']
out, err = self.run_manage(args, 'bad_settings')
self.assertNoOutput(out)
self.assertOutput(err, "No module named '?bad_settings'?", regex=True)
def test_custom_command(self):
"default: manage.py can execute user commands when default settings are appropriate"
args = ['noargs_command']
out, err = self.run_manage(args)
self.assertNoOutput(err)
self.assertOutput(out, "EXECUTE: noargs_command")
def test_custom_command_with_settings(self):
"default: manage.py can execute user commands when settings are provided as argument"
args = ['noargs_command', '--settings=test_project.settings']
out, err = self.run_manage(args)
self.assertNoOutput(err)
self.assertOutput(out, "EXECUTE: noargs_command")
def test_custom_command_with_environment(self):
"default: manage.py can execute user commands when settings are provided in environment"
args = ['noargs_command']
out, err = self.run_manage(args, 'test_project.settings')
self.assertNoOutput(err)
self.assertOutput(out, "EXECUTE: noargs_command")
class ManageFullPathDefaultSettings(AdminScriptTestCase):
"""A series of tests for manage.py when using a settings.py file that
contains the test application specified using a full path.
"""
def setUp(self):
self.write_settings('settings.py', ['django.contrib.auth', 'django.contrib.contenttypes', 'admin_scripts'])
def tearDown(self):
self.remove_settings('settings.py')
def test_builtin_command(self):
"fulldefault: manage.py builtin commands succeed when default settings are appropriate"
args = ['check', 'admin_scripts']
out, err = self.run_manage(args)
self.assertNoOutput(err)
self.assertOutput(out, SYSTEM_CHECK_MSG)
def test_builtin_with_settings(self):
"fulldefault: manage.py builtin commands succeed if settings are provided as argument"
args = ['check', '--settings=test_project.settings', 'admin_scripts']
out, err = self.run_manage(args)
self.assertNoOutput(err)
self.assertOutput(out, SYSTEM_CHECK_MSG)
def test_builtin_with_environment(self):
"fulldefault: manage.py builtin commands succeed if settings are provided in the environment"
args = ['check', 'admin_scripts']
out, err = self.run_manage(args, 'test_project.settings')
self.assertNoOutput(err)
self.assertOutput(out, SYSTEM_CHECK_MSG)
def test_builtin_with_bad_settings(self):
"fulldefault: manage.py builtin commands succeed if settings file (from argument) doesn't exist"
args = ['check', '--settings=bad_settings', 'admin_scripts']
out, err = self.run_manage(args)
self.assertNoOutput(out)
self.assertOutput(err, "No module named '?bad_settings'?", regex=True)
def test_builtin_with_bad_environment(self):
"fulldefault: manage.py builtin commands fail if settings file (from environment) doesn't exist"
args = ['check', 'admin_scripts']
out, err = self.run_manage(args, 'bad_settings')
self.assertNoOutput(out)
self.assertOutput(err, "No module named '?bad_settings'?", regex=True)
def test_custom_command(self):
"fulldefault: manage.py can execute user commands when default settings are appropriate"
args = ['noargs_command']
out, err = self.run_manage(args)
self.assertNoOutput(err)
self.assertOutput(out, "EXECUTE: noargs_command")
def test_custom_command_with_settings(self):
"fulldefault: manage.py can execute user commands when settings are provided as argument"
args = ['noargs_command', '--settings=test_project.settings']
out, err = self.run_manage(args)
self.assertNoOutput(err)
self.assertOutput(out, "EXECUTE: noargs_command")
def test_custom_command_with_environment(self):
"fulldefault: manage.py can execute user commands when settings are provided in environment"
args = ['noargs_command']
out, err = self.run_manage(args, 'test_project.settings')
self.assertNoOutput(err)
self.assertOutput(out, "EXECUTE: noargs_command")
class ManageMinimalSettings(AdminScriptTestCase):
"""A series of tests for manage.py when using a settings.py file that
doesn't contain the test application.
"""
def setUp(self):
self.write_settings('settings.py', apps=['django.contrib.auth', 'django.contrib.contenttypes'])
def tearDown(self):
self.remove_settings('settings.py')
def test_builtin_command(self):
"minimal: manage.py builtin commands fail with an error when no settings provided"
args = ['check', 'admin_scripts']
out, err = self.run_manage(args)
self.assertNoOutput(out)
self.assertOutput(err, "No installed app with label 'admin_scripts'.")
def test_builtin_with_settings(self):
"minimal: manage.py builtin commands fail if settings are provided as argument"
args = ['check', '--settings=test_project.settings', 'admin_scripts']
out, err = self.run_manage(args)
self.assertNoOutput(out)
self.assertOutput(err, "No installed app with label 'admin_scripts'.")
def test_builtin_with_environment(self):
"minimal: manage.py builtin commands fail if settings are provided in the environment"
args = ['check', 'admin_scripts']
out, err = self.run_manage(args, 'test_project.settings')
self.assertNoOutput(out)
self.assertOutput(err, "No installed app with label 'admin_scripts'.")
def test_builtin_with_bad_settings(self):
"minimal: manage.py builtin commands fail if settings file (from argument) doesn't exist"
args = ['check', '--settings=bad_settings', 'admin_scripts']
out, err = self.run_manage(args)
self.assertNoOutput(out)
self.assertOutput(err, "No module named '?bad_settings'?", regex=True)
def test_builtin_with_bad_environment(self):
"minimal: manage.py builtin commands fail if settings file (from environment) doesn't exist"
args = ['check', 'admin_scripts']
out, err = self.run_manage(args, 'bad_settings')
self.assertNoOutput(out)
self.assertOutput(err, "No module named '?bad_settings'?", regex=True)
def test_custom_command(self):
"minimal: manage.py can't execute user commands without appropriate settings"
args = ['noargs_command']
out, err = self.run_manage(args)
self.assertNoOutput(out)
self.assertOutput(err, "Unknown command: 'noargs_command'")
def test_custom_command_with_settings(self):
"minimal: manage.py can't execute user commands, even if settings are provided as argument"
args = ['noargs_command', '--settings=test_project.settings']
out, err = self.run_manage(args)
self.assertNoOutput(out)
self.assertOutput(err, "Unknown command: 'noargs_command'")
def test_custom_command_with_environment(self):
"minimal: manage.py can't execute user commands, even if settings are provided in environment"
args = ['noargs_command']
out, err = self.run_manage(args, 'test_project.settings')
self.assertNoOutput(out)
self.assertOutput(err, "Unknown command: 'noargs_command'")
class ManageAlternateSettings(AdminScriptTestCase):
"""A series of tests for manage.py when using a settings file
with a name other than 'settings.py'.
"""
def setUp(self):
self.write_settings('alternate_settings.py')
def tearDown(self):
self.remove_settings('alternate_settings.py')
def test_builtin_command(self):
"alternate: manage.py builtin commands fail with an error when no default settings provided"
args = ['check', 'admin_scripts']
out, err = self.run_manage(args)
self.assertNoOutput(out)
self.assertOutput(err, "No module named '?(test_project\.)?settings'?", regex=True)
def test_builtin_with_settings(self):
"alternate: manage.py builtin commands work with settings provided as argument"
args = ['check', '--settings=alternate_settings', 'admin_scripts']
out, err = self.run_manage(args)
self.assertOutput(out, SYSTEM_CHECK_MSG)
self.assertNoOutput(err)
def test_builtin_with_environment(self):
"alternate: manage.py builtin commands work if settings are provided in the environment"
args = ['check', 'admin_scripts']
out, err = self.run_manage(args, 'alternate_settings')
self.assertOutput(out, SYSTEM_CHECK_MSG)
self.assertNoOutput(err)
def test_builtin_with_bad_settings(self):
"alternate: manage.py builtin commands fail if settings file (from argument) doesn't exist"
args = ['check', '--settings=bad_settings', 'admin_scripts']
out, err = self.run_manage(args)
self.assertNoOutput(out)
self.assertOutput(err, "No module named '?bad_settings'?", regex=True)
def test_builtin_with_bad_environment(self):
"alternate: manage.py builtin commands fail if settings file (from environment) doesn't exist"
args = ['check', 'admin_scripts']
out, err = self.run_manage(args, 'bad_settings')
self.assertNoOutput(out)
self.assertOutput(err, "No module named '?bad_settings'?", regex=True)
def test_custom_command(self):
"alternate: manage.py can't execute user commands without settings"
args = ['noargs_command']
out, err = self.run_manage(args)
self.assertNoOutput(out)
self.assertOutput(err, "No module named '?(test_project\.)?settings'?", regex=True)
def test_custom_command_with_settings(self):
"alternate: manage.py can execute user commands if settings are provided as argument"
args = ['noargs_command', '--settings=alternate_settings']
out, err = self.run_manage(args)
self.assertOutput(out, "EXECUTE: noargs_command options=[('no_color', False), ('pythonpath', None), ('settings', 'alternate_settings'), ('traceback', False), ('verbosity', 1)]")
self.assertNoOutput(err)
def test_custom_command_with_environment(self):
"alternate: manage.py can execute user commands if settings are provided in environment"
args = ['noargs_command']
out, err = self.run_manage(args, 'alternate_settings')
self.assertOutput(out, "EXECUTE: noargs_command options=[('no_color', False), ('pythonpath', None), ('settings', None), ('traceback', False), ('verbosity', 1)]")
self.assertNoOutput(err)
def test_custom_command_output_color(self):
"alternate: manage.py output syntax color can be deactivated with the `--no-color` option"
args = ['noargs_command', '--no-color', '--settings=alternate_settings']
out, err = self.run_manage(args)
self.assertOutput(out, "EXECUTE: noargs_command options=[('no_color', True), ('pythonpath', None), ('settings', 'alternate_settings'), ('traceback', False), ('verbosity', 1)]")
self.assertNoOutput(err)
class ManageMultipleSettings(AdminScriptTestCase):
"""A series of tests for manage.py when multiple settings files
(including the default 'settings.py') are available. The default settings
file is insufficient for performing the operations described, so the
alternate settings must be used by the running script.
"""
def setUp(self):
self.write_settings('settings.py', apps=['django.contrib.auth', 'django.contrib.contenttypes'])
self.write_settings('alternate_settings.py')
def tearDown(self):
self.remove_settings('settings.py')
self.remove_settings('alternate_settings.py')
def test_builtin_command(self):
"multiple: manage.py builtin commands fail with an error when no settings provided"
args = ['check', 'admin_scripts']
out, err = self.run_manage(args)
self.assertNoOutput(out)
self.assertOutput(err, "No installed app with label 'admin_scripts'.")
def test_builtin_with_settings(self):
"multiple: manage.py builtin commands succeed if settings are provided as argument"
args = ['check', '--settings=alternate_settings', 'admin_scripts']
out, err = self.run_manage(args)
self.assertNoOutput(err)
self.assertOutput(out, SYSTEM_CHECK_MSG)
def test_builtin_with_environment(self):
"multiple: manage.py can execute builtin commands if settings are provided in the environment"
args = ['check', 'admin_scripts']
out, err = self.run_manage(args, 'alternate_settings')
self.assertNoOutput(err)
self.assertOutput(out, SYSTEM_CHECK_MSG)
def test_builtin_with_bad_settings(self):
"multiple: manage.py builtin commands fail if settings file (from argument) doesn't exist"
args = ['check', '--settings=bad_settings', 'admin_scripts']
out, err = self.run_manage(args)
self.assertNoOutput(out)
self.assertOutput(err, "No module named '?bad_settings'?", regex=True)
def test_builtin_with_bad_environment(self):
"multiple: manage.py builtin commands fail if settings file (from environment) doesn't exist"
args = ['check', 'admin_scripts']
out, err = self.run_manage(args, 'bad_settings')
self.assertNoOutput(out)
self.assertOutput(err, "No module named '?bad_settings'?", regex=True)
def test_custom_command(self):
"multiple: manage.py can't execute user commands using default settings"
args = ['noargs_command']
out, err = self.run_manage(args)
self.assertNoOutput(out)
self.assertOutput(err, "Unknown command: 'noargs_command'")
def test_custom_command_with_settings(self):
"multiple: manage.py can execute user commands if settings are provided as argument"
args = ['noargs_command', '--settings=alternate_settings']
out, err = self.run_manage(args)
self.assertNoOutput(err)
self.assertOutput(out, "EXECUTE: noargs_command")
def test_custom_command_with_environment(self):
"multiple: manage.py can execute user commands if settings are provided in environment"
args = ['noargs_command']
out, err = self.run_manage(args, 'alternate_settings')
self.assertNoOutput(err)
self.assertOutput(out, "EXECUTE: noargs_command")
class ManageSettingsWithSettingsErrors(AdminScriptTestCase):
"""
Tests for manage.py when using the default settings.py file containing
runtime errors.
"""
def tearDown(self):
self.remove_settings('settings.py')
def write_settings_with_import_error(self, filename):
settings_file_path = os.path.join(test_dir, filename)
with open(settings_file_path, 'w') as settings_file:
settings_file.write('# Settings file automatically generated by admin_scripts test case\n')
settings_file.write('# The next line will cause an import error:\nimport foo42bar\n')
def test_import_error(self):
"""
        import error: manage.py builtin commands show useful diagnostic info
        when a settings module with an import error is provided (#14130).
"""
self.write_settings_with_import_error('settings.py')
args = ['check', 'admin_scripts']
out, err = self.run_manage(args)
self.assertNoOutput(out)
self.assertOutput(err, "No module named")
self.assertOutput(err, "foo42bar")
def test_attribute_error(self):
"""
        manage.py builtin commands do not swallow an AttributeError caused by
        bad settings (#18845).
"""
self.write_settings('settings.py', sdict={'BAD_VAR': 'INSTALLED_APPS.crash'})
args = ['collectstatic', 'admin_scripts']
out, err = self.run_manage(args)
self.assertNoOutput(out)
self.assertOutput(err, "AttributeError: 'list' object has no attribute 'crash'")
def test_key_error(self):
self.write_settings('settings.py', sdict={'BAD_VAR': 'DATABASES["blah"]'})
args = ['collectstatic', 'admin_scripts']
out, err = self.run_manage(args)
self.assertNoOutput(out)
self.assertOutput(err, "KeyError: 'blah'")
def test_help(self):
"""
        Test that the output of `help` notes that only Django core commands
        are listed when the settings module is broken.
"""
self.write_settings('settings.py', sdict={'MEDIA_URL': '"/no_ending_slash"'})
args = ['help']
out, err = self.run_manage(args)
self.assertOutput(out, 'only Django core commands are listed')
self.assertNoOutput(err)
class ManageCheck(AdminScriptTestCase):
def tearDown(self):
self.remove_settings('settings.py')
def test_nonexistent_app(self):
""" manage.py check reports an error on a non-existent app in
INSTALLED_APPS """
self.write_settings('settings.py',
apps=['admin_scriptz.broken_app'],
sdict={'USE_I18N': False})
args = ['check']
out, err = self.run_manage(args)
self.assertNoOutput(out)
self.assertOutput(err, 'ImportError')
self.assertOutput(err, 'No module named')
self.assertOutput(err, 'admin_scriptz')
def test_broken_app(self):
""" manage.py check reports an ImportError if an app's models.py
raises one on import """
self.write_settings('settings.py', apps=['admin_scripts.broken_app'])
args = ['check']
out, err = self.run_manage(args)
self.assertNoOutput(out)
self.assertOutput(err, 'ImportError')
def test_complex_app(self):
""" manage.py check does not raise an ImportError validating a
complex app with nested calls to load_app """
self.write_settings(
'settings.py',
apps=[
'admin_scripts.complex_app',
'admin_scripts.simple_app',
'django.contrib.admin.apps.SimpleAdminConfig',
'django.contrib.auth',
'django.contrib.contenttypes',
],
sdict={
'DEBUG': True
}
)
args = ['check']
out, err = self.run_manage(args)
self.assertNoOutput(err)
self.assertEqual(out, 'System check identified no issues (0 silenced).\n')
def test_app_with_import(self):
""" manage.py check does not raise errors when an app imports a base
class that itself has an abstract base. """
self.write_settings('settings.py',
apps=['admin_scripts.app_with_import',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sites'],
sdict={'DEBUG': True})
args = ['check']
out, err = self.run_manage(args)
self.assertNoOutput(err)
self.assertEqual(out, 'System check identified no issues (0 silenced).\n')
def test_output_format(self):
""" All errors/warnings should be sorted by level and by message. """
self.write_settings('settings.py',
apps=['admin_scripts.app_raising_messages',
'django.contrib.auth',
'django.contrib.contenttypes'],
sdict={'DEBUG': True})
args = ['check']
out, err = self.run_manage(args)
expected_err = (
"SystemCheckError: System check identified some issues:\n"
"\n"
"ERRORS:\n"
"?: An error\n"
"\tHINT: Error hint\n"
"\n"
"WARNINGS:\n"
"a: Second warning\n"
"obj: First warning\n"
"\tHINT: Hint\n"
"\n"
"System check identified 3 issues (0 silenced).\n"
)
self.assertEqual(err, expected_err)
self.assertNoOutput(out)
def test_warning_does_not_halt(self):
"""
        When there are only warnings or less serious messages, Django should
        not prevent the user from launching their project, so the `check`
        command should not raise a `CommandError` exception.
        This test also checks the output format.
"""
self.write_settings('settings.py',
apps=['admin_scripts.app_raising_warning',
'django.contrib.auth',
'django.contrib.contenttypes'],
sdict={'DEBUG': True})
args = ['check']
out, err = self.run_manage(args)
expected_err = (
"System check identified some issues:\n" # No "CommandError: " part
"\n"
"WARNINGS:\n"
"?: A warning\n"
"\n"
"System check identified 1 issue (0 silenced).\n"
)
self.assertEqual(err, expected_err)
self.assertNoOutput(out)
class CustomTestRunner(DiscoverRunner):
def __init__(self, *args, **kwargs):
assert 'liveserver' not in kwargs
super(CustomTestRunner, self).__init__(*args, **kwargs)
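    # run_tests() below is deliberately a no-op: ManageTestCommand only
    # verifies environment-variable handling, so no tests are actually run.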
def run_tests(self, test_labels, extra_tests=None, **kwargs):
pass
class ManageTestCommand(AdminScriptTestCase):
def setUp(self):
from django.core.management.commands.test import Command as TestCommand
self.cmd = TestCommand()
def test_liveserver(self):
"""
Ensure that the --liveserver option sets the environment variable
correctly.
Refs #2879.
"""
# Backup original state
address_predefined = 'DJANGO_LIVE_TEST_SERVER_ADDRESS' in os.environ
old_address = os.environ.get('DJANGO_LIVE_TEST_SERVER_ADDRESS')
self.cmd.handle(verbosity=0, testrunner='admin_scripts.tests.CustomTestRunner')
# Original state hasn't changed
self.assertEqual('DJANGO_LIVE_TEST_SERVER_ADDRESS' in os.environ, address_predefined)
self.assertEqual(os.environ.get('DJANGO_LIVE_TEST_SERVER_ADDRESS'), old_address)
self.cmd.handle(verbosity=0, testrunner='admin_scripts.tests.CustomTestRunner',
liveserver='blah')
# Variable was correctly set
self.assertEqual(os.environ['DJANGO_LIVE_TEST_SERVER_ADDRESS'], 'blah')
# Restore original state
if address_predefined:
os.environ['DJANGO_LIVE_TEST_SERVER_ADDRESS'] = old_address
else:
del os.environ['DJANGO_LIVE_TEST_SERVER_ADDRESS']
class ManageRunserver(AdminScriptTestCase):
def setUp(self):
from django.core.management.commands.runserver import Command
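        # Stub out Command.run() so handle() exercises only address/port
        # parsing without binding a real socket.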
def monkey_run(*args, **options):
return
self.output = StringIO()
self.cmd = Command(stdout=self.output)
self.cmd.run = monkey_run
def assertServerSettings(self, addr, port, ipv6=None, raw_ipv6=False):
self.assertEqual(self.cmd.addr, addr)
self.assertEqual(self.cmd.port, port)
self.assertEqual(self.cmd.use_ipv6, ipv6)
self.assertEqual(self.cmd._raw_ipv6, raw_ipv6)
def test_runserver_addrport(self):
self.cmd.handle()
self.assertServerSettings('127.0.0.1', '8000')
self.cmd.handle(addrport="1.2.3.4:8000")
self.assertServerSettings('1.2.3.4', '8000')
self.cmd.handle(addrport="7000")
self.assertServerSettings('127.0.0.1', '7000')
@unittest.skipUnless(socket.has_ipv6, "platform doesn't support IPv6")
def test_runner_addrport_ipv6(self):
self.cmd.handle(addrport="", use_ipv6=True)
self.assertServerSettings('::1', '8000', ipv6=True, raw_ipv6=True)
self.cmd.handle(addrport="7000", use_ipv6=True)
self.assertServerSettings('::1', '7000', ipv6=True, raw_ipv6=True)
self.cmd.handle(addrport="[2001:0db8:1234:5678::9]:7000")
self.assertServerSettings('2001:0db8:1234:5678::9', '7000', ipv6=True, raw_ipv6=True)
def test_runner_hostname(self):
self.cmd.handle(addrport="localhost:8000")
self.assertServerSettings('localhost', '8000')
self.cmd.handle(addrport="test.domain.local:7000")
self.assertServerSettings('test.domain.local', '7000')
@unittest.skipUnless(socket.has_ipv6, "platform doesn't support IPv6")
def test_runner_hostname_ipv6(self):
self.cmd.handle(addrport="test.domain.local:7000", use_ipv6=True)
self.assertServerSettings('test.domain.local', '7000', ipv6=True)
def test_runner_ambiguous(self):
# Only 4 characters, all of which could be in an ipv6 address
self.cmd.handle(addrport="beef:7654")
self.assertServerSettings('beef', '7654')
# Uses only characters that could be in an ipv6 address
self.cmd.handle(addrport="deadbeef:7654")
self.assertServerSettings('deadbeef', '7654')
def test_no_database(self):
"""
Ensure runserver.check_migrations doesn't choke on empty DATABASES.
"""
tested_connections = ConnectionHandler({})
with mock.patch('django.core.management.commands.runserver.connections', new=tested_connections):
self.cmd.check_migrations()
def test_readonly_database(self):
"""
Ensure runserver.check_migrations doesn't choke when a database is read-only
(with possibly no django_migrations table).
"""
with mock.patch.object(
MigrationRecorder, 'ensure_schema',
side_effect=MigrationSchemaMissing()):
self.cmd.check_migrations()
# Check a warning is emitted
self.assertIn("Not checking migrations", self.output.getvalue())
class ManageRunserverEmptyAllowedHosts(AdminScriptTestCase):
def setUp(self):
self.write_settings('settings.py', sdict={
'ALLOWED_HOSTS': [],
'DEBUG': False,
})
def tearDown(self):
self.remove_settings('settings.py')
def test_empty_allowed_hosts_error(self):
out, err = self.run_manage(['runserver'])
self.assertNoOutput(out)
self.assertOutput(err, 'CommandError: You must set settings.ALLOWED_HOSTS if DEBUG is False.')
class ManageTestserver(AdminScriptTestCase):
from django.core.management.commands.testserver import Command as TestserverCommand
@mock.patch.object(TestserverCommand, 'handle')
def test_testserver_handle_params(self, mock_handle):
out = StringIO()
call_command('testserver', 'blah.json', stdout=out)
mock_handle.assert_called_with(
'blah.json',
stdout=out, settings=None, pythonpath=None, verbosity=1,
traceback=False, addrport='', no_color=False, use_ipv6=False,
skip_checks=True, interactive=True,
)
##########################################################################
# COMMAND PROCESSING TESTS
# Check that user-space commands are correctly handled - in particular,
# that arguments to the commands are correctly parsed and processed.
##########################################################################
class CommandTypes(AdminScriptTestCase):
"Tests for the various types of base command types that can be defined."
def setUp(self):
self.write_settings('settings.py')
def tearDown(self):
self.remove_settings('settings.py')
def test_version(self):
"version is handled as a special case"
args = ['version']
out, err = self.run_manage(args)
self.assertNoOutput(err)
self.assertOutput(out, get_version())
def test_version_alternative(self):
"--version is equivalent to version"
args1, args2 = ['version'], ['--version']
# It's possible one outputs on stderr and the other on stdout, hence the set
self.assertEqual(set(self.run_manage(args1)), set(self.run_manage(args2)))
def test_help(self):
"help is handled as a special case"
args = ['help']
out, err = self.run_manage(args)
self.assertOutput(out, "Type 'manage.py help <subcommand>' for help on a specific subcommand.")
self.assertOutput(out, '[django]')
self.assertOutput(out, 'startapp')
self.assertOutput(out, 'startproject')
def test_help_commands(self):
"help --commands shows the list of all available commands"
args = ['help', '--commands']
out, err = self.run_manage(args)
self.assertNotInOutput(out, 'usage:')
self.assertNotInOutput(out, 'Options:')
self.assertNotInOutput(out, '[django]')
self.assertOutput(out, 'startapp')
self.assertOutput(out, 'startproject')
self.assertNotInOutput(out, '\n\n')
def test_help_alternative(self):
"--help is equivalent to help"
args1, args2 = ['help'], ['--help']
self.assertEqual(self.run_manage(args1), self.run_manage(args2))
    def test_help_short_alternative(self):
"-h is handled as a short form of --help"
args1, args2 = ['--help'], ['-h']
self.assertEqual(self.run_manage(args1), self.run_manage(args2))
def test_specific_help(self):
"--help can be used on a specific command"
args = ['check', '--help']
out, err = self.run_manage(args)
self.assertNoOutput(err)
self.assertOutput(out, "Checks the entire Django project for potential problems.")
def test_color_style(self):
style = color.no_style()
self.assertEqual(style.ERROR('Hello, world!'), 'Hello, world!')
style = color.make_style('nocolor')
self.assertEqual(style.ERROR('Hello, world!'), 'Hello, world!')
style = color.make_style('dark')
self.assertIn('Hello, world!', style.ERROR('Hello, world!'))
self.assertNotEqual(style.ERROR('Hello, world!'), 'Hello, world!')
# Default palette has color.
style = color.make_style('')
self.assertIn('Hello, world!', style.ERROR('Hello, world!'))
self.assertNotEqual(style.ERROR('Hello, world!'), 'Hello, world!')
def test_command_color(self):
class Command(BaseCommand):
requires_system_checks = False
def handle(self, *args, **options):
self.stdout.write('Hello, world!', self.style.ERROR)
self.stderr.write('Hello, world!', self.style.ERROR)
out = StringIO()
err = StringIO()
command = Command(stdout=out, stderr=err)
command.execute()
if color.supports_color():
self.assertIn('Hello, world!\n', out.getvalue())
self.assertIn('Hello, world!\n', err.getvalue())
self.assertNotEqual(out.getvalue(), 'Hello, world!\n')
self.assertNotEqual(err.getvalue(), 'Hello, world!\n')
else:
self.assertEqual(out.getvalue(), 'Hello, world!\n')
self.assertEqual(err.getvalue(), 'Hello, world!\n')
def test_command_no_color(self):
"--no-color prevent colorization of the output"
class Command(BaseCommand):
requires_system_checks = False
def handle(self, *args, **options):
self.stdout.write('Hello, world!', self.style.ERROR)
self.stderr.write('Hello, world!', self.style.ERROR)
out = StringIO()
err = StringIO()
command = Command(stdout=out, stderr=err, no_color=True)
command.execute()
self.assertEqual(out.getvalue(), 'Hello, world!\n')
self.assertEqual(err.getvalue(), 'Hello, world!\n')
out = StringIO()
err = StringIO()
command = Command(stdout=out, stderr=err)
command.execute(no_color=True)
self.assertEqual(out.getvalue(), 'Hello, world!\n')
self.assertEqual(err.getvalue(), 'Hello, world!\n')
def test_custom_stdout(self):
class Command(BaseCommand):
requires_system_checks = False
def handle(self, *args, **options):
self.stdout.write("Hello, World!")
out = StringIO()
command = Command(stdout=out)
command.execute()
self.assertEqual(out.getvalue(), "Hello, World!\n")
out.truncate(0)
new_out = StringIO()
command.execute(stdout=new_out)
self.assertEqual(out.getvalue(), "")
self.assertEqual(new_out.getvalue(), "Hello, World!\n")
def test_custom_stderr(self):
class Command(BaseCommand):
requires_system_checks = False
def handle(self, *args, **options):
self.stderr.write("Hello, World!")
err = StringIO()
command = Command(stderr=err)
command.execute()
self.assertEqual(err.getvalue(), "Hello, World!\n")
err.truncate(0)
new_err = StringIO()
command.execute(stderr=new_err)
self.assertEqual(err.getvalue(), "")
self.assertEqual(new_err.getvalue(), "Hello, World!\n")
def test_base_command(self):
"User BaseCommands can execute when a label is provided"
args = ['base_command', 'testlabel']
expected_labels = "('testlabel',)"
self._test_base_command(args, expected_labels)
def test_base_command_no_label(self):
"User BaseCommands can execute when no labels are provided"
args = ['base_command']
expected_labels = "()"
self._test_base_command(args, expected_labels)
def test_base_command_multiple_label(self):
"User BaseCommands can execute when no labels are provided"
args = ['base_command', 'testlabel', 'anotherlabel']
expected_labels = "('testlabel', 'anotherlabel')"
self._test_base_command(args, expected_labels)
def test_base_command_with_option(self):
"User BaseCommands can execute with options when a label is provided"
args = ['base_command', 'testlabel', '--option_a=x']
expected_labels = "('testlabel',)"
self._test_base_command(args, expected_labels, option_a="'x'")
def test_base_command_with_options(self):
"User BaseCommands can execute with multiple options when a label is provided"
args = ['base_command', 'testlabel', '-a', 'x', '--option_b=y']
expected_labels = "('testlabel',)"
self._test_base_command(args, expected_labels, option_a="'x'", option_b="'y'")
def test_base_command_with_wrong_option(self):
"User BaseCommands outputs command usage when wrong option is specified"
args = ['base_command', '--invalid']
out, err = self.run_manage(args)
self.assertNoOutput(out)
self.assertOutput(err, "usage: manage.py base_command")
self.assertOutput(err, "error: unrecognized arguments: --invalid")
def _test_base_command(self, args, labels, option_a="'1'", option_b="'2'"):
out, err = self.run_manage(args)
expected_out = (
"EXECUTE:BaseCommand labels=%s, "
"options=[('no_color', False), ('option_a', %s), ('option_b', %s), "
"('option_c', '3'), ('pythonpath', None), ('settings', None), "
"('traceback', False), ('verbosity', 1)]") % (labels, option_a, option_b)
self.assertNoOutput(err)
self.assertOutput(out, expected_out)
def test_base_run_from_argv(self):
"""
Test run_from_argv properly terminates even with custom execute() (#19665)
Also test proper traceback display.
"""
err = StringIO()
command = BaseCommand(stderr=err)
def raise_command_error(*args, **kwargs):
raise CommandError("Custom error")
command.execute = lambda args: args # This will trigger TypeError
        # If the exception is not a CommandError, run_from_argv() should
        # always re-raise the original exception.
with self.assertRaises(TypeError):
command.run_from_argv(['', ''])
        # If the exception is a CommandError and --traceback is not present,
        # run_from_argv() should raise SystemExit and print no traceback to
        # stderr.
command.execute = raise_command_error
err.truncate(0)
with self.assertRaises(SystemExit):
command.run_from_argv(['', ''])
err_message = err.getvalue()
self.assertNotIn("Traceback", err_message)
self.assertIn("CommandError", err_message)
        # If the exception is a CommandError and --traceback is present,
        # run_from_argv() should re-raise the original CommandError as if it
        # were any other exception.
err.truncate(0)
with self.assertRaises(CommandError):
command.run_from_argv(['', '', '--traceback'])
def test_run_from_argv_non_ascii_error(self):
"""
        Test that a non-ASCII CommandError message does not raise a
        UnicodeDecodeError in run_from_argv.
"""
def raise_command_error(*args, **kwargs):
raise CommandError("Erreur personnalisée")
command = BaseCommand(stderr=StringIO())
command.execute = raise_command_error
with self.assertRaises(SystemExit):
command.run_from_argv(['', ''])
def test_run_from_argv_closes_connections(self):
"""
A command called from the command line should close connections after
being executed (#21255).
"""
command = BaseCommand(stderr=StringIO())
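        # Bypass system checks and stub handle() so only run_from_argv()'s
        # connection cleanup is exercised.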
command.check = lambda: []
command.handle = lambda *args, **kwargs: args
with mock.patch('django.core.management.base.connections') as mock_connections:
command.run_from_argv(['', ''])
# Test connections have been closed
self.assertTrue(mock_connections.close_all.called)
def test_noargs(self):
"NoArg Commands can be executed"
args = ['noargs_command']
out, err = self.run_manage(args)
self.assertNoOutput(err)
self.assertOutput(out, "EXECUTE: noargs_command options=[('no_color', False), ('pythonpath', None), ('settings', None), ('traceback', False), ('verbosity', 1)]")
def test_noargs_with_args(self):
"NoArg Commands raise an error if an argument is provided"
args = ['noargs_command', 'argument']
out, err = self.run_manage(args)
self.assertOutput(err, "error: unrecognized arguments: argument")
def test_app_command(self):
"User AppCommands can execute when a single app name is provided"
args = ['app_command', 'auth']
out, err = self.run_manage(args)
self.assertNoOutput(err)
self.assertOutput(out, "EXECUTE:AppCommand name=django.contrib.auth, options=")
self.assertOutput(out, ", options=[('no_color', False), ('pythonpath', None), ('settings', None), ('traceback', False), ('verbosity', 1)]")
def test_app_command_no_apps(self):
"User AppCommands raise an error when no app name is provided"
args = ['app_command']
out, err = self.run_manage(args)
self.assertOutput(err, 'error: Enter at least one application label.')
def test_app_command_multiple_apps(self):
"User AppCommands raise an error when multiple app names are provided"
args = ['app_command', 'auth', 'contenttypes']
out, err = self.run_manage(args)
self.assertNoOutput(err)
self.assertOutput(out, "EXECUTE:AppCommand name=django.contrib.auth, options=")
self.assertOutput(out, ", options=[('no_color', False), ('pythonpath', None), ('settings', None), ('traceback', False), ('verbosity', 1)]")
self.assertOutput(out, "EXECUTE:AppCommand name=django.contrib.contenttypes, options=")
self.assertOutput(out, ", options=[('no_color', False), ('pythonpath', None), ('settings', None), ('traceback', False), ('verbosity', 1)]")
def test_app_command_invalid_app_label(self):
"User AppCommands can execute when a single app name is provided"
args = ['app_command', 'NOT_AN_APP']
out, err = self.run_manage(args)
self.assertOutput(err, "No installed app with label 'NOT_AN_APP'.")
def test_app_command_some_invalid_app_labels(self):
"User AppCommands can execute when some of the provided app names are invalid"
args = ['app_command', 'auth', 'NOT_AN_APP']
out, err = self.run_manage(args)
self.assertOutput(err, "No installed app with label 'NOT_AN_APP'.")
def test_label_command(self):
"User LabelCommands can execute when a label is provided"
args = ['label_command', 'testlabel']
out, err = self.run_manage(args)
self.assertNoOutput(err)
self.assertOutput(out, "EXECUTE:LabelCommand label=testlabel, options=[('no_color', False), ('pythonpath', None), ('settings', None), ('traceback', False), ('verbosity', 1)]")
def test_label_command_no_label(self):
"User LabelCommands raise an error if no label is provided"
args = ['label_command']
out, err = self.run_manage(args)
self.assertOutput(err, 'Enter at least one label')
def test_label_command_multiple_label(self):
"User LabelCommands are executed multiple times if multiple labels are provided"
args = ['label_command', 'testlabel', 'anotherlabel']
out, err = self.run_manage(args)
self.assertNoOutput(err)
self.assertOutput(out, "EXECUTE:LabelCommand label=testlabel, options=[('no_color', False), ('pythonpath', None), ('settings', None), ('traceback', False), ('verbosity', 1)]")
self.assertOutput(out, "EXECUTE:LabelCommand label=anotherlabel, options=[('no_color', False), ('pythonpath', None), ('settings', None), ('traceback', False), ('verbosity', 1)]")
class Discovery(SimpleTestCase):
def test_precedence(self):
"""
Apps listed first in INSTALLED_APPS have precedence.
"""
with self.settings(INSTALLED_APPS=['admin_scripts.complex_app',
'admin_scripts.simple_app',
'django.contrib.auth',
'django.contrib.contenttypes']):
out = StringIO()
call_command('duplicate', stdout=out)
self.assertEqual(out.getvalue().strip(), 'complex_app')
with self.settings(INSTALLED_APPS=['admin_scripts.simple_app',
'admin_scripts.complex_app',
'django.contrib.auth',
'django.contrib.contenttypes']):
out = StringIO()
call_command('duplicate', stdout=out)
self.assertEqual(out.getvalue().strip(), 'simple_app')
class ArgumentOrder(AdminScriptTestCase):
"""Tests for 2-stage argument parsing scheme.
django-admin command arguments are parsed in 2 parts; the core arguments
(--settings, --traceback and --pythonpath) are parsed using a basic parser,
    ignoring any unknown options. Then the full argument list is passed to the
    command parser, which extracts the arguments of interest to the
    individual command.
"""
def setUp(self):
self.write_settings('settings.py', apps=['django.contrib.auth', 'django.contrib.contenttypes'])
self.write_settings('alternate_settings.py')
def tearDown(self):
self.remove_settings('settings.py')
self.remove_settings('alternate_settings.py')
def test_setting_then_option(self):
""" Options passed after settings are correctly handled. """
args = ['base_command', 'testlabel', '--settings=alternate_settings', '--option_a=x']
self._test(args)
def test_setting_then_short_option(self):
""" Short options passed after settings are correctly handled. """
args = ['base_command', 'testlabel', '--settings=alternate_settings', '-a', 'x']
self._test(args)
def test_option_then_setting(self):
""" Options passed before settings are correctly handled. """
args = ['base_command', 'testlabel', '--option_a=x', '--settings=alternate_settings']
self._test(args)
def test_short_option_then_setting(self):
""" Short options passed before settings are correctly handled. """
args = ['base_command', 'testlabel', '-a', 'x', '--settings=alternate_settings']
self._test(args)
def test_option_then_setting_then_option(self):
""" Options are correctly handled when they are passed before and after
a setting. """
args = ['base_command', 'testlabel', '--option_a=x', '--settings=alternate_settings', '--option_b=y']
self._test(args, option_b="'y'")
def _test(self, args, option_b="'2'"):
out, err = self.run_manage(args)
self.assertNoOutput(err)
self.assertOutput(out, "EXECUTE:BaseCommand labels=('testlabel',), options=[('no_color', False), ('option_a', 'x'), ('option_b', %s), ('option_c', '3'), ('pythonpath', None), ('settings', 'alternate_settings'), ('traceback', False), ('verbosity', 1)]" % option_b)
@override_settings(ROOT_URLCONF='admin_scripts.urls')
class StartProject(LiveServerTestCase, AdminScriptTestCase):
available_apps = [
'admin_scripts',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
]
def test_wrong_args(self):
"Make sure passing the wrong kinds of arguments outputs an error and prints usage"
out, err = self.run_django_admin(['startproject'])
self.assertNoOutput(out)
self.assertOutput(err, "usage:")
self.assertOutput(err, "You must provide a project name.")
def test_simple_project(self):
"Make sure the startproject management command creates a project"
args = ['startproject', 'testproject']
testproject_dir = os.path.join(test_dir, 'testproject')
self.addCleanup(shutil.rmtree, testproject_dir, True)
out, err = self.run_django_admin(args)
self.assertNoOutput(err)
self.assertTrue(os.path.isdir(testproject_dir))
# running again..
out, err = self.run_django_admin(args)
self.assertNoOutput(out)
self.assertOutput(err, "already exists")
def test_invalid_project_name(self):
"Make sure the startproject management command validates a project name"
for bad_name in ('7testproject', '../testproject'):
args = ['startproject', bad_name]
testproject_dir = os.path.join(test_dir, bad_name)
self.addCleanup(shutil.rmtree, testproject_dir, True)
out, err = self.run_django_admin(args)
self.assertOutput(err, "Error: '%s' is not a valid project name. "
"Please make sure the name begins with a letter or underscore." % bad_name)
self.assertFalse(os.path.exists(testproject_dir))
def test_simple_project_different_directory(self):
"Make sure the startproject management command creates a project in a specific directory"
args = ['startproject', 'testproject', 'othertestproject']
testproject_dir = os.path.join(test_dir, 'othertestproject')
os.mkdir(testproject_dir)
self.addCleanup(shutil.rmtree, testproject_dir)
out, err = self.run_django_admin(args)
self.assertNoOutput(err)
self.assertTrue(os.path.exists(os.path.join(testproject_dir, 'manage.py')))
# running again..
out, err = self.run_django_admin(args)
self.assertNoOutput(out)
self.assertOutput(err, "already exists")
def test_custom_project_template(self):
"Make sure the startproject management command is able to use a different project template"
template_path = os.path.join(custom_templates_dir, 'project_template')
args = ['startproject', '--template', template_path, 'customtestproject']
testproject_dir = os.path.join(test_dir, 'customtestproject')
self.addCleanup(shutil.rmtree, testproject_dir, True)
out, err = self.run_django_admin(args)
self.assertNoOutput(err)
self.assertTrue(os.path.isdir(testproject_dir))
self.assertTrue(os.path.exists(os.path.join(testproject_dir, 'additional_dir')))
def test_template_dir_with_trailing_slash(self):
"Ticket 17475: Template dir passed has a trailing path separator"
template_path = os.path.join(custom_templates_dir, 'project_template' + os.sep)
args = ['startproject', '--template', template_path, 'customtestproject']
testproject_dir = os.path.join(test_dir, 'customtestproject')
self.addCleanup(shutil.rmtree, testproject_dir, True)
out, err = self.run_django_admin(args)
self.assertNoOutput(err)
self.assertTrue(os.path.isdir(testproject_dir))
self.assertTrue(os.path.exists(os.path.join(testproject_dir, 'additional_dir')))
def test_custom_project_template_from_tarball_by_path(self):
"Make sure the startproject management command is able to use a different project template from a tarball"
template_path = os.path.join(custom_templates_dir, 'project_template.tgz')
args = ['startproject', '--template', template_path, 'tarballtestproject']
testproject_dir = os.path.join(test_dir, 'tarballtestproject')
self.addCleanup(shutil.rmtree, testproject_dir, True)
out, err = self.run_django_admin(args)
self.assertNoOutput(err)
self.assertTrue(os.path.isdir(testproject_dir))
self.assertTrue(os.path.exists(os.path.join(testproject_dir, 'run.py')))
def test_custom_project_template_from_tarball_to_alternative_location(self):
"Startproject can use a project template from a tarball and create it in a specified location"
template_path = os.path.join(custom_templates_dir, 'project_template.tgz')
args = ['startproject', '--template', template_path, 'tarballtestproject', 'altlocation']
testproject_dir = os.path.join(test_dir, 'altlocation')
os.mkdir(testproject_dir)
self.addCleanup(shutil.rmtree, testproject_dir)
out, err = self.run_django_admin(args)
self.assertNoOutput(err)
self.assertTrue(os.path.isdir(testproject_dir))
self.assertTrue(os.path.exists(os.path.join(testproject_dir, 'run.py')))
def test_custom_project_template_from_tarball_by_url(self):
"Make sure the startproject management command is able to use a different project template from a tarball via a url"
template_url = '%s/custom_templates/project_template.tgz' % self.live_server_url
args = ['startproject', '--template', template_url, 'urltestproject']
testproject_dir = os.path.join(test_dir, 'urltestproject')
self.addCleanup(shutil.rmtree, testproject_dir, True)
out, err = self.run_django_admin(args)
self.assertNoOutput(err)
self.assertTrue(os.path.isdir(testproject_dir))
self.assertTrue(os.path.exists(os.path.join(testproject_dir, 'run.py')))
def test_project_template_tarball_url(self):
"Startproject management command handles project template tar/zip balls from non-canonical urls"
template_url = '%s/custom_templates/project_template.tgz/' % self.live_server_url
args = ['startproject', '--template', template_url, 'urltestproject']
testproject_dir = os.path.join(test_dir, 'urltestproject')
self.addCleanup(shutil.rmtree, testproject_dir, True)
out, err = self.run_django_admin(args)
self.assertNoOutput(err)
self.assertTrue(os.path.isdir(testproject_dir))
self.assertTrue(os.path.exists(os.path.join(testproject_dir, 'run.py')))
def test_file_without_extension(self):
"Make sure the startproject management command is able to render custom files"
template_path = os.path.join(custom_templates_dir, 'project_template')
args = ['startproject', '--template', template_path, 'customtestproject', '-e', 'txt', '-n', 'Procfile']
testproject_dir = os.path.join(test_dir, 'customtestproject')
self.addCleanup(shutil.rmtree, testproject_dir, True)
out, err = self.run_django_admin(args)
self.assertNoOutput(err)
self.assertTrue(os.path.isdir(testproject_dir))
self.assertTrue(os.path.exists(os.path.join(testproject_dir, 'additional_dir')))
base_path = os.path.join(testproject_dir, 'additional_dir')
for f in ('Procfile', 'additional_file.py', 'requirements.txt'):
self.assertTrue(os.path.exists(os.path.join(base_path, f)))
with open(os.path.join(base_path, f)) as fh:
self.assertEqual(fh.read().strip(),
'# some file for customtestproject test project')
def test_custom_project_template_context_variables(self):
"Make sure template context variables are rendered with proper values"
template_path = os.path.join(custom_templates_dir, 'project_template')
args = ['startproject', '--template', template_path, 'another_project', 'project_dir']
testproject_dir = os.path.join(test_dir, 'project_dir')
os.mkdir(testproject_dir)
self.addCleanup(shutil.rmtree, testproject_dir)
out, err = self.run_django_admin(args)
self.assertNoOutput(err)
test_manage_py = os.path.join(testproject_dir, 'manage.py')
with open(test_manage_py, 'r') as fp:
content = force_text(fp.read())
self.assertIn("project_name = 'another_project'", content)
self.assertIn("project_directory = '%s'" % testproject_dir, content)
def test_no_escaping_of_project_variables(self):
"Make sure template context variables are not html escaped"
# We're using a custom command so we need the alternate settings
self.write_settings('alternate_settings.py')
self.addCleanup(self.remove_settings, 'alternate_settings.py')
template_path = os.path.join(custom_templates_dir, 'project_template')
args = ['custom_startproject', '--template', template_path, 'another_project', 'project_dir', '--extra', '<&>', '--settings=alternate_settings']
testproject_dir = os.path.join(test_dir, 'project_dir')
os.mkdir(testproject_dir)
self.addCleanup(shutil.rmtree, testproject_dir)
out, err = self.run_manage(args)
self.assertNoOutput(err)
test_manage_py = os.path.join(testproject_dir, 'additional_dir', 'extra.py')
with open(test_manage_py, 'r') as fp:
content = fp.read()
self.assertIn("<&>", content)
def test_custom_project_destination_missing(self):
"""
Make sure an exception is raised when the provided
destination directory doesn't exist
"""
template_path = os.path.join(custom_templates_dir, 'project_template')
args = ['startproject', '--template', template_path, 'yet_another_project', 'project_dir2']
testproject_dir = os.path.join(test_dir, 'project_dir2')
out, err = self.run_django_admin(args)
self.assertNoOutput(out)
self.assertOutput(err, "Destination directory '%s' does not exist, please create it first." % testproject_dir)
self.assertFalse(os.path.exists(testproject_dir))
def test_custom_project_template_with_non_ascii_templates(self):
"Ticket 18091: Make sure the startproject management command is able to render templates with non-ASCII content"
template_path = os.path.join(custom_templates_dir, 'project_template')
args = ['startproject', '--template', template_path, '--extension=txt', 'customtestproject']
testproject_dir = os.path.join(test_dir, 'customtestproject')
self.addCleanup(shutil.rmtree, testproject_dir, True)
out, err = self.run_django_admin(args)
self.assertNoOutput(err)
self.assertTrue(os.path.isdir(testproject_dir))
path = os.path.join(testproject_dir, 'ticket-18091-non-ascii-template.txt')
with codecs.open(path, 'r', encoding='utf-8') as f:
self.assertEqual(f.read().splitlines(False), [
'Some non-ASCII text for testing ticket #18091:',
'üäö €'])
class DiffSettings(AdminScriptTestCase):
"""Tests for diffsettings management command."""
def test_basic(self):
"""Runs without error and emits settings diff."""
self.write_settings('settings_to_diff.py', sdict={'FOO': '"bar"'})
self.addCleanup(self.remove_settings, 'settings_to_diff.py')
args = ['diffsettings', '--settings=settings_to_diff']
out, err = self.run_manage(args)
self.assertNoOutput(err)
self.assertOutput(out, "FOO = 'bar' ###")
def test_all(self):
"""The all option also shows settings with the default value."""
self.write_settings('settings_to_diff.py', sdict={'STATIC_URL': 'None'})
self.addCleanup(self.remove_settings, 'settings_to_diff.py')
args = ['diffsettings', '--settings=settings_to_diff', '--all']
out, err = self.run_manage(args)
self.assertNoOutput(err)
self.assertOutput(out, "### STATIC_URL = None")
class Dumpdata(AdminScriptTestCase):
"""Tests for dumpdata management command."""
def setUp(self):
self.write_settings('settings.py')
def tearDown(self):
self.remove_settings('settings.py')
def test_pks_parsing(self):
"""Regression for #20509
Test would raise an exception rather than printing an error message.
"""
args = ['dumpdata', '--pks=1']
out, err = self.run_manage(args)
self.assertOutput(err, "You can only use --pks option with one model")
self.assertNoOutput(out)
|
bsd-3-clause
|
cchurch/ansible
|
lib/ansible/modules/windows/win_copy.py
|
71
|
6764
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2015, Jon Hawkesworth (@jhawkesworth) <[email protected]>
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'core'}
DOCUMENTATION = r'''
---
module: win_copy
version_added: '1.9.2'
short_description: Copies files to remote locations on Windows hosts
description:
- The C(win_copy) module copies a file on the local box to remote windows locations.
- For non-Windows targets, use the M(copy) module instead.
options:
content:
description:
- When used instead of C(src), sets the contents of a file directly to the
specified value.
- This is for simple values, for anything complex or with formatting please
switch to the M(template) module.
type: str
version_added: '2.3'
decrypt:
description:
- This option controls the autodecryption of source files using vault.
type: bool
default: yes
version_added: '2.5'
dest:
description:
- Remote absolute path where the file should be copied to.
- If C(src) is a directory, this must be a directory too.
- Use \ for path separators or \\ when in "double quotes".
- If C(dest) ends with \ then source or the contents of source will be
copied to the directory without renaming.
- If C(dest) is a nonexistent path, it will only be created if C(dest) ends
with "/" or "\", or C(src) is a directory.
- If C(src) and C(dest) are files and if the parent directory of C(dest)
doesn't exist, then the task will fail.
type: path
required: yes
backup:
description:
- Determine whether a backup should be created.
- When set to C(yes), create a backup file including the timestamp information
so you can get the original file back if you somehow clobbered it incorrectly.
- No backup is taken when C(remote_src=False) and multiple files are being
copied.
type: bool
default: no
version_added: '2.8'
force:
description:
    - If set to C(yes), the file will only be transferred if the content
      differs from the destination.
    - If set to C(no), the file will only be transferred if the
      destination does not exist.
    - If set to C(no), no checksumming of the content is performed, which
      can help improve performance on larger files.
type: bool
default: yes
version_added: '2.3'
local_follow:
description:
- This flag indicates that filesystem links in the source tree, if they
exist, should be followed.
type: bool
default: yes
version_added: '2.4'
remote_src:
description:
    - If C(no), it will search for C(src) on the originating/master machine.
- If C(yes), it will go to the remote/target machine for the src.
type: bool
default: no
version_added: '2.3'
src:
description:
- Local path to a file to copy to the remote server; can be absolute or
relative.
- If path is a directory, it is copied (including the source folder name)
recursively to C(dest).
- If path is a directory and ends with "/", only the inside contents of
that directory are copied to the destination. Otherwise, if it does not
end with "/", the directory itself with all contents is copied.
- If path is a file and dest ends with "\", the file is copied to the
folder with the same filename.
- Required unless using C(content).
type: path
notes:
- Currently win_copy does not support copying symbolic links, either from
  local to remote or from remote to remote.
- It is recommended that backslashes C(\) are used instead of C(/) when dealing
with remote paths.
- Because win_copy runs over WinRM, it is not a very efficient transfer
mechanism. If sending large files consider hosting them on a web service and
using M(win_get_url) instead.
seealso:
- module: assemble
- module: copy
- module: win_get_url
- module: win_robocopy
author:
- Jon Hawkesworth (@jhawkesworth)
- Jordan Borean (@jborean93)
'''
EXAMPLES = r'''
- name: Copy a single file
win_copy:
src: /srv/myfiles/foo.conf
dest: C:\Temp\renamed-foo.conf
- name: Copy a single file, but keep a backup
win_copy:
src: /srv/myfiles/foo.conf
dest: C:\Temp\renamed-foo.conf
backup: yes
- name: Copy a single file keeping the filename
win_copy:
    src: /srv/myfiles/foo.conf
dest: C:\Temp\
- name: Copy folder to C:\Temp (results in C:\Temp\temp_files)
win_copy:
src: files/temp_files
dest: C:\Temp
- name: Copy folder contents recursively
win_copy:
src: files/temp_files/
dest: C:\Temp
- name: Copy a single file where the source is on the remote host
win_copy:
src: C:\Temp\foo.txt
dest: C:\ansible\foo.txt
remote_src: yes
- name: Copy a folder recursively where the source is on the remote host
win_copy:
src: C:\Temp
dest: C:\ansible
remote_src: yes
- name: Set the contents of a file
win_copy:
content: abc123
dest: C:\Temp\foo.txt
- name: Copy a single file as another user
win_copy:
src: NuGet.config
dest: '%AppData%\NuGet\NuGet.config'
vars:
ansible_become_user: user
ansible_become_password: pass
# The tmp dir must be set when using win_copy as another user
# This ensures the become user will have permissions for the operation
    # Make sure to specify a folder both the ansible_user and the become_user have access to (i.e. not %TEMP%, which is user specific and requires Admin)
ansible_remote_tmp: 'c:\tmp'
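# Illustrative extra example (not from the original module docs): because
# dest ends with a backslash, the documented behaviour is that the missing
# directory is created and the copied file keeps its original name.
- name: Copy a file into a directory that may not exist yet
  win_copy:
    src: /srv/myfiles/foo.conf
    dest: C:\Temp\conf\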
'''
RETURN = r'''
backup_file:
description: Name of the backup file that was created.
returned: if backup=yes
type: str
sample: C:\Path\To\File.txt.11540.20150212-220915.bak
dest:
description: Destination file/path.
returned: changed
type: str
sample: C:\Temp\
src:
description: Source file used for the copy on the target machine.
returned: changed
type: str
sample: /home/httpd/.ansible/tmp/ansible-tmp-1423796390.97-147729857856000/source
checksum:
description: SHA1 checksum of the file after running copy.
returned: success, src is a file
type: str
sample: 6e642bb8dd5c2e027bf21dd923337cbb4214f827
size:
description: Size of the target, after execution.
returned: changed, src is a file
type: int
sample: 1220
operation:
description: Whether a single file copy took place or a folder copy.
returned: success
type: str
sample: file_copy
original_basename:
description: Basename of the copied file.
returned: changed, src is a file
type: str
sample: foo.txt
'''
|
gpl-3.0
|
andrewyoung1991/abjad
|
abjad/tools/pitchtools/PitchArrayRow.py
|
2
|
16038
|
# -*- encoding: utf-8 -*-
import copy
from abjad.tools import durationtools
from abjad.tools import indicatortools
from abjad.tools.abctools import AbjadObject
class PitchArrayRow(AbjadObject):
r'''A pitch array row.
::
>>> array = pitchtools.PitchArray([[1, 2, 1], [2, 1, 1]])
>>> array[0].cells[0].pitches.append(0)
>>> array[0].cells[1].pitches.append(2)
>>> array[1].cells[2].pitches.append(4)
>>> print(array)
[c'] [d' ] [ ]
[ ] [ ] [e']
::
>>> array[0]
PitchArrayRow(c', d' x2, x1)
::
>>> array[0].cell_widths
(1, 2, 1)
::
>>> array[0].dimensions
(1, 4)
::
>>> array[0].pitches
(NamedPitch("c'"), NamedPitch("d'"))
'''
### INITIALIZER ###
def __init__(self, cells=None):
from abjad.tools import pitchtools
self._parent_array = None
#self._pitch_range = pitchtools.PitchRange(None, None)
self._pitch_range = pitchtools.PitchRange()
self._cells = []
cells = cells or []
self.extend(cells)
### SPECIAL METHODS ###
def __add__(self, arg):
r'''Concatenates `arg` to pitch array row.
Returns new pitch array row.
'''
if not isinstance(arg, PitchArrayRow):
message = 'must be pitch array row.'
raise TypeError(message)
self_copy = copy.copy(self)
arg_copy = copy.copy(arg)
new_row = PitchArrayRow([])
new_row.extend(self_copy.cells)
new_row.extend(arg_copy.cells)
return new_row
def __copy__(self):
r'''Copies pitch array row.
Returns new pitch array row.
'''
new_cells = []
for cell in self.cells:
new_cell = copy.copy(cell)
new_cells.append(new_cell)
return PitchArrayRow(new_cells)
def __eq__(self, arg):
r'''Is true when `arg` is a pitch array row with contents equal to that of
this pitch array row. Otherwise false.
Returns boolean.
'''
if isinstance(arg, PitchArrayRow):
for self_cell, arg_cell in zip(self.cells, arg.cells):
if not self_cell.matches_cell(arg_cell):
return False
return True
return False
def __getitem__(self, arg):
r'''Gets pitch array cell `arg` from pitch array row.
Returns pitch array cell.
'''
if isinstance(arg, int):
if 0 <= arg < self.width:
accumulated_width = 0
for cell in self.cells:
total_width = accumulated_width + cell.width
if accumulated_width <= arg < total_width:
return cell
accumulated_width = total_width
elif 0 < abs(arg) < self.width:
accumulated_width = 0
abs_arg = abs(arg)
for cell in reversed(self.cells):
total_width = accumulated_width + cell.width
if accumulated_width < abs_arg <= total_width:
return cell
accumulated_width = total_width
else:
message = 'no such cell in row.'
raise IndexError(message)
elif isinstance(arg, slice):
cells = []
start, stop, step = arg.indices(self.width)
for cell_index in range(start, stop, step):
cell = self[cell_index]
if len(cells) == 0:
cells.append(cell)
else:
if cells[-1] is not cell:
cells.append(cell)
cells = tuple(cells)
return cells
else:
message = 'must be int or slice.'
raise ValueError(message)
def __getstate__(self):
r'''Gets state of pitch array row.
'''
return vars(self)
def __hash__(self):
r'''Hashes pitch array row.
Required to be explicitly re-defined on Python 3 if __eq__ changes.
Returns integer.
'''
return super(PitchArrayRow, self).__hash__()
def __iadd__(self, arg):
r'''Adds `arg` to pitch array row in place.
Returns pitch array row.
'''
if not isinstance(arg, PitchArrayRow):
message = 'must be pitch array row.'
raise TypeError(message)
copy_arg = copy.copy(arg)
self.extend(copy_arg.cells)
return self
def __len__(self):
r'''Length of pitch array row.
Defined equal to the width of pitch array row.
Returns nonnegative integer.
'''
return self.width
def __ne__(self, arg):
r'''Is true when pitch array row does not equal `arg`. Otherwise false.
Returns boolean.
'''
return not self == arg
def __repr__(self):
r'''Gets interpreter representation of pitch array row.
Returns string.
'''
return '{}({})'.format(type(self).__name__, self._compact_summary)
def __str__(self):
r'''String representation of pitch array row.
Returns string.
'''
result = [str(cell) for cell in self.cells]
result = ' '.join(result)
return result
### PRIVATE PROPERTIES ###
@property
def _compact_summary(self):
len_self = len(self.cells)
if not len_self:
return ''
elif 0 < len_self <= 8:
result = [
cell._format_row_column_repr_string for cell in self.cells]
return ', '.join(result)
else:
left = ', '.join(
[x._format_row_column_repr_string for x in self.cells[:2]])
right = ', '.join(
[x._format_row_column_repr_string for x in self.cells[-2:]])
number_in_middle = len_self - 4
            middle = ', ... [%s] ..., ' % number_in_middle
return left + middle + right
@property
def _format_contents_string(self):
result = []
for cell in self.cells:
result.append(cell._format_row_column_repr_string)
result = ', '.join(result)
return result
### PUBLIC PROPERTIES ###
@property
def cell_tokens(self):
r'''Cell items of pitch array row.
Returns tuple.
'''
return tuple([cell.item for cell in self.cells])
@property
def cell_widths(self):
r'''Cell widths of pitch array row.
Returns tuple.
'''
return tuple([cell.width for cell in self.cells])
@property
def cells(self):
r'''Cells of pitch array row.
Returns tuple.
'''
return tuple(self._cells)
@property
def depth(self):
r'''Depth of pitch array row.
Defined equal to ``1``.
Returns ``1``.
'''
return 1
@property
def dimensions(self):
r'''Dimensions of pitch array row.
Returns pair.
'''
return self.depth, self.width
@property
def is_defective(self):
r'''Is true when width of pitch array row does not equal width of parent
pitch array. Otherwise false.
Returns boolean.
'''
if self.parent_array is not None:
return not self.width == self.parent_array.width
return False
@property
def is_in_range(self):
r'''Is true when all pitches in pitch array row are in pitch range of
pitch array row. Otherwise false.
Returns boolean.
'''
return all(pitch in self.pitch_range for pitch in self.pitches)
@property
def parent_array(self):
r'''Parent pitch array housing pitch array row.
Returns pitch array or none.
'''
return self._parent_array
@property
def pitch_range(self):
r'''Gets and set pitch range of pitch array row.
Returns pitch range.
'''
return self._pitch_range
@pitch_range.setter
def pitch_range(self, arg):
from abjad.tools import pitchtools
if not isinstance(arg, pitchtools.PitchRange):
message = 'must be pitch range.'
raise TypeError(message)
self._pitch_range = arg
@property
def pitches(self):
r'''Pitches in pitch array row.
Returns tuple.
'''
pitches = []
for cell in self.cells:
pitches.extend(cell.pitches)
return tuple(pitches)
@property
def row_index(self):
r'''Row index of pitch array row in parent pitch array.
Returns nonnegative integer.
'''
parent_array = self.parent_array
if parent_array is not None:
return parent_array._rows.index(self)
message = 'row has no parent array.'
raise IndexError(message)
@property
def weight(self):
r'''Weight of pitch array row.
Defined equal to sum of weights of pitch array cells in pitch array
row.
Returns nonnegative integer.
'''
return sum([cell.weight for cell in self.cells])
@property
def width(self):
r'''Width of pitch array row.
Defined equal to sum of widths of pitch array cells in pitch array row.
Returns nonnegative integer.
'''
return sum([cell.width for cell in self.cells])
### PUBLIC METHODS ###
def append(self, cell_token):
r'''Appends `cell_token` to pitch array row.
Returns none.
'''
from abjad.tools import pitchtools
cell = pitchtools.PitchArrayCell(cell_token)
cell._parent_row = self
self._cells.append(cell)
def apply_pitches(self, pitch_tokens):
r'''Applies `pitch_tokens` to pitch cells in pitch array row.
Returns none.
'''
pitch_tokens = pitch_tokens[:]
if pitch_tokens:
for cell in self.cells:
if cell.pitches:
cell.pitches = [pitch_tokens.pop(0)]
else:
self.empty_pitches()
def copy_subrow(self, start=None, stop=None):
r'''Copies subrow of pitch array row from `start` to `stop`.
Returns new pitch array row.
'''
arg = slice(start, stop)
start, stop, step = arg.indices(self.width)
if not step == 1:
            message = 'step not implemented.'
raise NotImplementedError(message)
column_indices = set(range(start, stop, step))
row = PitchArrayRow([])
cells = self[arg]
new_cells = []
for cell in cells:
if not cell in new_cells:
trim = [
x for x in cell.column_indices if x not in column_indices]
new_width = cell.width - len(trim)
new_cell = copy.copy(cell)
new_cell._width = new_width
new_cells.append(new_cell)
row.extend(new_cells)
return row
def empty_pitches(self):
r'''Empties pitches in pitch array row.
Returns none.
'''
for cell in self.cells:
cell.pitches = []
def extend(self, cell_tokens):
r'''Extends `cell_tokens` against pitch array row.
Returns none.
'''
for cell_token in cell_tokens:
self.append(cell_token)
def has_spanning_cell_over_index(self, i):
r'''Is true when pitch array row has one or more cells spanning over
index `i`. Otherwise false.
Returns boolean.
'''
try:
cell = self[i]
return cell.column_indices[0] < i
except IndexError:
return False
def index(self, cell):
r'''Index of pitch array `cell` in pitch array row.
        Returns nonnegative integer.
'''
return self._cells.index(cell)
def merge(self, cells):
r'''Merges `cells`.
Returns pitch array cell.
'''
from abjad.tools import pitchtools
column_indices = []
pitches = []
width = 0
for cell in cells:
if not isinstance(cell, pitchtools.PitchArrayCell):
raise TypeError
if not cell.parent_row is self:
message = 'cells must belong to row.'
raise ValueError(message)
column_indices.extend(cell.column_indices)
pitches.extend(cell.pitches)
width += cell.width
start = min(column_indices)
stop = start + len(column_indices)
strict_series = list(range(start, stop))
if not column_indices == strict_series:
message = 'cells must be contiguous.'
raise ValueError(message)
first_cell = cells[0]
for cell in cells[1:]:
self.remove(cell)
first_cell._pitches = pitches
first_cell._width = width
return first_cell
def pad_to_width(self, width):
r'''Pads pitch array row to `width`.
Returns none.
'''
from abjad.tools import pitchtools
self_width = self.width
if width < self_width:
message = 'pad width must not be less than row width.'
raise ValueError(message)
missing_width = width - self_width
for i in range(missing_width):
cell = pitchtools.PitchArrayCell()
self.append(cell)
def pop(self, cell_index):
r'''Pops cell `cell_index` from pitch array row.
Returns pitch array cell.
'''
        cell = self._cells.pop(cell_index)
cell._parent_row = None
return cell
def remove(self, cell):
        r'''Removes `cell` from pitch array row.
Returns none.
'''
for i, x in enumerate(self.cells):
if x is cell:
self._cells.pop(i)
break
cell._parent_row = None
def to_measure(self, cell_duration_denominator=8):
r'''Changes pitch array row to measure with time signature
numerator equal to pitch array row width and
time signature denominator equal to `cell_duration_denominator`.
::
>>> array = pitchtools.PitchArray([
... [1, (2, 1), ([-2, -1.5], 2)],
... [(7, 2), (6, 1), 1]])
::
>>> print(array)
[ ] [d'] [bf bqf ]
[g' ] [fs' ] [ ]
::
>>> measure = array.rows[0].to_measure()
.. doctest::
>>> print(format(measure))
{
\time 4/8
r8
d'8
<bf bqf>4
}
Returns measure.
'''
from abjad.tools import scoretools
time_signature = indicatortools.TimeSignature(
(self.width, cell_duration_denominator))
measure = scoretools.Measure(time_signature, [])
basic_cell_duration = \
durationtools.Duration(1, cell_duration_denominator)
measure_pitches, measure_durations = [], []
for cell in self.cells:
cell_pitches = cell.pitches
if not cell_pitches:
measure_pitches.append(None)
elif len(cell_pitches) == 1:
measure_pitches.append(cell_pitches[0])
else:
measure_pitches.append(cell_pitches)
measure_duration = cell.width * basic_cell_duration
measure_durations.append(measure_duration)
leaves = scoretools.make_leaves(measure_pitches, measure_durations)
measure.extend(leaves)
return measure
def withdraw(self):
r'''Withdraws pitch array row from parent pitch array.
Returns pitch array row.
'''
if self.parent_array is not None:
self.parent_array.remove_row(self)
return self
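# Illustrative usage sketch (an addition, not part of the original module):
# ``__add__`` copies both operands before concatenating, so the result is
# independent of its sources. Reusing ``array`` from the class doctest above:
#
#     >>> row = array[0] + array[1]
#     >>> row.width
#     8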
|
gpl-3.0
|
ita1024/samba
|
third_party/dnspython/dns/edns.py
|
47
|
4318
|
# Copyright (C) 2009, 2011 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""EDNS Options"""
NSID = 3
class Option(object):
"""Base class for all EDNS option types.
"""
def __init__(self, otype):
"""Initialize an option.
        @param otype: The option type
        @type otype: int
"""
self.otype = otype
def to_wire(self, file):
"""Convert an option to wire format.
"""
raise NotImplementedError
def from_wire(cls, otype, wire, current, olen):
"""Build an EDNS option object from wire format
@param otype: The option type
@type otype: int
@param wire: The wire-format message
@type wire: string
        @param current: The offset in wire of the beginning of the rdata.
@type current: int
@param olen: The length of the wire-format option data
@type olen: int
        @rtype: dns.edns.Option instance"""
raise NotImplementedError
from_wire = classmethod(from_wire)
def _cmp(self, other):
"""Compare an ENDS option with another option of the same type.
Return < 0 if self < other, 0 if self == other, and > 0 if self > other.
"""
raise NotImplementedError
def __eq__(self, other):
if not isinstance(other, Option):
return False
if self.otype != other.otype:
return False
return self._cmp(other) == 0
def __ne__(self, other):
if not isinstance(other, Option):
return False
if self.otype != other.otype:
return False
return self._cmp(other) != 0
def __lt__(self, other):
if not isinstance(other, Option) or \
self.otype != other.otype:
return NotImplemented
return self._cmp(other) < 0
def __le__(self, other):
if not isinstance(other, Option) or \
self.otype != other.otype:
return NotImplemented
return self._cmp(other) <= 0
def __ge__(self, other):
if not isinstance(other, Option) or \
self.otype != other.otype:
return NotImplemented
return self._cmp(other) >= 0
def __gt__(self, other):
if not isinstance(other, Option) or \
self.otype != other.otype:
return NotImplemented
return self._cmp(other) > 0
class GenericOption(Option):
"""Generate Rdata Class
This class is used for EDNS option types for which we have no better
implementation.
"""
def __init__(self, otype, data):
super(GenericOption, self).__init__(otype)
self.data = data
def to_wire(self, file):
file.write(self.data)
def from_wire(cls, otype, wire, current, olen):
return cls(otype, wire[current : current + olen])
from_wire = classmethod(from_wire)
def _cmp(self, other):
return cmp(self.data, other.data)
_type_to_class = {
}
def get_option_class(otype):
cls = _type_to_class.get(otype)
if cls is None:
cls = GenericOption
return cls
def option_from_wire(otype, wire, current, olen):
"""Build an EDNS option object from wire format
@param otype: The option type
@type otype: int
@param wire: The wire-format message
@type wire: string
    @param current: The offset in wire of the beginning of the rdata.
@type current: int
@param olen: The length of the wire-format option data
@type olen: int
    @rtype: dns.edns.Option instance"""
cls = get_option_class(otype)
return cls.from_wire(otype, wire, current, olen)
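# Illustrative sketch (an addition, not part of the original module): a
# custom EDNS option type is handled by subclassing Option (or
# GenericOption) and registering the class in _type_to_class; types
# without a registered class fall back to GenericOption.
#
#     class NSIDOption(GenericOption):
#         """Hypothetical handler for NSID (option code 3, defined above)."""
#
#     _type_to_class[NSID] = NSIDOption
#     # option_from_wire(NSID, wire, current, olen) now returns an NSIDOption.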
|
gpl-3.0
|
Ted1993/Flasky
|
venv/lib/python2.7/site-packages/mako/lexer.py
|
39
|
16121
|
# mako/lexer.py
# Copyright (C) 2006-2015 the Mako authors and contributors <see AUTHORS file>
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""provides the Lexer class for parsing template strings into parse trees."""
import re
import codecs
from mako import parsetree, exceptions, compat
from mako.pygen import adjust_whitespace
_regexp_cache = {}
class Lexer(object):
def __init__(self, text, filename=None,
disable_unicode=False,
input_encoding=None, preprocessor=None):
self.text = text
self.filename = filename
self.template = parsetree.TemplateNode(self.filename)
self.matched_lineno = 1
self.matched_charpos = 0
self.lineno = 1
self.match_position = 0
self.tag = []
self.control_line = []
self.ternary_stack = []
self.disable_unicode = disable_unicode
self.encoding = input_encoding
if compat.py3k and disable_unicode:
raise exceptions.UnsupportedError(
"Mako for Python 3 does not "
"support disabling Unicode")
if preprocessor is None:
self.preprocessor = []
elif not hasattr(preprocessor, '__iter__'):
self.preprocessor = [preprocessor]
else:
self.preprocessor = preprocessor
@property
def exception_kwargs(self):
return {'source': self.text,
'lineno': self.matched_lineno,
'pos': self.matched_charpos,
'filename': self.filename}
def match(self, regexp, flags=None):
"""compile the given regexp, cache the reg, and call match_reg()."""
try:
reg = _regexp_cache[(regexp, flags)]
except KeyError:
if flags:
reg = re.compile(regexp, flags)
else:
reg = re.compile(regexp)
_regexp_cache[(regexp, flags)] = reg
return self.match_reg(reg)
def match_reg(self, reg):
"""match the given regular expression object to the current text
position.
if a match occurs, update the current text and line position.
"""
mp = self.match_position
match = reg.match(self.text, self.match_position)
if match:
(start, end) = match.span()
if end == start:
self.match_position = end + 1
else:
self.match_position = end
self.matched_lineno = self.lineno
lines = re.findall(r"\n", self.text[mp:self.match_position])
cp = mp - 1
while (cp >= 0 and cp < self.textlength and self.text[cp] != '\n'):
cp -= 1
self.matched_charpos = mp - cp
self.lineno += len(lines)
# print "MATCHED:", match.group(0), "LINE START:",
# self.matched_lineno, "LINE END:", self.lineno
# print "MATCH:", regexp, "\n", self.text[mp : mp + 15], \
# (match and "TRUE" or "FALSE")
return match
def parse_until_text(self, *text):
startpos = self.match_position
text_re = r'|'.join(text)
brace_level = 0
while True:
match = self.match(r'#.*\n')
if match:
continue
match = self.match(r'(\"\"\"|\'\'\'|\"|\')((?<!\\)\\\1|.)*?\1',
re.S)
if match:
continue
match = self.match(r'(%s)' % text_re)
if match:
if match.group(1) == '}' and brace_level > 0:
brace_level -= 1
continue
return \
self.text[startpos:
self.match_position - len(match.group(1))],\
match.group(1)
match = self.match(r"(.*?)(?=\"|\'|#|%s)" % text_re, re.S)
if match:
brace_level += match.group(1).count('{')
brace_level -= match.group(1).count('}')
continue
raise exceptions.SyntaxException(
"Expected: %s" %
','.join(text),
**self.exception_kwargs)
def append_node(self, nodecls, *args, **kwargs):
kwargs.setdefault('source', self.text)
kwargs.setdefault('lineno', self.matched_lineno)
kwargs.setdefault('pos', self.matched_charpos)
kwargs['filename'] = self.filename
node = nodecls(*args, **kwargs)
if len(self.tag):
self.tag[-1].nodes.append(node)
else:
self.template.nodes.append(node)
# build a set of child nodes for the control line
# (used for loop variable detection)
# also build a set of child nodes on ternary control lines
        # (used for determining if a pass needs to be auto-inserted)
if self.control_line:
control_frame = self.control_line[-1]
control_frame.nodes.append(node)
if not (isinstance(node, parsetree.ControlLine) and
control_frame.is_ternary(node.keyword)):
if self.ternary_stack and self.ternary_stack[-1]:
self.ternary_stack[-1][-1].nodes.append(node)
if isinstance(node, parsetree.Tag):
if len(self.tag):
node.parent = self.tag[-1]
self.tag.append(node)
elif isinstance(node, parsetree.ControlLine):
if node.isend:
self.control_line.pop()
self.ternary_stack.pop()
elif node.is_primary:
self.control_line.append(node)
self.ternary_stack.append([])
elif self.control_line and \
self.control_line[-1].is_ternary(node.keyword):
self.ternary_stack[-1].append(node)
elif self.control_line and \
not self.control_line[-1].is_ternary(node.keyword):
raise exceptions.SyntaxException(
"Keyword '%s' not a legal ternary for keyword '%s'" %
(node.keyword, self.control_line[-1].keyword),
**self.exception_kwargs)
_coding_re = re.compile(r'#.*coding[:=]\s*([-\w.]+).*\r?\n')
def decode_raw_stream(self, text, decode_raw, known_encoding, filename):
"""given string/unicode or bytes/string, determine encoding
from magic encoding comment, return body as unicode
or raw if decode_raw=False
"""
if isinstance(text, compat.text_type):
m = self._coding_re.match(text)
encoding = m and m.group(1) or known_encoding or 'ascii'
return encoding, text
if text.startswith(codecs.BOM_UTF8):
text = text[len(codecs.BOM_UTF8):]
parsed_encoding = 'utf-8'
m = self._coding_re.match(text.decode('utf-8', 'ignore'))
if m is not None and m.group(1) != 'utf-8':
raise exceptions.CompileException(
"Found utf-8 BOM in file, with conflicting "
"magic encoding comment of '%s'" % m.group(1),
text.decode('utf-8', 'ignore'),
0, 0, filename)
else:
m = self._coding_re.match(text.decode('utf-8', 'ignore'))
if m:
parsed_encoding = m.group(1)
else:
parsed_encoding = known_encoding or 'ascii'
if decode_raw:
try:
text = text.decode(parsed_encoding)
except UnicodeDecodeError:
raise exceptions.CompileException(
"Unicode decode operation of encoding '%s' failed" %
parsed_encoding,
text.decode('utf-8', 'ignore'),
0, 0, filename)
return parsed_encoding, text
def parse(self):
self.encoding, self.text = self.decode_raw_stream(
self.text,
not self.disable_unicode,
self.encoding,
self.filename)
for preproc in self.preprocessor:
self.text = preproc(self.text)
# push the match marker past the
# encoding comment.
self.match_reg(self._coding_re)
self.textlength = len(self.text)
while (True):
if self.match_position > self.textlength:
break
if self.match_end():
break
if self.match_expression():
continue
if self.match_control_line():
continue
if self.match_comment():
continue
if self.match_tag_start():
continue
if self.match_tag_end():
continue
if self.match_python_block():
continue
if self.match_text():
continue
if self.match_position > self.textlength:
break
raise exceptions.CompileException("assertion failed")
if len(self.tag):
raise exceptions.SyntaxException("Unclosed tag: <%%%s>" %
self.tag[-1].keyword,
**self.exception_kwargs)
if len(self.control_line):
raise exceptions.SyntaxException(
"Unterminated control keyword: '%s'" %
self.control_line[-1].keyword,
self.text,
self.control_line[-1].lineno,
self.control_line[-1].pos, self.filename)
return self.template
def match_tag_start(self):
match = self.match(r'''
\<% # opening tag
([\w\.\:]+) # keyword
((?:\s+\w+|\s*=\s*|".*?"|'.*?')*) # attrname, = \
# sign, string expression
\s* # more whitespace
(/)?> # closing
''',
re.I | re.S | re.X)
if match:
keyword, attr, isend = match.groups()
self.keyword = keyword
attributes = {}
if attr:
for att in re.findall(
r"\s*(\w+)\s*=\s*(?:'([^']*)'|\"([^\"]*)\")", attr):
key, val1, val2 = att
text = val1 or val2
text = text.replace('\r\n', '\n')
attributes[key] = text
self.append_node(parsetree.Tag, keyword, attributes)
if isend:
self.tag.pop()
else:
if keyword == 'text':
match = self.match(r'(.*?)(?=\</%text>)', re.S)
if not match:
raise exceptions.SyntaxException(
"Unclosed tag: <%%%s>" %
self.tag[-1].keyword,
**self.exception_kwargs)
self.append_node(parsetree.Text, match.group(1))
return self.match_tag_end()
return True
else:
return False
def match_tag_end(self):
match = self.match(r'\</%[\t ]*(.+?)[\t ]*>')
if match:
if not len(self.tag):
raise exceptions.SyntaxException(
"Closing tag without opening tag: </%%%s>" %
match.group(1),
**self.exception_kwargs)
elif self.tag[-1].keyword != match.group(1):
raise exceptions.SyntaxException(
"Closing tag </%%%s> does not match tag: <%%%s>" %
(match.group(1), self.tag[-1].keyword),
**self.exception_kwargs)
self.tag.pop()
return True
else:
return False
def match_end(self):
match = self.match(r'\Z', re.S)
if match:
string = match.group()
if string:
return string
else:
return True
else:
return False
def match_text(self):
match = self.match(r"""
(.*?) # anything, followed by:
(
(?<=\n)(?=[ \t]*(?=%|\#\#)) # an eval or line-based
# comment preceded by a
# consumed newline and whitespace
|
(?=\${) # an expression
|
(?=</?[%&]) # a substitution or block or call start or end
# - don't consume
|
(\\\r?\n) # an escaped newline - throw away
|
\Z # end of string
)""", re.X | re.S)
if match:
text = match.group(1)
if text:
self.append_node(parsetree.Text, text)
return True
else:
return False
def match_python_block(self):
match = self.match(r"<%(!)?")
if match:
line, pos = self.matched_lineno, self.matched_charpos
text, end = self.parse_until_text(r'%>')
# the trailing newline helps
# compiler.parse() not complain about indentation
text = adjust_whitespace(text) + "\n"
self.append_node(
parsetree.Code,
text,
match.group(1) == '!', lineno=line, pos=pos)
return True
else:
return False
def match_expression(self):
match = self.match(r"\${")
if match:
line, pos = self.matched_lineno, self.matched_charpos
text, end = self.parse_until_text(r'\|', r'}')
if end == '|':
escapes, end = self.parse_until_text(r'}')
else:
escapes = ""
text = text.replace('\r\n', '\n')
self.append_node(
parsetree.Expression,
text, escapes.strip(),
lineno=line, pos=pos)
return True
else:
return False
def match_control_line(self):
match = self.match(
r"(?<=^)[\t ]*(%(?!%)|##)[\t ]*((?:(?:\\r?\n)|[^\r\n])*)"
r"(?:\r?\n|\Z)", re.M)
if match:
operator = match.group(1)
text = match.group(2)
if operator == '%':
m2 = re.match(r'(end)?(\w+)\s*(.*)', text)
if not m2:
raise exceptions.SyntaxException(
"Invalid control line: '%s'" %
text,
**self.exception_kwargs)
isend, keyword = m2.group(1, 2)
isend = (isend is not None)
if isend:
if not len(self.control_line):
raise exceptions.SyntaxException(
"No starting keyword '%s' for '%s'" %
(keyword, text),
**self.exception_kwargs)
elif self.control_line[-1].keyword != keyword:
raise exceptions.SyntaxException(
"Keyword '%s' doesn't match keyword '%s'" %
(text, self.control_line[-1].keyword),
**self.exception_kwargs)
self.append_node(parsetree.ControlLine, keyword, isend, text)
else:
self.append_node(parsetree.Comment, text)
return True
else:
return False
def match_comment(self):
"""matches the multiline version of a comment"""
match = self.match(r"<%doc>(.*?)</%doc>", re.S)
if match:
self.append_node(parsetree.Comment, match.group(1))
return True
else:
return False
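# Illustrative usage sketch (an addition, not part of the original module):
# Lexer.parse() returns the parsetree.TemplateNode built from the source.
#
#     lexer = Lexer("hello ${name}!\n")
#     template_node = lexer.parse()
#     # template_node.nodes now holds parsetree.Text and parsetree.Expression
#     # nodes for the literal text and the ${name} expression.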
|
mit
|
vrv/tensorflow
|
tensorflow/python/kernel_tests/scatter_nd_ops_test.py
|
48
|
19446
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.tf.scatter_nd."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
def _AsType(v, vtype):
return v.astype(vtype) if isinstance(v, np.ndarray) else vtype(v)
def _FlatInnerDims(tensor, ndims=2):
shape = list(tensor.shape)
return tensor.reshape([
functools.reduce(lambda x, y: x * y, shape[:-ndims + 1], 1)
] + shape[-ndims + 1:])
def _FlatOuterDims(tensor, ndims=2):
shape = list(tensor.shape)
return tensor.reshape(shape[:ndims - 1] + [
functools.reduce(lambda x, y: x * y, shape[ndims - 1:], 1)
])
def _NumpyScatterNd(ref, indices, updates, op):
ixdim = indices.shape[-1]
num_updates = indices.size // ixdim
total_nd = len(ref.shape)
slice_size = 1
for i in range(ixdim, total_nd):
slice_size *= ref.shape[i]
flat_indices = _FlatInnerDims(indices)
flat_updates = updates.reshape((num_updates, slice_size))
output_flat = _FlatOuterDims(ref, ixdim + 1)
for ix_updates, ix_output in enumerate(flat_indices):
ix_output = tuple(ix_output)
output_flat[ix_output] = op(output_flat[ix_output],
flat_updates[ix_updates])
return output_flat.reshape(ref.shape)
def _NumpyUpdate(ref, indices, updates):
return _NumpyScatterNd(ref, indices, updates, lambda p, u: u)
def _NumpyAdd(ref, indices, updates):
return _NumpyScatterNd(ref, indices, updates, lambda p, u: p + u)
def _NumpySub(ref, indices, updates):
return _NumpyScatterNd(ref, indices, updates, lambda p, u: p - u)
def _NumpyMul(ref, indices, updates):
return _NumpyScatterNd(ref, indices, updates, lambda p, u: p * u)
def _NumpyDiv(ref, indices, updates):
return _NumpyScatterNd(ref, indices, updates, lambda p, u: p / u)
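# Worked example (an illustrative comment, not part of the original tests):
# with ref = np.zeros(4), indices = np.array([[1], [3]]) and
# updates = np.array([10., 20.]), _NumpyUpdate scatters the updates into
# positions 1 and 3 and returns array([ 0., 10.,  0., 20.]).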
class ScatterNdTest(test.TestCase):
def _VariableRankTest(self,
np_scatter,
tf_scatter,
vtype,
itype,
repeat_indices=False):
np.random.seed(8)
ref_shapes = [(3, 6), (3, 6), (3, 6, 9), (3, 6, 9), (3, 6, 9), (3, 6, 9)]
indices_shapes = [(2,), (2, 2), (2,), (2, 2), (2, 3), (2, 3, 3)]
with self.test_session(use_gpu=True):
for ref_shape, indices_shape in zip(ref_shapes, indices_shapes):
num_updates = indices_shape[0]
ixdim = indices_shape[-1]
indexable_area_shape = ()
for i in range(ixdim):
indexable_area_shape += (ref_shape[i],)
all_indices = [
list(coord)
for coord, _ in np.ndenumerate(
np.empty(indexable_area_shape, vtype))
]
np.random.shuffle(all_indices)
indices = np.array(all_indices[:num_updates])
if num_updates > 1 and repeat_indices:
indices = indices[:num_updates // 2]
for _ in range(num_updates - num_updates // 2):
indices = np.append(
indices, [indices[np.random.randint(num_updates // 2)]], axis=0)
np.random.shuffle(indices)
indices = _AsType(indices[:num_updates], itype)
updates_shape = (num_updates,)
for i in range(ixdim, len(ref_shape)):
updates_shape += (ref_shape[i],)
updates = _AsType(np.random.randn(*(updates_shape)), vtype)
ref = _AsType(np.random.randn(*(ref_shape)), vtype)
# Scatter via numpy
new = ref.copy()
np_scatter(new, indices, updates)
# Scatter via tensorflow
ref_var = variables.Variable(ref)
ref_var.initializer.run()
tf_scatter(ref_var, indices, updates).eval()
# Compare
self.assertAllClose(new, ref_var.eval())
def _VariableRankTests(self, np_scatter, tf_scatter):
for vtype in (np.float32, np.float64):
for itype in (np.int32, np.int64):
self._VariableRankTest(np_scatter, tf_scatter, vtype, itype)
def testSimple(self):
indices = constant_op.constant([[4], [3], [1], [7]], dtype=dtypes.int32)
updates = constant_op.constant([9, 10, 11, 12], dtype=dtypes.float32)
ref = variables.Variable([0, 0, 0, 0, 0, 0, 0, 0], dtype=dtypes.float32)
expected = np.array([0, 11, 0, 10, 9, 0, 0, 12])
scatter = state_ops.scatter_nd_update(ref, indices, updates)
init = variables.global_variables_initializer()
with self.test_session(use_gpu=True) as sess:
sess.run(init)
result = sess.run(scatter)
self.assertAllClose(result, expected)
def testSimple2(self):
indices = constant_op.constant([[1, 0], [1, 1]], dtype=dtypes.int32)
updates = constant_op.constant([11., 12.], dtype=dtypes.float32)
ref = variables.Variable(
[[0., 0.], [0., 0.], [0., 0.]], dtype=dtypes.float32)
expected = np.array([[0., 0.], [11., 12.], [0., 0.]])
scatter = state_ops.scatter_nd_update(ref, indices, updates)
init = variables.global_variables_initializer()
with self.test_session(use_gpu=True) as sess:
sess.run(init)
result = sess.run(scatter)
self.assertAllClose(result, expected)
def testSimple3(self):
indices = constant_op.constant([[1]], dtype=dtypes.int32)
updates = constant_op.constant([[11., 12.]], dtype=dtypes.float32)
ref = variables.Variable(
[[0., 0.], [0., 0.], [0., 0.]], dtype=dtypes.float32)
expected = np.array([[0., 0.], [11., 12.], [0., 0.]])
scatter = state_ops.scatter_nd_update(ref, indices, updates)
init = variables.global_variables_initializer()
with self.test_session(use_gpu=True) as sess:
sess.run(init)
result = sess.run(scatter)
self.assertAllClose(result, expected)
def testVariableRankUpdate(self):
self._VariableRankTests(_NumpyUpdate, state_ops.scatter_nd_update)
def testVariableRankAdd(self):
self._VariableRankTests(_NumpyAdd, state_ops.scatter_nd_add)
def testVariableRankSub(self):
self._VariableRankTests(_NumpySub, state_ops.scatter_nd_sub)
# TODO(simister): Re-enable once binary size increase due to
# scatter_nd ops is under control.
# def testVariableRankMul(self):
# self._VariableRankTests(_NumpyMul, tf.scatter_nd_mul)
# def testVariableRankDiv(self):
# self._VariableRankTests(_NumpyDiv, tf.scatter_nd_div)
def _ScatterRepeatIndicesTest(self, np_scatter, tf_scatter):
for vtype in (np.float32, np.float64):
for itype in (np.int32, np.int64):
self._VariableRankTest(
np_scatter, tf_scatter, vtype, itype, repeat_indices=True)
def testScatterRepeatIndices(self):
"""This tests scatter_add using indices that repeat."""
self._ScatterRepeatIndicesTest(_NumpyAdd, state_ops.scatter_nd_add)
self._ScatterRepeatIndicesTest(_NumpySub, state_ops.scatter_nd_sub)
# TODO(simister): Re-enable once binary size increase due to
# extra templating is back under control.
# self._ScatterRepeatIndicesTest(_NumpyMul, tf.scatter_nd_mul)
# self._ScatterRepeatIndicesTest(_NumpyDiv, tf.scatter_nd_div)
# TODO(simister): Re-enable once binary size increase due to
# extra templating is back under control and this op is re-enabled
# def testBooleanScatterUpdate(self):
# with self.test_session(use_gpu=False) as session:
# var = tf.Variable([True, False])
# update0 = tf.scatter_nd_update(var, [[1]], [True])
# update1 = tf.scatter_nd_update(
# var, tf.constant(
# [[0]], dtype=tf.int64), [False])
# var.initializer.run()
# session.run([update0, update1])
# self.assertAllEqual([False, True], var.eval())
def testScatterOutOfRangeCpu(self):
# TODO(simister): Re-enable once binary size increase due to
# scatter_nd ops is under control.
# tf.scatter_nd_mul, tf.scatter_nd_div,
for op in (state_ops.scatter_nd_add, state_ops.scatter_nd_sub,
state_ops.scatter_nd_update):
params = np.array([1, 2, 3, 4, 5, 6]).astype(np.float32)
updates = np.array([-3, -4, -5]).astype(np.float32)
with self.test_session(use_gpu=False):
ref = variables.Variable(params)
ref.initializer.run()
# Indices all in range, no problem.
indices = np.array([[2], [0], [5]])
op(ref, indices, updates).eval()
# Test some out of range errors.
indices = np.array([[-1], [0], [5]])
with self.assertRaisesOpError(
r"Invalid indices: \[0,0\] = \[-1\] is not in \[0, 6\)"):
op(ref, indices, updates).eval()
indices = np.array([[2], [0], [6]])
with self.assertRaisesOpError(
r"Invalid indices: \[2,0\] = \[6\] is not in \[0, 6\)"):
op(ref, indices, updates).eval()
def testRank3ValidShape(self):
indices = array_ops.zeros([2, 2, 2], dtypes.int32)
updates = array_ops.zeros([2, 2, 2], dtypes.int32)
shape = np.array([2, 2, 2])
self.assertAllEqual(
array_ops.scatter_nd(indices, updates, shape).get_shape().as_list(),
shape)
ref = variables.Variable(array_ops.zeros(shape, dtypes.int32))
self.assertAllEqual(
state_ops.scatter_nd_update(ref, indices,
updates).get_shape().as_list(), shape)
def testExtraIndicesDimensions(self):
indices = array_ops.zeros([1, 1, 2], dtypes.int32)
updates = array_ops.zeros([1, 1], dtypes.int32)
shape = np.array([2, 2])
scatter = array_ops.scatter_nd(indices, updates, shape)
self.assertAllEqual(scatter.get_shape().as_list(), shape)
expected_result = np.zeros([2, 2], dtype=np.int32)
with self.test_session():
self.assertAllEqual(expected_result, scatter.eval())
ref = variables.Variable(array_ops.zeros(shape, dtypes.int32))
scatter_update = state_ops.scatter_nd_update(ref, indices, updates)
self.assertAllEqual(scatter_update.get_shape().as_list(), shape)
with self.test_session():
ref.initializer.run()
self.assertAllEqual(expected_result, scatter_update.eval())
def testUndefinedIndicesShape(self):
indices = array_ops.placeholder(dtypes.int32, shape=None)
updates = array_ops.placeholder(dtypes.int32, shape=[2, 2, 2])
shape = constant_op.constant([2, 2, 2], dtypes.int32)
array_ops.scatter_nd(indices, updates, shape)
def testUndefinedUpdatesShape(self):
indices = array_ops.placeholder(dtypes.int32, shape=[2, 2, 2])
updates = array_ops.placeholder(dtypes.int32, shape=None)
shape = constant_op.constant([2, 2, 2], dtypes.int32)
array_ops.scatter_nd(indices, updates, shape)
def testUndefinedOutputShape(self):
indices = array_ops.placeholder(dtypes.int32, shape=[2, 2, 2])
updates = array_ops.placeholder(dtypes.int32, shape=[2, 2, 2])
shape = array_ops.placeholder(dtypes.int32, shape=[None])
array_ops.scatter_nd(indices, updates, shape)
def testEmptyOutputShape1(self):
indices = array_ops.zeros([2, 2, 2], dtypes.int32)
updates = array_ops.zeros([2, 2, 2], dtypes.int32)
shape = constant_op.constant([0, 3, 2], dtypes.int32)
with self.assertRaisesWithPredicateMatch(
ValueError, "Indices and updates specified for empty output shape"):
array_ops.scatter_nd(indices, updates, shape)
def testEmptyOutputShape2(self):
indices = array_ops.placeholder(dtypes.int32, shape=None)
updates = array_ops.placeholder(dtypes.int32, shape=None)
shape = constant_op.constant([0, 3, 2], dtypes.int32)
with self.test_session():
array_ops.scatter_nd(indices, updates, shape).eval(feed_dict={
indices: np.zeros(
[2, 2, 2], dtype=np.int32),
updates: np.zeros(
[2, 2, 2], dtype=np.int32)
})
def testEmptyOutputShape3(self):
indices = array_ops.zeros([0], dtypes.int32)
updates = array_ops.zeros([0], dtypes.int32)
shape = constant_op.constant([0], dtypes.int32)
scatter = array_ops.scatter_nd(indices, updates, shape)
with self.test_session():
self.assertEqual(scatter.eval().size, 0)
def testRank3InvalidShape1(self):
indices = array_ops.zeros([3, 2, 2], dtypes.int32)
updates = array_ops.zeros([2, 2, 2], dtypes.int32)
shape = np.array([2, 2, 2])
with self.assertRaisesWithPredicateMatch(
ValueError, "The outer \\d+ dimensions of indices\\.shape="):
array_ops.scatter_nd(indices, updates, shape)
ref = variables.Variable(array_ops.zeros(shape, dtypes.int32))
with self.assertRaisesWithPredicateMatch(
ValueError, "The outer \\d+ dimensions of indices\\.shape="):
state_ops.scatter_nd_update(ref, indices, updates)
def testRank3InvalidShape2(self):
indices = array_ops.zeros([2, 2, 1], dtypes.int32)
updates = array_ops.zeros([2, 2], dtypes.int32)
shape = np.array([2, 2, 2])
with self.assertRaisesWithPredicateMatch(
ValueError, "The inner \\d+ dimensions of output\\.shape="):
array_ops.scatter_nd(indices, updates, shape)
ref = variables.Variable(array_ops.zeros(shape, dtypes.int32))
with self.assertRaisesWithPredicateMatch(
ValueError, "The inner \\d+ dimensions of ref\\.shape="):
state_ops.scatter_nd_update(ref, indices, updates)
def testGradientsRank2ElementUpdate(self):
indices = constant_op.constant([[0, 0], [1, 1]], dtype=dtypes.int32)
updates = constant_op.constant([1, 4], dtype=dtypes.float64)
shape = constant_op.constant([2, 2], dtype=dtypes.int32)
outputs = array_ops.scatter_nd(indices, updates, shape)
grad_vals = constant_op.constant([[1, 2], [3, 4]], dtype=dtypes.float64)
grads = gradients_impl.gradients([outputs], [updates], [grad_vals])[0]
expected_grads = np.array([1, 4], dtype=np.float64)
with self.test_session():
self.assertAllEqual(expected_grads, grads.eval())
def testGradientsRank2SliceUpdate(self):
indices = constant_op.constant([[1], [0]], dtype=dtypes.int32)
updates = constant_op.constant([[3, 4], [1, 2]], dtype=dtypes.float64)
shape = constant_op.constant([2, 2], dtype=dtypes.int32)
outputs = array_ops.scatter_nd(indices, updates, shape)
grad_vals = constant_op.constant([[3, 4], [1, 2]], dtype=dtypes.float64)
grads = gradients_impl.gradients([outputs], [updates], [grad_vals])[0]
expected_grads = np.array([[1, 2], [3, 4]], dtype=np.float64)
with self.test_session():
self.assertAllEqual(expected_grads, grads.eval())
def testGradientsRank3SliceUpdate(self):
indices = constant_op.constant(
[[[0, 1], [1, 0]], [[0, 0], [1, 1]]], dtype=dtypes.int32)
updates = constant_op.constant(
[[[5, 7], [2, 4]], [[1, 3], [6, 8]]], dtype=dtypes.float64)
shape = constant_op.constant([2, 2, 2], dtype=dtypes.int32)
outputs = array_ops.scatter_nd(indices, updates, shape)
grad_vals = constant_op.constant(
[[[1, 2], [3, 4]], [[5, 6], [7, 8]]], dtype=dtypes.float64)
grads = gradients_impl.gradients([outputs], [updates], [grad_vals])[0]
expected_grads = np.array(
[[[3, 4], [5, 6]], [[1, 2], [7, 8]]], dtype=np.float64)
with self.test_session():
self.assertAllEqual(expected_grads, grads.eval())
def testConcurrentUpdates(self):
num_updates = 10000
update_values = np.random.rand(num_updates)
ref = variables.Variable(np.zeros([2, 2]), dtype=dtypes.float64)
indices = constant_op.constant([[0, 1]] * num_updates, dtype=dtypes.int32)
updates = constant_op.constant(update_values, dtype=dtypes.float64)
expected_result = np.zeros([2, 2], dtype=np.float64)
expected_result[0, 1] = np.sum(update_values)
scatter = state_ops.scatter_nd_add(ref, indices, updates)
init = variables.global_variables_initializer()
with session.Session() as sess:
sess.run(init)
result = sess.run(scatter)
assert np.allclose(result, expected_result)
# TODO(fpmc): Re-enable this test when gpu_pip test actually runs on a GPU.
def _disabledTestScatterOutOfRangeGpu(self):
if not test.IsBuiltWithCuda():
return
# TODO(simister): Re-enable once binary size increase due to
# scatter_nd ops is under control.
# tf.scatter_nd_mul, tf.scatter_nd_div,
for op in (state_ops.scatter_nd_add, state_ops.scatter_nd_sub,
state_ops.scatter_nd_update):
params = np.array([1, 2, 3, 4, 5, 6]).astype(np.float32)
updates = np.array([-3, -4, -5]).astype(np.float32)
# With GPU, the code ignores indices that are out of range.
# We don't test the implementation; just test there's no failures.
with self.test_session(force_gpu=True):
ref = variables.Variable(params)
ref.initializer.run()
# Indices all in range, no problem.
indices = np.array([2, 0, 5])
op(ref, indices, updates).eval()
        # Indices out of range should not fail.
indices = np.array([-1, 0, 5])
op(ref, indices, updates).eval()
indices = np.array([2, 0, 6])
op(ref, indices, updates).eval()
  def testScatterNdRepeatedIndicesAdd(self):
indices = array_ops.zeros([100000, 1], dtypes.int32)
values = np.random.randn(100000)
shape = [1]
with self.test_session():
val = array_ops.scatter_nd(indices, values, shape).eval()
self.assertAllClose([np.sum(values)], val)
def testSmokeScatterNdBatch2DSliceDim2(self):
with self.test_session():
indices = array_ops.zeros([3, 5, 2], dtype=dtypes.int32)
values = array_ops.zeros([3, 5, 7])
shape = [4, 6, 7]
array_ops.scatter_nd(indices, values, shape).eval()
def testSmokeScatterNdBatch1DSliceDim2(self):
with self.test_session():
indices = array_ops.zeros([0, 2], dtype=dtypes.int32)
values = array_ops.zeros([0, 7])
shape = [4, 6, 7]
array_ops.scatter_nd(indices, values, shape).eval()
def testSmokeScatterNdBatch1DSliceDim3ShapeRank7(self):
with self.test_session():
indices = array_ops.zeros([1, 3], dtype=dtypes.int32)
values = array_ops.zeros([1, 6, 7, 8, 9])
shape = [3, 4, 5, 6, 7, 8, 9]
array_ops.scatter_nd(indices, values, shape).eval()
def testSmokeScatterNdBatch2DSliceDim3ShapeRank7(self):
with self.test_session():
indices = array_ops.zeros([1, 2, 3], dtype=dtypes.int32)
values = array_ops.zeros([1, 2, 6, 7, 8, 9])
shape = [3, 4, 5, 6, 7, 8, 9]
array_ops.scatter_nd(indices, values, shape).eval()
if __name__ == "__main__":
test.main()
|
apache-2.0
|
pinobatch/thwaite-nes
|
tools/dte.py
|
1
|
12624
|
#!/usr/bin/env python3
from __future__ import with_statement, division, print_function, unicode_literals
from collections import defaultdict
import sys, heapq
dte_problem_definition = """
Byte pair encoding, dual tile encoding, or digram coding is a static
dictionary compression method first disclosed to the public by Philip
Gage in 1994. Each symbol in the compressed data represents a
sequence of two symbols, which may be compressed symbols or literals.
Its size performance is comparable to LZW but without needing RAM
for a dynamic dictionary.
http://www.drdobbs.com/a-new-algorithm-for-data-compression/184402829
The decompression is as follows:
for each symbol in the input:
push the symbol on the stack
while the stack is not empty:
pop a symbol from the stack
if the symbol is literal:
emit the symbol
else:
push the second child
push the first child
I'm not guaranteeing that it's optimal, but here's a greedy
compressor:
scan for frequencies of all symbol pairs
while a pair has high enough frequency:
allocate a new symbol
replace the old pair with the new symbol
decrease count of replaced pairs
increase count of newly created pairs
This is O(k*n) because of the replacements.
"""
lipsum = """"But I must explain to you how all this mistaken idea of denouncing pleasure and praising pain was born and I will give you a complete account of the system, and expound the actual teachings of the great explorer of the truth, the master-builder of human happiness. No one rejects, dislikes, or avoids pleasure itself, because it is pleasure, but because those who do not know how to pursue pleasure rationally encounter consequences that are extremely painful. Nor again is there anyone who loves or pursues or desires to obtain pain of itself, because it is pain, but because occasionally circumstances occur in which toil and pain can procure him some great pleasure. To take a trivial example, which of us ever undertakes laborious physical exercise, except to obtain some advantage from it? But who has any right to find fault with a man who chooses to enjoy a pleasure that has no annoying consequences, or one who avoids a pain that produces no resultant pleasure?
On the other hand, we denounce with righteous indignation and dislike men who are so beguiled and demoralized by the charms of pleasure of the moment, so blinded by desire, that they cannot foresee the pain and trouble that are bound to ensue; and equal blame belongs to those who fail in their duty through weakness of will, which is the same as saying through shrinking from toil and pain. These cases are perfectly simple and easy to distinguish. In a free hour, when our power of choice is untrammelled and when nothing prevents our being able to do what we like best, every pleasure is to be welcomed and every pain avoided. But in certain circumstances and owing to the claims of duty or the obligations of business it will frequently occur that pleasures have to be repudiated and annoyances accepted. The wise man therefore always holds in these matters to this principle of selection: he rejects pleasures to secure other greater pleasures, or else he endures pains to avoid worse pains.
--M. T. Cicero, "Extremes of Good and Evil", tr. H. Rackham"""
# Minimum frequency to compress a pair
MINFREQ = 3
# First code unit that is not considered a control character;
# should match value in vwf_draw.s
FIRST_PRINTABLE_CU = 0x14
def olcount(haystack, needle):
"""Count times the 2-character needle appears in haystack, including overlaps."""
if needle[0] != needle[1]:
return haystack.count(needle)
return len([c0 for c0, c1 in zip(haystack[:-1], haystack[1:])
if c0 == c1 == needle[0]])
def dte_count_changes(s, pairfrom, pairto):
"""Count changes to pair frequencies after replacing a given string."""
# Collect pair frequency updates
# Assuming hi->$:
# ghij -> gh -1, g$ +1, ij -1, $j + 1
# ghihij -> gh -1, g$ -1, ih -1, $$ + 1, ij -1, $j +1
assert isinstance(s, bytes)
assert isinstance(pairfrom, bytes)
assert isinstance(pairto, int)
assert len(pairfrom) == 2
newpairfreqs = defaultdict(lambda: 0)
i, ilen = 0, len(s)
lastsym = None
revpairfrom = pairfrom[::-1]
while i < ilen - 1:
if s[i:i + 2] != pairfrom:
lastsym = s[i]
i += 1
continue
# Handle the pair before the replacement
if i > 0:
newpairfreqs[bytes([lastsym, pairto])] += 1
newpairfreqs[bytes([lastsym, pairfrom[0]])] -= 1
lastsym = pairto
# Handle consecutive replaced pairs
# First count nonoverlapping pairs of the replacement
# ghij -> g$j
# ghihij -> g$$j
# ghihihij -> g$$$j
nollen = 0
while i < ilen:
i += 2
nollen += 1
nextsym = s[i:i + 2]
if nextsym != pairfrom:
break
# FIXME: This part needs to be replaced to handle
# overlapping semantics
if nollen >= 2:
newpairfreqs[bytes([pairto, pairto])] += nollen - 1
newpairfreqs[revpairfrom] -= nollen - 1
if nextsym:
newpairfreqs[bytes([pairto, nextsym[0]])] += 1
newpairfreqs[bytes([pairfrom[1], nextsym[0]])] -= 1
return newpairfreqs
def dte_newsymbol(lines, replacements, pairfreqs, compctrl=False, mincodeunit=128):
"""Find the biggest pair frequency and turn it into a new symbol."""
# I don't know how to move elements around in the heap, so instead,
# I'm recomputing the highest value every time. When frequencies
# are equal, prefer low numbered symbols for a less deep stack.
if compctrl:
useful_items = pairfreqs.items()
else:
useful_items = ((k, v)
for k, v in pairfreqs.items()
if all(c >= FIRST_PRINTABLE_CU for c in k))
strpair, freq = min(useful_items, key=lambda x: (-x[1], x[0]))
if freq < MINFREQ:
## print("Done. freq is", freq)
return True
try:
expected_freq = sum(olcount(line, strpair) for line in lines)
assert freq == expected_freq
except AssertionError:
print("frequency of %s in pairfreqs: %d\nfrequency in inputdata: %d"
% (repr(strpair), freq, expected_freq),
file=sys.stderr)
raise
# Allocate new symbol
newsym = mincodeunit + len(replacements)
replacements.append(strpair)
# Update pair frequencies
for line in lines:
for k, v in dte_count_changes(line, strpair, newsym).items():
if v:
pairfreqs[k] += v
del pairfreqs[strpair]
return False
def dte_compress(lines, compctrl=False, checkfreqs=True, mincodeunit=128):
"""Compress a set of byte strings with DTE.
lines -- a list of byte strings to compress, where no code unit
is greater than mincodeunit
compctrl -- if False, exclude control characters ('\x00'-'\x1F')
from compression; if True, compress them as any other
"""
# Initial frequency pair scan
# pairfreqs[c] represents the number of times the two-character
# sequence c occurs throughout lines
pairfreqs = defaultdict(lambda: 0)
for line in lines:
for i in range(len(line) - 1):
key = line[i:i + 2]
pairfreqs[key] += 1
# now we use overlapping matches: oooo is three of oo
# and we leave the compctrl exclusion for dte_newsymbol
replacements = []
lastmaxpairs = 0
done = False
while len(replacements) < 256 - mincodeunit and not done:
if checkfreqs:
inputdata = b'\xff'.join(lines)
numfailed = 0
for strpair, freq in pairfreqs.items():
expectfreq = olcount(inputdata, strpair)
if expectfreq != freq:
print("Frequency pair scan problem: %s %d!=expected %d"
% (repr(strpair), freq, expectfreq),
file=sys.stderr)
numfailed += 1
if numfailed > 0:
if replacements:
print("Last replacement was %s with \\x%02x"
% (repr(replacements[-1]),
len(replacements) + mincodeunit - 1),
file=sys.stderr)
assert False
curinputlen = sum(len(line) + 1 for line in lines)
## print("text:%5d bytes; dict:%4d bytes; pairs:%5d"
## % (curinputlen, 2 * len(replacements), len(pairfreqs)),
## file=sys.stderr)
done = dte_newsymbol(lines, replacements, pairfreqs,
mincodeunit=mincodeunit)
if done:
break
newsymbol = bytes([len(replacements) + mincodeunit - 1])
for i in range(len(lines)):
lines[i] = lines[i].replace(replacements[-1], newsymbol)
if len(pairfreqs) >= lastmaxpairs * 2:
for i in list(pairfreqs):
if pairfreqs[i] <= 0:
del pairfreqs[i]
lastmaxpairs = len(pairfreqs)
return lines, replacements, pairfreqs
def dte_uncompress(line, replacements, mincodeunit=128):
outbuf = bytearray()
s = []
maxstack = 0
for c in line:
s.append(c)
while s:
maxstack = max(len(s), maxstack)
c = s.pop()
if 0 <= c - mincodeunit < len(replacements):
repl = replacements[c - mincodeunit]
s.extend(reversed(repl))
## print("%02x: %s" % (c, repr(repl)), file=sys.stderr)
## print(repr(s), file=sys.stderr)
else:
outbuf.append(c)
return bytes(outbuf), maxstack
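# Illustrative round trip (an addition, not part of the original module;
# assumes plain ASCII input and the default mincodeunit of 128):
#
#     lines, repls, _ = dte_compress([b"the fat cat sat on the mat"])
#     assert dte_uncompress(lines[0], repls)[0] == b"the fat cat sat on the mat"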
def dte_tests():
import a53charset
inputdatas = [
"The fat cat sat on the mat.",
'boooooobies booooooobies',
lipsum,
]
with open("../src/helppages.txt", "r") as infp:
inputdatas.append(infp.read())
for text in inputdatas:
lines = [text.encode("action53")]
ctxt, replacements, pairfreqs = dte_compress(lines)
print("compressed %d chaacters to %d bytes and %d replacements"
% (len(text), len(ctxt[0]), len(replacements)))
dtxt, stkd = dte_uncompress(ctxt[0], replacements)
outtxt = dtxt.decode("action53")
print("decompressed to %d characters with %d stack depth"
% (len(dtxt), stkd))
assert outtxt == text
# Compress NKI descriptions for robotfindskitten
def nki_main(argv=None):
# Load input files
argv = argv or sys.argv
lines = []
for filename in argv[1:]:
with open(filename, 'rU') as infp:
lines.extend(row.strip() for row in infp)
# Remove blank lines and comments
lines = [row.encode('ascii')
for row in lines
if row and not row.startswith('#')]
# Diagnostic for line length (RFK RFC forbids lines longer than 72)
lgst = heapq.nlargest(10, lines, len)
if len(lgst[0]) > 72:
print("Some NKIs are too long (more than 72 characters):", file=sys.stderr)
print("\n".join(line for line in lgst if len(line) > 72), file=sys.stderr)
else:
print("Longest NKI is OK at %d characters. Don't let it get any longer."
% len(lgst[0]), file=sys.stderr)
print(lgst[0], file=sys.stderr)
oldinputlen = sum(len(line) + 1 for line in lines)
lines, replacements, pairfreqs = dte_compress(lines)
print("%d replacements; highest remaining frequency is %d"
% (len(replacements), max(pairfreqs.values())), file=sys.stderr)
finallen = len(replacements) * 2 + sum(len(line) + 1 for line in lines)
stkd = max(dte_uncompress(line, replacements)[1] for line in lines)
print("from %d to %d bytes with peak stack depth: %d"
% (oldinputlen, finallen, stkd), file=sys.stderr)
replacements = b''.join(replacements)
num_nkis = len(lines)
lines = b''.join(line + b'\x00' for line in lines)
from vwfbuild import ca65_bytearray
outfp = sys.stdout
outfp.write("""; Generated with dte.py; do not edit
.export NUM_NKIS, nki_descriptions, nki_replacements
NUM_NKIS = %d
.segment "NKIDATA"
nki_descriptions:
%s
nki_replacements:
%s
""" % (num_nkis, ca65_bytearray(lines), ca65_bytearray(replacements)))
if __name__=='__main__':
## main()
## main([sys.argv[0], "../../rfk/src/fixed.nki", "../../rfk/src/default.nki"])
dte_tests()
|
gpl-3.0
|
wjt/markgown
|
markgown/editor.py
|
1
|
3156
|
#!/usr/bin/python
# vim: set fileencoding=utf-8 :
#
# © 2012 Will Thompson <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from gi.repository import GLib, Gtk, Gio, GObject
from markdownview import MarkdownView
from pips import Pips
from sourceview import MarkdownSourceView, MarkdownBuffer
from rebuilder import Rebuilder
import os.path
import sys
class MarkgownWindow(Gtk.Window):
def __init__(self):
Gtk.Window.__init__(self)
self.filename = os.path.realpath(sys.argv[1])
self.loaded = False
base, _ = os.path.splitext(self.filename)
# FIXME: make this a dotfile? delete it on quit?
self.html_file = "%s.html" % base
self.set_default_size(1000, 600)
paned = Gtk.Paned.new(Gtk.Orientation.HORIZONTAL)
self.add(paned)
sw = Gtk.ScrolledWindow()
self.b = MarkdownBuffer(self.filename)
view = MarkdownSourceView(self.b)
sw.add(view)
overlay = Gtk.Overlay()
overlay.add(sw)
self.spinner = Pips()
self.spinner.set_halign(Gtk.Align.END)
self.spinner.set_valign(Gtk.Align.END)
overlay.add_overlay(self.spinner)
paned.add1(overlay)
self.markdownview = MarkdownView(url=('file://' + self.html_file))
self.markdownview.connect('title_changed', self.title_changed_cb)
paned.add2(self.markdownview)
paned.set_position(600)
self.show_all()
self.rebuilder = Rebuilder(self.filename, self.html_file)
self.rebuilder.connect('rebuilt', lambda *args: self.markdownview.refresh())
self.rebuilder.rebuild()
self.b.connect('modified-changed', lambda args: self.check_modified())
self.check_modified()
self.connect('delete-event', self.__check_save)
def __check_save(self, *args):
if self.b.get_modified():
self.save()
return False
def save(self):
self.b.save()
return False
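    # Note: save() doubles as the GLib timeout callback installed by
    # check_modified(); returning False tells GLib not to repeat the timeout,
    # so each burst of edits triggers at most one delayed save.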
def check_modified(self):
if self.b.get_modified():
GLib.timeout_add_seconds(5, self.save)
self.spinner.count_down_from(5)
self.spinner.show()
else:
self.spinner.hide()
def title_changed_cb(self, view, title):
if title is None:
title = self.filename
else:
title = '%s (%s)' % (title, self.filename)
self.set_title(title)
if __name__ == '__main__':
window = MarkgownWindow()
window.connect('destroy', Gtk.main_quit)
Gtk.main()
|
gpl-3.0
|
OpenNetworkingFoundation/Snowmass-ONFOpenTransport
|
RI/flask_server/tapi_server/models/tapi_notification_service_affecting.py
|
4
|
1187
|
# coding: utf-8
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from tapi_server.models.base_model_ import Model
from tapi_server import util
class TapiNotificationServiceAffecting(Model):
"""NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
Do not edit the class manually.
"""
"""
allowed enum values
"""
SERVICE_AFFECTING = "SERVICE_AFFECTING"
NOT_SERVICE_AFFECTING = "NOT_SERVICE_AFFECTING"
UNKNOWN = "UNKNOWN"
def __init__(self): # noqa: E501
"""TapiNotificationServiceAffecting - a model defined in OpenAPI
"""
self.openapi_types = {
}
self.attribute_map = {
}
@classmethod
def from_dict(cls, dikt) -> 'TapiNotificationServiceAffecting':
"""Returns the dict as a model
:param dikt: A dict.
:type: dict
:return: The tapi.notification.ServiceAffecting of this TapiNotificationServiceAffecting. # noqa: E501
:rtype: TapiNotificationServiceAffecting
"""
return util.deserialize_model(dikt, cls)
|
apache-2.0
|
hiro2016/ergodox_gui_configurator
|
Xlib/keysymdef/latin2.py
|
14
|
1075
|
XK_Aogonek = 0x1a1
XK_breve = 0x1a2
XK_Lstroke = 0x1a3
XK_Lcaron = 0x1a5
XK_Sacute = 0x1a6
XK_Scaron = 0x1a9
XK_Scedilla = 0x1aa
XK_Tcaron = 0x1ab
XK_Zacute = 0x1ac
XK_Zcaron = 0x1ae
XK_Zabovedot = 0x1af
XK_aogonek = 0x1b1
XK_ogonek = 0x1b2
XK_lstroke = 0x1b3
XK_lcaron = 0x1b5
XK_sacute = 0x1b6
XK_caron = 0x1b7
XK_scaron = 0x1b9
XK_scedilla = 0x1ba
XK_tcaron = 0x1bb
XK_zacute = 0x1bc
XK_doubleacute = 0x1bd
XK_zcaron = 0x1be
XK_zabovedot = 0x1bf
XK_Racute = 0x1c0
XK_Abreve = 0x1c3
XK_Lacute = 0x1c5
XK_Cacute = 0x1c6
XK_Ccaron = 0x1c8
XK_Eogonek = 0x1ca
XK_Ecaron = 0x1cc
XK_Dcaron = 0x1cf
XK_Dstroke = 0x1d0
XK_Nacute = 0x1d1
XK_Ncaron = 0x1d2
XK_Odoubleacute = 0x1d5
XK_Rcaron = 0x1d8
XK_Uring = 0x1d9
XK_Udoubleacute = 0x1db
XK_Tcedilla = 0x1de
XK_racute = 0x1e0
XK_abreve = 0x1e3
XK_lacute = 0x1e5
XK_cacute = 0x1e6
XK_ccaron = 0x1e8
XK_eogonek = 0x1ea
XK_ecaron = 0x1ec
XK_dcaron = 0x1ef
XK_dstroke = 0x1f0
XK_nacute = 0x1f1
XK_ncaron = 0x1f2
XK_odoubleacute = 0x1f5
XK_udoubleacute = 0x1fb
XK_rcaron = 0x1f8
XK_uring = 0x1f9
XK_tcedilla = 0x1fe
XK_abovedot = 0x1ff
|
gpl-2.0
|
hujiajie/chromium-crosswalk
|
tools/ipc_fuzzer/scripts/remove_close_messages.py
|
40
|
2548
|
#!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Removes ViewHostMsg_Close and alike from testcases. These messages are an
annoyance for corpus distillation. They cause the browser to exit, so no
further messages are processed. On the other hand, ViewHostMsg_Close is useful
for fuzzing - many found bugs are related to a renderer disappearing. So the
fuzzer should be crafting random ViewHostMsg_Close messages.
"""
import argparse
import os
import platform
import shutil
import subprocess
import sys
import tempfile
def create_temp_file():
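  # delete=False keeps the file on disk after close(), so the external
  # ipc_message_util process can open and write to it; the caller later
  # moves it over the original testcase with shutil.move().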
temp_file = tempfile.NamedTemporaryFile(delete=False)
temp_file.close()
return temp_file.name
def main():
  desc = 'Remove ViewHostMsg_Close and the like from the testcases.'
parser = argparse.ArgumentParser(description=desc)
parser.add_argument('--out-dir', dest='out_dir', default='out',
                      help='output directory under the src/ directory')
parser.add_argument('--build-type', dest='build_type', default='Release',
help='Debug vs. Release build')
parser.add_argument('testcase_dir',
help='Directory containing testcases')
parsed = parser.parse_args()
message_util_binary = 'ipc_message_util'
script_path = os.path.realpath(__file__)
ipc_fuzzer_dir = os.path.join(os.path.dirname(script_path), os.pardir)
src_dir = os.path.abspath(os.path.join(ipc_fuzzer_dir, os.pardir, os.pardir))
  out_dir = os.path.join(src_dir, parsed.out_dir)
build_dir = os.path.join(out_dir, parsed.build_type)
message_util_path = os.path.join(build_dir, message_util_binary)
if not os.path.exists(message_util_path):
print 'ipc_message_util executable not found at ', message_util_path
return 1
filter_command = [
message_util_path,
'--invert',
'--regexp=ViewHostMsg_Close|ViewHostMsg_ClosePage_ACK',
'input',
'output',
]
testcase_list = os.listdir(parsed.testcase_dir)
testcase_count = len(testcase_list)
index = 0
for testcase in testcase_list:
index += 1
print '[%d/%d] Processing %s' % (index, testcase_count, testcase)
testcase_path = os.path.join(parsed.testcase_dir, testcase)
filtered_path = create_temp_file()
filter_command[-2] = testcase_path
filter_command[-1] = filtered_path
subprocess.call(filter_command)
shutil.move(filtered_path, testcase_path)
return 0
if __name__ == "__main__":
sys.exit(main())
|
bsd-3-clause
|
followcat/predator
|
jobs/definition/batchconvert_cloudshare_jingying.py
|
1
|
1798
|
import Queue
import functools
import utils.builtin
import jobs.definition.cloudshare_jingying
from jobs.definition.batchconvert import *
class Batchconvert(BatchconvertCloudshare,
jobs.definition.cloudshare_jingying.Jingying):
CVDB_PATH = 'convert/jingying'
ORIGIN_CVDB_PATH = 'output/jingying'
def jobgenerator(self, idlist):
for classify_id in idlist:
yamlname = classify_id + '.yaml'
yamldata = utils.builtin.load_yaml('output/jingying/JOBTITLES', yamlname)
sorted_id = sorted(yamldata,
key = lambda cvid: yamldata[cvid]['peo'][-1],
reverse=True)
for cv_id in sorted_id:
if self.oristorage.existsraw(cv_id) and not self.cvstorage.existscv(cv_id):
cv_info = yamldata[cv_id]
job_process = functools.partial(self.convertjob, cv_info)
yield job_process
def extract_details(self, uploaded_details, cv_content):
details = super(Batchconvert, self).extract_details(uploaded_details, cv_content)
details['date'] = uploaded_details['date']
return details
if __name__ == '__main__':
industry_yamls = jobs.definition.cloudshare_jingying.industry_yamls
instance = Batchconvert()
PROCESS_GEN = instance.jobgenerator(industry_yamls)
queue_saver = Queue.Queue(0)
t1 = ThreadConverter('1', queue_saver, PROCESS_GEN)
t2 = ThreadConverter('2', queue_saver, PROCESS_GEN)
t3 = ThreadConverter('3', queue_saver, PROCESS_GEN)
t4 = ThreadConverter('4', queue_saver, PROCESS_GEN)
saver = ThreadSaver('saver', queue_saver, instance.cvstorage)
saver.start()
t1.start()
t2.start()
t3.start()
t4.start()
|
lgpl-3.0
|
florian-dacosta/OpenUpgrade
|
addons/payment_paypal/controllers/main.py
|
66
|
3201
|
# -*- coding: utf-8 -*-
try:
import simplejson as json
except ImportError:
import json
import logging
import pprint
import urllib2
import werkzeug
from openerp import http, SUPERUSER_ID
from openerp.http import request
_logger = logging.getLogger(__name__)
class PaypalController(http.Controller):
_notify_url = '/payment/paypal/ipn/'
_return_url = '/payment/paypal/dpn/'
_cancel_url = '/payment/paypal/cancel/'
def _get_return_url(self, **post):
""" Extract the return URL from the data coming from paypal. """
return_url = post.pop('return_url', '')
if not return_url:
custom = json.loads(post.pop('custom', '{}'))
return_url = custom.get('return_url', '/')
return return_url
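    # For example (hypothetical data), a post containing
    # {'custom': '{"return_url": "/shop/payment"}'} resolves to
    # '/shop/payment'; with neither hint present it falls back to '/'.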
def paypal_validate_data(self, **post):
""" Paypal IPN: three steps validation to ensure data correctness
- step 1: return an empty HTTP 200 response -> will be done at the end
by returning ''
- step 2: POST the complete, unaltered message back to Paypal (preceded
by cmd=_notify-validate), with same encoding
- step 3: paypal send either VERIFIED or INVALID (single word)
Once data is validated, process it. """
res = False
new_post = dict(post, cmd='_notify-validate')
urequest = urllib2.Request("https://www.sandbox.paypal.com/cgi-bin/webscr", werkzeug.url_encode(new_post))
uopen = urllib2.urlopen(urequest)
resp = uopen.read()
if resp == 'VERIFIED':
_logger.info('Paypal: validated data')
cr, uid, context = request.cr, SUPERUSER_ID, request.context
res = request.registry['payment.transaction'].form_feedback(cr, uid, post, 'paypal', context=context)
elif resp == 'INVALID':
_logger.warning('Paypal: answered INVALID on data verification')
else:
            _logger.warning('Paypal: unrecognized paypal answer, received %s instead of VERIFIED or INVALID' % resp)
return res
@http.route('/payment/paypal/ipn/', type='http', auth='none', methods=['POST'])
def paypal_ipn(self, **post):
""" Paypal IPN. """
_logger.info('Beginning Paypal IPN form_feedback with post data %s', pprint.pformat(post)) # debug
self.paypal_validate_data(**post)
return ''
@http.route('/payment/paypal/dpn', type='http', auth="none", methods=['POST'])
def paypal_dpn(self, **post):
""" Paypal DPN """
_logger.info('Beginning Paypal DPN form_feedback with post data %s', pprint.pformat(post)) # debug
return_url = self._get_return_url(**post)
self.paypal_validate_data(**post)
return werkzeug.utils.redirect(return_url)
@http.route('/payment/paypal/cancel', type='http', auth="none")
def paypal_cancel(self, **post):
""" When the user cancels its Paypal payment: GET on this route """
cr, uid, context = request.cr, SUPERUSER_ID, request.context
_logger.info('Beginning Paypal cancel with post data %s', pprint.pformat(post)) # debug
return_url = self._get_return_url(**post)
return werkzeug.utils.redirect(return_url)
|
agpl-3.0
|
wangjun/odoo
|
addons/hr_payroll/__init__.py
|
433
|
1137
|
#-*- coding:utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). All Rights Reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import hr_payroll
import report
import wizard
import res_config
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
RyanChinSang/ECNG3020-ORSS4SCVI
|
BETA/TestCode/Tensorflow/Test/object_detection/core/region_similarity_calculator.py
|
23
|
3687
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Region Similarity Calculators for BoxLists.
Region Similarity Calculators compare a pairwise measure of similarity
between the boxes in two BoxLists.
"""
from abc import ABCMeta
from abc import abstractmethod
import tensorflow as tf
from object_detection.core import box_list_ops
class RegionSimilarityCalculator(object):
"""Abstract base class for region similarity calculator."""
__metaclass__ = ABCMeta
def compare(self, boxlist1, boxlist2, scope=None):
"""Computes matrix of pairwise similarity between BoxLists.
    This op (to be overridden) computes a measure of pairwise similarity between
the boxes in the given BoxLists. Higher values indicate more similarity.
Note that this method simply measures similarity and does not explicitly
perform a matching.
Args:
boxlist1: BoxList holding N boxes.
boxlist2: BoxList holding M boxes.
scope: Op scope name. Defaults to 'Compare' if None.
Returns:
a (float32) tensor of shape [N, M] with pairwise similarity score.
"""
with tf.name_scope(scope, 'Compare', [boxlist1, boxlist2]) as scope:
return self._compare(boxlist1, boxlist2)
@abstractmethod
def _compare(self, boxlist1, boxlist2):
pass
class IouSimilarity(RegionSimilarityCalculator):
"""Class to compute similarity based on Intersection over Union (IOU) metric.
This class computes pairwise similarity between two BoxLists based on IOU.
"""
def _compare(self, boxlist1, boxlist2):
"""Compute pairwise IOU similarity between the two BoxLists.
Args:
boxlist1: BoxList holding N boxes.
boxlist2: BoxList holding M boxes.
Returns:
A tensor with shape [N, M] representing pairwise iou scores.
"""
return box_list_ops.iou(boxlist1, boxlist2)
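# For reference, IOU is the standard overlap ratio
#   iou(a, b) = area(a & b) / (area(a) + area(b) - area(a & b)),
# computed here pairwise over all N x M box combinations by box_list_ops.iou.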
class NegSqDistSimilarity(RegionSimilarityCalculator):
"""Class to compute similarity based on the squared distance metric.
This class computes pairwise similarity between two BoxLists based on the
negative squared distance metric.
"""
def _compare(self, boxlist1, boxlist2):
"""Compute matrix of (negated) sq distances.
Args:
boxlist1: BoxList holding N boxes.
boxlist2: BoxList holding M boxes.
Returns:
A tensor with shape [N, M] representing negated pairwise squared distance.
"""
return -1 * box_list_ops.sq_dist(boxlist1, boxlist2)
class IoaSimilarity(RegionSimilarityCalculator):
"""Class to compute similarity based on Intersection over Area (IOA) metric.
This class computes pairwise similarity between two BoxLists based on their
pairwise intersections divided by the areas of second BoxLists.
"""
def _compare(self, boxlist1, boxlist2):
"""Compute pairwise IOA similarity between the two BoxLists.
Args:
boxlist1: BoxList holding N boxes.
boxlist2: BoxList holding M boxes.
Returns:
A tensor with shape [N, M] representing pairwise IOA scores.
"""
return box_list_ops.ioa(boxlist1, boxlist2)
|
gpl-3.0
|
davebshow/gremlinclient
|
tests/test_tornado.py
|
1
|
16608
|
import uuid
import unittest
from datetime import timedelta
import tornado
from tornado import gen
from tornado.concurrent import Future
from tornado.websocket import WebSocketClientConnection
from tornado.testing import gen_test, AsyncTestCase
from gremlinclient.connection import Stream
from gremlinclient.tornado_client import (
submit, GraphDatabase, Pool, create_connection, Response, RemoteConnection)
from gremlin_python import PythonGraphTraversalSource, GroovyTranslator
class RemoteConnectionTest(unittest.TestCase):
def setUp(self):
super(RemoteConnectionTest, self).setUp()
self.conn = RemoteConnection("ws://localhost:8182/")
def tearDown(self):
super(RemoteConnectionTest, self).tearDown()
self.conn.close()
def test_submit(self):
result = self.conn.submit("gremlin-groovy", "x + x", bindings={"x": 1})
result = list(result)
self.assertEqual(result[0].object, 2)
def test_traversal(self):
g = PythonGraphTraversalSource(GroovyTranslator("g"),
remote_connection=self.conn)
node = g.addV('person').property('name','stephen').next()
        self.assertEqual(node['properties']['name'][0]['value'], 'stephen')
class TornadoFactoryConnectTest(AsyncTestCase):
def setUp(self):
super(TornadoFactoryConnectTest, self).setUp()
self.graph = GraphDatabase("ws://localhost:8182/",
username="stephen",
password="password")
@gen_test
def test_connect(self):
connection = yield self.graph.connect()
conn = connection.conn._conn
self.assertIsNotNone(conn.protocol)
self.assertIsInstance(conn, WebSocketClientConnection)
conn.close()
@gen_test
def test_bad_port_exception(self):
graph = GraphDatabase("ws://localhost:81/")
with self.assertRaises(RuntimeError):
connection = yield graph.connect()
@gen_test
def test_wrong_protocol_exception(self):
graph = GraphDatabase("wss://localhost:8182/")
with self.assertRaises(RuntimeError):
connection = yield graph.connect()
# Check this out
# @gen_test
# def test_bad_host_exception(self):
# graph = GraphDatabase("ws://locaost:8182/")
# with self.assertRaises(RuntimeError):
# connection = yield graph.connect()
@gen_test
def test_send(self):
connection = yield self.graph.connect()
resp = connection.send("1 + 1")
while True:
msg = yield resp.read()
print(msg)
if msg is None:
break
self.assertEqual(msg.status_code, 200)
self.assertEqual(msg.data[0], 2)
resp2 = connection.send("2 + 2")
while True:
msg = yield resp2.read()
print(msg)
if msg is None:
break
self.assertEqual(msg.status_code, 200)
self.assertEqual(msg.data[0], 4)
connection.conn.close()
@gen_test
def test_handler(self):
connection = yield self.graph.connect()
resp = connection.send("1 + 1", handler=lambda x: x[0] * 2)
while True:
msg = yield resp.read()
if msg is None:
break
self.assertEqual(msg, 4)
connection.conn.close()
@gen_test
def test_add_handler(self):
connection = yield self.graph.connect()
resp = connection.send("1 + 1", handler=lambda x: x[0] * 2)
resp.add_handler(lambda x: x ** 2)
while True:
msg = yield resp.read()
if msg is None:
break
self.assertEqual(msg, 16)
connection.conn.close()
@gen_test
def test_read_one_on_closed(self):
connection = yield self.graph.connect()
resp = connection.send("1 + 1")
connection.close()
with self.assertRaises(RuntimeError):
msg = yield resp.read()
@gen_test
def test_null_read_on_closed(self):
connection = yield self.graph.connect()
# build connection
connection.close()
stream = Stream(connection, None, "processor", None, None, "stephen",
"password", False, False, Future)
with self.assertRaises(RuntimeError):
msg = yield stream.read()
# @gen_test
    # def test_credentials_error(self):
# graph = GraphDatabase("ws://localhost:8182/",
# username="stephen",
# password="passwor")
# connection = yield graph.connect()
# resp = connection.send("1 + 1")
# with self.assertRaises(RuntimeError):
# msg = yield resp.read()
# connection.conn.close()
@gen_test
def test_force_close(self):
connection = yield self.graph.connect(force_close=True)
resp = connection.send("1 + 1")
while True:
msg = yield resp.read()
if msg is None:
break
self.assertEqual(msg.status_code, 200)
self.assertEqual(msg.data[0], 2)
self.assertTrue(connection.conn.closed)
class TornadoPoolTest(AsyncTestCase):
@gen_test
def test_acquire(self):
pool = Pool("ws://localhost:8182/",
maxsize=2,
username="stephen",
password="password")
connection = yield pool.acquire()
conn = connection.conn
self.assertFalse(conn.closed)
self.assertIsInstance(conn, Response)
self.assertEqual(pool.size, 1)
self.assertTrue(connection in pool._acquired)
connection2 = yield pool.acquire()
conn2 = connection2.conn
self.assertFalse(conn2.closed)
self.assertIsInstance(conn2, Response)
self.assertEqual(pool.size, 2)
self.assertTrue(connection2 in pool._acquired)
self.assertNotEqual(conn, conn2)
conn.close()
conn2.close()
@gen_test
def test_acquire_send(self):
pool = Pool("ws://localhost:8182/",
maxsize=2,
username="stephen",
password="password")
connection = yield pool.acquire()
resp = connection.send("1 + 1")
while True:
msg = yield resp.read()
if msg is None:
break
self.assertEqual(msg.status_code, 200)
self.assertEqual(msg.data[0], 2)
connection.conn.close()
@gen_test
def test_maxsize(self):
pool = Pool("ws://localhost:8182/",
maxsize=2,
username="stephen",
password="password")
c1 = yield pool.acquire()
c2 = yield pool.acquire()
c3 = pool.acquire()
self.assertIsInstance(c3, Future)
with self.assertRaises(tornado.gen.TimeoutError):
yield gen.with_timeout(timedelta(seconds=0.1), c3)
c1.conn.close()
c2.conn.close()
@gen_test
def test_release(self):
pool = Pool("ws://localhost:8182/",
maxsize=2,
username="stephen",
password="password")
self.assertEqual(len(pool.pool), 0)
c1 = yield pool.acquire()
self.assertEqual(len(pool._acquired), 1)
yield pool.release(c1)
self.assertEqual(len(pool.pool), 1)
self.assertEqual(len(pool._acquired), 0)
@gen_test
def test_release_acquire(self):
pool = Pool("ws://localhost:8182/",
maxsize=2,
username="stephen",
password="password")
c1 = yield pool.acquire()
yield pool.release(c1)
c2 = yield pool.acquire()
self.assertEqual(c1, c2)
@gen_test
def test_pool_too_big(self):
pool1 = Pool("ws://localhost:8182/",
maxsize=2,
username="stephen",
password="password")
pool2 = Pool("ws://localhost:8182/",
username="stephen",
password="password")
conn1 = yield pool1.acquire()
conn2 = yield pool2.acquire()
conn3 = yield pool2.acquire()
conn4 = yield pool2.acquire()
pool1.pool.append(conn2)
pool1.pool.append(conn3)
pool1.pool.append(conn4)
yield pool1.release(conn1)
self.assertTrue(conn1.closed)
@gen_test
def test_release_closed(self):
pool = Pool("ws://localhost:8182/",
maxsize=2,
username="stephen",
password="password")
self.assertEqual(len(pool.pool), 0)
c1 = yield pool.acquire()
self.assertEqual(len(pool._acquired), 1)
c1.close()
yield pool.release(c1)
self.assertEqual(len(pool.pool), 0)
self.assertEqual(len(pool._acquired), 0)
@gen_test
def test_self_release(self):
pool = Pool("ws://localhost:8182/",
maxsize=2,
username="stephen",
password="password",
force_release=True)
self.assertEqual(len(pool.pool), 0)
c1 = yield pool.acquire()
self.assertEqual(len(pool._acquired), 1)
stream = c1.send("1 + 1")
resp = yield stream.read()
self.assertEqual(len(pool.pool), 1)
self.assertEqual(len(pool._acquired), 0)
@gen_test
def test_maxsize_release(self):
pool = Pool("ws://localhost:8182/",
maxsize=2,
username="stephen",
password="password")
c1 = yield pool.acquire()
c2 = yield pool.acquire()
c3 = pool.acquire()
self.assertIsInstance(c3, Future)
with self.assertRaises(tornado.gen.TimeoutError):
yield gen.with_timeout(timedelta(seconds=0.1), c3)
yield pool.release(c2)
c3 = yield c3
self.assertEqual(c2, c3)
c1.conn.close()
c2.conn.close()
c3.conn.close()
@gen_test
def test_close(self):
pool = Pool("ws://localhost:8182/",
maxsize=2,
username="stephen",
password="password")
c1 = yield pool.acquire()
c2 = yield pool.acquire()
yield pool.release(c2)
pool.close()
self.assertTrue(c2.conn.closed)
self.assertFalse(c1.conn.closed)
c1.close()
self.assertTrue(pool.closed)
@gen_test
def test_cancelled(self):
pool = Pool("ws://localhost:8182/",
maxsize=2,
username="stephen",
password="password")
c1 = yield pool.acquire()
c2 = yield pool.acquire()
c3 = pool.acquire()
pool.close()
# Tornado futures do not support cancellation!
# self.assertTrue(c3.cancelled())
c1.close()
c2.close()
@gen_test
def test_future_class(self):
pool = Pool(url="ws://localhost:8182/",
maxsize=2,
username="stephen",
password="password")
self.assertTrue(hasattr(pool, 'future_class'))
self.assertEqual(pool.future_class, Future)
# class TornadoCtxtMngrTest(AsyncTestCase):
#
# @gen_test
# def test_pool_manager(self):
# pool = Pool("ws://localhost:8182/",
# maxsize=2,
# username="stephen",
# password="password")
# with (yield pool) as conn:
# self.assertFalse(conn.closed)
# self.assertEqual(len(pool.pool), 1)
# self.assertEqual(len(pool._acquired), 0)
# pool.close()
#
# @gen_test
# def test_graph_manager(self):
# graph = GraphDatabase("ws://localhost:8182/",
# username="stephen",
# password="password")
# with (yield graph) as conn:
# self.assertFalse(conn.closed)
#
# @gen_test
# def test_pool_enter_runtime_error(self):
# pool = Pool("ws://localhost:8182/",
# maxsize=2,
# username="stephen",
# password="password")
# with self.assertRaises(RuntimeError):
# with pool as conn:
# self.assertFalse(conn.closed)
#
# @gen_test
# def test_conn_enter_runtime_error(self):
# graph = GraphDatabase("ws://localhost:8182/",
# username="stephen",
# password="password")
# with self.assertRaises(RuntimeError):
# with graph as conn:
# self.assertFalse(conn.closed)
class TornadoCallbackStyleTest(AsyncTestCase):
def setUp(self):
super(TornadoCallbackStyleTest, self).setUp()
self.pool = Pool("ws://localhost:8182/")
@gen_test
def test_data_flow(self):
def execute(script):
future = Future()
graph = GraphDatabase("ws://localhost:8182/",
username="stephen",
password="password")
future_conn = graph.connect()
def cb(f):
conn = f.result()
stream = conn.send(script)
future.set_result(stream)
future_conn.add_done_callback(cb)
return future
result = yield execute("1 + 1")
self.assertIsInstance(result, Stream)
resp = yield result.read()
self.assertEqual(resp.data[0], 2)
class TornadoSessionTest(AsyncTestCase):
def setUp(self):
super(TornadoSessionTest, self).setUp()
self.graph = GraphDatabase("ws://localhost:8182/",
username="stephen",
password="password")
@gen_test
def test_manual_session(self):
session = yield self.graph.connect(session=str(uuid.uuid4()))
stream = session.send("v = 1+1", processor="session")
resp = yield stream.read()
stream = session.send("v", processor="session")
resp2 = yield stream.read()
self.assertEqual(resp.data[0], resp2.data[0])
session.close()
@gen_test
def test_no_session(self):
session = yield self.graph.connect()
with self.assertRaises(RuntimeError):
stream = session.send("v = 1+1", processor="session")
@gen_test
def test_session_obj_session(self):
session = yield self.graph.session()
stream = session.send("v = 1+1")
resp = yield stream.read()
stream = session.send("v")
resp2 = yield stream.read()
self.assertEqual(resp.data[0], resp2.data[0])
class TornadoAPITests(AsyncTestCase):
@gen_test
def test_create_connection(self):
conn = yield create_connection(
"ws://localhost:8182/", password="password",
username="stephen")
self.assertIsNotNone(conn.conn.closed)
conn.close()
@gen_test
def test_submit(self):
stream = yield submit(
"ws://localhost:8182/", "x + x", bindings={"x": 1},
password="password", username="stephen")
while True:
msg = yield stream.read()
if msg is None:
break
self.assertEqual(msg.status_code, 200)
self.assertEqual(msg.data[0], 2)
@gen_test(timeout=1)
def test_script_exception(self):
with self.assertRaises(RuntimeError):
stream = yield submit("ws://localhost:8182/",
"throw new Exception('error')",
password="password", username="stephen")
yield stream.read()
# class TestDeserialization(AsyncTestCase):
#
# @gen_test
# def test_complex_map_bindings(self):
# stream = yield submit("ws://localhost:8182/",
# "x.b",
# bindings={"x":{'f': {'foo': 'bar'}, 'b': ['bar', None, 1.5, {'b': 1}]}},
# password="password", username="stephen")
# resp = yield stream.read()
# self.assertEqual(resp.data[0], 'bar')
# self.assertIsNone(resp.data[1])
# self.assertEqual(resp.data[3]['b'], 1)
if __name__ == "__main__":
unittest.main()
|
mit
|
idigbio-api-hackathon/idigbio-lifemapper
|
dataretrieval/apiquery.py
|
1
|
19565
|
"""
@summary: Module containing functions for API Queries
@status: beta
@license: gpl2
@copyright: Copyright (C) 2014, University of Kansas Center for Research
Lifemapper Project, lifemapper [at] ku [dot] edu,
Biodiversity Institute,
1345 Jayhawk Boulevard, Lawrence, Kansas, 66045, USA
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or (at
your option) any later version.
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.
"""
import json
import urllib, urllib2
from types import BooleanType, FloatType, IntType, ListType, TupleType, StringType
import xml.etree.ElementTree as ET
#import LmCommon.common.lmXml
#from LmCommon.common.lmconstants import URL_ESCAPES, \
from constants import URL_ESCAPES, \
BISON_OCCURRENCE_URL, BISON_MAX_COUNT, \
BISON_QFILTERS, BISON_FILTERS, \
BISON_TSN_FILTERS, BISON_OCC_FILTERS, \
BISON_TSN_KEY, BISON_HIERARCHY_KEY, \
BISON_NAME_KEY, BISON_KINGDOM_KEY, \
BISON_RESPONSE_FIELDS, \
BISON_COUNT_KEYS, BISON_RECORD_KEYS, \
BISON_TSN_LIST_KEYS, \
ITIS_TAXONOMY_HIERARCHY_URL, \
ITIS_TAXONOMY_KEY, ITIS_HIERARCHY_TAG, \
ITIS_RANK_TAG, ITIS_TAXON_TAG, \
ITIS_KINGDOM_KEY, ITIS_PHYLUM_DIVISION_KEY, \
ITIS_CLASS_KEY, ITIS_ORDER_KEY, \
ITIS_FAMILY_KEY, ITIS_GENUS_KEY, \
ITIS_SPECIES_KEY, ITIS_DATA_NAMESPACE, \
BINOMIAL_REGEX, \
GBIF_REST_URL, GBIF_SPECIES_SERVICE, \
GBIF_OCCURRENCE_SERVICE, GBIF_DATASET_SERVICE
# .............................................................................
class APIQuery(object):
"""
Class to query APIs and return results
"""
def __init__(self, baseurl,
qFilters={}, otherFilters={}, filterString=None,
headers={}):
"""
@summary Constructor for the APIQuery class
"""
self.headers = headers
# No added filters are on url (unless initialized with filters in url)
self.baseurl = baseurl
self._qFilters = qFilters
self._otherFilters = otherFilters
self.filterString = self._assembleFilterString(filterString=filterString)
self.output = None
# ...............................................
@classmethod
def initFromUrl(cls, url, headers={}):
base, filters = url.split('?')
      qry = APIQuery(base, filterString=filters, headers=headers)
return qry
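   # e.g. APIQuery.initFromUrl('http://example.com/svc?rows=1') (hypothetical
   # URL) yields baseurl 'http://example.com/svc' with 'rows=1' reused as the
   # filter string.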
# .........................................
@property
def url(self):
# All filters added to url
return '%s?%s' % (self.baseurl, self.filterString)
# ...............................................
def addFilters(self, qFilters={}, otherFilters={}):
"""
@summary: Add new or replace existing filters. This does not remove
existing filters, unless existing keys are sent with new values.
"""
self.output = None
for k, v in qFilters.iteritems():
self._qFilters[k] = v
for k, v in otherFilters.iteritems():
self._otherFilters[k] = v
self.filterString = self._assembleFilterString()
# ...............................................
def clearAll(self, qFilters=True, otherFilters=True):
"""
@summary: Clear existing qFilters, otherFilters, and output
"""
self.output = None
if qFilters:
self._qFilters = {}
if otherFilters:
self._otherFilters = {}
self.filterString = self._assembleFilterString()
# ...............................................
def clearOtherFilters(self):
"""
@summary: Clear existing otherFilters and output
"""
self.clearAll(otherFilters=True, qFilters=False)
# ...............................................
def clearQFilters(self):
"""
@summary: Clear existing qFilters and output
"""
self.clearAll(otherFilters=False, qFilters=True)
# ...............................................
def _assembleFilterString(self, filterString=None):
if filterString is not None:
for replaceStr, withStr in URL_ESCAPES:
filterString = filterString.replace(replaceStr, withStr)
else:
allFilters = self._otherFilters.copy()
if self._qFilters:
qVal = self._assembleQVal(self._qFilters)
allFilters['q'] = qVal
filterString = self._assembleKeyValFilters(allFilters)
return filterString
# ...............................................
def _assembleKeyValFilters(self, ofDict):
for k, v in ofDict.iteritems():
if isinstance(v, BooleanType):
v = str(v).lower()
ofDict[k] = unicode(v).encode('utf-8')
filterString = urllib.urlencode(ofDict)
return filterString
# ...............................................
def _interpretQClause(self, key, val):
cls = None
if (isinstance(val, StringType) or
isinstance(val, IntType) or
isinstance(val, FloatType)):
cls = '%s:%s' % (key, str(val))
# Tuple for negated or range value
elif isinstance(val, TupleType):
# negated filter
if isinstance(val[0], BooleanType) and val[0] is False:
cls = 'NOT ' + key + ':' + str(val[1])
# range filter (better be numbers)
elif ((isinstance(val[0], IntType) or isinstance(val[0], FloatType))
and (isinstance(val[1], IntType) or isinstance(val[1], FloatType))):
cls = '%s:[%s TO %s]' % (key, str(val[0]), str(val[1]))
else:
print 'Unexpected value type %s' % str(val)
else:
print 'Unexpected value type %s' % str(val)
return cls
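   # Worked examples (hypothetical filters):
   #   _interpretQClause('name', 'Ursus')      -> 'name:Ursus'
   #   _interpretQClause('count', (False, 0))  -> 'NOT count:0'
   #   _interpretQClause('year', (2000, 2010)) -> 'year:[2000 TO 2010]'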
# ...............................................
def _assembleQItem(self, key, val):
itmClauses = []
# List for multiple values of same key
if isinstance(val, ListType):
for v in val:
itmClauses.append(self._interpretQClause(key, v))
else:
itmClauses.append(self._interpretQClause(key, val))
return itmClauses
# ...............................................
def _assembleQVal(self, qDict):
clauses = []
qval = ''
# interpret dictionary
for key, val in qDict.iteritems():
clauses.extend(self._assembleQItem(key, val))
# convert to string
firstClause = None
for cls in clauses:
if not firstClause and not cls.startswith('NOT'):
firstClause = cls
elif cls.startswith('NOT'):
qval = ' '.join((qval, cls))
else:
qval = ' AND '.join((qval, cls))
qval = firstClause + qval
return qval
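   # e.g. {'name': 'Ursus', 'extinct': (False, 'true')} may assemble to
   # 'name:Ursus NOT extinct:true'; clause order follows dict iteration, so
   # it is not guaranteed.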
# ...............................................
def query(self, outputType='json'):
# # Also Works
# response = urllib2.urlopen(fullUrl.decode('utf-8'))
data = None
if self._qFilters:
# All q and other filters are on url
req = urllib2.Request(self.url, None, self.headers)
else:
if self._otherFilters:
data = urllib.urlencode(self._otherFilters)
# Any filters are in data
req = urllib2.Request(self.baseurl, data, self.headers)
response = urllib2.urlopen(req)
output = response.read()
if outputType == 'json':
try:
self.output = json.loads(output)
except Exception, e:
print str(e)
raise
elif outputType == 'xml':
try:
root = ET.fromstring(output)
self.output = root
except Exception, e:
print str(e)
raise
else:
print 'Unrecognized output type %s' % str(outputType)
self.output = None
# .............................................................................
class BisonAPI(APIQuery):
# .............................................................................
"""
Class to query BISON APIs and return results
"""
# ...............................................
def __init__(self, qFilters={}, otherFilters={}, filterString=None,
headers={'Content-Type': 'application/json'}):
"""
@summary: Constructor for BisonAPI class
"""
# Add Q filters for this instance
for key, val in BISON_QFILTERS.iteritems():
qFilters[key] = val
# Add other filters for this instance
for key, val in BISON_FILTERS.iteritems():
otherFilters[key] = val
APIQuery.__init__(self, BISON_OCCURRENCE_URL, qFilters=qFilters,
otherFilters=otherFilters, filterString=filterString,
headers=headers)
# ...............................................
@classmethod
def initFromUrl(cls, url, headers={'Content-Type': 'application/json'}):
base, filters = url.split('?')
if base == BISON_OCCURRENCE_URL:
qry = BisonAPI(filterString=filters)
else:
raise Exception('Bison occurrence API must start with %s'
% BISON_OCCURRENCE_URL)
return qry
# ...............................................
   def _burrow(self, keylst):
      data = self.output
      for key in keylst:
         data = data[key]
      return data
# ...............................................
def getBinomialTSNs(self):
"""
@summary: Returns a list of dictionaries where each dictionary is an
occurrence record
"""
if self.output is None:
self.query()
dataCount = self._burrow(BISON_COUNT_KEYS)
dataList = self._burrow(BISON_TSN_LIST_KEYS)
print 'Reported count = %d, actual count = %d' % (dataCount, len(dataList))
return dataList
# ...............................................
@staticmethod
def getItisTSNValues(itisTSN):
"""
@summary: Return ITISScientificName, kingdom, and TSN hierarchy from one
occurrence record ending in this TSN (species rank)
"""
itisname = king = tsnHier = None
try:
occAPI = BisonAPI(qFilters={BISON_HIERARCHY_KEY: '*-%d-' % itisTSN},
otherFilters={'rows': 1})
tsnHier = occAPI.getFirstValueFor(BISON_HIERARCHY_KEY)
itisname = occAPI.getFirstValueFor(BISON_NAME_KEY)
king = occAPI.getFirstValueFor(BISON_KINGDOM_KEY)
except Exception, e:
print str(e)
raise
return (itisname, king, tsnHier)
# ...............................................
def getTSNOccurrences(self):
"""
@summary: Returns a list of dictionaries. Each dictionary is an occurrence record
"""
if self.output is None:
self.query()
dataCount = self._burrow(BISON_COUNT_KEYS)
dataList = self._burrow(BISON_RECORD_KEYS)
return dataList
# ...............................................
def query(self):
"""
@summary: Queries the API and sets 'output' attribute to a JSON object
"""
APIQuery.query(self, outputType='json')
# ...............................................
def getFirstValueFor(self, fieldname):
"""
@summary: Returns value for given fieldname in the first data record
containing a value
"""
val = None
records = self.getTSNOccurrences()
for rec in records:
try:
            val = rec[fieldname]
break
except:
print('Missing %s for %s' % (fieldname, self.url))
return val
# .............................................................................
class ItisAPI(APIQuery):
# .............................................................................
"""
   Class to query ITIS APIs and return results
"""
# ...............................................
def __init__(self, otherFilters={}):
"""
@summary: Constructor for ItisAPI class
"""
APIQuery.__init__(self, ITIS_TAXONOMY_HIERARCHY_URL,
otherFilters=otherFilters)
# ...............................................
def _findTaxonByRank(self, root, rankKey):
for tax in root.iter('{%s}%s' % (ITIS_DATA_NAMESPACE, ITIS_HIERARCHY_TAG)):
rank = tax.find('{%s}%s' % (ITIS_DATA_NAMESPACE, ITIS_RANK_TAG)).text
if rank == rankKey:
            name = tax.find('{%s}%s' % (ITIS_DATA_NAMESPACE, ITIS_TAXON_TAG)).text
            tsn = tax.find('{%s}%s' % (ITIS_DATA_NAMESPACE, ITIS_TAXONOMY_KEY)).text
return (tsn, name)
# ...............................................
def _getRankFromPath(self, taxPath, rankKey):
for rank, tsn, name in taxPath:
if rank == rankKey:
return int(tsn), name
return None, None
# ...............................................
def _returnHierarchy(self):
"""
@note: for
"""
taxPath = []
for tax in self.output.iter('{%s}%s' % (ITIS_DATA_NAMESPACE, ITIS_HIERARCHY_TAG)):
rank = tax.find('{%s}%s' % (ITIS_DATA_NAMESPACE, ITIS_RANK_TAG)).text
name = tax.find('{%s}%s' % (ITIS_DATA_NAMESPACE, ITIS_TAXON_TAG)).text
tsn = tax.find('{%s}%s' % (ITIS_DATA_NAMESPACE, ITIS_TAXONOMY_KEY)).text
taxPath.append((rank, tsn, name))
return taxPath
# ...............................................
def getTaxonTSNHierarchy(self):
if self.output is None:
APIQuery.query(self, outputType='xml')
taxPath = self._returnHierarchy()
hierarchy = {}
for rank in (ITIS_KINGDOM_KEY, ITIS_PHYLUM_DIVISION_KEY, ITIS_CLASS_KEY,
ITIS_ORDER_KEY, ITIS_FAMILY_KEY, ITIS_GENUS_KEY,
ITIS_SPECIES_KEY):
hierarchy[rank] = self._getRankFromPath(taxPath, rank)
return hierarchy
# ...............................................
def query(self):
"""
      @summary: Queries the API and sets 'output' attribute to an ElementTree object
"""
APIQuery.query(self, outputType='xml')
# .............................................................................
class GbifAPI(APIQuery):
# .............................................................................
"""
Class to query GBIF APIs and return results
"""
# ...............................................
def __init__(self, service=GBIF_SPECIES_SERVICE, key=None, otherFilters={}):
"""
@summary: Constructor for GbifAPI class
"""
url = '/'.join((GBIF_REST_URL, service))
if key is not None:
url = '/'.join((url, str(key)))
APIQuery.__init__(self, url)
else:
APIQuery.__init__(self, url, otherFilters=otherFilters)
# ...............................................
@staticmethod
def _getOutputVal(outDict, name):
try:
val = outDict[name]
except:
return None
return val
# ...............................................
@staticmethod
def getTaxonomy(taxonkey):
"""
      @summary: Return the GBIF taxonomy (kingdom through species, plus genus
         and species keys) for the given GBIF taxon key
"""
taxAPI = GbifAPI(service=GBIF_SPECIES_SERVICE, key=taxonkey)
try:
taxAPI.query()
kingdomStr = taxAPI._getOutputVal(taxAPI.output, 'kingdom')
phylumStr = taxAPI._getOutputVal(taxAPI.output, 'phylum')
# Missing class string in GBIF output
# classStr = taxAPI._getOutputVal(taxAPI.output, '')
classStr = None
orderStr = taxAPI._getOutputVal(taxAPI.output, 'order')
familyStr = taxAPI._getOutputVal(taxAPI.output, 'family')
genusStr = taxAPI._getOutputVal(taxAPI.output, 'genus')
speciesStr = taxAPI._getOutputVal(taxAPI.output, 'species')
rankStr = taxAPI._getOutputVal(taxAPI.output, 'rank')
genuskey = taxAPI._getOutputVal(taxAPI.output, 'genusKey')
specieskey = taxAPI._getOutputVal(taxAPI.output, 'speciesKey')
acceptedkey = taxAPI._getOutputVal(taxAPI.output, 'acceptedKey')
if rankStr == 'SPECIES':
fullSpeciesStr = taxAPI._getOutputVal(taxAPI.output, 'accepted')
if fullSpeciesStr is not None:
speciesStr = fullSpeciesStr
except Exception, e:
print str(e)
raise
return (kingdomStr, phylumStr, classStr, orderStr, familyStr, genusStr,
speciesStr, genuskey, specieskey)
# ...............................................
def query(self):
"""
      @summary: Queries the API and sets 'output' attribute to a JSON object
"""
APIQuery.query(self, outputType='json')
# .............................................................................
# .............................................................................
if __name__ == '__main__':
# tsnQuery = BisonAPI(qFilters={BISON_NAME_KEY: BINOMIAL_REGEX},
# otherFilters=BISON_TSN_FILTERS)
# tsnList = tsnQuery.getBinomialTSNs()
# print len(tsnList)
tsnList = [[u'100637', 31], [u'100667', 45], [u'100674', 24]]
response = {u'facet_counts':
{u'facet_ranges': {},
u'facet_fields': {u'TSNs': tsnList}
}
}
loopCount = 0
occAPI = None
taxAPI = None
for tsnPair in tsnList:
tsn = int(tsnPair[0])
count = int(tsnPair[1])
# taxAPI = ItisAPI(otherFilters={ITIS_TAXONOMY_KEY: tsn})
# taxPath = taxQuery.getTSNHierarchy()
# for tax in taxPath:
# print str(tax)
newQ = {BISON_HIERARCHY_KEY: '*-%d-*' % tsn}
occAPI = BisonAPI(qFilters=newQ, otherFilters=BISON_OCC_FILTERS)
print occAPI.url
occList = occAPI.getTSNOccurrences()
print 'Received %d occurrences for TSN %d' % (len(occList), tsn)
tsnAPI = BisonAPI(qFilters={BISON_HIERARCHY_KEY: '*-%d-' % tsn},
otherFilters={'rows': 1})
hier = tsnAPI.getFirstValueFor(BISON_HIERARCHY_KEY)
name = tsnAPI.getFirstValueFor(BISON_NAME_KEY)
print name, hier
GbifAPI.getTaxonomy(1000225)
# Only loop twice for debugging
loopCount += 1
if loopCount > 2:
break
|
gpl-3.0
|
doug-fish/horizon
|
openstack_dashboard/management/commands/migrate_settings.py
|
60
|
8249
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import difflib
import imp
import optparse
import os
import shlex
import subprocess
import sys
import time
import warnings
from django.core.management.templates import BaseCommand # noqa
# Suppress DeprecationWarnings which clutter the output to the point of
# rendering it unreadable.
warnings.simplefilter('ignore')
def get_module_path(module_name):
"""Gets the module path without importing anything.
Avoids conflicts with package dependencies.
(taken from http://github.com/sitkatech/pypatch)
"""
path = sys.path
for name in module_name.split('.'):
file_pointer, path, desc = imp.find_module(name, path)
path = [path, ]
if file_pointer is not None:
file_pointer.close()
return path[0]
class DirContext(object):
"""Change directory in a context manager.
    This allows changing the working directory while guaranteeing a return to
    the previous directory whatever happens during execution.
Usage::
with DirContext('/home/foo') as dircontext:
# Some code happening in '/home/foo'
# We are back to the previous directory.
"""
def __init__(self, dirname):
self.prevdir = os.path.abspath(os.curdir)
os.chdir(dirname)
self.curdir = os.path.abspath(os.curdir)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
os.chdir(self.prevdir)
def __str__(self):
return self.curdir
class Command(BaseCommand):
option_list = BaseCommand.option_list + (
optparse.make_option(
'--gendiff',
action='store_true',
dest='gendiff',
default=False,
help=('Generate a diff file between local_settings.py and '
'local_settings.py.example'),
),
optparse.make_option(
'-f', '--force',
action='store_true',
dest='force',
default=False,
help=('Force destination rewriting without warning if the '
'destination file already exists.'),
),
)
help = ("Creates a local_settings.py file from the "
"local_settings.py.example template.")
time_fmt = '%Y-%m-%d %H:%M:%S %Z'
file_time_fmt = '%Y%m%d%H%M%S%Z'
local_settings_example = 'local_settings.py.example'
local_settings_file = 'local_settings.py'
local_settings_diff = 'local_settings.diff'
local_settings_reject_pattern = 'local_settings.py_%s.rej'
def __init__(self, *args, **kwargs):
super(Command, self).__init__(*args, **kwargs)
settings_file = os.path.abspath(
get_module_path(os.environ['DJANGO_SETTINGS_MODULE'])
)
self.local_settings_dir = os.path.abspath(
os.path.join(
os.path.realpath(os.path.dirname(settings_file)),
'local'
)
)
def gendiff(self, force=False):
"""Generate a diff between self.local_settings and the example file.
"""
with DirContext(self.local_settings_dir) as dircontext:
if not os.path.exists(self.local_settings_diff) or force:
with open(self.local_settings_example, 'r') as fp:
example_lines = fp.readlines()
with open(self.local_settings_file, 'r') as fp:
local_settings_lines = fp.readlines()
local_settings_example_mtime = time.strftime(
self.time_fmt,
time.localtime(
os.stat(self.local_settings_example).st_mtime)
)
local_settings_mtime = time.strftime(
self.time_fmt,
time.localtime(os.stat(self.local_settings_file).st_mtime)
)
print('generating "%s"...' % os.path.join(
dircontext.curdir,
self.local_settings_diff)
)
with open(self.local_settings_diff, 'w') as fp:
for line in difflib.unified_diff(
example_lines, local_settings_lines,
fromfile=self.local_settings_example,
tofile=self.local_settings_file,
fromfiledate=local_settings_example_mtime,
tofiledate=local_settings_mtime
):
fp.write(line)
print('\tDONE.')
sys.exit(0)
else:
sys.exit(
'"%s" already exists.' %
os.path.join(dircontext.curdir,
self.local_settings_diff)
)
def patch(self, force=False):
"""Patch local_settings.py.example with local_settings.diff.
The patch application generates the local_settings.py file (the
local_settings.py.example remains unchanged).
http://github.com/sitkatech/pypatch fails if the
local_settings.py.example file is not 100% identical to the one used to
generate the first diff so we use the patch command instead.
"""
with DirContext(self.local_settings_dir) as dircontext:
if os.path.exists(self.local_settings_diff):
if not os.path.exists(self.local_settings_file) or force:
local_settings_reject = \
self.local_settings_reject_pattern % (
time.strftime(self.file_time_fmt, time.localtime())
)
patch_cmd = shlex.split(
'patch %s %s -o %s -r %s' % (
self.local_settings_example,
self.local_settings_diff,
self.local_settings_file,
local_settings_reject
)
)
try:
subprocess.check_call(patch_cmd)
except subprocess.CalledProcessError:
if os.path.exists(local_settings_reject):
sys.exit(
'Some conflict(s) occurred. Please check "%s" '
'to find unapplied parts of the diff.\n'
'Once conflicts are solved, it is safer to '
'regenerate a newer diff with the "--gendiff" '
'option.' %
os.path.join(
dircontext.curdir,
local_settings_reject)
)
else:
sys.exit('An unhandled error occurred.')
print('Generation of "%s" successful.' % os.path.join(
dircontext.curdir,
self.local_settings_file)
)
sys.exit(0)
else:
sys.exit(
'"%s" already exists.' %
os.path.join(dircontext.curdir,
self.local_settings_file)
)
else:
sys.exit('No diff file found, please generate one with the '
'"--gendiff" option.')
def handle(self, *args, **options):
force = options.get('force')
if options.get('gendiff'):
self.gendiff(force)
else:
self.patch(force)
|
apache-2.0
|
richpolis/siveinpy
|
env/lib/python2.7/site-packages/django/contrib/auth/tests/test_basic.py
|
105
|
9914
|
# -*- encoding: utf-8 -*-
from __future__ import unicode_literals
import locale
from django.contrib.auth import get_user_model
from django.contrib.auth.management.commands import createsuperuser
from django.contrib.auth.models import User, AnonymousUser
from django.contrib.auth.tests.custom_user import CustomUser
from django.contrib.auth.tests.utils import skipIfCustomUser
from django.core.exceptions import ImproperlyConfigured
from django.core.management import call_command
from django.dispatch import receiver
from django.test import TestCase
from django.test.signals import setting_changed
from django.test.utils import override_settings
from django.utils import translation
from django.utils.encoding import force_str
from django.utils.six import binary_type, PY2, StringIO
@receiver(setting_changed)
def user_model_swapped(**kwargs):
if kwargs['setting'] == 'AUTH_USER_MODEL':
from django.db.models.manager import ensure_default_manager
from django.contrib.auth.models import User
# Reset User manager
setattr(User, 'objects', User._default_manager)
ensure_default_manager(User)
def mock_inputs(inputs):
"""
Decorator to temporarily replace input/getpass to allow interactive
createsuperuser.
"""
def inner(test_func):
def wrapped(*args):
class mock_getpass:
@staticmethod
def getpass(prompt=b'Password: ', stream=None):
if PY2:
# getpass on Windows only supports prompt as bytestring (#19807)
assert isinstance(prompt, binary_type)
return inputs['password']
def mock_input(prompt):
# prompt should be encoded in Python 2. This line will raise an
# Exception if prompt contains unencoded non-ascii on Python 2.
prompt = str(prompt)
assert str('__proxy__') not in prompt
response = ''
for key, val in inputs.items():
if force_str(key) in prompt.lower():
response = val
break
return response
old_getpass = createsuperuser.getpass
old_input = createsuperuser.input
createsuperuser.getpass = mock_getpass
createsuperuser.input = mock_input
try:
test_func(*args)
finally:
createsuperuser.getpass = old_getpass
createsuperuser.input = old_input
return wrapped
return inner
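# Usage sketch: decorating a test with @mock_inputs({'password': 'pw'}) makes
# any prompt whose lowercased text contains a dict key return that key's value.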
@skipIfCustomUser
class BasicTestCase(TestCase):
def test_user(self):
"Check that users can be created and can set their password"
u = User.objects.create_user('testuser', '[email protected]', 'testpw')
self.assertTrue(u.has_usable_password())
self.assertFalse(u.check_password('bad'))
self.assertTrue(u.check_password('testpw'))
# Check we can manually set an unusable password
u.set_unusable_password()
u.save()
self.assertFalse(u.check_password('testpw'))
self.assertFalse(u.has_usable_password())
u.set_password('testpw')
self.assertTrue(u.check_password('testpw'))
u.set_password(None)
self.assertFalse(u.has_usable_password())
# Check authentication/permissions
self.assertTrue(u.is_authenticated())
self.assertFalse(u.is_staff)
self.assertTrue(u.is_active)
self.assertFalse(u.is_superuser)
# Check API-based user creation with no password
u2 = User.objects.create_user('testuser2', '[email protected]')
self.assertFalse(u2.has_usable_password())
def test_user_no_email(self):
"Check that users can be created without an email"
u = User.objects.create_user('testuser1')
self.assertEqual(u.email, '')
u2 = User.objects.create_user('testuser2', email='')
self.assertEqual(u2.email, '')
u3 = User.objects.create_user('testuser3', email=None)
self.assertEqual(u3.email, '')
def test_anonymous_user(self):
"Check the properties of the anonymous user"
a = AnonymousUser()
self.assertEqual(a.pk, None)
self.assertFalse(a.is_authenticated())
self.assertFalse(a.is_staff)
self.assertFalse(a.is_active)
self.assertFalse(a.is_superuser)
self.assertEqual(a.groups.all().count(), 0)
self.assertEqual(a.user_permissions.all().count(), 0)
def test_superuser(self):
"Check the creation and properties of a superuser"
super = User.objects.create_superuser('super', '[email protected]', 'super')
self.assertTrue(super.is_superuser)
self.assertTrue(super.is_active)
self.assertTrue(super.is_staff)
def test_createsuperuser_management_command(self):
"Check the operation of the createsuperuser management command"
# We can use the management command to create a superuser
new_io = StringIO()
call_command("createsuperuser",
interactive=False,
username="joe",
email="[email protected]",
stdout=new_io
)
command_output = new_io.getvalue().strip()
self.assertEqual(command_output, 'Superuser created successfully.')
u = User.objects.get(username="joe")
self.assertEqual(u.email, '[email protected]')
# created password should be unusable
self.assertFalse(u.has_usable_password())
        # We can suppress output on the management command
new_io = StringIO()
call_command("createsuperuser",
interactive=False,
username="joe2",
email="[email protected]",
verbosity=0,
stdout=new_io
)
command_output = new_io.getvalue().strip()
self.assertEqual(command_output, '')
u = User.objects.get(username="joe2")
self.assertEqual(u.email, '[email protected]')
self.assertFalse(u.has_usable_password())
call_command("createsuperuser",
interactive=False,
username="[email protected]",
email="[email protected]",
verbosity=0
)
u = User.objects.get(username="[email protected]")
self.assertEqual(u.email, '[email protected]')
self.assertFalse(u.has_usable_password())
@mock_inputs({'password': "nopasswd"})
def test_createsuperuser_nolocale(self):
"""
Check that createsuperuser does not break when no locale is set. See
ticket #16017.
"""
old_getdefaultlocale = locale.getdefaultlocale
try:
# Temporarily remove locale information
locale.getdefaultlocale = lambda: (None, None)
# Call the command in this new environment
call_command("createsuperuser",
interactive=True,
username="[email protected]",
email="[email protected]",
verbosity=0
)
except TypeError:
self.fail("createsuperuser fails if the OS provides no information about the current locale")
finally:
# Re-apply locale information
locale.getdefaultlocale = old_getdefaultlocale
# If we were successful, a user should have been created
u = User.objects.get(username="[email protected]")
self.assertEqual(u.email, '[email protected]')
@mock_inputs({
'password': "nopasswd",
'uživatel': 'foo', # username (cz)
'email': '[email protected]'})
def test_createsuperuser_non_ascii_verbose_name(self):
# Aliased so the string doesn't get extracted
from django.utils.translation import ugettext_lazy as ulazy
username_field = User._meta.get_field('username')
old_verbose_name = username_field.verbose_name
username_field.verbose_name = ulazy('uživatel')
new_io = StringIO()
try:
call_command("createsuperuser",
interactive=True,
stdout=new_io
)
finally:
username_field.verbose_name = old_verbose_name
command_output = new_io.getvalue().strip()
self.assertEqual(command_output, 'Superuser created successfully.')
def test_get_user_model(self):
"The current user model can be retrieved"
self.assertEqual(get_user_model(), User)
@override_settings(AUTH_USER_MODEL='auth.CustomUser')
def test_swappable_user(self):
"The current user model can be swapped out for another"
self.assertEqual(get_user_model(), CustomUser)
with self.assertRaises(AttributeError):
User.objects.all()
@override_settings(AUTH_USER_MODEL='badsetting')
def test_swappable_user_bad_setting(self):
"The alternate user setting must point to something in the format app.model"
with self.assertRaises(ImproperlyConfigured):
get_user_model()
@override_settings(AUTH_USER_MODEL='thismodel.doesntexist')
def test_swappable_user_nonexistent_model(self):
"The current user model must point to an installed model"
with self.assertRaises(ImproperlyConfigured):
get_user_model()
@skipIfCustomUser
def test_user_verbose_names_translatable(self):
"Default User model verbose names are translatable (#19945)"
with translation.override('en'):
self.assertEqual(User._meta.verbose_name, 'user')
self.assertEqual(User._meta.verbose_name_plural, 'users')
with translation.override('es'):
self.assertEqual(User._meta.verbose_name, 'usuario')
self.assertEqual(User._meta.verbose_name_plural, 'usuarios')
|
mit
|
sorenh/cc
|
vendor/Twisted-10.0.0/twisted/web/vhost.py
|
53
|
4382
|
# -*- test-case-name: twisted.web.
# Copyright (c) 2001-2009 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
I am a virtual hosts implementation.
"""
# Twisted Imports
from twisted.python import roots
from twisted.web import resource
class VirtualHostCollection(roots.Homogenous):
"""Wrapper for virtual hosts collection.
This exists for configuration purposes.
"""
entityType = resource.Resource
def __init__(self, nvh):
self.nvh = nvh
def listStaticEntities(self):
return self.nvh.hosts.items()
def getStaticEntity(self, name):
        return self.nvh.hosts.get(name)
def reallyPutEntity(self, name, entity):
self.nvh.addHost(name, entity)
def delEntity(self, name):
self.nvh.removeHost(name)
class NameVirtualHost(resource.Resource):
"""I am a resource which represents named virtual hosts.
"""
default = None
def __init__(self):
"""Initialize.
"""
resource.Resource.__init__(self)
self.hosts = {}
def listStaticEntities(self):
return resource.Resource.listStaticEntities(self) + [("Virtual Hosts", VirtualHostCollection(self))]
def getStaticEntity(self, name):
if name == "Virtual Hosts":
return VirtualHostCollection(self)
else:
return resource.Resource.getStaticEntity(self, name)
def addHost(self, name, resrc):
"""Add a host to this virtual host.
This will take a host named `name', and map it to a resource
`resrc'. For example, a setup for our virtual hosts would be::
nvh.addHost('divunal.com', divunalDirectory)
nvh.addHost('www.divunal.com', divunalDirectory)
nvh.addHost('twistedmatrix.com', twistedMatrixDirectory)
nvh.addHost('www.twistedmatrix.com', twistedMatrixDirectory)
"""
self.hosts[name] = resrc
def removeHost(self, name):
"""Remove a host."""
del self.hosts[name]
def _getResourceForRequest(self, request):
"""(Internal) Get the appropriate resource for the given host.
"""
hostHeader = request.getHeader('host')
        if hostHeader is None:
return self.default or resource.NoResource()
else:
host = hostHeader.lower().split(':', 1)[0]
return (self.hosts.get(host, self.default)
or resource.NoResource("host %s not in vhost map" % repr(host)))
def render(self, request):
"""Implementation of resource.Resource's render method.
"""
resrc = self._getResourceForRequest(request)
return resrc.render(request)
def getChild(self, path, request):
"""Implementation of resource.Resource's getChild method.
"""
resrc = self._getResourceForRequest(request)
if resrc.isLeaf:
request.postpath.insert(0,request.prepath.pop(-1))
return resrc
else:
return resrc.getChildWithDefault(path, request)
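# Hedged usage sketch (assumption, not part of the original module): serving
# two hostnames from one port; the paths and port are hypothetical.
#
#   from twisted.web import server, static
#   from twisted.internet import reactor
#
#   nvh = NameVirtualHost()
#   nvh.addHost('example.com', static.File('/srv/www/example'))
#   nvh.addHost('blog.example.com', static.File('/srv/www/blog'))
#   nvh.default = static.File('/srv/www/default')
#   reactor.listenTCP(8080, server.Site(nvh))
#   reactor.run()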
class _HostResource(resource.Resource):
def getChild(self, path, request):
if ':' in path:
host, port = path.split(':', 1)
port = int(port)
else:
host, port = path, 80
request.setHost(host, port)
prefixLen = 3+request.isSecure()+4+len(path)+len(request.prepath[-3])
request.path = '/'+'/'.join(request.postpath)
request.uri = request.uri[prefixLen:]
del request.prepath[:3]
return request.site.getResourceFor(request)
class VHostMonsterResource(resource.Resource):
"""
Use this to be able to record the hostname and method (http vs. https)
in the URL without disturbing your web site. If you put this resource
in a URL http://foo.com/bar then requests to
http://foo.com/bar/http/baz.com/something will be equivalent to
http://foo.com/something, except that the hostname the request will
appear to be accessing will be "baz.com". So if "baz.com" is redirecting
    all requests to foo.com, while foo.com is inaccessible from the outside,
    then redirects and URL generation will work correctly.
"""
def getChild(self, path, request):
if path == 'http':
request.isSecure = lambda: 0
elif path == 'https':
request.isSecure = lambda: 1
return _HostResource()
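# Hedged worked example of the rewriting above (assumption, not part of the
# original module): with this resource mounted at /vhost, a request for
#   http://internal:8080/vhost/https/www.example.com/foo
# is dispatched as a request for /foo, with the request's host rewritten to
# www.example.com and isSecure() reporting True.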
|
apache-2.0
|
40223144/w16b_test
|
static/Brython3.1.1-20150328-091302/Lib/long_int1/__init__.py
|
503
|
3858
|
from browser import html, document, window
import javascript
#memorize/cache?
def _get_value(other):
if isinstance(other, LongInt):
return other.value
return other
class BigInt:
def __init__(self):
pass
def __abs__(self):
return LongInt(self.value.abs())
def __add__(self, other):
return LongInt(self.value.plus(_get_value(other)))
def __and__(self, other):
pass
def __divmod__(self, other):
_value=_get_value(other)
return LongInt(self.value.div(_value)), LongInt(self.value.mod(_value))
def __div__(self, other):
return LongInt(self.value.div(_get_value(other)))
def __eq__(self, other):
return bool(self.value.eq(_get_value(other)))
def __floordiv__(self, other):
return LongInt(self.value.div(_get_value(other)).floor())
def __ge__(self, other):
return bool(self.value.gte(_get_value(other)))
def __gt__(self, other):
return bool(self.value.gt(_get_value(other)))
def __index__(self):
if self.value.isInt():
return int(self.value.toNumber())
raise TypeError("This is not an integer")
def __le__(self, other):
return bool(self.value.lte(_get_value(other)))
def __lt__(self, other):
return bool(self.value.lt(_get_value(other)))
def __lshift__(self, shift):
if isinstance(shift, int):
_v=LongInt(2)**shift
return LongInt(self.value.times(_v.value))
def __mod__(self, other):
return LongInt(self.value.mod(_get_value(other)))
def __mul__(self, other):
return LongInt(self.value.times(_get_value(other)))
    def __neg__(self):
        # Unary negation has no second operand; negate this value directly.
        return LongInt(self.value.neg())
def __or__(self, other):
pass
def __pow__(self, other):
return LongInt(self.value.pow(_get_value(other)))
def __rshift__(self, other):
pass
def __sub__(self, other):
return LongInt(self.value.minus(_get_value(other)))
    def __repr__(self):
        return "%s(%s)" % (self.__class__.__name__, self.value.toString(10))
    def __str__(self):
        return "%s(%s)" % (self.__class__.__name__, self.value.toString(10))
def __xor__(self, other):
pass
_precision=20
def get_precision(value):
if isinstance(value, LongInt):
return len(str(value.value.toString(10)))
return len(str(value))
class DecimalJS(BigInt):
def __init__(self, value=0, base=10):
global _precision
_prec=get_precision(value)
if _prec > _precision:
_precision=_prec
window.eval('Decimal.precision=%s' % _precision)
self.value=javascript.JSConstructor(window.Decimal)(value, base)
class BigNumberJS(BigInt):
def __init__(self, value=0, base=10):
self.value=javascript.JSConstructor(window.BigNumber)(value, base)
class BigJS(BigInt):
def __init__(self, value=0, base=10):
self.value=javascript.JSConstructor(window.Big)(value, base)
def __floordiv__(self, other):
_v=LongInt(self.value.div(_get_value(other)))
if _v >= 0:
return LongInt(_v.value.round(0, 0)) #round down
return LongInt(_v.value.round(0, 3)) #round up
def __pow__(self, other):
if isinstance(other, LongInt):
_value=int(other.value.toString(10))
elif isinstance(other, str):
_value=int(other)
return LongInt(self.value.pow(_value))
#_path = __file__[:__file__.rfind('/')]+'/'
_path = __BRYTHON__.brython_path + 'Lib/long_int1/'
#to use decimal.js library uncomment these 2 lines
#javascript.load(_path+'decimal.min.js', ['Decimal'])
#LongInt=DecimalJS
#to use bignumber.js library uncomment these 2 lines
javascript.load(_path+'bignumber.min.js', ['BigNumber'])
LongInt=BigNumberJS
#big.js does not have a "base" so only base 10 stuff works.
#to use big.js library uncomment these 2 lines
#javascript.load(_path+'big.min.js', ['Big'])
#LongInt=BigJS
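# Hedged usage sketch (assumption, not part of the original module):
#   x = LongInt('123456789012345678901234567890')
#   y = x * x + LongInt(1)
#   q, r = divmod(y, x)        # __divmod__ returns a (LongInt, LongInt) pair
#   x ** 10                    # __pow__ delegates to the JS library's pow()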
|
agpl-3.0
|
seppi91/CouchPotatoServer
|
couchpotato/core/plugins/manage.py
|
17
|
12274
|
import os
import time
import traceback
from couchpotato import get_db
from couchpotato.api import addApiView
from couchpotato.core.event import fireEvent, addEvent, fireEventAsync
from couchpotato.core.helpers.encoding import sp
from couchpotato.core.helpers.variable import splitString, getTitle, tryInt, getIdentifier, getFreeSpace
from couchpotato.core.logger import CPLog
from couchpotato.core.plugins.base import Plugin
from couchpotato.environment import Env
log = CPLog(__name__)
autoload = 'Manage'
class Manage(Plugin):
in_progress = False
def __init__(self):
fireEvent('scheduler.interval', identifier = 'manage.update_library', handle = self.updateLibrary, hours = 2)
addEvent('manage.update', self.updateLibrary)
addEvent('manage.diskspace', self.getDiskSpace)
# Add files after renaming
def after_rename(message = None, group = None):
if not group: group = {}
return self.scanFilesToLibrary(folder = group['destination_dir'], files = group['renamed_files'], release_download = group['release_download'])
addEvent('renamer.after', after_rename, priority = 110)
addApiView('manage.update', self.updateLibraryView, docs = {
'desc': 'Update the library by scanning for new movies',
'params': {
'full': {'desc': 'Do a full update or just recently changed/added movies.'},
}
})
addApiView('manage.progress', self.getProgress, docs = {
'desc': 'Get the progress of current manage update',
'return': {'type': 'object', 'example': """{
'progress': False || object, total & to_go,
}"""},
})
if not Env.get('dev') and self.conf('startup_scan'):
addEvent('app.load', self.updateLibraryQuick)
addEvent('app.load', self.setCrons)
# Enable / disable interval
addEvent('setting.save.manage.library_refresh_interval.after', self.setCrons)
def setCrons(self):
fireEvent('schedule.remove', 'manage.update_library')
refresh = tryInt(self.conf('library_refresh_interval'))
if refresh > 0:
fireEvent('schedule.interval', 'manage.update_library', self.updateLibrary, hours = refresh, single = True)
return True
def getProgress(self, **kwargs):
return {
'progress': self.in_progress
}
def updateLibraryView(self, full = 1, **kwargs):
fireEventAsync('manage.update', full = True if full == '1' else False)
return {
'progress': self.in_progress,
'success': True
}
def updateLibraryQuick(self):
return self.updateLibrary(full = False)
def updateLibrary(self, full = True):
last_update_key = 'manage.last_update%s' % ('_full' if full else '')
last_update = float(Env.prop(last_update_key, default = 0))
if self.in_progress:
log.info('Already updating library: %s', self.in_progress)
return
elif self.isDisabled() or (last_update > time.time() - 20):
return
self.in_progress = {}
fireEvent('notify.frontend', type = 'manage.updating', data = True)
try:
directories = self.directories()
directories.sort()
added_identifiers = []
# Add some progress
for directory in directories:
self.in_progress[os.path.normpath(directory)] = {
'started': False,
'eta': -1,
'total': None,
'to_go': None,
}
for directory in directories:
folder = os.path.normpath(directory)
self.in_progress[os.path.normpath(directory)]['started'] = tryInt(time.time())
if not os.path.isdir(folder):
if len(directory) > 0:
log.error('Directory doesn\'t exist: %s', folder)
continue
log.info('Updating manage library: %s', folder)
fireEvent('notify.frontend', type = 'manage.update', data = True, message = 'Scanning for movies in "%s"' % folder)
onFound = self.createAddToLibrary(folder, added_identifiers)
fireEvent('scanner.scan', folder = folder, simple = True, newer_than = last_update if not full else 0, check_file_date = False, on_found = onFound, single = True)
# Break if CP wants to shut down
if self.shuttingDown():
break
# If cleanup option is enabled, remove offline files from database
if self.conf('cleanup') and full and not self.shuttingDown():
# Get movies with done status
total_movies, done_movies = fireEvent('media.list', types = 'movie', status = 'done', release_status = 'done', status_or = True, single = True)
deleted_releases = []
for done_movie in done_movies:
if getIdentifier(done_movie) not in added_identifiers:
fireEvent('media.delete', media_id = done_movie['_id'], delete_from = 'all')
else:
releases = done_movie.get('releases', [])
for release in releases:
if release.get('files'):
brk = False
for file_type in release.get('files', {}):
for release_file in release['files'][file_type]:
# Remove release not available anymore
if not os.path.isfile(sp(release_file)):
fireEvent('release.clean', release['_id'])
brk = True
break
if brk:
break
# Check if there are duplicate releases (different quality) use the last one, delete the rest
if len(releases) > 1:
used_files = {}
for release in releases:
for file_type in release.get('files', {}):
for release_file in release['files'][file_type]:
already_used = used_files.get(release_file)
if already_used:
release_id = release['_id'] if already_used.get('last_edit', 0) > release.get('last_edit', 0) else already_used['_id']
if release_id not in deleted_releases:
fireEvent('release.delete', release_id, single = True)
deleted_releases.append(release_id)
break
else:
used_files[release_file] = release
del used_files
# Break if CP wants to shut down
if self.shuttingDown():
break
if not self.shuttingDown():
db = get_db()
db.reindex()
Env.prop(last_update_key, time.time())
except:
log.error('Failed updating library: %s', (traceback.format_exc()))
while self.in_progress and len(self.in_progress) > 0 and not self.shuttingDown():
delete_me = {}
# noinspection PyTypeChecker
for folder in self.in_progress:
if self.in_progress[folder]['to_go'] <= 0:
delete_me[folder] = True
for delete in delete_me:
del self.in_progress[delete]
time.sleep(1)
fireEvent('notify.frontend', type = 'manage.updating', data = False)
self.in_progress = False
# noinspection PyDefaultArgument
def createAddToLibrary(self, folder, added_identifiers = []):
def addToLibrary(group, total_found, to_go):
if self.in_progress[folder]['total'] is None:
self.in_progress[folder].update({
'total': total_found,
'to_go': total_found,
})
self.updateProgress(folder, to_go)
if group['media'] and group['identifier']:
added_identifiers.append(group['identifier'])
# Add it to release and update the info
fireEvent('release.add', group = group, update_info = False)
fireEvent('movie.update', identifier = group['identifier'], on_complete = self.createAfterUpdate(folder, group['identifier']))
return addToLibrary
def createAfterUpdate(self, folder, identifier):
# Notify frontend
def afterUpdate():
if not self.in_progress or self.shuttingDown():
return
total = self.in_progress[folder]['total']
movie_dict = fireEvent('media.get', identifier, single = True)
if movie_dict:
fireEvent('notify.frontend', type = 'movie.added', data = movie_dict, message = None if total > 5 else 'Added "%s" to manage.' % getTitle(movie_dict))
return afterUpdate
def updateProgress(self, folder, to_go):
pr = self.in_progress[folder]
if to_go < pr['to_go']:
pr['to_go'] = to_go
avg = (time.time() - pr['started']) / (pr['total'] - pr['to_go'])
pr['eta'] = tryInt(avg * pr['to_go'])
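        # Hedged worked example of the ETA arithmetic above (assumed numbers,
        # not from the source): started 100s ago with total = 50 and
        # to_go = 10 means 40 items done, avg = 100 / 40 = 2.5s per item,
        # so eta = tryInt(2.5 * 10) = 25 seconds.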
def directories(self):
try:
return self.conf('library', default = [])
except:
pass
return []
def scanFilesToLibrary(self, folder = None, files = None, release_download = None):
folder = os.path.normpath(folder)
groups = fireEvent('scanner.scan', folder = folder, files = files, single = True)
if groups:
for group in groups.values():
if group.get('media'):
if release_download and release_download.get('release_id'):
fireEvent('release.add', group = group, update_id = release_download.get('release_id'))
else:
fireEvent('release.add', group = group)
def getDiskSpace(self):
return getFreeSpace(self.directories())
config = [{
'name': 'manage',
'groups': [
{
'tab': 'manage',
'label': 'Movie Library Manager',
'description': 'Add your existing movie folders.',
'options': [
{
'name': 'enabled',
'default': False,
'type': 'enabler',
},
{
'name': 'library',
'type': 'directories',
                    'description': 'Folders that contain your existing movies.',
},
{
'label': 'Cleanup After',
'name': 'cleanup',
'type': 'bool',
'description': 'Remove movie from db if it can\'t be found after re-scan.',
'default': True,
},
{
'label': 'Scan at startup',
'name': 'startup_scan',
'type': 'bool',
'default': True,
'advanced': True,
'description': 'Do a quick scan on startup. On slow systems better disable this.',
},
{
'label': 'Full library refresh',
'name': 'library_refresh_interval',
'type': 'int',
'default': 0,
'advanced': True,
'description': 'Do a full scan every X hours. (0 is disabled)',
},
],
},
],
}]
|
gpl-3.0
|
ravindrapanda/tensorflow
|
tensorflow/python/kernel_tests/record_input_test.py
|
30
|
5958
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for record_input_op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from tensorflow.python.framework.errors_impl import NotFoundError
from tensorflow.python.lib.io import tf_record
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class RecordInputOpTest(test.TestCase):
def generateTestData(self,
prefix,
n,
m,
compression_type=tf_record.TFRecordCompressionType.NONE):
options = tf_record.TFRecordOptions(compression_type)
for i in range(n):
f = os.path.join(self.get_temp_dir(), prefix + "." + str(i))
w = tf_record.TFRecordWriter(f, options=options)
for j in range(m):
w.write("{0:0{width}}".format(i * m + j, width=10).encode("utf-8"))
w.close()
def testRecordInputSimple(self):
with self.test_session() as sess:
self.generateTestData("basic", 1, 1)
yield_op = data_flow_ops.RecordInput(
file_pattern=os.path.join(self.get_temp_dir(), "basic.*"),
parallelism=1,
buffer_size=1,
batch_size=1,
name="record_input").get_yield_op()
self.assertEqual(sess.run(yield_op), b"0000000000")
def testRecordInputSimpleGzip(self):
with self.test_session() as sess:
self.generateTestData(
"basic",
1,
1,
compression_type=tf_record.TFRecordCompressionType.GZIP)
yield_op = data_flow_ops.RecordInput(
file_pattern=os.path.join(self.get_temp_dir(), "basic.*"),
parallelism=1,
buffer_size=1,
batch_size=1,
name="record_input",
compression_type=tf_record.TFRecordCompressionType.GZIP).get_yield_op(
)
self.assertEqual(sess.run(yield_op), b"0000000000")
def testRecordInputSimpleZlib(self):
with self.test_session() as sess:
self.generateTestData(
"basic",
1,
1,
compression_type=tf_record.TFRecordCompressionType.ZLIB)
yield_op = data_flow_ops.RecordInput(
file_pattern=os.path.join(self.get_temp_dir(), "basic.*"),
parallelism=1,
buffer_size=1,
batch_size=1,
name="record_input",
compression_type=tf_record.TFRecordCompressionType.ZLIB).get_yield_op(
)
self.assertEqual(sess.run(yield_op), b"0000000000")
def testRecordInputEpochs(self):
files = 100
records_per_file = 100
batches = 2
with self.test_session() as sess:
self.generateTestData("basic", files, records_per_file)
records = data_flow_ops.RecordInput(
file_pattern=os.path.join(self.get_temp_dir(), "basic.*"),
parallelism=2,
buffer_size=2000,
batch_size=1,
shift_ratio=0.33,
seed=10,
name="record_input",
batches=batches)
yield_op = records.get_yield_op()
# cycle over 3 epochs and make sure we never duplicate
for _ in range(3):
epoch_set = set()
for _ in range(int(files * records_per_file / batches)):
op_list = sess.run(yield_op)
          self.assertEqual(len(op_list), batches)
for r in op_list:
self.assertTrue(r[0] not in epoch_set)
epoch_set.add(r[0])
def testDoesNotDeadlock(self):
# Iterate multiple times to cause deadlock if there is a chance it can occur
for _ in range(30):
with self.test_session() as sess:
self.generateTestData("basic", 1, 1)
records = data_flow_ops.RecordInput(
file_pattern=os.path.join(self.get_temp_dir(), "basic.*"),
parallelism=1,
buffer_size=100,
batch_size=1,
name="record_input")
yield_op = records.get_yield_op()
for _ in range(50):
sess.run(yield_op)
def testEmptyGlob(self):
with self.test_session() as sess:
record_input = data_flow_ops.RecordInput(file_pattern="foo")
yield_op = record_input.get_yield_op()
sess.run(variables.global_variables_initializer())
with self.assertRaises(NotFoundError):
sess.run(yield_op)
def testBufferTooSmall(self):
files = 10
records_per_file = 10
batches = 2
with self.test_session() as sess:
self.generateTestData("basic", files, records_per_file)
records = data_flow_ops.RecordInput(
file_pattern=os.path.join(self.get_temp_dir(), "basic.*"),
parallelism=2,
buffer_size=2000,
batch_size=1,
shift_ratio=0.33,
seed=10,
name="record_input",
batches=batches)
yield_op = records.get_yield_op()
# cycle over 3 epochs and make sure we never duplicate
for _ in range(3):
epoch_set = set()
for _ in range(int(files * records_per_file / batches)):
op_list = sess.run(yield_op)
          self.assertEqual(len(op_list), batches)
for r in op_list:
self.assertTrue(r[0] not in epoch_set)
epoch_set.add(r[0])
if __name__ == "__main__":
test.main()
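# Hedged non-test usage sketch (assumption, not part of the original file):
#   records = data_flow_ops.RecordInput(
#       file_pattern="/tmp/data/train-*",  # hypothetical path
#       parallelism=4, buffer_size=10000, batch_size=32)
#   batch = records.get_yield_op()  # each sess.run(batch) yields raw records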
|
apache-2.0
|
Macemann/Georgetown-Capstone
|
app/db/etl/Entities.py
|
1
|
1938
|
import nltk
from nltk.corpus import stopwords
class EntityExtractor (object):
'''
Class used to extract Named Entities from a string
'''
def __init__ (self, tweet):
self.tweet = tweet
self.text = tweet['text']
self.lang = tweet['lang']
self.bool = tweet['text'] and tweet['lang'].find('en') > -1
self.tokens, self.tags = self.genTokensTags()
def genTokensTags (self, rem_stop_words=False):
'''
        Generates tokens --> a list of word-token lists, one per sentence
        Generates tags --> for each sentence, a list of (word, part-of-speech) tuples
'''
if self.bool:
text = self.text.replace('#','').replace('RT ', ' ')
if rem_stop_words:
text = ' '.join([word for word in text.split() if word not in stopwords.words('english')])
sentences = nltk.sent_tokenize(text)
tokens = [nltk.word_tokenize(sentence) for sentence in sentences]
tags = [nltk.pos_tag(token) for token in tokens]
return tokens, tags
return None, None
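    # Hedged illustration of the shapes above (assumed output, not from the
    # original): for the text "Ryan Stephany uses Python", tokens would look
    # roughly like
    #   [['Ryan', 'Stephany', 'uses', 'Python']]
    # and tags like
    #   [[('Ryan', 'NNP'), ('Stephany', 'NNP'), ('uses', 'VBZ'), ('Python', 'NNP')]]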
def get_entities (self):
if self.tags:
entities = []
chunks = nltk.batch_ne_chunk(self.tags, binary=True)
for tree in chunks:
entities.extend(self.extractEntities(tree))
return entities
return []
def get_entity_stems (self, words):
porter = nltk.PorterStemmer()
return [porter.stem(w) for w in words]
def extractEntities (self, tree):
'''
Gets the Proper Nouns from the tags
'''
ents = []
if hasattr(tree,'node') and tree.node:
if tree.node == 'NE':
ents.append(' '.join([child[0] for child in tree]))
else:
for child in tree:
if child:
ents.extend(self.extractEntities(child))
return ents
if __name__ == '__main__':
tweet = {'text': 'This is an example tweet made by Ryan Stephany on Ubuntu using Python','lang': 'en'}
entity_extractor = EntityExtractor(tweet)
entities = entity_extractor.get_entities()
print entities
stems = entity_extractor.get_entity_stems(entities)
print stems
|
mit
|
tushar7795/MicroBlog
|
flask/lib/python2.7/site-packages/whoosh/automata/glob.py
|
53
|
3333
|
# Copyright 2012 Matt Chaput. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY MATT CHAPUT ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL MATT CHAPUT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are
# those of the authors and should not be interpreted as representing official
# policies, either expressed or implied, of Matt Chaput.
from whoosh.automata.fsa import ANY, EPSILON, NFA
# Constants for glob
_LIT = 0
_STAR = 1
_PLUS = 2
_QUEST = 3
_RANGE = 4
def parse_glob(pattern, _glob_multi="*", _glob_single="?",
_glob_range1="[", _glob_range2="]"):
pos = 0
last = None
while pos < len(pattern):
char = pattern[pos]
pos += 1
if char == _glob_multi: # *
# (Ignore more than one star in a row)
if last is not _STAR:
yield _STAR, None
last = _STAR
elif char == _glob_single: # ?
# (Ignore ? after a star)
if last is not _STAR:
yield _QUEST, None
last = _QUEST
elif char == _glob_range1: # [
chars = set()
negate = False
# Take the char range specification until the ]
while pos < len(pattern):
char = pattern[pos]
pos += 1
if char == _glob_range2:
break
chars.add(char)
if chars:
yield _RANGE, (chars, negate)
last = _RANGE
else:
yield _LIT, char
last = _LIT
def glob_automaton(pattern):
nfa = NFA(0)
i = -1
for i, (op, arg) in enumerate(parse_glob(pattern)):
if op is _LIT:
nfa.add_transition(i, arg, i + 1)
elif op is _STAR:
nfa.add_transition(i, ANY, i + 1)
nfa.add_transition(i, EPSILON, i + 1)
nfa.add_transition(i + 1, EPSILON, i)
elif op is _QUEST:
nfa.add_transition(i, ANY, i + 1)
elif op is _RANGE:
for char in arg[0]:
nfa.add_transition(i, char, i + 1)
nfa.add_final_state(i + 1)
return nfa
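# Hedged worked example (assumption, not part of the original module):
#   list(parse_glob("a*[bc]?")) ->
#       [(_LIT, 'a'), (_STAR, None), (_RANGE, ({'b', 'c'}, False)), (_QUEST, None)]
# glob_automaton then maps each op onto NFA transitions; the _STAR case adds
# an ANY self-loop plus epsilon edges so it matches zero or more characters.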
|
bsd-3-clause
|
eliastor/rt-thread
|
bsp/jz47xx/rtconfig.py
|
6
|
1704
|
import os
# toolchains options
ARCH = 'mips'
CPU = 'jz47xx'
CROSS_TOOL = 'gcc'
if os.getenv('RTT_CC'):
CROSS_TOOL = os.getenv('RTT_CC')
if CROSS_TOOL == 'gcc':
PLATFORM = 'gcc'
EXEC_PATH = 'E:/Program Files/CodeSourcery/Sourcery G++ Lite/bin'
elif CROSS_TOOL == 'keil':
print '================ERROR============================'
    print 'Keil is not supported yet!'
print '================================================='
exit(0)
elif CROSS_TOOL == 'iar':
print '================ERROR============================'
    print 'IAR is not supported yet!'
print '================================================='
exit(0)
if os.getenv('RTT_EXEC_PATH'):
EXEC_PATH = os.getenv('RTT_EXEC_PATH')
BUILD = 'debug'
# toolchains
PREFIX = 'mips-sde-elf-'
CC = PREFIX + 'gcc'
AS = PREFIX + 'gcc'
AR = PREFIX + 'ar'
LINK = PREFIX + 'gcc'
TARGET_EXT = 'elf'
SIZE = PREFIX + 'size'
OBJDUMP = PREFIX + 'objdump'
OBJCPY = PREFIX + 'objcopy'
DEVICE = ' -mips32 -msoft-float'
CFLAGS = DEVICE + ' -EL -G0 -DRT_USING_MINILIBC -mno-abicalls -fno-pic -fno-builtin -fno-exceptions -ffunction-sections -fomit-frame-pointer'
AFLAGS = ' -c' + DEVICE + ' -EL -x assembler-with-cpp'
LFLAGS = DEVICE + ' -EL -Wl,--gc-sections,-Map=rtthread-jz47xx.map,-cref,-u,Reset_Handler -T jz47xx_ram.lds'
CPATH = ''
LPATH = ''
if BUILD == 'debug':
CFLAGS += ' -O0 -gdwarf-2'
AFLAGS += ' -gdwarf-2'
else:
CFLAGS += ' -O2'
RT_USING_MINILIBC = True
DUMP_ACTION = OBJDUMP + ' -D -S $TARGET > rtt.asm\n'
COPY_ACTION = 'copy rtthread.bin usbboot\n'
POST_ACTION = OBJCPY + ' -O binary $TARGET rtthread.bin\n' + SIZE + ' $TARGET \n' + COPY_ACTION
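# Hedged usage note (assumption, not part of the original file): with the
# RT-Thread scons build this file is read automatically, and the toolchain
# can be selected via the environment variables checked above, e.g.:
#   set RTT_CC=gcc
#   set RTT_EXEC_PATH=D:/CodeSourcery/bin
#   scons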
|
gpl-2.0
|
NejcZupec/ggrc-core
|
src/ggrc_basic_permissions/roles/Creator.py
|
1
|
6820
|
# Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
scope = "System"
description = """
This role grants a user basic object creation and editing permission.
"""
owner_base = [
"Categorization",
"Category",
"ControlCategory",
"ControlAssertion",
{
"type": "Issue",
"terms": {
"list_property": "owners",
"value": "$current_user"
},
"condition": "contains"
},
{
"type": "Control",
"terms": {
"list_property": "owners",
"value": "$current_user"
},
"condition": "contains"
},
{
"type": "DataAsset",
"terms": {
"list_property": "owners",
"value": "$current_user"
},
"condition": "contains"
},
{
"type": "AccessGroup",
"terms": {
"list_property": "owners",
"value": "$current_user"
},
"condition": "contains"
},
{
"type": "Directive",
"terms": {
"list_property": "owners",
"value": "$current_user"
},
"condition": "contains"
},
{
"type": "Contract",
"terms": {
"list_property": "owners",
"value": "$current_user"
},
"condition": "contains"
},
{
"type": "Policy",
"terms": {
"list_property": "owners",
"value": "$current_user"
},
"condition": "contains"
},
{
"type": "Regulation",
"terms": {
"list_property": "owners",
"value": "$current_user"
},
"condition": "contains"
},
{
"type": "Standard",
"terms": {
"list_property": "owners",
"value": "$current_user"
},
"condition": "contains"
},
{
"type": "Facility",
"terms": {
"list_property": "owners",
"value": "$current_user"
},
"condition": "contains"
},
{
"type": "Market",
"terms": {
"list_property": "owners",
"value": "$current_user"
},
"condition": "contains"
},
{
"type": "Objective",
"terms": {
"list_property": "owners",
"value": "$current_user"
},
"condition": "contains"
},
"ObjectDocument",
"ObjectOwner",
"ObjectPerson",
{
"type": "Option",
"terms": {
"list_property": "owners",
"value": "$current_user"
},
"condition": "contains"
},
{
"type": "OrgGroup",
"terms": {
"list_property": "owners",
"value": "$current_user"
},
"condition": "contains"
},
{
"type": "Vendor",
"terms": {
"list_property": "owners",
"value": "$current_user"
},
"condition": "contains"
},
{
"type": "Product",
"terms": {
"list_property": "owners",
"value": "$current_user"
},
"condition": "contains"
},
{
"type": "Section",
"terms": {
"list_property": "owners",
"value": "$current_user"
},
"condition": "contains"
},
{
"type": "Clause",
"terms": {
"list_property": "owners",
"value": "$current_user"
},
"condition": "contains"
},
{
"type": "SystemOrProcess",
"terms": {
"list_property": "owners",
"value": "$current_user"
},
"condition": "contains"
},
{
"type": "System",
"terms": {
"list_property": "owners",
"value": "$current_user"
},
"condition": "contains"
},
{
"type": "Process",
"terms": {
"list_property": "owners",
"value": "$current_user"
},
"condition": "contains"
},
{
"type": "Project",
"terms": {
"list_property": "owners",
"value": "$current_user"
},
"condition": "contains"
},
{
"type": "BackgroundTask",
"terms": {
"property_name": "modified_by",
"value": "$current_user"
},
"condition": "is"
},
"CustomAttributeDefinition",
"CustomAttributeValue",
]
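# Hedged reading of the rule shape above (assumption, not from the source):
# an entry such as
#   {"type": "Control",
#    "terms": {"list_property": "owners", "value": "$current_user"},
#    "condition": "contains"}
# grants the permission on a Control only when the current user appears in
# that Control's `owners` list, while a plain string grants it unconditionally
# for that type.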
owner_read = owner_base + [
{
"type": "Relationship",
"terms": {
"property_name": "source,destination",
"action": "read"
},
"condition": "relationship",
},
"Role",
"UserRole",
"Context",
"Person",
]
owner_update = owner_base + [
{
"type": "Relationship",
"terms": {
"property_name": "source,destination",
"action": "update"
},
"condition": "relationship",
},
{
"type": "Comment",
"terms": {
"property_name": "modified_by",
"value": "$current_user"
},
"condition": "is"
},
]
permissions = {
"read": owner_read,
"create": [
"Workflow"
"Categorization",
"Category",
"ControlCategory",
"ControlAssertion",
"Control",
"Comment",
"Assessment",
"Issue",
"DataAsset",
"AccessGroup",
"Directive",
"Contract",
"Policy",
"Regulation",
"Standard",
"Document",
"Facility",
"Help",
"Market",
"Objective",
"ObjectDocument",
"ObjectPerson",
"Option",
"OrgGroup",
"Vendor",
"PopulationSample",
"Product",
"Project",
{
"type": "Relationship",
"terms": {
"property_name": "source,destination",
"action": "update"
},
"condition": "relationship",
},
"Section",
"Clause",
"SystemOrProcess",
"System",
"Process",
{
"type": "ObjectOwner",
"terms": {
"property_name": "ownable.modified_by",
"value": "$current_user"
},
"condition": "is"
},
"Program",
"Context",
{
"type": "BackgroundTask",
"terms": {
"property_name": "modified_by",
"value": "$current_user"
},
"condition": "is"
},
],
"view_object_page": owner_read,
"update": owner_update,
"delete": owner_update,
}
|
apache-2.0
|
arokem/nipy
|
nipy/modalities/fmri/fmri.py
|
3
|
4969
|
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
import warnings
import numpy as np
from ...core.api import ImageList
class FmriImageList(ImageList):
""" Class to implement image list interface for FMRI time series
Allows metadata such as volume and slice times
"""
def __init__(self, images=None, volume_start_times=None, slice_times=None):
"""
An implementation of an fMRI image as in ImageList
Parameters
----------
images : iterable
an iterable object whose items are meant to be images; this is
checked by asserting that each has a `coordmap` attribute and a
``get_data`` method. Note that Image objects are not iterable by
default; use the ``from_image`` classmethod or ``iter_axis`` function
to convert images to image lists - see examples below for the latter.
volume_start_times: None or float or (N,) ndarray
start time of each frame. It can be specified either as an ndarray
with ``N=len(images)`` elements or as a single float, the TR. None
results in ``np.arange(len(images)).astype(np.float)``
slice_times: None or (N,) ndarray
specifying offset for each slice of each frame, from the frame start
time.
See Also
--------
nipy.core.image_list.ImageList
Examples
--------
>>> from nipy.testing import funcfile
>>> from nipy.io.api import load_image
>>> from nipy.core.api import iter_axis
>>> funcim = load_image(funcfile)
>>> iterable_img = iter_axis(funcim, 't')
>>> fmrilist = FmriImageList(iterable_img)
>>> print fmrilist.get_list_data(axis=0).shape
(20, 17, 21, 3)
>>> print fmrilist[4].shape
(17, 21, 3)
"""
ImageList.__init__(self, images=images)
if volume_start_times is None:
volume_start_times = 1.
v = np.asarray(volume_start_times)
length = len(self.list)
if v.shape == (length,):
self.volume_start_times = volume_start_times
else:
v = float(volume_start_times)
self.volume_start_times = np.arange(length) * v
self.slice_times = slice_times
def __getitem__(self, index):
"""
If index is an index, return self.list[index], an Image
else return an FmriImageList with images=self.list[index].
"""
if type(index) is type(1):
return self.list[index]
return self.__class__(
images=self.list[index],
volume_start_times=self.volume_start_times[index],
slice_times=self.slice_times)
@classmethod
def from_image(klass, fourdimage, axis='t',
volume_start_times=None, slice_times=None):
"""Create an FmriImageList from a 4D Image
Get images by extracting 3d images along the 't' axis.
Parameters
----------
fourdimage : ``Image`` instance
A 4D Image
volume_start_times: None or float or (N,) ndarray
start time of each frame. It can be specified either as an ndarray
with ``N=len(images)`` elements or as a single float, the TR. None
results in ``np.arange(len(images)).astype(np.float)``
slice_times: None or (N,) ndarray
specifying offset for each slice of each frame, from the frame start
time.
Returns
-------
filist : ``FmriImageList`` instance
"""
if fourdimage.ndim != 4:
raise ValueError('expecting a 4-dimensional Image')
image_list = ImageList.from_image(fourdimage, axis)
return klass(images=image_list.list,
volume_start_times=volume_start_times,
slice_times=slice_times)
def axis0_generator(data, slicers=None):
""" Takes array-like `data`, returning slices over axes > 0
This function takes an array-like object `data` and yields tuples of slicing
thing and slices like::
[slicer, np.asarray(data)[:,slicer] for slicer in slicer]
which in the default (`slicers` is None) case, boils down to::
[i, np.asarray(data)[:,i] for i in range(data.shape[1])]
This can be used to get arrays of time series out of an array if the time
axis is axis 0.
Parameters
----------
data : array-like
object such that ``arr = np.asarray(data)`` returns an array of
at least 2 dimensions.
slicers : None or sequence
sequence of objects that can be used to slice into array ``arr``
returned from data. If None, default is ``range(data.shape[1])``
"""
arr = np.asarray(data)
if slicers is None:
slicers = range(arr.shape[1])
for slicer in slicers:
yield slicer, arr[:,slicer]
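# Hedged worked example (assumption, not part of the original module): for a
# (20, 4) array holding 20 time points for 4 voxels, axis0_generator(data)
# yields 4 pairs (i, series) where each `series` is the length-20 time course
# arr[:, i].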
|
bsd-3-clause
|
haojunyu/numpy
|
numpy/core/tests/test_deprecations.py
|
7
|
15338
|
"""
Tests related to deprecation warnings. Also a convenient place
to document how deprecations should eventually be turned into errors.
"""
from __future__ import division, absolute_import, print_function
import sys
import operator
import warnings
import numpy as np
from numpy.testing import (run_module_suite, assert_raises,
assert_warns, assert_array_equal, assert_)
class _DeprecationTestCase(object):
    # Just as a warning: warnings uses re.match, so the start of this message
# must match.
message = ''
def setUp(self):
self.warn_ctx = warnings.catch_warnings(record=True)
self.log = self.warn_ctx.__enter__()
# Do *not* ignore other DeprecationWarnings. Ignoring warnings
# can give very confusing results because of
# http://bugs.python.org/issue4180 and it is probably simplest to
# try to keep the tests cleanly giving only the right warning type.
# (While checking them set to "error" those are ignored anyway)
# We still have them show up, because otherwise they would be raised
warnings.filterwarnings("always", category=DeprecationWarning)
warnings.filterwarnings("always", message=self.message,
category=DeprecationWarning)
def tearDown(self):
self.warn_ctx.__exit__()
def assert_deprecated(self, function, num=1, ignore_others=False,
function_fails=False,
exceptions=(DeprecationWarning,), args=(), kwargs={}):
"""Test if DeprecationWarnings are given and raised.
This first checks if the function when called gives `num`
DeprecationWarnings, after that it tries to raise these
DeprecationWarnings and compares them with `exceptions`.
The exceptions can be different for cases where this code path
is simply not anticipated and the exception is replaced.
Parameters
----------
        function : callable
            The function to test
        num : int
            Number of DeprecationWarnings to expect. This should normally be 1.
        ignore_others : bool
Whether warnings of the wrong type should be ignored (note that
the message is not checked)
function_fails : bool
If the function would normally fail, setting this will check for
warnings inside a try/except block.
exceptions : Exception or tuple of Exceptions
Exception to expect when turning the warnings into an error.
The default checks for DeprecationWarnings. If exceptions is
            empty the function is expected to run successfully.
args : tuple
Arguments for `f`
kwargs : dict
Keyword arguments for `f`
"""
# reset the log
self.log[:] = []
try:
function(*args, **kwargs)
except (Exception if function_fails else tuple()):
pass
# just in case, clear the registry
num_found = 0
for warning in self.log:
if warning.category is DeprecationWarning:
num_found += 1
elif not ignore_others:
raise AssertionError(
"expected DeprecationWarning but got: %s" %
(warning.category,))
if num is not None and num_found != num:
msg = "%i warnings found but %i expected." % (len(self.log), num)
            lst = [str(w.category) for w in self.log]
            raise AssertionError("\n".join([msg] + lst))
with warnings.catch_warnings():
warnings.filterwarnings("error", message=self.message,
category=DeprecationWarning)
try:
function(*args, **kwargs)
if exceptions != tuple():
raise AssertionError(
"No error raised during function call")
except exceptions:
if exceptions == tuple():
raise AssertionError(
"Error raised during function call")
def assert_not_deprecated(self, function, args=(), kwargs={}):
"""Test if DeprecationWarnings are given and raised.
This is just a shorthand for:
self.assert_deprecated(function, num=0, ignore_others=True,
exceptions=tuple(), args=args, kwargs=kwargs)
"""
self.assert_deprecated(function, num=0, ignore_others=True,
exceptions=tuple(), args=args, kwargs=kwargs)
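# Hedged usage sketch (assumption, mirroring the subclasses below): a minimal
# deprecation test derives from _DeprecationTestCase, sets `message` to match
# the start of the warning text, and hands assert_deprecated the callable
# under test:
#
#   class TestBoolNegDeprecation(_DeprecationTestCase):
#       message = r"numpy boolean negative, the `-` operator, .*"
#       def test_neg(self):
#           self.assert_deprecated(operator.neg, args=(np.array([True]),))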
class TestBooleanUnaryMinusDeprecation(_DeprecationTestCase):
"""Test deprecation of unary boolean `-`. While + and * are well
defined, unary - is not and even a corrected form seems to have
no real uses.
The deprecation process was started in NumPy 1.9.
"""
message = r"numpy boolean negative, the `-` operator, .*"
def test_unary_minus_operator_deprecation(self):
array = np.array([True])
generic = np.bool_(True)
# Unary minus/negative ufunc:
self.assert_deprecated(operator.neg, args=(array,))
self.assert_deprecated(operator.neg, args=(generic,))
class TestBooleanBinaryMinusDeprecation(_DeprecationTestCase):
"""Test deprecation of binary boolean `-`. While + and * are well
defined, binary - is not and even a corrected form seems to have
no real uses.
The deprecation process was started in NumPy 1.9.
"""
message = r"numpy boolean subtract, the `-` operator, .*"
def test_operator_deprecation(self):
array = np.array([True])
generic = np.bool_(True)
# Minus operator/subtract ufunc:
self.assert_deprecated(operator.sub, args=(array, array))
self.assert_deprecated(operator.sub, args=(generic, generic))
class TestRankDeprecation(_DeprecationTestCase):
"""Test that np.rank is deprecated. The function should simply be
removed. The VisibleDeprecationWarning may become unnecessary.
"""
def test(self):
a = np.arange(10)
assert_warns(np.VisibleDeprecationWarning, np.rank, a)
class TestComparisonDeprecations(_DeprecationTestCase):
"""This tests the deprecation, for non-elementwise comparison logic.
    This used to mean that when an error occurred during element-wise comparison
(i.e. broadcasting) NotImplemented was returned, but also in the comparison
itself, False was given instead of the error.
Also test FutureWarning for the None comparison.
"""
message = "elementwise.* comparison failed; .*"
def test_normal_types(self):
for op in (operator.eq, operator.ne):
# Broadcasting errors:
self.assert_deprecated(op, args=(np.zeros(3), []))
a = np.zeros(3, dtype='i,i')
# (warning is issued a couple of times here)
self.assert_deprecated(op, args=(a, a[:-1]), num=None)
# Element comparison error (numpy array can't be compared).
a = np.array([1, np.array([1,2,3])], dtype=object)
b = np.array([1, np.array([1,2,3])], dtype=object)
self.assert_deprecated(op, args=(a, b), num=None)
def test_string(self):
# For two string arrays, strings always raised the broadcasting error:
a = np.array(['a', 'b'])
b = np.array(['a', 'b', 'c'])
assert_raises(ValueError, lambda x, y: x == y, a, b)
# The empty list is not cast to string, this is only to document
# that fact (it likely should be changed). This means that the
# following works (and returns False) due to dtype mismatch:
a == []
def test_none_comparison(self):
# Test comparison of None, which should result in elementwise
# comparison in the future. [1, 2] == None should be [False, False].
with warnings.catch_warnings():
warnings.filterwarnings('always', '', FutureWarning)
assert_warns(FutureWarning, operator.eq, np.arange(3), None)
assert_warns(FutureWarning, operator.ne, np.arange(3), None)
with warnings.catch_warnings():
warnings.filterwarnings('error', '', FutureWarning)
assert_raises(FutureWarning, operator.eq, np.arange(3), None)
assert_raises(FutureWarning, operator.ne, np.arange(3), None)
def test_scalar_none_comparison(self):
# Scalars should still just return false and not give a warnings.
# The comparisons are flagged by pep8, ignore that.
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', FutureWarning)
assert_(not np.float32(1) == None)
assert_(not np.str_('test') == None)
# This is dubious (see below):
assert_(not np.datetime64('NaT') == None)
assert_(np.float32(1) != None)
assert_(np.str_('test') != None)
# This is dubious (see below):
assert_(np.datetime64('NaT') != None)
assert_(len(w) == 0)
        # For documentation purposes, this is why the datetime is dubious.
# At the time of deprecation this was no behaviour change, but
# it has to be considered when the deprecations is done.
assert_(np.equal(np.datetime64('NaT'), None))
def test_void_dtype_equality_failures(self):
class NotArray(object):
def __array__(self):
raise TypeError
# Needed so Python 3 does not raise DeprecationWarning twice.
def __ne__(self, other):
return NotImplemented
self.assert_deprecated(lambda: np.arange(2) == NotArray())
self.assert_deprecated(lambda: np.arange(2) != NotArray())
struct1 = np.zeros(2, dtype="i4,i4")
struct2 = np.zeros(2, dtype="i4,i4,i4")
assert_warns(FutureWarning, lambda: struct1 == 1)
assert_warns(FutureWarning, lambda: struct1 == struct2)
assert_warns(FutureWarning, lambda: struct1 != 1)
assert_warns(FutureWarning, lambda: struct1 != struct2)
def test_array_richcompare_legacy_weirdness(self):
# It doesn't really work to use assert_deprecated here, b/c part of
# the point of assert_deprecated is to check that when warnings are
# set to "error" mode then the error is propagated -- which is good!
# But here we are testing a bunch of code that is deprecated *because*
# it has the habit of swallowing up errors and converting them into
# different warnings. So assert_warns will have to be sufficient.
assert_warns(FutureWarning, lambda: np.arange(2) == "a")
assert_warns(FutureWarning, lambda: np.arange(2) != "a")
# No warning for scalar comparisons
with warnings.catch_warnings():
warnings.filterwarnings("error")
assert_(not (np.array(0) == "a"))
assert_(np.array(0) != "a")
assert_(not (np.int16(0) == "a"))
assert_(np.int16(0) != "a")
for arg1 in [np.asarray(0), np.int16(0)]:
struct = np.zeros(2, dtype="i4,i4")
for arg2 in [struct, "a"]:
for f in [operator.lt, operator.le, operator.gt, operator.ge]:
if sys.version_info[0] >= 3:
# py3
                    with warnings.catch_warnings(record=True) as l:
warnings.filterwarnings("always")
assert_raises(TypeError, f, arg1, arg2)
assert not l
else:
# py2
assert_warns(DeprecationWarning, f, arg1, arg2)
class TestIdentityComparisonDeprecations(_DeprecationTestCase):
"""This tests the equal and not_equal object ufuncs identity check
deprecation. This was due to the usage of PyObject_RichCompareBool.
This tests that for example for `a = np.array([np.nan], dtype=object)`
`a == a` it is warned that False and not `np.nan is np.nan` is returned.
Should be kept in sync with TestComparisonDeprecations and new tests
added when the deprecation is over. Requires only removing of @identity@
(and blocks) from the ufunc loops.c.src of the OBJECT comparisons.
"""
message = "numpy .* will not check object identity in the future."
def test_identity_equality_mismatch(self):
a = np.array([np.nan], dtype=object)
with warnings.catch_warnings():
warnings.filterwarnings('always', '', FutureWarning)
assert_warns(FutureWarning, np.equal, a, a)
assert_warns(FutureWarning, np.not_equal, a, a)
with warnings.catch_warnings():
warnings.filterwarnings('error', '', FutureWarning)
assert_raises(FutureWarning, np.equal, a, a)
assert_raises(FutureWarning, np.not_equal, a, a)
# And the other do not warn:
with np.errstate(invalid='ignore'):
np.less(a, a)
np.greater(a, a)
np.less_equal(a, a)
np.greater_equal(a, a)
def test_comparison_error(self):
class FunkyType(object):
def __eq__(self, other):
raise TypeError("I won't compare")
def __ne__(self, other):
raise TypeError("I won't compare")
a = np.array([FunkyType()])
self.assert_deprecated(np.equal, args=(a, a))
self.assert_deprecated(np.not_equal, args=(a, a))
def test_bool_error(self):
# The comparison result cannot be interpreted as a bool
a = np.array([np.array([1, 2, 3]), None], dtype=object)
self.assert_deprecated(np.equal, args=(a, a))
self.assert_deprecated(np.not_equal, args=(a, a))
class TestAlterdotRestoredotDeprecations(_DeprecationTestCase):
"""The alterdot/restoredot functions are deprecated.
These functions no longer do anything in numpy 1.10, so should not be
used.
"""
def test_alterdot_restoredot_deprecation(self):
self.assert_deprecated(np.alterdot)
self.assert_deprecated(np.restoredot)
class TestBooleanIndexShapeMismatchDeprecation():
"""Tests deprecation for boolean indexing where the boolean array
    does not match the input array along the given dimensions.
"""
message = r"boolean index did not match indexed array"
def test_simple(self):
arr = np.ones((5, 4, 3))
index = np.array([True])
#self.assert_deprecated(arr.__getitem__, args=(index,))
assert_warns(np.VisibleDeprecationWarning,
arr.__getitem__, index)
index = np.array([False] * 6)
#self.assert_deprecated(arr.__getitem__, args=(index,))
assert_warns(np.VisibleDeprecationWarning,
arr.__getitem__, index)
index = np.zeros((4, 4), dtype=bool)
#self.assert_deprecated(arr.__getitem__, args=(index,))
assert_warns(np.VisibleDeprecationWarning,
arr.__getitem__, index)
#self.assert_deprecated(arr.__getitem__, args=((slice(None), index),))
assert_warns(np.VisibleDeprecationWarning,
arr.__getitem__, (slice(None), index))
if __name__ == "__main__":
run_module_suite()
|
bsd-3-clause
|
kitanata/resume
|
env/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/sysconfig.py
|
168
|
26964
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2012 The Python Software Foundation.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
"""Access to Python's configuration information."""
import codecs
import os
import re
import sys
from os.path import pardir, realpath
try:
import configparser
except ImportError:
import ConfigParser as configparser
__all__ = [
'get_config_h_filename',
'get_config_var',
'get_config_vars',
'get_makefile_filename',
'get_path',
'get_path_names',
'get_paths',
'get_platform',
'get_python_version',
'get_scheme_names',
'parse_config_h',
]
def _safe_realpath(path):
try:
return realpath(path)
except OSError:
return path
if sys.executable:
_PROJECT_BASE = os.path.dirname(_safe_realpath(sys.executable))
else:
# sys.executable can be empty if argv[0] has been changed and Python is
# unable to retrieve the real program name
_PROJECT_BASE = _safe_realpath(os.getcwd())
if os.name == "nt" and "pcbuild" in _PROJECT_BASE[-8:].lower():
_PROJECT_BASE = _safe_realpath(os.path.join(_PROJECT_BASE, pardir))
# PC/VS7.1
if os.name == "nt" and "\\pc\\v" in _PROJECT_BASE[-10:].lower():
_PROJECT_BASE = _safe_realpath(os.path.join(_PROJECT_BASE, pardir, pardir))
# PC/AMD64
if os.name == "nt" and "\\pcbuild\\amd64" in _PROJECT_BASE[-14:].lower():
_PROJECT_BASE = _safe_realpath(os.path.join(_PROJECT_BASE, pardir, pardir))
def is_python_build():
for fn in ("Setup.dist", "Setup.local"):
if os.path.isfile(os.path.join(_PROJECT_BASE, "Modules", fn)):
return True
return False
_PYTHON_BUILD = is_python_build()
_cfg_read = False
def _ensure_cfg_read():
global _cfg_read
if not _cfg_read:
from distlib.resources import finder
backport_package = __name__.rsplit('.', 1)[0]
_finder = finder(backport_package)
_cfgfile = _finder.find('sysconfig.cfg')
assert _cfgfile, 'sysconfig.cfg exists'
with _cfgfile.as_stream() as s:
_SCHEMES.readfp(s)
if _PYTHON_BUILD:
for scheme in ('posix_prefix', 'posix_home'):
_SCHEMES.set(scheme, 'include', '{srcdir}/Include')
_SCHEMES.set(scheme, 'platinclude', '{projectbase}/.')
_cfg_read = True
_SCHEMES = configparser.RawConfigParser()
_VAR_REPL = re.compile(r'\{([^{]*?)\}')
def _expand_globals(config):
_ensure_cfg_read()
if config.has_section('globals'):
globals = config.items('globals')
else:
globals = tuple()
sections = config.sections()
for section in sections:
if section == 'globals':
continue
for option, value in globals:
if config.has_option(section, option):
continue
config.set(section, option, value)
config.remove_section('globals')
# now expanding local variables defined in the cfg file
#
for section in config.sections():
variables = dict(config.items(section))
def _replacer(matchobj):
name = matchobj.group(1)
if name in variables:
return variables[name]
return matchobj.group(0)
for option, value in config.items(section):
config.set(section, option, _VAR_REPL.sub(_replacer, value))
#_expand_globals(_SCHEMES)
# FIXME don't rely on sys.version here, its format is an implementation detail
# of CPython, use sys.version_info or sys.hexversion
_PY_VERSION = sys.version.split()[0]
_PY_VERSION_SHORT = sys.version[:3]
_PY_VERSION_SHORT_NO_DOT = _PY_VERSION[0] + _PY_VERSION[2]
_PREFIX = os.path.normpath(sys.prefix)
_EXEC_PREFIX = os.path.normpath(sys.exec_prefix)
_CONFIG_VARS = None
_USER_BASE = None
def _subst_vars(path, local_vars):
"""In the string `path`, replace tokens like {some.thing} with the
corresponding value from the map `local_vars`.
If there is no corresponding value, leave the token unchanged.
"""
def _replacer(matchobj):
name = matchobj.group(1)
if name in local_vars:
return local_vars[name]
elif name in os.environ:
return os.environ[name]
return matchobj.group(0)
return _VAR_REPL.sub(_replacer, path)
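# Hedged example (assumption, not part of the original module):
#   _subst_vars('{userbase}/bin', {'userbase': '/home/me/.local'})
# returns '/home/me/.local/bin'; unknown tokens such as '{missing}' are left
# unchanged unless a value is found in os.environ.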
def _extend_dict(target_dict, other_dict):
target_keys = target_dict.keys()
for key, value in other_dict.items():
if key in target_keys:
continue
target_dict[key] = value
def _expand_vars(scheme, vars):
res = {}
if vars is None:
vars = {}
_extend_dict(vars, get_config_vars())
for key, value in _SCHEMES.items(scheme):
if os.name in ('posix', 'nt'):
value = os.path.expanduser(value)
res[key] = os.path.normpath(_subst_vars(value, vars))
return res
def format_value(value, vars):
def _replacer(matchobj):
name = matchobj.group(1)
if name in vars:
return vars[name]
return matchobj.group(0)
return _VAR_REPL.sub(_replacer, value)
def _get_default_scheme():
if os.name == 'posix':
# the default scheme for posix is posix_prefix
return 'posix_prefix'
return os.name
def _getuserbase():
env_base = os.environ.get("PYTHONUSERBASE", None)
def joinuser(*args):
return os.path.expanduser(os.path.join(*args))
# what about 'os2emx', 'riscos' ?
if os.name == "nt":
base = os.environ.get("APPDATA") or "~"
if env_base:
return env_base
else:
return joinuser(base, "Python")
if sys.platform == "darwin":
framework = get_config_var("PYTHONFRAMEWORK")
if framework:
if env_base:
return env_base
else:
return joinuser("~", "Library", framework, "%d.%d" %
sys.version_info[:2])
if env_base:
return env_base
else:
return joinuser("~", ".local")
def _parse_makefile(filename, vars=None):
"""Parse a Makefile-style file.
A dictionary containing name/value pairs is returned. If an
optional dictionary is passed in as the second argument, it is
used instead of a new dictionary.
"""
# Regexes needed for parsing Makefile (and similar syntaxes,
# like old-style Setup files).
    _variable_rx = re.compile(r"([a-zA-Z][a-zA-Z0-9_]+)\s*=\s*(.*)")
_findvar1_rx = re.compile(r"\$\(([A-Za-z][A-Za-z0-9_]*)\)")
_findvar2_rx = re.compile(r"\${([A-Za-z][A-Za-z0-9_]*)}")
if vars is None:
vars = {}
done = {}
notdone = {}
with codecs.open(filename, encoding='utf-8', errors="surrogateescape") as f:
lines = f.readlines()
for line in lines:
if line.startswith('#') or line.strip() == '':
continue
m = _variable_rx.match(line)
if m:
n, v = m.group(1, 2)
v = v.strip()
# `$$' is a literal `$' in make
tmpv = v.replace('$$', '')
if "$" in tmpv:
notdone[n] = v
else:
try:
v = int(v)
except ValueError:
# insert literal `$'
done[n] = v.replace('$$', '$')
else:
done[n] = v
# do variable interpolation here
variables = list(notdone.keys())
# Variables with a 'PY_' prefix in the makefile. These need to
# be made available without that prefix through sysconfig.
# Special care is needed to ensure that variable expansion works, even
# if the expansion uses the name without a prefix.
renamed_variables = ('CFLAGS', 'LDFLAGS', 'CPPFLAGS')
while len(variables) > 0:
for name in tuple(variables):
value = notdone[name]
m = _findvar1_rx.search(value) or _findvar2_rx.search(value)
if m is not None:
n = m.group(1)
found = True
if n in done:
item = str(done[n])
elif n in notdone:
# get it on a subsequent round
found = False
elif n in os.environ:
# do it like make: fall back to environment
item = os.environ[n]
elif n in renamed_variables:
if (name.startswith('PY_') and
name[3:] in renamed_variables):
item = ""
elif 'PY_' + n in notdone:
found = False
else:
item = str(done['PY_' + n])
else:
done[n] = item = ""
if found:
after = value[m.end():]
value = value[:m.start()] + item + after
if "$" in after:
notdone[name] = value
else:
try:
value = int(value)
except ValueError:
done[name] = value.strip()
else:
done[name] = value
variables.remove(name)
if (name.startswith('PY_') and
name[3:] in renamed_variables):
name = name[3:]
if name not in done:
done[name] = value
else:
# bogus variable reference (e.g. "prefix=$/opt/python");
# just drop it since we can't deal
done[name] = value
variables.remove(name)
# strip spurious spaces
for k, v in done.items():
if isinstance(v, str):
done[k] = v.strip()
# save the results in the global dictionary
vars.update(done)
return vars
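# Illustrative example (hypothetical Makefile fragment):
#
#     prefix=  /usr/local
#     LIBDIR=  $(prefix)/lib
#     OPT=     -DNDEBUG $$(extra)
#
# would parse to {'prefix': '/usr/local', 'LIBDIR': '/usr/local/lib',
# 'OPT': '-DNDEBUG $(extra)'} -- '$$' collapses to a literal '$', and
# $(prefix) is resolved by the interpolation loop above.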
def get_makefile_filename():
"""Return the path of the Makefile."""
if _PYTHON_BUILD:
return os.path.join(_PROJECT_BASE, "Makefile")
if hasattr(sys, 'abiflags'):
config_dir_name = 'config-%s%s' % (_PY_VERSION_SHORT, sys.abiflags)
else:
config_dir_name = 'config'
return os.path.join(get_path('stdlib'), config_dir_name, 'Makefile')
def _init_posix(vars):
"""Initialize the module as appropriate for POSIX systems."""
# load the installed Makefile:
makefile = get_makefile_filename()
try:
_parse_makefile(makefile, vars)
except IOError as e:
msg = "invalid Python installation: unable to open %s" % makefile
if hasattr(e, "strerror"):
msg = msg + " (%s)" % e.strerror
raise IOError(msg)
# load the installed pyconfig.h:
config_h = get_config_h_filename()
try:
with open(config_h) as f:
parse_config_h(f, vars)
except IOError as e:
msg = "invalid Python installation: unable to open %s" % config_h
if hasattr(e, "strerror"):
msg = msg + " (%s)" % e.strerror
raise IOError(msg)
# On AIX, there are wrong paths to the linker scripts in the Makefile
# -- these paths are relative to the Python source, but when installed
# the scripts are in another directory.
if _PYTHON_BUILD:
vars['LDSHARED'] = vars['BLDSHARED']
def _init_non_posix(vars):
"""Initialize the module as appropriate for NT"""
# set basic install directories
vars['LIBDEST'] = get_path('stdlib')
vars['BINLIBDEST'] = get_path('platstdlib')
vars['INCLUDEPY'] = get_path('include')
vars['SO'] = '.pyd'
vars['EXE'] = '.exe'
vars['VERSION'] = _PY_VERSION_SHORT_NO_DOT
vars['BINDIR'] = os.path.dirname(_safe_realpath(sys.executable))
#
# public APIs
#
def parse_config_h(fp, vars=None):
"""Parse a config.h-style file.
A dictionary containing name/value pairs is returned. If an
optional dictionary is passed in as the second argument, it is
used instead of a new dictionary.
"""
if vars is None:
vars = {}
define_rx = re.compile("#define ([A-Z][A-Za-z0-9_]+) (.*)\n")
undef_rx = re.compile("/[*] #undef ([A-Z][A-Za-z0-9_]+) [*]/\n")
while True:
line = fp.readline()
if not line:
break
m = define_rx.match(line)
if m:
n, v = m.group(1, 2)
try:
v = int(v)
except ValueError:
pass
vars[n] = v
else:
m = undef_rx.match(line)
if m:
vars[m.group(1)] = 0
return vars
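# Usage sketch (assuming a readable pyconfig.h; values are
# platform-dependent):
#
#     >>> vars = {}
#     >>> with open(get_config_h_filename()) as f:
#     ...     parse_config_h(f, vars)
#     >>> vars.get('SIZEOF_LONG')   # e.g. 8 on LP64 platforms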
def get_config_h_filename():
"""Return the path of pyconfig.h."""
if _PYTHON_BUILD:
if os.name == "nt":
inc_dir = os.path.join(_PROJECT_BASE, "PC")
else:
inc_dir = _PROJECT_BASE
else:
inc_dir = get_path('platinclude')
return os.path.join(inc_dir, 'pyconfig.h')
def get_scheme_names():
"""Return a tuple containing the schemes names."""
return tuple(sorted(_SCHEMES.sections()))
def get_path_names():
"""Return a tuple containing the paths names."""
# xxx see if we want a static list
return _SCHEMES.options('posix_prefix')
def get_paths(scheme=_get_default_scheme(), vars=None, expand=True):
"""Return a mapping containing an install scheme.
``scheme`` is the install scheme name. If not provided, it will
return the default scheme for the current platform.
"""
_ensure_cfg_read()
if expand:
return _expand_vars(scheme, vars)
else:
return dict(_SCHEMES.items(scheme))
def get_path(name, scheme=_get_default_scheme(), vars=None, expand=True):
"""Return a path corresponding to the scheme.
``scheme`` is the install scheme name.
"""
return get_paths(scheme, vars, expand)[name]
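# Example (editorial; actual values depend on the installation):
#
#     >>> get_path('stdlib')                    # default scheme
#     '/usr/lib/python3.3'
#     >>> get_path('purelib', 'posix_user')
#     '/home/u/.local/lib/python3.3/site-packages'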
def get_config_vars(*args):
"""With no arguments, return a dictionary of all configuration
variables relevant for the current platform.
On Unix, this means every variable defined in Python's installed Makefile;
On Windows and Mac OS it's a much smaller set.
With arguments, return a list of values that result from looking up
each argument in the configuration variable dictionary.
"""
global _CONFIG_VARS
if _CONFIG_VARS is None:
_CONFIG_VARS = {}
# Normalized versions of prefix and exec_prefix are handy to have;
# in fact, these are the standard versions used most places in the
# distutils2 module.
_CONFIG_VARS['prefix'] = _PREFIX
_CONFIG_VARS['exec_prefix'] = _EXEC_PREFIX
_CONFIG_VARS['py_version'] = _PY_VERSION
_CONFIG_VARS['py_version_short'] = _PY_VERSION_SHORT
_CONFIG_VARS['py_version_nodot'] = _PY_VERSION[0] + _PY_VERSION[2]
_CONFIG_VARS['base'] = _PREFIX
_CONFIG_VARS['platbase'] = _EXEC_PREFIX
_CONFIG_VARS['projectbase'] = _PROJECT_BASE
try:
_CONFIG_VARS['abiflags'] = sys.abiflags
except AttributeError:
# sys.abiflags may not be defined on all platforms.
_CONFIG_VARS['abiflags'] = ''
if os.name in ('nt', 'os2'):
_init_non_posix(_CONFIG_VARS)
if os.name == 'posix':
_init_posix(_CONFIG_VARS)
# Setting 'userbase' is done below the call to the
# init function to enable using 'get_config_var' in
# the init-function.
if sys.version >= '2.6':
_CONFIG_VARS['userbase'] = _getuserbase()
if 'srcdir' not in _CONFIG_VARS:
_CONFIG_VARS['srcdir'] = _PROJECT_BASE
else:
_CONFIG_VARS['srcdir'] = _safe_realpath(_CONFIG_VARS['srcdir'])
# Convert srcdir into an absolute path if it appears necessary.
# Normally it is relative to the build directory. However, during
# testing, for example, we might be running a non-installed python
# from a different directory.
if _PYTHON_BUILD and os.name == "posix":
base = _PROJECT_BASE
try:
cwd = os.getcwd()
except OSError:
cwd = None
if (not os.path.isabs(_CONFIG_VARS['srcdir']) and
base != cwd):
# srcdir is relative and we are not in the same directory
# as the executable. Assume executable is in the build
# directory and make srcdir absolute.
srcdir = os.path.join(base, _CONFIG_VARS['srcdir'])
_CONFIG_VARS['srcdir'] = os.path.normpath(srcdir)
if sys.platform == 'darwin':
kernel_version = os.uname()[2] # Kernel version (8.4.3)
major_version = int(kernel_version.split('.')[0])
if major_version < 8:
# On Mac OS X before 10.4, check if -arch and -isysroot
# are in CFLAGS or LDFLAGS and remove them if they are.
# This is needed when building extensions on a 10.3 system
# using a universal build of python.
for key in ('LDFLAGS', 'BASECFLAGS',
# a number of derived variables. These need to be
# patched up as well.
'CFLAGS', 'PY_CFLAGS', 'BLDSHARED'):
flags = _CONFIG_VARS[key]
flags = re.sub(r'-arch\s+\w+\s', ' ', flags)
flags = re.sub(r'-isysroot [^ \t]*', ' ', flags)
_CONFIG_VARS[key] = flags
else:
# Allow the user to override the architecture flags using
# an environment variable.
# NOTE: This name was introduced by Apple in OSX 10.5 and
# is used by several scripting languages distributed with
# that OS release.
if 'ARCHFLAGS' in os.environ:
arch = os.environ['ARCHFLAGS']
for key in ('LDFLAGS', 'BASECFLAGS',
# a number of derived variables. These need to be
# patched up as well.
'CFLAGS', 'PY_CFLAGS', 'BLDSHARED'):
flags = _CONFIG_VARS[key]
flags = re.sub(r'-arch\s+\w+\s', ' ', flags)
flags = flags + ' ' + arch
_CONFIG_VARS[key] = flags
# If we're on OSX 10.5 or later and the user tries to
# compiles an extension using an SDK that is not present
# on the current machine it is better to not use an SDK
# than to fail.
#
# The major usecase for this is users using a Python.org
# binary installer on OSX 10.6: that installer uses
# the 10.4u SDK, but that SDK is not installed by default
# when you install Xcode.
#
CFLAGS = _CONFIG_VARS.get('CFLAGS', '')
m = re.search(r'-isysroot\s+(\S+)', CFLAGS)
if m is not None:
sdk = m.group(1)
if not os.path.exists(sdk):
for key in ('LDFLAGS', 'BASECFLAGS',
# a number of derived variables. These need to be
# patched up as well.
'CFLAGS', 'PY_CFLAGS', 'BLDSHARED'):
flags = _CONFIG_VARS[key]
flags = re.sub(r'-isysroot\s+\S+(\s|$)', ' ', flags)
_CONFIG_VARS[key] = flags
if args:
vals = []
for name in args:
vals.append(_CONFIG_VARS.get(name))
return vals
else:
return _CONFIG_VARS
def get_config_var(name):
"""Return the value of a single variable using the dictionary returned by
'get_config_vars()'.
Equivalent to get_config_vars().get(name)
"""
return get_config_vars().get(name)
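# Example (editorial; values shown are typical, not guaranteed):
#
#     >>> get_config_vars('prefix', 'py_version_short')
#     ['/usr', '3.3']
#     >>> get_config_var('SO')      # extension-module suffix, e.g. '.so'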
def get_platform():
"""Return a string that identifies the current platform.
This is used mainly to distinguish platform-specific build directories and
platform-specific built distributions. Typically includes the OS name
and version and the architecture (as supplied by 'os.uname()'),
although the exact information included depends on the OS; e.g., for IRIX
the architecture isn't particularly important (IRIX only runs on SGI
hardware), but for Linux the kernel version isn't particularly
important.
Examples of returned values:
linux-i586
linux-alpha (?)
solaris-2.6-sun4u
irix-5.3
irix64-6.2
Windows will return one of:
win-amd64 (64bit Windows on AMD64 (aka x86_64, Intel64, EM64T, etc)
win-ia64 (64bit Windows on Itanium)
win32 (all others - specifically, sys.platform is returned)
For other non-POSIX platforms, currently just returns 'sys.platform'.
"""
if os.name == 'nt':
# sniff sys.version for architecture.
prefix = " bit ("
i = sys.version.find(prefix)
if i == -1:
return sys.platform
j = sys.version.find(")", i)
look = sys.version[i+len(prefix):j].lower()
if look == 'amd64':
return 'win-amd64'
if look == 'itanium':
return 'win-ia64'
return sys.platform
if os.name != "posix" or not hasattr(os, 'uname'):
# XXX what about the architecture? NT is Intel or Alpha,
# Mac OS is M68k or PPC, etc.
return sys.platform
# Try to distinguish various flavours of Unix
osname, host, release, version, machine = os.uname()
# Convert the OS name to lowercase, remove '/' characters
# (to accommodate BSD/OS), and translate spaces (for "Power Macintosh")
osname = osname.lower().replace('/', '')
machine = machine.replace(' ', '_')
machine = machine.replace('/', '-')
if osname[:5] == "linux":
# At least on Linux/Intel, 'machine' is the processor --
# i386, etc.
# XXX what about Alpha, SPARC, etc?
return "%s-%s" % (osname, machine)
elif osname[:5] == "sunos":
if release[0] >= "5": # SunOS 5 == Solaris 2
osname = "solaris"
release = "%d.%s" % (int(release[0]) - 3, release[2:])
# fall through to standard osname-release-machine representation
elif osname[:4] == "irix": # could be "irix64"!
return "%s-%s" % (osname, release)
elif osname[:3] == "aix":
return "%s-%s.%s" % (osname, version, release)
elif osname[:6] == "cygwin":
osname = "cygwin"
rel_re = re.compile(r'[\d.]+')
m = rel_re.match(release)
if m:
release = m.group()
elif osname[:6] == "darwin":
#
# For our purposes, we'll assume that the system version from
# distutils' perspective is what MACOSX_DEPLOYMENT_TARGET is set
# to. This makes the compatibility story a bit more sane because the
# machine is going to compile and link as if it were
# MACOSX_DEPLOYMENT_TARGET.
cfgvars = get_config_vars()
macver = cfgvars.get('MACOSX_DEPLOYMENT_TARGET')
if True:
# Always calculate the release of the running machine,
# needed to determine if we can build fat binaries or not.
macrelease = macver
# Get the system version. Reading this plist is a documented
# way to get the system version (see the documentation for
# the Gestalt Manager)
try:
f = open('/System/Library/CoreServices/SystemVersion.plist')
except IOError:
# We're on a plain darwin box, fall back to the default
# behaviour.
pass
else:
try:
m = re.search(r'<key>ProductUserVisibleVersion</key>\s*'
r'<string>(.*?)</string>', f.read())
finally:
f.close()
if m is not None:
macrelease = '.'.join(m.group(1).split('.')[:2])
# else: fall back to the default behaviour
if not macver:
macver = macrelease
if macver:
release = macver
osname = "macosx"
if ((macrelease + '.') >= '10.4.' and
'-arch' in get_config_vars().get('CFLAGS', '').strip()):
# The universal build will build fat binaries, but not on
# systems before 10.4
#
# Try to detect 4-way universal builds, those have machine-type
# 'universal' instead of 'fat'.
machine = 'fat'
cflags = get_config_vars().get('CFLAGS')
archs = re.findall(r'-arch\s+(\S+)', cflags)
archs = tuple(sorted(set(archs)))
if len(archs) == 1:
machine = archs[0]
elif archs == ('i386', 'ppc'):
machine = 'fat'
elif archs == ('i386', 'x86_64'):
machine = 'intel'
elif archs == ('i386', 'ppc', 'x86_64'):
machine = 'fat3'
elif archs == ('ppc64', 'x86_64'):
machine = 'fat64'
elif archs == ('i386', 'ppc', 'ppc64', 'x86_64'):
machine = 'universal'
else:
raise ValueError(
"Don't know machine value for archs=%r" % (archs,))
elif machine == 'i386':
# On OSX the machine type returned by uname is always the
# 32-bit variant, even if the executable architecture is
# the 64-bit variant
if sys.maxsize >= 2**32:
machine = 'x86_64'
elif machine in ('PowerPC', 'Power_Macintosh'):
# Pick a sane name for the PPC architecture.
# See 'i386' case
if sys.maxsize >= 2**32:
machine = 'ppc64'
else:
machine = 'ppc'
return "%s-%s-%s" % (osname, release, machine)
def get_python_version():
return _PY_VERSION_SHORT
def _print_dict(title, data):
for index, (key, value) in enumerate(sorted(data.items())):
if index == 0:
print('%s: ' % (title))
print('\t%s = "%s"' % (key, value))
def _main():
"""Display all information sysconfig detains."""
print('Platform: "%s"' % get_platform())
print('Python version: "%s"' % get_python_version())
print('Current installation scheme: "%s"' % _get_default_scheme())
print()
_print_dict('Paths', get_paths())
print()
_print_dict('Variables', get_config_vars())
if __name__ == '__main__':
_main()
|
mit
|
wilselby/diy_driverless_car_ROS
|
rover_cv/camera_cal/src/camera_cal/camera_cal.py
|
1
|
6503
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#https://github.com/paramaggarwal/CarND-Advanced-Lane-Lines/blob/master/Notebook.ipynb
from __future__ import print_function
from __future__ import division
import sys
import traceback
import rospy
import numpy as np
import cv2
import pickle
import glob
import time
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from std_msgs.msg import String
from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError
class camera_calibration(object):
def __init__(self):
"""ROS Subscriptions """
self.image_pub = rospy.Publisher("/camera_calibration/image_corrected",Image, queue_size=10)
self.image_sub = rospy.Subscriber("/cam/camera_/image_raw",Image,self.cvt_image)
""" Variables """
self.bridge = CvBridge()
self.latestImage = None
self.outputImage = None
self.process = False
self.calibrated = False
self.correctedImage = None
self.mtx = None
self.dist = None
def cvt_image(self,data):
try:
self.latestImage = self.bridge.imgmsg_to_cv2(data, "bgr8")
except CvBridgeError as e:
print(e)
if not self.process:
self.process = True
def camera_cal(self, image):
# termination criteria
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
nx = 8
ny = 6
dst = np.copy(image)
# prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)
objp = np.zeros((ny * nx, 3), np.float32)
objp[:,:2] = np.mgrid[0:nx, 0:ny].T.reshape(-1,2)
# Arrays to store object points and image points from all the images.
objpoints = [] # 3d points in real world space
imgpoints = [] # 2d points in image plane.
# Search for chessboard corners
grey = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
#ret_thresh, mask = cv2.threshold(grey, 30, 255, cv2.THRESH_BINARY)
ret, corners = cv2.findChessboardCorners(image, (nx, ny), None) #flags=(cv2.cv.CV_CALIB_CB_ADAPTIVE_THRESH + cv2.cv.CV_CALIB_CB_FILTER_QUADS))
# If found, add object points, image points
if ret == True:
objpoints.append(objp)
cv2.cornerSubPix(grey,corners, (11,11), (-1,-1), criteria)
imgpoints.append(corners)
self.calibrated = True
print ("FOUND!")
#Draw and display the corners
cv2.drawChessboardCorners(image, (nx, ny), corners, ret)
# Do camera calibration given object points and image points
ret, self.mtx, self.dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, grey.shape[::-1], None, None)
# Save the camera calibration result for later use (we won't worry about rvecs / tvecs)
dist_pickle = {}
dist_pickle["mtx"] = self.mtx
dist_pickle["dist"] = self.dist
dist_pickle['objpoints'] = objpoints
dist_pickle['imgpoints'] = imgpoints
pickle.dump( dist_pickle, open( "/home/wil/ros/catkin_ws/src/av_sim/computer_vision/camera_calibration/data/camera_cal_pickle.p", "wb" ) )
#else:
#print("Searching...")
return image
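# Editorial note: cv2.calibrateCamera is normally fed corner sets from
# many chessboard views; a single image yields a poor intrinsic
# estimate. A sketch of the usual accumulation loop (paths hypothetical):
#
#     for fname in glob.glob('calib_images/*.png'):
#         img = cv2.imread(fname)
#         # ...find corners, append to objpoints/imgpoints...
#     ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(
#         objpoints, imgpoints, grey.shape[::-1], None, None)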
def drawQuad(self, image, points, color=[255, 0, 0], thickness=4):
p1, p2, p3, p4 = points
cv2.line(image, tuple(p1), tuple(p2), color, thickness)
cv2.line(image, tuple(p2), tuple(p3), color, thickness)
cv2.line(image, tuple(p3), tuple(p4), color, thickness)
cv2.line(image, tuple(p4), tuple(p1), color, thickness)
def perspective_transform(self, image, debug=True, size_top=70, size_bottom=370):
height, width = image.shape[0:2]
output_size = height/2
#src = np.float32([[(width/2) - size_top, height*0.65], [(width/2) + size_top, height*0.65], [(width/2) + size_bottom, height-50], [(width/2) - size_bottom, height-50]])
src = np.float32([[512, 450], [675, 454], [707, 560], [347, 568]])
dst = np.float32([[347, height], [707, height], [707, 0], [347, 0]])
#dst = np.float32([[(width/2) - output_size, (height/2) - output_size], [(width/2) + output_size, (height/2) - output_size], [(width/2) + output_size, (height/2) + output_size], [(width/2) - output_size, (height/2) + output_size]])
M = cv2.getPerspectiveTransform(src, dst)
print(M)
warped = cv2.warpPerspective(image, M, (width, height), flags=cv2.INTER_LINEAR)
if debug:
self.drawQuad(image, src, [255, 0, 0])
self.drawQuad(image, dst, [255, 255, 0])
plt.imshow(image)
plt.show()
return warped
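# A common companion step (not in the original): keep the inverse
# matrix so the warped view can be projected back onto the source
# frame, e.g. for overlaying detected lane lines:
#
#     Minv = cv2.getPerspectiveTransform(dst, src)
#     unwarped = cv2.warpPerspective(warped, Minv, (width, height))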
def undistort_image(self, image):
return cv2.undistort(image, self.mtx, self.dist, None, self.mtx)
def run(self):
while True:
# Only run loop if we have an image
if self.process:
filename = "/home/wil/ros/catkin_ws/src/av_sim/computer_vision/camera_calibration/data/check_test.png"
image = cv2.imread(filename, flags=cv2.IMREAD_COLOR)
if not self.calibrated:
#print("Calibrating...")
cornersImage = self.camera_cal(image)
cvImage = cornersImage
else:
correctedImage = self.undistort_image(self.latestImage)  # distortion correction
transformImage = self.perspective_transform(correctedImage)  # warp the corrected frame, not the raw one
cvImage = transformImage
# Publish Undistorted Image
try:
imgmsg = self.bridge.cv2_to_imgmsg(cvImage, "bgr8") #"mono8" "bgr8"
self.image_pub.publish(imgmsg)
except CvBridgeError as e:
print(e)
def main(args):
rospy.init_node('camera_calibration', anonymous=True)
cc = camera_calibration()
cc.run()
try:
rospy.spin()
except KeyboardInterrupt:
print("Shutting down")
cv2.destroyAllWindows()
if __name__ == '__main__':
main(sys.argv)
|
bsd-2-clause
|
emilk/sproxel
|
distro/common/lib/encodings/mac_cyrillic.py
|
93
|
14017
|
""" Python Character Mapping Codec mac_cyrillic generated from 'MAPPINGS/VENDORS/APPLE/CYRILLIC.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='mac-cyrillic',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
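# Usage sketch (editorial; assumes the codec is registered through the
# standard encodings search path, Python 2 string semantics):
#
#     >>> u'\u041f\u0440\u0438\u0432\u0435\u0442'.encode('mac-cyrillic')
#     '\x8f\xf0\xe8\xe2\xe5\xf2'
#     >>> '\x80\xdf'.decode('mac-cyrillic')
#     u'\u0410\u044f'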
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> CONTROL CHARACTER
u'\x01' # 0x01 -> CONTROL CHARACTER
u'\x02' # 0x02 -> CONTROL CHARACTER
u'\x03' # 0x03 -> CONTROL CHARACTER
u'\x04' # 0x04 -> CONTROL CHARACTER
u'\x05' # 0x05 -> CONTROL CHARACTER
u'\x06' # 0x06 -> CONTROL CHARACTER
u'\x07' # 0x07 -> CONTROL CHARACTER
u'\x08' # 0x08 -> CONTROL CHARACTER
u'\t' # 0x09 -> CONTROL CHARACTER
u'\n' # 0x0A -> CONTROL CHARACTER
u'\x0b' # 0x0B -> CONTROL CHARACTER
u'\x0c' # 0x0C -> CONTROL CHARACTER
u'\r' # 0x0D -> CONTROL CHARACTER
u'\x0e' # 0x0E -> CONTROL CHARACTER
u'\x0f' # 0x0F -> CONTROL CHARACTER
u'\x10' # 0x10 -> CONTROL CHARACTER
u'\x11' # 0x11 -> CONTROL CHARACTER
u'\x12' # 0x12 -> CONTROL CHARACTER
u'\x13' # 0x13 -> CONTROL CHARACTER
u'\x14' # 0x14 -> CONTROL CHARACTER
u'\x15' # 0x15 -> CONTROL CHARACTER
u'\x16' # 0x16 -> CONTROL CHARACTER
u'\x17' # 0x17 -> CONTROL CHARACTER
u'\x18' # 0x18 -> CONTROL CHARACTER
u'\x19' # 0x19 -> CONTROL CHARACTER
u'\x1a' # 0x1A -> CONTROL CHARACTER
u'\x1b' # 0x1B -> CONTROL CHARACTER
u'\x1c' # 0x1C -> CONTROL CHARACTER
u'\x1d' # 0x1D -> CONTROL CHARACTER
u'\x1e' # 0x1E -> CONTROL CHARACTER
u'\x1f' # 0x1F -> CONTROL CHARACTER
u' ' # 0x20 -> SPACE
u'!' # 0x21 -> EXCLAMATION MARK
u'"' # 0x22 -> QUOTATION MARK
u'#' # 0x23 -> NUMBER SIGN
u'$' # 0x24 -> DOLLAR SIGN
u'%' # 0x25 -> PERCENT SIGN
u'&' # 0x26 -> AMPERSAND
u"'" # 0x27 -> APOSTROPHE
u'(' # 0x28 -> LEFT PARENTHESIS
u')' # 0x29 -> RIGHT PARENTHESIS
u'*' # 0x2A -> ASTERISK
u'+' # 0x2B -> PLUS SIGN
u',' # 0x2C -> COMMA
u'-' # 0x2D -> HYPHEN-MINUS
u'.' # 0x2E -> FULL STOP
u'/' # 0x2F -> SOLIDUS
u'0' # 0x30 -> DIGIT ZERO
u'1' # 0x31 -> DIGIT ONE
u'2' # 0x32 -> DIGIT TWO
u'3' # 0x33 -> DIGIT THREE
u'4' # 0x34 -> DIGIT FOUR
u'5' # 0x35 -> DIGIT FIVE
u'6' # 0x36 -> DIGIT SIX
u'7' # 0x37 -> DIGIT SEVEN
u'8' # 0x38 -> DIGIT EIGHT
u'9' # 0x39 -> DIGIT NINE
u':' # 0x3A -> COLON
u';' # 0x3B -> SEMICOLON
u'<' # 0x3C -> LESS-THAN SIGN
u'=' # 0x3D -> EQUALS SIGN
u'>' # 0x3E -> GREATER-THAN SIGN
u'?' # 0x3F -> QUESTION MARK
u'@' # 0x40 -> COMMERCIAL AT
u'A' # 0x41 -> LATIN CAPITAL LETTER A
u'B' # 0x42 -> LATIN CAPITAL LETTER B
u'C' # 0x43 -> LATIN CAPITAL LETTER C
u'D' # 0x44 -> LATIN CAPITAL LETTER D
u'E' # 0x45 -> LATIN CAPITAL LETTER E
u'F' # 0x46 -> LATIN CAPITAL LETTER F
u'G' # 0x47 -> LATIN CAPITAL LETTER G
u'H' # 0x48 -> LATIN CAPITAL LETTER H
u'I' # 0x49 -> LATIN CAPITAL LETTER I
u'J' # 0x4A -> LATIN CAPITAL LETTER J
u'K' # 0x4B -> LATIN CAPITAL LETTER K
u'L' # 0x4C -> LATIN CAPITAL LETTER L
u'M' # 0x4D -> LATIN CAPITAL LETTER M
u'N' # 0x4E -> LATIN CAPITAL LETTER N
u'O' # 0x4F -> LATIN CAPITAL LETTER O
u'P' # 0x50 -> LATIN CAPITAL LETTER P
u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
u'R' # 0x52 -> LATIN CAPITAL LETTER R
u'S' # 0x53 -> LATIN CAPITAL LETTER S
u'T' # 0x54 -> LATIN CAPITAL LETTER T
u'U' # 0x55 -> LATIN CAPITAL LETTER U
u'V' # 0x56 -> LATIN CAPITAL LETTER V
u'W' # 0x57 -> LATIN CAPITAL LETTER W
u'X' # 0x58 -> LATIN CAPITAL LETTER X
u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
u'[' # 0x5B -> LEFT SQUARE BRACKET
u'\\' # 0x5C -> REVERSE SOLIDUS
u']' # 0x5D -> RIGHT SQUARE BRACKET
u'^' # 0x5E -> CIRCUMFLEX ACCENT
u'_' # 0x5F -> LOW LINE
u'`' # 0x60 -> GRAVE ACCENT
u'a' # 0x61 -> LATIN SMALL LETTER A
u'b' # 0x62 -> LATIN SMALL LETTER B
u'c' # 0x63 -> LATIN SMALL LETTER C
u'd' # 0x64 -> LATIN SMALL LETTER D
u'e' # 0x65 -> LATIN SMALL LETTER E
u'f' # 0x66 -> LATIN SMALL LETTER F
u'g' # 0x67 -> LATIN SMALL LETTER G
u'h' # 0x68 -> LATIN SMALL LETTER H
u'i' # 0x69 -> LATIN SMALL LETTER I
u'j' # 0x6A -> LATIN SMALL LETTER J
u'k' # 0x6B -> LATIN SMALL LETTER K
u'l' # 0x6C -> LATIN SMALL LETTER L
u'm' # 0x6D -> LATIN SMALL LETTER M
u'n' # 0x6E -> LATIN SMALL LETTER N
u'o' # 0x6F -> LATIN SMALL LETTER O
u'p' # 0x70 -> LATIN SMALL LETTER P
u'q' # 0x71 -> LATIN SMALL LETTER Q
u'r' # 0x72 -> LATIN SMALL LETTER R
u's' # 0x73 -> LATIN SMALL LETTER S
u't' # 0x74 -> LATIN SMALL LETTER T
u'u' # 0x75 -> LATIN SMALL LETTER U
u'v' # 0x76 -> LATIN SMALL LETTER V
u'w' # 0x77 -> LATIN SMALL LETTER W
u'x' # 0x78 -> LATIN SMALL LETTER X
u'y' # 0x79 -> LATIN SMALL LETTER Y
u'z' # 0x7A -> LATIN SMALL LETTER Z
u'{' # 0x7B -> LEFT CURLY BRACKET
u'|' # 0x7C -> VERTICAL LINE
u'}' # 0x7D -> RIGHT CURLY BRACKET
u'~' # 0x7E -> TILDE
u'\x7f' # 0x7F -> CONTROL CHARACTER
u'\u0410' # 0x80 -> CYRILLIC CAPITAL LETTER A
u'\u0411' # 0x81 -> CYRILLIC CAPITAL LETTER BE
u'\u0412' # 0x82 -> CYRILLIC CAPITAL LETTER VE
u'\u0413' # 0x83 -> CYRILLIC CAPITAL LETTER GHE
u'\u0414' # 0x84 -> CYRILLIC CAPITAL LETTER DE
u'\u0415' # 0x85 -> CYRILLIC CAPITAL LETTER IE
u'\u0416' # 0x86 -> CYRILLIC CAPITAL LETTER ZHE
u'\u0417' # 0x87 -> CYRILLIC CAPITAL LETTER ZE
u'\u0418' # 0x88 -> CYRILLIC CAPITAL LETTER I
u'\u0419' # 0x89 -> CYRILLIC CAPITAL LETTER SHORT I
u'\u041a' # 0x8A -> CYRILLIC CAPITAL LETTER KA
u'\u041b' # 0x8B -> CYRILLIC CAPITAL LETTER EL
u'\u041c' # 0x8C -> CYRILLIC CAPITAL LETTER EM
u'\u041d' # 0x8D -> CYRILLIC CAPITAL LETTER EN
u'\u041e' # 0x8E -> CYRILLIC CAPITAL LETTER O
u'\u041f' # 0x8F -> CYRILLIC CAPITAL LETTER PE
u'\u0420' # 0x90 -> CYRILLIC CAPITAL LETTER ER
u'\u0421' # 0x91 -> CYRILLIC CAPITAL LETTER ES
u'\u0422' # 0x92 -> CYRILLIC CAPITAL LETTER TE
u'\u0423' # 0x93 -> CYRILLIC CAPITAL LETTER U
u'\u0424' # 0x94 -> CYRILLIC CAPITAL LETTER EF
u'\u0425' # 0x95 -> CYRILLIC CAPITAL LETTER HA
u'\u0426' # 0x96 -> CYRILLIC CAPITAL LETTER TSE
u'\u0427' # 0x97 -> CYRILLIC CAPITAL LETTER CHE
u'\u0428' # 0x98 -> CYRILLIC CAPITAL LETTER SHA
u'\u0429' # 0x99 -> CYRILLIC CAPITAL LETTER SHCHA
u'\u042a' # 0x9A -> CYRILLIC CAPITAL LETTER HARD SIGN
u'\u042b' # 0x9B -> CYRILLIC CAPITAL LETTER YERU
u'\u042c' # 0x9C -> CYRILLIC CAPITAL LETTER SOFT SIGN
u'\u042d' # 0x9D -> CYRILLIC CAPITAL LETTER E
u'\u042e' # 0x9E -> CYRILLIC CAPITAL LETTER YU
u'\u042f' # 0x9F -> CYRILLIC CAPITAL LETTER YA
u'\u2020' # 0xA0 -> DAGGER
u'\xb0' # 0xA1 -> DEGREE SIGN
u'\u0490' # 0xA2 -> CYRILLIC CAPITAL LETTER GHE WITH UPTURN
u'\xa3' # 0xA3 -> POUND SIGN
u'\xa7' # 0xA4 -> SECTION SIGN
u'\u2022' # 0xA5 -> BULLET
u'\xb6' # 0xA6 -> PILCROW SIGN
u'\u0406' # 0xA7 -> CYRILLIC CAPITAL LETTER BYELORUSSIAN-UKRAINIAN I
u'\xae' # 0xA8 -> REGISTERED SIGN
u'\xa9' # 0xA9 -> COPYRIGHT SIGN
u'\u2122' # 0xAA -> TRADE MARK SIGN
u'\u0402' # 0xAB -> CYRILLIC CAPITAL LETTER DJE
u'\u0452' # 0xAC -> CYRILLIC SMALL LETTER DJE
u'\u2260' # 0xAD -> NOT EQUAL TO
u'\u0403' # 0xAE -> CYRILLIC CAPITAL LETTER GJE
u'\u0453' # 0xAF -> CYRILLIC SMALL LETTER GJE
u'\u221e' # 0xB0 -> INFINITY
u'\xb1' # 0xB1 -> PLUS-MINUS SIGN
u'\u2264' # 0xB2 -> LESS-THAN OR EQUAL TO
u'\u2265' # 0xB3 -> GREATER-THAN OR EQUAL TO
u'\u0456' # 0xB4 -> CYRILLIC SMALL LETTER BYELORUSSIAN-UKRAINIAN I
u'\xb5' # 0xB5 -> MICRO SIGN
u'\u0491' # 0xB6 -> CYRILLIC SMALL LETTER GHE WITH UPTURN
u'\u0408' # 0xB7 -> CYRILLIC CAPITAL LETTER JE
u'\u0404' # 0xB8 -> CYRILLIC CAPITAL LETTER UKRAINIAN IE
u'\u0454' # 0xB9 -> CYRILLIC SMALL LETTER UKRAINIAN IE
u'\u0407' # 0xBA -> CYRILLIC CAPITAL LETTER YI
u'\u0457' # 0xBB -> CYRILLIC SMALL LETTER YI
u'\u0409' # 0xBC -> CYRILLIC CAPITAL LETTER LJE
u'\u0459' # 0xBD -> CYRILLIC SMALL LETTER LJE
u'\u040a' # 0xBE -> CYRILLIC CAPITAL LETTER NJE
u'\u045a' # 0xBF -> CYRILLIC SMALL LETTER NJE
u'\u0458' # 0xC0 -> CYRILLIC SMALL LETTER JE
u'\u0405' # 0xC1 -> CYRILLIC CAPITAL LETTER DZE
u'\xac' # 0xC2 -> NOT SIGN
u'\u221a' # 0xC3 -> SQUARE ROOT
u'\u0192' # 0xC4 -> LATIN SMALL LETTER F WITH HOOK
u'\u2248' # 0xC5 -> ALMOST EQUAL TO
u'\u2206' # 0xC6 -> INCREMENT
u'\xab' # 0xC7 -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xbb' # 0xC8 -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\u2026' # 0xC9 -> HORIZONTAL ELLIPSIS
u'\xa0' # 0xCA -> NO-BREAK SPACE
u'\u040b' # 0xCB -> CYRILLIC CAPITAL LETTER TSHE
u'\u045b' # 0xCC -> CYRILLIC SMALL LETTER TSHE
u'\u040c' # 0xCD -> CYRILLIC CAPITAL LETTER KJE
u'\u045c' # 0xCE -> CYRILLIC SMALL LETTER KJE
u'\u0455' # 0xCF -> CYRILLIC SMALL LETTER DZE
u'\u2013' # 0xD0 -> EN DASH
u'\u2014' # 0xD1 -> EM DASH
u'\u201c' # 0xD2 -> LEFT DOUBLE QUOTATION MARK
u'\u201d' # 0xD3 -> RIGHT DOUBLE QUOTATION MARK
u'\u2018' # 0xD4 -> LEFT SINGLE QUOTATION MARK
u'\u2019' # 0xD5 -> RIGHT SINGLE QUOTATION MARK
u'\xf7' # 0xD6 -> DIVISION SIGN
u'\u201e' # 0xD7 -> DOUBLE LOW-9 QUOTATION MARK
u'\u040e' # 0xD8 -> CYRILLIC CAPITAL LETTER SHORT U
u'\u045e' # 0xD9 -> CYRILLIC SMALL LETTER SHORT U
u'\u040f' # 0xDA -> CYRILLIC CAPITAL LETTER DZHE
u'\u045f' # 0xDB -> CYRILLIC SMALL LETTER DZHE
u'\u2116' # 0xDC -> NUMERO SIGN
u'\u0401' # 0xDD -> CYRILLIC CAPITAL LETTER IO
u'\u0451' # 0xDE -> CYRILLIC SMALL LETTER IO
u'\u044f' # 0xDF -> CYRILLIC SMALL LETTER YA
u'\u0430' # 0xE0 -> CYRILLIC SMALL LETTER A
u'\u0431' # 0xE1 -> CYRILLIC SMALL LETTER BE
u'\u0432' # 0xE2 -> CYRILLIC SMALL LETTER VE
u'\u0433' # 0xE3 -> CYRILLIC SMALL LETTER GHE
u'\u0434' # 0xE4 -> CYRILLIC SMALL LETTER DE
u'\u0435' # 0xE5 -> CYRILLIC SMALL LETTER IE
u'\u0436' # 0xE6 -> CYRILLIC SMALL LETTER ZHE
u'\u0437' # 0xE7 -> CYRILLIC SMALL LETTER ZE
u'\u0438' # 0xE8 -> CYRILLIC SMALL LETTER I
u'\u0439' # 0xE9 -> CYRILLIC SMALL LETTER SHORT I
u'\u043a' # 0xEA -> CYRILLIC SMALL LETTER KA
u'\u043b' # 0xEB -> CYRILLIC SMALL LETTER EL
u'\u043c' # 0xEC -> CYRILLIC SMALL LETTER EM
u'\u043d' # 0xED -> CYRILLIC SMALL LETTER EN
u'\u043e' # 0xEE -> CYRILLIC SMALL LETTER O
u'\u043f' # 0xEF -> CYRILLIC SMALL LETTER PE
u'\u0440' # 0xF0 -> CYRILLIC SMALL LETTER ER
u'\u0441' # 0xF1 -> CYRILLIC SMALL LETTER ES
u'\u0442' # 0xF2 -> CYRILLIC SMALL LETTER TE
u'\u0443' # 0xF3 -> CYRILLIC SMALL LETTER U
u'\u0444' # 0xF4 -> CYRILLIC SMALL LETTER EF
u'\u0445' # 0xF5 -> CYRILLIC SMALL LETTER HA
u'\u0446' # 0xF6 -> CYRILLIC SMALL LETTER TSE
u'\u0447' # 0xF7 -> CYRILLIC SMALL LETTER CHE
u'\u0448' # 0xF8 -> CYRILLIC SMALL LETTER SHA
u'\u0449' # 0xF9 -> CYRILLIC SMALL LETTER SHCHA
u'\u044a' # 0xFA -> CYRILLIC SMALL LETTER HARD SIGN
u'\u044b' # 0xFB -> CYRILLIC SMALL LETTER YERU
u'\u044c' # 0xFC -> CYRILLIC SMALL LETTER SOFT SIGN
u'\u044d' # 0xFD -> CYRILLIC SMALL LETTER E
u'\u044e' # 0xFE -> CYRILLIC SMALL LETTER YU
u'\u20ac' # 0xFF -> EURO SIGN
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
|
bsd-3-clause
|
pombredanne/teamwork
|
wsgi/static/reeborg/src/libraries/brython/Lib/unittest/result.py
|
15
|
6282
|
"""Test result object"""
import io
import sys
import traceback
from . import util
from functools import wraps
__unittest = True
def failfast(method):
@wraps(method)
def inner(self, *args, **kw):
if getattr(self, 'failfast', False):
self.stop()
return method(self, *args, **kw)
return inner
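# Editorial note: with result.failfast set, this decorator calls
# self.stop() before delegating, so the first addError/addFailure/
# addUnexpectedSuccess aborts the run after the current test.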
STDOUT_LINE = '\nStdout:\n%s'
STDERR_LINE = '\nStderr:\n%s'
class TestResult(object):
"""Holder for test result information.
Test results are automatically managed by the TestCase and TestSuite
classes, and do not need to be explicitly manipulated by writers of tests.
Each instance holds the total number of tests run, and collections of
failures and errors that occurred among those test runs. The collections
contain tuples of (testcase, exceptioninfo), where exceptioninfo is the
formatted traceback of the error that occurred.
"""
_previousTestClass = None
_testRunEntered = False
_moduleSetUpFailed = False
def __init__(self, stream=None, descriptions=None, verbosity=None):
self.failfast = False
self.failures = []
self.errors = []
self.testsRun = 0
self.skipped = []
self.expectedFailures = []
self.unexpectedSuccesses = []
self.shouldStop = False
self.buffer = False
self._stdout_buffer = None
self._stderr_buffer = None
self._original_stdout = sys.stdout
self._original_stderr = sys.stderr
self._mirrorOutput = False
def printErrors(self):
"Called by TestRunner after test run"
def startTest(self, test):
"Called when the given test is about to be run"
self.testsRun += 1
self._mirrorOutput = False
self._setupStdout()
def _setupStdout(self):
if self.buffer:
if self._stderr_buffer is None:
self._stderr_buffer = io.StringIO()
self._stdout_buffer = io.StringIO()
sys.stdout = self._stdout_buffer
sys.stderr = self._stderr_buffer
def startTestRun(self):
"""Called once before any tests are executed.
See startTest for a method called before each test.
"""
def stopTest(self, test):
"""Called when the given test has been run"""
self._restoreStdout()
self._mirrorOutput = False
def _restoreStdout(self):
if self.buffer:
if self._mirrorOutput:
output = sys.stdout.getvalue()
error = sys.stderr.getvalue()
if output:
if not output.endswith('\n'):
output += '\n'
self._original_stdout.write(STDOUT_LINE % output)
if error:
if not error.endswith('\n'):
error += '\n'
self._original_stderr.write(STDERR_LINE % error)
sys.stdout = self._original_stdout
sys.stderr = self._original_stderr
self._stdout_buffer.seek(0)
self._stdout_buffer.truncate()
self._stderr_buffer.seek(0)
self._stderr_buffer.truncate()
def stopTestRun(self):
"""Called once after all tests are executed.
See stopTest for a method called after each test.
"""
@failfast
def addError(self, test, err):
"""Called when an error has occurred. 'err' is a tuple of values as
returned by sys.exc_info().
"""
self.errors.append((test, self._exc_info_to_string(err, test)))
self._mirrorOutput = True
@failfast
def addFailure(self, test, err):
"""Called when an error has occurred. 'err' is a tuple of values as
returned by sys.exc_info()."""
self.failures.append((test, self._exc_info_to_string(err, test)))
self._mirrorOutput = True
def addSuccess(self, test):
"Called when a test has completed successfully"
pass
def addSkip(self, test, reason):
"""Called when a test is skipped."""
self.skipped.append((test, reason))
def addExpectedFailure(self, test, err):
"""Called when an expected failure/error occured."""
self.expectedFailures.append(
(test, self._exc_info_to_string(err, test)))
@failfast
def addUnexpectedSuccess(self, test):
"""Called when a test was expected to fail, but succeed."""
self.unexpectedSuccesses.append(test)
def wasSuccessful(self):
"Tells whether or not this result was a success"
return len(self.failures) == len(self.errors) == 0
def stop(self):
"Indicates that the tests should be aborted"
self.shouldStop = True
def _exc_info_to_string(self, err, test):
"""Converts a sys.exc_info()-style tuple of values into a string."""
exctype, value, tb = err
# Skip test runner traceback levels
while tb and self._is_relevant_tb_level(tb):
tb = tb.tb_next
if exctype is test.failureException:
# Skip assert*() traceback levels
length = self._count_relevant_tb_levels(tb)
msgLines = traceback.format_exception(exctype, value, tb, length)
else:
msgLines = traceback.format_exception(exctype, value, tb)
if self.buffer:
output = sys.stdout.getvalue()
error = sys.stderr.getvalue()
if output:
if not output.endswith('\n'):
output += '\n'
msgLines.append(STDOUT_LINE % output)
if error:
if not error.endswith('\n'):
error += '\n'
msgLines.append(STDERR_LINE % error)
return ''.join(msgLines)
def _is_relevant_tb_level(self, tb):
return '__unittest' in tb.tb_frame.f_globals
def _count_relevant_tb_levels(self, tb):
length = 0
while tb and not self._is_relevant_tb_level(tb):
length += 1
tb = tb.tb_next
return length
def __repr__(self):
return ("<%s run=%i errors=%i failures=%i>" %
(util.strclass(self.__class__), self.testsRun, len(self.errors),
len(self.failures)))
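# Minimal usage sketch (editorial):
#
#     result = TestResult()
#     suite = unittest.defaultTestLoader.loadTestsFromModule(some_module)
#     suite.run(result)
#     print(result.wasSuccessful(), len(result.errors), len(result.failures))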
|
gpl-2.0
|
kapouer/mapnik
|
scons/scons-local-2.3.0/SCons/Tool/msginit.py
|
11
|
4732
|
""" msginit tool
Tool specific initialization of msginit tool.
"""
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
__revision__ = "src/engine/SCons/Tool/msginit.py 2013/03/03 09:48:35 garyo"
import SCons.Warnings
import SCons.Builder
import re
#############################################################################
def _optional_no_translator_flag(env):
""" Return '--no-translator' flag if we run *msginit(1)* in non-interactive
mode."""
import SCons.Util
if 'POAUTOINIT' in env:
autoinit = env['POAUTOINIT']
else:
autoinit = False
if autoinit:
return [SCons.Util.CLVar('--no-translator')]
else:
return [SCons.Util.CLVar('')]
#############################################################################
#############################################################################
def _POInitBuilder(env, **kw):
""" Create builder object for `POInit` builder. """
import SCons.Action
from SCons.Tool.GettextCommon import _init_po_files, _POFileBuilder
action = SCons.Action.Action(_init_po_files, None)
return _POFileBuilder(env, action=action, target_alias='$POCREATE_ALIAS')
#############################################################################
#############################################################################
from SCons.Environment import _null
#############################################################################
def _POInitBuilderWrapper(env, target=None, source=_null, **kw):
""" Wrapper for _POFileBuilder. We use it to make user's life easier.
This wrapper checks for `$POTDOMAIN` construction variable (or override in
`**kw`) and treats it appropriatelly.
"""
if source is _null:
if 'POTDOMAIN' in kw:
domain = kw['POTDOMAIN']
elif 'POTDOMAIN' in env:
domain = env['POTDOMAIN']
else:
domain = 'messages'
source = [ domain ] # NOTE: Suffix shall be appended automatically
return env._POInitBuilder(target, source, **kw)
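# Typical SConstruct usage (sketch; defaults as set by generate() below):
#
#     env = Environment(tools=['default', 'msginit'])
#     env['POAUTOINIT'] = True      # pass --no-translator to msginit
#     env.POInit(['en', 'pl'])      # en.po, pl.po from messages.pot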
#############################################################################
#############################################################################
def generate(env,**kw):
""" Generate the `msginit` tool """
import SCons.Util
from SCons.Tool.GettextCommon import _detect_msginit
try:
env['MSGINIT'] = _detect_msginit(env)
except:
env['MSGINIT'] = 'msginit'
msginitcom = '$MSGINIT ${_MSGNoTranslator(__env__)} -l ${_MSGINITLOCALE}' \
+ ' $MSGINITFLAGS -i $SOURCE -o $TARGET'
# NOTE: We set POTSUFFIX here, in case the 'xgettext' is not loaded
# (sometimes we really don't need it)
env.SetDefault(
POSUFFIX = ['.po'],
POTSUFFIX = ['.pot'],
_MSGINITLOCALE = '${TARGET.filebase}',
_MSGNoTranslator = _optional_no_translator_flag,
MSGINITCOM = msginitcom,
MSGINITCOMSTR = '',
MSGINITFLAGS = [ ],
POAUTOINIT = False,
POCREATE_ALIAS = 'po-create'
)
env.Append( BUILDERS = { '_POInitBuilder' : _POInitBuilder(env) } )
env.AddMethod(_POInitBuilderWrapper, 'POInit')
env.AlwaysBuild(env.Alias('$POCREATE_ALIAS'))
#############################################################################
#############################################################################
def exists(env):
""" Check if the tool exists """
from SCons.Tool.GettextCommon import _msginit_exists
try:
return _msginit_exists(env)
except:
return False
#############################################################################
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
lgpl-2.1
|
DeskboxBrazil/Cura
|
cura/CuraActions.py
|
7
|
1090
|
from PyQt5.QtCore import QObject, pyqtSignal, pyqtSlot, pyqtProperty, QUrl
from PyQt5.QtGui import QDesktopServices
from UM.Event import CallFunctionEvent
from UM.Application import Application
import webbrowser
class CuraActions(QObject):
def __init__(self, parent = None):
super().__init__(parent)
@pyqtSlot()
def openDocumentation(self):
# Starting a web browser from a signal handler connected to a menu will crash on Windows.
# So instead, defer the call to the next run of the event loop, since that does work.
# Note that weirdly enough, only signal handlers that open a web browser fail like that.
event = CallFunctionEvent(self._openUrl, [QUrl("http://ultimaker.com/en/support/software")], {})
Application.getInstance().functionEvent(event)
@pyqtSlot()
def openBugReportPage(self):
event = CallFunctionEvent(self._openUrl, [QUrl("http://github.com/Ultimaker/Cura/issues")], {})
Application.getInstance().functionEvent(event)
def _openUrl(self, url):
QDesktopServices.openUrl(url)
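# Registration sketch (editorial; the exact wiring in Cura may differ):
#
#     engine.rootContext().setContextProperty("CuraActions", CuraActions())
#
# QML can then bind menu items, e.g.
#     MenuItem { onTriggered: CuraActions.openDocumentation() }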
|
agpl-3.0
|