repo_name | path | copies | size | content | license
---|---|---|---|---|---|
Mhynlo/SickRage
|
lib/imdb/parser/http/bsouplxml/bsoupxpath.py
|
126
|
14521
|
"""
parser.http.bsoupxpath module (imdb.parser.http package).
This module provides XPath support for BeautifulSoup.
Copyright 2008 H. Turgut Uyar <[email protected]>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
__author__ = 'H. Turgut Uyar <[email protected]>'
__docformat__ = 'restructuredtext'
import re
import string
import _bsoup as BeautifulSoup
# XPath related enumerations and constants
AXIS_ANCESTOR = 'ancestor'
AXIS_ATTRIBUTE = 'attribute'
AXIS_CHILD = 'child'
AXIS_DESCENDANT = 'descendant'
AXIS_FOLLOWING = 'following'
AXIS_FOLLOWING_SIBLING = 'following-sibling'
AXIS_PRECEDING_SIBLING = 'preceding-sibling'
AXES = (AXIS_ANCESTOR, AXIS_ATTRIBUTE, AXIS_CHILD, AXIS_DESCENDANT,
AXIS_FOLLOWING, AXIS_FOLLOWING_SIBLING, AXIS_PRECEDING_SIBLING)
XPATH_FUNCTIONS = ('starts-with', 'string-length', 'contains')
def tokenize_path(path):
"""Tokenize a location path into location steps. Return the list of steps.
If two steps are separated by a double slash, the double slash becomes part of
the second step. If they are separated by a single slash, the slash is not
included in either step.
"""
# form a list of tuples that mark the start and end positions of steps
separators = []
last_position = 0
i = -1
in_string = False
while i < len(path) - 1:
i = i + 1
if path[i] == "'":
in_string = not in_string
if in_string:
# slashes within strings are not step separators
continue
if path[i] == '/':
if i > 0:
separators.append((last_position, i))
if (path[i+1] == '/'):
last_position = i
i = i + 1
else:
last_position = i + 1
separators.append((last_position, len(path)))
steps = []
for start, end in separators:
steps.append(path[start:end])
return steps
class Path:
"""A location path.
"""
def __init__(self, path, parse=True):
self.path = path
self.steps = []
if parse:
if (path[0] == '/') and (path[1] != '/'):
# if not on the descendant axis, remove the leading slash
path = path[1:]
steps = tokenize_path(path)
for step in steps:
self.steps.append(PathStep(step))
def apply(self, node):
"""Apply the path to a node. Return the resulting list of nodes.
Apply the steps in the path sequentially by sending the output of each
step as input to the next step.
"""
# FIXME: this should return a node SET, not a node LIST
# or at least a list with no duplicates
if self.path[0] == '/':
# for an absolute path, start from the root
if not isinstance(node, BeautifulSoup.Tag) \
or (node.name != '[document]'):
node = node.findParent('[document]')
nodes = [node]
for step in self.steps:
nodes = step.apply(nodes)
return nodes
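# Hedged usage sketch (assumption, not part of the original module): a Path is
# applied to a parsed _bsoup document, e.g.
#   soup = BeautifulSoup.BeautifulSoup('<div><a href="x">y</a></div>')
#   links = Path('//a[@href]').apply(soup)
# which should return the matching <a> Tag nodes as a list.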
class PathStep:
"""A location step in a location path.
"""
AXIS_PATTERN = r"""(%s)::|@""" % '|'.join(AXES)
NODE_TEST_PATTERN = r"""\w+(\(\))?"""
PREDICATE_PATTERN = r"""\[(.*?)\]"""
LOCATION_STEP_PATTERN = r"""(%s)?(%s)((%s)*)""" \
% (AXIS_PATTERN, NODE_TEST_PATTERN, PREDICATE_PATTERN)
_re_location_step = re.compile(LOCATION_STEP_PATTERN)
PREDICATE_NOT_PATTERN = r"""not\((.*?)\)"""
PREDICATE_AXIS_PATTERN = r"""(%s)?(%s)(='(.*?)')?""" \
% (AXIS_PATTERN, NODE_TEST_PATTERN)
PREDICATE_FUNCTION_PATTERN = r"""(%s)\(([^,]+(,\s*[^,]+)*)?\)(=(.*))?""" \
% '|'.join(XPATH_FUNCTIONS)
_re_predicate_not = re.compile(PREDICATE_NOT_PATTERN)
_re_predicate_axis = re.compile(PREDICATE_AXIS_PATTERN)
_re_predicate_function = re.compile(PREDICATE_FUNCTION_PATTERN)
def __init__(self, step):
self.step = step
if (step == '.') or (step == '..'):
return
if step[:2] == '//':
default_axis = AXIS_DESCENDANT
step = step[2:]
else:
default_axis = AXIS_CHILD
step_match = self._re_location_step.match(step)
# determine the axis
axis = step_match.group(1)
if axis is None:
self.axis = default_axis
elif axis == '@':
self.axis = AXIS_ATTRIBUTE
else:
self.axis = step_match.group(2)
self.soup_args = {}
self.index = None
self.node_test = step_match.group(3)
if self.node_test == 'text()':
self.soup_args['text'] = True
else:
self.soup_args['name'] = self.node_test
self.checkers = []
predicates = step_match.group(5)
if predicates is not None:
predicates = [p for p in predicates[1:-1].split('][') if p]
for predicate in predicates:
checker = self.__parse_predicate(predicate)
if checker is not None:
self.checkers.append(checker)
def __parse_predicate(self, predicate):
"""Parse the predicate. Return a callable that can be used to filter
nodes. Update `self.soup_args` to take advantage of BeautifulSoup search
features.
"""
try:
position = int(predicate)
if self.axis == AXIS_DESCENDANT:
return PredicateFilter('position', value=position)
else:
# use the search limit feature instead of a checker
self.soup_args['limit'] = position
self.index = position - 1
return None
except ValueError:
pass
if predicate == "last()":
self.index = -1
return None
negate = self._re_predicate_not.match(predicate)
if negate:
predicate = negate.group(1)
function_match = self._re_predicate_function.match(predicate)
if function_match:
name = function_match.group(1)
arguments = function_match.group(2)
value = function_match.group(4)
if value is not None:
value = function_match.group(5)
return PredicateFilter(name, arguments, value)
axis_match = self._re_predicate_axis.match(predicate)
if axis_match:
axis = axis_match.group(1)
if axis is None:
axis = AXIS_CHILD
elif axis == '@':
axis = AXIS_ATTRIBUTE
if axis == AXIS_ATTRIBUTE:
# use the attribute search feature instead of a checker
attribute_name = axis_match.group(3)
if axis_match.group(5) is not None:
attribute_value = axis_match.group(6)
elif not negate:
attribute_value = True
else:
attribute_value = None
if not self.soup_args.has_key('attrs'):
self.soup_args['attrs'] = {}
self.soup_args['attrs'][attribute_name] = attribute_value
return None
elif axis == AXIS_CHILD:
node_test = axis_match.group(3)
node_value = axis_match.group(6)
return PredicateFilter('axis', node_test, value=node_value,
negate=negate)
raise NotImplementedError("This predicate is not implemented")
def apply(self, nodes):
"""Apply the step to a list of nodes. Return the list of nodes for the
next step.
"""
if self.step == '.':
return nodes
elif self.step == '..':
return [node.parent for node in nodes]
result = []
for node in nodes:
if self.axis == AXIS_CHILD:
found = node.findAll(recursive=False, **self.soup_args)
elif self.axis == AXIS_DESCENDANT:
found = node.findAll(recursive=True, **self.soup_args)
elif self.axis == AXIS_ATTRIBUTE:
try:
found = [node[self.node_test]]
except KeyError:
found = []
elif self.axis == AXIS_FOLLOWING_SIBLING:
found = node.findNextSiblings(**self.soup_args)
elif self.axis == AXIS_PRECEDING_SIBLING:
# TODO: make sure that the result is reverse ordered
found = node.findPreviousSiblings(**self.soup_args)
elif self.axis == AXIS_FOLLOWING:
# find the last descendant of this node
last = node
while (not isinstance(last, BeautifulSoup.NavigableString)) \
and (len(last.contents) > 0):
last = last.contents[-1]
found = last.findAllNext(**self.soup_args)
elif self.axis == AXIS_ANCESTOR:
found = node.findParents(**self.soup_args)
# this should only be active if there is a position predicate
# and the axis is not 'descendant'
if self.index is not None:
if found:
if len(found) > self.index:
found = [found[self.index]]
else:
found = []
if found:
for checker in self.checkers:
found = filter(checker, found)
result.extend(found)
return result
class PredicateFilter:
"""A callable class for filtering nodes.
"""
def __init__(self, name, arguments=None, value=None, negate=False):
self.name = name
self.arguments = arguments
self.negate = negate
if name == 'position':
self.__filter = self.__position
self.value = value
elif name == 'axis':
self.__filter = self.__axis
self.node_test = arguments
self.value = value
elif name in ('starts-with', 'contains'):
if name == 'starts-with':
self.__filter = self.__starts_with
else:
self.__filter = self.__contains
args = map(string.strip, arguments.split(','))
if args[0][0] == '@':
self.arguments = (True, args[0][1:], args[1][1:-1])
else:
self.arguments = (False, args[0], args[1][1:-1])
elif name == 'string-length':
self.__filter = self.__string_length
args = map(string.strip, arguments.split(','))
if args[0][0] == '@':
self.arguments = (True, args[0][1:])
else:
self.arguments = (False, args[0])
self.value = int(value)
else:
raise NotImplementedError("This XPath function is not implemented")
def __call__(self, node):
if self.negate:
return not self.__filter(node)
else:
return self.__filter(node)
def __position(self, node):
if isinstance(node, BeautifulSoup.NavigableString):
actual_position = len(node.findPreviousSiblings(text=True)) + 1
else:
actual_position = len(node.findPreviousSiblings(node.name)) + 1
return actual_position == self.value
def __axis(self, node):
if self.node_test == 'text()':
return node.string == self.value
else:
children = node.findAll(self.node_test, recursive=False)
if len(children) > 0 and self.value is None:
return True
for child in children:
if child.string == self.value:
return True
return False
def __starts_with(self, node):
if self.arguments[0]:
# this is an attribute
attribute_name = self.arguments[1]
if node.has_key(attribute_name):
first = node[attribute_name]
return first.startswith(self.arguments[2])
elif self.arguments[1] == 'text()':
first = node.contents and node.contents[0]
if isinstance(first, BeautifulSoup.NavigableString):
return first.startswith(self.arguments[2])
return False
def __contains(self, node):
if self.arguments[0]:
# this is an attribute
attribute_name = self.arguments[1]
if node.has_key(attribute_name):
first = node[attribute_name]
return self.arguments[2] in first
elif self.arguments[1] == 'text()':
first = node.contents and node.contents[0]
if isinstance(first, BeautifulSoup.NavigableString):
return self.arguments[2] in first
return False
def __string_length(self, node):
if self.arguments[0]:
# this is an attribute
attribute_name = self.arguments[1]
if node.has_key(attribute_name):
value = node[attribute_name]
else:
value = None
elif self.arguments[1] == 'text()':
value = node.string
if value is not None:
return len(value) == self.value
return False
_paths = {}
_steps = {}
def get_path(path):
"""Utility for eliminating repeated parsings of the same paths and steps.
"""
if not _paths.has_key(path):
p = Path(path, parse=False)
steps = tokenize_path(path)
for step in steps:
if not _steps.has_key(step):
_steps[step] = PathStep(step)
p.steps.append(_steps[step])
_paths[path] = p
return _paths[path]
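# Hedged usage note (assumption): get_path caches parsed Path and PathStep
# objects, so repeated lookups of the same location path, e.g.
#   get_path('//td/a') is get_path('//td/a')
# are expected to return the same cached object and skip re-tokenizing.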
|
gpl-3.0
|
briancoutinho0905/2dsampling
|
src/mem/slicc/ast/StateDeclAST.py
|
32
|
3307
|
# Copyright (c) 2011 Advanced Micro Devices, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from slicc.ast.DeclAST import DeclAST
from slicc.symbols import Func, Type
class StateDeclAST(DeclAST):
def __init__(self, slicc, type_ast, pairs, states):
super(StateDeclAST, self).__init__(slicc, pairs)
self.type_ast = type_ast
self.states = states
def __repr__(self):
return "[StateDecl: %s]" % (self.type_ast)
def files(self, parent=None):
if "external" in self:
return set()
if parent:
ident = "%s_%s" % (parent, self.type_ast.ident)
else:
ident = self.type_ast.ident
s = set(("%s.hh" % ident, "%s.cc" % ident))
return s
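# Illustrative note (assumption, parent name is hypothetical): for a parent
# machine "L1Cache" and a type ident "State", files() would yield
# {"L1Cache_State.hh", "L1Cache_State.cc"}.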
def generate(self):
ident = str(self.type_ast)
# Make the new type
t = Type(self.symtab, ident, self.location, self.pairs,
self.state_machine)
self.symtab.newSymbol(t)
# Add all of the states of the type to it
for state in self.states:
state.generate(t)
# Add the implicit State_to_string method - FIXME, this is a bit dirty
func_id = "%s_to_string" % t.c_ident
pairs = { "external" : "yes" }
func = Func(self.symtab, func_id + "_" +
t.ident, func_id, self.location,
self.symtab.find("std::string", Type), [ t ], [], "",
pairs)
self.symtab.newSymbol(func)
# Add the State_to_permission method
func_id = "%s_to_permission" % t.c_ident
pairs = { "external" : "yes" }
func = Func(self.symtab, func_id + "_" +
t.ident, func_id, self.location,
self.symtab.find("AccessPermission", Type), [ t ], [], "",
pairs)
self.symtab.newSymbol(func)
|
bsd-3-clause
|
bumshakabum/brint
|
budget/flask/lib/python3.6/site-packages/pip/_vendor/lockfile/linklockfile.py
|
536
|
2652
|
from __future__ import absolute_import
import time
import os
from . import (LockBase, LockFailed, NotLocked, NotMyLock, LockTimeout,
AlreadyLocked)
class LinkLockFile(LockBase):
"""Lock access to a file using atomic property of link(2).
>>> lock = LinkLockFile('somefile')
>>> lock = LinkLockFile('somefile', threaded=False)
"""
def acquire(self, timeout=None):
try:
open(self.unique_name, "wb").close()
except IOError:
raise LockFailed("failed to create %s" % self.unique_name)
timeout = timeout if timeout is not None else self.timeout
end_time = time.time()
if timeout is not None and timeout > 0:
end_time += timeout
while True:
# Try and create a hard link to it.
try:
os.link(self.unique_name, self.lock_file)
except OSError:
# Link creation failed. Maybe we've double-locked?
nlinks = os.stat(self.unique_name).st_nlink
if nlinks == 2:
# The original link plus the one I created == 2. We're
# good to go.
return
else:
# Otherwise the lock creation failed.
if timeout is not None and time.time() > end_time:
os.unlink(self.unique_name)
if timeout > 0:
raise LockTimeout("Timeout waiting to acquire"
" lock for %s" %
self.path)
else:
raise AlreadyLocked("%s is already locked" %
self.path)
time.sleep(timeout is not None and timeout / 10 or 0.1)
else:
# Link creation succeeded. We're good to go.
return
def release(self):
if not self.is_locked():
raise NotLocked("%s is not locked" % self.path)
elif not os.path.exists(self.unique_name):
raise NotMyLock("%s is locked, but not by me" % self.path)
os.unlink(self.unique_name)
os.unlink(self.lock_file)
def is_locked(self):
return os.path.exists(self.lock_file)
def i_am_locking(self):
return (self.is_locked() and
os.path.exists(self.unique_name) and
os.stat(self.unique_name).st_nlink == 2)
def break_lock(self):
if os.path.exists(self.lock_file):
os.unlink(self.lock_file)
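# Hedged usage sketch (assumes the usual lockfile calling convention shown in
# the class docstring above):
#   lock = LinkLockFile('somefile', threaded=False)
#   lock.acquire(timeout=5)   # raises LockTimeout if another holder persists
#   try:
#       pass                  # critical section
#   finally:
#       lock.release()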
|
gpl-3.0
|
NorfolkDataSci/presentations
|
2018-01_chatbot/serverless-chatbots-workshop-master/LambdaFunctions/sentiment-analysis/nltk/classify/weka.py
|
7
|
12625
|
# Natural Language Toolkit: Interface to Weka Classifiers
#
# Copyright (C) 2001-2016 NLTK Project
# Author: Edward Loper <[email protected]>
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
"""
Classifiers that make use of the external 'Weka' package.
"""
from __future__ import print_function
import time
import tempfile
import os
import subprocess
import re
import zipfile
from sys import stdin
from nltk import compat
from nltk.probability import DictionaryProbDist
from nltk.internals import java, config_java
from nltk.classify.api import ClassifierI
_weka_classpath = None
_weka_search = ['.',
'/usr/share/weka',
'/usr/local/share/weka',
'/usr/lib/weka',
'/usr/local/lib/weka',]
def config_weka(classpath=None):
global _weka_classpath
# Make sure java's configured first.
config_java()
if classpath is not None:
_weka_classpath = classpath
if _weka_classpath is None:
searchpath = _weka_search
if 'WEKAHOME' in os.environ:
searchpath.insert(0, os.environ['WEKAHOME'])
for path in searchpath:
if os.path.exists(os.path.join(path, 'weka.jar')):
_weka_classpath = os.path.join(path, 'weka.jar')
version = _check_weka_version(_weka_classpath)
if version:
print(('[Found Weka: %s (version %s)]' %
(_weka_classpath, version)))
else:
print('[Found Weka: %s]' % _weka_classpath)
_check_weka_version(_weka_classpath)
if _weka_classpath is None:
raise LookupError('Unable to find weka.jar! Use config_weka() '
'or set the WEKAHOME environment variable. '
'For more information about Weka, please see '
'http://www.cs.waikato.ac.nz/ml/weka/')
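# Hedged usage note (assumption, jar path is illustrative): when weka.jar is
# not in one of the default search locations, callers can point config_weka at
# it explicitly, e.g.
#   config_weka(classpath='/opt/weka/weka.jar')
# otherwise the WEKAHOME environment variable and _weka_search are consulted.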
def _check_weka_version(jar):
try:
zf = zipfile.ZipFile(jar)
except (SystemExit, KeyboardInterrupt):
raise
except:
return None
try:
try:
return zf.read('weka/core/version.txt')
except KeyError:
return None
finally:
zf.close()
class WekaClassifier(ClassifierI):
def __init__(self, formatter, model_filename):
self._formatter = formatter
self._model = model_filename
def prob_classify_many(self, featuresets):
return self._classify_many(featuresets, ['-p', '0', '-distribution'])
def classify_many(self, featuresets):
return self._classify_many(featuresets, ['-p', '0'])
def _classify_many(self, featuresets, options):
# Make sure we can find java & weka.
config_weka()
temp_dir = tempfile.mkdtemp()
try:
# Write the test data file.
test_filename = os.path.join(temp_dir, 'test.arff')
self._formatter.write(test_filename, featuresets)
# Call weka to classify the data.
cmd = ['weka.classifiers.bayes.NaiveBayes',
'-l', self._model, '-T', test_filename] + options
(stdout, stderr) = java(cmd, classpath=_weka_classpath,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
# Check if something went wrong:
if stderr and not stdout:
if 'Illegal options: -distribution' in stderr:
raise ValueError('The installed version of weka does '
'not support probability distribution '
'output.')
else:
raise ValueError('Weka failed to generate output:\n%s'
% stderr)
# Parse weka's output.
return self.parse_weka_output(stdout.decode(stdin.encoding).split('\n'))
finally:
for f in os.listdir(temp_dir):
os.remove(os.path.join(temp_dir, f))
os.rmdir(temp_dir)
def parse_weka_distribution(self, s):
probs = [float(v) for v in re.split('[*,]+', s) if v.strip()]
probs = dict(zip(self._formatter.labels(), probs))
return DictionaryProbDist(probs)
def parse_weka_output(self, lines):
# Strip unwanted text from stdout
for i,line in enumerate(lines):
if line.strip().startswith("inst#"):
lines = lines[i:]
break
if lines[0].split() == ['inst#', 'actual', 'predicted',
'error', 'prediction']:
return [line.split()[2].split(':')[1]
for line in lines[1:] if line.strip()]
elif lines[0].split() == ['inst#', 'actual', 'predicted',
'error', 'distribution']:
return [self.parse_weka_distribution(line.split()[-1])
for line in lines[1:] if line.strip()]
# is this safe?
elif re.match(r'^0 \w+ [01]\.[0-9]* \?\s*$', lines[0]):
return [line.split()[1] for line in lines if line.strip()]
else:
for line in lines[:10]:
print(line)
raise ValueError('Unhandled output format -- your version '
'of weka may not be supported.\n'
' Header: %s' % lines[0])
# [xx] full list of classifiers (some may be abstract?):
# ADTree, AODE, BayesNet, ComplementNaiveBayes, ConjunctiveRule,
# DecisionStump, DecisionTable, HyperPipes, IB1, IBk, Id3, J48,
# JRip, KStar, LBR, LeastMedSq, LinearRegression, LMT, Logistic,
# LogisticBase, M5Base, MultilayerPerceptron,
# MultipleClassifiersCombiner, NaiveBayes, NaiveBayesMultinomial,
# NaiveBayesSimple, NBTree, NNge, OneR, PaceRegression, PART,
# PreConstructedLinearModel, Prism, RandomForest,
# RandomizableClassifier, RandomTree, RBFNetwork, REPTree, Ridor,
# RuleNode, SimpleLinearRegression, SimpleLogistic,
# SingleClassifierEnhancer, SMO, SMOreg, UserClassifier, VFI,
# VotedPerceptron, Winnow, ZeroR
_CLASSIFIER_CLASS = {
'naivebayes': 'weka.classifiers.bayes.NaiveBayes',
'C4.5': 'weka.classifiers.trees.J48',
'log_regression': 'weka.classifiers.functions.Logistic',
'svm': 'weka.classifiers.functions.SMO',
'kstar': 'weka.classifiers.lazy.KStar',
'ripper': 'weka.classifiers.rules.JRip',
}
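# Hedged usage sketch (assumes ARFF-compatible featuresets; the model path is
# illustrative):
#   classifier = WekaClassifier.train('/tmp/name.model', train_feats, 'C4.5')
#   labels = classifier.classify_many(test_feats)
# where the 'C4.5' key maps to weka.classifiers.trees.J48 in the table above.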
@classmethod
def train(cls, model_filename, featuresets,
classifier='naivebayes', options=[], quiet=True):
# Make sure we can find java & weka.
config_weka()
# Build an ARFF formatter.
formatter = ARFF_Formatter.from_train(featuresets)
temp_dir = tempfile.mkdtemp()
try:
# Write the training data file.
train_filename = os.path.join(temp_dir, 'train.arff')
formatter.write(train_filename, featuresets)
if classifier in cls._CLASSIFIER_CLASS:
javaclass = cls._CLASSIFIER_CLASS[classifier]
elif classifier in cls._CLASSIFIER_CLASS.values():
javaclass = classifier
else:
raise ValueError('Unknown classifier %s' % classifier)
# Train the weka model.
cmd = [javaclass, '-d', model_filename, '-t', train_filename]
cmd += list(options)
if quiet:
stdout = subprocess.PIPE
else: stdout = None
java(cmd, classpath=_weka_classpath, stdout=stdout)
# Return the new classifier.
return WekaClassifier(formatter, model_filename)
finally:
for f in os.listdir(temp_dir):
os.remove(os.path.join(temp_dir, f))
os.rmdir(temp_dir)
class ARFF_Formatter:
"""
Converts featuresets and labeled featuresets to ARFF-formatted
strings, appropriate for input into Weka.
Features and classes can be specified manually in the constructor, or may
be determined from data using ``from_train``.
"""
def __init__(self, labels, features):
"""
:param labels: A list of all class labels that can be generated.
:param features: A list of feature specifications, where
each feature specification is a tuple (fname, ftype);
and ftype is an ARFF type string such as NUMERIC or
STRING.
"""
self._labels = labels
self._features = features
def format(self, tokens):
"""Returns a string representation of ARFF output for the given data."""
return self.header_section() + self.data_section(tokens)
def labels(self):
"""Returns the list of classes."""
return list(self._labels)
def write(self, outfile, tokens):
"""Writes ARFF data to a file for the given data."""
if not hasattr(outfile, 'write'):
outfile = open(outfile, 'w')
outfile.write(self.format(tokens))
outfile.close()
@staticmethod
def from_train(tokens):
"""
Constructs an ARFF_Formatter instance with class labels and feature
types determined from the given data. Handles boolean, numeric and
string (note: not nominal) types.
"""
# Find the set of all attested labels.
labels = set(label for (tok, label) in tokens)
# Determine the types of all features.
features = {}
for tok, label in tokens:
for (fname, fval) in tok.items():
if issubclass(type(fval), bool):
ftype = '{True, False}'
elif issubclass(type(fval), (compat.integer_types, float, bool)):
ftype = 'NUMERIC'
elif issubclass(type(fval), compat.string_types):
ftype = 'STRING'
elif fval is None:
continue # can't tell the type.
else:
raise ValueError('Unsupported value type %r' % fval)
if features.get(fname, ftype) != ftype:
raise ValueError('Inconsistent type for %s' % fname)
features[fname] = ftype
features = sorted(features.items())
return ARFF_Formatter(labels, features)
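# Illustrative note (assumption): for tokens such as
#   [({'length': 4, 'last_letter': 'a'}, 'female')]
# from_train infers the features [('last_letter', 'STRING'),
# ('length', 'NUMERIC')] and the label set {'female'}.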
def header_section(self):
"""Returns an ARFF header as a string."""
# Header comment.
s = ('% Weka ARFF file\n' +
'% Generated automatically by NLTK\n' +
'%% %s\n\n' % time.ctime())
# Relation name
s += '@RELATION rel\n\n'
# Input attribute specifications
for fname, ftype in self._features:
s += '@ATTRIBUTE %-30r %s\n' % (fname, ftype)
# Label attribute specification
s += '@ATTRIBUTE %-30r {%s}\n' % ('-label-', ','.join(self._labels))
return s
def data_section(self, tokens, labeled=None):
"""
Returns the ARFF data section for the given data.
:param tokens: a list of featuresets (dicts) or labelled featuresets
which are tuples (featureset, label).
:param labeled: Indicates whether the given tokens are labeled
or not. If None, then the tokens will be assumed to be
labeled if the first token's value is a tuple or list.
"""
# Check if the tokens are labeled or unlabeled. If unlabeled,
# then use 'None'
if labeled is None:
labeled = tokens and isinstance(tokens[0], (tuple, list))
if not labeled:
tokens = [(tok, None) for tok in tokens]
# Data section
s = '\n@DATA\n'
for (tok, label) in tokens:
for fname, ftype in self._features:
s += '%s,' % self._fmt_arff_val(tok.get(fname))
s += '%s\n' % self._fmt_arff_val(label)
return s
def _fmt_arff_val(self, fval):
if fval is None:
return '?'
elif isinstance(fval, (bool, compat.integer_types)):
return '%s' % fval
elif isinstance(fval, float):
return '%r' % fval
else:
return '%r' % fval
if __name__ == '__main__':
from nltk.classify.util import names_demo, binary_names_demo_features
def make_classifier(featuresets):
return WekaClassifier.train('/tmp/name.model', featuresets,
'C4.5')
classifier = names_demo(make_classifier, binary_names_demo_features)
|
mit
|
ygsun/async
|
concurrent_thread_taomm.py
|
1
|
10435
|
#!/usr/bin/env python3
"""
Python version: > 2.5
Dependencies: requests, BeautifulSoup
Threaded version.
Crawler classes.
Fetch photo links from the Taobao model site (https://mm.taobao.com) and download them, organized by location, album name, and model name.
"""
import contextlib
import threading
import os
import re
import requests
import time
import json
import argparse
import logging
from bs4 import BeautifulSoup
# first page
FIRST_PAGE = 1
# Taobao model list page
user_list = 'https://mm.taobao.com/json/request_top_list.htm?page={}'
# Taobao model info page
user_info = 'https://mm.taobao.com/self/info/model_info_show.htm?user_id={}'
# Taobao model album list page
album_list = 'https://mm.taobao.com/self/album/open_album_list.htm?user_id={}&page={}'
# Taobao model album photo-list JSON
photo_list = 'https://mm.taobao.com/album/json/get_album_photo_list.htm?user_id={}&album_id={}&page={}'
def cli():
# setting argparser
parser = argparse.ArgumentParser()
parser.add_argument('-u', '--user', type=int, default=1, help='Max user page to fetch.')
parser.add_argument('-a', '--album', type=int, default=1, help='Max album page to fetch.')
parser.add_argument('-p', '--photo', type=int, default=1, help='Max photo page to fetch.')
parser.add_argument('-d', '--download', action='store_true', default=False, help='Download photos from url.')
parser.add_argument('-l', '--loglevel', default='INFO', help='Loglevel [DEBUG | INFO | ERROR]. Default: NOTSET')
args = parser.parse_args()
# setting logging configuration
numeric_level = getattr(logging, args.loglevel.upper(), None)
if not isinstance(numeric_level, int):
raise ValueError('Invalid log level: %s' % args.loglevel)
logging.basicConfig(style='{', format='{asctime} {levelname} {funcName} {msg}', level=numeric_level)
return args
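# Hedged usage note (assumption): a typical invocation from the shell would be
#   python concurrent_thread_taomm.py -u 2 -a 1 -p 1 -d -l DEBUG
# which crawls two user-list pages, one album page and one photo page per
# album, and downloads the photos.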
class Parser(threading.Thread):
def __init__(self, func, *args, **kwargs):
super(Parser, self).__init__()
self._func = func
self._args = args
self._kwargs = kwargs
self._result = []
def run(self):
self._result = self._func(*self._args, **self._kwargs)
def result(self):
return self._result
def __repr__(self):
return '<Parser({})>'.format(threading.get_ident())
class Photo(threading.Thread):
g_count = 0
def __init__(self, id, url, album_name, user_name, location, session):
super(Photo, self).__init__()
self._id = id
self._url = 'https:' + url
self._user_name = user_name
self._album_name = album_name
self._location = location
self._session = session
self._path = os.path.join(os.getcwd(), 'taomm', self._location, self._user_name, self._album_name)
os.makedirs(self._path, exist_ok=True)
def run(self):
if args.download:
image = self.fetch(self._url)
self.save(image)
logging.debug(self)
Photo.g_count += 1
def fetch(self, url):
r = self._session.get(url)
return r.content
def save(self, image):
path = os.path.join(self._path, '{}.jpg'.format(self._id))
with open(path, 'wb') as f:
f.write(image)
def __repr__(self):
return '<Photo(id={} url={})>'.format(self._id, self._url)
class Album(threading.Thread):
def __init__(self, id, name, user_id, user_name, location, *, session):
super(Album, self).__init__()
self._id = id
self._user_id = user_id
self._name = name
self._user_name = user_name
self._location = location
self._photos = []
self._session = session
def get_page_nums(self):
# get users list page nums
photo_list_url = photo_list.format(self._user_id, self._id, FIRST_PAGE)
resp = self.fetch(photo_list_url)
return self.parse_page_nums(resp)
def get_photo_by_page(self, page):
photo_list_url = photo_list.format(self._user_id, self._id, page)
resp = self.fetch(photo_list_url)
return self.parse_photo_url(resp)
def fetch(self, url):
r = self._session.get(url)
return r.text
@staticmethod
def parse_page_nums(resp):
json_data = json.loads(resp)
pages = int(json_data['totalPage'])
return pages
def parse_photo_url(self, resp):
json_data = json.loads(resp)
photos = json_data['picList']
photo_items = []
for photo in photos:
photo = Photo(photo['picId'],
photo['picUrl'],
self._name,
self._user_name,
self._location,
session=self._session)
photo_items.append(photo)
return photo_items
def get_photos(self):
# get the number of photo pages
pages = self.get_page_nums()
for page in range(min(args.photo, pages)):
photo_items = self.get_photo_by_page(page + 1)
for photo in photo_items:
self._photos.append(photo)
photo.start()
def run(self):
# fetch the photo list
self.get_photos()
logging.debug(self)
# wait for the photo save tasks to finish
for photo in self._photos:
photo.join()
def __repr__(self):
return '<Album(id={} name={} user={})>'.format(self._id, self._name, self._user_name)
class User(threading.Thread):
def __init__(self, id, *, session):
super(User, self).__init__()
self._id = id
self._name = ''
self._location = ''
self._albums = []
self._session = session
def get_page_nums(self):
# get users list page nums
album_list_url = album_list.format(self._id, FIRST_PAGE)
resp = self.fetch(album_list_url)
return self.parse_page_nums(resp)
def get_album_by_page(self, page):
album_list_url = album_list.format(self._id, page)
resp = self.fetch(album_list_url)
return self.parse_album_id(resp)
def fetch(self, url):
r = self._session.get(url)
return r.text
@staticmethod
def parse_page_nums(resp):
soup = BeautifulSoup(resp, 'html.parser')
pages = int(soup.find('input', id='J_Totalpage').get('value', 0))
return pages
def parse_user_info(self, resp):
soup = BeautifulSoup(resp, 'html.parser')
self._name = soup.find('ul', class_='mm-p-info-cell clearfix').li.span.text
self._location = soup.find('li', class_='mm-p-cell-right').span.text
def parse_album_id(self, resp):
soup = BeautifulSoup(resp, 'html.parser')
pattern = re.compile(r'album_id=(\d+)')
album_items = []
tags = soup.select('h4 a')
for tag in tags:
match = pattern.search(tag['href'])
if match:
album_id = match.group(1)
album_name = tag.text.strip().replace('.', '').strip()
album = Album(album_id,
album_name,
self._id,
self._name,
self._location,
session=self._session)
album_items.append(album)
return album_items
def get_info(self):
user_info_url = user_info.format(self._id)
resp = self.fetch(user_info_url)
self.parse_user_info(resp)
def get_albums(self):
# get the number of album pages
pages = self.get_page_nums()
for page in range(min(args.album, pages)):
album_items = self.get_album_by_page(page + 1)
for album in album_items:
self._albums.append(album)
album.start()
def run(self):
# fetch the user info
self.get_info()
logging.debug(self)
# fetch the album list
self.get_albums()
# wait for the album tasks to finish
for album in self._albums:
album.join()
def __repr__(self):
return '<User(id={} name={})>'.format(self._id, self._name)
class Manager(threading.Thread):
def __init__(self):
super(Manager, self).__init__()
self._users = []
self._session = requests.Session()
def get_user_pages(self):
# user list URL for the first page
user_list_url = user_list.format(FIRST_PAGE)
# fetch the page content and return the page count
resp = self.fetch(user_list_url)
return self.parse_page_nums(resp)
def get_user_by_page(self, page):
# user list URL for page N
user_list_url = user_list.format(page)
# fetch the page content and return the page count
resp = self.fetch(user_list_url)
return self.parse_user_id(resp)
def get_users(self):
# get the number of user pages
pages = self.get_user_pages()
for page in range(min(args.user, pages)):
user_items = self.get_user_by_page(page + 1)
for user in user_items:
self._users.append(user)
user.start()
# fetch page content
def fetch(self, url):
r = self._session.get(url)
return r.text
@staticmethod
def parse_page_nums(content):
soup = BeautifulSoup(content, 'html.parser')
pages = int(soup.find('input', id='J_Totalpage').get('value', 0))
return pages
def parse_user_id(self, content):
soup = BeautifulSoup(content, 'html.parser')
user_items = []
for item in soup.find_all('span', class_='friend-follow J_FriendFollow'):
user = User(item['data-userid'], session=self._session)
user_items.append(user)
return user_items
def run(self):
# fetch the user IDs
self.get_users()
logging.debug(self)
# wait for the user tasks to finish
for user in self._users:
user.join()
# close the session
self._session.close()
def __repr__(self):
return '<Manager(users_num={})>'.format(len(self._users))
@contextlib.contextmanager
def timer():
start = time.time()
yield
logging.info('run in {:.1f} seconds'.format(time.time() - start))
if __name__ == '__main__':
args = cli()
with timer():
manager = Manager()
manager.start()
manager.join()
logging.info('{} photos fetched.'.format(Photo.g_count))
|
bsd-3-clause
|
kdwink/intellij-community
|
python/testData/MockSdk3.2/Lib/io.py
|
113
|
3624
|
"""The io module provides the Python interfaces to stream handling. The
builtin open function is defined in this module.
At the top of the I/O hierarchy is the abstract base class IOBase. It
defines the basic interface to a stream. Note, however, that there is no
separation between reading and writing to streams; implementations are
allowed to throw an IOError if they do not support a given operation.
Extending IOBase is RawIOBase which deals simply with the reading and
writing of raw bytes to a stream. FileIO subclasses RawIOBase to provide
an interface to OS files.
BufferedIOBase deals with buffering on a raw byte stream (RawIOBase). Its
subclasses, BufferedWriter, BufferedReader, and BufferedRWPair buffer
streams that are readable, writable, and both respectively.
BufferedRandom provides a buffered interface to random access
streams. BytesIO is a simple stream of in-memory bytes.
Another IOBase subclass, TextIOBase, deals with the encoding and decoding
of streams into text. TextIOWrapper, which extends it, is a buffered text
interface to a buffered raw stream (`BufferedIOBase`). Finally, StringIO
is an in-memory stream for text.
Argument names are not part of the specification, and only the arguments
of open() are intended to be used as keyword arguments.
data:
DEFAULT_BUFFER_SIZE
An int containing the default buffer size used by the module's buffered
I/O classes. open() uses the file's blksize (as obtained by os.stat) if
possible.
"""
# New I/O library conforming to PEP 3116.
# XXX edge cases when switching between reading/writing
# XXX need to support 1 meaning line-buffered
# XXX whenever an argument is None, use the default value
# XXX read/write ops should check readable/writable
# XXX buffered readinto should work with arbitrary buffer objects
# XXX use incremental encoder for text output, at least for UTF-16 and UTF-8-SIG
# XXX check writable, readable and seekable in appropriate places
__author__ = ("Guido van Rossum <[email protected]>, "
"Mike Verdone <[email protected]>, "
"Mark Russell <[email protected]>, "
"Antoine Pitrou <[email protected]>, "
"Amaury Forgeot d'Arc <[email protected]>, "
"Benjamin Peterson <[email protected]>")
__all__ = ["BlockingIOError", "open", "IOBase", "RawIOBase", "FileIO",
"BytesIO", "StringIO", "BufferedIOBase",
"BufferedReader", "BufferedWriter", "BufferedRWPair",
"BufferedRandom", "TextIOBase", "TextIOWrapper",
"UnsupportedOperation", "SEEK_SET", "SEEK_CUR", "SEEK_END"]
import _io
import abc
from _io import (DEFAULT_BUFFER_SIZE, BlockingIOError, UnsupportedOperation,
open, FileIO, BytesIO, StringIO, BufferedReader,
BufferedWriter, BufferedRWPair, BufferedRandom,
IncrementalNewlineDecoder, TextIOWrapper)
OpenWrapper = _io.open # for compatibility with _pyio
# for seek()
SEEK_SET = 0
SEEK_CUR = 1
SEEK_END = 2
# Declaring ABCs in C is tricky so we do it here.
# Method descriptions and default implementations are inherited from the C
# version however.
class IOBase(_io._IOBase, metaclass=abc.ABCMeta):
pass
class RawIOBase(_io._RawIOBase, IOBase):
pass
class BufferedIOBase(_io._BufferedIOBase, IOBase):
pass
class TextIOBase(_io._TextIOBase, IOBase):
pass
RawIOBase.register(FileIO)
for klass in (BytesIO, BufferedReader, BufferedWriter, BufferedRandom,
BufferedRWPair):
BufferedIOBase.register(klass)
for klass in (StringIO, TextIOWrapper):
TextIOBase.register(klass)
del klass
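# Illustrative note (assumption, file names are hypothetical): after the
# registrations above,
#   isinstance(open('f.bin', 'rb', buffering=0), RawIOBase)
#   isinstance(open('f.txt', 'r'), TextIOBase)
# both hold, even though the concrete classes are implemented in C (_io).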
|
apache-2.0
|
paradise6/DetectMaliciousURL
|
model/eval.py
|
1
|
5331
|
import tensorflow as tf
import numpy as np
import os
import time
import datetime
import data_helper
import word2vec_helpers
from URLCNN import URLCNN
import csv
# Parameters
# ==================================================
# Data Parameters
tf.flags.DEFINE_string("input_text_file", "../data/data2.csv", "Test text data source to evaluate.")
# tf.flags.DEFINE_string("input_label_file", "", "Label file for test text data source.")
tf.flags.DEFINE_string("single_url",None,"single url to evaluate")
# Eval Parameters
tf.flags.DEFINE_integer("batch_size", 64, "Batch Size (default: 64)")
tf.flags.DEFINE_string("checkpoint_dir", "./runs/1494174954/checkpoints/", "Checkpoint directory from training run")
tf.flags.DEFINE_boolean("eval_train", True, "Evaluate on all training data")
# Misc Parameters
tf.flags.DEFINE_boolean("allow_soft_placement", True, "Allow device soft device placement")
tf.flags.DEFINE_boolean("log_device_placement", False, "Log placement of ops on devices")
FLAGS = tf.flags.FLAGS
FLAGS._parse_flags()
print("\nParameters:")
for attr, value in sorted(FLAGS.__flags.items()):
print("{}={}".format(attr.upper(), value))
print("")
# validate
# ==================================================
# validate checkout point file
checkpoint_file = tf.train.latest_checkpoint(FLAGS.checkpoint_dir)
if checkpoint_file is None:
print("Cannot find a valid checkpoint file!")
exit(0)
print("Using checkpoint file : {}".format(checkpoint_file))
# validate word2vec model file
trained_word2vec_model_file = os.path.join(FLAGS.checkpoint_dir, "..", "trained_word2vec.model")
if not os.path.exists(trained_word2vec_model_file):
print("Word2vec model file \'{}\' doesn't exist!".format(trained_word2vec_model_file))
print("Using word2vec model file : {}".format(trained_word2vec_model_file))
# validate training params file
training_params_file = os.path.join(FLAGS.checkpoint_dir, "..", "training_params.pickle")
if not os.path.exists(training_params_file):
print("Training params file \'{}\' is missing!".format(training_params_file))
print("Using training params file : {}".format(training_params_file))
# Load params
params = data_helper.loadDict(training_params_file)
num_labels = int(params['num_labels'])
max_document_length = int(params['max_document_length'])
# Load data
if FLAGS.eval_train and FLAGS.single_url is None:
x_raw, y_test = data_helper.load_data_and_labels(FLAGS.input_text_file)
elif FLAGS.single_url is not None:
x_raw = [FLAGS.single_url]
y_test=None
else:
x_raw = ["a masterpiece four years in the making", "everything is off."]
y_test = [1, 0]
# Get Embedding vector x_test
sentences, max_document_length = data_helper.padding_sentences(x_raw, '<PADDING>', padding_sentence_length = max_document_length)
x_test = np.array(word2vec_helpers.embedding_sentences(sentences, file_to_load = trained_word2vec_model_file))
print("x_test.shape = {}".format(x_test.shape))
# Evaluation
# ==================================================
print("\nEvaluating...\n")
checkpoint_file = tf.train.latest_checkpoint(FLAGS.checkpoint_dir)
graph = tf.Graph()
with graph.as_default():
session_conf = tf.ConfigProto(
allow_soft_placement=FLAGS.allow_soft_placement,
log_device_placement=FLAGS.log_device_placement)
sess = tf.Session(config=session_conf)
with sess.as_default():
# Load the saved meta graph and restore variables
saver = tf.train.import_meta_graph("{}.meta".format(checkpoint_file))
saver.restore(sess, checkpoint_file)
# Get the placeholders from the graph by name
input_x = graph.get_operation_by_name("input_x").outputs[0]
# input_y = graph.get_operation_by_name("input_y").outputs[0]
dropout_keep_prob = graph.get_operation_by_name("dropout_keep_prob").outputs[0]
# Tensors we want to evaluate
predictions = graph.get_operation_by_name("output/predictions").outputs[0]
# Generate batches for one epoch
batches = data_helper.batch_iter(list(x_test), FLAGS.batch_size, 1, shuffle=False)
# Collect the predictions here
all_predictions = []
for x_test_batch in batches:
batch_predictions = sess.run(predictions, {input_x: x_test_batch, dropout_keep_prob: 1.0})
all_predictions = np.concatenate([all_predictions, batch_predictions])
# Print accuracy if y_test is defined
if y_test is not None:
correct_predictions=0
# correct_predictions = float(sum((all_predictions == y_test)))
for i in range(0,len(all_predictions)):
if all_predictions[i]==y_test[i][1]:
correct_predictions=correct_predictions+1
correct_predictions=float(correct_predictions)
print("Total number of test examples: {}".format(len(y_test)))
print("Accuracy: {:g}".format(correct_predictions/float(len(y_test))))
if FLAGS.single_url is not None:
print(sentences)
print("Result: {}".format(all_predictions))
else:
# Save the evaluation to a csv
predictions_human_readable = np.column_stack((np.array([text for text in x_raw]), all_predictions))
out_path = os.path.join(FLAGS.checkpoint_dir, "..", "prediction.csv")
print("Saving evaluation to {0}".format(out_path))
with open(out_path, 'w') as f:
csv.writer(f).writerows(predictions_human_readable)
|
apache-2.0
|
bitsgalore/omSipCreator
|
omSipCreator/checksums.py
|
2
|
1412
|
#! /usr/bin/env python
"""
Checksum reading and generation
"""
import os
import logging
import hashlib
from . import config
from .shared import errorExit
def readChecksums(fileIn):
"""Read checksum file, return contents as nested list
Also strip away any file paths if they exist (return names only)
"""
try:
data = []
f = open(fileIn, "r", encoding="utf-8")
for row in f:
rowSplit = row.split(' ', 1)
# Second col contains file name. Strip away any path components if they are present
# Raises IndexError if entry only 1 col (malformed checksum file)!
fileName = rowSplit[1].strip()
rowSplit[1] = os.path.basename(fileName)
data.append(rowSplit)
f.close()
return data
except IOError:
logging.fatal("cannot read '" + fileIn + "'")
config.errors += 1
errorExit(config.errors, config.warnings)
def generate_file_sha512(fileIn):
"""Generate sha512 hash of file
fileIn is read in chunks to ensure it will work with (very) large files as well
Adapted from: http://stackoverflow.com/a/1131255/1209004
"""
blocksize = 2**20
m = hashlib.sha512()
with open(fileIn, "rb") as f:
while True:
buf = f.read(blocksize)
if not buf:
break
m.update(buf)
return m.hexdigest()
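# Hedged usage sketch (file names are illustrative): verify a download against
# a manifest produced elsewhere:
#   manifest = dict((name, digest) for digest, name in readChecksums('manifest.sha512'))
#   ok = generate_file_sha512('image.iso') == manifest.get('image.iso')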
|
apache-2.0
|
toumorokoshi/miura
|
miura/script.py
|
1
|
2256
|
import os
from miura import runner
from .utils import get_method_from_module, format_path_to_module
from .data import load_data_from_path, filter_data
from .template import TemplateSet
from .exceptions import MiuraException
import logging
DEFAULT_DATA_DIRECTORY = os.path.join(os.curdir, 'data')
DEFAULT_TEMPLATE_DIRECTORY = os.path.join(os.curdir, 'templates')
DEFAULT_SCRIPTS_DIRECTORY = os.path.join(os.curdir, 'scripts')
LOGGER = logging.getLogger(__name__)
class MiuraScript(object):
delete = False # if true, delete the jobs instead of upserting them
dry_run = False # if true, the jobs will not run
print_dir = None # if set, will print configs to the directory passed
def __init__(self, script_name,
data_directory,
scripts_directory,
template_directory,
data_filters=None
):
self.script_name = script_name
self.data_directory = data_directory
self.scripts_directory = scripts_directory
self.template_directory = template_directory
self.data_filters = data_filters or {}
self.method_options = {}
def __call__(self):
target_module = "{0}.{1}".format(
format_path_to_module(self.scripts_directory),
self.script_name
)
try:
run_method = get_method_from_module(target_module, 'run')
except ImportError:
raise MiuraException("Unable to find script {0}".format(target_module))
data = load_data_from_path(self.data_directory)
if self.data_filters:
filter_data(data, self.data_filters)
templates = TemplateSet(self.template_directory)
if self.delete:
target_method = 'delete'
else:
target_method = 'upsert'
if self.dry_run:
LOGGER.info("Performing a Dry Run! No Jobs Are Being Created")
target_method = 'dry_run'
job_parser = runner.JobParser(
data,
templates,
)
for job in job_parser.parse_job(run_method, self.method_options):
if self.print_dir:
job.print_job(self.print_dir)
getattr(job, target_method)()
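# Hedged usage sketch (script name and directories are illustrative):
#   script = MiuraScript('deploy', './data', './scripts', './templates')
#   script.dry_run = True
#   script()   # resolves run() from ./scripts/deploy.py and dry-runs each job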
|
mit
|
nkgilley/home-assistant
|
homeassistant/components/tado/config_flow.py
|
6
|
5269
|
"""Config flow for Tado integration."""
import logging
from PyTado.interface import Tado
import requests.exceptions
import voluptuous as vol
from homeassistant import config_entries, core, exceptions
from homeassistant.const import CONF_PASSWORD, CONF_USERNAME
from homeassistant.core import callback
from .const import CONF_FALLBACK, UNIQUE_ID
from .const import DOMAIN # pylint:disable=unused-import
_LOGGER = logging.getLogger(__name__)
DATA_SCHEMA = vol.Schema(
{vol.Required(CONF_USERNAME): str, vol.Required(CONF_PASSWORD): str}
)
async def validate_input(hass: core.HomeAssistant, data):
"""Validate the user input allows us to connect.
Data has the keys from DATA_SCHEMA with values provided by the user.
"""
try:
tado = await hass.async_add_executor_job(
Tado, data[CONF_USERNAME], data[CONF_PASSWORD]
)
tado_me = await hass.async_add_executor_job(tado.getMe)
except KeyError:
raise InvalidAuth
except RuntimeError:
raise CannotConnect
except requests.exceptions.HTTPError as ex:
if ex.response.status_code > 400 and ex.response.status_code < 500:
raise InvalidAuth
raise CannotConnect
if "homes" not in tado_me or len(tado_me["homes"]) == 0:
raise NoHomes
home = tado_me["homes"][0]
unique_id = str(home["id"])
name = home["name"]
return {"title": name, UNIQUE_ID: unique_id}
class ConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle a config flow for Tado."""
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_CLOUD_POLL
async def async_step_user(self, user_input=None):
"""Handle the initial step."""
errors = {}
if user_input is not None:
try:
validated = await validate_input(self.hass, user_input)
except CannotConnect:
errors["base"] = "cannot_connect"
except InvalidAuth:
errors["base"] = "invalid_auth"
except NoHomes:
errors["base"] = "no_homes"
except Exception: # pylint: disable=broad-except
_LOGGER.exception("Unexpected exception")
errors["base"] = "unknown"
if "base" not in errors:
await self.async_set_unique_id(validated[UNIQUE_ID])
self._abort_if_unique_id_configured()
return self.async_create_entry(
title=validated["title"], data=user_input
)
return self.async_show_form(
step_id="user", data_schema=DATA_SCHEMA, errors=errors
)
async def async_step_homekit(self, homekit_info):
"""Handle HomeKit discovery."""
if self._async_current_entries():
# We can see tado on the network to tell them to configure
# it, but since the device will not give up the account it is
# bound to and there can be multiple tado devices on a single
# account, we avoid showing the device as discovered once
# they already have one configured as they can always
# add a new one via "+"
return self.async_abort(reason="already_configured")
properties = {
key.lower(): value for (key, value) in homekit_info["properties"].items()
}
await self.async_set_unique_id(properties["id"])
return await self.async_step_user()
async def async_step_import(self, user_input):
"""Handle import."""
if self._username_already_configured(user_input):
return self.async_abort(reason="already_configured")
return await self.async_step_user(user_input)
def _username_already_configured(self, user_input):
"""See if we already have a username matching user input configured."""
existing_username = {
entry.data[CONF_USERNAME] for entry in self._async_current_entries()
}
return user_input[CONF_USERNAME] in existing_username
@staticmethod
@callback
def async_get_options_flow(config_entry):
"""Get the options flow for this handler."""
return OptionsFlowHandler(config_entry)
class OptionsFlowHandler(config_entries.OptionsFlow):
"""Handle a option flow for tado."""
def __init__(self, config_entry: config_entries.ConfigEntry):
"""Initialize options flow."""
self.config_entry = config_entry
async def async_step_init(self, user_input=None):
"""Handle options flow."""
if user_input is not None:
return self.async_create_entry(title="", data=user_input)
data_schema = vol.Schema(
{
vol.Required(
CONF_FALLBACK, default=self.config_entry.options.get(CONF_FALLBACK)
): bool,
}
)
return self.async_show_form(step_id="init", data_schema=data_schema)
class CannotConnect(exceptions.HomeAssistantError):
"""Error to indicate we cannot connect."""
class InvalidAuth(exceptions.HomeAssistantError):
"""Error to indicate there is invalid auth."""
class NoHomes(exceptions.HomeAssistantError):
"""Error to indicate the account has no homes."""
|
apache-2.0
|
sanghinitin/golismero
|
thirdparty_libs/django/utils/html_parser.py
|
231
|
4546
|
from django.utils.six.moves import html_parser as _html_parser
import re
import sys
current_version = sys.version_info
use_workaround = (
(current_version < (2, 7, 3)) or
(current_version >= (3, 0) and current_version < (3, 2, 3))
)
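# Illustrative note (assumption): the workaround covers releases that predate
# the upstream fix, e.g. CPython 2.7.2 or 3.2.2, while 2.7.3+ and 3.2.3+ use
# the stdlib HTMLParser unchanged.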
HTMLParseError = _html_parser.HTMLParseError
if not use_workaround:
HTMLParser = _html_parser.HTMLParser
else:
tagfind = re.compile('([a-zA-Z][-.a-zA-Z0-9:_]*)(?:\s|/(?!>))*')
class HTMLParser(_html_parser.HTMLParser):
"""
Patched version of stdlib's HTMLParser with patch from:
http://bugs.python.org/issue670664
"""
def __init__(self):
_html_parser.HTMLParser.__init__(self)
self.cdata_tag = None
def set_cdata_mode(self, tag):
try:
self.interesting = _html_parser.interesting_cdata
except AttributeError:
self.interesting = re.compile(r'</\s*%s\s*>' % tag.lower(), re.I)
self.cdata_tag = tag.lower()
def clear_cdata_mode(self):
self.interesting = _html_parser.interesting_normal
self.cdata_tag = None
# Internal -- handle starttag, return end or -1 if not terminated
def parse_starttag(self, i):
self.__starttag_text = None
endpos = self.check_for_whole_start_tag(i)
if endpos < 0:
return endpos
rawdata = self.rawdata
self.__starttag_text = rawdata[i:endpos]
# Now parse the data between i+1 and j into a tag and attrs
attrs = []
match = tagfind.match(rawdata, i + 1)
assert match, 'unexpected call to parse_starttag()'
k = match.end()
self.lasttag = tag = match.group(1).lower()
while k < endpos:
m = _html_parser.attrfind.match(rawdata, k)
if not m:
break
attrname, rest, attrvalue = m.group(1, 2, 3)
if not rest:
attrvalue = None
elif attrvalue[:1] == '\'' == attrvalue[-1:] or \
attrvalue[:1] == '"' == attrvalue[-1:]:
attrvalue = attrvalue[1:-1]
if attrvalue:
attrvalue = self.unescape(attrvalue)
attrs.append((attrname.lower(), attrvalue))
k = m.end()
end = rawdata[k:endpos].strip()
if end not in (">", "/>"):
lineno, offset = self.getpos()
if "\n" in self.__starttag_text:
lineno = lineno + self.__starttag_text.count("\n")
offset = len(self.__starttag_text) \
- self.__starttag_text.rfind("\n")
else:
offset = offset + len(self.__starttag_text)
self.error("junk characters in start tag: %r"
% (rawdata[k:endpos][:20],))
if end.endswith('/>'):
# XHTML-style empty tag: <span attr="value" />
self.handle_startendtag(tag, attrs)
else:
self.handle_starttag(tag, attrs)
if tag in self.CDATA_CONTENT_ELEMENTS:
self.set_cdata_mode(tag) # <--------------------------- Changed
return endpos
# Internal -- parse endtag, return end or -1 if incomplete
def parse_endtag(self, i):
rawdata = self.rawdata
assert rawdata[i:i + 2] == "</", "unexpected call to parse_endtag"
match = _html_parser.endendtag.search(rawdata, i + 1) # >
if not match:
return -1
j = match.end()
match = _html_parser.endtagfind.match(rawdata, i) # </ + tag + >
if not match:
if self.cdata_tag is not None: # *** add ***
self.handle_data(rawdata[i:j]) # *** add ***
return j # *** add ***
self.error("bad end tag: %r" % (rawdata[i:j],))
# --- changed start ---------------------------------------------------
tag = match.group(1).strip()
if self.cdata_tag is not None:
if tag.lower() != self.cdata_tag:
self.handle_data(rawdata[i:j])
return j
# --- changed end -----------------------------------------------------
self.handle_endtag(tag.lower())
self.clear_cdata_mode()
return j
|
gpl-2.0
|
eadgarchen/tensorflow
|
tensorflow/contrib/boosted_trees/python/utils/losses.py
|
23
|
8661
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Losses for Gtflow Estimator and Batch Estimator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
def per_example_logistic_loss(labels, weights, predictions):
"""Logistic loss given labels, example weights and predictions.
Args:
labels: Rank 2 (N, 1) tensor of per-example labels.
weights: Rank 2 (N, 1) tensor of per-example weights.
predictions: Rank 2 (N, 1) tensor of per-example predictions.
Returns:
loss: A Rank 2 (N, 1) tensor of per-example logistic loss.
update_op: An update operation to update the loss's internal state.
"""
labels = math_ops.to_float(labels)
unweighted_loss = nn.sigmoid_cross_entropy_with_logits(
labels=labels, logits=predictions)
return unweighted_loss * weights, control_flow_ops.no_op()
# This is classical form of Maximum entropy loss, that is twice differentiable
# (sparse_softmax_cross_entropy which is what we go for is not twice
# differentiable).
def per_example_maxent_loss(labels, weights, logits, num_classes, eps=1e-15):
"""Maximum entropy loss for multiclass problems.
Maximum entropy is a generalization of logistic loss for the case when more
than 2 classes are present.
Args:
labels: Rank 2 (N, 1) or Rank 1 (N) tensor of per-example labels.
weights: Rank 2 (N, 1) tensor of per-example weights.
logits: Rank 2 (N, K) tensor of per-example predictions, K - num of
classes.
num_classes: number of classes in classification task. Used to expand label
indices into one-hot encodings.
eps: tolerance, used as a minimum possible value.
Returns:
loss: A Rank 2 (N, 1) tensor of per-example maxent loss
update_op: An update operation to update the loss's internal state.
"""
labels = math_ops.to_int64(labels)
# If labels are of rank 1, make them rank 2.
labels_shape = labels.get_shape()
if len(labels_shape) != 2:
labels = array_ops.expand_dims(labels, 1)
# Labels are indices of classes, convert them to one hot encodings.
target_one_hot = array_ops.one_hot(indices=labels, depth=num_classes)
labels = math_ops.reduce_sum(
input_tensor=target_one_hot, reduction_indices=[1])
labels = math_ops.to_float(labels)
# Calculate softmax probabilities for each class.
unnormalized_probs = math_ops.exp(logits)
normalizers = math_ops.reduce_sum(unnormalized_probs, 1, keep_dims=True)
softmax_predictions = math_ops.divide(unnormalized_probs,
math_ops.add(normalizers, eps))
# Pull out the probabilities for real label.
probs_for_real_class = math_ops.reduce_sum(labels * softmax_predictions, 1)
# Add handling for values near 0 and 1.
zeros = array_ops.zeros_like(probs_for_real_class, dtype=logits.dtype) + eps
one_minus_eps = array_ops.ones_like(
probs_for_real_class, dtype=logits.dtype) - eps
# Take maximum(eps, pred)
cond = (probs_for_real_class >= eps)
probs_for_real_class = array_ops.where(cond, probs_for_real_class, zeros)
# Take minimum(1-eps, pred)
cond = (probs_for_real_class <= 1 - eps)
probs_for_real_class = array_ops.where(cond, probs_for_real_class,
one_minus_eps)
unweighted_loss = array_ops.expand_dims(-math_ops.log(probs_for_real_class),
1)
if weights is None:
return unweighted_loss, control_flow_ops.no_op()
else:
return unweighted_loss * weights, control_flow_ops.no_op()
def per_example_squared_loss(labels, weights, predictions):
"""Squared loss given labels, example weights and predictions.
Args:
labels: Rank 2 (N, D) tensor of per-example labels.
weights: Rank 2 (N, 1) tensor of per-example weights.
predictions: Rank 2 (N, D) tensor of per-example predictions.
Returns:
loss: A Rank 2 (N, 1) tensor of per-example squared loss.
update_op: An update operation to update the loss's internal state.
"""
unweighted_loss = math_ops.reduce_sum(
math_ops.square(predictions - labels), 1, keep_dims=True)
return unweighted_loss * weights, control_flow_ops.no_op()
def per_example_exp_loss(labels, weights, predictions, name=None, eps=0.1):
"""Exponential loss given labels, example weights and predictions.
Note that this is only for binary classification.
  If logistic loss tries to make sure that the classifier is certain of its
  predictions, exp loss says: "as long as it got it correct, even barely, I
  don't care". Can be used on noisy data, or when you don't care about getting
  the actual probabilities from the model, just the correct label.
  The loss returned is exp(-targets*modified_predictions), where
  modified_predictions are 1 if sigmoid is >= 0.5+eps (e.g. we predict the
  positive class), -1 if sigmoid < 0.5-eps (e.g. we predict the negative class)
  and a*x+b in the interval (0.5-eps, 0.5+eps), where a = 1/eps and
  b = -1/(2*eps).
Args:
labels: Rank 2 (N, D) tensor of per-example labels.
weights: Rank 2 (N, 1) tensor of per-example weights.
predictions: Rank 2 (N, D) tensor of per-example predictions.
name: A name for the operation (optional).
eps: For the range (0.5-eps, 0.5+eps) we set the predictions to be ax+b.
Returns:
loss: A Rank 2 (N, 1) tensor of per-example exp loss
update_op: An update operation to update the loss's internal state.
"""
def exp_with_logits(name, eps, labels=None, logits=None):
"""Computes exponential loss given `logits`.
    The loss returned is exp(-targets*modified_predictions), where
    modified_predictions are 1 if sigmoid is >= 0.5+eps (e.g. we predict the
    positive class), -1 if sigmoid < 0.5-eps (e.g. we predict the negative
    class) and a*x+b in the interval (0.5-eps, 0.5+eps), where a = 1/eps and
    b = -1/(2*eps).
Args:
name: A name for the operation (optional).
eps: For the range (0.5-eps, 0.5+eps) we set the predictions to be ax+b.
labels: A `Tensor` of the same type and shape as `logits`.
logits: A `Tensor` of type `float32` or `float64`.
Returns:
A `Tensor` of the same shape as `logits` with the componentwise
exponential losses.
Raises:
ValueError: If `logits` and `labels` do not have the same shape.
"""
with ops.name_scope(name, "exp_loss", [logits, labels]) as name:
logits = ops.convert_to_tensor(logits, name="logits")
labels = ops.convert_to_tensor(labels, name="labels")
try:
labels.get_shape().merge_with(logits.get_shape())
except ValueError:
raise ValueError("logits and labels must have the same shape (%s vs %s)"
% (logits.get_shape(), labels.get_shape()))
# Default threshold to switch between classes
zeros = array_ops.zeros_like(logits, dtype=logits.dtype)
ones = array_ops.ones_like(logits, dtype=logits.dtype)
neg_ones = -array_ops.ones_like(logits, dtype=logits.dtype)
# Convert labels to 1 and -1
cond_labels = (labels > zeros)
labels_converted = array_ops.where(cond_labels, ones, neg_ones)
# Convert predictions to 1 and -1
# The loss we build is min(1, max(-1,ax+b))
# where a=1/eps, b=-1/2eps.
a = 1.0 / eps
b = -1.0 / 2 / eps
probs = math_ops.sigmoid(logits)
y = a * probs + b
# Build max(-1, ax+b)
cond = (y < -1)
max_res = array_ops.where(cond, neg_ones, y)
# Build min part
cond = (max_res > 1)
min_res = array_ops.where(cond, ones, max_res)
preds_converted = min_res
return math_ops.exp(-preds_converted * labels_converted)
labels = math_ops.to_float(labels)
unweighted_loss = exp_with_logits(
name=name, eps=eps, labels=labels, logits=predictions)
return unweighted_loss * weights, control_flow_ops.no_op()
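# --- Illustrative usage (not part of the original module) --------------------
# A minimal sketch of evaluating the per-example losses above. It assumes a
# TensorFlow 1.x runtime (graph mode with tf.Session); shapes follow the
# docstrings: labels/weights/predictions are rank 2 (N, 1) tensors.
if __name__ == "__main__":
  import tensorflow as tf
  labels = tf.constant([[1.0], [0.0], [1.0]])
  weights = tf.ones_like(labels)
  logits = tf.constant([[2.0], [-1.0], [0.5]])
  logistic, _ = per_example_logistic_loss(labels, weights, logits)
  # Maxent (softmax) loss over 3 classes; labels here are class indices.
  class_labels = tf.constant([[0], [2], [1]])
  class_logits = tf.constant([[2.0, 0.1, -1.0],
                              [0.0, 0.0, 3.0],
                              [1.0, 2.0, 0.5]])
  maxent, _ = per_example_maxent_loss(class_labels, weights, class_logits,
                                      num_classes=3)
  with tf.Session() as sess:
    print(sess.run(logistic))
    print(sess.run(maxent))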
|
apache-2.0
|
dpendl00/headphones
|
lib/requests/packages/chardet/escprober.py
|
2936
|
3187
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from . import constants
from .escsm import (HZSMModel, ISO2022CNSMModel, ISO2022JPSMModel,
ISO2022KRSMModel)
from .charsetprober import CharSetProber
from .codingstatemachine import CodingStateMachine
from .compat import wrap_ord
class EscCharSetProber(CharSetProber):
def __init__(self):
CharSetProber.__init__(self)
self._mCodingSM = [
CodingStateMachine(HZSMModel),
CodingStateMachine(ISO2022CNSMModel),
CodingStateMachine(ISO2022JPSMModel),
CodingStateMachine(ISO2022KRSMModel)
]
self.reset()
def reset(self):
CharSetProber.reset(self)
for codingSM in self._mCodingSM:
if not codingSM:
continue
codingSM.active = True
codingSM.reset()
self._mActiveSM = len(self._mCodingSM)
self._mDetectedCharset = None
def get_charset_name(self):
return self._mDetectedCharset
def get_confidence(self):
if self._mDetectedCharset:
return 0.99
else:
return 0.00
def feed(self, aBuf):
for c in aBuf:
# PY3K: aBuf is a byte array, so c is an int, not a byte
for codingSM in self._mCodingSM:
if not codingSM:
continue
if not codingSM.active:
continue
codingState = codingSM.next_state(wrap_ord(c))
if codingState == constants.eError:
codingSM.active = False
self._mActiveSM -= 1
if self._mActiveSM <= 0:
self._mState = constants.eNotMe
return self.get_state()
elif codingState == constants.eItsMe:
self._mState = constants.eFoundIt
self._mDetectedCharset = codingSM.get_coding_state_machine() # nopep8
return self.get_state()
return self.get_state()
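# --- Illustrative usage (not part of the original module) --------------------
# This module uses relative imports, so it is driven through the chardet
# package rather than run directly. A minimal sketch (the import path may
# differ for this vendored copy under requests.packages):
#
#     from chardet.escprober import EscCharSetProber
#     prober = EscCharSetProber()
#     prober.feed(b'\x1b$B...\x1b(B')   # bytes containing ISO-2022 escapes
#     print(prober.get_charset_name(), prober.get_confidence())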
|
gpl-3.0
|
Textalk/poppler
|
regtest/TestReferences.py
|
2
|
3512
|
# TestReferences.py
#
# Copyright (C) 2011 Carlos Garcia Campos <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import os
import errno
from backends import get_backend, get_all_backends
from Config import Config
from Printer import get_printer
from Utils import get_document_paths_from_dir, get_skipped_tests
from Queue import Queue
from threading import Thread
class TestReferences:
def __init__(self, docsdir, refsdir):
self._docsdir = docsdir
self._refsdir = refsdir
self._skipped = get_skipped_tests(docsdir)
self.config = Config()
self.printer = get_printer()
self._queue = Queue()
try:
os.makedirs(self._refsdir)
except OSError as e:
if e.errno != errno.EEXIST:
raise
except:
raise
def create_refs_for_file(self, filename, n_doc = 1, total_docs = 1):
if filename in self._skipped:
self.printer.print_default("Skipping test '%s' (%d/%d)" % (os.path.join(self._docsdir, filename), n_doc, total_docs))
return
refs_path = os.path.join(self._refsdir, filename)
try:
os.makedirs(refs_path)
except OSError as e:
if e.errno != errno.EEXIST:
raise
except:
raise
doc_path = os.path.join(self._docsdir, filename)
if self.config.backends:
backends = [get_backend(name) for name in self.config.backends]
else:
backends = get_all_backends()
for backend in backends:
if not self.config.force and backend.has_results(refs_path):
self.printer.print_default("Results found, skipping '%s' for %s backend (%d/%d)" % (doc_path, backend.get_name(), n_doc, total_docs))
continue
self.printer.printout_ln("Creating refs for '%s' using %s backend (%d/%d)" % (doc_path, backend.get_name(), n_doc, total_docs))
if backend.create_refs(doc_path, refs_path):
backend.create_checksums(refs_path, self.config.checksums_only)
def _worker_thread(self):
while True:
doc, n_doc, total_docs = self._queue.get()
self.create_refs_for_file(doc, n_doc, total_docs)
self._queue.task_done()
def create_refs(self):
docs, total_docs = get_document_paths_from_dir(self._docsdir)
self.printer.printout_ln('Process %d is spawning %d worker threads...' % (os.getpid(), self.config.threads))
for n_thread in range(self.config.threads):
thread = Thread(target=self._worker_thread)
thread.daemon = True
thread.start()
n_doc = 0
for doc in docs:
n_doc += 1
self._queue.put( (doc, n_doc, total_docs) )
self._queue.join()
|
gpl-2.0
|
zclfly/cgt
|
examples/bench/theano_gru.py
|
22
|
3642
|
import theano, theano.tensor as TT
from cgt.utils import Message
import time
import numpy as np
def normc(x):
assert x.ndim == 2
return x/norms(x,0)[None,:]
def randnf(*shp):
return np.random.randn(*shp).astype(theano.config.floatX)
def norms(x,ax):
return np.sqrt(np.square(x).sum(axis=ax))
class GRUCell(object):
"""
Gated Recurrent Unit. E.g., see
Chung, Junyoung, et al. "Empirical Evaluation of Gated Recurrent Neural Networks on Sequence Modeling." arXiv preprint arXiv:1412.3555 (2014).
"""
def __init__(self,input_sizes,mem_size,name_prefix=""):
Wiz_vals = [normc(randnf(input_size,mem_size)) for input_size in input_sizes]
self.Wizs = [theano.shared(Wiz_val,name=name_prefix+"Wiz") for Wiz_val in Wiz_vals]
Wmz_val = normc(randnf(mem_size,mem_size))
self.Wmz = theano.shared(Wmz_val,name=name_prefix+"Wmz")
bz = np.zeros((1,mem_size),theano.config.floatX)
self.bz = theano.shared(bz,name=name_prefix+"bz")
self.bz.type.broadcastable = (True,False)
Wir_vals = [normc(randnf(input_size,mem_size)) for input_size in input_sizes]
self.Wirs = [theano.shared(Wir_val,name=name_prefix+"Wir") for Wir_val in Wir_vals]
Wmr_val = normc(randnf(mem_size,mem_size))
self.Wmr = theano.shared(Wmr_val,name=name_prefix+"Wmr")
br = np.zeros((1,mem_size),theano.config.floatX)
self.br = theano.shared(br,name=name_prefix+"br")
self.br.type.broadcastable = (True,False)
Wim_vals = [normc(randnf(input_size,mem_size)) for input_size in input_sizes]
self.Wims = [theano.shared(Wim_val,name=name_prefix+"Wim") for Wim_val in Wim_vals]
Wmm_val = normc(np.eye(mem_size,dtype=theano.config.floatX))
self.Wmm = theano.shared(Wmm_val,name=name_prefix+"Wmm")
bm = np.zeros((1,mem_size),theano.config.floatX)
self.bm = theano.shared(bm,name=name_prefix+"bm")
self.bm.type.broadcastable = (True,False)
def __call__(self,M,*inputs):
assert len(inputs) == len(self.Wizs)
summands = [Xi.dot(Wiz) for (Xi,Wiz) in zip(inputs,self.Wizs)] + [M.dot(self.Wmz),self.bz]
z = TT.nnet.sigmoid(TT.add(*summands))
summands = [Xi.dot(Wir) for (Xi,Wir) in zip(inputs,self.Wirs)] + [M.dot(self.Wmr),self.br]
r = TT.nnet.sigmoid(TT.add(*summands))
summands = [Xi.dot(Wim) for (Xi,Wim) in zip(inputs,self.Wims)] + [(r*M).dot(self.Wmm),self.bm]
Mtarg = TT.tanh(TT.add(*summands)) #pylint: disable=E1111
Mnew = (1-z)*M + z*Mtarg
return Mnew
def params(self):
out = []
out.extend(self.Wizs)
out.append(self.Wmz)
out.append(self.bz)
out.extend(self.Wirs)
out.append(self.Wmr)
out.append(self.br)
out.extend(self.Wims)
out.append(self.Wmm)
out.append(self.bm)
return out
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--horizon",type=int)
args = parser.parse_args()
    horizon = args.horizon
assert horizon is not None
size=128
batchsize=64
cell = GRUCell([size],size)
X = TT.tensor3()
init = TT.zeros((batchsize, size),theano.config.floatX)
prev_h = init
for i in xrange(horizon):
prev_h = cell(X[i], prev_h)
with Message("compiling"):
f = theano.function([X],theano.grad(prev_h.sum(), cell.params()))
with Message("running"):
x = np.zeros((horizon,batchsize,size),theano.config.floatX)
for i in xrange(100):
f(x)
|
mit
|
morph027/ansible-modules-extras
|
univention/udm_group.py
|
29
|
5267
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
# Copyright (c) 2016, Adfinis SyGroup AG
# Tobias Rueetschi <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.univention_umc import (
umc_module_for_add,
umc_module_for_edit,
ldap_search,
base_dn,
)
DOCUMENTATION = '''
---
module: udm_group
version_added: "2.2"
author: "Tobias Rueetschi (@2-B)"
short_description: Manage posix groups
description:
    - "This module allows you to manage user groups on a Univention Corporate Server (UCS).
       It uses the Python API of the UCS to create a new object or edit it."
requirements:
- Python >= 2.6
options:
state:
required: false
default: "present"
choices: [ present, absent ]
description:
- Whether the group is present or not.
name:
required: true
description:
- Name of the posix group.
description:
required: false
description:
- Group description.
position:
required: false
description:
- define the whole ldap position of the group, e.g.
C(cn=g123m-1A,cn=classes,cn=schueler,cn=groups,ou=schule,dc=example,dc=com).
ou:
required: false
description:
- LDAP OU, e.g. school for LDAP OU C(ou=school,dc=example,dc=com).
subpath:
required: false
description:
- Subpath inside the OU, e.g. C(cn=classes,cn=students,cn=groups).
'''
EXAMPLES = '''
# Create a POSIX group
- udm_group: name=g123m-1A
# Create a POSIX group with the exact DN
# C(cn=g123m-1A,cn=classes,cn=students,cn=groups,ou=school,dc=school,dc=example,dc=com)
- udm_group: name=g123m-1A
subpath='cn=classes,cn=students,cn=groups'
ou=school
# or
- udm_group: name=g123m-1A
position='cn=classes,cn=students,cn=groups,ou=school,dc=school,dc=example,dc=com'
'''
RETURN = '''# '''
def main():
module = AnsibleModule(
argument_spec = dict(
name = dict(required=True,
type='str'),
description = dict(default=None,
type='str'),
position = dict(default='',
type='str'),
ou = dict(default='',
type='str'),
subpath = dict(default='cn=groups',
type='str'),
state = dict(default='present',
choices=['present', 'absent'],
type='str')
),
supports_check_mode=True
)
name = module.params['name']
description = module.params['description']
position = module.params['position']
ou = module.params['ou']
subpath = module.params['subpath']
state = module.params['state']
    changed = False
    diff = None
groups = list(ldap_search(
'(&(objectClass=posixGroup)(cn={}))'.format(name),
attr=['cn']
))
if position != '':
container = position
else:
if ou != '':
ou = 'ou={},'.format(ou)
if subpath != '':
subpath = '{},'.format(subpath)
container = '{}{}{}'.format(subpath, ou, base_dn())
group_dn = 'cn={},{}'.format(name, container)
exists = bool(len(groups))
if state == 'present':
try:
if not exists:
grp = umc_module_for_add('groups/group', container)
else:
grp = umc_module_for_edit('groups/group', group_dn)
grp['name'] = name
grp['description'] = description
diff = grp.diff()
changed = grp.diff() != []
if not module.check_mode:
if not exists:
grp.create()
else:
grp.modify()
except:
module.fail_json(
msg="Creating/editing group {} in {} failed".format(name, container)
)
if state == 'absent' and exists:
try:
grp = umc_module_for_edit('groups/group', group_dn)
if not module.check_mode:
grp.remove()
changed = True
except:
module.fail_json(
msg="Removing group {} failed".format(name)
)
module.exit_json(
changed=changed,
name=name,
diff=diff,
container=container
)
if __name__ == '__main__':
main()
|
gpl-3.0
|
gsehub/edx-platform
|
common/djangoapps/third_party_auth/tests/test_admin.py
|
9
|
3683
|
"""
Tests third_party_auth admin views
"""
import unittest
from django.contrib.admin.sites import AdminSite
from django.core.files.uploadedfile import SimpleUploadedFile
from django.urls import reverse
from django.forms import models
from student.tests.factories import UserFactory
from third_party_auth.admin import OAuth2ProviderConfigAdmin
from third_party_auth.models import OAuth2ProviderConfig
from third_party_auth.tests import testutil
# This is necessary because cms does not implement third party auth
@unittest.skipUnless(testutil.AUTH_FEATURE_ENABLED, testutil.AUTH_FEATURES_KEY + ' not enabled')
class Oauth2ProviderConfigAdminTest(testutil.TestCase):
"""
Tests for oauth2 provider config admin
"""
def test_oauth2_provider_edit_icon_image(self):
"""
Test that we can update an OAuth provider's icon image from the admin
form.
OAuth providers are updated using KeyedConfigurationModelAdmin, which
updates models by adding a new instance that replaces the old one,
instead of editing the old instance directly.
Updating the icon image is tricky here because
KeyedConfigurationModelAdmin copies data over from the previous
version by injecting its attributes into request.GET, but the icon
ends up in request.FILES. We need to ensure that the value is
prepopulated correctly, and that we can clear and update the image.
"""
# Login as a super user
user = UserFactory.create(is_staff=True, is_superuser=True)
user.save()
self.client.login(username=user.username, password='test')
# Get baseline provider count
providers = OAuth2ProviderConfig.objects.all()
pcount = len(providers)
# Create a provider
provider1 = self.configure_dummy_provider(
enabled=True,
icon_class='',
icon_image=SimpleUploadedFile('icon.svg', '<svg><rect width="50" height="100"/></svg>'),
)
# Get the provider instance with active flag
providers = OAuth2ProviderConfig.objects.all()
self.assertEquals(len(providers), 1)
self.assertEquals(providers[pcount].id, provider1.id)
# Edit the provider via the admin edit link
admin = OAuth2ProviderConfigAdmin(provider1, AdminSite())
# pylint: disable=protected-access
update_url = reverse('admin:{}_{}_add'.format(admin.model._meta.app_label, admin.model._meta.model_name))
update_url += "?source={}".format(provider1.pk)
# Remove the icon_image from the POST data, to simulate unchanged icon_image
post_data = models.model_to_dict(provider1)
del post_data['icon_image']
# Remove max_session_length; it has a default null value which must be POSTed
# back as an absent value, rather than as a "null-like" included value.
del post_data['max_session_length']
# Change the name, to verify POST
post_data['name'] = 'Another name'
# Post the edit form: expecting redirect
response = self.client.post(update_url, post_data)
self.assertEquals(response.status_code, 302)
# Editing the existing provider creates a new provider instance
providers = OAuth2ProviderConfig.objects.all()
self.assertEquals(len(providers), pcount + 2)
self.assertEquals(providers[pcount].id, provider1.id)
provider2 = providers[pcount + 1]
# Ensure the icon_image was preserved on the new provider instance
self.assertEquals(provider2.icon_image, provider1.icon_image)
self.assertEquals(provider2.name, post_data['name'])
|
agpl-3.0
|
rochacbruno/dynaconf
|
dynaconf/validator_conditions.py
|
1
|
1384
|
# pragma: no cover
"""
Implement basic assertions to be used in assertion action
"""
def eq(value, other):
"""Equal"""
return value == other
def ne(value, other):
"""Not equal"""
return value != other
def gt(value, other):
"""Greater than"""
return value > other
def lt(value, other):
"""Lower than"""
return value < other
def gte(value, other):
"""Greater than or equal"""
return value >= other
def lte(value, other):
"""Lower than or equal"""
return value <= other
def identity(value, other):
"""Identity check using ID"""
return value is other
def is_type_of(value, other):
"""Type check"""
return isinstance(value, other)
def is_in(value, other):
"""Existence"""
return value in other
def is_not_in(value, other):
"""Inexistence"""
return value not in other
def cont(value, other):
"""Contains"""
return other in value
def len_eq(value, other):
"""Length Equal"""
return len(value) == other
def len_ne(value, other):
"""Length Not equal"""
return len(value) != other
def len_min(value, other):
"""Minimum length"""
return len(value) >= other
def len_max(value, other):
"""Maximum lenght"""
return len(value) <= other
def startswith(value, term):
"""returns value.startswith(term) result"""
return value.startswith(term)
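# --- Illustrative usage (not part of the original module) --------------------
# A minimal sketch of calling the condition helpers directly; inside dynaconf
# they are normally looked up by name when a Validator declares conditions
# such as ``len_min=3``.
if __name__ == "__main__":  # pragma: no cover
    assert eq(1, 1) and ne(1, 2)
    assert is_in("a", ["a", "b"]) and is_not_in("c", ["a", "b"])
    assert cont("banana", "nan")
    assert len_min("abc", 2) and len_max("abc", 5)
    assert startswith("dynaconf", "dyna")
    print("all condition checks passed")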
|
mit
|
njwilson23/scipy
|
scipy/optimize/_trustregion_dogleg.py
|
135
|
4449
|
"""Dog-leg trust-region optimization."""
from __future__ import division, print_function, absolute_import
import numpy as np
import scipy.linalg
from ._trustregion import (_minimize_trust_region, BaseQuadraticSubproblem)
__all__ = []
def _minimize_dogleg(fun, x0, args=(), jac=None, hess=None,
**trust_region_options):
"""
Minimization of scalar function of one or more variables using
the dog-leg trust-region algorithm.
Options
-------
initial_trust_radius : float
Initial trust-region radius.
max_trust_radius : float
Maximum value of the trust-region radius. No steps that are longer
than this value will be proposed.
eta : float
Trust region related acceptance stringency for proposed steps.
gtol : float
Gradient norm must be less than `gtol` before successful
termination.
"""
if jac is None:
raise ValueError('Jacobian is required for dogleg minimization')
if hess is None:
raise ValueError('Hessian is required for dogleg minimization')
return _minimize_trust_region(fun, x0, args=args, jac=jac, hess=hess,
subproblem=DoglegSubproblem,
**trust_region_options)
class DoglegSubproblem(BaseQuadraticSubproblem):
"""Quadratic subproblem solved by the dogleg method"""
def cauchy_point(self):
"""
The Cauchy point is minimal along the direction of steepest descent.
"""
if self._cauchy_point is None:
g = self.jac
Bg = self.hessp(g)
self._cauchy_point = -(np.dot(g, g) / np.dot(g, Bg)) * g
return self._cauchy_point
def newton_point(self):
"""
The Newton point is a global minimum of the approximate function.
"""
if self._newton_point is None:
g = self.jac
B = self.hess
cho_info = scipy.linalg.cho_factor(B)
self._newton_point = -scipy.linalg.cho_solve(cho_info, g)
return self._newton_point
def solve(self, trust_radius):
"""
Minimize a function using the dog-leg trust-region algorithm.
This algorithm requires function values and first and second derivatives.
It also performs a costly Hessian decomposition for most iterations,
and the Hessian is required to be positive definite.
Parameters
----------
trust_radius : float
We are allowed to wander only this far away from the origin.
Returns
-------
p : ndarray
The proposed step.
hits_boundary : bool
True if the proposed step is on the boundary of the trust region.
Notes
-----
The Hessian is required to be positive definite.
References
----------
.. [1] Jorge Nocedal and Stephen Wright,
Numerical Optimization, second edition,
Springer-Verlag, 2006, page 73.
"""
# Compute the Newton point.
# This is the optimum for the quadratic model function.
# If it is inside the trust radius then return this point.
p_best = self.newton_point()
if scipy.linalg.norm(p_best) < trust_radius:
hits_boundary = False
return p_best, hits_boundary
# Compute the Cauchy point.
# This is the predicted optimum along the direction of steepest descent.
p_u = self.cauchy_point()
# If the Cauchy point is outside the trust region,
# then return the point where the path intersects the boundary.
p_u_norm = scipy.linalg.norm(p_u)
if p_u_norm >= trust_radius:
p_boundary = p_u * (trust_radius / p_u_norm)
hits_boundary = True
return p_boundary, hits_boundary
# Compute the intersection of the trust region boundary
# and the line segment connecting the Cauchy and Newton points.
# This requires solving a quadratic equation.
# ||p_u + t*(p_best - p_u)||**2 == trust_radius**2
# Solve this for positive time t using the quadratic formula.
_, tb = self.get_boundaries_intersections(p_u, p_best - p_u,
trust_radius)
p_boundary = p_u + tb * (p_best - p_u)
hits_boundary = True
return p_boundary, hits_boundary
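# --- Illustrative usage (not part of the original module) --------------------
# A minimal sketch: this solver is normally reached through
# scipy.optimize.minimize(method='dogleg'), which requires both the gradient
# and a positive-definite Hessian, as enforced by _minimize_dogleg above.
# (Run via ``python -m scipy.optimize._trustregion_dogleg`` so the relative
# imports resolve.)
if __name__ == '__main__':
    from scipy.optimize import minimize, rosen, rosen_der, rosen_hess
    x0 = np.array([1.3, 0.7, 0.8, 1.9, 1.2])
    res = minimize(rosen, x0, method='dogleg', jac=rosen_der, hess=rosen_hess)
    print(res.x)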
|
bsd-3-clause
|
weso/landportal-importers
|
IpfriExtractor/es/weso/translator/ipfri_trasnlator.py
|
2
|
4614
|
__author__ = 'Dani'
import xlrd
from model2xml.model2xml import ModelToXMLTransformer
from .parser.parser import Parser
from .object_builder.model_object_builder import IpfriModelObjectBuilder
import os.path
class IpfriTranslator(object):
def __init__(self, log, config, look_for_historical=True):
self._paths_to_files = []
self._log = log
self._config = config
self._look_for_historical = look_for_historical
self._dataset_user_file_groups = []
def run(self):
try:
self.determine_paths_to_files()
except BaseException as e:
raise RuntimeError("While trying to determine paths to files to parse: " + e.message)
try:
self._initialize_ids_propperly()
except BaseException as e:
raise RuntimeError("While trying to initialize ids to create entities: " + e.message)
try:
self.translate_files_into_model_objects()
except BaseException as e:
raise RuntimeError("While trying to generate model objects: " + e.message)
try:
self.translate_model_objects_into_xml()
except BaseException as e:
raise RuntimeError("While trying to turn model objects into xml: " + e.message)
self._log.info("Final xml succesfully sent to the Receiver module")
def _initialize_ids_propperly(self):
"""
        The object builders take the values used to assign ids from the config. There will
        be several instances of object builders, all of them using the same config, so we
        manage the config here before starting the execution of the object builders.
"""
        if self._look_for_historical:  # restart the count
self._config.set("TRANSLATOR", "obs_int", "0")
self._config.set("TRANSLATOR", "sli_int", 0)
self._config.set("TRANSLATOR", "igr_int", 0)
self._config.set("TRANSLATOR", "dat_int", 0)
else:
pass # The value that we have in the config is valid in this case
def translate_files_into_model_objects(self):
i = 0
for a_path in self._paths_to_files:
i += 1
a_sheet = self.take_data_sheet_from_file_path(a_path)
indicators, dates, countries = Parser(a_sheet).run()
a_group = IpfriModelObjectBuilder(self._log,
self._config,
indicators,
dates,
countries,
os.path.abspath(a_path)).run()
self._dataset_user_file_groups.append(a_group)
def translate_model_objects_into_xml(self):
for a_group in self._dataset_user_file_groups:
ModelToXMLTransformer(dataset=a_group.dataset,
import_process=ModelToXMLTransformer.XLS,
user=a_group.user,
path_to_original_file=a_group.file_path).run()
self._persist_config_values()
def _persist_config_values(self):
with open("./files/configuration.ini", "wb") as config_file:
self._config.write(config_file)
def determine_paths_to_files(self):
path_pattern = self._config.get("IPFRI", "target_downloaded_file_pattern")
if self._look_for_historical:
self.determine_paths_to_every_available_year(path_pattern)
else:
self.determine_paths_to_current_year(path_pattern)
def determine_paths_to_every_available_year(self, path_pattern):
available_years = self._config.get("IPFRI", "available_years").split(",")
for year in available_years:
self._paths_to_files.append(path_pattern.replace("{year}", year))
def determine_paths_to_current_year(self, path_pattern):
year = self._config.get("AVAILABLE_YEARS", "year_to_look_for")
candidate_file = path_pattern.replace("{year}", int(year))
if os.path.exists(candidate_file):
self._paths_to_files.append(candidate_file)
else:
raise RuntimeError("It looks like there is no available actual info. IpfriImporter will stop its execution")
@staticmethod
def take_data_sheet_from_file_path(a_path):
book = xlrd.open_workbook(a_path, encoding_override='latin-1')
#We are assuming that the sheet with the data is placed the last in the book
return book.sheet_by_index(book.nsheets - 1)
|
unlicense
|
1it/python-dns-failover
|
dns_failover/backends.py
|
2
|
4136
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import requests
class CloudFlareDNS(object):
"""
CloudFlare Backend for Python DNS Failover
"""
def __init__(self, email, key, zone, ttl=1,
url='https://www.cloudflare.com/api_json.html'):
"""
Sets up a CloudFlareDNS backend instance with the provided
configuration.
# Params:
- `email`: E-mail address of your CloudFlare account.
- `key`: API key of your CloudFlare account.
- `zone`: target DNS full qualified domain name.
        - `ttl`: TTL of record in seconds. 1 = Automatic; otherwise, the value
          must be between 120 and 4,294,967,295 seconds. Defaults to 1.
- `url`: CloudFlare client gateway interface url. Defaults to
'https://www.cloudflare.com/api_json.html'.
"""
self.url = url
self.zone = zone
self.ttl = ttl
self.base_data = {
'email': email,
'tkn': key,
'z': self.zone,
}
    def _do_request(self, data=None):
"""
Configures and does the request to the backend API endpoint
and catches any possible exception.
# Params:
- `data`: additional data for the request.
"""
        data = dict(data or {})
        data.update(self.base_data)
response = requests.post(self.url, data=data)
response.raise_for_status()
response_data = response.json()
# If result is not successful, raise error
if response_data.get('result') != 'success':
raise Exception(response_data.get('msg'))
return response_data
def _hostname(self, fqdn):
"""
        Asserts that the given FQDN belongs to the configured zone and
returns the hostname.
# Params:
- `fqdn`: full qualified domain name to retrieve the hostname from.
"""
zone = '.' + self.zone
assert fqdn.endswith(zone)
return fqdn[:-len(zone)]
@property
def _records(self):
"""
Load all current DNS zone records.
Returns the list of the current DNS zone records.
"""
data = {
'a': 'rec_load_all',
}
response = self._do_request(data=data)
return response.get('response').get('recs').get('objs')
def get_a_records(self, fqdn):
"""
        Returns the list of IP address records associated with the given FQDN.
# Params:
- `fqdn`: full qualified domain name of the records to retrieve.
"""
return [record.get('content')
for record in self._records
if record.get('name') == fqdn and record.get('type') == 'A']
def add_a_record(self, fqdn, ip):
"""
Adds a resource record of type A to the DNS list.
Returns the new record created.
# Params:
- `fqdn`: full qualified domain name to add to the dns record.
- `ip`: server ip to add to the dns record.
"""
data = {
'a': 'rec_new',
'content': ip,
'name': self._hostname(fqdn),
'ttl': self.ttl,
'type': 'A',
}
response = self._do_request(data=data)
new_record = response.get('response').get('rec').get('obj')
return new_record
def delete_a_record(self, fqdn, ip):
"""
Deletes all DNS A-type resource records targeting the given ip.
Returns the number of deleted records.
# Params:
- `fqdn`: full qualified domain name of the dns record to remove.
- `ip`: target ip to remove from the dns record to remove.
"""
num_deleted = 0
for record in self._records:
if record.get('name') == fqdn and \
record.get('type') == 'A' and \
record.get('content') == ip:
data = {
'a': 'rec_delete',
'id': record.get('rec_id'),
}
self._do_request(data=data)
num_deleted += 1
return num_deleted
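# --- Illustrative usage (not part of the original module) --------------------
# A minimal sketch of driving the backend. The e-mail, API key, zone and IP
# below are placeholders; the calls go to CloudFlare's legacy client-interface
# endpoint (api_json.html), so real credentials for that API are required.
if __name__ == '__main__':
    backend = CloudFlareDNS(email='[email protected]',
                            key='YOUR_API_KEY',
                            zone='example.com')
    fqdn = 'www.example.com'
    print(backend.get_a_records(fqdn))
    backend.add_a_record(fqdn, '203.0.113.10')
    backend.delete_a_record(fqdn, '203.0.113.10')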
|
bsd-3-clause
|
amoya-dx/account-financial-tools
|
currency_rate_date_check/company.py
|
38
|
1695
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# Currency rate date check module for Odoo
# Copyright (C) 2012-2014 Akretion (http://www.akretion.com)
# @author Alexis de Lattre <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import models, fields
class ResCompany(models.Model):
_inherit = 'res.company'
currency_rate_max_delta = fields.Integer(
string='Max Time Delta in Days for Currency Rates', default=7,
help="This is the maximum interval in days between "
"the date associated with the amount to convert and the date "
"of the nearest currency rate available in Odoo.")
_sql_constraints = [
('currency_rate_max_delta_positive',
'CHECK (currency_rate_max_delta >= 0)',
"The value of the field 'Max Time Delta in Days for Currency Rates' "
"must be positive or 0."),
]
|
agpl-3.0
|
michaelkirk/QGIS
|
tests/src/python/test_qgsfontutils.py
|
20
|
2128
|
# -*- coding: utf-8 -*-
"""QGIS Unit tests for core QgsFontUtils class
From build dir: ctest -R PyQgsFontUtils -V
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Larry Shaffer'
__date__ = '2014/02/19'
__copyright__ = 'Copyright 2014, The QGIS Project'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import qgis
from qgis.core import QgsFontUtils
from utilities import (
TestCase,
getQgisTestApp,
unittest,
getTestFontFamily,
loadTestFonts
)
QGISAPP, CANVAS, IFACE, PARENT = getQgisTestApp()
class TestQgsFontUtils(TestCase):
@classmethod
def setUpClass(cls):
cls._family = getTestFontFamily()
cls._has_style = QgsFontUtils.fontFamilyHasStyle
def test_loading_base_test_fonts(self):
loadTestFonts()
def test_loading_every_test_font(self):
QgsFontUtils.loadStandardTestFonts(['All'])
# styles = ''
# for style in QFontDatabase().styles(self._family):
# styles += ' ' + style
# print self._family + ' styles:' + styles
res = (
self._has_style(self._family, 'Roman')
and self._has_style(self._family, 'Oblique')
and self._has_style(self._family, 'Bold')
and self._has_style(self._family, 'Bold Oblique')
)
msg = self._family + ' test font styles could not be loaded'
assert res, msg
def test_get_specific_test_font(self):
# default returned is Roman at 12 pt
f = QgsFontUtils.getStandardTestFont('Bold Oblique', 14)
""":type: QFont"""
res = (
f.family() == self._family
and f.bold()
and f.italic()
and f.pointSize() == 14
)
msg = self._family + ' test font Bold Oblique at 14 pt not retrieved'
assert res, msg
if __name__ == '__main__':
unittest.main()
|
gpl-2.0
|
ghmajx/asuswrt-merlin
|
release/src/router/samba-3.5.8/source4/scripting/python/examples/winreg.py
|
24
|
2812
|
#!/usr/bin/python
#
# tool to manipulate a remote registry
# Copyright Andrew Tridgell 2005
# Copyright Jelmer Vernooij 2007
# Released under the GNU GPL v3 or later
#
import sys
# Find right directory when running from source tree
sys.path.insert(0, "bin/python")
from samba.dcerpc import winreg
import optparse
import samba.getopt as options
parser = optparse.OptionParser("%s <BINDING> [path]" % sys.argv[0])
sambaopts = options.SambaOptions(parser)
parser.add_option_group(sambaopts)
parser.add_option("--createkey", type="string", metavar="KEYNAME",
help="create a key")
opts, args = parser.parse_args()
if len(args) < 1:
parser.print_usage()
sys.exit(-1)
binding = args[0]
print "Connecting to " + binding
conn = winreg.winreg(binding, sambaopts.get_loadparm())
def list_values(key):
(num_values, max_valnamelen, max_valbufsize) = conn.QueryInfoKey(key, winreg.String())[4:8]
for i in range(num_values):
name = winreg.StringBuf()
name.size = max_valnamelen
(name, type, data, _, data_len) = conn.EnumValue(key, i, name, 0, "", max_valbufsize, 0)
print "\ttype=%-30s size=%4d '%s'" % type, len, name
if type in (winreg.REG_SZ, winreg.REG_EXPAND_SZ):
print "\t\t'%s'" % data
# if (v.type == reg.REG_MULTI_SZ) {
# for (j in v.value) {
# printf("\t\t'%s'\n", v.value[j])
# }
# }
# if (v.type == reg.REG_DWORD || v.type == reg.REG_DWORD_BIG_ENDIAN) {
# printf("\t\t0x%08x (%d)\n", v.value, v.value)
# }
# if (v.type == reg.REG_QWORD) {
# printf("\t\t0x%llx (%lld)\n", v.value, v.value)
# }
def list_path(key, path):
count = 0
(num_subkeys, max_subkeylen, max_subkeysize) = conn.QueryInfoKey(key, winreg.String())[1:4]
for i in range(num_subkeys):
name = winreg.StringBuf()
name.size = max_subkeysize
keyclass = winreg.StringBuf()
keyclass.size = max_subkeysize
(name, _, _) = conn.EnumKey(key, i, name, keyclass=keyclass, last_changed_time=None)[0]
subkey = conn.OpenKey(key, name, 0, winreg.KEY_QUERY_VALUE | winreg.KEY_ENUMERATE_SUB_KEYS)
count += list_path(subkey, "%s\\%s" % (path, name))
list_values(subkey)
return count
if len(args) > 1:
root = args[1]
else:
root = "HKLM"
if opts.createkey:
reg.create_key("HKLM\\SOFTWARE", opt.createkey)
else:
print "Listing registry tree '%s'" % root
try:
root_key = getattr(conn, "Open%s" % root)(None, winreg.KEY_QUERY_VALUE | winreg.KEY_ENUMERATE_SUB_KEYS)
except AttributeError:
print "Unknown root key name %s" % root
sys.exit(1)
count = list_path(root_key, root)
if count == 0:
print "No entries found"
sys.exit(1)
|
gpl-2.0
|
glennrub/micropython
|
ports/stm32/boards/NUCLEO_WB55/rfcore_firmware.py
|
8
|
22879
|
# This file is part of the MicroPython project, http://micropython.org/
#
# The MIT License (MIT)
#
# Copyright (c) 2020 Damien P. George
# Copyright (c) 2020 Jim Mussared
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# This script provides helpers for working with the FUS/WS firmware on the WB55.
# It can be frozen into the MicroPython firmware (via manifest.py)
#
# The current FUS and WS firmware version and state can be queried via the
# `stm` module, e.g.
# stm.rfcore_status() (returns the first word of the device info table)
# stm.rfcore_fw_version(id) (returns a 5-tuple indicating fw version; id is: 0=FUS, 1=WS)
# stm.rfcore_sys_hci(ogf, ocf, cmd_buf) (synchronously execute HCI command on SYS channel)
#
# To perform a firmware update:
#
# 1. Generate "obfuscated" binary images using rfcore_makefirmware.py
# ./boards/NUCLEO_WB55/rfcore_makefirmware.py ~/src/github.com/STMicroelectronics/STM32CubeWB/Projects/STM32WB_Copro_Wireless_Binaries/STM32WB5x/ /tmp
# This will generate /tmp/{fus_102,fus_110,ws_ble_hci}.bin
#
# 2. Copy required files to the device filesystem.
# In general, it's always safe to copy all three files and the updater will
# figure out what needs to be done. This is the recommended option.
# However, if you already have the latest FUS (1.1.0) installed, then just the
# WS firmware is required.
# If a FUS binary is present, then the existing WS will be removed so it's a good
# idea to always include the WS binary if updating FUS.
# Note that a WS binary will not be installed unless FUS 1.1.0 is installed.
#
# 3. Ensure boot.py calls `rfcore_firmware.resume()`.
# The WB55 will reset several times during the firmware update process, so this
# script manages the update state using RTC backup registers.
# `rfcore_firmware.resume()` will continue the update operation on startup to
# resume any in-progress update operation, and either trigger another reset, or
# return 0 to indicate that the operation completed successfully, or a reason
# code (see REASON_* below) to indicate failure.
#
# 4. Call rfcore_firmware.check_for_updates() to start the update process.
# The device will then immediately reboot and when the firmware update completes,
# the status will be returned from rfcore_firmware.resume(). See the REASON_ codes below.
# You can use the built-in stm.rfcore_fw_version() to query the installed version
# from your application code.
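#
# A minimal boot.py sketch of steps 3 and 4 above (illustrative only; it
# assumes this module is importable as `rfcore_firmware`, e.g. frozen via
# manifest.py as described at the top of this file):
#
#   import rfcore_firmware
#   reason = rfcore_firmware.resume()
#   if reason != rfcore_firmware.REASON_OK:
#       print("rfcore firmware update failed, reason:", reason)
#   # ...normal application startup...
#   # Later, to begin an update (this resets the board immediately):
#   # rfcore_firmware.check_for_updates()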
import struct, os
import machine, stm
from micropython import const
_OGF_VENDOR = const(0x3F)
_OCF_FUS_GET_STATE = const(0x52)
_OCF_FUS_FW_UPGRADE = const(0x54)
_OCF_FUS_FW_DELETE = const(0x55)
_OCF_FUS_START_WS = const(0x5A)
_OCF_BLE_INIT = const(0x66)
_HCI_KIND_VENDOR_RESPONSE = const(0x11)
# The firmware updater will search all of flash for the image to install, so
# it's important that the file doesn't exist anywhere on the filesystem and
# that the updater only finds the version that we copy into the reserved area.
# Otherwise it will find matching headers/footers in the flash filesystem and
# get confused leading to either "FUS_STATE_IMG_NOT_AUTHENTIC" or (worse)
# corrupting the FUS.
# See footnote [1] referenced by Table 9 in AN5185 - Rev 4 -- the address
# passed to FUS_FW_UPGRADE is ignored (implying that it must be searching the
# flash). This requires that the firmware files have been pre-processed by
# rfcore_makefirmware.py and this key must match the one there.
_OBFUSCATION_KEY = const(0x0573B55AA)
# On boards using the internal flash filesystem, this must match the
# `_flash_fs_end` symbol defined by the linker script (boards/stm32wb55xg.ld).
# We erase everything from here until the start of the secure area (defined by
# SFSA) just to ensure that no other fragments of firmware files are left
# behind. On boards with external flash, this just needs to ensure that it
# includes any regions that may contain partial firmware data.
# This is non-const so it can be overridden.
STAGING_AREA_START = 0x80C0000
# First word of device info table indicating FUS state (returned by `stm.rfcore_status()`).
_MAGIC_FUS_ACTIVE = const(0xA94656B9) # AN5185
_MAGIC_IPCC_MEM_INCORRECT = const(0x3DE96F61)  # AN5185
# Argument to `stm.rfcore_fw_version()`.
_FW_VERSION_FUS = const(0)
_FW_VERSION_WS = const(1)
# No firmware update in progress. Boot normally.
_STATE_IDLE = const(0)
# A previous firmware update failed. Will return reason code from resume().
_STATE_FAILED = const(1)
# Trying to get into the FUS. Keep issuing GET_STATE until the FUS is active.
_STATE_WAITING_FOR_FUS = const(2)
# Trying to get into the WS. Keep issuing START_WS until the WS is active (or fails).
_STATE_WAITING_FOR_WS = const(3)
# FW_DELETE has been issued. Waiting for the WS version to report zero.
_STATE_DELETING_WS = const(4)
# Flash copy has started for FUS/WS. If a reboot occurs, then fail.
_STATE_COPYING_FUS = const(5)
_STATE_COPYING_WS = const(6)
# Flash write fully completed, ready for install.
_STATE_COPIED_FUS = const(7)
_STATE_COPIED_WS = const(8)
# Check for next update to perform.
# Either we've just gotten into the FUS, or the first update in a sequence
# has completed. (e.g. FUS done, now do WS).
_STATE_CHECK_UPDATES = const(9)
# Installation has started, keep polling GET_STATE.
_STATE_INSTALLING_WS = const(10)
_STATE_INSTALLING_FUS = const(11)
# Update completed successfully.
REASON_OK = const(0)
# The device reset during flash copy. Possibly WS still installed.
REASON_FLASH_COPY_FAILED = const(1)
# Unable to start the WS after firmware update.
REASON_NO_WS = const(2)
# Copying FUS image to staging area caused FUS to fail.
REASON_FLASH_FUS_BAD_STATE = const(3)
# Copying WS image to staging area caused FUS to fail.
REASON_FLASH_WS_BAD_STATE = const(4)
# Cannot get into the FUS. Perhaps rfcore misconfigured.
REASON_FUS_NOT_RESPONDING = const(5)
# After a FUS install, unable to get back to the FUS.
REASON_FUS_NOT_RESPONDING_AFTER_FUS = const(6)
# After a WS install, unable to get back to the FUS.
REASON_FUS_NOT_RESPONDING_AFTER_WS = const(7)
# Unable to query rfcore version/active.
REASON_RFCORE_NOT_CONFIGURED = const(8)
# The WS deletion didn't have any effect.
REASON_WS_STILL_PRESENT = const(9)
# FUS refused to delete the WS.
REASON_WS_DELETION_FAILED = const(10)
# FUS returned a specific code for a FUS update.
# See AN5185 Rev 4, Table 12. The FUS result code (0x00-0x11) is added to this value.
REASON_FUS_VENDOR = const(0x10)
# FUS returned a specific code for a WS update. Values as for the FUS update.
REASON_WS_VENDOR = const(0x30)
# FUS 1.0.2 must be installed before FUS 1.1.0 can be installed.
# A factory Nucleo board has FUS (0, 5, 3, 0, 0) and WS (0, 5, 1, 0, 0).
_FUS_VERSION_102 = (1, 0, 2, 0, 0)
_FUS_VERSION_110 = (1, 1, 0, 0, 0)
_PATH_FUS_102 = "fus_102.bin"
_PATH_FUS_110 = "fus_110.bin"
_PATH_WS_BLE_HCI = "ws_ble_hci.bin"
# This address is correct for versions up to v1.8 (assuming existing firmware deleted).
# Note any address from the end of the filesystem to the SFSA would be fine, but if
# the FUS is fixed in the future to use the specified address then these are the "correct"
# ones.
_ADDR_FUS = 0x080EC000
_ADDR_WS_BLE_HCI = 0x080DC000
# When installing the FUS/WS it can take a long time to return to the first
# GET_STATE HCI command.
# e.g. Installing stm32wb5x_BLE_Stack_full_fw.bin takes 3600ms to respond.
_INSTALLING_FUS_GET_STATE_TIMEOUT = const(1000)
_INSTALLING_WS_GET_STATE_TIMEOUT = const(6000)
def log(msg, *args, **kwargs):
print("[rfcore update]", msg.format(*args, **kwargs))
class _Flash:
_FLASH_KEY1 = 0x45670123
_FLASH_KEY2 = 0xCDEF89AB
_FLASH_CR_STRT_MASK = 1 << 16
_FLASH_CR_LOCK_MASK = 1 << 31
_FLASH_SR_BSY_MASK = 1 << 16
def wait_not_busy(self):
while machine.mem32[stm.FLASH + stm.FLASH_SR] & _Flash._FLASH_SR_BSY_MASK:
machine.idle()
def unlock(self):
if machine.mem32[stm.FLASH + stm.FLASH_CR] & _Flash._FLASH_CR_LOCK_MASK:
# Only unlock if already locked (i.e. FLASH_CR_LOCK is set).
machine.mem32[stm.FLASH + stm.FLASH_KEYR] = _Flash._FLASH_KEY1
machine.mem32[stm.FLASH + stm.FLASH_KEYR] = _Flash._FLASH_KEY2
else:
log("Flash was already unlocked.")
def lock(self):
machine.mem32[stm.FLASH + stm.FLASH_CR] = _Flash._FLASH_CR_LOCK_MASK
def erase_page(self, page):
assert 0 <= page <= 255 # 1MiB range (4k page)
self.wait_not_busy()
cr = page << 3 | 1 << 1 # PNB # PER
machine.mem32[stm.FLASH + stm.FLASH_CR] = cr
machine.mem32[stm.FLASH + stm.FLASH_CR] = cr | _Flash._FLASH_CR_STRT_MASK
self.wait_not_busy()
machine.mem32[stm.FLASH + stm.FLASH_CR] = 0
def write(self, addr, buf, sz, key=0):
assert sz % 4 == 0
self.wait_not_busy()
cr = 1 << 0 # PG
machine.mem32[stm.FLASH + stm.FLASH_CR] = cr
off = 0
while off < sz:
v = (buf[off]) | (buf[off + 1] << 8) | (buf[off + 2] << 16) | (buf[off + 3] << 24)
machine.mem32[addr + off] = v ^ key
off += 4
if off % 8 == 0:
self.wait_not_busy()
if off % 8:
machine.mem32[addr + off] = 0
self.wait_not_busy()
machine.mem32[stm.FLASH + stm.FLASH_CR] = 0
def _copy_file_to_flash(filename, addr):
flash = _Flash()
flash.unlock()
try:
# Erase the entire staging area in flash.
erase_addr = STAGING_AREA_START
sfr_sfsa = machine.mem32[stm.FLASH + stm.FLASH_SFR] & 0xFF
erase_limit = 0x08000000 + sfr_sfsa * 4096
while erase_addr < erase_limit:
flash.erase_page((erase_addr - 0x08000000) // 4096)
erase_addr += 4096
# Write the contents of the firmware (note flash.write will apply the
# XOR de-obfuscation).
with open(filename, "rb") as f:
buf = bytearray(4096)
while 1:
sz = f.readinto(buf)
if sz == 0:
break
flash.write(addr, buf, sz, _OBFUSCATION_KEY)
addr += 4096
finally:
flash.lock()
def _parse_vendor_response(data):
assert len(data) >= 7
assert data[0] == _HCI_KIND_VENDOR_RESPONSE
assert data[1] == 0x0E
# assert data[3] == 0xff # "Num HCI" -- docs say 0xff, but we see 0x01
op = (data[5] << 8) | data[4]
return (op >> 10, op & 0x3FF, data[6], data[7] if len(data) > 7 else 0)
def _run_sys_hci_cmd(ogf, ocf, buf=b"", timeout=0):
try:
ogf_out, ocf_out, status, result = _parse_vendor_response(
stm.rfcore_sys_hci(ogf, ocf, buf, timeout)
)
except OSError:
# Timeout or FUS not active.
return (0xFF, 0xFF)
assert ogf_out == ogf
assert ocf_out == ocf
return (status, result)
def fus_get_state(timeout=0):
return _run_sys_hci_cmd(_OGF_VENDOR, _OCF_FUS_GET_STATE, timeout=timeout)
def fus_is_idle():
return fus_get_state() == (0, 0)
def fus_start_ws():
return _run_sys_hci_cmd(_OGF_VENDOR, _OCF_FUS_START_WS)
def _fus_fwdelete():
return _run_sys_hci_cmd(_OGF_VENDOR, _OCF_FUS_FW_DELETE)
def _fus_run_fwupgrade(addr):
# Note: Address is ignored by the FUS (see comments above).
return _run_sys_hci_cmd(_OGF_VENDOR, _OCF_FUS_FW_UPGRADE, struct.pack("<I", addr))
# Get/set current state/reason to RTC Backup Domain.
# Using the second- and third-last registers (17, 18) as the final one (19)
# is reserved by powerctrl.c for restoring the frequency.
# Can be overridden if necessary.
REG_RTC_STATE = stm.RTC + stm.RTC_BKP18R
REG_RTC_REASON = stm.RTC + stm.RTC_BKP17R
def _read_state():
return machine.mem32[REG_RTC_STATE]
def _write_state(state):
machine.mem32[REG_RTC_STATE] = state
def _read_failure_reason():
return machine.mem32[REG_RTC_REASON]
def _write_failure_state(reason):
machine.mem32[REG_RTC_REASON] = reason
_write_state(_STATE_FAILED)
return reason
# Check for the presence of a given file and attempt to start installing it.
def _stat_and_start_copy(path, addr, copying_state, copied_state):
try:
os.stat(path)
except OSError:
log("{} not found", path)
return False
log("{} update is available", path)
if sum(stm.rfcore_fw_version(_FW_VERSION_WS)):
# There was some WS firmware already installed. Need to remove that
# before copying to flash (both FUS or WS copy require this).
log("Removing existing WS firmware")
_write_state(_STATE_DELETING_WS)
_fus_fwdelete()
else:
log("Copying {} to flash", path)
# Mark that the flash write has started. Any failure should result in an overall failure.
_write_state(copying_state) # Either _STATE_COPYING_FUS or _STATE_COPYING_WS
_copy_file_to_flash(path, addr)
log("Copying complete")
# The entire write has completed successfully, start the install.
_write_state(copied_state) # Either _STATE_COPIED_FUS or _STATE_COPIED_WS
return True
# This should be called in boot.py to resume any in-progress update.
# If there's nothing to do, it will return 0 and the app can continue as normal.
# If a previous update has failed, then it will return the failure reason.
# Otherwise it will attempt to continue the update from where it left off.
def resume():
log("Checking firmware update progress...")
if stm.rfcore_status() == _MAGIC_IPCC_MEM_INCORRECT:
return _write_failure_state(REASON_RFCORE_NOT_CONFIGURED)
while True:
state = _read_state()
if state == _STATE_IDLE:
log("Firmware update complete")
return 0
elif state == _STATE_FAILED:
log("Firmware update failed")
return _read_failure_reason()
# Keep calling GET_STATE until error or FUS.
elif state == _STATE_WAITING_FOR_FUS:
log("Querying FUS state")
status, result = fus_get_state()
log("FUS state: {} {}", status, result)
if status == 0xFF and result == 0xFF:
_write_failure_state(REASON_FUS_NOT_RESPONDING)
elif status != 0:
log("Operation in progress. Re-querying FUS state")
elif stm.rfcore_status() == _MAGIC_FUS_ACTIVE:
log("FUS active")
_write_state(_STATE_CHECK_UPDATES)
# Keep trying to start the WS until !fus_active() (or error).
elif state == _STATE_WAITING_FOR_WS:
if stm.rfcore_status() != _MAGIC_FUS_ACTIVE:
log("WS active")
_write_state(_STATE_IDLE)
# Need to force a reset otherwise BLE will fail if FUS has changed.
machine.reset()
else:
log("Starting WS")
status, result = fus_start_ws()
if status != 0:
log("Can't start WS")
log("WS version: {}", stm.rfcore_fw_version(_FW_VERSION_WS))
_write_failure_state(REASON_NO_WS)
# Sequence the FUS 1.0.2 -> FUS 1.1.0 -> WS (depending on what's available).
elif state == _STATE_CHECK_UPDATES:
log("Checking for updates")
fus_version = stm.rfcore_fw_version(_FW_VERSION_FUS)
log("FUS version {}", fus_version)
if fus_version < _FUS_VERSION_102:
log("Factory FUS detected")
if _stat_and_start_copy(
_PATH_FUS_102, _ADDR_FUS, _STATE_COPYING_FUS, _STATE_COPIED_FUS
):
continue
elif fus_version >= _FUS_VERSION_102 and fus_version < _FUS_VERSION_110:
log("FUS 1.0.2 detected")
if _stat_and_start_copy(
_PATH_FUS_110, _ADDR_FUS, _STATE_COPYING_FUS, _STATE_COPIED_FUS
):
continue
else:
log("FUS is up-to-date")
if fus_version >= _FUS_VERSION_110:
if _stat_and_start_copy(
_PATH_WS_BLE_HCI, _ADDR_WS_BLE_HCI, _STATE_COPYING_WS, _STATE_COPIED_WS
):
continue
else:
log("No WS updates available")
else:
# Don't attempt to install WS if we're running an old FUS.
log("Need latest FUS to install WS")
# Attempt to go back to WS.
# Either this will fail (because WS was removed due to FUS install), or
# this whole thing was a no-op and we should be fine to restart WS.
_write_state(_STATE_WAITING_FOR_WS)
# This shouldn't happen - the flash write should always complete and
# move straight onto the COPIED state. Failure here indicates that
# the rfcore is misconfigured or the WS firmware was not deleted first.
elif state == _STATE_COPYING_FUS or state == _STATE_COPYING_WS:
log("Flash copy failed mid-write")
_write_failure_state(REASON_FLASH_COPY_FAILED)
# Flash write completed, we should immediately see GET_STATE return 0,0
# so we can start the FUS install.
elif state == _STATE_COPIED_FUS:
if fus_is_idle():
log("FUS copy complete, installing")
_write_state(_STATE_INSTALLING_FUS)
_fus_run_fwupgrade(_ADDR_FUS)
else:
log("FUS copy bad state")
_write_failure_state(REASON_FLASH_FUS_BAD_STATE)
# Keep polling the state until we see a 0,0 (success) or non-transient
# error. In general we should expect to see (16,0) several times,
# followed by a (255,0), followed by (0, 0).
elif state == _STATE_INSTALLING_FUS:
log("Installing FUS...")
status, result = fus_get_state(_INSTALLING_FUS_GET_STATE_TIMEOUT)
log("FUS state: {} {}", status, result)
if 0x20 <= status <= 0x2F and result == 0:
# FUS_STATE_FUS_UPGRD_ONGOING
log("FUS still in progress...")
elif 0x10 <= status <= 0x1F and result == 0x11:
# FUS_STATE_FW_UPGRD_ONGOING and FUS_FW_ROLLBACK_ERROR
# Confusingly this is a "FW_UPGRD" (0x10) not "FUS_UPRD" (0x20).
log("Attempted to install same FUS version... re-querying FUS state to resume.")
elif status == 0:
log("FUS update successful")
_write_state(_STATE_CHECK_UPDATES)
elif result == 0:
# See below (for equivalent path for WS install -- we
# sometimes see (255,0) right at the end).
log("Re-querying FUS state...")
elif result == 0xFF:
_write_failure_state(REASON_FUS_NOT_RESPONDING_AFTER_FUS)
else:
_write_failure_state(REASON_FUS_VENDOR + result)
# Keep polling the state until we see 0,0 or failure (1,0). Any other
# result means retry (but the docs say that 0 and 1 are the only
# status values).
elif state == _STATE_DELETING_WS:
log("Deleting WS...")
status, result = fus_get_state()
log("FUS state: {} {}", status, result)
if status == 0:
if sum(stm.rfcore_fw_version(_FW_VERSION_WS)) == 0:
log("WS deletion complete")
_write_state(_STATE_CHECK_UPDATES)
else:
log("WS deletion no effect")
_write_failure_state(REASON_WS_STILL_PRESENT)
elif status == 1:
log("WS deletion failed")
_write_failure_state(REASON_WS_DELETION_FAILED)
# As for _STATE_COPIED_FUS above. We should immediately see 0,0.
elif state == _STATE_COPIED_WS:
if fus_is_idle():
log("WS copy complete, installing")
_write_state(_STATE_INSTALLING_WS)
_fus_run_fwupgrade(_ADDR_WS_BLE_HCI)
else:
log("WS copy bad state")
_write_failure_state(REASON_FLASH_WS_BAD_STATE)
# As for _STATE_INSTALLING_FUS above.
elif state == _STATE_INSTALLING_WS:
log("Installing WS...")
status, result = fus_get_state(_INSTALLING_WS_GET_STATE_TIMEOUT)
log("FUS state: {} {}", status, result)
if 0x10 <= status <= 0x1F and result == 0:
# FUS_STATE_FW_UPGRD_ONGOING
log("WS still in progress...")
elif 0x10 <= status <= 0x1F and result == 0x11:
# FUS_FW_ROLLBACK_ERROR
log("Attempted to install same WS version... re-querying FUS state to resume.")
elif status == 0:
log("WS update successful")
_write_state(_STATE_WAITING_FOR_WS)
elif result == 0:
# We get an error response with no payload sometimes at the end
# of the update (this is not in AN5185). Re-try the GET_STATE.
# The same thing happens transitioning from WS to FUS mode.
# The actual HCI response has no payload, the result=0 comes from
# _parse_vendor_response above when len=7.
log("Re-querying FUS state...")
elif result == 0xFF:
# This is specifically a failure sending the HCI command.
_write_failure_state(REASON_FUS_NOT_RESPONDING_AFTER_WS)
else:
_write_failure_state(REASON_WS_VENDOR + result)
# Start a firmware update.
# This will immediately trigger a reset and start the update process on boot.
def check_for_updates():
log("Starting firmware update")
_write_state(_STATE_WAITING_FOR_FUS)
machine.reset()
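# Editor's sketch (not part of the original module): a minimal way a board's boot
# script might trigger the update sequence above. The module name
# "rfcore_firmware" is an assumption here; use whatever name this file is frozen
# under. check_for_updates() writes the initial state and resets, after which the
# state machine above drives the FUS/WS installation on the next boot.
#
#   import rfcore_firmware
#   rfcore_firmware.check_for_updates()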
|
mit
|
duncanmmacleod/gwpy
|
gwpy/timeseries/io/ascii.py
|
3
|
1185
|
# -*- coding: utf-8 -*-
# Copyright (C) Duncan Macleod (2017-2020)
#
# This file is part of GWpy.
#
# GWpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GWpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GWpy. If not, see <http://www.gnu.org/licenses/>.
"""ASCII I/O registrations for gwpy.timeseries objects
"""
from ...types.io.ascii import register_ascii_series_io
from .. import (TimeSeries, StateVector)
# -- registration -------------------------------------------------------------
register_ascii_series_io(TimeSeries, format='txt')
register_ascii_series_io(TimeSeries, format='csv', delimiter=',')
register_ascii_series_io(StateVector, format='txt')
register_ascii_series_io(StateVector, format='csv', delimiter=',')
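# Editor's sketch (not part of the original module): a small helper showing what
# the registrations above enable via the unified I/O interface. The file names
# are placeholders.
def _example_roundtrip(txt_path="data.txt", csv_path="data.csv"):
    """Read a `TimeSeries` from ASCII 'txt' and write it back out as 'csv'."""
    data = TimeSeries.read(txt_path, format='txt')
    data.write(csv_path, format='csv')
    return data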
|
gpl-3.0
|
ach4m0/f1experiencealloy
|
plugins/ti.alloy/plugin.py
|
1729
|
5251
|
import os, sys, subprocess, hashlib
def check_output(*popenargs, **kwargs):
r"""Run command with arguments and return its output as a byte string.
Backported from Python 2.7, where it is implemented in pure Python in the stdlib.
>>> check_output(['/usr/bin/python', '--version'])
Python 2.6.2
"""
process = subprocess.Popen(stdout=subprocess.PIPE, *popenargs, **kwargs)
output, unused_err = process.communicate()
retcode = process.poll()
if retcode:
cmd = kwargs.get("args")
if cmd is None:
cmd = popenargs[0]
error = subprocess.CalledProcessError(retcode, cmd)
error.output = output
raise error
return output
def compile(config):
paths = {}
binaries = ["alloy","node"]
dotAlloy = os.path.abspath(os.path.join(config['project_dir'], 'build', '.alloynewcli'))
if os.path.exists(dotAlloy):
print "[DEBUG] build/.alloynewcli file found, skipping plugin..."
os.remove(dotAlloy)
else:
for binary in binaries:
try:
# see if the environment variable is defined
paths[binary] = os.environ["ALLOY_" + ("NODE_" if binary == "node" else "") + "PATH"]
except KeyError as ex:
# next try PATH, and then our guess paths
if sys.platform == "darwin" or sys.platform.startswith('linux'):
userPath = os.environ["HOME"]
guessPaths = [
"/usr/local/bin/"+binary,
"/opt/local/bin/"+binary,
userPath+"/local/bin/"+binary,
"/opt/bin/"+binary,
"/usr/bin/"+binary,
"/usr/local/share/npm/bin/"+binary
]
try:
binaryPath = check_output(["which",binary], stderr=subprocess.STDOUT).strip()
print "[DEBUG] %s installed at '%s'" % (binary,binaryPath)
except:
print "[WARN] Couldn't find %s on your PATH:" % binary
print "[WARN] %s" % os.environ["PATH"]
print "[WARN]"
print "[WARN] Checking for %s in a few default locations:" % binary
for p in guessPaths:
sys.stdout.write("[WARN] %s -> " % p)
if os.path.exists(p):
binaryPath = p
print "FOUND"
break
else:
print "not found"
binaryPath = None
if binaryPath is None:
print "[ERROR] Couldn't find %s" % binary
sys.exit(1)
else:
paths[binary] = binaryPath
# no guesses on windows, just use the PATH
elif sys.platform == "win32":
paths["alloy"] = "alloy.cmd"
f = os.path.abspath(os.path.join(config['project_dir'], 'app'))
if os.path.exists(f):
print "[INFO] alloy app found at %s" % f
rd = os.path.abspath(os.path.join(config['project_dir'], 'Resources'))
devicefamily = 'none'
simtype = 'none'
version = '0'
deploytype = 'development'
if config['platform']==u'ios':
version = config['iphone_version']
devicefamily = config['devicefamily']
deploytype = config['deploytype']
if config['platform']==u'android':
builder = config['android_builder']
version = builder.tool_api_level
deploytype = config['deploy_type']
if config['platform']==u'mobileweb':
builder = config['mobileweb_builder']
deploytype = config['deploytype']
cfg = "platform=%s,version=%s,simtype=%s,devicefamily=%s,deploytype=%s," % (config['platform'],version,simtype,devicefamily,deploytype)
if sys.platform == "win32":
cmd = [paths["alloy"], "compile", f, "--no-colors", "--config", cfg]
else:
cmd = [paths["node"], paths["alloy"], "compile", f, "--no-colors", "--config", cfg]
print "[INFO] Executing Alloy compile:"
print "[INFO] %s" % " ".join(cmd)
try:
print check_output(cmd, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as ex:
if hasattr(ex, 'output'):
print ex.output
print "[ERROR] Alloy compile failed"
retcode = 1
if hasattr(ex, 'returncode'):
retcode = ex.returncode
sys.exit(retcode)
except EnvironmentError as ex:
print "[ERROR] Unexpected error with Alloy compiler plugin: %s" % ex.strerror
sys.exit(2)
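# Editor's sketch (not part of the original plugin): Titanium calls compile()
# with a build configuration dict. A minimal manual invocation for an iOS build
# might look like the following; all values are placeholders and only the keys
# actually read above are shown.
#
#   compile({
#       'project_dir': '/path/to/project',
#       'platform': u'ios',
#       'iphone_version': '7.0',
#       'devicefamily': 'iphone',
#       'deploytype': 'development',
#   })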
|
apache-2.0
|
yvaucher/stock-logistics-tracking
|
__unported__/stock_tracking_add_move/wizard/__init__.py
|
4
|
1061
|
# -*- coding: utf-8 -*-
#################################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2011 Julius Network Solutions SARL <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#################################################################################
import add_move
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
mattupstate/overholt
|
overholt/users/models.py
|
9
|
1646
|
# -*- coding: utf-8 -*-
"""
overholt.users.models
~~~~~~~~~~~~~~~~~~~~~
User models
"""
from flask_security import UserMixin, RoleMixin
from ..core import db
from ..helpers import JsonSerializer
roles_users = db.Table(
'roles_users',
db.Column('user_id', db.Integer(), db.ForeignKey('users.id')),
db.Column('role_id', db.Integer(), db.ForeignKey('roles.id')))
class Role(RoleMixin, db.Model):
__tablename__ = 'roles'
id = db.Column(db.Integer(), primary_key=True)
name = db.Column(db.String(80), unique=True)
description = db.Column(db.String(255))
def __eq__(self, other):
return (self.name == other or
self.name == getattr(other, 'name', None))
def __ne__(self, other):
return (self.name != other and
self.name != getattr(other, 'name', None))
class UserJsonSerializer(JsonSerializer):
__json_public__ = ['id', 'email']
class User(UserJsonSerializer, UserMixin, db.Model):
__tablename__ = 'users'
id = db.Column(db.Integer, primary_key=True)
email = db.Column(db.String(255), unique=True)
password = db.Column(db.String(120))
active = db.Column(db.Boolean())
confirmed_at = db.Column(db.DateTime())
last_login_at = db.Column(db.DateTime())
current_login_at = db.Column(db.DateTime())
last_login_ip = db.Column(db.String(100))
current_login_ip = db.Column(db.String(100))
login_count = db.Column(db.Integer)
registered_at = db.Column(db.DateTime())
roles = db.relationship('Role', secondary=roles_users,
backref=db.backref('users', lazy='dynamic'))
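# Editor's sketch (not part of the original module): creating a user with a role
# using the models above. Assumes an application/session context; the email and
# password values are placeholders (password hashing is out of scope here).
def _example_create_admin(email='[email protected]', password='change-me'):
    role = Role(name='admin', description='Administrator')
    user = User(email=email, password=password, active=True, roles=[role])
    db.session.add(user)
    db.session.commit()
    return user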
|
mit
|
beck/django
|
tests/model_meta/test_legacy.py
|
199
|
7556
|
import warnings
from django import test
from django.contrib.contenttypes.fields import GenericRelation
from django.core.exceptions import FieldDoesNotExist
from django.db.models.fields import CharField, related
from django.utils.deprecation import RemovedInDjango110Warning
from .models import BasePerson, Person
from .results import TEST_RESULTS
class OptionsBaseTests(test.SimpleTestCase):
def _map_related_query_names(self, res):
return tuple((o.field.related_query_name(), m) for o, m in res)
def _map_names(self, res):
return tuple((f.name, m) for f, m in res)
class M2MTests(OptionsBaseTests):
def test_many_to_many_with_model(self):
for model, expected_result in TEST_RESULTS['many_to_many_with_model'].items():
with warnings.catch_warnings(record=True) as warning:
warnings.simplefilter("always")
models = [model for field, model in model._meta.get_m2m_with_model()]
self.assertEqual([RemovedInDjango110Warning], [w.message.__class__ for w in warning])
self.assertEqual(models, expected_result)
@test.ignore_warnings(category=RemovedInDjango110Warning)
class RelatedObjectsTests(OptionsBaseTests):
key_name = lambda self, r: r[0]
def test_related_objects(self):
result_key = 'get_all_related_objects_with_model_legacy'
for model, expected in TEST_RESULTS[result_key].items():
objects = model._meta.get_all_related_objects_with_model()
self.assertEqual(self._map_related_query_names(objects), expected)
def test_related_objects_local(self):
result_key = 'get_all_related_objects_with_model_local_legacy'
for model, expected in TEST_RESULTS[result_key].items():
objects = model._meta.get_all_related_objects_with_model(local_only=True)
self.assertEqual(self._map_related_query_names(objects), expected)
def test_related_objects_include_hidden(self):
result_key = 'get_all_related_objects_with_model_hidden_legacy'
for model, expected in TEST_RESULTS[result_key].items():
objects = model._meta.get_all_related_objects_with_model(include_hidden=True)
self.assertEqual(
sorted(self._map_names(objects), key=self.key_name),
sorted(expected, key=self.key_name)
)
def test_related_objects_include_hidden_local_only(self):
result_key = 'get_all_related_objects_with_model_hidden_local_legacy'
for model, expected in TEST_RESULTS[result_key].items():
objects = model._meta.get_all_related_objects_with_model(
include_hidden=True, local_only=True)
self.assertEqual(
sorted(self._map_names(objects), key=self.key_name),
sorted(expected, key=self.key_name)
)
def test_related_objects_proxy(self):
result_key = 'get_all_related_objects_with_model_proxy_legacy'
for model, expected in TEST_RESULTS[result_key].items():
objects = model._meta.get_all_related_objects_with_model(
include_proxy_eq=True)
self.assertEqual(self._map_related_query_names(objects), expected)
def test_related_objects_proxy_hidden(self):
result_key = 'get_all_related_objects_with_model_proxy_hidden_legacy'
for model, expected in TEST_RESULTS[result_key].items():
objects = model._meta.get_all_related_objects_with_model(
include_proxy_eq=True, include_hidden=True)
self.assertEqual(
sorted(self._map_names(objects), key=self.key_name),
sorted(expected, key=self.key_name)
)
@test.ignore_warnings(category=RemovedInDjango110Warning)
class RelatedM2MTests(OptionsBaseTests):
def test_related_m2m_with_model(self):
result_key = 'get_all_related_many_to_many_with_model_legacy'
for model, expected in TEST_RESULTS[result_key].items():
objects = model._meta.get_all_related_m2m_objects_with_model()
self.assertEqual(self._map_related_query_names(objects), expected)
def test_related_m2m_local_only(self):
result_key = 'get_all_related_many_to_many_local_legacy'
for model, expected in TEST_RESULTS[result_key].items():
objects = model._meta.get_all_related_many_to_many_objects(local_only=True)
self.assertEqual([o.field.related_query_name() for o in objects], expected)
def test_related_m2m_asymmetrical(self):
m2m = Person._meta.many_to_many
self.assertTrue('following_base' in [f.attname for f in m2m])
related_m2m = Person._meta.get_all_related_many_to_many_objects()
self.assertTrue('followers_base' in [o.field.related_query_name() for o in related_m2m])
def test_related_m2m_symmetrical(self):
m2m = Person._meta.many_to_many
self.assertTrue('friends_base' in [f.attname for f in m2m])
related_m2m = Person._meta.get_all_related_many_to_many_objects()
self.assertIn('friends_inherited_rel_+', [o.field.related_query_name() for o in related_m2m])
@test.ignore_warnings(category=RemovedInDjango110Warning)
class GetFieldByNameTests(OptionsBaseTests):
def test_get_data_field(self):
field_info = Person._meta.get_field_by_name('data_abstract')
self.assertEqual(field_info[1:], (BasePerson, True, False))
self.assertIsInstance(field_info[0], CharField)
def test_get_m2m_field(self):
field_info = Person._meta.get_field_by_name('m2m_base')
self.assertEqual(field_info[1:], (BasePerson, True, True))
self.assertIsInstance(field_info[0], related.ManyToManyField)
def test_get_related_object(self):
field_info = Person._meta.get_field_by_name('relating_baseperson')
self.assertEqual(field_info[1:], (BasePerson, False, False))
self.assertTrue(field_info[0].auto_created)
def test_get_related_m2m(self):
field_info = Person._meta.get_field_by_name('relating_people')
self.assertEqual(field_info[1:], (None, False, True))
self.assertTrue(field_info[0].auto_created)
def test_get_generic_relation(self):
field_info = Person._meta.get_field_by_name('generic_relation_base')
self.assertEqual(field_info[1:], (None, True, False))
self.assertIsInstance(field_info[0], GenericRelation)
def test_get_m2m_field_invalid(self):
with warnings.catch_warnings(record=True) as warning:
warnings.simplefilter("always")
self.assertRaises(
FieldDoesNotExist,
Person._meta.get_field,
**{'field_name': 'm2m_base', 'many_to_many': False}
)
self.assertEqual(Person._meta.get_field('m2m_base', many_to_many=True).name, 'm2m_base')
# 2 RemovedInDjango110Warning messages should be raised, one for each call of get_field()
# with the 'many_to_many' argument.
self.assertEqual(
[RemovedInDjango110Warning, RemovedInDjango110Warning],
[w.message.__class__ for w in warning]
)
@test.ignore_warnings(category=RemovedInDjango110Warning)
class GetAllFieldNamesTestCase(OptionsBaseTests):
def test_get_all_field_names(self):
for model, expected_names in TEST_RESULTS['get_all_field_names'].items():
objects = model._meta.get_all_field_names()
self.assertEqual(sorted(map(str, objects)), sorted(expected_names))
|
bsd-3-clause
|
colinligertwood/odoo
|
openerp/tools/yaml_tag.py
|
105
|
6204
|
import yaml
import logging
class YamlTag(object):
"""
Superclass for constructors of custom tags defined in yaml file.
__str__ is overridden in subclasses and used for serialization in the module recorder.
"""
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
def __getitem__(self, key):
return getattr(self, key)
def __getattr__(self, attr):
return None
def __repr__(self):
return "<%s %s>" % (self.__class__.__name__, sorted(self.__dict__.items()))
class Assert(YamlTag):
def __init__(self, model, id=None, severity=logging.WARNING, string="NONAME", **kwargs):
self.model = model
self.id = id
self.severity = severity
self.string = string
super(Assert, self).__init__(**kwargs)
class Record(YamlTag):
def __init__(self, model, id, use='id', view=True, **kwargs):
self.model = model
self.id = id
self.view = view
super(Record, self).__init__(**kwargs)
def __str__(self):
return '!record {model: %s, id: %s}:' % (str(self.model,), str(self.id,))
class Python(YamlTag):
def __init__(self, model, severity=logging.ERROR, name="", **kwargs):
self.model= model
self.severity = severity
self.name = name
super(Python, self).__init__(**kwargs)
def __str__(self):
return '!python {model: %s}: |' % (str(self.model), )
class Menuitem(YamlTag):
def __init__(self, id, name, **kwargs):
self.id = id
self.name = name
super(Menuitem, self).__init__(**kwargs)
class Workflow(YamlTag):
def __init__(self, model, action, ref=None, **kwargs):
self.model = model
self.action = action
self.ref = ref
super(Workflow, self).__init__(**kwargs)
def __str__(self):
return '!workflow {model: %s, action: %s, ref: %s}' % (str(self.model,), str(self.action,), str(self.ref,))
class ActWindow(YamlTag):
def __init__(self, **kwargs):
super(ActWindow, self).__init__(**kwargs)
class Function(YamlTag):
def __init__(self, model, name, **kwargs):
self.model = model
self.name = name
super(Function, self).__init__(**kwargs)
class Report(YamlTag):
def __init__(self, model, name, string, **kwargs):
self.model = model
self.name = name
self.string = string
super(Report, self).__init__(**kwargs)
class Delete(YamlTag):
def __init__(self, **kwargs):
super(Delete, self).__init__(**kwargs)
class Context(YamlTag):
def __init__(self, **kwargs):
super(Context, self).__init__(**kwargs)
class Url(YamlTag):
def __init__(self, **kwargs):
super(Url, self).__init__(**kwargs)
class Eval(YamlTag):
def __init__(self, expression):
self.expression = expression
super(Eval, self).__init__()
def __str__(self):
return '!eval %s' % str(self.expression)
class Ref(YamlTag):
def __init__(self, expr="False", *args, **kwargs):
self.expr = expr
super(Ref, self).__init__(*args, **kwargs)
def __str__(self):
return 'ref(%s)' % repr(self.expr)
class IrSet(YamlTag):
def __init__(self):
super(IrSet, self).__init__()
def assert_constructor(loader, node):
kwargs = loader.construct_mapping(node)
return Assert(**kwargs)
def record_constructor(loader, node):
kwargs = loader.construct_mapping(node)
assert "model" in kwargs, "'model' argument is required for !record"
assert "id" in kwargs, "'id' argument is required for !record"
return Record(**kwargs)
def python_constructor(loader, node):
kwargs = loader.construct_mapping(node)
return Python(**kwargs)
def menuitem_constructor(loader, node):
kwargs = loader.construct_mapping(node)
return Menuitem(**kwargs)
def workflow_constructor(loader, node):
kwargs = loader.construct_mapping(node)
return Workflow(**kwargs)
def act_window_constructor(loader, node):
kwargs = loader.construct_mapping(node)
return ActWindow(**kwargs)
def function_constructor(loader, node):
kwargs = loader.construct_mapping(node)
return Function(**kwargs)
def report_constructor(loader, node):
kwargs = loader.construct_mapping(node)
return Report(**kwargs)
def delete_constructor(loader, node):
kwargs = loader.construct_mapping(node)
return Delete(**kwargs)
def context_constructor(loader, node):
kwargs = loader.construct_mapping(node)
return Context(**kwargs)
def url_constructor(loader, node):
kwargs = loader.construct_mapping(node)
return Url(**kwargs)
def eval_constructor(loader, node):
expression = loader.construct_scalar(node)
return Eval(expression)
def ref_constructor(loader, tag_suffix, node):
if tag_suffix == "id":
kwargs = {"id": loader.construct_scalar(node)}
else:
kwargs = loader.construct_mapping(node)
return Ref(**kwargs)
def ir_set_constructor(loader, node):
kwargs = loader.construct_mapping(node)
return IrSet(**kwargs)
# Registers constructors for custom tags.
# Constructors are actually defined globally: do not redefined them in another
# class/file/package. This means that module recorder need import this file.
def add_constructors():
yaml.add_constructor(u"!assert", assert_constructor)
yaml.add_constructor(u"!record", record_constructor)
yaml.add_constructor(u"!python", python_constructor)
yaml.add_constructor(u"!menuitem", menuitem_constructor)
yaml.add_constructor(u"!workflow", workflow_constructor)
yaml.add_constructor(u"!act_window", act_window_constructor)
yaml.add_constructor(u"!function", function_constructor)
yaml.add_constructor(u"!report", report_constructor)
yaml.add_constructor(u"!context", context_constructor)
yaml.add_constructor(u"!delete", delete_constructor)
yaml.add_constructor(u"!url", url_constructor)
yaml.add_constructor(u"!eval", eval_constructor)
yaml.add_multi_constructor(u"!ref", ref_constructor)
yaml.add_constructor(u"!ir_set", ir_set_constructor)
add_constructors()
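# Editor's sketch (not part of the original module): with the constructors
# registered above, plain yaml.load() yields the wrapper objects defined here.
# The model name below is a placeholder; newer PyYAML versions also expect an
# explicit Loader argument.
#
#   >>> yaml.load('!eval 1 + 2').expression
#   '1 + 2'
#   >>> yaml.load('!assert {model: res.partner, severity: 30}').model
#   'res.partner'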
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
mhei/linux
|
scripts/gdb/linux/symbols.py
|
467
|
6343
|
#
# gdb helper commands and functions for Linux kernel debugging
#
# load kernel and module symbols
#
# Copyright (c) Siemens AG, 2011-2013
#
# Authors:
# Jan Kiszka <[email protected]>
#
# This work is licensed under the terms of the GNU GPL version 2.
#
import gdb
import os
import re
from linux import modules
if hasattr(gdb, 'Breakpoint'):
class LoadModuleBreakpoint(gdb.Breakpoint):
def __init__(self, spec, gdb_command):
super(LoadModuleBreakpoint, self).__init__(spec, internal=True)
self.silent = True
self.gdb_command = gdb_command
def stop(self):
module = gdb.parse_and_eval("mod")
module_name = module['name'].string()
cmd = self.gdb_command
# enforce update if object file is not found
cmd.module_files_updated = False
# Disable pagination while reporting symbol (re-)loading.
# The console input is blocked in this context, so we would otherwise
# get stuck waiting for the user to acknowledge paged output.
show_pagination = gdb.execute("show pagination", to_string=True)
pagination = show_pagination.endswith("on.\n")
gdb.execute("set pagination off")
if module_name in cmd.loaded_modules:
gdb.write("refreshing all symbols to reload module "
"'{0}'\n".format(module_name))
cmd.load_all_symbols()
else:
cmd.load_module_symbols(module)
# restore pagination state
gdb.execute("set pagination %s" % ("on" if pagination else "off"))
return False
class LxSymbols(gdb.Command):
"""(Re-)load symbols of Linux kernel and currently loaded modules.
The kernel (vmlinux) is taken from the current working directory. Modules (.ko)
are scanned recursively, starting in the same directory. Optionally, the module
search path can be extended by a space separated list of paths passed to the
lx-symbols command."""
module_paths = []
module_files = []
module_files_updated = False
loaded_modules = []
breakpoint = None
def __init__(self):
super(LxSymbols, self).__init__("lx-symbols", gdb.COMMAND_FILES,
gdb.COMPLETE_FILENAME)
def _update_module_files(self):
self.module_files = []
for path in self.module_paths:
gdb.write("scanning for modules in {0}\n".format(path))
for root, dirs, files in os.walk(path):
for name in files:
if name.endswith(".ko"):
self.module_files.append(root + "/" + name)
self.module_files_updated = True
def _get_module_file(self, module_name):
module_pattern = ".*/{0}\.ko$".format(
module_name.replace("_", r"[_\-]"))
for name in self.module_files:
if re.match(module_pattern, name) and os.path.exists(name):
return name
return None
def _section_arguments(self, module):
try:
sect_attrs = module['sect_attrs'].dereference()
except gdb.error:
return ""
attrs = sect_attrs['attrs']
section_name_to_address = {
attrs[n]['name'].string(): attrs[n]['address']
for n in range(int(sect_attrs['nsections']))}
args = []
for section_name in [".data", ".data..read_mostly", ".rodata", ".bss"]:
address = section_name_to_address.get(section_name)
if address:
args.append(" -s {name} {addr}".format(
name=section_name, addr=str(address)))
return "".join(args)
def load_module_symbols(self, module):
module_name = module['name'].string()
module_addr = str(module['core_layout']['base']).split()[0]
module_file = self._get_module_file(module_name)
if not module_file and not self.module_files_updated:
self._update_module_files()
module_file = self._get_module_file(module_name)
if module_file:
gdb.write("loading @{addr}: {filename}\n".format(
addr=module_addr, filename=module_file))
cmdline = "add-symbol-file {filename} {addr}{sections}".format(
filename=module_file,
addr=module_addr,
sections=self._section_arguments(module))
gdb.execute(cmdline, to_string=True)
if module_name not in self.loaded_modules:
self.loaded_modules.append(module_name)
else:
gdb.write("no module object found for '{0}'\n".format(module_name))
def load_all_symbols(self):
gdb.write("loading vmlinux\n")
# Dropping symbols will disable all breakpoints. So save their states
# and restore them afterward.
saved_states = []
if hasattr(gdb, 'breakpoints') and not gdb.breakpoints() is None:
for bp in gdb.breakpoints():
saved_states.append({'breakpoint': bp, 'enabled': bp.enabled})
# drop all current symbols and reload vmlinux
gdb.execute("symbol-file", to_string=True)
gdb.execute("symbol-file vmlinux")
self.loaded_modules = []
module_list = modules.module_list()
if not module_list:
gdb.write("no modules found\n")
else:
[self.load_module_symbols(module) for module in module_list]
for saved_state in saved_states:
saved_state['breakpoint'].enabled = saved_state['enabled']
def invoke(self, arg, from_tty):
self.module_paths = [os.path.expanduser(p) for p in arg.split()]
self.module_paths.append(os.getcwd())
# enforce update
self.module_files = []
self.module_files_updated = False
self.load_all_symbols()
if hasattr(gdb, 'Breakpoint'):
if self.breakpoint is not None:
self.breakpoint.delete()
self.breakpoint = None
self.breakpoint = LoadModuleBreakpoint(
"kernel/module.c:do_init_module", self)
else:
gdb.write("Note: symbol update on module loading not supported "
"with this gdb version\n")
LxSymbols()
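# Editor's sketch (not part of the original script): typical use from the gdb
# prompt once this file has been sourced. The extra search paths are
# placeholders; with no arguments only the current working directory is scanned
# for .ko files.
#
#   (gdb) lx-symbols /lib/modules/extra /path/to/out-of-tree-modules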
|
gpl-2.0
|
mabelcalim/tide-app
|
kivy/test8/.buildozer/venv/lib/python2.7/site-packages/pip/_vendor/distlib/locators.py
|
191
|
46946
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2012-2013 Vinay Sajip.
# Licensed to the Python Software Foundation under a contributor agreement.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
import gzip
from io import BytesIO
import json
import logging
import os
import posixpath
import re
try:
import threading
except ImportError:
import dummy_threading as threading
import zlib
from . import DistlibException
from .compat import (urljoin, urlparse, urlunparse, url2pathname, pathname2url,
queue, quote, unescape, string_types, build_opener,
HTTPRedirectHandler as BaseRedirectHandler,
Request, HTTPError, URLError)
from .database import Distribution, DistributionPath, make_dist
from .metadata import Metadata
from .util import (cached_property, parse_credentials, ensure_slash,
split_filename, get_project_data, parse_requirement,
parse_name_and_version, ServerProxy)
from .version import get_scheme, UnsupportedVersionError
from .wheel import Wheel, is_compatible
logger = logging.getLogger(__name__)
HASHER_HASH = re.compile('^(\w+)=([a-f0-9]+)')
CHARSET = re.compile(r';\s*charset\s*=\s*(.*)\s*$', re.I)
HTML_CONTENT_TYPE = re.compile('text/html|application/x(ht)?ml')
DEFAULT_INDEX = 'http://python.org/pypi'
def get_all_distribution_names(url=None):
"""
Return all distribution names known by an index.
:param url: The URL of the index.
:return: A list of all known distribution names.
"""
if url is None:
url = DEFAULT_INDEX
client = ServerProxy(url, timeout=3.0)
return client.list_packages()
class RedirectHandler(BaseRedirectHandler):
"""
A class to work around a bug in some Python 3.2.x releases.
"""
# There's a bug in the base version for some 3.2.x
# (e.g. 3.2.2 on Ubuntu Oneiric). If a Location header
# returns e.g. /abc, it bails because it says the scheme ''
# is bogus, when actually it should use the request's
# URL for the scheme. See Python issue #13696.
def http_error_302(self, req, fp, code, msg, headers):
# Some servers (incorrectly) return multiple Location headers
# (so probably same goes for URI). Use first header.
newurl = None
for key in ('location', 'uri'):
if key in headers:
newurl = headers[key]
break
if newurl is None:
return
urlparts = urlparse(newurl)
if urlparts.scheme == '':
newurl = urljoin(req.get_full_url(), newurl)
if hasattr(headers, 'replace_header'):
headers.replace_header(key, newurl)
else:
headers[key] = newurl
return BaseRedirectHandler.http_error_302(self, req, fp, code, msg,
headers)
http_error_301 = http_error_303 = http_error_307 = http_error_302
class Locator(object):
"""
A base class for locators - things that locate distributions.
"""
source_extensions = ('.tar.gz', '.tar.bz2', '.tar', '.zip', '.tgz', '.tbz')
binary_extensions = ('.egg', '.exe', '.whl')
excluded_extensions = ('.pdf',)
# A list of tags indicating which wheels you want to match. The default
# value of None matches against the tags compatible with the running
# Python. If you want to match other values, set wheel_tags on a locator
# instance to a list of tuples (pyver, abi, arch) which you want to match.
wheel_tags = None
downloadable_extensions = source_extensions + ('.whl',)
def __init__(self, scheme='default'):
"""
Initialise an instance.
:param scheme: Because locators look for most recent versions, they
need to know the version scheme to use. This specifies
the current PEP-recommended scheme - use ``'legacy'``
if you need to support existing distributions on PyPI.
"""
self._cache = {}
self.scheme = scheme
# Because of bugs in some of the handlers on some of the platforms,
# we use our own opener rather than just using urlopen.
self.opener = build_opener(RedirectHandler())
# If get_project() is called from locate(), the matcher instance
# is set from the requirement passed to locate(). See issue #18 for
# why this can be useful to know.
self.matcher = None
def clear_cache(self):
self._cache.clear()
def _get_scheme(self):
return self._scheme
def _set_scheme(self, value):
self._scheme = value
scheme = property(_get_scheme, _set_scheme)
def _get_project(self, name):
"""
For a given project, get a dictionary mapping available versions to Distribution
instances.
This should be implemented in subclasses.
If called from a locate() request, self.matcher will be set to a
matcher for the requirement to satisfy, otherwise it will be None.
"""
raise NotImplementedError('Please implement in the subclass')
def get_distribution_names(self):
"""
Return all the distribution names known to this locator.
"""
raise NotImplementedError('Please implement in the subclass')
def get_project(self, name):
"""
For a given project, get a dictionary mapping available versions to Distribution
instances.
This calls _get_project to do all the work, and just implements a caching layer on top.
"""
if self._cache is None:
result = self._get_project(name)
elif name in self._cache:
result = self._cache[name]
else:
result = self._get_project(name)
self._cache[name] = result
return result
def score_url(self, url):
"""
Give an url a score which can be used to choose preferred URLs
for a given project release.
"""
t = urlparse(url)
return (t.scheme != 'https', 'pypi.python.org' in t.netloc,
posixpath.basename(t.path))
def prefer_url(self, url1, url2):
"""
Choose one of two URLs where both are candidates for distribution
archives for the same version of a distribution (for example,
.tar.gz vs. zip).
The current implementation favours http:// URLs over https://, archives
from PyPI over those from other locations and then the archive name.
"""
result = url2
if url1:
s1 = self.score_url(url1)
s2 = self.score_url(url2)
if s1 > s2:
result = url1
if result != url2:
logger.debug('Not replacing %r with %r', url1, url2)
else:
logger.debug('Replacing %r with %r', url1, url2)
return result
def split_filename(self, filename, project_name):
"""
Attempt to split a filename in project name, version and Python version.
"""
return split_filename(filename, project_name)
def convert_url_to_download_info(self, url, project_name):
"""
See if a URL is a candidate for a download URL for a project (the URL
has typically been scraped from an HTML page).
If it is, a dictionary is returned with keys "name", "version",
"filename" and "url"; otherwise, None is returned.
"""
def same_project(name1, name2):
name1, name2 = name1.lower(), name2.lower()
if name1 == name2:
result = True
else:
# distribute replaces '-' by '_' in project names, so it
# can tell where the version starts in a filename.
result = name1.replace('_', '-') == name2.replace('_', '-')
return result
result = None
scheme, netloc, path, params, query, frag = urlparse(url)
if frag.lower().startswith('egg='):
logger.debug('%s: version hint in fragment: %r',
project_name, frag)
m = HASHER_HASH.match(frag)
if m:
algo, digest = m.groups()
else:
algo, digest = None, None
origpath = path
if path and path[-1] == '/':
path = path[:-1]
if path.endswith('.whl'):
try:
wheel = Wheel(path)
if is_compatible(wheel, self.wheel_tags):
if project_name is None:
include = True
else:
include = same_project(wheel.name, project_name)
if include:
result = {
'name': wheel.name,
'version': wheel.version,
'filename': wheel.filename,
'url': urlunparse((scheme, netloc, origpath,
params, query, '')),
'python-version': ', '.join(
['.'.join(list(v[2:])) for v in wheel.pyver]),
}
except Exception as e:
logger.warning('invalid path for wheel: %s', path)
elif path.endswith(self.downloadable_extensions):
path = filename = posixpath.basename(path)
for ext in self.downloadable_extensions:
if path.endswith(ext):
path = path[:-len(ext)]
t = self.split_filename(path, project_name)
if not t:
logger.debug('No match for project/version: %s', path)
else:
name, version, pyver = t
if not project_name or same_project(project_name, name):
result = {
'name': name,
'version': version,
'filename': filename,
'url': urlunparse((scheme, netloc, origpath,
params, query, '')),
#'packagetype': 'sdist',
}
if pyver:
result['python-version'] = pyver
break
if result and algo:
result['%s_digest' % algo] = digest
return result
def _get_digest(self, info):
"""
Get a digest from a dictionary by looking at keys of the form
'algo_digest'.
Returns a 2-tuple (algo, digest) if found, else None. Currently
looks only for SHA256, then MD5.
"""
result = None
for algo in ('sha256', 'md5'):
key = '%s_digest' % algo
if key in info:
result = (algo, info[key])
break
return result
def _update_version_data(self, result, info):
"""
Update a result dictionary (the final result from _get_project) with a dictionary for a
specific version, which typically holds information gleaned from a filename or URL for an
archive for the distribution.
"""
name = info.pop('name')
version = info.pop('version')
if version in result:
dist = result[version]
md = dist.metadata
else:
dist = make_dist(name, version, scheme=self.scheme)
md = dist.metadata
dist.digest = self._get_digest(info)
if md.source_url != info['url']:
md.source_url = self.prefer_url(md.source_url, info['url'])
dist.locator = self
result[version] = dist
def locate(self, requirement, prereleases=False):
"""
Find the most recent distribution which matches the given
requirement.
:param requirement: A requirement of the form 'foo (1.0)' or perhaps
'foo (>= 1.0, < 2.0, != 1.3)'
:param prereleases: If ``True``, allow pre-release versions
to be located. Otherwise, pre-release versions
are not returned.
:return: A :class:`Distribution` instance, or ``None`` if no such
distribution could be located.
"""
result = None
r = parse_requirement(requirement)
if r is None:
raise DistlibException('Not a valid requirement: %r' % requirement)
scheme = get_scheme(self.scheme)
self.matcher = matcher = scheme.matcher(r.requirement)
logger.debug('matcher: %s (%s)', matcher, type(matcher).__name__)
versions = self.get_project(r.name)
if versions:
# sometimes, versions are invalid
slist = []
vcls = matcher.version_class
for k in versions:
try:
if not matcher.match(k):
logger.debug('%s did not match %r', matcher, k)
else:
if prereleases or not vcls(k).is_prerelease:
slist.append(k)
else:
logger.debug('skipping pre-release '
'version %s of %s', k, matcher.name)
except Exception:
logger.warning('error matching %s with %r', matcher, k)
pass # slist.append(k)
if len(slist) > 1:
slist = sorted(slist, key=scheme.key)
if slist:
logger.debug('sorted list: %s', slist)
result = versions[slist[-1]]
if result and r.extras:
result.extras = r.extras
self.matcher = None
return result
class PyPIRPCLocator(Locator):
"""
This locator uses XML-RPC to locate distributions. It therefore
cannot be used with simple mirrors (that only mirror file content).
"""
def __init__(self, url, **kwargs):
"""
Initialise an instance.
:param url: The URL to use for XML-RPC.
:param kwargs: Passed to the superclass constructor.
"""
super(PyPIRPCLocator, self).__init__(**kwargs)
self.base_url = url
self.client = ServerProxy(url, timeout=3.0)
def get_distribution_names(self):
"""
Return all the distribution names known to this locator.
"""
return set(self.client.list_packages())
def _get_project(self, name):
result = {}
versions = self.client.package_releases(name, True)
for v in versions:
urls = self.client.release_urls(name, v)
data = self.client.release_data(name, v)
metadata = Metadata(scheme=self.scheme)
metadata.name = data['name']
metadata.version = data['version']
metadata.license = data.get('license')
metadata.keywords = data.get('keywords', [])
metadata.summary = data.get('summary')
dist = Distribution(metadata)
if urls:
info = urls[0]
metadata.source_url = info['url']
dist.digest = self._get_digest(info)
dist.locator = self
result[v] = dist
return result
class PyPIJSONLocator(Locator):
"""
This locator uses PyPI's JSON interface. It's very limited in functionality
and probably not worth using.
"""
def __init__(self, url, **kwargs):
super(PyPIJSONLocator, self).__init__(**kwargs)
self.base_url = ensure_slash(url)
def get_distribution_names(self):
"""
Return all the distribution names known to this locator.
"""
raise NotImplementedError('Not available from this locator')
def _get_project(self, name):
result = {}
url = urljoin(self.base_url, '%s/json' % quote(name))
try:
resp = self.opener.open(url)
data = resp.read().decode() # for now
d = json.loads(data)
md = Metadata(scheme=self.scheme)
data = d['info']
md.name = data['name']
md.version = data['version']
md.license = data.get('license')
md.keywords = data.get('keywords', [])
md.summary = data.get('summary')
dist = Distribution(md)
urls = d['urls']
if urls:
info = urls[0]
md.source_url = info['url']
dist.digest = self._get_digest(info)
dist.locator = self
result[md.version] = dist
except Exception as e:
logger.exception('JSON fetch failed: %s', e)
return result
class Page(object):
"""
This class represents a scraped HTML page.
"""
# The following slightly hairy-looking regex just looks for the contents of
# an anchor link, which has an attribute "href" either immediately preceded
# or immediately followed by a "rel" attribute. The attribute values can be
# declared with double quotes, single quotes or no quotes - which leads to
# the length of the expression.
_href = re.compile("""
(rel\s*=\s*(?:"(?P<rel1>[^"]*)"|'(?P<rel2>[^']*)'|(?P<rel3>[^>\s\n]*))\s+)?
href\s*=\s*(?:"(?P<url1>[^"]*)"|'(?P<url2>[^']*)'|(?P<url3>[^>\s\n]*))
(\s+rel\s*=\s*(?:"(?P<rel4>[^"]*)"|'(?P<rel5>[^']*)'|(?P<rel6>[^>\s\n]*)))?
""", re.I | re.S | re.X)
_base = re.compile(r"""<base\s+href\s*=\s*['"]?([^'">]+)""", re.I | re.S)
def __init__(self, data, url):
"""
Initialise an instance with the Unicode page contents and the URL they
came from.
"""
self.data = data
self.base_url = self.url = url
m = self._base.search(self.data)
if m:
self.base_url = m.group(1)
_clean_re = re.compile(r'[^a-z0-9$&+,/:;=?@.#%_\\|-]', re.I)
@cached_property
def links(self):
"""
Return the URLs of all the links on a page together with information
about their "rel" attribute, for determining which ones to treat as
downloads and which ones to queue for further scraping.
"""
def clean(url):
"Tidy up an URL."
scheme, netloc, path, params, query, frag = urlparse(url)
return urlunparse((scheme, netloc, quote(path),
params, query, frag))
result = set()
for match in self._href.finditer(self.data):
d = match.groupdict('')
rel = (d['rel1'] or d['rel2'] or d['rel3'] or
d['rel4'] or d['rel5'] or d['rel6'])
url = d['url1'] or d['url2'] or d['url3']
url = urljoin(self.base_url, url)
url = unescape(url)
url = self._clean_re.sub(lambda m: '%%%2x' % ord(m.group(0)), url)
result.add((url, rel))
# We sort the result, hoping to bring the most recent versions
# to the front
result = sorted(result, key=lambda t: t[0], reverse=True)
return result
class SimpleScrapingLocator(Locator):
"""
A locator which scrapes HTML pages to locate downloads for a distribution.
This runs multiple threads to do the I/O; performance is at least as good
as pip's PackageFinder, which works in an analogous fashion.
"""
# These are used to deal with various Content-Encoding schemes.
decoders = {
'deflate': zlib.decompress,
'gzip': lambda b: gzip.GzipFile(fileobj=BytesIO(b)).read(),
'none': lambda b: b,
}
def __init__(self, url, timeout=None, num_workers=10, **kwargs):
"""
Initialise an instance.
:param url: The root URL to use for scraping.
:param timeout: The timeout, in seconds, to be applied to requests.
This defaults to ``None`` (no timeout specified).
:param num_workers: The number of worker threads you want to do I/O.
This defaults to 10.
:param kwargs: Passed to the superclass.
"""
super(SimpleScrapingLocator, self).__init__(**kwargs)
self.base_url = ensure_slash(url)
self.timeout = timeout
self._page_cache = {}
self._seen = set()
self._to_fetch = queue.Queue()
self._bad_hosts = set()
self.skip_externals = False
self.num_workers = num_workers
self._lock = threading.RLock()
# See issue #45: we need to be resilient when the locator is used
# in a thread, e.g. with concurrent.futures. We can't use self._lock
# as it is for coordinating our internal threads - the ones created
# in _prepare_threads.
self._gplock = threading.RLock()
def _prepare_threads(self):
"""
Threads are created only when get_project is called, and terminate
before it returns. They are there primarily to parallelise I/O (i.e.
fetching web pages).
"""
self._threads = []
for i in range(self.num_workers):
t = threading.Thread(target=self._fetch)
t.setDaemon(True)
t.start()
self._threads.append(t)
def _wait_threads(self):
"""
Tell all the threads to terminate (by sending a sentinel value) and
wait for them to do so.
"""
# Note that you need two loops, since you can't say which
# thread will get each sentinel
for t in self._threads:
self._to_fetch.put(None) # sentinel
for t in self._threads:
t.join()
self._threads = []
def _get_project(self, name):
result = {}
with self._gplock:
self.result = result
self.project_name = name
url = urljoin(self.base_url, '%s/' % quote(name))
self._seen.clear()
self._page_cache.clear()
self._prepare_threads()
try:
logger.debug('Queueing %s', url)
self._to_fetch.put(url)
self._to_fetch.join()
finally:
self._wait_threads()
del self.result
return result
platform_dependent = re.compile(r'\b(linux-(i\d86|x86_64|arm\w+)|'
r'win(32|-amd64)|macosx-?\d+)\b', re.I)
def _is_platform_dependent(self, url):
"""
Does an URL refer to a platform-specific download?
"""
return self.platform_dependent.search(url)
def _process_download(self, url):
"""
See if an URL is a suitable download for a project.
If it is, register information in the result dictionary (for
_get_project) about the specific version it's for.
Note that the return value isn't actually used other than as a boolean
value.
"""
if self._is_platform_dependent(url):
info = None
else:
info = self.convert_url_to_download_info(url, self.project_name)
logger.debug('process_download: %s -> %s', url, info)
if info:
with self._lock: # needed because self.result is shared
self._update_version_data(self.result, info)
return info
def _should_queue(self, link, referrer, rel):
"""
Determine whether a link URL from a referring page and with a
particular "rel" attribute should be queued for scraping.
"""
scheme, netloc, path, _, _, _ = urlparse(link)
if path.endswith(self.source_extensions + self.binary_extensions +
self.excluded_extensions):
result = False
elif self.skip_externals and not link.startswith(self.base_url):
result = False
elif not referrer.startswith(self.base_url):
result = False
elif rel not in ('homepage', 'download'):
result = False
elif scheme not in ('http', 'https', 'ftp'):
result = False
elif self._is_platform_dependent(link):
result = False
else:
host = netloc.split(':', 1)[0]
if host.lower() == 'localhost':
result = False
else:
result = True
logger.debug('should_queue: %s (%s) from %s -> %s', link, rel,
referrer, result)
return result
def _fetch(self):
"""
Get a URL to fetch from the work queue, get the HTML page, examine its
links for download candidates and candidates for further scraping.
This is a handy method to run in a thread.
"""
while True:
url = self._to_fetch.get()
try:
if url:
page = self.get_page(url)
if page is None: # e.g. after an error
continue
for link, rel in page.links:
if link not in self._seen:
self._seen.add(link)
if (not self._process_download(link) and
self._should_queue(link, url, rel)):
logger.debug('Queueing %s from %s', link, url)
self._to_fetch.put(link)
finally:
# always do this, to avoid hangs :-)
self._to_fetch.task_done()
if not url:
#logger.debug('Sentinel seen, quitting.')
break
def get_page(self, url):
"""
Get the HTML for an URL, possibly from an in-memory cache.
XXX TODO Note: this cache is never actually cleared. It's assumed that
the data won't get stale over the lifetime of a locator instance (not
necessarily true for the default_locator).
"""
# http://peak.telecommunity.com/DevCenter/EasyInstall#package-index-api
scheme, netloc, path, _, _, _ = urlparse(url)
if scheme == 'file' and os.path.isdir(url2pathname(path)):
url = urljoin(ensure_slash(url), 'index.html')
if url in self._page_cache:
result = self._page_cache[url]
logger.debug('Returning %s from cache: %s', url, result)
else:
host = netloc.split(':', 1)[0]
result = None
if host in self._bad_hosts:
logger.debug('Skipping %s due to bad host %s', url, host)
else:
req = Request(url, headers={'Accept-encoding': 'identity'})
try:
logger.debug('Fetching %s', url)
resp = self.opener.open(req, timeout=self.timeout)
logger.debug('Fetched %s', url)
headers = resp.info()
content_type = headers.get('Content-Type', '')
if HTML_CONTENT_TYPE.match(content_type):
final_url = resp.geturl()
data = resp.read()
encoding = headers.get('Content-Encoding')
if encoding:
decoder = self.decoders[encoding] # fail if not found
data = decoder(data)
encoding = 'utf-8'
m = CHARSET.search(content_type)
if m:
encoding = m.group(1)
try:
data = data.decode(encoding)
except UnicodeError:
data = data.decode('latin-1') # fallback
result = Page(data, final_url)
self._page_cache[final_url] = result
except HTTPError as e:
if e.code != 404:
logger.exception('Fetch failed: %s: %s', url, e)
except URLError as e:
logger.exception('Fetch failed: %s: %s', url, e)
with self._lock:
self._bad_hosts.add(host)
except Exception as e:
logger.exception('Fetch failed: %s: %s', url, e)
finally:
self._page_cache[url] = result # even if None (failure)
return result
_distname_re = re.compile('<a href=[^>]*>([^<]+)<')
def get_distribution_names(self):
"""
Return all the distribution names known to this locator.
"""
result = set()
page = self.get_page(self.base_url)
if not page:
raise DistlibException('Unable to get %s' % self.base_url)
for match in self._distname_re.finditer(page.data):
result.add(match.group(1))
return result
class DirectoryLocator(Locator):
"""
This class locates distributions in a directory tree.
"""
def __init__(self, path, **kwargs):
"""
Initialise an instance.
:param path: The root of the directory tree to search.
:param kwargs: Passed to the superclass constructor,
except for:
* recursive - if True (the default), subdirectories are
recursed into. If False, only the top-level directory
is searched.
"""
self.recursive = kwargs.pop('recursive', True)
super(DirectoryLocator, self).__init__(**kwargs)
path = os.path.abspath(path)
if not os.path.isdir(path):
raise DistlibException('Not a directory: %r' % path)
self.base_dir = path
def should_include(self, filename, parent):
"""
Should a filename be considered as a candidate for a distribution
archive? As well as the filename, the directory which contains it
is provided, though not used by the current implementation.
"""
return filename.endswith(self.downloadable_extensions)
def _get_project(self, name):
result = {}
for root, dirs, files in os.walk(self.base_dir):
for fn in files:
if self.should_include(fn, root):
fn = os.path.join(root, fn)
url = urlunparse(('file', '',
pathname2url(os.path.abspath(fn)),
'', '', ''))
info = self.convert_url_to_download_info(url, name)
if info:
self._update_version_data(result, info)
if not self.recursive:
break
return result
def get_distribution_names(self):
"""
Return all the distribution names known to this locator.
"""
result = set()
for root, dirs, files in os.walk(self.base_dir):
for fn in files:
if self.should_include(fn, root):
fn = os.path.join(root, fn)
url = urlunparse(('file', '',
pathname2url(os.path.abspath(fn)),
'', '', ''))
info = self.convert_url_to_download_info(url, None)
if info:
result.add(info['name'])
if not self.recursive:
break
return result
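# Editor's sketch (not part of the original module): locating archives that sit
# in a local directory tree with the class above. The path and project name are
# placeholders.
#
#   locator = DirectoryLocator('/path/to/downloads', recursive=False)
#   versions = locator.get_project('example-project')   # {version: Distribution}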
class JSONLocator(Locator):
"""
This locator uses special extended metadata (not available on PyPI) and is
the basis of performant dependency resolution in distlib. Other locators
require archive downloads before dependencies can be determined! As you
might imagine, that can be slow.
"""
def get_distribution_names(self):
"""
Return all the distribution names known to this locator.
"""
raise NotImplementedError('Not available from this locator')
def _get_project(self, name):
result = {}
data = get_project_data(name)
if data:
for info in data.get('files', []):
if info['ptype'] != 'sdist' or info['pyversion'] != 'source':
continue
# We don't store summary in project metadata as it makes
# the data bigger for no benefit during dependency
# resolution
dist = make_dist(data['name'], info['version'],
summary=data.get('summary',
'Placeholder for summary'),
scheme=self.scheme)
md = dist.metadata
md.source_url = info['url']
# TODO SHA256 digest
if 'digest' in info and info['digest']:
dist.digest = ('md5', info['digest'])
md.dependencies = info.get('requirements', {})
dist.exports = info.get('exports', {})
result[dist.version] = dist
return result
class DistPathLocator(Locator):
"""
This locator finds installed distributions in a path. It can be useful for
adding to an :class:`AggregatingLocator`.
"""
def __init__(self, distpath, **kwargs):
"""
Initialise an instance.
:param distpath: A :class:`DistributionPath` instance to search.
"""
super(DistPathLocator, self).__init__(**kwargs)
assert isinstance(distpath, DistributionPath)
self.distpath = distpath
def _get_project(self, name):
dist = self.distpath.get_distribution(name)
if dist is None:
result = {}
else:
result = { dist.version: dist }
return result
class AggregatingLocator(Locator):
"""
This class allows you to chain and/or merge a list of locators.
"""
def __init__(self, *locators, **kwargs):
"""
Initialise an instance.
:param locators: The list of locators to search.
:param kwargs: Passed to the superclass constructor,
except for:
* merge - if False (the default), the first successful
search from any of the locators is returned. If True,
the results from all locators are merged (this can be
slow).
"""
self.merge = kwargs.pop('merge', False)
self.locators = locators
super(AggregatingLocator, self).__init__(**kwargs)
def clear_cache(self):
super(AggregatingLocator, self).clear_cache()
for locator in self.locators:
locator.clear_cache()
def _set_scheme(self, value):
self._scheme = value
for locator in self.locators:
locator.scheme = value
scheme = property(Locator.scheme.fget, _set_scheme)
def _get_project(self, name):
result = {}
for locator in self.locators:
d = locator.get_project(name)
if d:
if self.merge:
result.update(d)
else:
# See issue #18. If any dists are found and we're looking
# for specific constraints, we only return something if
# a match is found. For example, if a DirectoryLocator
# returns just foo (1.0) while we're looking for
# foo (>= 2.0), we'll pretend there was nothing there so
# that subsequent locators can be queried. Otherwise we
# would just return foo (1.0) which would then lead to a
# failure to find foo (>= 2.0), because other locators
# weren't searched. Note that this only matters when
# merge=False.
if self.matcher is None:
found = True
else:
found = False
for k in d:
if self.matcher.match(k):
found = True
break
if found:
result = d
break
return result
def get_distribution_names(self):
"""
Return all the distribution names known to this locator.
"""
result = set()
for locator in self.locators:
try:
result |= locator.get_distribution_names()
except NotImplementedError:
pass
return result
# We use a legacy scheme simply because most of the dists on PyPI use legacy
# versions which don't conform to PEP 426 / PEP 440.
default_locator = AggregatingLocator(
JSONLocator(),
SimpleScrapingLocator('https://pypi.python.org/simple/',
timeout=3.0),
scheme='legacy')
locate = default_locator.locate
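# Editor's sketch (not part of the original module): the module-level helper
# bound above can be called directly with a requirement string in the
# "name (constraints)" form documented on Locator.locate(); it returns a
# Distribution instance or None. The project name below is a placeholder.
#
#   dist = locate('example-project (>= 1.0)')
#   if dist is not None:
#       print(dist.name, dist.version)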
NAME_VERSION_RE = re.compile(r'(?P<name>[\w-]+)\s*'
r'\(\s*(==\s*)?(?P<ver>[^)]+)\)$')
class DependencyFinder(object):
"""
Locate dependencies for distributions.
"""
def __init__(self, locator=None):
"""
Initialise an instance, using the specified locator
to locate distributions.
"""
self.locator = locator or default_locator
self.scheme = get_scheme(self.locator.scheme)
def add_distribution(self, dist):
"""
Add a distribution to the finder. This will update internal information
about who provides what.
:param dist: The distribution to add.
"""
logger.debug('adding distribution %s', dist)
name = dist.key
self.dists_by_name[name] = dist
self.dists[(name, dist.version)] = dist
for p in dist.provides:
name, version = parse_name_and_version(p)
logger.debug('Add to provided: %s, %s, %s', name, version, dist)
self.provided.setdefault(name, set()).add((version, dist))
def remove_distribution(self, dist):
"""
Remove a distribution from the finder. This will update internal
information about who provides what.
:param dist: The distribution to remove.
"""
logger.debug('removing distribution %s', dist)
name = dist.key
del self.dists_by_name[name]
del self.dists[(name, dist.version)]
for p in dist.provides:
name, version = parse_name_and_version(p)
logger.debug('Remove from provided: %s, %s, %s', name, version, dist)
s = self.provided[name]
s.remove((version, dist))
if not s:
del self.provided[name]
def get_matcher(self, reqt):
"""
Get a version matcher for a requirement.
:param reqt: The requirement
:type reqt: str
:return: A version matcher (an instance of
:class:`distlib.version.Matcher`).
"""
try:
matcher = self.scheme.matcher(reqt)
except UnsupportedVersionError:
# XXX compat-mode if cannot read the version
name = reqt.split()[0]
matcher = self.scheme.matcher(name)
return matcher
def find_providers(self, reqt):
"""
Find the distributions which can fulfill a requirement.
:param reqt: The requirement.
:type reqt: str
:return: A set of distribution which can fulfill the requirement.
"""
matcher = self.get_matcher(reqt)
name = matcher.key # case-insensitive
result = set()
provided = self.provided
if name in provided:
for version, provider in provided[name]:
try:
match = matcher.match(version)
except UnsupportedVersionError:
match = False
if match:
result.add(provider)
break
return result
def try_to_replace(self, provider, other, problems):
"""
Attempt to replace one provider with another. This is typically used
when resolving dependencies from multiple sources, e.g. A requires
(B >= 1.0) while C requires (B >= 1.1).
For successful replacement, ``provider`` must meet all the requirements
which ``other`` fulfills.
:param provider: The provider we are trying to replace with.
:param other: The provider we're trying to replace.
:param problems: If False is returned, this will contain what
problems prevented replacement. This is currently
a tuple of the literal string 'cantreplace',
``provider``, ``other`` and the set of requirements
that ``provider`` couldn't fulfill.
:return: True if we can replace ``other`` with ``provider``, else
False.
"""
rlist = self.reqts[other]
unmatched = set()
for s in rlist:
matcher = self.get_matcher(s)
if not matcher.match(provider.version):
unmatched.add(s)
if unmatched:
# can't replace other with provider
problems.add(('cantreplace', provider, other, unmatched))
result = False
else:
# can replace other with provider
self.remove_distribution(other)
del self.reqts[other]
for s in rlist:
self.reqts.setdefault(provider, set()).add(s)
self.add_distribution(provider)
result = True
return result
def find(self, requirement, meta_extras=None, prereleases=False):
"""
Find a distribution and all distributions it depends on.
:param requirement: The requirement specifying the distribution to
find, or a Distribution instance.
:param meta_extras: A list of meta extras such as :test:, :build: and
so on.
:param prereleases: If ``True``, allow pre-release versions to be
returned - otherwise, don't return prereleases
unless they're all that's available.
Return a set of :class:`Distribution` instances and a set of
problems.
The distributions returned should be such that they have the
:attr:`requested` attribute set to ``True`` if they were
from the ``requirement`` passed to ``find()``, and they have the
:attr:`build_time_dependency` attribute set to ``True`` unless they
are post-installation dependencies of the ``requirement``.
The problems should be a tuple consisting of the string
``'unsatisfied'`` and the requirement which couldn't be satisfied
by any distribution known to the locator.
"""
self.provided = {}
self.dists = {}
self.dists_by_name = {}
self.reqts = {}
meta_extras = set(meta_extras or [])
if ':*:' in meta_extras:
meta_extras.remove(':*:')
# :meta: and :run: are implicitly included
meta_extras |= set([':test:', ':build:', ':dev:'])
if isinstance(requirement, Distribution):
dist = odist = requirement
logger.debug('passed %s as requirement', odist)
else:
dist = odist = self.locator.locate(requirement,
prereleases=prereleases)
if dist is None:
raise DistlibException('Unable to locate %r' % requirement)
logger.debug('located %s', odist)
dist.requested = True
problems = set()
todo = set([dist])
install_dists = set([odist])
while todo:
dist = todo.pop()
name = dist.key # case-insensitive
if name not in self.dists_by_name:
self.add_distribution(dist)
else:
#import pdb; pdb.set_trace()
other = self.dists_by_name[name]
if other != dist:
self.try_to_replace(dist, other, problems)
ireqts = dist.run_requires | dist.meta_requires
sreqts = dist.build_requires
ereqts = set()
if dist in install_dists:
for key in ('test', 'build', 'dev'):
e = ':%s:' % key
if e in meta_extras:
ereqts |= getattr(dist, '%s_requires' % key)
all_reqts = ireqts | sreqts | ereqts
for r in all_reqts:
providers = self.find_providers(r)
if not providers:
logger.debug('No providers found for %r', r)
provider = self.locator.locate(r, prereleases=prereleases)
# If no provider is found and we didn't consider
# prereleases, consider them now.
if provider is None and not prereleases:
provider = self.locator.locate(r, prereleases=True)
if provider is None:
logger.debug('Cannot satisfy %r', r)
problems.add(('unsatisfied', r))
else:
n, v = provider.key, provider.version
if (n, v) not in self.dists:
todo.add(provider)
providers.add(provider)
if r in ireqts and dist in install_dists:
install_dists.add(provider)
logger.debug('Adding %s to install_dists',
provider.name_and_version)
for p in providers:
name = p.key
if name not in self.dists_by_name:
self.reqts.setdefault(p, set()).add(r)
else:
other = self.dists_by_name[name]
if other != p:
# see if other can be replaced by p
self.try_to_replace(p, other, problems)
dists = set(self.dists.values())
for dist in dists:
dist.build_time_dependency = dist not in install_dists
if dist.build_time_dependency:
logger.debug('%s is a build-time dependency only.',
dist.name_and_version)
logger.debug('find done for %s', odist)
return dists, problems
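# Illustrative sketch (not part of the original module): one plausible way to
# drive DependencyFinder with the default locator. The requirement string and
# the ':test:' extra below are made-up example inputs; the attributes read from
# each dist (name_and_version, build_time_dependency) are the ones set by
# find() above.
def _example_find_dependencies():
    finder = DependencyFinder(default_locator)
    dists, problems = finder.find('requests (>= 2.0)', meta_extras=[':test:'])
    for dist in sorted(dists, key=lambda d: d.name_and_version):
        kind = 'build-time only' if dist.build_time_dependency else 'runtime'
        print('%s (%s)' % (dist.name_and_version, kind))
    for problem in problems:
        print('problem: %r' % (problem,))
    return dists, problems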
|
gpl-3.0
|
naousse/odoo
|
addons/stock/__openerp__.py
|
261
|
4008
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Warehouse Management',
'version': '1.1',
'author': 'OpenERP SA',
'summary': 'Inventory, Logistic, Storage',
'description': """
Manage multi-warehouses, multi- and structured stock locations
==============================================================
The warehouse and inventory management is based on a hierarchical location structure, from warehouses to storage bins.
The double entry inventory system allows you to manage customers, suppliers as well as manufacturing inventories.
OpenERP has the capacity to manage lots and serial numbers ensuring compliance with the traceability requirements imposed by the majority of industries.
Key Features
------------
* Moves history and planning,
* Minimum stock rules
* Support for barcodes
* Rapid detection of mistakes through double entry system
* Traceability (Serial Numbers, Packages, ...)
Dashboard / Reports for Warehouse Management will include:
----------------------------------------------------------
* Incoming Products (Graph)
* Outgoing Products (Graph)
* Procurement in Exception
* Inventory Analysis
* Last Product Inventories
* Moves Analysis
""",
'website': 'https://www.odoo.com/page/warehouse',
'depends': ['product', 'procurement', 'board', 'web_kanban_gauge', 'web_kanban_sparkline'],
'category': 'Warehouse Management',
'sequence': 16,
'demo': [
'stock_demo_pre.yml',
'stock_demo.xml',
'procurement_demo.xml',
'stock_orderpoint.xml',
'stock_orderpoint.yml',
'stock_demo.yml',
'stock_location_demo_cpu1.xml',
'stock_location_demo_cpu3.yml',
],
'data': [
'security/stock_security.xml',
'security/ir.model.access.csv',
'stock_data.xml',
'stock_data.yml',
'wizard/stock_move_view.xml',
'wizard/stock_change_product_qty_view.xml',
'wizard/stock_return_picking_view.xml',
'wizard/make_procurement_view.xml',
'wizard/orderpoint_procurement_view.xml',
'wizard/stock_transfer_details.xml',
'stock_incoterms.xml',
'stock_report.xml',
'stock_view.xml',
'stock_sequence.xml',
'product_view.xml',
'partner_view.xml',
'report/report_stock_view.xml',
'res_config_view.xml',
'views/report_package_barcode.xml',
'views/report_lot_barcode.xml',
'views/report_location_barcode.xml',
'views/report_stockpicking.xml',
'views/report_stockinventory.xml',
'views/stock.xml',
],
'test': [
'test/inventory.yml',
'test/move.yml',
'test/procrule.yml',
'test/stock_users.yml',
'stock_demo.yml',
'test/shipment.yml',
'test/packing.yml',
'test/packingneg.yml',
'test/wiseoperator.yml',
],
'installable': True,
'application': True,
'auto_install': False,
'qweb': ['static/src/xml/picking.xml'],
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
jlguardi/yowsup
|
yowsup/layers/protocol_chatstate/protocolentities/chatstate.py
|
70
|
1234
|
from yowsup.structs import ProtocolEntity, ProtocolTreeNode
class ChatstateProtocolEntity(ProtocolEntity):
'''
INCOMING
<chatstate from="[email protected]">
<{{composing|paused}}></{{composing|paused}}>
</chatstate>
OUTGOING
<chatstate to="[email protected]">
<{{composing|paused}}></{{composing|paused}}>
</chatstate>
'''
STATE_TYPING = "composing"
STATE_PAUSED = "paused"
STATES = (STATE_TYPING, STATE_PAUSED)
def __init__(self, _state):
super(ChatstateProtocolEntity, self).__init__("chatstate")
assert _state in self.__class__.STATES, "Expected chat state to be in %s, got %s" % (self.__class__.STATES, _state)
self._state = _state
def getState(self):
return self._state
def toProtocolTreeNode(self):
node = self._createProtocolTreeNode({}, None, data = None)
node.addChild(ProtocolTreeNode(self._state))
return node
def __str__(self):
out = "CHATSTATE:\n"
out += "State: %s\n" % self._state
return out
@staticmethod
def fromProtocolTreeNode(node):
return ChatstateProtocolEntity(
node.getAllChildren()[0].tag,
)
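# Illustrative sketch (not part of the original module): building an outgoing
# "composing" chat state and round-tripping it through its ProtocolTreeNode
# form, using only the API defined above.
def _example_chatstate_roundtrip():
    entity = ChatstateProtocolEntity(ChatstateProtocolEntity.STATE_TYPING)
    node = entity.toProtocolTreeNode()       # <chatstate><composing/></chatstate>
    rebuilt = ChatstateProtocolEntity.fromProtocolTreeNode(node)
    assert rebuilt.getState() == ChatstateProtocolEntity.STATE_TYPING
    return rebuilt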
|
gpl-3.0
|
flyballlabs/threatdetectionservice
|
agents/rpi/EnableReplay.py
|
2
|
2198
|
'''
This script replays a pcap capture over the network to the host server.
Make sure to re-enable the firewall ports afterwards.
@author: devopsec
'''
import subprocess, sys
from datetime import datetime
def run():
def iptablesFLUSH():
subprocess.run("iptables -Z", shell=True)# zero counters
subprocess.run("iptables -F", shell=True)# flush (delete) rules
subprocess.run("iptables -X", shell=True)# delete all extra chains
subprocess.run("ip6tables -Z", shell=True)# zero counters
subprocess.run("ip6tables -F", shell=True)# flush (delete) rules
subprocess.run("ip6tables -X", shell=True)# delete all extra chains
#delete iptables rules
iptablesFLUSH()
## set policies to let everything in
subprocess.run("iptables --policy INPUT ACCEPT", shell=True)
subprocess.run("iptables --policy OUTPUT ACCEPT", shell=True)
subprocess.run("iptables --policy FORWARD ACCEPT", shell=True)
subprocess.run("ip6tables --policy INPUT ACCEPT", shell=True)
subprocess.run("ip6tables --policy OUTPUT ACCEPT", shell=True)
subprocess.run("ip6tables --policy FORWARD ACCEPT", shell=True)
#enable all traffic in ufw
subprocess.run("ufw allow from any", shell=True)
subprocess.run("ufw allow to any", shell=True)
#forward all ports to remote host
subprocess.run("iptables -A PREROUTING -t nat -i enxb827ebcff441 -p tcp --sport 1:65535 -j DNAT --to-destination 50.253.243.17:6667",
shell=True, stdout=subprocess.PIPE)
subprocess.run("iptables -A FORWARD -i enxb827ebcff441 -d 50.253.243.17:6667 -j ACCEPT",
shell=True, stdout=subprocess.PIPE)
#get pcap file name
dt = datetime.now()
date = datetime.strftime(dt, '%Y-%m-%d')
fileIn = "/capture-data/" + date + ".pcap"
## replay packet capture ##
subprocess.Popen(["tcpreplay", "-q", "--topspeed", "-i", "enxb827ebcff441", fileIn], stdout=subprocess.PIPE)
#delete ufw rules
subprocess.run("ufw delete allow from any", shell=True)
subprocess.run("ufw delete allow to any", shell=True)
#delete added iptables rules
iptablesFLUSH()
## end run function ##
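# Illustrative sketch (not part of the original script): a minimal entry point
# so the replay can be launched directly; assumes it runs as root on the
# capture host, since the iptables/ufw calls above require it.
if __name__ == '__main__':
    run()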
|
apache-2.0
|
softcert/vsroom
|
vsroom/common/sanitizers/ah.py
|
1
|
1092
|
import time
import re
from vsroom.common import timeconversion
from vsroom.common import sanitizer
# sanitizer.Sanitizer is the base class for a simple sanitizer bot.
class AbuseHelperSanitizer(sanitizer.Sanitizer):
# .sanitize(event) is the hook method for sanitizing events. This
# is the only method you have to implement to create a basic
# normalizer, sanitizer, modifier or filter.
def sanitize(self, event):
# if only one key (id) -> clearing event. No use to
# add sanitized stuff
# Return a list of events here. The list can contain 0-n events.
descr = event.value('decription', False)
if descr:
event.add('description', descr)
event.clear('decription')
time_sec = event.value('time', False)
if time_sec:
event.clear('time')
event.add('time', timeconversion.seconds2iso(time_sec))
return [event]
if __name__ == "__main__":
# Execute the sanitizer bot based on the command line options.
AbuseHelperSanitizer.from_command_line().execute()
|
mit
|
trezorg/django
|
django/core/management/commands/inspectdb.py
|
203
|
7614
|
import keyword
from optparse import make_option
from django.core.management.base import NoArgsCommand, CommandError
from django.db import connections, DEFAULT_DB_ALIAS
class Command(NoArgsCommand):
help = "Introspects the database tables in the given database and outputs a Django model module."
option_list = NoArgsCommand.option_list + (
make_option('--database', action='store', dest='database',
default=DEFAULT_DB_ALIAS, help='Nominates a database to '
'introspect. Defaults to using the "default" database.'),
)
requires_model_validation = False
db_module = 'django.db'
def handle_noargs(self, **options):
try:
for line in self.handle_inspection(options):
self.stdout.write("%s\n" % line)
except NotImplementedError:
raise CommandError("Database inspection isn't supported for the currently selected database backend.")
def handle_inspection(self, options):
connection = connections[options.get('database', DEFAULT_DB_ALIAS)]
table2model = lambda table_name: table_name.title().replace('_', '').replace(' ', '').replace('-', '')
cursor = connection.cursor()
yield "# This is an auto-generated Django model module."
yield "# You'll have to do the following manually to clean this up:"
yield "# * Rearrange models' order"
yield "# * Make sure each model has one field with primary_key=True"
yield "# Feel free to rename the models, but don't rename db_table values or field names."
yield "#"
yield "# Also note: You'll have to insert the output of 'django-admin.py sqlcustom [appname]'"
yield "# into your database."
yield ''
yield 'from %s import models' % self.db_module
yield ''
for table_name in connection.introspection.get_table_list(cursor):
yield 'class %s(models.Model):' % table2model(table_name)
try:
relations = connection.introspection.get_relations(cursor, table_name)
except NotImplementedError:
relations = {}
try:
indexes = connection.introspection.get_indexes(cursor, table_name)
except NotImplementedError:
indexes = {}
for i, row in enumerate(connection.introspection.get_table_description(cursor, table_name)):
column_name = row[0]
att_name = column_name.lower()
comment_notes = [] # Holds Field notes, to be displayed in a Python comment.
extra_params = {} # Holds Field parameters such as 'db_column'.
# If the column name can't be used verbatim as a Python
# attribute, set the "db_column" for this Field.
if ' ' in att_name or '-' in att_name or keyword.iskeyword(att_name) or column_name != att_name:
extra_params['db_column'] = column_name
# Modify the field name to make it Python-compatible.
if ' ' in att_name:
att_name = att_name.replace(' ', '_')
comment_notes.append('Field renamed to remove spaces.')
if '-' in att_name:
att_name = att_name.replace('-', '_')
comment_notes.append('Field renamed to remove dashes.')
if column_name != att_name:
comment_notes.append('Field name made lowercase.')
if i in relations:
rel_to = relations[i][1] == table_name and "'self'" or table2model(relations[i][1])
field_type = 'ForeignKey(%s' % rel_to
if att_name.endswith('_id'):
att_name = att_name[:-3]
else:
extra_params['db_column'] = column_name
else:
# Calling `get_field_type` to get the field type string and any
# additional parameters and notes.
field_type, field_params, field_notes = self.get_field_type(connection, table_name, row)
extra_params.update(field_params)
comment_notes.extend(field_notes)
# Add primary_key and unique, if necessary.
if column_name in indexes:
if indexes[column_name]['primary_key']:
extra_params['primary_key'] = True
elif indexes[column_name]['unique']:
extra_params['unique'] = True
field_type += '('
if keyword.iskeyword(att_name):
att_name += '_field'
comment_notes.append('Field renamed because it was a Python reserved word.')
# Don't output 'id = meta.AutoField(primary_key=True)', because
# that's assumed if it doesn't exist.
if att_name == 'id' and field_type == 'AutoField(' and extra_params == {'primary_key': True}:
continue
# Add 'null' and 'blank', if the 'null_ok' flag was present in the
# table description.
if row[6]: # If it's NULL...
extra_params['blank'] = True
if not field_type in ('TextField(', 'CharField('):
extra_params['null'] = True
field_desc = '%s = models.%s' % (att_name, field_type)
if extra_params:
if not field_desc.endswith('('):
field_desc += ', '
field_desc += ', '.join(['%s=%r' % (k, v) for k, v in extra_params.items()])
field_desc += ')'
if comment_notes:
field_desc += ' # ' + ' '.join(comment_notes)
yield ' %s' % field_desc
for meta_line in self.get_meta(table_name):
yield meta_line
def get_field_type(self, connection, table_name, row):
"""
Given the database connection, the table name, and the cursor row
description, this routine will return the given field type name, as
well as any additional keyword parameters and notes for the field.
"""
field_params = {}
field_notes = []
try:
field_type = connection.introspection.get_field_type(row[1], row)
except KeyError:
field_type = 'TextField'
field_notes.append('This field type is a guess.')
# This is a hook for DATA_TYPES_REVERSE to return a tuple of
# (field_type, field_params_dict).
if type(field_type) is tuple:
field_type, new_params = field_type
field_params.update(new_params)
# Add max_length for all CharFields.
if field_type == 'CharField' and row[3]:
field_params['max_length'] = row[3]
if field_type == 'DecimalField':
field_params['max_digits'] = row[4]
field_params['decimal_places'] = row[5]
return field_type, field_params, field_notes
def get_meta(self, table_name):
"""
Return a sequence comprising the lines of code necessary
to construct the inner Meta class for the model corresponding
to the given database table name.
"""
return [' class Meta:',
' db_table = %r' % table_name,
'']
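# Illustrative usage sketch (not part of the original module): the command is
# normally run as "python manage.py inspectdb --database=default"; the same
# inspection can be driven programmatically through call_command. The alias
# 'default' is only an example.
def _example_run_inspectdb():
    from django.core.management import call_command
    call_command('inspectdb', database='default')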
|
bsd-3-clause
|
frumiousbandersnatch/supybot-code
|
src/world.py
|
6
|
7430
|
###
# Copyright (c) 2002-2005, Jeremiah Fincher
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
"""
Module for general worldly stuff, like global variables and whatnot.
"""
import gc
import os
import sys
import time
import atexit
import threading
import multiprocessing
if sys.version_info >= (2, 5, 0):
import re as sre
else:
import sre
from . import conf, drivers, ircutils, log, registry
startedAt = time.time() # Just in case it doesn't get set later.
starting = False
mainThread = threading.currentThread()
def isMainThread():
return mainThread is threading.currentThread()
threadsSpawned = 1 # Starts at one for the initial "thread."
class SupyThread(threading.Thread):
def __init__(self, *args, **kwargs):
global threadsSpawned
threadsSpawned += 1
super(SupyThread, self).__init__(*args, **kwargs)
log.debug('Spawning thread %q.', self.getName())
processesSpawned = 1 # Starts at one for the initial process.
class SupyProcess(multiprocessing.Process):
def __init__(self, *args, **kwargs):
global processesSpawned
processesSpawned += 1
super(SupyProcess, self).__init__(*args, **kwargs)
log.debug('Spawning process %q.', self.name)
commandsProcessed = 0
ircs = [] # A list of all the IRCs.
def getIrc(network):
network = network.lower()
for irc in ircs:
if irc.network.lower() == network:
return irc
return None
def _flushUserData():
userdataFilename = os.path.join(conf.supybot.directories.conf(),
'userdata.conf')
registry.close(conf.users, userdataFilename)
flushers = [_flushUserData] # A periodic function will flush all these.
registryFilename = None
def flush():
"""Flushes all the registered flushers."""
for (i, f) in enumerate(flushers):
try:
f()
except Exception, e:
log.exception('Uncaught exception in flusher #%s (%s):', i, f)
def debugFlush(s=''):
if conf.supybot.debug.flushVeryOften():
if s:
log.debug(s)
flush()
def upkeep():
"""Does upkeep (like flushing, garbage collection, etc.)"""
sys.exc_clear() # Just in case, let's clear the exception info.
if os.name == 'nt':
try:
import msvcrt
msvcrt.heapmin()
except ImportError:
pass
except IOError: # Win98 sux0rs!
pass
if conf.daemonized:
# If we're daemonized, sys.stdout has been replaced with a StringIO
# object, so let's see if anything's been printed, and if so, let's
# log.warning it (things shouldn't be printed, and we're more likely
# to get bug reports if we make it a warning).
assert not type(sys.stdout) == file, 'Not a StringIO object!'
if not hasattr(sys.stdout, 'getvalue'):
# Stupid twisted sometimes replaces our stdout with theirs, because
# "The Twisted Way Is The Right Way" (ha!). So we're stuck simply
# returning.
log.warning('Expected cStringIO as stdout, got %r.', sys.stdout)
return
s = sys.stdout.getvalue()
if s:
log.warning('Printed to stdout after daemonization: %s', s)
sys.stdout.reset() # Seeks to 0.
sys.stdout.truncate() # Truncates to current offset.
assert not type(sys.stderr) == file, 'Not a StringIO object!'
s = sys.stderr.getvalue()
if s:
log.error('Printed to stderr after daemonization: %s', s)
sys.stderr.reset() # Seeks to 0.
sys.stderr.truncate() # Truncates to current offset.
doFlush = conf.supybot.flush() and not starting
if doFlush:
flush()
# This is so registry._cache gets filled.
# This seems dumb, so we'll try not doing it anymore.
#if registryFilename is not None:
# registry.open(registryFilename)
if not dying:
log.debug('Regexp cache size: %s', len(sre._cache))
log.debug('Pattern cache size: %s', len(ircutils._patternCache))
log.debug('HostmaskPatternEqual cache size: %s',
len(ircutils._hostmaskPatternEqualCache))
#timestamp = log.timestamp()
if doFlush:
log.info('Flushers flushed and garbage collected.')
else:
log.info('Garbage collected.')
collected = gc.collect()
if gc.garbage:
log.warning('Noncollectable garbage (file this as a bug on SF.net): %s',
gc.garbage)
return collected
def makeDriversDie():
"""Kills drivers."""
log.info('Killing Driver objects.')
for driver in drivers._drivers.itervalues():
driver.die()
def makeIrcsDie():
"""Kills Ircs."""
log.info('Killing Irc objects.')
for irc in ircs[:]:
if not irc.zombie:
irc.die()
else:
log.debug('Not killing %s, it\'s already a zombie.', irc)
def startDying():
"""Starts dying."""
log.info('Shutdown initiated.')
global dying
dying = True
def finished():
log.info('Shutdown complete.')
# These are in order; don't reorder them for cosmetic purposes. The order
# in which they're registered is the reverse order in which they will run.
atexit.register(finished)
atexit.register(upkeep)
atexit.register(makeIrcsDie)
atexit.register(makeDriversDie)
atexit.register(startDying)
##################################################
##################################################
##################################################
## Don't even *think* about messing with these. ##
##################################################
##################################################
##################################################
dying = False
testing = False
starting = False
profiling = False
documenting = False
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
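# Illustrative sketch (not part of the original module): plugins typically hook
# into the periodic flushing by appending a callable to the module-level
# `flushers` list defined above; flush() then calls it and logs any exception
# it raises. The function names below are just examples.
def _exampleRegisterFlusher():
    def _flushExamplePluginData():
        log.debug('flushing example plugin data')
    flushers.append(_flushExamplePluginData)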
|
bsd-3-clause
|
eckardm/archivematica
|
src/dashboard/src/components/advanced_search.py
|
2
|
9710
|
# This file is part of Archivematica.
#
# Copyright 2010-2013 Artefactual Systems Inc. <http://artefactual.com>
#
# Archivematica is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Archivematica is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Archivematica. If not, see <http://www.gnu.org/licenses/>.
from django.http import HttpResponse
import logging
import sys
import dateutil.parser
from elasticsearch import Elasticsearch
sys.path.append("/usr/lib/archivematica/archivematicaCommon")
import elasticSearchFunctions
logger = logging.getLogger("archivematica.dashboard.advanced_search")
OBJECT_FIELDS = (
"mets",
"transferMetadata",
)
OTHER_FIELDS = (
"transferMetadataOther",
)
def search_parameter_prep(request):
queries = request.GET.getlist('query')
ops = request.GET.getlist('op')
fields = request.GET.getlist('field')
types = request.GET.getlist('type')
other_fields = request.GET.getlist('fieldName')
# prepend default op arg as first op can't be set manually
# if there are no entries, insert the first as "or" (e.g. a "should" clause);
# otherwise copy the existing first entry
# this ensures that if the second clause is a "must," the first entry will be too, etc.
if len(ops) == 0:
ops.insert(0, 'or')
else:
ops.insert(0, ops[0])
if len(queries) == 0:
queries = ['*']
fields = ['']
else:
# make sure each query has field/ops set
for index, query in enumerate(queries):
# a blank query makes ES error
if queries[index] == '':
queries[index] = '*'
try:
fields[index]
except:
fields.insert(index, '')
try:
ops[index]
except:
ops.insert(index, 'or')
try:
types[index]
except:
types.insert(index, '')
# For "other" fields, the actual title of the subfield is located in a second array;
# search for any such fields and replace the placeholder value in the `fields` array
# with the full name.
# In Elasticsearch, "." is used to search subdocuments; for example,
# transferMetadata.Bagging-Date would be used to search for the value of Bagging-Date
# in this nested object:
# {
# "transferMetadata": {
# "Start-Date": 0000-00-00,
# "Bagging-Date": 0000-00-00
# }
# }
for index, field in enumerate(fields):
if field == "transferMetadataOther":
fields[index] = 'transferMetadata.' + other_fields[index]
return queries, ops, fields, types
# used in templates so pagination links can re-use the search query params without the page param
def extract_url_search_params_from_request(request):
# set pagination-related variables
search_params = ''
try:
search_params = request.get_full_path().split('?')[1]
end_of_search_params = search_params.index('&page')
search_params = search_params[:end_of_search_params]
except:
pass
return search_params
def assemble_query(queries, ops, fields, types, search_index=None, doc_type=None, **kwargs):
must_haves = kwargs.get('must_haves', [])
filters = kwargs.get('filters', {})
should_haves = []
must_not_haves = []
index = 0
for query in queries:
if queries[index] != '':
clause = query_clause(index, queries, ops, fields, types, search_index=search_index, doc_type=doc_type)
if clause:
if ops[index] == 'not':
must_not_haves.append(clause)
elif ops[index] == 'and':
must_haves.append(clause)
else:
should_haves.append(clause)
index = index + 1
return {
"filter": filters,
"query": {
"bool": {
"must": must_haves,
"must_not": must_not_haves,
"should": should_haves,
}
},
}
def _fix_object_fields(fields):
"""
Adjusts field names for nested object fields.
Elasticsearch is able to search through nested object fields, provided that the field name is specified appropriately in the query.
Appending .* to the field name (for example, transferMetadata.*) causes Elasticsearch to consider any of the values within key/value pairs nested in the object being searched.
Without doing this, Elasticsearch will attempt to match the value of transferMetadata itself, which will always fail since it's an object and not a string.
"""
return [field + '.*' if field in OBJECT_FIELDS else field for field in fields]
def _parse_date_range(field):
"""
Splits a range field into start and end values.
Expects data in the following format:
start:end
"""
if ':' not in field:
return ('', field)
return field.split(':')[:2]
def _normalize_date(date):
try:
return dateutil.parser.parse(date).strftime('%Y-%m-%d')
except ValueError:
raise ValueError("Invalid date received ({}); ignoring date query".format(date))
def filter_search_fields(search_fields, index=None, doc_type=None):
"""
Given search fields which search nested documents with wildcards (such as "transferMetadata.*"), returns a list of subfields filtered to contain only string-type fields.
When searching all fields of nested documents of mixed types using query_string queries, query_string queries may fail because the way the query string is interpreted depends on the type of the field being searched.
For example, given a nested document containing a string field and a date field, a query_string of "foo" would fail when Elasticsearch attempts to parse it as a date to match it against the date field.
This function uses the actual current mapping, so it supports automatically-mapped fields.
Sample input and output, given a nested document containing three fields, "Bagging-Date" (date), "Bag-Name" (string), and "Bag-Type" (string):
["transferMetadata.*"] #=> ["transferMetadata.Bag-Name", "transferMetadata.Bag-Type"]
:param list search_fields: A list of strings representing nested object names.
:param str index: The name of the search index, used to look up the mapping document.
If not provided, the original search_fields is returned unmodified.
:param str doc_type: The name of the document type within the search index, used to look up the mapping document.
If not provided, the original search_fields is returned unmodified.
"""
if index is None or doc_type is None:
return search_fields
new_fields = []
for field in search_fields:
# Not a wildcard nested document search, so just add to the list as-is
if not field.endswith('.*'):
new_fields.append(field)
continue
try:
field_name = field.rsplit('.', 1)[0]
conn = elasticSearchFunctions.connect_and_create_index(index)
mapping = elasticSearchFunctions.get_type_mapping(conn, index, doc_type)
subfields = mapping[doc_type]['properties'][field_name]['properties']
except KeyError:
# The requested field doesn't exist in the index, so don't worry about validating subfields
new_fields.append(field)
else:
for subfield, field_properties in subfields.iteritems():
if field_properties['type'] == 'string':
new_fields.append(field_name + '.' + subfield)
return new_fields
def query_clause(index, queries, ops, fields, types, search_index=None, doc_type=None):
if fields[index] == '':
search_fields = []
else:
search_fields = filter_search_fields(_fix_object_fields([fields[index]]), index=search_index, doc_type=doc_type)
if types[index] == 'term':
# a blank term should be ignored because it prevents any results: you
# can never find a blank term
#
# TODO: add condition to deal with a query with no clauses because all have
# been ignored
if (queries[index] in ('', '*')):
return
else:
if len(search_fields) == 0:
search_fields = ['_all']
return {'multi_match': {'query': queries[index], 'fields': search_fields}}
elif types[index] == 'string':
return {'query_string': {'query': queries[index], 'fields': search_fields}}
elif types[index] == 'range':
start, end = _parse_date_range(queries[index])
try:
start = _normalize_date(start)
end = _normalize_date(end)
except ValueError as e:
logger.info(str(e))
return
return {'range': {fields[index]: {'gte': start, 'lte': end}}}
def indexed_count(index, types=None, query=None):
if types is not None:
types = ','.join(types)
try:
conn = Elasticsearch(hosts=elasticSearchFunctions.getElasticsearchServerHostAndPort())
return conn.count(index=index, doc_type=types, body=query)['count']
except:
return 0
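# Illustrative sketch (not part of the original module): assembling an
# Elasticsearch bool query from hand-written parameter lists, mirroring what
# search_parameter_prep() would extract from a request. The field name and
# search term below are examples only.
def _example_assemble_query():
    queries = ['*', 'bag']
    ops = ['or', 'and']     # the first op mirrors the implicit default
    fields = ['', 'transferMetadata.Bag-Name']
    types = ['term', 'term']
    # Yields a query whose "must" clause holds a multi_match on the term 'bag'
    # against the transferMetadata.Bag-Name subfield; the '*' query is ignored.
    return assemble_query(queries, ops, fields, types)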
|
agpl-3.0
|
ereOn/loaded
|
loaded/main.py
|
1
|
1773
|
"""
Scripts.
"""
import click
import logging
import platform
import six
from tornado.ioloop import (
IOLoop,
PeriodicCallback,
)
from .server import agent_application
@click.group()
@click.option('-d', '--debug/--no-debug', default=False)
@click.pass_context
def main_loaded(ctx, debug):
"""
Loaded build agent.
"""
ctx.obj = {}
if debug:
click.secho("Running in debug mode.", fg='cyan')
ctx.obj['DEBUG'] = debug
logging.basicConfig()
@main_loaded.command(help="Run a Loaded agent locally.")
@click.option('--port', type=int, default=9995, help="The port to listen on.")
@click.option(
'--address',
type=six.text_type,
default='0.0.0.0',
help="The address to listen on.",
)
@click.pass_context
def agent(ctx, port, address):
if not ctx.obj['DEBUG']:
logging.getLogger('tornado.access').setLevel(logging.ERROR)
agent_application.listen(port=port, address=address)
click.echo(agent_application.default_host)
click.echo(
"Started web server on {address}:{port}".format(
address=address,
port=port,
),
)
if platform.system() == 'Windows':
# On Windows, select call can't be interrupted by SIGINT so we add
# a periodic callback that will wake-up the event loop and raise
# the KeyboardInterrupt if needed.
periodic_callback = PeriodicCallback(lambda: None, 100)
periodic_callback.start()
try:
IOLoop.current().start()
except KeyboardInterrupt as ex:
click.secho(
"Received Ctrl+C: shutting down web server...",
fg='yellow',
bold=True,
)
finally:
IOLoop.current().stop()
click.echo("Web server stopped.")
|
lgpl-3.0
|
lsyiverson/shadowsocks
|
shadowsocks/daemon.py
|
694
|
5602
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2014-2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, \
with_statement
import os
import sys
import logging
import signal
import time
from shadowsocks import common, shell
# this module is ported from ShadowVPN daemon.c
def daemon_exec(config):
if 'daemon' in config:
if os.name != 'posix':
raise Exception('daemon mode is only supported on Unix')
command = config['daemon']
if not command:
command = 'start'
pid_file = config['pid-file']
log_file = config['log-file']
if command == 'start':
daemon_start(pid_file, log_file)
elif command == 'stop':
daemon_stop(pid_file)
# always exit after daemon_stop
sys.exit(0)
elif command == 'restart':
daemon_stop(pid_file)
daemon_start(pid_file, log_file)
else:
raise Exception('unsupported daemon command %s' % command)
def write_pid_file(pid_file, pid):
import fcntl
import stat
try:
fd = os.open(pid_file, os.O_RDWR | os.O_CREAT,
stat.S_IRUSR | stat.S_IWUSR)
except OSError as e:
shell.print_exception(e)
return -1
flags = fcntl.fcntl(fd, fcntl.F_GETFD)
assert flags != -1
flags |= fcntl.FD_CLOEXEC
r = fcntl.fcntl(fd, fcntl.F_SETFD, flags)
assert r != -1
# There is no platform independent way to implement fcntl(fd, F_SETLK, &fl)
# via fcntl.fcntl. So use lockf instead
try:
fcntl.lockf(fd, fcntl.LOCK_EX | fcntl.LOCK_NB, 0, 0, os.SEEK_SET)
except IOError:
r = os.read(fd, 32)
if r:
logging.error('already started at pid %s' % common.to_str(r))
else:
logging.error('already started')
os.close(fd)
return -1
os.ftruncate(fd, 0)
os.write(fd, common.to_bytes(str(pid)))
return 0
def freopen(f, mode, stream):
oldf = open(f, mode)
oldfd = oldf.fileno()
newfd = stream.fileno()
os.close(newfd)
os.dup2(oldfd, newfd)
def daemon_start(pid_file, log_file):
def handle_exit(signum, _):
if signum == signal.SIGTERM:
sys.exit(0)
sys.exit(1)
signal.signal(signal.SIGINT, handle_exit)
signal.signal(signal.SIGTERM, handle_exit)
# fork only once because we are sure parent will exit
pid = os.fork()
assert pid != -1
if pid > 0:
# parent waits for its child
time.sleep(5)
sys.exit(0)
# child signals its parent to exit
ppid = os.getppid()
pid = os.getpid()
if write_pid_file(pid_file, pid) != 0:
os.kill(ppid, signal.SIGINT)
sys.exit(1)
os.setsid()
signal.signal(signal.SIGHUP, signal.SIG_IGN)
print('started')
os.kill(ppid, signal.SIGTERM)
sys.stdin.close()
try:
freopen(log_file, 'a', sys.stdout)
freopen(log_file, 'a', sys.stderr)
except IOError as e:
shell.print_exception(e)
sys.exit(1)
def daemon_stop(pid_file):
import errno
try:
with open(pid_file) as f:
buf = f.read()
pid = common.to_str(buf)
if not buf:
logging.error('not running')
return
except IOError as e:
shell.print_exception(e)
if e.errno == errno.ENOENT:
# always exit 0 if we are sure daemon is not running
logging.error('not running')
return
sys.exit(1)
pid = int(pid)
if pid > 0:
try:
os.kill(pid, signal.SIGTERM)
except OSError as e:
if e.errno == errno.ESRCH:
logging.error('not running')
# always exit 0 if we are sure daemon is not running
return
shell.print_exception(e)
sys.exit(1)
else:
logging.error('pid is not positive: %d', pid)
# sleep for maximum 10s
for i in range(0, 200):
try:
# query for the pid
os.kill(pid, 0)
except OSError as e:
if e.errno == errno.ESRCH:
break
time.sleep(0.05)
else:
logging.error('timed out when stopping pid %d', pid)
sys.exit(1)
print('stopped')
os.unlink(pid_file)
def set_user(username):
if username is None:
return
import pwd
import grp
try:
pwrec = pwd.getpwnam(username)
except KeyError:
logging.error('user not found: %s' % username)
raise
user = pwrec[0]
uid = pwrec[2]
gid = pwrec[3]
cur_uid = os.getuid()
if uid == cur_uid:
return
if cur_uid != 0:
logging.error('can not set user as nonroot user')
# will raise later
# inspired by supervisor
if hasattr(os, 'setgroups'):
groups = [grprec[2] for grprec in grp.getgrall() if user in grprec[3]]
groups.insert(0, gid)
os.setgroups(groups)
os.setgid(gid)
os.setuid(uid)
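# Illustrative sketch (not part of the original module): the config dict that
# daemon_exec() expects when asked to daemonize. The file paths are
# placeholders; 'daemon' may also be 'stop' or 'restart'.
def _example_daemonize():
    config = {
        'daemon': 'start',
        'pid-file': '/var/run/shadowsocks.pid',
        'log-file': '/var/log/shadowsocks.log',
    }
    # forks, writes the pid file and redirects stdout/stderr to the log file
    daemon_exec(config)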
|
apache-2.0
|
github-borat/cinder
|
cinder/tests/integrated/api/client.py
|
1
|
7946
|
# Copyright (c) 2011 Justin Santa Barbara
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
import requests
import six.moves.urllib.parse as urlparse
from cinder.openstack.common.gettextutils import _
from cinder.openstack.common import jsonutils
from cinder.openstack.common import log as logging
LOG = logging.getLogger(__name__)
class OpenStackApiException(Exception):
def __init__(self, message=None, response=None):
self.response = response
if not message:
message = 'Unspecified error'
if response:
message = _('%(message)s\nStatus Code: %(_status)s\n'
'Body: %(_body)s') % {'_status': response.status_code,
'_body': response.text}
super(OpenStackApiException, self).__init__(message)
class OpenStackApiAuthenticationException(OpenStackApiException):
def __init__(self, response=None, message=None):
if not message:
message = _("Authentication error")
super(OpenStackApiAuthenticationException, self).__init__(message,
response)
class OpenStackApiAuthorizationException(OpenStackApiException):
def __init__(self, response=None, message=None):
if not message:
message = _("Authorization error")
super(OpenStackApiAuthorizationException, self).__init__(message,
response)
class OpenStackApiNotFoundException(OpenStackApiException):
def __init__(self, response=None, message=None):
if not message:
message = _("Item not found")
super(OpenStackApiNotFoundException, self).__init__(message, response)
class TestOpenStackClient(object):
"""Simple OpenStack API Client.
This is a really basic OpenStack API client that is under our control,
so we can make changes / insert hooks for testing
"""
def __init__(self, auth_user, auth_key, auth_uri):
super(TestOpenStackClient, self).__init__()
self.auth_result = None
self.auth_user = auth_user
self.auth_key = auth_key
self.auth_uri = auth_uri
# default project_id
self.project_id = 'openstack'
def request(self, url, method='GET', body=None, headers=None,
ssl_verify=True, stream=False):
_headers = {'Content-Type': 'application/json'}
_headers.update(headers or {})
parsed_url = urlparse.urlparse(url)
port = parsed_url.port
hostname = parsed_url.hostname
scheme = parsed_url.scheme
if netaddr.valid_ipv6(hostname):
hostname = "[%s]" % hostname
relative_url = parsed_url.path
if parsed_url.query:
relative_url = relative_url + "?" + parsed_url.query
LOG.info(_("Doing %(method)s on %(relative_url)s"),
{'method': method, 'relative_url': relative_url})
if body:
LOG.info(_("Body: %s") % body)
if port:
_url = "%s://%s:%d%s" % (scheme, hostname, int(port), relative_url)
else:
_url = "%s://%s%s" % (scheme, hostname, relative_url)
response = requests.request(method, _url, data=body, headers=_headers,
verify=ssl_verify, stream=stream)
return response
def _authenticate(self):
if self.auth_result:
return self.auth_result
auth_uri = self.auth_uri
headers = {'X-Auth-User': self.auth_user,
'X-Auth-Key': self.auth_key,
'X-Auth-Project-Id': self.project_id}
response = self.request(auth_uri,
headers=headers)
http_status = response.status_code
LOG.debug("%(auth_uri)s => code %(http_status)s",
{'auth_uri': auth_uri, 'http_status': http_status})
if http_status == 401:
raise OpenStackApiAuthenticationException(response=response)
self.auth_result = response.headers
return self.auth_result
def api_request(self, relative_uri, check_response_status=None, **kwargs):
auth_result = self._authenticate()
# NOTE(justinsb): httplib 'helpfully' converts headers to lower case
base_uri = auth_result['x-server-management-url']
full_uri = '%s/%s' % (base_uri, relative_uri)
headers = kwargs.setdefault('headers', {})
headers['X-Auth-Token'] = auth_result['x-auth-token']
response = self.request(full_uri, **kwargs)
http_status = response.status_code
LOG.debug("%(relative_uri)s => code %(http_status)s",
{'relative_uri': relative_uri, 'http_status': http_status})
if check_response_status:
if http_status not in check_response_status:
if http_status == 404:
raise OpenStackApiNotFoundException(response=response)
elif http_status == 401:
raise OpenStackApiAuthorizationException(response=response)
else:
raise OpenStackApiException(
message=_("Unexpected status code"),
response=response)
return response
def _decode_json(self, response):
body = response.text
LOG.debug("Decoding JSON: %s" % (body))
if body:
return jsonutils.loads(body)
else:
return ""
def api_get(self, relative_uri, **kwargs):
kwargs.setdefault('check_response_status', [200])
response = self.api_request(relative_uri, **kwargs)
return self._decode_json(response)
def api_post(self, relative_uri, body, **kwargs):
kwargs['method'] = 'POST'
if body:
headers = kwargs.setdefault('headers', {})
headers['Content-Type'] = 'application/json'
kwargs['body'] = jsonutils.dumps(body)
kwargs.setdefault('check_response_status', [200, 202])
response = self.api_request(relative_uri, **kwargs)
return self._decode_json(response)
def api_put(self, relative_uri, body, **kwargs):
kwargs['method'] = 'PUT'
if body:
headers = kwargs.setdefault('headers', {})
headers['Content-Type'] = 'application/json'
kwargs['body'] = jsonutils.dumps(body)
kwargs.setdefault('check_response_status', [200, 202, 204])
response = self.api_request(relative_uri, **kwargs)
return self._decode_json(response)
def api_delete(self, relative_uri, **kwargs):
kwargs['method'] = 'DELETE'
kwargs.setdefault('check_response_status', [200, 202, 204])
return self.api_request(relative_uri, **kwargs)
def get_volume(self, volume_id):
return self.api_get('/volumes/%s' % volume_id)['volume']
def get_volumes(self, detail=True):
rel_url = '/volumes/detail' if detail else '/volumes'
return self.api_get(rel_url)['volumes']
def post_volume(self, volume):
return self.api_post('/volumes', volume)['volume']
def delete_volume(self, volume_id):
return self.api_delete('/volumes/%s' % volume_id)
def put_volume(self, volume_id, volume):
return self.api_put('/volumes/%s' % volume_id, volume)['volume']
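# Illustrative sketch (not part of the original module): exercising the client
# against a test endpoint. The credentials and auth URI are placeholders.
def _example_list_volumes():
    client = TestOpenStackClient('admin', 'secrete', 'http://127.0.0.1:8776/v1')
    volumes = client.get_volumes(detail=False)
    for volume in volumes:
        LOG.debug("found volume %s", volume)
    return volumes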
|
apache-2.0
|
apehua/pilas
|
pilasengine/actores/estudiante.py
|
5
|
7013
|
# -*- encoding: utf-8 -*-
# pilas engine: un motor para hacer videojuegos
#
# Copyright 2010-2014 - Hugo Ruscitti
# License: LGPLv3 (see http://www.gnu.org/licenses/lgpl.html)
#
# Website - http://www.pilas-engine.com.ar
import collections
from pilasengine import habilidades
from pilasengine import comportamientos
estructura_comportamiento = collections.namedtuple(
"Comportamiento",
['objeto', 'args', 'kwargs'])
class Estudiante(object):
"""Componente que permite a los actores aprender habilidades o
realizar comportamientos.
"""
def __init__(self):
"""Inicializa el componente."""
self._habilidades = []
self.comportamiento_actual = None
self.comportamientos = []
self.repetir_comportamientos_por_siempre = False
self.habilidades = habilidades.ProxyHabilidades(self._habilidades)
def aprender(self, classname, *k, **w):
"""Comienza a realizar una habilidad indicada por parametros.
:param classname: Referencia a la clase que representa la habilidad.
"""
if isinstance(classname, str):
classname = self.pilas.habilidades.buscar_habilidad_por_nombre(classname)
if issubclass(classname, habilidades.Habilidad):
if self.tiene_habilidad(classname):
self.eliminar_habilidad(classname)
self.agregar_habilidad(classname, *k, **w)
else:
raise Exception('El actor solo puede aprender clases que hereden \
de pilasengine.habilidades.Habilidad')
def agregar_habilidad(self, classname, *k, **w):
"""Agrega una habilidad a la lista de cosas que puede hacer un actor.
:param habilidad: Referencia a la clase que representa la habilidad.
"""
habilidad = classname(self.pilas)
habilidad.iniciar(self, *k, **w)
self._habilidades.append(habilidad)
def eliminar_habilidad(self, classname):
""" Elimina una habilidad asociada a un Actor.
:param classname: Referencia a la clase que representa la habilidad.
"""
referencia_habilidad = self.obtener_habilidad(classname)
if referencia_habilidad:
self._habilidades.remove(referencia_habilidad)
def tiene_habilidad(self, classname):
"""Comprueba si el actor ha aprendido la habilidad indicada.
:param classname: Referencia a la clase que representa la habilidad.
:return: Devuelve True si el actor tiene asignada la habilidad
"""
habilidades_actuales = [habilidad.__class__ for habilidad in self._habilidades]
return (classname in habilidades_actuales)
def obtener_habilidad(self, classname):
"""Obtiene la habilidad asociada a un Actor.
:param habilidad: Referencia a la clase que representa la habilidad.
:return: Devuelve None si no se encontró.
"""
su_habilidad = None
if isinstance(classname, str):
classname = self.pilas.habilidades.buscar_habilidad_por_nombre(classname)
for h in self._habilidades:
if h.__class__ == classname:
su_habilidad = h
break
return su_habilidad
def eliminar_habilidades(self):
"Elimina todas las habilidades asociadas al actor."
for h in self._habilidades:
h.eliminar()
def actualizar_habilidades(self):
"Realiza una actualización sobre todas las habilidades."
for h in self._habilidades:
h.actualizar()
def tiene_comportamiento(self, classname):
"""Comprueba si el actor tiene el comportamiento indicado.
:param classname: Referencia a la clase que representa el
comportamiento.
"""
comportamientos_actuales = [comportamiento.objeto.__class__
for comportamiento in self.comportamientos]
return (classname in comportamientos_actuales)
def hacer_luego(self, classname, repetir_por_siempre=False, *args, **kwargs):
"""Define un nuevo comportamiento para realizar al final.
Los actores pueden tener una cadena de comportamientos, este
metodo agrega el comportamiento al final de la cadena.
:param comportamiento: Referencia al comportamiento.
:param repetir_por_siempre: Si el comportamiento se volverá a ejecutar
luego de terminar.
"""
print "Este metodo entra en desuso, utilice el metodo 'hacer' en su lugar ..."
return self.hacer(classname, *args, **kwargs)
def hacer_inmediatamente(self, classname, *args, **kwargs):
self.eliminar_comportamientos()
self._adoptar_el_siguiente_comportamiento()
self.hacer(classname, *args, **kwargs)
def hacer(self, classname, *args, **kwargs):
"""Define el comportamiento para el actor de manera inmediata.
:param classname: Referencia al comportamiento a realizar.
"""
if isinstance(classname, str):
classname = self.pilas.comportamientos.buscar_comportamiento_por_nombre(classname)
if issubclass(classname, comportamientos.Comportamiento):
self._hacer(classname, *args, **kwargs)
else:
raise Exception('''El actor solo puede "hacer" clases que hereden
de pilasengine.comportamientos.Comportamiento''')
def _hacer(self, classname, *args, **kwargs):
comportamiento = estructura_comportamiento(
classname(self.pilas), args, kwargs)
self.comportamientos.append(comportamiento)
def eliminar_comportamientos(self):
"Elimina todos los comportamientos que tiene que hacer el actor."
for c in list(self.comportamientos):
self.comportamientos.remove(c)
def actualizar_comportamientos(self):
"Actualiza la lista de comportamientos"
termina = None
if self.comportamiento_actual:
termina = self.comportamiento_actual.objeto.actualizar()
if termina:
if self.repetir_comportamientos_por_siempre:
self.comportamientos.insert(0, self.comportamiento_actual)
self._adoptar_el_siguiente_comportamiento()
else:
self._adoptar_el_siguiente_comportamiento()
def _adoptar_el_siguiente_comportamiento(self):
""" Obtiene el siguiente comportamiento de la lista de comportamientos
y ejecuta su método iniciar"""
if self.comportamientos:
comportamiento = self.comportamientos.pop(0)
self.comportamiento_actual = comportamiento
self.comportamiento_actual.objeto.iniciar(
self, *self.comportamiento_actual.args,
**self.comportamiento_actual.kwargs)
else:
self.comportamiento_actual = None
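# Illustrative sketch (not part of the original module): how an actor-like
# object that mixes in Estudiante typically learns a skill and queues a
# behaviour. "MiHabilidad" and "MiComportamiento" are hypothetical subclasses;
# real ones come from pilasengine.habilidades / pilasengine.comportamientos.
def _ejemplo_estudiante(actor, MiHabilidad, MiComportamiento):
    actor.aprender(MiHabilidad)            # adds (or replaces) the skill
    actor.hacer(MiComportamiento)          # appends to the behaviour chain
    actor.actualizar_habilidades()         # normally called each frame by the engine
    actor.actualizar_comportamientos()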
|
lgpl-3.0
|
jnovinger/django
|
tests/m2m_regress/tests.py
|
273
|
4695
|
from __future__ import unicode_literals
from django.core.exceptions import FieldError
from django.test import TestCase
from django.utils import six
from .models import (
Entry, Line, Post, RegressionModelSplit, SelfRefer, SelfReferChild,
SelfReferChildSibling, Tag, TagCollection, Worksheet,
)
class M2MRegressionTests(TestCase):
def test_multiple_m2m(self):
# Multiple m2m references to model must be distinguished when
# accessing the relations through an instance attribute.
s1 = SelfRefer.objects.create(name='s1')
s2 = SelfRefer.objects.create(name='s2')
s3 = SelfRefer.objects.create(name='s3')
s1.references.add(s2)
s1.related.add(s3)
e1 = Entry.objects.create(name='e1')
t1 = Tag.objects.create(name='t1')
t2 = Tag.objects.create(name='t2')
e1.topics.add(t1)
e1.related.add(t2)
self.assertQuerysetEqual(s1.references.all(), ["<SelfRefer: s2>"])
self.assertQuerysetEqual(s1.related.all(), ["<SelfRefer: s3>"])
self.assertQuerysetEqual(e1.topics.all(), ["<Tag: t1>"])
self.assertQuerysetEqual(e1.related.all(), ["<Tag: t2>"])
def test_internal_related_name_not_in_error_msg(self):
# The secret internal related names for self-referential many-to-many
# fields shouldn't appear in the list when an error is made.
six.assertRaisesRegex(
self, FieldError,
"Choices are: id, name, references, related, selfreferchild, selfreferchildsibling$",
lambda: SelfRefer.objects.filter(porcupine='fred')
)
def test_m2m_inheritance_symmetry(self):
# Test to ensure that the relationship between two inherited models
# with a self-referential m2m field maintains symmetry
sr_child = SelfReferChild(name="Hanna")
sr_child.save()
sr_sibling = SelfReferChildSibling(name="Beth")
sr_sibling.save()
sr_child.related.add(sr_sibling)
self.assertQuerysetEqual(sr_child.related.all(), ["<SelfRefer: Beth>"])
self.assertQuerysetEqual(sr_sibling.related.all(), ["<SelfRefer: Hanna>"])
def test_m2m_pk_field_type(self):
# Regression for #11311 - The primary key for models in a m2m relation
# doesn't have to be an AutoField
w = Worksheet(id='abc')
w.save()
w.delete()
def test_add_m2m_with_base_class(self):
# Regression for #11956 -- You can add an object to a m2m with the
# base class without causing integrity errors
t1 = Tag.objects.create(name='t1')
t2 = Tag.objects.create(name='t2')
c1 = TagCollection.objects.create(name='c1')
c1.tags = [t1, t2]
c1 = TagCollection.objects.get(name='c1')
self.assertQuerysetEqual(c1.tags.all(), ["<Tag: t1>", "<Tag: t2>"], ordered=False)
self.assertQuerysetEqual(t1.tag_collections.all(), ["<TagCollection: c1>"])
def test_manager_class_caching(self):
e1 = Entry.objects.create()
e2 = Entry.objects.create()
t1 = Tag.objects.create()
t2 = Tag.objects.create()
# Get same manager twice in a row:
self.assertIs(t1.entry_set.__class__, t1.entry_set.__class__)
self.assertIs(e1.topics.__class__, e1.topics.__class__)
# Get same manager for different instances
self.assertIs(e1.topics.__class__, e2.topics.__class__)
self.assertIs(t1.entry_set.__class__, t2.entry_set.__class__)
def test_m2m_abstract_split(self):
# Regression for #19236 - an abstract class with a 'split' method
# causes a TypeError in add_lazy_relation
m1 = RegressionModelSplit(name='1')
m1.save()
def test_assigning_invalid_data_to_m2m_doesnt_clear_existing_relations(self):
t1 = Tag.objects.create(name='t1')
t2 = Tag.objects.create(name='t2')
c1 = TagCollection.objects.create(name='c1')
c1.tags = [t1, t2]
with self.assertRaises(TypeError):
c1.tags = 7
c1.refresh_from_db()
self.assertQuerysetEqual(c1.tags.order_by('name'), ["<Tag: t1>", "<Tag: t2>"])
def test_multiple_forwards_only_m2m(self):
# Regression for #24505 - Multiple ManyToManyFields to same "to"
# model with related_name set to '+'.
foo = Line.objects.create(name='foo')
bar = Line.objects.create(name='bar')
post = Post.objects.create()
post.primary_lines.add(foo)
post.secondary_lines.add(bar)
self.assertQuerysetEqual(post.primary_lines.all(), ['<Line: foo>'])
self.assertQuerysetEqual(post.secondary_lines.all(), ['<Line: bar>'])
|
bsd-3-clause
|
peterjoel/servo
|
tests/wpt/web-platform-tests/tools/third_party/py/py/_log/log.py
|
60
|
6003
|
"""
basic logging functionality based on a producer/consumer scheme.
XXX implement this API: (maybe put it into slogger.py?)
log = Logger(
info=py.log.STDOUT,
debug=py.log.STDOUT,
command=None)
log.info("hello", "world")
log.command("hello", "world")
log = Logger(info=Logger(something=...),
debug=py.log.STDOUT,
command=None)
"""
import py
import sys
class Message(object):
def __init__(self, keywords, args):
self.keywords = keywords
self.args = args
def content(self):
return " ".join(map(str, self.args))
def prefix(self):
return "[%s] " % (":".join(self.keywords))
def __str__(self):
return self.prefix() + self.content()
class Producer(object):
""" (deprecated) Log producer API which sends messages to be logged
to a 'consumer' object, which then prints them to stdout,
stderr, files, etc. Used extensively by PyPy-1.1.
"""
Message = Message # to allow later customization
keywords2consumer = {}
def __init__(self, keywords, keywordmapper=None, **kw):
if hasattr(keywords, 'split'):
keywords = tuple(keywords.split())
self._keywords = keywords
if keywordmapper is None:
keywordmapper = default_keywordmapper
self._keywordmapper = keywordmapper
def __repr__(self):
return "<py.log.Producer %s>" % ":".join(self._keywords)
def __getattr__(self, name):
if '_' in name:
raise AttributeError(name)
producer = self.__class__(self._keywords + (name,))
setattr(self, name, producer)
return producer
def __call__(self, *args):
""" write a message to the appropriate consumer(s) """
func = self._keywordmapper.getconsumer(self._keywords)
if func is not None:
func(self.Message(self._keywords, args))
class KeywordMapper:
def __init__(self):
self.keywords2consumer = {}
def getstate(self):
return self.keywords2consumer.copy()
def setstate(self, state):
self.keywords2consumer.clear()
self.keywords2consumer.update(state)
def getconsumer(self, keywords):
""" return a consumer matching the given keywords.
tries to find the most suitable consumer by walking the list of keywords
from the back; the first consumer registered for a matching keyword prefix
is returned (falling back to the default consumer)
"""
for i in range(len(keywords), 0, -1):
try:
return self.keywords2consumer[keywords[:i]]
except KeyError:
continue
return self.keywords2consumer.get('default', default_consumer)
def setconsumer(self, keywords, consumer):
""" set a consumer for a set of keywords. """
# normalize to tuples
if isinstance(keywords, str):
keywords = tuple(filter(None, keywords.split()))
elif hasattr(keywords, '_keywords'):
keywords = keywords._keywords
elif not isinstance(keywords, tuple):
raise TypeError("key %r is not a string or tuple" % (keywords,))
if consumer is not None and not py.builtin.callable(consumer):
if not hasattr(consumer, 'write'):
raise TypeError(
"%r should be None, callable or file-like" % (consumer,))
consumer = File(consumer)
self.keywords2consumer[keywords] = consumer
def default_consumer(msg):
""" the default consumer, prints the message to stdout (using 'print') """
sys.stderr.write(str(msg)+"\n")
default_keywordmapper = KeywordMapper()
def setconsumer(keywords, consumer):
default_keywordmapper.setconsumer(keywords, consumer)
def setstate(state):
default_keywordmapper.setstate(state)
def getstate():
return default_keywordmapper.getstate()
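# Usage sketch (illustrative only; the keyword names and file name below are
# hypothetical, and py.log.* is assumed to re-export the objects defined in
# this module). A Producer emits messages; setconsumer() routes them to the
# consumer registered for the most specific matching keyword prefix:
#
#   import py
#   log = py.log.Producer("myapp")
#   py.log.setconsumer("myapp db", py.log.STDOUT)
#   py.log.setconsumer("myapp", py.log.Path("myapp.log"))
#   log.db("connected")   # ('myapp', 'db') has its own consumer -> stdout
#   log.http("GET /")     # no ('myapp', 'http') consumer -> falls back to ('myapp',)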
#
# Consumers
#
class File(object):
""" log consumer wrapping a file(-like) object """
def __init__(self, f):
assert hasattr(f, 'write')
# assert isinstance(f, file) or not hasattr(f, 'open')
self._file = f
def __call__(self, msg):
""" write a message to the log """
self._file.write(str(msg) + "\n")
if hasattr(self._file, 'flush'):
self._file.flush()
class Path(object):
""" log consumer that opens and writes to a Path """
def __init__(self, filename, append=False,
delayed_create=False, buffering=False):
self._append = append
self._filename = str(filename)
self._buffering = buffering
if not delayed_create:
self._openfile()
def _openfile(self):
mode = self._append and 'a' or 'w'
f = open(self._filename, mode)
self._file = f
def __call__(self, msg):
""" write a message to the log """
if not hasattr(self, "_file"):
self._openfile()
self._file.write(str(msg) + "\n")
if not self._buffering:
self._file.flush()
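# Usage note (sketch; the file name is just an example): Path("run.log",
# append=True, delayed_create=True) opens the file only when the first
# message arrives, and with the default buffering=False every message is
# flushed to disk immediately after it is written.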
def STDOUT(msg):
""" consumer that writes to sys.stdout """
sys.stdout.write(str(msg)+"\n")
def STDERR(msg):
""" consumer that writes to sys.stderr """
sys.stderr.write(str(msg)+"\n")
class Syslog:
""" consumer that writes to the syslog daemon """
def __init__(self, priority=None):
if priority is None:
priority = self.LOG_INFO
self.priority = priority
def __call__(self, msg):
""" write a message to the log """
import syslog
syslog.syslog(self.priority, str(msg))
try:
import syslog
except ImportError:
pass
else:
for _prio in "EMERG ALERT CRIT ERR WARNING NOTICE INFO DEBUG".split():
_prio = "LOG_" + _prio
try:
setattr(Syslog, _prio, getattr(syslog, _prio))
except AttributeError:
pass
|
mpl-2.0
|
emk/pyjamas
|
examples/addonsgallery/AddonsGallery.py
|
5
|
2840
|
import pyjd # dummy for pyjs
from pyjamas.ui.Button import Button
from pyjamas.ui.RootPanel import RootPanel
from pyjamas.ui.HTML import HTML
from pyjamas.ui.DockPanel import DockPanel
from pyjamas.ui import HasAlignment
from pyjamas.ui.Hyperlink import Hyperlink
from pyjamas.ui.VerticalPanel import VerticalPanel
from pyjamas import Window
from SinkList import SinkList
from pyjamas import History
import IntroTab
import TooltipTab
import AutoCompleteTab
import CanvasTab
class AddonsGallery:
def onHistoryChanged(self, token):
info = self.sink_list.find(token)
if info:
self.show(info, False)
else:
self.showIntro()
def onModuleLoad(self):
self.curInfo=''
self.curSink=None
self.description=HTML()
self.sink_list=SinkList()
self.panel=DockPanel()
self.loadSinks()
self.sinkContainer = DockPanel()
self.sinkContainer.setStyleName("ks-Sink")
vp=VerticalPanel()
vp.setWidth("100%")
vp.add(self.description)
vp.add(self.sinkContainer)
self.description.setStyleName("ks-Info")
self.panel.add(self.sink_list, DockPanel.WEST)
self.panel.add(vp, DockPanel.CENTER)
self.panel.setCellVerticalAlignment(self.sink_list, HasAlignment.ALIGN_TOP)
self.panel.setCellWidth(vp, "100%")
History.addHistoryListener(self)
RootPanel().add(self.panel)
initToken = History.getToken()
if len(initToken):
self.onHistoryChanged(initToken)
else:
self.showIntro()
def show(self, info, affectHistory=None):
if info == self.curInfo: return
self.curInfo = info
if self.curSink is not None:
self.curSink.onHide()
self.sinkContainer.remove(self.curSink)
self.curSink = info.getInstance()
self.sink_list.setSinkSelection(info.getName())
self.description.setHTML(info.getDescription())
if (affectHistory):
History.newItem(info.getName())
self.sinkContainer.add(self.curSink, DockPanel.CENTER)
self.sinkContainer.setCellWidth(self.curSink, "100%")
self.sinkContainer.setCellHeight(self.curSink, "100%")
self.sinkContainer.setCellVerticalAlignment(self.curSink, HasAlignment.ALIGN_TOP)
self.curSink.onShow()
def loadSinks(self):
self.sink_list.addSink(IntroTab.init())
self.sink_list.addSink(TooltipTab.init())
self.sink_list.addSink(AutoCompleteTab.init())
self.sink_list.addSink(CanvasTab.init())
def showIntro(self):
self.show(self.sink_list.find("Intro"))
if __name__ == '__main__':
pyjd.setup("./public/AddonsGallery.html")
app = AddonsGallery()
app.onModuleLoad()
pyjd.run()
|
apache-2.0
|
sarahgrogan/scikit-learn
|
sklearn/utils/multiclass.py
|
83
|
12343
|
# Author: Arnaud Joly, Joel Nothman, Hamzeh Alsalhi
#
# License: BSD 3 clause
"""
Multi-class / multi-label utility function
==========================================
"""
from __future__ import division
from collections import Sequence
from itertools import chain
from scipy.sparse import issparse
from scipy.sparse.base import spmatrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
import numpy as np
from ..externals.six import string_types
from .validation import check_array
from ..utils.fixes import bincount
def _unique_multiclass(y):
if hasattr(y, '__array__'):
return np.unique(np.asarray(y))
else:
return set(y)
def _unique_indicator(y):
return np.arange(check_array(y, ['csr', 'csc', 'coo']).shape[1])
_FN_UNIQUE_LABELS = {
'binary': _unique_multiclass,
'multiclass': _unique_multiclass,
'multilabel-indicator': _unique_indicator,
}
def unique_labels(*ys):
"""Extract an ordered array of unique labels
We don't allow:
- mix of multilabel and multiclass (single label) targets
- mix of label indicator matrix and anything else (because there are no explicit labels)
- mix of label indicator matrices of different sizes
- mix of string and integer labels
At the moment, we also don't allow "multiclass-multioutput" input type.
Parameters
----------
*ys : array-likes,
Returns
-------
out : numpy array of shape [n_unique_labels]
An ordered array of unique labels.
Examples
--------
>>> from sklearn.utils.multiclass import unique_labels
>>> unique_labels([3, 5, 5, 5, 7, 7])
array([3, 5, 7])
>>> unique_labels([1, 2, 3, 4], [2, 2, 3, 4])
array([1, 2, 3, 4])
>>> unique_labels([1, 2, 10], [5, 11])
array([ 1, 2, 5, 10, 11])
"""
if not ys:
raise ValueError('No argument has been passed.')
# Check that we don't mix label format
ys_types = set(type_of_target(x) for x in ys)
if ys_types == set(["binary", "multiclass"]):
ys_types = set(["multiclass"])
if len(ys_types) > 1:
raise ValueError("Mix type of y not allowed, got types %s" % ys_types)
label_type = ys_types.pop()
# Check consistency for the indicator format
if (label_type == "multilabel-indicator" and
len(set(check_array(y, ['csr', 'csc', 'coo']).shape[1]
for y in ys)) > 1):
raise ValueError("Multi-label binary indicator input with "
"different numbers of labels")
# Get the unique set of labels
_unique_labels = _FN_UNIQUE_LABELS.get(label_type, None)
if not _unique_labels:
raise ValueError("Unknown label type: %s" % repr(ys))
ys_labels = set(chain.from_iterable(_unique_labels(y) for y in ys))
# Check that we don't mix string type with number type
if (len(set(isinstance(label, string_types) for label in ys_labels)) > 1):
raise ValueError("Mix of label input types (string and number)")
return np.array(sorted(ys_labels))
def _is_integral_float(y):
return y.dtype.kind == 'f' and np.all(y.astype(int) == y)
def is_multilabel(y):
""" Check if ``y`` is in a multilabel format.
Parameters
----------
y : numpy array of shape [n_samples]
Target values.
Returns
-------
out : bool,
Return ``True`` if ``y`` is in a multilabel format, else ``False``.
Examples
--------
>>> import numpy as np
>>> from sklearn.utils.multiclass import is_multilabel
>>> is_multilabel([0, 1, 0, 1])
False
>>> is_multilabel([[1], [0, 2], []])
False
>>> is_multilabel(np.array([[1, 0], [0, 0]]))
True
>>> is_multilabel(np.array([[1], [0], [0]]))
False
>>> is_multilabel(np.array([[1, 0, 0]]))
True
"""
if hasattr(y, '__array__'):
y = np.asarray(y)
if not (hasattr(y, "shape") and y.ndim == 2 and y.shape[1] > 1):
return False
if issparse(y):
if isinstance(y, (dok_matrix, lil_matrix)):
y = y.tocsr()
return (len(y.data) == 0 or np.ptp(y.data) == 0 and
(y.dtype.kind in 'biu' or # bool, int, uint
_is_integral_float(np.unique(y.data))))
else:
labels = np.unique(y)
return len(labels) < 3 and (y.dtype.kind in 'biu' or # bool, int, uint
_is_integral_float(labels))
def type_of_target(y):
"""Determine the type of data indicated by target `y`
Parameters
----------
y : array-like
Returns
-------
target_type : string
One of:
* 'continuous': `y` is an array-like of floats that are not all
integers, and is 1d or a column vector.
* 'continuous-multioutput': `y` is a 2d array of floats that are
not all integers, and both dimensions are of size > 1.
* 'binary': `y` contains <= 2 discrete values and is 1d or a column
vector.
* 'multiclass': `y` contains more than two discrete values, is not a
sequence of sequences, and is 1d or a column vector.
* 'multiclass-multioutput': `y` is a 2d array that contains more
than two discrete values, is not a sequence of sequences, and both
dimensions are of size > 1.
* 'multilabel-indicator': `y` is a label indicator matrix, an array
of two dimensions with at least two columns, and at most 2 unique
values.
* 'unknown': `y` is array-like but none of the above, such as a 3d
array, sequence of sequences, or an array of non-sequence objects.
Examples
--------
>>> import numpy as np
>>> type_of_target([0.1, 0.6])
'continuous'
>>> type_of_target([1, -1, -1, 1])
'binary'
>>> type_of_target(['a', 'b', 'a'])
'binary'
>>> type_of_target([1.0, 2.0])
'binary'
>>> type_of_target([1, 0, 2])
'multiclass'
>>> type_of_target([1.0, 0.0, 3.0])
'multiclass'
>>> type_of_target(['a', 'b', 'c'])
'multiclass'
>>> type_of_target(np.array([[1, 2], [3, 1]]))
'multiclass-multioutput'
>>> type_of_target([[1, 2]])
'multiclass-multioutput'
>>> type_of_target(np.array([[1.5, 2.0], [3.0, 1.6]]))
'continuous-multioutput'
>>> type_of_target(np.array([[0, 1], [1, 1]]))
'multilabel-indicator'
"""
valid = ((isinstance(y, (Sequence, spmatrix)) or hasattr(y, '__array__'))
and not isinstance(y, string_types))
if not valid:
raise ValueError('Expected array-like (array or non-string sequence), '
'got %r' % y)
if is_multilabel(y):
return 'multilabel-indicator'
try:
y = np.asarray(y)
except ValueError:
# Known to fail in numpy 1.3 for array of arrays
return 'unknown'
# The old sequence of sequences format
try:
if (not hasattr(y[0], '__array__') and isinstance(y[0], Sequence)
and not isinstance(y[0], string_types)):
raise ValueError('You appear to be using a legacy multi-label data'
' representation. Sequence of sequences are no'
' longer supported; use a binary array or sparse'
' matrix instead.')
except IndexError:
pass
# Invalid inputs
if y.ndim > 2 or (y.dtype == object and len(y) and
not isinstance(y.flat[0], string_types)):
return 'unknown' # [[[1, 2]]] or [obj_1] and not ["label_1"]
if y.ndim == 2 and y.shape[1] == 0:
return 'unknown' # [[]]
if y.ndim == 2 and y.shape[1] > 1:
suffix = "-multioutput" # [[1, 2], [1, 2]]
else:
suffix = "" # [1, 2, 3] or [[1], [2], [3]]
# check float and contains non-integer float values
if y.dtype.kind == 'f' and np.any(y != y.astype(int)):
# [.1, .2, 3] or [[.1, .2, 3]] or [[1., .2]] and not [1., 2., 3.]
return 'continuous' + suffix
if (len(np.unique(y)) > 2) or (y.ndim >= 2 and len(y[0]) > 1):
return 'multiclass' + suffix # [1, 2, 3] or [[1., 2., 3]] or [[1, 2]]
else:
return 'binary' # [1, 2] or [["a"], ["b"]]
def _check_partial_fit_first_call(clf, classes=None):
"""Private helper function for factorizing common classes param logic
Estimators that implement the ``partial_fit`` API need to be provided with
the list of possible classes at the first call to partial_fit.
Subsequent calls to partial_fit should check that ``classes`` is still
consistent with a previous value of ``clf.classes_`` when provided.
This function returns True if it detects that this was the first call to
``partial_fit`` on ``clf``. In that case the ``classes_`` attribute is also
set on ``clf``.
"""
if getattr(clf, 'classes_', None) is None and classes is None:
raise ValueError("classes must be passed on the first call "
"to partial_fit.")
elif classes is not None:
if getattr(clf, 'classes_', None) is not None:
if not np.all(clf.classes_ == unique_labels(classes)):
raise ValueError(
"`classes=%r` is not the same as on last call "
"to partial_fit, was: %r" % (classes, clf.classes_))
else:
# This is the first call to partial_fit
clf.classes_ = unique_labels(classes)
return True
# classes is None and clf.classes_ has already previously been set:
# nothing to do
return False
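# Usage sketch (``MyClassifier`` is a hypothetical estimator implementing
# ``partial_fit``):
#
#   clf = MyClassifier()
#   _check_partial_fit_first_call(clf, classes=[0, 1, 2])  # True; sets clf.classes_
#   _check_partial_fit_first_call(clf)                     # later batches: False
#   _check_partial_fit_first_call(clf, classes=[0, 1, 3])  # ValueError: mismatch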
def class_distribution(y, sample_weight=None):
"""Compute class priors from multioutput-multiclass target data
Parameters
----------
y : array-like or sparse matrix of size (n_samples, n_outputs)
The labels for each example.
sample_weight : array-like of shape = (n_samples,), optional
Sample weights.
Returns
-------
classes : list of size n_outputs of arrays of size (n_classes,)
List of classes for each column.
n_classes : list of integers of size n_outputs
Number of classes in each column.
class_prior : list of size n_outputs of arrays of size (n_classes,)
Class distribution of each column.
"""
classes = []
n_classes = []
class_prior = []
n_samples, n_outputs = y.shape
if issparse(y):
y = y.tocsc()
y_nnz = np.diff(y.indptr)
for k in range(n_outputs):
col_nonzero = y.indices[y.indptr[k]:y.indptr[k + 1]]
# separate sample weights for zero and non-zero elements
if sample_weight is not None:
nz_samp_weight = np.asarray(sample_weight)[col_nonzero]
zeros_samp_weight_sum = (np.sum(sample_weight) -
np.sum(nz_samp_weight))
else:
nz_samp_weight = None
zeros_samp_weight_sum = y.shape[0] - y_nnz[k]
classes_k, y_k = np.unique(y.data[y.indptr[k]:y.indptr[k + 1]],
return_inverse=True)
class_prior_k = bincount(y_k, weights=nz_samp_weight)
# An explicit zero was found, combine its weight with the weight
# of the implicit zeros
if 0 in classes_k:
class_prior_k[classes_k == 0] += zeros_samp_weight_sum
# If there is an implicit zero and it is not in classes and
# class_prior, make an entry for it
if 0 not in classes_k and y_nnz[k] < y.shape[0]:
classes_k = np.insert(classes_k, 0, 0)
class_prior_k = np.insert(class_prior_k, 0,
zeros_samp_weight_sum)
classes.append(classes_k)
n_classes.append(classes_k.shape[0])
class_prior.append(class_prior_k / class_prior_k.sum())
else:
for k in range(n_outputs):
classes_k, y_k = np.unique(y[:, k], return_inverse=True)
classes.append(classes_k)
n_classes.append(classes_k.shape[0])
class_prior_k = bincount(y_k, weights=sample_weight)
class_prior.append(class_prior_k / class_prior_k.sum())
return (classes, n_classes, class_prior)
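# Usage sketch (illustrative values) for the dense branch above:
#
#   y = np.array([[1, 0],
#                 [2, 0],
#                 [1, 3]])
#   classes, n_classes, class_prior = class_distribution(y)
#   # classes     -> [array([1, 2]), array([0, 3])]
#   # n_classes   -> [2, 2]
#   # class_prior -> two arrays, each approximately [2/3, 1/3]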
|
bsd-3-clause
|
avanov/django
|
tests/admin_changelist/tests.py
|
6
|
37419
|
from __future__ import unicode_literals
import datetime
from django.contrib import admin
from django.contrib.admin.models import LogEntry
from django.contrib.admin.options import IncorrectLookupParameters
from django.contrib.admin.templatetags.admin_list import pagination
from django.contrib.admin.tests import AdminSeleniumWebDriverTestCase
from django.contrib.admin.views.main import ALL_VAR, SEARCH_VAR, ChangeList
from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType
from django.core.urlresolvers import reverse
from django.template import Context, Template
from django.test import TestCase, override_settings
from django.test.client import RequestFactory
from django.utils import formats, six
from .admin import (
BandAdmin, ChildAdmin, ChordsBandAdmin, ConcertAdmin,
CustomPaginationAdmin, CustomPaginator, DynamicListDisplayChildAdmin,
DynamicListDisplayLinksChildAdmin, DynamicListFilterChildAdmin,
DynamicSearchFieldsChildAdmin, FilteredChildAdmin, GroupAdmin,
InvitationAdmin, NoListDisplayLinksParentAdmin, ParentAdmin, QuartetAdmin,
SwallowAdmin, site as custom_site,
)
from .models import (
Band, Child, ChordsBand, ChordsMusician, Concert, CustomIdUser, Event,
Genre, Group, Invitation, Membership, Musician, OrderedObject, Parent,
Quartet, Swallow, SwallowOneToOne, UnorderedObject,
)
@override_settings(ROOT_URLCONF="admin_changelist.urls")
class ChangeListTests(TestCase):
def setUp(self):
self.factory = RequestFactory()
def _create_superuser(self, username):
return User.objects.create(username=username, is_superuser=True)
def _mocked_authenticated_request(self, url, user):
request = self.factory.get(url)
request.user = user
return request
def test_select_related_preserved(self):
"""
Regression test for #10348: ChangeList.get_queryset() shouldn't
overwrite a custom select_related provided by ModelAdmin.get_queryset().
"""
m = ChildAdmin(Child, custom_site)
request = self.factory.get('/child/')
list_select_related = m.get_list_select_related(request)
cl = ChangeList(request, Child, m.list_display, m.list_display_links,
m.list_filter, m.date_hierarchy, m.search_fields,
list_select_related, m.list_per_page,
m.list_max_show_all, m.list_editable, m)
self.assertEqual(cl.queryset.query.select_related, {
'parent': {'name': {}}
})
def test_select_related_as_tuple(self):
ia = InvitationAdmin(Invitation, custom_site)
request = self.factory.get('/invitation/')
list_select_related = ia.get_list_select_related(request)
cl = ChangeList(request, Child, ia.list_display, ia.list_display_links,
ia.list_filter, ia.date_hierarchy, ia.search_fields,
list_select_related, ia.list_per_page,
ia.list_max_show_all, ia.list_editable, ia)
self.assertEqual(cl.queryset.query.select_related, {'player': {}})
def test_select_related_as_empty_tuple(self):
ia = InvitationAdmin(Invitation, custom_site)
ia.list_select_related = ()
request = self.factory.get('/invitation/')
list_select_related = ia.get_list_select_related(request)
cl = ChangeList(request, Child, ia.list_display, ia.list_display_links,
ia.list_filter, ia.date_hierarchy, ia.search_fields,
list_select_related, ia.list_per_page,
ia.list_max_show_all, ia.list_editable, ia)
self.assertEqual(cl.queryset.query.select_related, False)
def test_get_select_related_custom_method(self):
class GetListSelectRelatedAdmin(admin.ModelAdmin):
list_display = ('band', 'player')
def get_list_select_related(self, request):
return ('band', 'player')
ia = GetListSelectRelatedAdmin(Invitation, custom_site)
request = self.factory.get('/invitation/')
list_select_related = ia.get_list_select_related(request)
cl = ChangeList(request, Child, ia.list_display, ia.list_display_links,
ia.list_filter, ia.date_hierarchy, ia.search_fields,
list_select_related, ia.list_per_page,
ia.list_max_show_all, ia.list_editable, ia)
self.assertEqual(cl.queryset.query.select_related, {'player': {}, 'band': {}})
def test_result_list_empty_changelist_value(self):
"""
Regression test for #14982: EMPTY_CHANGELIST_VALUE should be honored
for relationship fields
"""
new_child = Child.objects.create(name='name', parent=None)
request = self.factory.get('/child/')
m = ChildAdmin(Child, custom_site)
list_display = m.get_list_display(request)
list_display_links = m.get_list_display_links(request, list_display)
cl = ChangeList(request, Child, list_display, list_display_links,
m.list_filter, m.date_hierarchy, m.search_fields,
m.list_select_related, m.list_per_page, m.list_max_show_all, m.list_editable, m)
cl.formset = None
template = Template('{% load admin_list %}{% spaceless %}{% result_list cl %}{% endspaceless %}')
context = Context({'cl': cl})
table_output = template.render(context)
link = reverse('admin:admin_changelist_child_change', args=(new_child.id,))
row_html = '<tbody><tr class="row1"><th class="field-name"><a href="%s">name</a></th><td class="field-parent nowrap">-</td></tr></tbody>' % link
self.assertNotEqual(table_output.find(row_html), -1,
'Failed to find expected row element: %s' % table_output)
def test_result_list_html(self):
"""
Verifies that the result_list inclusion tag generates a table with
default ModelAdmin settings.
"""
new_parent = Parent.objects.create(name='parent')
new_child = Child.objects.create(name='name', parent=new_parent)
request = self.factory.get('/child/')
m = ChildAdmin(Child, custom_site)
list_display = m.get_list_display(request)
list_display_links = m.get_list_display_links(request, list_display)
cl = ChangeList(request, Child, list_display, list_display_links,
m.list_filter, m.date_hierarchy, m.search_fields,
m.list_select_related, m.list_per_page, m.list_max_show_all, m.list_editable, m)
cl.formset = None
template = Template('{% load admin_list %}{% spaceless %}{% result_list cl %}{% endspaceless %}')
context = Context({'cl': cl})
table_output = template.render(context)
link = reverse('admin:admin_changelist_child_change', args=(new_child.id,))
row_html = '<tbody><tr class="row1"><th class="field-name"><a href="%s">name</a></th><td class="field-parent nowrap">Parent object</td></tr></tbody>' % link
self.assertNotEqual(table_output.find(row_html), -1,
'Failed to find expected row element: %s' % table_output)
def test_result_list_editable_html(self):
"""
Regression test for #11791: the result_list inclusion tag generates a
table, and the items are nested within the table element tags.
Also a regression test for #13599, verifies that hidden fields
when list_editable is enabled are rendered in a div outside the
table.
"""
new_parent = Parent.objects.create(name='parent')
new_child = Child.objects.create(name='name', parent=new_parent)
request = self.factory.get('/child/')
m = ChildAdmin(Child, custom_site)
# Test with list_editable fields
m.list_display = ['id', 'name', 'parent']
m.list_display_links = ['id']
m.list_editable = ['name']
cl = ChangeList(request, Child, m.list_display, m.list_display_links,
m.list_filter, m.date_hierarchy, m.search_fields,
m.list_select_related, m.list_per_page, m.list_max_show_all, m.list_editable, m)
FormSet = m.get_changelist_formset(request)
cl.formset = FormSet(queryset=cl.result_list)
template = Template('{% load admin_list %}{% spaceless %}{% result_list cl %}{% endspaceless %}')
context = Context({'cl': cl})
table_output = template.render(context)
# make sure that hidden fields are in the correct place
hiddenfields_div = '<div class="hiddenfields"><input type="hidden" name="form-0-id" value="%d" id="id_form-0-id" /></div>' % new_child.id
self.assertInHTML(hiddenfields_div, table_output, msg_prefix='Failed to find hidden fields')
# make sure that list editable fields are rendered in divs correctly
editable_name_field = '<input name="form-0-name" value="name" class="vTextField" maxlength="30" type="text" id="id_form-0-name" />'
self.assertInHTML('<td class="field-name">%s</td>' % editable_name_field, table_output, msg_prefix='Failed to find "name" list_editable field')
def test_result_list_editable(self):
"""
Regression test for #14312: list_editable with pagination
"""
new_parent = Parent.objects.create(name='parent')
for i in range(200):
Child.objects.create(name='name %s' % i, parent=new_parent)
request = self.factory.get('/child/', data={'p': -1}) # Anything outside range
m = ChildAdmin(Child, custom_site)
# Test with list_editable fields
m.list_display = ['id', 'name', 'parent']
m.list_display_links = ['id']
m.list_editable = ['name']
self.assertRaises(IncorrectLookupParameters, lambda:
ChangeList(request, Child, m.list_display, m.list_display_links,
m.list_filter, m.date_hierarchy, m.search_fields,
m.list_select_related, m.list_per_page, m.list_max_show_all, m.list_editable, m))
def test_custom_paginator(self):
new_parent = Parent.objects.create(name='parent')
for i in range(200):
Child.objects.create(name='name %s' % i, parent=new_parent)
request = self.factory.get('/child/')
m = CustomPaginationAdmin(Child, custom_site)
cl = ChangeList(request, Child, m.list_display, m.list_display_links,
m.list_filter, m.date_hierarchy, m.search_fields,
m.list_select_related, m.list_per_page, m.list_max_show_all, m.list_editable, m)
cl.get_results(request)
self.assertIsInstance(cl.paginator, CustomPaginator)
def test_distinct_for_m2m_in_list_filter(self):
"""
Regression test for #13902: When using a ManyToMany in list_filter,
results shouldn't appear more than once. Basic ManyToMany.
"""
blues = Genre.objects.create(name='Blues')
band = Band.objects.create(name='B.B. King Review', nr_of_members=11)
band.genres.add(blues)
band.genres.add(blues)
m = BandAdmin(Band, custom_site)
request = self.factory.get('/band/', data={'genres': blues.pk})
cl = ChangeList(request, Band, m.list_display,
m.list_display_links, m.list_filter, m.date_hierarchy,
m.search_fields, m.list_select_related, m.list_per_page,
m.list_max_show_all, m.list_editable, m)
cl.get_results(request)
# There's only one Group instance
self.assertEqual(cl.result_count, 1)
def test_distinct_for_through_m2m_in_list_filter(self):
"""
Regression test for #13902: When using a ManyToMany in list_filter,
results shouldn't appear more than once. With an intermediate model.
"""
lead = Musician.objects.create(name='Vox')
band = Group.objects.create(name='The Hype')
Membership.objects.create(group=band, music=lead, role='lead voice')
Membership.objects.create(group=band, music=lead, role='bass player')
m = GroupAdmin(Group, custom_site)
request = self.factory.get('/group/', data={'members': lead.pk})
cl = ChangeList(request, Group, m.list_display,
m.list_display_links, m.list_filter, m.date_hierarchy,
m.search_fields, m.list_select_related, m.list_per_page,
m.list_max_show_all, m.list_editable, m)
cl.get_results(request)
# There's only one Group instance
self.assertEqual(cl.result_count, 1)
def test_distinct_for_through_m2m_at_second_level_in_list_filter(self):
"""
When using a ManyToMany in list_filter at the second level behind a
ForeignKey, distinct() must be called and results shouldn't appear more
than once.
"""
lead = Musician.objects.create(name='Vox')
band = Group.objects.create(name='The Hype')
Concert.objects.create(name='Woodstock', group=band)
Membership.objects.create(group=band, music=lead, role='lead voice')
Membership.objects.create(group=band, music=lead, role='bass player')
m = ConcertAdmin(Concert, custom_site)
request = self.factory.get('/concert/', data={'group__members': lead.pk})
cl = ChangeList(request, Concert, m.list_display,
m.list_display_links, m.list_filter, m.date_hierarchy,
m.search_fields, m.list_select_related, m.list_per_page,
m.list_max_show_all, m.list_editable, m)
cl.get_results(request)
# There's only one Concert instance
self.assertEqual(cl.result_count, 1)
def test_distinct_for_inherited_m2m_in_list_filter(self):
"""
Regression test for #13902: When using a ManyToMany in list_filter,
results shouldn't appear more than once. The model managed in the
admin inherits from the one that defines the relationship.
"""
lead = Musician.objects.create(name='John')
four = Quartet.objects.create(name='The Beatles')
Membership.objects.create(group=four, music=lead, role='lead voice')
Membership.objects.create(group=four, music=lead, role='guitar player')
m = QuartetAdmin(Quartet, custom_site)
request = self.factory.get('/quartet/', data={'members': lead.pk})
cl = ChangeList(request, Quartet, m.list_display,
m.list_display_links, m.list_filter, m.date_hierarchy,
m.search_fields, m.list_select_related, m.list_per_page,
m.list_max_show_all, m.list_editable, m)
cl.get_results(request)
# There's only one Quartet instance
self.assertEqual(cl.result_count, 1)
def test_distinct_for_m2m_to_inherited_in_list_filter(self):
"""
Regression test for #13902: When using a ManyToMany in list_filter,
results shouldn't appear more than once. Target of the relationship
inherits from another.
"""
lead = ChordsMusician.objects.create(name='Player A')
three = ChordsBand.objects.create(name='The Chords Trio')
Invitation.objects.create(band=three, player=lead, instrument='guitar')
Invitation.objects.create(band=three, player=lead, instrument='bass')
m = ChordsBandAdmin(ChordsBand, custom_site)
request = self.factory.get('/chordsband/', data={'members': lead.pk})
cl = ChangeList(request, ChordsBand, m.list_display,
m.list_display_links, m.list_filter, m.date_hierarchy,
m.search_fields, m.list_select_related, m.list_per_page,
m.list_max_show_all, m.list_editable, m)
cl.get_results(request)
# There's only one ChordsBand instance
self.assertEqual(cl.result_count, 1)
def test_distinct_for_non_unique_related_object_in_list_filter(self):
"""
Regression tests for #15819: If a field listed in list_filter
is a non-unique related object, distinct() must be called.
"""
parent = Parent.objects.create(name='Mary')
# Two children with the same name
Child.objects.create(parent=parent, name='Daniel')
Child.objects.create(parent=parent, name='Daniel')
m = ParentAdmin(Parent, custom_site)
request = self.factory.get('/parent/', data={'child__name': 'Daniel'})
cl = ChangeList(request, Parent, m.list_display, m.list_display_links,
m.list_filter, m.date_hierarchy, m.search_fields,
m.list_select_related, m.list_per_page,
m.list_max_show_all, m.list_editable, m)
# Make sure distinct() was called
self.assertEqual(cl.queryset.count(), 1)
def test_distinct_for_non_unique_related_object_in_search_fields(self):
"""
Regression tests for #15819: If a field listed in search_fields
is a non-unique related object, distinct() must be called.
"""
parent = Parent.objects.create(name='Mary')
Child.objects.create(parent=parent, name='Danielle')
Child.objects.create(parent=parent, name='Daniel')
m = ParentAdmin(Parent, custom_site)
request = self.factory.get('/parent/', data={SEARCH_VAR: 'daniel'})
cl = ChangeList(request, Parent, m.list_display, m.list_display_links,
m.list_filter, m.date_hierarchy, m.search_fields,
m.list_select_related, m.list_per_page,
m.list_max_show_all, m.list_editable, m)
# Make sure distinct() was called
self.assertEqual(cl.queryset.count(), 1)
def test_distinct_for_many_to_many_at_second_level_in_search_fields(self):
"""
When using a ManyToMany in search_fields at the second level behind a
ForeignKey, distinct() must be called and results shouldn't appear more
than once.
"""
lead = Musician.objects.create(name='Vox')
band = Group.objects.create(name='The Hype')
Concert.objects.create(name='Woodstock', group=band)
Membership.objects.create(group=band, music=lead, role='lead voice')
Membership.objects.create(group=band, music=lead, role='bass player')
m = ConcertAdmin(Concert, custom_site)
request = self.factory.get('/concert/', data={SEARCH_VAR: 'vox'})
cl = ChangeList(request, Concert, m.list_display,
m.list_display_links, m.list_filter, m.date_hierarchy,
m.search_fields, m.list_select_related, m.list_per_page,
m.list_max_show_all, m.list_editable, m)
# There's only one Concert instance
self.assertEqual(cl.queryset.count(), 1)
def test_pagination(self):
"""
Regression tests for #12893: Pagination in the admin's changelist doesn't
use queryset set by modeladmin.
"""
parent = Parent.objects.create(name='anything')
for i in range(30):
Child.objects.create(name='name %s' % i, parent=parent)
Child.objects.create(name='filtered %s' % i, parent=parent)
request = self.factory.get('/child/')
# Test default queryset
m = ChildAdmin(Child, custom_site)
cl = ChangeList(request, Child, m.list_display, m.list_display_links,
m.list_filter, m.date_hierarchy, m.search_fields,
m.list_select_related, m.list_per_page, m.list_max_show_all,
m.list_editable, m)
self.assertEqual(cl.queryset.count(), 60)
self.assertEqual(cl.paginator.count, 60)
self.assertEqual(list(cl.paginator.page_range), [1, 2, 3, 4, 5, 6])
# Test custom queryset
m = FilteredChildAdmin(Child, custom_site)
cl = ChangeList(request, Child, m.list_display, m.list_display_links,
m.list_filter, m.date_hierarchy, m.search_fields,
m.list_select_related, m.list_per_page, m.list_max_show_all,
m.list_editable, m)
self.assertEqual(cl.queryset.count(), 30)
self.assertEqual(cl.paginator.count, 30)
self.assertEqual(list(cl.paginator.page_range), [1, 2, 3])
def test_computed_list_display_localization(self):
"""
Regression test for #13196: output of functions should be localized
in the changelist.
"""
User.objects.create_superuser(
username='super', email='super@localhost', password='secret')
self.client.login(username='super', password='secret')
event = Event.objects.create(date=datetime.date.today())
response = self.client.get(reverse('admin:admin_changelist_event_changelist'))
self.assertContains(response, formats.localize(event.date))
self.assertNotContains(response, six.text_type(event.date))
def test_dynamic_list_display(self):
"""
Regression tests for #14206: dynamic list_display support.
"""
parent = Parent.objects.create(name='parent')
for i in range(10):
Child.objects.create(name='child %s' % i, parent=parent)
user_noparents = self._create_superuser('noparents')
user_parents = self._create_superuser('parents')
# Test with user 'noparents'
m = custom_site._registry[Child]
request = self._mocked_authenticated_request('/child/', user_noparents)
response = m.changelist_view(request)
self.assertNotContains(response, 'Parent object')
list_display = m.get_list_display(request)
list_display_links = m.get_list_display_links(request, list_display)
self.assertEqual(list_display, ['name', 'age'])
self.assertEqual(list_display_links, ['name'])
# Test with user 'parents'
m = DynamicListDisplayChildAdmin(Child, custom_site)
request = self._mocked_authenticated_request('/child/', user_parents)
response = m.changelist_view(request)
self.assertContains(response, 'Parent object')
custom_site.unregister(Child)
list_display = m.get_list_display(request)
list_display_links = m.get_list_display_links(request, list_display)
self.assertEqual(list_display, ('parent', 'name', 'age'))
self.assertEqual(list_display_links, ['parent'])
# Test default implementation
custom_site.register(Child, ChildAdmin)
m = custom_site._registry[Child]
request = self._mocked_authenticated_request('/child/', user_noparents)
response = m.changelist_view(request)
self.assertContains(response, 'Parent object')
def test_show_all(self):
parent = Parent.objects.create(name='anything')
for i in range(30):
Child.objects.create(name='name %s' % i, parent=parent)
Child.objects.create(name='filtered %s' % i, parent=parent)
# Add "show all" parameter to request
request = self.factory.get('/child/', data={ALL_VAR: ''})
# Test valid "show all" request (number of total objects is under max)
m = ChildAdmin(Child, custom_site)
# 200 is the max we'll pass to ChangeList
cl = ChangeList(request, Child, m.list_display, m.list_display_links,
m.list_filter, m.date_hierarchy, m.search_fields,
m.list_select_related, m.list_per_page, 200, m.list_editable, m)
cl.get_results(request)
self.assertEqual(len(cl.result_list), 60)
# Test invalid "show all" request (number of total objects over max)
# falls back to paginated pages
m = ChildAdmin(Child, custom_site)
# 30 is the max we'll pass to ChangeList for this test
cl = ChangeList(request, Child, m.list_display, m.list_display_links,
m.list_filter, m.date_hierarchy, m.search_fields,
m.list_select_related, m.list_per_page, 30, m.list_editable, m)
cl.get_results(request)
self.assertEqual(len(cl.result_list), 10)
def test_dynamic_list_display_links(self):
"""
Regression tests for #16257: dynamic list_display_links support.
"""
parent = Parent.objects.create(name='parent')
for i in range(1, 10):
Child.objects.create(id=i, name='child %s' % i, parent=parent, age=i)
m = DynamicListDisplayLinksChildAdmin(Child, custom_site)
superuser = self._create_superuser('superuser')
request = self._mocked_authenticated_request('/child/', superuser)
response = m.changelist_view(request)
for i in range(1, 10):
link = reverse('admin:admin_changelist_child_change', args=(i,))
self.assertContains(response, '<a href="%s">%s</a>' % (link, i))
list_display = m.get_list_display(request)
list_display_links = m.get_list_display_links(request, list_display)
self.assertEqual(list_display, ('parent', 'name', 'age'))
self.assertEqual(list_display_links, ['age'])
def test_no_list_display_links(self):
"""#15185 -- Allow no links from the 'change list' view grid."""
p = Parent.objects.create(name='parent')
m = NoListDisplayLinksParentAdmin(Parent, custom_site)
superuser = self._create_superuser('superuser')
request = self._mocked_authenticated_request('/parent/', superuser)
response = m.changelist_view(request)
link = reverse('admin:admin_changelist_parent_change', args=(p.pk,))
self.assertNotContains(response, '<a href="%s">' % link)
def test_tuple_list_display(self):
"""
Regression test for #17128
(ChangeList failing under Python 2.5 after r16319)
"""
swallow = Swallow.objects.create(origin='Africa', load='12.34', speed='22.2')
swallow2 = Swallow.objects.create(origin='Africa', load='12.34', speed='22.2')
swallow_o2o = SwallowOneToOne.objects.create(swallow=swallow2)
model_admin = SwallowAdmin(Swallow, custom_site)
superuser = self._create_superuser('superuser')
request = self._mocked_authenticated_request('/swallow/', superuser)
response = model_admin.changelist_view(request)
# just want to ensure it doesn't blow up during rendering
self.assertContains(response, six.text_type(swallow.origin))
self.assertContains(response, six.text_type(swallow.load))
self.assertContains(response, six.text_type(swallow.speed))
# Reverse one-to-one relations should work.
self.assertContains(response, '<td class="field-swallowonetoone">-</td>')
self.assertContains(response, '<td class="field-swallowonetoone">%s</td>' % swallow_o2o)
def test_deterministic_order_for_unordered_model(self):
"""
Ensure that the primary key is systematically used in the ordering of
the changelist's results to guarantee a deterministic order, even
when the Model doesn't have any default ordering defined.
Refs #17198.
"""
superuser = self._create_superuser('superuser')
for counter in range(1, 51):
UnorderedObject.objects.create(id=counter, bool=True)
class UnorderedObjectAdmin(admin.ModelAdmin):
list_per_page = 10
def check_results_order(ascending=False):
custom_site.register(UnorderedObject, UnorderedObjectAdmin)
model_admin = UnorderedObjectAdmin(UnorderedObject, custom_site)
counter = 0 if ascending else 51
for page in range(0, 5):
request = self._mocked_authenticated_request('/unorderedobject/?p=%s' % page, superuser)
response = model_admin.changelist_view(request)
for result in response.context_data['cl'].result_list:
counter += 1 if ascending else -1
self.assertEqual(result.id, counter)
custom_site.unregister(UnorderedObject)
# When no order is defined at all, everything is ordered by '-pk'.
check_results_order()
# When an order field is defined but multiple records have the same
# value for that field, make sure everything gets ordered by -pk as well.
UnorderedObjectAdmin.ordering = ['bool']
check_results_order()
# When order fields are defined, including the pk itself, use them.
UnorderedObjectAdmin.ordering = ['bool', '-pk']
check_results_order()
UnorderedObjectAdmin.ordering = ['bool', 'pk']
check_results_order(ascending=True)
UnorderedObjectAdmin.ordering = ['-id', 'bool']
check_results_order()
UnorderedObjectAdmin.ordering = ['id', 'bool']
check_results_order(ascending=True)
def test_deterministic_order_for_model_ordered_by_its_manager(self):
"""
Ensure that the primary key is systematically used in the ordering of
the changelist's results to guarantee a deterministic order, even
when the Model has a manager that defines a default ordering.
Refs #17198.
"""
superuser = self._create_superuser('superuser')
for counter in range(1, 51):
OrderedObject.objects.create(id=counter, bool=True, number=counter)
class OrderedObjectAdmin(admin.ModelAdmin):
list_per_page = 10
def check_results_order(ascending=False):
custom_site.register(OrderedObject, OrderedObjectAdmin)
model_admin = OrderedObjectAdmin(OrderedObject, custom_site)
counter = 0 if ascending else 51
for page in range(0, 5):
request = self._mocked_authenticated_request('/orderedobject/?p=%s' % page, superuser)
response = model_admin.changelist_view(request)
for result in response.context_data['cl'].result_list:
counter += 1 if ascending else -1
self.assertEqual(result.id, counter)
custom_site.unregister(OrderedObject)
# When no order is defined at all, use the model's default ordering (i.e. 'number')
check_results_order(ascending=True)
# When an order field is defined but multiple records have the same
# value for that field, make sure everything gets ordered by -pk as well.
OrderedObjectAdmin.ordering = ['bool']
check_results_order()
# When order fields are defined, including the pk itself, use them.
OrderedObjectAdmin.ordering = ['bool', '-pk']
check_results_order()
OrderedObjectAdmin.ordering = ['bool', 'pk']
check_results_order(ascending=True)
OrderedObjectAdmin.ordering = ['-id', 'bool']
check_results_order()
OrderedObjectAdmin.ordering = ['id', 'bool']
check_results_order(ascending=True)
def test_dynamic_list_filter(self):
"""
Regression tests for ticket #17646: dynamic list_filter support.
"""
parent = Parent.objects.create(name='parent')
for i in range(10):
Child.objects.create(name='child %s' % i, parent=parent)
user_noparents = self._create_superuser('noparents')
user_parents = self._create_superuser('parents')
# Test with user 'noparents'
m = DynamicListFilterChildAdmin(Child, custom_site)
request = self._mocked_authenticated_request('/child/', user_noparents)
response = m.changelist_view(request)
self.assertEqual(response.context_data['cl'].list_filter, ['name', 'age'])
# Test with user 'parents'
m = DynamicListFilterChildAdmin(Child, custom_site)
request = self._mocked_authenticated_request('/child/', user_parents)
response = m.changelist_view(request)
self.assertEqual(response.context_data['cl'].list_filter, ('parent', 'name', 'age'))
def test_dynamic_search_fields(self):
child = self._create_superuser('child')
m = DynamicSearchFieldsChildAdmin(Child, custom_site)
request = self._mocked_authenticated_request('/child/', child)
response = m.changelist_view(request)
self.assertEqual(response.context_data['cl'].search_fields, ('name', 'age'))
def test_pagination_page_range(self):
"""
Regression tests for ticket #15653: ensure the number of pages
generated for changelist views is correct.
"""
# instantiating and setting up ChangeList object
m = GroupAdmin(Group, custom_site)
request = self.factory.get('/group/')
cl = ChangeList(request, Group, m.list_display,
m.list_display_links, m.list_filter, m.date_hierarchy,
m.search_fields, m.list_select_related, m.list_per_page,
m.list_max_show_all, m.list_editable, m)
per_page = cl.list_per_page = 10
for page_num, objects_count, expected_page_range in [
(0, per_page, []),
(0, per_page * 2, list(range(2))),
(5, per_page * 11, list(range(11))),
(5, per_page * 12, [0, 1, 2, 3, 4, 5, 6, 7, 8, '.', 10, 11]),
(6, per_page * 12, [0, 1, '.', 3, 4, 5, 6, 7, 8, 9, 10, 11]),
(6, per_page * 13, [0, 1, '.', 3, 4, 5, 6, 7, 8, 9, '.', 11, 12]),
]:
# assuming we have exactly `objects_count` objects
Group.objects.all().delete()
for i in range(objects_count):
Group.objects.create(name='test band')
# setting page number and calculating page range
cl.page_num = page_num
cl.get_results(request)
real_page_range = pagination(cl)['page_range']
self.assertListEqual(
expected_page_range,
list(real_page_range),
)
class AdminLogNodeTestCase(TestCase):
def test_get_admin_log_templatetag_custom_user(self):
"""
Regression test for ticket #20088: admin log depends on User model
having id field as primary key.
The old implementation raised an AttributeError when trying to use
the id field.
"""
context = Context({'user': CustomIdUser()})
template_string = '{% load log %}{% get_admin_log 10 as admin_log for_user user %}'
template = Template(template_string)
# Rendering should be u'' since this templatetag just logs,
# it doesn't render any string.
self.assertEqual(template.render(context), '')
def test_get_admin_log_templatetag_no_user(self):
"""
The {% get_admin_log %} tag should work without specifying a user.
"""
user = User(username='jondoe', password='secret', email='[email protected]')
user.save()
ct = ContentType.objects.get_for_model(User)
LogEntry.objects.log_action(user.pk, ct.pk, user.pk, repr(user), 1)
t = Template(
'{% load log %}'
'{% get_admin_log 100 as admin_log %}'
'{% for entry in admin_log %}'
'{{ entry|safe }}'
'{% endfor %}'
)
self.assertEqual(t.render(Context({})), 'Added "<User: jondoe>".')
@override_settings(PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'],
ROOT_URLCONF="admin_changelist.urls")
class SeleniumFirefoxTests(AdminSeleniumWebDriverTestCase):
available_apps = ['admin_changelist'] + AdminSeleniumWebDriverTestCase.available_apps
webdriver_class = 'selenium.webdriver.firefox.webdriver.WebDriver'
def setUp(self):
# password = "secret"
User.objects.create(
pk=100, username='super', first_name='Super', last_name='User', email='[email protected]',
password='sha1$995a3$6011485ea3834267d719b4c801409b8b1ddd0158', is_active=True, is_superuser=True,
is_staff=True, last_login=datetime.datetime(2007, 5, 30, 13, 20, 10),
date_joined=datetime.datetime(2007, 5, 30, 13, 20, 10)
)
def test_add_row_selection(self):
"""
Ensure that the status line for selected rows gets updated correctly (#22038)
"""
self.admin_login(username='super', password='secret')
self.selenium.get('%s%s' % (self.live_server_url,
reverse('admin:auth_user_changelist')))
form_id = '#changelist-form'
# Test amount of rows in the Changelist
rows = self.selenium.find_elements_by_css_selector(
'%s #result_list tbody tr' % form_id)
self.assertEqual(len(rows), 1)
# Test current selection
selection_indicator = self.selenium.find_element_by_css_selector(
'%s .action-counter' % form_id)
self.assertEqual(selection_indicator.text, "0 of 1 selected")
# Select a row and check again
row_selector = self.selenium.find_element_by_css_selector(
'%s #result_list tbody tr:first-child .action-select' % form_id)
row_selector.click()
self.assertEqual(selection_indicator.text, "1 of 1 selected")
class SeleniumChromeTests(SeleniumFirefoxTests):
webdriver_class = 'selenium.webdriver.chrome.webdriver.WebDriver'
class SeleniumIETests(SeleniumFirefoxTests):
webdriver_class = 'selenium.webdriver.ie.webdriver.WebDriver'
|
bsd-3-clause
|
CognitionGuidedSurgery/msml-gui
|
src/msmlgui/text/xmllexer.py
|
1
|
9663
|
__author__ = 'Alexander Weigl'
# From: https://github.com/behnam/python-ply-xml/blob/master/parser.py
#
#
import sys
from UserString import UserString
from ply import lex, yacc
_VERSION = '1.0'
################################
# LEXER
class XmlLexer:
'''The XML lexer'''
# states:
# default: The default context, non-tag
# tag: The document tag context
# attrvalue1: Single-quoted tag attribute value
# attrvalue2: Double-quoted tag attribute value
states = (
('tag', 'exclusive'),
('attrvalue1', 'exclusive'),
('attrvalue2', 'exclusive'),
)
tokens = [
# state: INITIAL
'PCDATA',
'OPENTAGOPEN',
'CLOSETAGOPEN',
# state: tag
'TAGATTRNAME',
'TAGCLOSE',
'LONETAGCLOSE',
'ATTRASSIGN',
# state: attrvalue1
'ATTRVALUE1OPEN',
'ATTRVALUE1STRING',
'ATTRVALUE1CLOSE',
# state: attrvalue2
'ATTRVALUE2OPEN',
'ATTRVALUE2STRING',
'ATTRVALUE2CLOSE',
]
# Complex patterns
re_digit = r'([0-9])'
re_nondigit = r'([_A-Za-z:\-])'
re_identifier = r'(' + re_nondigit + r'(' + re_digit + r'|' + re_nondigit + r')*)'
# ANY
def t_ANY_error(self, t):
raise SyntaxError("Illegal character '%s'" % t.value[0])
t.lexer.skip(1)
pass
# INITIAL
t_ignore = ''
def t_CLOSETAGOPEN(self, t):
r'</'
t.lexer.push_state('tag')
return t
def t_OPENTAGOPEN(self, t):
r'<'
t.lexer.push_state('tag')
return t
def t_PCDATA(self, t):
'[^<]+'
return t
# tag: name
t_tag_ignore = ' \t'
def t_tag_TAGATTRNAME(self, t):
return t
t_tag_TAGATTRNAME.__doc__ = re_identifier
def t_tag_TAGCLOSE(self, t):
r'>'
t.lexer.pop_state()
return t
def t_tag_LONETAGCLOSE(self, t):
r'/>'
t.lexer.pop_state()
return t
# tag: attr
t_tag_ATTRASSIGN = r'='
def t_tag_ATTRVALUE1OPEN(self, t):
r'\''
t.lexer.push_state('attrvalue1')
return t
def t_tag_ATTRVALUE2OPEN(self, t):
r'"'
t.lexer.push_state('attrvalue2')
return t
# attrvalue1
def t_attrvalue1_ATTRVALUE1STRING(self, t):
r'[^\']+'
t.value = unicode(t.value)
return t
def t_attrvalue1_ATTRVALUE1CLOSE(self, t):
r'\''
t.lexer.pop_state()
return t
t_attrvalue1_ignore = ''
# attrvalue2
def t_attrvalue2_ATTRVALUE2STRING(self, t):
r'[^"]+'
t.value = unicode(t.value)
return t
def t_attrvalue2_ATTRVALUE2CLOSE(self, t):
r'"'
t.lexer.pop_state()
return t
t_attrvalue2_ignore = ''
# misc
literals = '$%^'
def t_ANY_newline(self, t):
r'\n'
t.lexer.lineno += len(t.value)
# Build the lexer
def build(self, **kwargs):
self.lexer = lex.lex(object=self, **kwargs)
# Test its output
def test(self, data):
self.lexer.input(data)
while 1:
tok = self.lexer.token()
if not tok: break
_debug_print_('LEXER', '[%-12s] %s' % (self.lexer.lexstate, tok))
# Customization
class SyntaxError(Exception):
pass
def tokenize(text):
lexer = XmlLexer()
lexer.build()
lexer = lexer.lexer
lexer.input(text)
tokens = []
while True:
tok = lexer.token()
if not tok:
break
tokens.append(tok)
return tokens
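# Tokenization sketch (illustrative input): the token *types* produced for a
# small document look like this.
#
#   tokenize('<a href="x">hi</a>')
#   # OPENTAGOPEN, TAGATTRNAME, TAGATTRNAME, ATTRASSIGN, ATTRVALUE2OPEN,
#   # ATTRVALUE2STRING, ATTRVALUE2CLOSE, TAGCLOSE, PCDATA, CLOSETAGOPEN,
#   # TAGATTRNAME, TAGCLOSE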
################################
# PARSER
tag_stack = []
# Grammar
def p_root_element(p):
'''root : element
| element PCDATA
'''
_parser_trace(p)
p[0] = p[1]
def p_root_pcdata_element(p):
'''root : PCDATA element
| PCDATA element PCDATA
'''
_parser_trace(p)
p[0] = p[2]
def p_element(p):
'''element : opentag children closetag
| lonetag
'''
_parser_trace(p)
if len(p) == 4:
p[1].children = p[2]
p[0] = p[1]
# tag
def p_opentag(p):
'''opentag : OPENTAGOPEN TAGATTRNAME attributes TAGCLOSE
'''
_parser_trace(p)
tag_stack.append(p[2])
p[0] = DOM.Element(p[2], p[3])
def p_closetag(p):
'''closetag : CLOSETAGOPEN TAGATTRNAME TAGCLOSE
'''
_parser_trace(p)
n = tag_stack.pop()
if p[2] != n:
raise ParserError('Close tag name ("%s") does not match the corresponding open tag ("%s").' % (p[2], n))
def p_lonetag(p):
'''lonetag : OPENTAGOPEN TAGATTRNAME attributes LONETAGCLOSE
'''
_parser_trace(p)
p[0] = DOM.Element(p[2], p[3])
# attr
def p_attributes(p):
'''attributes : attribute attributes
| empty
'''
_parser_trace(p)
if len(p) == 3:
if p[2]:
p[1].update(p[2])
p[0] = p[1]
else:
p[0] = p[1]
else:
p[0] = {}
def p_attribute(p):
'''attribute : TAGATTRNAME ATTRASSIGN attrvalue
'''
_parser_trace(p)
p[0] = {p[1]: p[3]}
def p_attrvalue(p):
'''attrvalue : ATTRVALUE1OPEN ATTRVALUE1STRING ATTRVALUE1CLOSE
| ATTRVALUE2OPEN ATTRVALUE2STRING ATTRVALUE2CLOSE
'''
_parser_trace(p)
p[0] = _xml_unescape(p[2])
# child
def p_children(p):
'''children : child children
| empty
'''
_parser_trace(p)
if len(p) > 2:
if p[2]:
p[0] = [p[1]] + p[2]
else:
p[0] = [p[1]]
else:
p[0] = []
def p_child_element(p):
'''child : element'''
_parser_trace(p)
p[0] = p[1]
def p_child_pcdata(p):
'''child : PCDATA'''
_parser_trace(p)
p[0] = DOM.Pcdata(p[1])
# empty
def p_empty(p):
'''empty :'''
pass
# Error rule for syntax errors
class ParserError(Exception):
pass
def p_error(p):
raise ParserError("Parse error: %s" % (p,))
pass
# Customization
def _parser_trace(x):
_debug_print_('PARSER', '[%-16s] %s' % (sys._getframe(1).f_code.co_name, x))
def _yacc_production__str(p):
#return "YaccProduction(%s, %s)" % (str(p.slice), str(p.stack))
return "YaccP%s" % (str([i.value for i in p.slice]))
yacc.YaccProduction.__str__ = _yacc_production__str
################################
# DOM
class DOM:
class Element:
# Document object model
#
# Parser returns the root element of the XML document
def __init__(self, name, attributes={}, children=[]):
self.name = name
self.attributes = attributes
self.children = children
def __str__(self):
attributes_str = ''
for attr in self.attributes:
attributes_str += ' %s="%s"' % (attr, _xml_escape(self.attributes[attr]))
children_str = ''
for child in self.children:
if isinstance(child, self.__class__):
children_str += str(child)
else:
children_str += child
return '<%s%s>%s</%s>'% (self.name, attributes_str, children_str, self.name)
def __repr__(self):
return str(self)
class Pcdata(UserString):
pass
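# Usage sketch (illustrative): building an element by hand and rendering it
# back to markup through __str__.
#
#   e = DOM.Element('p', {'id': 'x'}, children=['hello'])
#   str(e)   # -> '<p id="x">hello</p>'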
################################
# ESCAPE
_xml_escape_table = {
"&": "&amp;",
'"': "&quot;",
"'": "&apos;",
">": "&gt;",
"<": "&lt;",
}
def _xml_escape(text):
L=[]
for c in text:
L.append(_xml_escape_table.get(c,c))
return "".join(L)
def _xml_unescape(s):
rules = _xml_escape_table.items()
rules.reverse()
for x, y in rules:
s = s.replace(y, x)
return s
################################
# INTERFACE
def xml_parse(data):
_debug_header('INPUT')
_debug_print_('INPUT', data)
_debug_footer('INPUT')
# Tokenizer
xml_lexer = XmlLexer()
xml_lexer.build()
_debug_header('LEXER')
xml_lexer.test(data)
_debug_footer('LEXER')
# Parser
global tokens
tokens = XmlLexer.tokens
yacc.yacc(method="SLR")
_debug_header('PARSER')
root = yacc.parse(data, lexer=xml_lexer.lexer, debug=False)
_debug_footer('PARSER')
_debug_header('OUTPUT')
_debug_print_('OUTPUT', root)
_debug_footer('OUTPUT')
return root
def tree(node, level=0, init_prefix=''):
'Returns a tree view of the XML data'
prefix = ' '
attr_prefix = '@'
tag_postfix = ':\t'
attr_postfix = ':\t'
s_node = init_prefix + node.name + tag_postfix
s_attributes = ''
s_children = ''
for attr in node.attributes:
s_attributes += init_prefix + prefix + attr_prefix + attr + attr_postfix + node.attributes[attr] + '\n'
if len(node.children) == 1 and not isinstance(node.children[0], DOM.Element):
s_node += node.children[0] + '\n'
else:
for child in node.children:
if isinstance(child, DOM.Element):
s_children += tree(child, level+1, init_prefix + prefix)
s_node += '\n'
return s_node + s_attributes + s_children
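# Usage sketch (illustrative document): parse and print the tree view.
#
#   root = xml_parse('<scene name="demo"><mesh/></scene>')
#   print tree(root)
#   # prints the 'scene' tag, its '@name' attribute and the nested 'mesh'
#   # tag, one per line, indented by nesting depth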
################################
# DEBUG
_DEBUG = {
'INPUT': False,
'LEXER': False,
'PARSER': False,
'OUTPUT': False,
}
def _debug_header(part):
if _DEBUG[part]:
print '--------'
print '%s:' % part
def _debug_footer(part):
if _DEBUG[part]:
pass
def _debug_print_(part, s):
if _DEBUG[part]:
print s
################################
# MAIN
def main():
data = open(sys.argv[1]).read()
root = xml_parse(data)
print tree(root)
if __name__ == '__main__':
main()
|
gpl-3.0
|
singlebrook/AWS-ElasticBeanstalk-CLI
|
eb/macosx/python3/scli/core.py
|
8
|
5504
|
#!/usr/bin/env python
#==============================================================================
# Copyright 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Amazon Software License (the "License"). You may not use
# this file except in compliance with the License. A copy of the License is
# located at
#
# http://aws.amazon.com/asl/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, express or
# implied. See the License for the specific language governing permissions
# and limitations under the License.
#==============================================================================
import json
import codecs
import logging
from logging import config as _config
import os
import sys
from pprint import pprint
from scli import command, cli_parse, config_file, prompt
from scli.constants import EbLogFile, EbLocalDir, OutputLevel, ParameterName, \
ParameterSource, ServiceDefault
from scli.parameter import DefaultParameterValue, ParameterPool, ParameterValidator
from lib.utility import misc, shell_utils
log = logging.getLogger('cli')
def _getLogFile(filename):
return os.getcwd() + os.path.sep + filename
def _set_log_filename(config_dict, filename):
# may raise KeyError if the 'default' handler is missing from the config
config_dict['handlers']['default']['filename'] = filename
def _set_log_handlers(config_dict, formatter):
config_dict['root']['handlers'] = [formatter]
config_dict['loggers']['aws']['handlers'] = [formatter]
config_dict['loggers']['eb']['handlers'] = [formatter]
config_dict['loggers']['op']['handlers'] = [formatter]
config_dict['loggers']['cli']['handlers'] = [formatter]
def _disable_logging(config_dict = None):
logging.disable(logging.CRITICAL)
if config_dict is not None:
_set_log_handlers(config_dict, 'null')
del config_dict['handlers']['default']
def configureLogging(level = None, quiet = False,
filename = EbLogFile.Name,
spec_dir = os.getcwd() + os.path.sep + EbLocalDir.Path):
if not spec_dir:
output_file = _getLogFile(filename)
else:
shell_utils.create_directory(spec_dir)
output_file = spec_dir + os.path.sep + filename
ori_path = shell_utils.ori_path()
log_config_location = os.path.join(ori_path, 'logconfig.json')
try:
with codecs.open(log_config_location, 'r', encoding='utf-8') as input_file:
config_dict = json.loads(input_file.read())
_set_log_filename(config_dict, output_file)
if level is None and config_dict['root']['level'].upper() == 'NONE':
# completely disable log
config_dict['root']['level'] = 'NOTSET'
_disable_logging(config_dict)
else:
if level is not None:
config_dict['root']['level'] = level
_set_log_handlers(config_dict, 'default')
except (IOError, ValueError, KeyError) as ex:
#JSON logging config file parsing error
if not quiet:
print(('Encountered error when reading logging configuration file from "{0}": {1}.'.\
format(log_config_location, ex)))
_disable_logging()
return
try:
_config.dictConfig(config_dict)
except IOError:
if not quiet:
print('Could not open {0} for logging. Using stderr instead.'.\
format(output_file), file=sys.stderr)
_set_log_handlers(config_dict, 'to_stderr')
_config.dictConfig(config_dict)
config_file.set_access_permission(output_file, True)
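# Hedged sketch of the shape configureLogging expects from logconfig.json,
# inferred only from the keys read above (the real file ships with the CLI
# and may contain more handlers and formatters):
# {
#     "version": 1,
#     "root":     {"level": "INFO", "handlers": ["default"]},
#     "handlers": {"default":   {"class": "logging.FileHandler", "filename": ""},
#                  "to_stderr": {"class": "logging.StreamHandler"},
#                  "null":      {"class": "logging.NullHandler"}},
#     "loggers":  {"aws": {}, "eb": {}, "op": {}, "cli": {}}
# }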
def _exit(code):
log.info('EB CLI exit')
sys.exit(code)
def _print_op_results(results):
for index, result in enumerate(results):
prompt.info('------------ Operation {0}: {1}----------------'.format\
(index + 1, result.operation.__class__.__name__))
pprint(result.result, depth=3)
print(result.message)
def main(cmdline = None):
# Initialization
configureLogging(quiet=False)
log.info('EB CLI start')
parameter_pool = ParameterPool() # pool of all parameters
validator = ParameterValidator()
DefaultParameterValue.fill_default(parameter_pool)
log.debug('Finished initialization')
try:
# Parse command line arguments
cli_parse.parse(parameter_pool, cmdline)
log.debug('Finished parsing command line arguments.')
# TODO: set quiet level here.
if parameter_pool.get_value(ParameterName.Verbose) == ServiceDefault.ENABLED:
prompt.set_level(OutputLevel.Info)
else:
prompt.set_level(OutputLevel.ResultOnly)
validator.validate(parameter_pool, ParameterSource.CliArgument)
# Compile operation queue
queue = command.compile_operation_queue(parameter_pool.command)
except SystemExit as ex:
_exit(0)
except BaseException as ex:
print(misc.to_unicode(ex))
log.exception(ex)
_exit(1)
# Execute queue
results = []
try:
queue.run(parameter_pool, results)
log.debug('Finished executing operation queue')
except BaseException as ex:
print(misc.to_unicode(ex))
log.exception(ex)
_exit(1)
_exit(0)
|
apache-2.0
|
pcamp/google-appengine-wx-launcher
|
launcher/resizing_listctrl.py
|
28
|
1054
|
#!/usr/bin/env python
#
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""ListCtrl that automatically resizes the path column"""
import wx
import wx.lib.mixins.listctrl as listmixin
class ResizingListCtrl(wx.ListCtrl, listmixin.ListCtrlAutoWidthMixin):
def __init__(self, parent, ID, pos=wx.DefaultPosition,
size=wx.DefaultSize, style=0):
wx.ListCtrl.__init__(self, parent, ID, pos, size, style)
listmixin.ListCtrlAutoWidthMixin.__init__(self)
self.setResizeColumn(3) # the "path" column
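# Hedged usage sketch (illustrative only; not part of the launcher): the
# control needs at least three columns so that the resize target set by
# setResizeColumn(3) -- the "path" column -- actually exists.
#
#   app = wx.App(False)
#   frame = wx.Frame(None, title='projects')
#   ctrl = ResizingListCtrl(frame, wx.ID_ANY, style=wx.LC_REPORT)
#   for i, heading in enumerate(('Name', 'Port', 'Path')):
#       ctrl.InsertColumn(i, heading)
#   frame.Show()
#   app.MainLoop()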
|
apache-2.0
|
doublebits/osf.io
|
website/addons/mendeley/tests/utils.py
|
18
|
16261
|
# -*- coding: utf-8 -*-
from modularodm import storage
from framework.mongo import set_up_storage
from website.addons.base.testing import OAuthAddonTestCaseMixin, AddonTestCase
from website.addons.base.testing.utils import MockFolder
from website.addons.mendeley.tests.factories import MendeleyAccountFactory
from website.addons.mendeley.model import Mendeley
from website.addons.mendeley import MODELS
from json import dumps
class MendeleyTestCase(OAuthAddonTestCaseMixin, AddonTestCase):
ADDON_SHORT_NAME = 'mendeley'
ExternalAccountFactory = MendeleyAccountFactory
Provider = Mendeley
def set_node_settings(self, settings):
super(MendeleyTestCase, self).set_node_settings(settings)
settings.list_id = MockFolder().json['id']
settings.external_account = self.external_account
settings.save()
def init_storage():
set_up_storage(MODELS, storage_class=storage.MongoStorage)
mock_responses = {
'folders': [
{
"id": "4901a8f5-9840-49bf-8a17-bdb3d5900417",
"name": "subfolder",
"created": "2015-02-13T20:34:42.000Z",
"modified": "2015-02-13T20:34:44.000Z"
},
{
"id": "a6b12ebf-bd07-4f4e-ad73-f9704555f032",
"name": "subfolder2",
"created": "2015-02-13T20:34:42.000Z",
"modified": "2015-02-13T20:34:44.000Z",
"parent_id": "4901a8f5-9840-49bf-8a17-bdb3d5900417"
},
{
"id": "e843da05-8818-47c2-8c37-41eebfc4fe3f",
"name": "subfolder3",
"created": "2015-02-17T15:27:13.000Z",
"modified": "2015-02-17T15:27:13.000Z",
"parent_id": "a6b12ebf-bd07-4f4e-ad73-f9704555f032"
}
],
'documents': [
{
"id": "547a1215-efdb-36d2-93b2-e3ef8991264f",
"title": "Cloud Computing",
"type": "journal",
"authors": [
{
"first_name": "Shivaji P",
"last_name": "Mirashe"
},
{
"first_name": "N V",
"last_name": "Kalyankar"
}
],
"year": 2010,
"source": "Communications of the ACM",
"identifiers": {
"issn": "03621340",
"doi": "10.1145/358438.349303",
"pmid": "22988693",
"arxiv": "1003.4074",
"isbn": "1-58113-199-2"
},
"created": "2015-02-13T18:17:47.000Z",
"profile_id": "53f383b4-1100-30d5-9473-2dde614dfcaa",
"last_modified": "2015-02-13T20:34:44.000Z",
"abstract": "Computing as you know it is about to change, your applications and documents are going to move from the desktop into the cloud. I'm talking about cloud computing, where applications and files are hosted on a \"cloud\" consisting of thousands of computers and servers, all linked together and accessible via the Internet. With cloud computing, everything you do is now web based instead of being desktop based. You can access all your programs and documents from any computer that's connected to the Internet. How will cloud computing change the way you work? For one thing, you're no longer tied to a single computer. You can take your work anywhere because it's always accessible via the web. In addition, cloud computing facilitates group collaboration, as all group members can access the same programs and documents from wherever they happen to be located. Cloud computing might sound far-fetched, but chances are you're already using some cloud applications. If you're using a web-based email program, such as Gmail or Hotmail, you're computing in the cloud. If you're using a web-based application such as Google Calendar or Apple Mobile Me, you're computing in the cloud. If you're using a file- or photo-sharing site, such as Flickr or Picasa Web Albums, you're computing in the cloud. It's the technology of the future, available to use today."
},
{
"id": "5e95a1a9-d789-3576-9943-35eee8e59ea9",
"title": "The Google file system",
"type": "generic",
"authors": [
{
"first_name": "Sanjay",
"last_name": "Ghemawat"
},
{
"first_name": "Howard",
"last_name": "Gobioff"
},
{
"first_name": "Shun-Tak",
"last_name": "Leung"
}
],
"year": 2003,
"source": "ACM SIGOPS Operating Systems Review",
"identifiers": {
"pmid": "191",
"issn": "01635980"
},
"created": "2015-02-13T18:17:48.000Z",
"profile_id": "53f383b4-1100-30d5-9473-2dde614dfcaa",
"last_modified": "2015-02-13T20:34:44.000Z",
"abstract": "We have designed and implemented the Google File System, a scalable distributed file system for large distributed data-intensive applications. It provides fault tolerance while running on inexpensive commodity hardware, and it delivers high aggregate performance to a large number of clients. While sharing many of the same goals as previous distributed file systems, our design has been driven by observations of our application workloads and technological environment, both current and anticipated, that reflect a marked departure from some earlier file system assumptions. This has led us to reexamine traditional choices and explore radically different design points. The file system has successfully met our storage needs. It is widely deployed within Google as the storage platform for the generation and processing of data used by our service as well as research and development efforts that require large data sets. The largest cluster to date provides hundreds of terabytes of storage across thousands of disks on over a thousand machines, and it is concurrently accessed by hundreds of clients. In this paper, we present file system interface extensions designed to support distributed applications, discuss many aspects of our design, and report measurements from both micro-benchmarks and real world use."
},
{
"id": "3480056e-fe4d-339b-afed-4312d03739a4",
"title": "Above the clouds: A Berkeley view of cloud computing",
"type": "journal",
"authors": [
{
"first_name": "M",
"last_name": "Armbrust"
},
{
"first_name": "A",
"last_name": "Fox"
},
{
"first_name": "R",
"last_name": "Griffith"
},
{
"first_name": "AD",
"last_name": "Joseph"
},
{
"last_name": "RH"
}
],
"year": 2009,
"source": " University of California, Berkeley, Tech. Rep. UCB ",
"identifiers": {
"pmid": "11242594",
"arxiv": "0521865719 9780521865715"
},
"keywords": [
"cloud computing",
"distributed system economics",
"internet datacenters",
"utility computing"
],
"created": "2015-02-13T18:17:48.000Z",
"profile_id": "53f383b4-1100-30d5-9473-2dde614dfcaa",
"last_modified": "2015-02-13T20:34:45.000Z",
"abstract": "Cloud Computing, the long-held dream of computing as a utility, has the potential to transform a large part of the IT industry, making software even more attractive as a service and shaping the way IT hardware is designed and purchased. Developers with innovative ideas for new Internet services no longer require the large capital outlays in hardware to deploy their service or the human expense to operate it. They need not be concerned about over- provisioning for a service whose popularity does not meet their predictions, thus wasting costly resources, or under- provisioning for one that becomes wildly popular, thus missing potential customers and revenue. Moreover, companies with large batch-oriented tasks can get results as quickly as their programs can scale, since using 1000 servers for one hour costs no more than using one server for 1000 hlarge scale, is unprecedented in the history of IT. "
},
{
"id": "e917dd51-8b94-3748-810b-cafa2accc18a",
"title": "Toward the next generation of recommender systems: A survey of the state-of-the-art and possible extensions",
"type": "generic",
"authors": [
{
"first_name": "Gediminas",
"last_name": "Adomavicius"
},
{
"first_name": "Alexander",
"last_name": "Tuzhilin"
}
],
"year": 2005,
"source": "IEEE Transactions on Knowledge and Data Engineering",
"identifiers": {
"issn": "10414347",
"pmid": "1423975",
"arxiv": "3"
},
"keywords": [
"Collaborative filtering",
"Extensions to recommander systems",
"Rating estimation methods",
"Recommander systems"
],
"created": "2015-02-13T18:17:48.000Z",
"profile_id": "53f383b4-1100-30d5-9473-2dde614dfcaa",
"last_modified": "2015-02-13T20:34:45.000Z",
"abstract": " This paper presents an overview of the field of recommender systems and describes the current generation of recommendation methods that are usually classified into the following three main categories: content-based, collaborative, and hybrid recommendation approaches. This paper also describes various limitations of current recommendation methods and discusses possible extensions that can improve recommendation capabilities and make recommender systems applicable to an even broader range of applications. These extensions include, among others, an improvement of understanding of users and items, incorporation of the contextual information into the recommendation process, support for multicriteria ratings, and a provision of more flexible and less intrusive types of recommendations."
},
{
"id": "8cd60008-888a-3212-966f-29d481b4b7b7",
"title": "An Introduction to Information Retrieval",
"type": "patent",
"authors": [
{
"first_name": "Christopher D.",
"last_name": "Manning"
},
{
"first_name": "Prabhakar",
"last_name": "Raghavan"
}
],
"year": 2009,
"source": "Online",
"identifiers": {
"issn": "13864564",
"doi": "10.1109/LPT.2009.2020494",
"pmid": "10575050",
"arxiv": "0521865719 9780521865715",
"isbn": "0521865719"
},
"keywords": [
"keyword"
],
"created": "2015-02-13T18:17:48.000Z",
"profile_id": "53f383b4-1100-30d5-9473-2dde614dfcaa",
"last_modified": "2015-02-17T15:27:14.000Z",
"abstract": "Class-tested and coherent, this groundbreaking new textbook teaches web-era information retrieval, including web search and the related areas of text classification and text clustering from basic concepts. Written from a computer science perspective by three leading experts in the field, it gives an up-to-date treatment of all aspects of the design and implementation of systems for gathering, indexing, and searching documents; methods for evaluating systems; and an introduction to the use of machine learning methods on text collections. All the important ideas are explained using examples and figures, making it perfect for introductory courses in information retrieval for advanced undergraduates and graduate students in computer science. Based on feedback from extensive classroom experience, the book has been carefully structured in order to make teaching more natural and effective. Although originally designed as the primary text for a graduate or advanced undergraduate course in information retrieval, the book will also create a buzz for researchers and professionals alike."
},
{
"id": "de25a64f-493b-330e-a48c-4089bab938f5",
"title": "Learning of ontologies for the web: The analysis of existent approaches",
"type": "journal",
"authors": [
{
"first_name": "Borys",
"last_name": "Omelayenko"
}
],
"year": 2001,
"source": "CEUR Workshop Proceedings",
"identifiers": {
"issn": "16130073"
},
"created": "2015-02-13T18:17:52.000Z",
"profile_id": "53f383b4-1100-30d5-9473-2dde614dfcaa",
"last_modified": "2015-02-13T20:34:43.000Z",
"abstract": "The next generation of the Web, called Semantic Web, has to improve\\nthe Web with semantic (ontological) page annotations to enable knowledge-level\\nquerying and searches. Manual construction of these ontologies will\\nrequire tremendous efforts that force future integration of machine\\nlearning with knowledge acquisition to enable highly automated ontology\\nlearning. In the paper we present the state of the-art in the field\\nof ontology learning from the Web to see how it can contribute to\\nthe task..."
}
]
}
mock_responses = {k: dumps(v) for k, v in mock_responses.iteritems()}
|
apache-2.0
|
alexei-matveev/ase-local
|
ase/io/siesta.py
|
9
|
8034
|
from numpy import zeros
from os import fstat
from re import compile
from ase.io.fortranfile import FortranFile
def read_rho(fname):
"Read unformatted Siesta charge density file"
# TODO:
#
# Handle formatted and NetCDF files.
#
# Siesta source code (at least 2.0.2) can possibly also
# save RHO as a _formatted_ file (the source code seems
# prepared, but there seem to be no fdf options for it).
# Siesta >= 3 has support for saving RHO as a NetCDF file
# (according to manual)
fh = FortranFile(fname)
# Read (but ignore) unit cell vectors
x = fh.readReals('d')
if len(x) != 3 * 3:
raise IOError('Failed to read cell vectors')
# Read number of grid points and spin components
x = fh.readInts()
if len(x) != 4:
raise IOError('Failed to read grid size')
gpts = x # number of 'X', 'Y', 'Z', 'spin' gridpoints
rho = zeros(gpts)
for ispin in range(gpts[3]):
for n3 in range(gpts[2]):
for n2 in range(gpts[1]):
x = fh.readReals('f')
if len(x) != gpts[0]:
raise IOError('Failed to read RHO[:,%i,%i,%i]' %
(n2, n3, ispin))
rho[:, n2, n3, ispin] = x
fh.close()
return rho
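# Hedged usage sketch (the filename below is purely illustrative):
#
#   rho = read_rho('H2O.RHO')
#   print rho.shape        # (nx, ny, nz, nspin), e.g. (48, 48, 48, 1)
#
# The returned array is indexed as rho[x, y, z, spin].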
#
# Helper functions for read_fdf
#
_label_strip_re = compile(r'[\s._-]')
def _labelize(raw_label):
# Labels are case-insensitive; whitespace, '.', '_' and '-' are ignored, so strip them and lower-case the rest
return _label_strip_re.sub('', raw_label).lower()
def _is_block(val):
# Tell whether value is a block-value or an ordinary value.
# A block is represented as a list of lists of strings,
# and an ordinary value is represented as a list of strings
if type(val) is list and \
len(val) > 0 and \
type(val[0]) is list:
return True
return False
def _get_stripped_lines(fd):
# Remove comments, leading blanks, and empty lines
return filter(None, [L.split('#')[0].strip() for L in fd])
def _read_fdf_lines(file, inodes=[]):
# Read lines and resolve includes
if type(file) is str:
file = open(file, 'r')
fst = fstat(file.fileno())
inode = (fst.st_dev, fst.st_ino)
if inode in inodes:
raise IOError('Cyclic include in fdf file')
inodes = inodes + [inode]
lbz = _labelize
lines = []
for L in _get_stripped_lines(file):
w0 = lbz(L.split(None, 1)[0])
if w0 == '%include':
# Include the contents of fname
fname = L.split(None, 1)[1].strip()
lines += _read_fdf_lines(fname, inodes)
elif '<' in L:
L, fname = L.split('<', 1)
w = L.split()
fname = fname.strip()
if w0 == '%block':
# "%block label < filename" means that the block contents should be read from filename
if len(w) != 2:
raise IOError('Bad %%block-statement "%s < %s"' % (L, fname))
label = lbz(w[1])
lines.append('%%block %s' % label)
lines += _get_stripped_lines(open(fname))
lines.append('%%endblock %s' % label)
else:
# "label < filename.fdf" means that the label (_only_ that label) is to be resolved from filename.fdf
label = lbz(w[0])
fdf = _read_fdf(fname, inodes)
if label in fdf:
if _is_block(fdf[label]):
lines.append('%%block %s' % label)
lines += [' '.join(x) for x in fdf[label]]
lines.append('%%endblock %s' % label)
else:
lines.append('%s %s' % (label, ' '.join(fdf[label])))
#else: label unresolved! One should possibly issue a warning about this!
else:
# Simple include line L
lines.append(L)
return lines
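# Hedged illustration of the three include forms resolved above:
#
#   %include other.fdf                   -> splice in the whole file
#   %block LatticeVectors < vectors.dat  -> read the block body from vectors.dat
#   MeshCutoff < defaults.fdf            -> resolve only this label from defaults.fdf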
#
# The reason for creating a separate _read_fdf is simply to hide the inodes-argument
#
def _read_fdf(fname, inodes=[]):
# inodes is used to detect cyclic includes
fdf = {}
lbz = _labelize
lines = _read_fdf_lines(fname, inodes)
while lines:
w = lines.pop(0).split(None, 1)
if lbz(w[0]) == '%block':
# Block value
if len(w) == 2:
label = lbz(w[1])
content = []
while True:
if len(lines) == 0:
raise IOError('Unexpected EOF reached in %s, '
'un-ended block %s' % (fname, label))
w = lines.pop(0).split()
if lbz(w[0]) == '%endblock' and lbz(w[1]) == label:
break
content.append(w)
if label not in fdf:
# Only first appearance of label is to be used
fdf[label] = content
else:
raise IOError('%block statement without label')
else:
# Ordinary value
label = lbz(w[0])
if len(w) == 1:
# Siesta interpret blanks as True for logical variables
fdf[label] = []
else:
fdf[label] = w[1].split()
return fdf
def read_fdf(fname):
"""Read a siesta style fdf-file.
The data is returned as a dictionary
( label:value ).
All labels are converted to lower case characters and
are stripped of any '-', '_', or '.'.
Ordinary values are stored as a list of strings (split on whitespace),
and block values are stored as lists of lists of strings
(split per line, and on whitespace).
If a label occurs more than once, the first occurrence
takes precedence.
The implementation applies no intelligence, and does not
"understand" the data or the concept of units etc.
Values are never parsed in any way, just stored as
split strings.
The implementation tries to comply with the fdf-format
specification as presented in the siesta 2.0.2 manual.
An fdf-dictionary could e.g. look like this::
{'atomiccoordinatesandatomicspecies': [
['4.9999998', '5.7632392', '5.6095972', '1'],
['5.0000000', '6.5518100', '4.9929091', '2'],
['5.0000000', '4.9746683', '4.9929095', '2']],
'atomiccoordinatesformat': ['Ang'],
'chemicalspecieslabel': [['1', '8', 'O'],
['2', '1', 'H']],
'dmmixingweight': ['0.1'],
'dmnumberpulay': ['5'],
'dmusesavedm': ['True'],
'latticeconstant': ['1.000000', 'Ang'],
'latticevectors': [
['10.00000000', '0.00000000', '0.00000000'],
['0.00000000', '11.52647800', '0.00000000'],
['0.00000000', '0.00000000', '10.59630900']],
'maxscfiterations': ['120'],
'meshcutoff': ['2721.139566', 'eV'],
'numberofatoms': ['3'],
'numberofspecies': ['2'],
'paobasissize': ['dz'],
'solutionmethod': ['diagon'],
'systemlabel': ['H2O'],
'wavefunckpoints': [['0.0', '0.0', '0.0']],
'writedenchar': ['T'],
'xcauthors': ['PBE'],
'xcfunctional': ['GGA']}
"""
return _read_fdf(fname)
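# Hedged usage sketch (the filename is illustrative). Labels are normalised,
# so 'MeshCutoff' is looked up as 'meshcutoff':
#
#   fdf = read_fdf('H2O.fdf')
#   fdf['meshcutoff']        # -> ['2721.139566', 'eV']
#   fdf['latticevectors']    # -> list of three 3-element string lists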
def read_struct(fname):
"""Read a siesta struct file"""
from ase.atoms import Atoms, Atom
f = open(fname, 'r')
cell = []
for i in range(3):
cell.append([float(x) for x in f.readline().split()])
natoms = int(f.readline())
atoms = Atoms()
for atom in f:
Z, pos_x, pos_y, pos_z = atom.split()[1:]
atoms.append(Atom(int(Z), position = (float(pos_x), float(pos_y), float(pos_z))))
if len(atoms) != natoms:
raise IOError('Badly structured input file')
atoms.set_cell(cell, scale_atoms = True)
return atoms
|
gpl-2.0
|
theguardian/headphones
|
lib/beetsplug/fetchart.py
|
14
|
13052
|
# This file is part of beets.
# Copyright 2014, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Fetches album art.
"""
from contextlib import closing
import logging
import os
import re
from tempfile import NamedTemporaryFile
import requests
from beets import plugins
from beets import importer
from beets import ui
from beets import util
from beets import config
from beets.util.artresizer import ArtResizer
try:
import itunes
HAVE_ITUNES = True
except ImportError:
HAVE_ITUNES = False
IMAGE_EXTENSIONS = ['png', 'jpg', 'jpeg']
CONTENT_TYPES = ('image/jpeg',)
DOWNLOAD_EXTENSION = '.jpg'
log = logging.getLogger('beets')
requests_session = requests.Session()
requests_session.headers = {'User-Agent': 'beets'}
def _fetch_image(url):
"""Downloads an image from a URL and checks whether it seems to
actually be an image. If so, returns a path to the downloaded image.
Otherwise, returns None.
"""
log.debug(u'fetchart: downloading art: {0}'.format(url))
try:
with closing(requests_session.get(url, stream=True)) as resp:
if 'Content-Type' not in resp.headers \
or resp.headers['Content-Type'] not in CONTENT_TYPES:
log.debug(u'fetchart: not an image')
return
# Generate a temporary file with the correct extension.
with NamedTemporaryFile(suffix=DOWNLOAD_EXTENSION, delete=False) \
as fh:
for chunk in resp.iter_content():
fh.write(chunk)
log.debug(u'fetchart: downloaded art to: {0}'.format(
util.displayable_path(fh.name)
))
return fh.name
except (IOError, requests.RequestException):
log.debug(u'fetchart: error fetching art')
# ART SOURCES ################################################################
# Cover Art Archive.
CAA_URL = 'http://coverartarchive.org/release/{mbid}/front-500.jpg'
CAA_GROUP_URL = 'http://coverartarchive.org/release-group/{mbid}/front-500.jpg'
def caa_art(album):
"""Return the Cover Art Archive and Cover Art Archive release group URLs
using album MusicBrainz release ID and release group ID.
"""
if album.mb_albumid:
yield CAA_URL.format(mbid=album.mb_albumid)
if album.mb_releasegroupid:
yield CAA_GROUP_URL.format(mbid=album.mb_releasegroupid)
# Art from Amazon.
AMAZON_URL = 'http://images.amazon.com/images/P/%s.%02i.LZZZZZZZ.jpg'
AMAZON_INDICES = (1, 2)
def art_for_asin(album):
"""Generate URLs using Amazon ID (ASIN) string.
"""
if album.asin:
for index in AMAZON_INDICES:
yield AMAZON_URL % (album.asin, index)
# AlbumArt.org scraper.
AAO_URL = 'http://www.albumart.org/index_detail.php'
AAO_PAT = r'href\s*=\s*"([^>"]*)"[^>]*title\s*=\s*"View larger image"'
def aao_art(album):
"""Return art URL from AlbumArt.org using album ASIN.
"""
if not album.asin:
return
# Get the page from albumart.org.
try:
resp = requests_session.get(AAO_URL, params={'asin': album.asin})
log.debug(u'fetchart: scraped art URL: {0}'.format(resp.url))
except requests.RequestException:
log.debug(u'fetchart: error scraping art page')
return
# Search the page for the image URL.
m = re.search(AAO_PAT, resp.text)
if m:
image_url = m.group(1)
yield image_url
else:
log.debug(u'fetchart: no image found on page')
# Google Images scraper.
GOOGLE_URL = 'https://ajax.googleapis.com/ajax/services/search/images'
def google_art(album):
"""Return art URL from google.org given an album title and
interpreter.
"""
if not (album.albumartist and album.album):
return
search_string = (album.albumartist + ',' + album.album).encode('utf-8')
response = requests_session.get(GOOGLE_URL, params={
'v': '1.0',
'q': search_string,
'start': '0',
})
# Get results using JSON.
try:
results = response.json()
data = results['responseData']
dataInfo = data['results']
for myUrl in dataInfo:
yield myUrl['unescapedUrl']
except:
log.debug(u'fetchart: error scraping art page')
return
# Art from the iTunes Store.
def itunes_art(album):
"""Return art URL from iTunes Store given an album title.
"""
search_string = (album.albumartist + ' ' + album.album).encode('utf-8')
try:
# Isolate bugs in the iTunes library while searching.
try:
itunes_album = itunes.search_album(search_string)[0]
except Exception as exc:
log.debug('fetchart: iTunes search failed: {0}'.format(exc))
return
if itunes_album.get_artwork()['100']:
small_url = itunes_album.get_artwork()['100']
big_url = small_url.replace('100x100', '1200x1200')
yield big_url
else:
log.debug(u'fetchart: album has no artwork in iTunes Store')
except IndexError:
log.debug(u'fetchart: album not found in iTunes Store')
# Art from the filesystem.
def filename_priority(filename, cover_names):
"""Sort order for image names.
Return indexes of cover names found in the image filename. This
means that images with lower-numbered and more keywords will have higher
priority.
"""
return [idx for (idx, x) in enumerate(cover_names) if x in filename]
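# Hedged illustration: with cover_names = ['cover', 'front'],
# filename_priority('front-cover.jpg', cover_names) returns [0, 1]
# (both names occur in the filename), while
# filename_priority('folder.jpg', cover_names) returns [].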
def art_in_path(path, cover_names, cautious):
"""Look for album art files in a specified directory.
"""
if not os.path.isdir(path):
return
# Find all files that look like images in the directory.
images = []
for fn in os.listdir(path):
for ext in IMAGE_EXTENSIONS:
if fn.lower().endswith('.' + ext):
images.append(fn)
# Look for "preferred" filenames.
images = sorted(images, key=lambda x: filename_priority(x, cover_names))
cover_pat = r"(\b|_)({0})(\b|_)".format('|'.join(cover_names))
for fn in images:
if re.search(cover_pat, os.path.splitext(fn)[0], re.I):
log.debug(u'fetchart: using well-named art file {0}'.format(
util.displayable_path(fn)
))
return os.path.join(path, fn)
# Fall back to any image in the folder.
if images and not cautious:
log.debug(u'fetchart: using fallback art file {0}'.format(
util.displayable_path(images[0])
))
return os.path.join(path, images[0])
# Try each source in turn.
SOURCES_ALL = [u'coverart', u'itunes', u'amazon', u'albumart', u'google']
ART_FUNCS = {
u'coverart': caa_art,
u'itunes': itunes_art,
u'albumart': aao_art,
u'amazon': art_for_asin,
u'google': google_art,
}
def _source_urls(album, sources=SOURCES_ALL):
"""Generate possible source URLs for an album's art. The URLs are
not guaranteed to work so they each need to be attempted in turn.
This allows the main `art_for_album` function to abort iteration
through this sequence early to avoid the cost of scraping when not
necessary.
"""
for s in sources:
urls = ART_FUNCS[s](album)
for url in urls:
yield url
def art_for_album(album, paths, maxwidth=None, local_only=False):
"""Given an Album object, returns a path to downloaded art for the
album (or None if no art is found). If `maxwidth`, then images are
resized to this maximum pixel size. If `local_only`, then only local
image files from the filesystem are returned; no network requests
are made.
"""
out = None
# Local art.
cover_names = config['fetchart']['cover_names'].as_str_seq()
cover_names = map(util.bytestring_path, cover_names)
cautious = config['fetchart']['cautious'].get(bool)
if paths:
for path in paths:
out = art_in_path(path, cover_names, cautious)
if out:
break
# Web art sources.
remote_priority = config['fetchart']['remote_priority'].get(bool)
if not local_only and (remote_priority or not out):
for url in _source_urls(album,
config['fetchart']['sources'].as_str_seq()):
if maxwidth:
url = ArtResizer.shared.proxy_url(maxwidth, url)
candidate = _fetch_image(url)
if candidate:
out = candidate
break
if maxwidth and out:
out = ArtResizer.shared.resize(maxwidth, out)
return out
# PLUGIN LOGIC ###############################################################
def batch_fetch_art(lib, albums, force, maxwidth=None):
"""Fetch album art for each of the albums. This implements the manual
fetchart CLI command.
"""
for album in albums:
if album.artpath and not force:
message = 'has album art'
else:
# In ordinary invocations, look for images on the
# filesystem. When forcing, however, always go to the Web
# sources.
local_paths = None if force else [album.path]
path = art_for_album(album, local_paths, maxwidth)
if path:
album.set_art(path, False)
album.store()
message = ui.colorize('green', 'found album art')
else:
message = ui.colorize('red', 'no art found')
log.info(u'{0} - {1}: {2}'.format(album.albumartist, album.album,
message))
class FetchArtPlugin(plugins.BeetsPlugin):
def __init__(self):
super(FetchArtPlugin, self).__init__()
self.config.add({
'auto': True,
'maxwidth': 0,
'remote_priority': False,
'cautious': False,
'google_search': False,
'cover_names': ['cover', 'front', 'art', 'album', 'folder'],
'sources': SOURCES_ALL,
})
# Holds paths to downloaded images between fetching them and
# placing them in the filesystem.
self.art_paths = {}
self.maxwidth = self.config['maxwidth'].get(int)
if self.config['auto']:
# Enable two import hooks when fetching is enabled.
self.import_stages = [self.fetch_art]
self.register_listener('import_task_files', self.assign_art)
available_sources = list(SOURCES_ALL)
if not HAVE_ITUNES and u'itunes' in available_sources:
available_sources.remove(u'itunes')
self.config['sources'] = plugins.sanitize_choices(
self.config['sources'].as_str_seq(), available_sources)
# Asynchronous; after music is added to the library.
def fetch_art(self, session, task):
"""Find art for the album being imported."""
if task.is_album: # Only fetch art for full albums.
if task.choice_flag == importer.action.ASIS:
# For as-is imports, don't search Web sources for art.
local = True
elif task.choice_flag == importer.action.APPLY:
# Search everywhere for art.
local = False
else:
# For any other choices (e.g., TRACKS), do nothing.
return
path = art_for_album(task.album, task.paths, self.maxwidth, local)
if path:
self.art_paths[task] = path
# Synchronous; after music files are put in place.
def assign_art(self, session, task):
"""Place the discovered art in the filesystem."""
if task in self.art_paths:
path = self.art_paths.pop(task)
album = task.album
src_removed = (config['import']['delete'].get(bool) or
config['import']['move'].get(bool))
album.set_art(path, not src_removed)
album.store()
if src_removed:
task.prune(path)
# Manual album art fetching.
def commands(self):
cmd = ui.Subcommand('fetchart', help='download album art')
cmd.parser.add_option('-f', '--force', dest='force',
action='store_true', default=False,
help='re-download art when already present')
def func(lib, opts, args):
batch_fetch_art(lib, lib.albums(ui.decargs(args)), opts.force,
self.maxwidth)
cmd.func = func
return [cmd]
|
gpl-3.0
|
YtvwlD/od-oe
|
contrib/python/generate-manifest-2.4.py
|
45
|
13550
|
#!/usr/bin/env python
# generate Python Manifest for the OpenEmbedded build system
# (C) 2002-2007 Michael 'Mickey' Lauer <[email protected]>
# MIT license
import os
import sys
import time
VERSION = "2.4.4"
# increase when touching python-core
BASEREV = 2
__author__ = "Michael 'Mickey' Lauer <[email protected]>"
__version__ = "20070721"
class MakefileMaker:
def __init__( self, outfile ):
"""initialize"""
self.packages = {}
self.sourcePrefix = "/lib/python%s/" % VERSION[:3]
self.targetPrefix = "${libdir}/python%s" % VERSION[:3]
self.output = outfile
self.out( "#" * 120 )
self.out( "### AUTO-GENERATED by '%s' [(C) 2002-2007 Michael 'Mickey' Lauer <[email protected]>] on %s" % ( sys.argv[0], time.asctime() ) )
self.out( "###" )
self.out( "### Visit THE Python for Embedded Systems Site => http://www.Vanille.de/projects/python.spy" )
self.out( "###" )
self.out( "### Warning: Manual edits will be lost!" )
self.out( "###" )
self.out( "#" * 120 )
#
# helper functions
#
def out( self, data ):
"""print a line to the output file"""
print >> self.output, data
def setPrefix( self, sourcePrefix, targetPrefix ):
"""set a file prefix for addPackage files"""
self.sourcePrefix = sourcePrefix
self.targetPrefix = targetPrefix
def doProlog( self ):
self.out( """ """ )
self.out( "" )
def addPackage( self, revision, name, description, dependencies, filenames ):
"""add a package to the Makefile"""
if type( filenames ) == type( "" ):
filenames = filenames.split()
fullFilenames = []
for filename in filenames:
if filename[0] != "/":
fullFilenames.append( ( "%s%s" % ( self.sourcePrefix, filename ), "%s%s" % ( self.targetPrefix, filename ) ) )
else:
fullFilenames.append( ( filename, filename ) )
self.packages[name] = revision, description, dependencies, fullFilenames
def doBody( self ):
"""generate body of Makefile"""
global VERSION
#
# generate provides line
#
provideLine = 'PROVIDES+="'
for name in self.packages:
provideLine += "%s " % name
provideLine += '"'
self.out( provideLine )
self.out( "" )
#
# generate package line
#
packageLine = 'PACKAGES="'
for name in self.packages:
packageLine += "%s " % name
packageLine += '"'
self.out( packageLine )
self.out( "" )
#
# generate package variables
#
for name, data in self.packages.iteritems():
rev, desc, deps, files = data
#
# write out the description, revision and dependencies
#
self.out( 'DESCRIPTION_%s="%s"' % ( name, desc ) )
self.out( 'PR_%s="ml%d"' % ( name, rev + BASEREV ) )
self.out( 'RDEPENDS_%s="%s"' % ( name, deps.replace( ",", "" ) ) )
line = 'FILES_%s="' % name
#
# check which directories to make in the temporary directory
#
dirset = {} # if python had a set-datatype this would be sufficient. for now, we're using a dict instead.
for source, target in files:
dirset[os.path.dirname( target )] = True
#
# generate which files to copy for the target (-dfR because whole directories are also allowed)
#
for source, target in files:
line += "%s " % target
line += '"'
self.out( line )
self.out( "" )
def doEpilog( self ):
self.out( """""" )
self.out( "" )
def make( self ):
self.doProlog()
self.doBody()
self.doEpilog()
if __name__ == "__main__":
if len( sys.argv ) > 1:
os.popen( "rm -f ./%s" % sys.argv[1] )
outfile = file( sys.argv[1], "w" )
else:
outfile = sys.stdout
m = MakefileMaker( outfile )
# Add packages here. Only specify dlopen-style library dependencies here, no ldd-style dependencies!
# Parameters: revision, name, description, dependencies, filenames
#
m.setPrefix( "/", "/usr/" )
m.addPackage( 2, "python-core", "Python Interpreter and core modules (needed!)", "",
"lib/python2.4/__future__.* lib/python2.4/copy.* lib/python2.4/copy_reg.* lib/python2.4/ConfigParser.* " +
"lib/python2.4/getopt.* lib/python2.4/linecache.* lib/python2.4/new.* " +
"lib/python2.4/os.* lib/python2.4/posixpath.* " +
"lib/python2.4/warnings.* lib/python2.4/site.* lib/python2.4/stat.* " +
"lib/python2.4/UserDict.* lib/python2.4/UserList.* lib/python2.4/UserString.* " +
"lib/python2.4/lib-dynload/binascii.so lib/python2.4/lib-dynload/struct.so lib/python2.4/lib-dynload/time.so " +
"lib/python2.4/lib-dynload/xreadlines.so lib/python2.4/types.* bin/python*" )
m.addPackage( 0, "python-core-dbg", "Python core module debug information", "python-core",
"lib/python2.4/lib-dynload/.debug bin/.debug lib/.debug" )
m.addPackage( 0, "python-devel", "Python Development Package", "python-core",
"include lib/python2.4/config" ) # package
m.addPackage( 0, "python-idle", "Python Integrated Development Environment", "python-core, python-tkinter",
"bin/idle lib/python2.4/idlelib" ) # package
m.addPackage( 0, "python-pydoc", "Python Interactive Help Support", "python-core, python-lang, python-stringold, python-re",
"bin/pydoc lib/python2.4/pydoc.*" )
m.addPackage( 0, "python-smtpd", "Python Simple Mail Transport Daemon", "python-core python-netserver python-email python-mime",
"bin/smtpd.*" )
m.setPrefix( "/lib/python2.4/", "${libdir}/python2.4/" )
m.addPackage( 0, "python-audio", "Python Audio Handling", "python-core",
"wave.* chunk.* sndhdr.* lib-dynload/ossaudiodev.so lib-dynload/audioop.so" )
m.addPackage( 0, "python-bsddb", "Python Berkeley Database Bindings", "python-core",
"bsddb" ) # package
m.addPackage( 0, "python-codecs", "Python Codecs, Encodings & i18n Support", "python-core",
"codecs.* encodings gettext.* locale.* lib-dynload/_locale.so lib-dynload/unicodedata.so stringprep.* xdrlib.*" )
m.addPackage( 0, "python-compile", "Python Bytecode Compilation Support", "python-core",
"py_compile.* compileall.*" )
m.addPackage( 0, "python-compiler", "Python Compiler Support", "python-core",
"compiler" ) # package
m.addPackage( 0, "python-compression", "Python High Level Compression Support", "python-core, python-zlib",
"gzip.* zipfile.*" )
m.addPackage( 0, "python-crypt", "Python Basic Cryptographic and Hashing Support", "python-core",
"lib-dynload/crypt.so lib-dynload/md5.so lib-dynload/rotor.so lib-dynload/sha.so" )
m.addPackage( 0, "python-textutils", "Python Option Parsing, Text Wrapping and Comma-Separated-Value Support", "python-core, python-io, python-re, python-stringold",
"lib-dynload/_csv.so csv.* optparse.* textwrap.*" )
m.addPackage( 0, "python-curses", "Python Curses Support", "python-core",
"curses lib-dynload/_curses.so lib-dynload/_curses_panel.so" ) # package
m.addPackage( 0, "python-datetime", "Python Calendar and Time support", "python-core, python-codecs",
"_strptime.* calendar.* lib-dynload/datetime.so" )
m.addPackage( 0, "python-db", "Python File-Based Database Support", "python-core",
"anydbm.* dumbdbm.* whichdb.* " )
m.addPackage( 0, "python-debugger", "Python Debugger", "python-core, python-io, python-lang, python-re, python-stringold, python-shell",
"bdb.* pdb.*" )
m.addPackage( 0, "python-distutils", "Python Distribution Utilities", "python-core",
"config distutils" ) # package
m.addPackage( 0, "python-email", "Python Email Support", "python-core, python-io, python-re, python-mime, python-audio python-image",
"email" ) # package
m.addPackage( 0, "python-fcntl", "Python's fcntl Interface", "python-core",
"lib-dynload/fcntl.so" )
m.addPackage( 0, "python-hotshot", "Python Hotshot Profiler", "python-core",
"hotshot lib-dynload/_hotshot.so" )
m.addPackage( 0, "python-html", "Python HTML Processing", "python-core",
"formatter.* htmlentitydefs.* htmllib.* markupbase.* sgmllib.* " )
m.addPackage( 0, "python-gdbm", "Python GNU Database Support", "python-core",
"lib-dynload/gdbm.so" )
m.addPackage( 0, "python-image", "Python Graphical Image Handling", "python-core",
"colorsys.* imghdr.* lib-dynload/imageop.so lib-dynload/rgbimg.so" )
m.addPackage( 0, "python-io", "Python Low-Level I/O", "python-core, python-math",
"lib-dynload/_socket.so lib-dynload/_ssl.so lib-dynload/select.so lib-dynload/termios.so lib-dynload/cStringIO.so "
"pipes.* socket.* tempfile.* StringIO.* " )
m.addPackage( 0, "python-lang", "Python Low-Level Language Support", "python-core",
"lib-dynload/array.so lib-dynload/parser.so lib-dynload/operator.so lib-dynload/_weakref.so " +
"lib-dynload/itertools.so lib-dynload/collections.so lib-dynload/_bisect.so lib-dynload/_heapq.so " +
"atexit.* bisect.* code.* codeop.* dis.* heapq.* inspect.* keyword.* opcode.* repr.* token.* tokenize.* " +
"traceback.* linecache.* weakref.*" )
m.addPackage( 0, "python-logging", "Python Logging Support", "python-core",
"logging" ) # package
m.addPackage( 0, "python-lib-old-and-deprecated", "Python Deprecated Libraries", "python-core",
"lib-old" ) # package
m.addPackage( 0, "python-tkinter", "Python Tcl/Tk Bindings", "python-core",
"lib-dynload/_tkinter.so lib-tk" ) # package
m.addPackage( 0, "python-math", "Python Math Support", "python-core",
"lib-dynload/cmath.so lib-dynload/math.so lib-dynload/_random.so random.* sets.*" )
m.addPackage( 0, "python-mime", "Python MIME Handling APIs", "python-core, python-io",
"mimetools.* uu.* quopri.* rfc822.*" )
m.addPackage( 0, "python-mmap", "Python Memory-Mapped-File Support", "python-core, python-io",
"lib-dynload/mmap.so " )
m.addPackage( 0, "python-unixadmin", "Python Unix Administration Support", "python-core",
"lib-dynload/nis.so lib-dynload/grp.so lib-dynload/pwd.so getpass.*" )
m.addPackage( 0, "python-netclient", "Python Internet Protocol Clients", "python-core, python-datetime, python-io, python-lang, python-logging, python-mime",
"*Cookie*.* " +
"base64.* cookielib.* ftplib.* gopherlib.* hmac.* httplib.* mimetypes.* nntplib.* poplib.* smtplib.* telnetlib.* urllib.* urllib2.* urlparse.*" )
m.addPackage( 0, "python-netserver", "Python Internet Protocol Servers", "python-core, python-netclient",
"cgi.* BaseHTTPServer.* SimpleHTTPServer.* SocketServer.*" )
m.addPackage( 0, "python-pickle", "Python Persistence Support", "python-core, python-codecs, python-io, python-re",
"pickle.* shelve.* lib-dynload/cPickle.so" )
m.addPackage( 0, "python-pprint", "Python Pretty-Print Support", "python-core",
"pprint.*" )
m.addPackage( 0, "python-profile", "Python Basic Profiling Support", "python-core",
"profile.* pstats.*" )
m.addPackage( 0, "python-re", "Python Regular Expression APIs", "python-core",
"re.* sre.* sre_compile.* sre_constants* sre_parse.*" ) # _sre is builtin
m.addPackage( 0, "python-readline", "Python Readline Support", "python-core",
"lib-dynload/readline.so rlcompleter.*" )
m.addPackage( 0, "python-resource", "Python Resource Control Interface", "python-core",
"lib-dynload/resource.so" )
m.addPackage( 0, "python-shell", "Python Shell-Like Functionality", "python-core, python-re",
"cmd.* commands.* dircache.* fnmatch.* glob.* popen2.* shutil.*" )
m.addPackage( 0, "python-robotparser", "Python robots.txt parser", "python-core, python-netclient",
"robotparser.*")
m.addPackage( 0, "python-subprocess", "Python Subprocess Support", "python-core, python-io, python-re, python-fcntl, python-pickle",
"subprocess.*" )
m.addPackage( 0, "python-stringold", "Python String APIs [deprecated]", "python-core, python-re",
"lib-dynload/strop.so string.*" )
m.addPackage( 0, "python-syslog", "Python's syslog Interface", "python-core",
"lib-dynload/syslog.so" )
m.addPackage( 0, "python-terminal", "Python Terminal Controlling Support", "python-core, python-io",
"pty.* tty.*" )
m.addPackage( 0, "python-tests", "Python Tests", "python-core",
"test" ) # package
m.addPackage( 0, "python-threading", "Python Threading & Synchronization Support", "python-core, python-lang",
"_threading_local.* dummy_thread.* dummy_threading.* mutex.* threading.* Queue.*" )
m.addPackage( 0, "python-unittest", "Python Unit Testing Framework", "python-core, python-stringold, python-lang",
"unittest.*" )
m.addPackage( 0, "python-xml", "Python basic XML support.", "python-core, python-re",
"lib-dynload/pyexpat.so xml xmllib.*" ) # package
m.addPackage( 0, "python-xmlrpc", "Python XMLRPC Support", "python-core, python-xml, python-netserver, python-lang",
"xmlrpclib.* SimpleXMLRPCServer.*" )
m.addPackage( 0, "python-zlib", "Python zlib Support.", "python-core",
"lib-dynload/zlib.so" )
m.addPackage( 0, "python-mailbox", "Python Mailbox Format Support", "python-core, python-mime",
"mailbox.*" )
m.make()
|
mit
|
alfa-jor/addon
|
plugin.video.alfa/servers/videobb.py
|
1
|
1037
|
# -*- coding: utf-8 -*-
# --------------------------------------------------------
# Conector videobb By Alfa development Group
# --------------------------------------------------------
from core import httptools
from core import scrapertools
from platformcode import logger
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
data = httptools.downloadpage(page_url).data
if "no longer exists" in data:
return False, "[videobb] El video ha sido borrado"
return True, ""
def get_video_url(page_url, user="", password="", video_password=""):
logger.info("(page_url='%s')" % page_url)
video_urls = []
id = scrapertools.find_single_match(page_url, "v/(\w+)")
post = "r=&d=videobb.ru"
headers = {"x-requested-with":"XMLHttpRequest"}
data = httptools.downloadpage(page_url, post=post, headers=headers).json
for url in data["data"]:
video_urls.append([url["label"] + " [videobb]", url["file"]])
#logger.info("Intel11 %s" %data)
return video_urls
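# Hedged illustration of the returned structure (labels and URLs are invented):
#   [['360p [videobb]', 'https://.../360.mp4'],
#    ['720p [videobb]', 'https://.../720.mp4']]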
|
gpl-3.0
|
ribag/ganeti-experiments
|
test/py/ganeti.daemon_unittest.py
|
5
|
24843
|
#!/usr/bin/python
#
# Copyright (C) 2010 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
"""Script for unittesting the daemon module"""
import unittest
import signal
import os
import socket
import time
import tempfile
import shutil
from ganeti import daemon
from ganeti import errors
from ganeti import constants
from ganeti import utils
import testutils
class TestMainloop(testutils.GanetiTestCase):
"""Test daemon.Mainloop"""
def setUp(self):
testutils.GanetiTestCase.setUp(self)
self.mainloop = daemon.Mainloop()
self.sendsig_events = []
self.onsignal_events = []
def _CancelEvent(self, handle):
self.mainloop.scheduler.cancel(handle)
def _SendSig(self, sig):
self.sendsig_events.append(sig)
os.kill(os.getpid(), sig)
def OnSignal(self, signum):
self.onsignal_events.append(signum)
def testRunAndTermBySched(self):
self.mainloop.scheduler.enter(0.1, 1, self._SendSig, [signal.SIGTERM])
self.mainloop.Run() # terminates by _SendSig being scheduled
self.assertEquals(self.sendsig_events, [signal.SIGTERM])
def testTerminatingSignals(self):
self.mainloop.scheduler.enter(0.1, 1, self._SendSig, [signal.SIGCHLD])
self.mainloop.scheduler.enter(0.2, 1, self._SendSig, [signal.SIGINT])
self.mainloop.Run()
self.assertEquals(self.sendsig_events, [signal.SIGCHLD, signal.SIGINT])
self.mainloop.scheduler.enter(0.1, 1, self._SendSig, [signal.SIGTERM])
self.mainloop.Run()
self.assertEquals(self.sendsig_events, [signal.SIGCHLD, signal.SIGINT,
signal.SIGTERM])
def testSchedulerCancel(self):
handle = self.mainloop.scheduler.enter(0.1, 1, self._SendSig,
[signal.SIGTERM])
self.mainloop.scheduler.cancel(handle)
self.mainloop.scheduler.enter(0.2, 1, self._SendSig, [signal.SIGCHLD])
self.mainloop.scheduler.enter(0.3, 1, self._SendSig, [signal.SIGTERM])
self.mainloop.Run()
self.assertEquals(self.sendsig_events, [signal.SIGCHLD, signal.SIGTERM])
def testRegisterSignal(self):
self.mainloop.RegisterSignal(self)
self.mainloop.scheduler.enter(0.1, 1, self._SendSig, [signal.SIGCHLD])
handle = self.mainloop.scheduler.enter(0.1, 1, self._SendSig,
[signal.SIGTERM])
self.mainloop.scheduler.cancel(handle)
self.mainloop.scheduler.enter(0.2, 1, self._SendSig, [signal.SIGCHLD])
self.mainloop.scheduler.enter(0.3, 1, self._SendSig, [signal.SIGTERM])
# ...not delivered because they are scheduled after TERM
self.mainloop.scheduler.enter(0.4, 1, self._SendSig, [signal.SIGCHLD])
self.mainloop.scheduler.enter(0.5, 1, self._SendSig, [signal.SIGCHLD])
self.mainloop.Run()
self.assertEquals(self.sendsig_events,
[signal.SIGCHLD, signal.SIGCHLD, signal.SIGTERM])
self.assertEquals(self.onsignal_events, self.sendsig_events)
def testDeferredCancel(self):
self.mainloop.RegisterSignal(self)
now = time.time()
self.mainloop.scheduler.enterabs(now + 0.1, 1, self._SendSig,
[signal.SIGCHLD])
handle1 = self.mainloop.scheduler.enterabs(now + 0.3, 2, self._SendSig,
[signal.SIGCHLD])
handle2 = self.mainloop.scheduler.enterabs(now + 0.4, 2, self._SendSig,
[signal.SIGCHLD])
self.mainloop.scheduler.enterabs(now + 0.2, 1, self._CancelEvent,
[handle1])
self.mainloop.scheduler.enterabs(now + 0.2, 1, self._CancelEvent,
[handle2])
self.mainloop.scheduler.enter(0.5, 1, self._SendSig, [signal.SIGTERM])
self.mainloop.Run()
self.assertEquals(self.sendsig_events, [signal.SIGCHLD, signal.SIGTERM])
self.assertEquals(self.onsignal_events, self.sendsig_events)
def testReRun(self):
self.mainloop.RegisterSignal(self)
self.mainloop.scheduler.enter(0.1, 1, self._SendSig, [signal.SIGCHLD])
self.mainloop.scheduler.enter(0.2, 1, self._SendSig, [signal.SIGCHLD])
self.mainloop.scheduler.enter(0.3, 1, self._SendSig, [signal.SIGTERM])
self.mainloop.scheduler.enter(0.4, 1, self._SendSig, [signal.SIGCHLD])
self.mainloop.scheduler.enter(0.5, 1, self._SendSig, [signal.SIGCHLD])
self.mainloop.Run()
self.assertEquals(self.sendsig_events,
[signal.SIGCHLD, signal.SIGCHLD, signal.SIGTERM])
self.assertEquals(self.onsignal_events, self.sendsig_events)
self.mainloop.scheduler.enter(0.3, 1, self._SendSig, [signal.SIGTERM])
self.mainloop.Run()
self.assertEquals(self.sendsig_events,
[signal.SIGCHLD, signal.SIGCHLD, signal.SIGTERM,
signal.SIGCHLD, signal.SIGCHLD, signal.SIGTERM])
self.assertEquals(self.onsignal_events, self.sendsig_events)
def testPriority(self):
# for events at the same time, the highest priority one executes first
now = time.time()
self.mainloop.scheduler.enterabs(now + 0.1, 2, self._SendSig,
[signal.SIGCHLD])
self.mainloop.scheduler.enterabs(now + 0.1, 1, self._SendSig,
[signal.SIGTERM])
self.mainloop.Run()
self.assertEquals(self.sendsig_events, [signal.SIGTERM])
self.mainloop.scheduler.enter(0.2, 1, self._SendSig, [signal.SIGTERM])
self.mainloop.Run()
self.assertEquals(self.sendsig_events,
[signal.SIGTERM, signal.SIGCHLD, signal.SIGTERM])
class _MyAsyncUDPSocket(daemon.AsyncUDPSocket):
def __init__(self, family):
daemon.AsyncUDPSocket.__init__(self, family)
self.received = []
self.error_count = 0
def handle_datagram(self, payload, ip, port):
self.received.append((payload))
if payload == "terminate":
os.kill(os.getpid(), signal.SIGTERM)
elif payload == "error":
raise errors.GenericError("error")
def handle_error(self):
self.error_count += 1
raise
class _BaseAsyncUDPSocketTest:
"""Base class for AsyncUDPSocket tests"""
family = None
address = None
def setUp(self):
self.mainloop = daemon.Mainloop()
self.server = _MyAsyncUDPSocket(self.family)
self.client = _MyAsyncUDPSocket(self.family)
self.server.bind((self.address, 0))
self.port = self.server.getsockname()[1]
# Save utils.IgnoreSignals so we can do evil things to it...
self.saved_utils_ignoresignals = utils.IgnoreSignals
def tearDown(self):
self.server.close()
self.client.close()
# ...and restore it as well
utils.IgnoreSignals = self.saved_utils_ignoresignals
testutils.GanetiTestCase.tearDown(self)
def testNoDoubleBind(self):
self.assertRaises(socket.error, self.client.bind, (self.address, self.port))
def testAsyncClientServer(self):
self.client.enqueue_send(self.address, self.port, "p1")
self.client.enqueue_send(self.address, self.port, "p2")
self.client.enqueue_send(self.address, self.port, "terminate")
self.mainloop.Run()
self.assertEquals(self.server.received, ["p1", "p2", "terminate"])
def testSyncClientServer(self):
self.client.handle_write()
self.client.enqueue_send(self.address, self.port, "p1")
self.client.enqueue_send(self.address, self.port, "p2")
while self.client.writable():
self.client.handle_write()
self.server.process_next_packet()
self.assertEquals(self.server.received, ["p1"])
self.server.process_next_packet()
self.assertEquals(self.server.received, ["p1", "p2"])
self.client.enqueue_send(self.address, self.port, "p3")
while self.client.writable():
self.client.handle_write()
self.server.process_next_packet()
self.assertEquals(self.server.received, ["p1", "p2", "p3"])
def testErrorHandling(self):
self.client.enqueue_send(self.address, self.port, "p1")
self.client.enqueue_send(self.address, self.port, "p2")
self.client.enqueue_send(self.address, self.port, "error")
self.client.enqueue_send(self.address, self.port, "p3")
self.client.enqueue_send(self.address, self.port, "error")
self.client.enqueue_send(self.address, self.port, "terminate")
self.assertRaises(errors.GenericError, self.mainloop.Run)
self.assertEquals(self.server.received,
["p1", "p2", "error"])
self.assertEquals(self.server.error_count, 1)
self.assertRaises(errors.GenericError, self.mainloop.Run)
self.assertEquals(self.server.received,
["p1", "p2", "error", "p3", "error"])
self.assertEquals(self.server.error_count, 2)
self.mainloop.Run()
self.assertEquals(self.server.received,
["p1", "p2", "error", "p3", "error", "terminate"])
self.assertEquals(self.server.error_count, 2)
def testSignaledWhileReceiving(self):
utils.IgnoreSignals = lambda fn, *args, **kwargs: None
self.client.enqueue_send(self.address, self.port, "p1")
self.client.enqueue_send(self.address, self.port, "p2")
self.server.handle_read()
self.assertEquals(self.server.received, [])
self.client.enqueue_send(self.address, self.port, "terminate")
utils.IgnoreSignals = self.saved_utils_ignoresignals
self.mainloop.Run()
self.assertEquals(self.server.received, ["p1", "p2", "terminate"])
def testOversizedDatagram(self):
oversized_data = (constants.MAX_UDP_DATA_SIZE + 1) * "a"
self.assertRaises(errors.UdpDataSizeError, self.client.enqueue_send,
self.address, self.port, oversized_data)
class TestAsyncIP4UDPSocket(testutils.GanetiTestCase, _BaseAsyncUDPSocketTest):
"""Test IP4 daemon.AsyncUDPSocket"""
family = socket.AF_INET
address = "127.0.0.1"
def setUp(self):
testutils.GanetiTestCase.setUp(self)
_BaseAsyncUDPSocketTest.setUp(self)
def tearDown(self):
testutils.GanetiTestCase.tearDown(self)
_BaseAsyncUDPSocketTest.tearDown(self)
class TestAsyncIP6UDPSocket(testutils.GanetiTestCase, _BaseAsyncUDPSocketTest):
"""Test IP6 daemon.AsyncUDPSocket"""
family = socket.AF_INET6
address = "::1"
def setUp(self):
testutils.GanetiTestCase.setUp(self)
_BaseAsyncUDPSocketTest.setUp(self)
def tearDown(self):
testutils.GanetiTestCase.tearDown(self)
_BaseAsyncUDPSocketTest.tearDown(self)
class _MyAsyncStreamServer(daemon.AsyncStreamServer):
def __init__(self, family, address, handle_connection_fn):
daemon.AsyncStreamServer.__init__(self, family, address)
self.handle_connection_fn = handle_connection_fn
self.error_count = 0
self.expt_count = 0
def handle_connection(self, connected_socket, client_address):
self.handle_connection_fn(connected_socket, client_address)
def handle_error(self):
self.error_count += 1
self.close()
raise
def handle_expt(self):
self.expt_count += 1
self.close()
class _MyMessageStreamHandler(daemon.AsyncTerminatedMessageStream):
def __init__(self, connected_socket, client_address, terminator, family,
message_fn, client_id, unhandled_limit):
daemon.AsyncTerminatedMessageStream.__init__(self, connected_socket,
client_address,
terminator, family,
unhandled_limit)
self.message_fn = message_fn
self.client_id = client_id
self.error_count = 0
def handle_message(self, message, message_id):
self.message_fn(self, message, message_id)
def handle_error(self):
self.error_count += 1
raise
class TestAsyncStreamServerTCP(testutils.GanetiTestCase):
"""Test daemon.AsyncStreamServer with a TCP connection"""
family = socket.AF_INET
def setUp(self):
testutils.GanetiTestCase.setUp(self)
self.mainloop = daemon.Mainloop()
self.address = self.getAddress()
self.server = _MyAsyncStreamServer(self.family, self.address,
self.handle_connection)
self.client_handler = _MyMessageStreamHandler
self.unhandled_limit = None
self.terminator = "\3"
self.address = self.server.getsockname()
self.clients = []
self.connections = []
self.messages = {}
self.connect_terminate_count = 0
self.message_terminate_count = 0
self.next_client_id = 0
# Save utils.IgnoreSignals so we can do evil things to it...
self.saved_utils_ignoresignals = utils.IgnoreSignals
def tearDown(self):
for c in self.clients:
c.close()
for c in self.connections:
c.close()
self.server.close()
# ...and restore it as well
utils.IgnoreSignals = self.saved_utils_ignoresignals
testutils.GanetiTestCase.tearDown(self)
def getAddress(self):
return ("127.0.0.1", 0)
def countTerminate(self, name):
value = getattr(self, name)
if value is not None:
value -= 1
setattr(self, name, value)
if value <= 0:
os.kill(os.getpid(), signal.SIGTERM)
def handle_connection(self, connected_socket, client_address):
client_id = self.next_client_id
self.next_client_id += 1
client_handler = self.client_handler(connected_socket, client_address,
self.terminator, self.family,
self.handle_message,
client_id, self.unhandled_limit)
self.connections.append(client_handler)
self.countTerminate("connect_terminate_count")
def handle_message(self, handler, message, message_id):
self.messages.setdefault(handler.client_id, [])
    # Strictly we should only check that the message_ids are monotonically
    # increasing. Since these unit tests never remove messages from the
    # received queue, though, we can require that the queue length equals the
    # message id before pushing the message onto it. This is a more
    # restrictive check, but we can live with it for now.
self.assertEquals(len(self.messages[handler.client_id]), message_id)
self.messages[handler.client_id].append(message)
if message == "error":
raise errors.GenericError("error")
self.countTerminate("message_terminate_count")
def getClient(self):
client = socket.socket(self.family, socket.SOCK_STREAM)
client.connect(self.address)
self.clients.append(client)
return client
def tearDown(self):
testutils.GanetiTestCase.tearDown(self)
self.server.close()
def testConnect(self):
self.getClient()
self.mainloop.Run()
self.assertEquals(len(self.connections), 1)
self.getClient()
self.mainloop.Run()
self.assertEquals(len(self.connections), 2)
self.connect_terminate_count = 4
self.getClient()
self.getClient()
self.getClient()
self.getClient()
self.mainloop.Run()
self.assertEquals(len(self.connections), 6)
def testBasicMessage(self):
self.connect_terminate_count = None
client = self.getClient()
client.send("ciao\3")
self.mainloop.Run()
self.assertEquals(len(self.connections), 1)
self.assertEquals(len(self.messages[0]), 1)
self.assertEquals(self.messages[0][0], "ciao")
def testDoubleMessage(self):
self.connect_terminate_count = None
client = self.getClient()
client.send("ciao\3")
self.mainloop.Run()
client.send("foobar\3")
self.mainloop.Run()
self.assertEquals(len(self.connections), 1)
self.assertEquals(len(self.messages[0]), 2)
self.assertEquals(self.messages[0][1], "foobar")
def testComposedMessage(self):
self.connect_terminate_count = None
self.message_terminate_count = 3
client = self.getClient()
client.send("one\3composed\3message\3")
self.mainloop.Run()
self.assertEquals(len(self.messages[0]), 3)
self.assertEquals(self.messages[0], ["one", "composed", "message"])
def testLongTerminator(self):
self.terminator = "\0\1\2"
self.connect_terminate_count = None
self.message_terminate_count = 3
client = self.getClient()
client.send("one\0\1\2composed\0\1\2message\0\1\2")
self.mainloop.Run()
self.assertEquals(len(self.messages[0]), 3)
self.assertEquals(self.messages[0], ["one", "composed", "message"])
def testErrorHandling(self):
self.connect_terminate_count = None
self.message_terminate_count = None
client = self.getClient()
client.send("one\3two\3error\3three\3")
self.assertRaises(errors.GenericError, self.mainloop.Run)
self.assertEquals(self.connections[0].error_count, 1)
self.assertEquals(self.messages[0], ["one", "two", "error"])
client.send("error\3")
self.assertRaises(errors.GenericError, self.mainloop.Run)
self.assertEquals(self.connections[0].error_count, 2)
self.assertEquals(self.messages[0], ["one", "two", "error", "three",
"error"])
def testDoubleClient(self):
self.connect_terminate_count = None
self.message_terminate_count = 2
client1 = self.getClient()
client2 = self.getClient()
client1.send("c1m1\3")
client2.send("c2m1\3")
self.mainloop.Run()
self.assertEquals(self.messages[0], ["c1m1"])
self.assertEquals(self.messages[1], ["c2m1"])
def testUnterminatedMessage(self):
self.connect_terminate_count = None
self.message_terminate_count = 3
client1 = self.getClient()
client2 = self.getClient()
client1.send("message\3unterminated")
client2.send("c2m1\3c2m2\3")
self.mainloop.Run()
self.assertEquals(self.messages[0], ["message"])
self.assertEquals(self.messages[1], ["c2m1", "c2m2"])
client1.send("message\3")
self.mainloop.Run()
self.assertEquals(self.messages[0], ["message", "unterminatedmessage"])
def testSignaledWhileAccepting(self):
utils.IgnoreSignals = lambda fn, *args, **kwargs: None
client1 = self.getClient()
self.server.handle_accept()
    # When interrupted while accepting we don't get a connection, but we
    # shouldn't have crashed either.
self.assertEquals(len(self.connections), 0)
utils.IgnoreSignals = self.saved_utils_ignoresignals
self.mainloop.Run()
self.assertEquals(len(self.connections), 1)
def testSendMessage(self):
self.connect_terminate_count = None
self.message_terminate_count = 3
client1 = self.getClient()
client2 = self.getClient()
client1.send("one\3composed\3message\3")
self.mainloop.Run()
self.assertEquals(self.messages[0], ["one", "composed", "message"])
self.assertFalse(self.connections[0].writable())
self.assertFalse(self.connections[1].writable())
self.connections[0].send_message("r0")
self.assert_(self.connections[0].writable())
self.assertFalse(self.connections[1].writable())
self.connections[0].send_message("r1")
self.connections[0].send_message("r2")
# We currently have no way to terminate the mainloop on write events, but
# let's assume handle_write will be called if writable() is True.
while self.connections[0].writable():
self.connections[0].handle_write()
client1.setblocking(0)
client2.setblocking(0)
self.assertEquals(client1.recv(4096), "r0\3r1\3r2\3")
self.assertRaises(socket.error, client2.recv, 4096)
def testLimitedUnhandledMessages(self):
self.connect_terminate_count = None
self.message_terminate_count = 3
self.unhandled_limit = 2
client1 = self.getClient()
client2 = self.getClient()
client1.send("one\3composed\3long\3message\3")
client2.send("c2one\3")
self.mainloop.Run()
self.assertEquals(self.messages[0], ["one", "composed"])
self.assertEquals(self.messages[1], ["c2one"])
self.assertFalse(self.connections[0].readable())
self.assert_(self.connections[1].readable())
self.connections[0].send_message("r0")
self.message_terminate_count = None
client1.send("another\3")
    # When we write replies, queued messages also get handled, but not the
    # ones still pending in the socket buffer.
while self.connections[0].writable():
self.connections[0].handle_write()
self.assertFalse(self.connections[0].readable())
self.assertEquals(self.messages[0], ["one", "composed", "long"])
self.connections[0].send_message("r1")
self.connections[0].send_message("r2")
while self.connections[0].writable():
self.connections[0].handle_write()
self.assertEquals(self.messages[0], ["one", "composed", "long", "message"])
self.assert_(self.connections[0].readable())
def testLimitedUnhandledMessagesOne(self):
self.connect_terminate_count = None
self.message_terminate_count = 2
self.unhandled_limit = 1
client1 = self.getClient()
client2 = self.getClient()
client1.send("one\3composed\3message\3")
client2.send("c2one\3")
self.mainloop.Run()
self.assertEquals(self.messages[0], ["one"])
self.assertEquals(self.messages[1], ["c2one"])
self.assertFalse(self.connections[0].readable())
self.assertFalse(self.connections[1].readable())
self.connections[0].send_message("r0")
self.message_terminate_count = None
while self.connections[0].writable():
self.connections[0].handle_write()
self.assertFalse(self.connections[0].readable())
self.assertEquals(self.messages[0], ["one", "composed"])
self.connections[0].send_message("r2")
self.connections[0].send_message("r3")
while self.connections[0].writable():
self.connections[0].handle_write()
self.assertEquals(self.messages[0], ["one", "composed", "message"])
self.assert_(self.connections[0].readable())
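# Hedged illustration (not part of ganeti): the stream tests above exercise
# terminator-based framing, where bytes are buffered until the terminator
# shows up and every complete chunk becomes one message. A minimal standalone
# sketch of that splitting step (the terminator values match the tests):
def _split_terminated_sketch(buf, terminator="\3"):
  """Return (complete_messages, unterminated_tail)."""
  parts = buf.split(terminator)
  # Everything before the last separator is complete; the tail stays buffered
  # until more data arrives, as testUnterminatedMessage shows.
  return parts[:-1], parts[-1]
# E.g. _split_terminated_sketch("one\3composed\3message\3") gives
# (["one", "composed", "message"], "") and
# _split_terminated_sketch("message\3unterminated") gives
# (["message"], "unterminated").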
class TestAsyncStreamServerUnixPath(TestAsyncStreamServerTCP):
"""Test daemon.AsyncStreamServer with a Unix path connection"""
family = socket.AF_UNIX
def getAddress(self):
self.tmpdir = tempfile.mkdtemp()
return os.path.join(self.tmpdir, "server.sock")
def tearDown(self):
shutil.rmtree(self.tmpdir)
TestAsyncStreamServerTCP.tearDown(self)
class TestAsyncStreamServerUnixAbstract(TestAsyncStreamServerTCP):
"""Test daemon.AsyncStreamServer with a Unix abstract connection"""
family = socket.AF_UNIX
def getAddress(self):
return "\0myabstractsocketaddress"
class TestAsyncAwaker(testutils.GanetiTestCase):
"""Test daemon.AsyncAwaker"""
family = socket.AF_INET
def setUp(self):
testutils.GanetiTestCase.setUp(self)
self.mainloop = daemon.Mainloop()
self.awaker = daemon.AsyncAwaker(signal_fn=self.handle_signal)
self.signal_count = 0
self.signal_terminate_count = 1
def tearDown(self):
self.awaker.close()
def handle_signal(self):
self.signal_count += 1
self.signal_terminate_count -= 1
if self.signal_terminate_count <= 0:
os.kill(os.getpid(), signal.SIGTERM)
def testBasicSignaling(self):
self.awaker.signal()
self.mainloop.Run()
self.assertEquals(self.signal_count, 1)
def testDoubleSignaling(self):
self.awaker.signal()
self.awaker.signal()
self.mainloop.Run()
# The second signal is never delivered
self.assertEquals(self.signal_count, 1)
def testReallyDoubleSignaling(self):
self.assert_(self.awaker.readable())
self.awaker.signal()
# Let's suppose two threads overlap, and both find need_signal True
self.awaker.need_signal = True
self.awaker.signal()
self.mainloop.Run()
# We still get only one signaling
self.assertEquals(self.signal_count, 1)
def testNoSignalFnArgument(self):
myawaker = daemon.AsyncAwaker()
self.assertRaises(socket.error, myawaker.handle_read)
myawaker.signal()
myawaker.handle_read()
self.assertRaises(socket.error, myawaker.handle_read)
myawaker.signal()
myawaker.signal()
myawaker.handle_read()
self.assertRaises(socket.error, myawaker.handle_read)
myawaker.close()
def testWrongSignalFnArgument(self):
self.assertRaises(AssertionError, daemon.AsyncAwaker, 1)
self.assertRaises(AssertionError, daemon.AsyncAwaker, "string")
self.assertRaises(AssertionError, daemon.AsyncAwaker, signal_fn=1)
self.assertRaises(AssertionError, daemon.AsyncAwaker, signal_fn="string")
if __name__ == "__main__":
testutils.GanetiTestProgram()
|
gpl-2.0
|
ktosiek/spacewalk
|
client/rhel/rhn-client-tools/src/firstboot-legacy-rhel6/rhn_choose_server_gui.py
|
4
|
3314
|
# Copyright 2006--2010 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
#
# Authors:
# Jan Pazdziora jpazdziora at redhat dot com
# Daniel Benamy <[email protected]>
import sys
sys.path.append("/usr/share/rhn")
from up2date_client import rhnreg
from up2date_client import rhnregGui
from up2date_client import up2dateErrors
import gtk
from gtk import glade
import gettext
_ = lambda x: gettext.ldgettext("rhn-client-tools", x)
gtk.glade.bindtextdomain("rhn-client-tools")
from firstboot.module import Module
from firstboot.constants import *
class moduleClass(Module):
def __init__(self):
Module.__init__(self)
self.priority = 106.5
self.sidebarTitle = _("Choose Service")
self.title = _("Choose Service")
self.support_sm = False
self.rhsmActive = True
def needsNetwork(self):
return True
def apply(self, interface, testing=False):
if testing:
return RESULT_SUCCESS
if self.support_sm \
and not self.chooseServerPage.chooseServerXml.get_widget("satelliteButton").get_active():
i = 0
while not interface.moduleList[i].__module__.startswith('rhsm_'):
i += 1
interface.moveToPage(pageNum=i)
self.rhsmActive = True
return RESULT_JUMP
try:
self.rhsmActive = False
if self.chooseServerPage.chooseServerPageApply() is False:
interface.moveToPage(moduleTitle=_("Red Hat Account"))
return RESULT_JUMP
else:
return RESULT_FAILURE
except up2dateErrors.SSLCertificateVerifyFailedError:
interface.moveToPage(moduleTitle=_("Provide Certificate"))
return RESULT_JUMP
# return RESULT_SUCCESS should work just as well since the
# certificate page with priority 107 is the next one anyway
def createScreen(self):
self.chooseServerPage = rhnregGui.ChooseServerPage()
self.vbox = gtk.VBox(spacing=5)
self.vbox.pack_start(self.chooseServerPage.chooseServerPageVbox(), True, True)
if sys.modules.has_key('rhsm_login'):
self.support_sm = True
self.rhsmButton = self.chooseServerPage.chooseServerXml.get_widget("rhsmButton")
self.rhsmButton.set_no_show_all(False)
self.rhsmButton.show_all()
def initializeUI(self):
self.chooseServerPage.chooseServerPagePrepare()
if self.support_sm and self.rhsmActive:
self.rhsmButton.set_active(True)
def shouldAppear(self):
if rhnreg.registered():
return False
return True
|
gpl-2.0
|
douwekiela/nncg-negation
|
acl17/scripts/train-test-split.py
|
1
|
1925
|
import json as js
import numpy as np
from argparse import ArgumentParser
def main(data_file, train_file, test_file, dev_file, num_noise):
data = js.load(open(data_file, "r"))
train_data = []
test_data = []
dev_data = []
np.random.seed(100)
test_indices = np.random.choice(a=range(len(data)),
size=int(0.2*len(data)),
replace=False)
def distinct_choice(n, k):
num = 0
indices = []
while num<k:
ind = np.random.randint(low=0, high=len(data), size=1)
if not (ind in [n] + indices):
indices.append(ind[0])
num +=1
print "indices: ", indices
return indices
dev_ind = int(0.25*len(test_indices))
for j, i in enumerate(test_indices):
example = data[i]
noise_inds = distinct_choice(i, num_noise)
noise = []
for l in noise_inds:
noise.append(data[l]["output"])
example["noise"] = noise
if j < dev_ind:
dev_data.append(example)
else:
test_data.append(example)
train_data = [ex for i,ex in enumerate(data) if not (i in test_indices)]
js.dump(train_data, open(train_file, "w"))
js.dump(test_data, open(test_file, "w"))
js.dump(dev_data, open(dev_file, "w"))
if __name__=="__main__":
parser = ArgumentParser()
parser.add_argument("-df", action="store", dest="data_file")
parser.add_argument("-dvf", action="store", dest="dev_file")
parser.add_argument("-trf", action="store", dest="train_file")
parser.add_argument("-tef", action="store", dest="test_file")
parser.add_argument("-numn", action="store", dest="num_noise",
type = int)
arg = parser.parse_args()
main(arg.data_file, arg.train_file, arg.test_file, arg.dev_file, arg.num_noise)
|
mit
|
egaxegax/django-dbcartajs
|
django/conf/__init__.py
|
95
|
9136
|
"""
Settings and configuration for Django.
Values will be read from the module specified by the DJANGO_SETTINGS_MODULE environment
variable, and then from django.conf.global_settings; see the global settings file for
a list of all possible variables.
"""
import logging
import os
import sys
import time # Needed for Windows
import warnings
from django.conf import global_settings
from django.core.exceptions import ImproperlyConfigured
from django.utils.functional import LazyObject, empty
from django.utils import importlib
from django.utils import six
ENVIRONMENT_VARIABLE = "DJANGO_SETTINGS_MODULE"
class LazySettings(LazyObject):
"""
A lazy proxy for either global Django settings or a custom settings object.
The user can manually configure settings prior to using them. Otherwise,
Django uses the settings module pointed to by DJANGO_SETTINGS_MODULE.
"""
def _setup(self, name=None):
"""
Load the settings module pointed to by the environment variable. This
is used the first time we need any settings at all, if the user has not
previously configured the settings manually.
"""
try:
settings_module = os.environ[ENVIRONMENT_VARIABLE]
if not settings_module: # If it's set but is an empty string.
raise KeyError
except KeyError:
desc = ("setting %s" % name) if name else "settings"
raise ImproperlyConfigured(
"Requested %s, but settings are not configured. "
"You must either define the environment variable %s "
"or call settings.configure() before accessing settings."
% (desc, ENVIRONMENT_VARIABLE))
self._wrapped = Settings(settings_module)
self._configure_logging()
def __getattr__(self, name):
if self._wrapped is empty:
self._setup(name)
return getattr(self._wrapped, name)
def _configure_logging(self):
"""
Setup logging from LOGGING_CONFIG and LOGGING settings.
"""
if not sys.warnoptions:
try:
# Route warnings through python logging
logging.captureWarnings(True)
# Allow DeprecationWarnings through the warnings filters
warnings.simplefilter("default", DeprecationWarning)
except AttributeError:
# No captureWarnings on Python 2.6, DeprecationWarnings are on anyway
pass
if self.LOGGING_CONFIG:
from django.utils.log import DEFAULT_LOGGING
# First find the logging configuration function ...
logging_config_path, logging_config_func_name = self.LOGGING_CONFIG.rsplit('.', 1)
logging_config_module = importlib.import_module(logging_config_path)
logging_config_func = getattr(logging_config_module, logging_config_func_name)
logging_config_func(DEFAULT_LOGGING)
if self.LOGGING:
# Backwards-compatibility shim for #16288 fix
compat_patch_logging_config(self.LOGGING)
# ... then invoke it with the logging settings
logging_config_func(self.LOGGING)
def configure(self, default_settings=global_settings, **options):
"""
Called to manually configure the settings. The 'default_settings'
parameter sets where to retrieve any unspecified values from (its
argument must support attribute access (__getattr__)).
"""
if self._wrapped is not empty:
raise RuntimeError('Settings already configured.')
holder = UserSettingsHolder(default_settings)
for name, value in options.items():
setattr(holder, name, value)
self._wrapped = holder
self._configure_logging()
@property
def configured(self):
"""
Returns True if the settings have already been configured.
"""
return self._wrapped is not empty
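# Hedged usage sketch (illustrative, not part of Django itself): in a
# standalone script or test harness the settings can be configured manually
# instead of through DJANGO_SETTINGS_MODULE, e.g.
#
#   from django.conf import settings
#   if not settings.configured:
#       settings.configure(DEBUG=True, INSTALLED_APPS=[])
#   settings.DEBUG  # attribute access now goes through the UserSettingsHolder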
class BaseSettings(object):
"""
Common logic for settings whether set by a module or by the user.
"""
def __setattr__(self, name, value):
if name in ("MEDIA_URL", "STATIC_URL") and value and not value.endswith('/'):
raise ImproperlyConfigured("If set, %s must end with a slash" % name)
elif name == "ALLOWED_INCLUDE_ROOTS" and isinstance(value, six.string_types):
raise ValueError("The ALLOWED_INCLUDE_ROOTS setting must be set "
"to a tuple, not a string.")
object.__setattr__(self, name, value)
class Settings(BaseSettings):
def __init__(self, settings_module):
# update this dict from global settings (but only for ALL_CAPS settings)
for setting in dir(global_settings):
if setting == setting.upper():
setattr(self, setting, getattr(global_settings, setting))
# store the settings module in case someone later cares
self.SETTINGS_MODULE = settings_module
try:
mod = importlib.import_module(self.SETTINGS_MODULE)
except ImportError as e:
raise ImportError("Could not import settings '%s' (Is it on sys.path?): %s" % (self.SETTINGS_MODULE, e))
# Settings that should be converted into tuples if they're mistakenly entered
# as strings.
tuple_settings = ("INSTALLED_APPS", "TEMPLATE_DIRS")
for setting in dir(mod):
if setting == setting.upper():
setting_value = getattr(mod, setting)
if setting in tuple_settings and \
isinstance(setting_value, six.string_types):
warnings.warn("The %s setting must be a tuple. Please fix your "
"settings, as auto-correction is now deprecated." % setting,
PendingDeprecationWarning)
setting_value = (setting_value,) # In case the user forgot the comma.
setattr(self, setting, setting_value)
if not self.SECRET_KEY:
raise ImproperlyConfigured("The SECRET_KEY setting must not be empty.")
if hasattr(time, 'tzset') and self.TIME_ZONE:
# When we can, attempt to validate the timezone. If we can't find
# this file, no check happens and it's harmless.
zoneinfo_root = '/usr/share/zoneinfo'
if (os.path.exists(zoneinfo_root) and not
os.path.exists(os.path.join(zoneinfo_root, *(self.TIME_ZONE.split('/'))))):
raise ValueError("Incorrect timezone setting: %s" % self.TIME_ZONE)
# Move the time zone info into os.environ. See ticket #2315 for why
# we don't do this unconditionally (breaks Windows).
os.environ['TZ'] = self.TIME_ZONE
time.tzset()
class UserSettingsHolder(BaseSettings):
"""
Holder for user configured settings.
"""
# SETTINGS_MODULE doesn't make much sense in the manually configured
# (standalone) case.
SETTINGS_MODULE = None
def __init__(self, default_settings):
"""
Requests for configuration variables not in this class are satisfied
from the module specified in default_settings (if possible).
"""
self.__dict__['_deleted'] = set()
self.default_settings = default_settings
def __getattr__(self, name):
if name in self._deleted:
raise AttributeError
return getattr(self.default_settings, name)
def __setattr__(self, name, value):
self._deleted.discard(name)
return super(UserSettingsHolder, self).__setattr__(name, value)
def __delattr__(self, name):
self._deleted.add(name)
return super(UserSettingsHolder, self).__delattr__(name)
def __dir__(self):
return list(self.__dict__) + dir(self.default_settings)
settings = LazySettings()
def compat_patch_logging_config(logging_config):
"""
Backwards-compatibility shim for #16288 fix. Takes initial value of
``LOGGING`` setting and patches it in-place (issuing deprecation warning)
if "mail_admins" logging handler is configured but has no filters.
"""
# Shim only if LOGGING["handlers"]["mail_admins"] exists,
# but has no "filters" key
if "filters" not in logging_config.get(
"handlers", {}).get(
"mail_admins", {"filters": []}):
warnings.warn(
"You have no filters defined on the 'mail_admins' logging "
"handler: adding implicit debug-false-only filter. "
"See http://docs.djangoproject.com/en/dev/releases/1.4/"
"#request-exceptions-are-now-always-logged",
DeprecationWarning)
filter_name = "require_debug_false"
filters = logging_config.setdefault("filters", {})
while filter_name in filters:
filter_name = filter_name + "_"
filters[filter_name] = {
"()": "django.utils.log.RequireDebugFalse",
}
logging_config["handlers"]["mail_admins"]["filters"] = [filter_name]
|
gpl-2.0
|
maxwward/SCOPEBak
|
askbot/conf/settings_wrapper.py
|
7
|
3878
|
"""
Definition of a Singleton wrapper class for askbot.deps.livesettings
with interface similar to django.conf.settings
that is, each setting has a unique key and is accessible
via dotted lookup.
for example, to look up the value of setting BLAH you would do
from askbot.conf import settings as askbot_settings
askbot_settings.BLAH
NOTE that at the moment there is a distinction between settings
(django settings) and askbot_settings (askbot.deps.livesettings).
The value will be taken from the askbot.deps.livesettings database or cache.
Note that during the compilation phase the database is mostly not
accessible, so actual values are reliably available only
at run time
askbot.deps.livesettings is a module developed for satchmo project
"""
from django.core.cache import cache
from askbot.deps.livesettings import SortedDotDict, config_register
from askbot.deps.livesettings.functions import config_get
from askbot.deps.livesettings import signals
class ConfigSettings(object):
"""A very simple Singleton wrapper for settings
a limitation is that all settings names using this class
must be distinct, even though they might belong
to different settings groups
"""
__instance = None
__group_map = {}
def __init__(self):
"""assigns SortedDotDict to self.__instance if not set"""
if ConfigSettings.__instance == None:
ConfigSettings.__instance = SortedDotDict()
self.__dict__['_ConfigSettings__instance'] = ConfigSettings.__instance
self.__ordering_index = {}
def __getattr__(self, key):
"""value lookup returns the actual value of setting
not the object - this way only very minimal modifications
will be required in code to convert an app
depending on django.conf.settings to askbot.deps.livesettings
"""
return getattr(self.__instance, key).value
def get_default(self, key):
"""return the defalut value for the setting"""
return getattr(self.__instance, key).default
def reset(self, key):
"""returns setting to the default value"""
self.update(key, self.get_default(key))
def update(self, key, value):
try:
setting = config_get(self.__group_map[key], key)
setting.update(value)
except:
from askbot.deps.livesettings.models import Setting
setting = Setting.objects.get(key=key)
setting.value = value
setting.save()
#self.prime_cache()
def register(self, value):
"""registers the setting
value must be a subclass of askbot.deps.livesettings.Value
"""
key = value.key
group_key = value.group.key
ordering = self.__ordering_index.get(group_key, None)
if ordering:
ordering += 1
value.ordering = ordering
else:
ordering = 1
value.ordering = ordering
self.__ordering_index[group_key] = ordering
if key not in self.__instance:
self.__instance[key] = config_register(value)
self.__group_map[key] = group_key
def as_dict(self):
settings = cache.get('askbot-livesettings')
if settings:
return settings
else:
self.prime_cache()
return cache.get('askbot-livesettings')
@classmethod
def prime_cache(cls, **kwargs):
"""reload all settings into cache as dictionary
"""
out = dict()
for key in cls.__instance.keys():
#todo: this is odd that I could not use self.__instance.items() mapping here
out[key] = cls.__instance[key].value
cache.set('askbot-livesettings', out)
signals.configuration_value_changed.connect(ConfigSettings.prime_cache)
#settings instance to be used elsewhere in the project
settings = ConfigSettings()
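#hedged usage sketch (illustrative; the setting name BLAH is taken from the
#module docstring above): dotted lookup returns the live value and update()
#persists a new one, e.g.
#
#  from askbot.conf import settings as askbot_settings
#  askbot_settings.BLAH                          # reads value from db/cache
#  askbot_settings.update('BLAH', 'new value')   # writes it back
#  askbot_settings.reset('BLAH')                 # restores the registered default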
|
gpl-3.0
|
jean/sentry
|
tests/sentry/web/frontend/test_admin.py
|
7
|
1612
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from django.core.urlresolvers import reverse
from exam import fixture
from sentry.testutils import TestCase
class EnvStatusTest(TestCase):
@fixture
def path(self):
return reverse('sentry-admin-status')
def test_requires_auth(self):
resp = self.client.get(self.path)
self.assertEquals(resp.status_code, 302)
def test_renders_template(self):
self.login_as(self.user)
resp = self.client.get(self.path)
self.assertEquals(resp.status_code, 200)
self.assertTemplateUsed(resp, 'sentry/admin/status/env.html')
class PackageStatusTest(TestCase):
@fixture
def path(self):
return reverse('sentry-admin-packages-status')
def test_requires_auth(self):
resp = self.client.get(self.path)
self.assertEquals(resp.status_code, 302)
def test_renders_template(self):
self.login_as(self.user)
resp = self.client.get(self.path)
self.assertEquals(resp.status_code, 200)
self.assertTemplateUsed(resp, 'sentry/admin/status/packages.html')
class MailStatusTest(TestCase):
@fixture
def path(self):
return reverse('sentry-admin-mail-status')
def test_requires_auth(self):
resp = self.client.get(self.path)
self.assertEquals(resp.status_code, 302)
def test_renders_template(self):
self.login_as(self.user)
resp = self.client.get(self.path)
self.assertEquals(resp.status_code, 200)
self.assertTemplateUsed(resp, 'sentry/admin/status/mail.html')
|
bsd-3-clause
|
40223222/-2015cd_midterm2
|
static/Brython3.1.1-20150328-091302/Lib/collections/abc.py
|
739
|
16026
|
# Copyright 2007 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Abstract Base Classes (ABCs) for collections, according to PEP 3119.
Unit tests are in test_collections.
"""
from abc import ABCMeta, abstractmethod
import sys
__all__ = ["Hashable", "Iterable", "Iterator",
"Sized", "Container", "Callable",
"Set", "MutableSet",
"Mapping", "MutableMapping",
"MappingView", "KeysView", "ItemsView", "ValuesView",
"Sequence", "MutableSequence",
"ByteString",
]
# Private list of types that we want to register with the various ABCs
# so that they will pass tests like:
# it = iter(somebytearray)
# assert isinstance(it, Iterable)
# Note: in other implementations, these types may not be distinct
# and they may have their own implementation-specific types that
# are not included in this list.
bytes_iterator = type(iter(b''))
bytearray_iterator = type(iter(bytearray()))
#callable_iterator = ???
dict_keyiterator = type(iter({}.keys()))
dict_valueiterator = type(iter({}.values()))
dict_itemiterator = type(iter({}.items()))
list_iterator = type(iter([]))
list_reverseiterator = type(iter(reversed([])))
range_iterator = type(iter(range(0)))
set_iterator = type(iter(set()))
str_iterator = type(iter(""))
tuple_iterator = type(iter(()))
zip_iterator = type(iter(zip()))
## views ##
dict_keys = type({}.keys())
dict_values = type({}.values())
dict_items = type({}.items())
## misc ##
mappingproxy = type(type.__dict__)
### ONE-TRICK PONIES ###
class Hashable(metaclass=ABCMeta):
__slots__ = ()
@abstractmethod
def __hash__(self):
return 0
@classmethod
def __subclasshook__(cls, C):
if cls is Hashable:
for B in C.__mro__:
if "__hash__" in B.__dict__:
if B.__dict__["__hash__"]:
return True
break
return NotImplemented
class Iterable(metaclass=ABCMeta):
__slots__ = ()
@abstractmethod
def __iter__(self):
while False:
yield None
@classmethod
def __subclasshook__(cls, C):
if cls is Iterable:
if any("__iter__" in B.__dict__ for B in C.__mro__):
return True
return NotImplemented
class Iterator(Iterable):
__slots__ = ()
@abstractmethod
def __next__(self):
raise StopIteration
def __iter__(self):
return self
@classmethod
def __subclasshook__(cls, C):
if cls is Iterator:
if (any("__next__" in B.__dict__ for B in C.__mro__) and
any("__iter__" in B.__dict__ for B in C.__mro__)):
return True
return NotImplemented
Iterator.register(bytes_iterator)
Iterator.register(bytearray_iterator)
#Iterator.register(callable_iterator)
Iterator.register(dict_keyiterator)
Iterator.register(dict_valueiterator)
Iterator.register(dict_itemiterator)
Iterator.register(list_iterator)
Iterator.register(list_reverseiterator)
Iterator.register(range_iterator)
Iterator.register(set_iterator)
Iterator.register(str_iterator)
Iterator.register(tuple_iterator)
Iterator.register(zip_iterator)
class Sized(metaclass=ABCMeta):
__slots__ = ()
@abstractmethod
def __len__(self):
return 0
@classmethod
def __subclasshook__(cls, C):
if cls is Sized:
if any("__len__" in B.__dict__ for B in C.__mro__):
return True
return NotImplemented
class Container(metaclass=ABCMeta):
__slots__ = ()
@abstractmethod
def __contains__(self, x):
return False
@classmethod
def __subclasshook__(cls, C):
if cls is Container:
if any("__contains__" in B.__dict__ for B in C.__mro__):
return True
return NotImplemented
class Callable(metaclass=ABCMeta):
__slots__ = ()
@abstractmethod
def __call__(self, *args, **kwds):
return False
@classmethod
def __subclasshook__(cls, C):
if cls is Callable:
if any("__call__" in B.__dict__ for B in C.__mro__):
return True
return NotImplemented
### SETS ###
class Set(Sized, Iterable, Container):
"""A set is a finite, iterable container.
This class provides concrete generic implementations of all
methods except for __contains__, __iter__ and __len__.
To override the comparisons (presumably for speed, as the
semantics are fixed), all you have to do is redefine __le__ and
then the other operations will automatically follow suit.
"""
__slots__ = ()
def __le__(self, other):
if not isinstance(other, Set):
return NotImplemented
if len(self) > len(other):
return False
for elem in self:
if elem not in other:
return False
return True
def __lt__(self, other):
if not isinstance(other, Set):
return NotImplemented
return len(self) < len(other) and self.__le__(other)
def __gt__(self, other):
if not isinstance(other, Set):
return NotImplemented
return other < self
def __ge__(self, other):
if not isinstance(other, Set):
return NotImplemented
return other <= self
def __eq__(self, other):
if not isinstance(other, Set):
return NotImplemented
return len(self) == len(other) and self.__le__(other)
def __ne__(self, other):
return not (self == other)
@classmethod
def _from_iterable(cls, it):
'''Construct an instance of the class from any iterable input.
Must override this method if the class constructor signature
does not accept an iterable for an input.
'''
return cls(it)
def __and__(self, other):
if not isinstance(other, Iterable):
return NotImplemented
return self._from_iterable(value for value in other if value in self)
def isdisjoint(self, other):
for value in other:
if value in self:
return False
return True
def __or__(self, other):
if not isinstance(other, Iterable):
return NotImplemented
chain = (e for s in (self, other) for e in s)
return self._from_iterable(chain)
def __sub__(self, other):
if not isinstance(other, Set):
if not isinstance(other, Iterable):
return NotImplemented
other = self._from_iterable(other)
return self._from_iterable(value for value in self
if value not in other)
def __xor__(self, other):
if not isinstance(other, Set):
if not isinstance(other, Iterable):
return NotImplemented
other = self._from_iterable(other)
return (self - other) | (other - self)
def _hash(self):
"""Compute the hash value of a set.
Note that we don't define __hash__: not all sets are hashable.
But if you define a hashable set type, its __hash__ should
call this function.
        This must be compatible with __eq__.
All sets ought to compare equal if they contain the same
elements, regardless of how they are implemented, and
regardless of the order of the elements; so there's not much
freedom for __eq__ or __hash__. We match the algorithm used
by the built-in frozenset type.
"""
MAX = sys.maxsize
MASK = 2 * MAX + 1
n = len(self)
h = 1927868237 * (n + 1)
h &= MASK
for x in self:
hx = hash(x)
h ^= (hx ^ (hx << 16) ^ 89869747) * 3644798167
h &= MASK
h = h * 69069 + 907133923
h &= MASK
if h > MAX:
h -= MASK + 1
if h == -1:
h = 590923713
return h
Set.register(frozenset)
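# Hedged illustration (not part of this module): per the _hash() docstring
# above, a hashable set type should route __hash__ through Set._hash so that
# equal sets hash alike however they are implemented. A minimal sketch:
class _ExampleHashableSet(Set):
    """Immutable set sketch that reuses the shared hashing algorithm."""
    def __init__(self, iterable=()):
        self._items = frozenset(iterable)
    def __iter__(self):
        return iter(self._items)
    def __contains__(self, value):
        return value in self._items
    def __len__(self):
        return len(self._items)
    __hash__ = Set._hash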
class MutableSet(Set):
__slots__ = ()
@abstractmethod
def add(self, value):
"""Add an element."""
raise NotImplementedError
@abstractmethod
def discard(self, value):
"""Remove an element. Do not raise an exception if absent."""
raise NotImplementedError
def remove(self, value):
"""Remove an element. If not a member, raise a KeyError."""
if value not in self:
raise KeyError(value)
self.discard(value)
def pop(self):
"""Return the popped value. Raise KeyError if empty."""
it = iter(self)
try:
value = next(it)
except StopIteration:
raise KeyError
self.discard(value)
return value
def clear(self):
"""This is slow (creates N new iterators!) but effective."""
try:
while True:
self.pop()
except KeyError:
pass
def __ior__(self, it):
for value in it:
self.add(value)
return self
def __iand__(self, it):
for value in (self - it):
self.discard(value)
return self
def __ixor__(self, it):
if it is self:
self.clear()
else:
if not isinstance(it, Set):
it = self._from_iterable(it)
for value in it:
if value in self:
self.discard(value)
else:
self.add(value)
return self
def __isub__(self, it):
if it is self:
self.clear()
else:
for value in it:
self.discard(value)
return self
MutableSet.register(set)
### MAPPINGS ###
class Mapping(Sized, Iterable, Container):
__slots__ = ()
@abstractmethod
def __getitem__(self, key):
raise KeyError
def get(self, key, default=None):
try:
return self[key]
except KeyError:
return default
def __contains__(self, key):
try:
self[key]
except KeyError:
return False
else:
return True
def keys(self):
return KeysView(self)
def items(self):
return ItemsView(self)
def values(self):
return ValuesView(self)
def __eq__(self, other):
if not isinstance(other, Mapping):
return NotImplemented
return dict(self.items()) == dict(other.items())
def __ne__(self, other):
return not (self == other)
Mapping.register(mappingproxy)
class MappingView(Sized):
def __init__(self, mapping):
self._mapping = mapping
def __len__(self):
return len(self._mapping)
def __repr__(self):
return '{0.__class__.__name__}({0._mapping!r})'.format(self)
class KeysView(MappingView, Set):
@classmethod
def _from_iterable(self, it):
return set(it)
def __contains__(self, key):
return key in self._mapping
def __iter__(self):
for key in self._mapping:
yield key
KeysView.register(dict_keys)
class ItemsView(MappingView, Set):
@classmethod
def _from_iterable(self, it):
return set(it)
def __contains__(self, item):
key, value = item
try:
v = self._mapping[key]
except KeyError:
return False
else:
return v == value
def __iter__(self):
for key in self._mapping:
yield (key, self._mapping[key])
ItemsView.register(dict_items)
class ValuesView(MappingView):
def __contains__(self, value):
for key in self._mapping:
if value == self._mapping[key]:
return True
return False
def __iter__(self):
for key in self._mapping:
yield self._mapping[key]
ValuesView.register(dict_values)
class MutableMapping(Mapping):
__slots__ = ()
@abstractmethod
def __setitem__(self, key, value):
raise KeyError
@abstractmethod
def __delitem__(self, key):
raise KeyError
__marker = object()
def pop(self, key, default=__marker):
try:
value = self[key]
except KeyError:
if default is self.__marker:
raise
return default
else:
del self[key]
return value
def popitem(self):
try:
key = next(iter(self))
except StopIteration:
raise KeyError
value = self[key]
del self[key]
return key, value
def clear(self):
try:
while True:
self.popitem()
except KeyError:
pass
def update(*args, **kwds):
if len(args) > 2:
raise TypeError("update() takes at most 2 positional "
"arguments ({} given)".format(len(args)))
elif not args:
raise TypeError("update() takes at least 1 argument (0 given)")
self = args[0]
other = args[1] if len(args) >= 2 else ()
if isinstance(other, Mapping):
for key in other:
self[key] = other[key]
elif hasattr(other, "keys"):
for key in other.keys():
self[key] = other[key]
else:
for key, value in other:
self[key] = value
for key, value in kwds.items():
self[key] = value
def setdefault(self, key, default=None):
try:
return self[key]
except KeyError:
self[key] = default
return default
MutableMapping.register(dict)
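# Hedged illustration: update() above accepts, in the order it checks them, a
# Mapping, an object exposing keys(), or an iterable of (key, value) pairs,
# plus keyword arguments; e.g. d.update({'a': 1}), d.update([('b', 2)]) and
# d.update(c=3) all end up assigning through __setitem__.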
### SEQUENCES ###
class Sequence(Sized, Iterable, Container):
"""All the operations on a read-only sequence.
Concrete subclasses must override __new__ or __init__,
__getitem__, and __len__.
"""
__slots__ = ()
@abstractmethod
def __getitem__(self, index):
raise IndexError
def __iter__(self):
i = 0
try:
while True:
v = self[i]
yield v
i += 1
except IndexError:
return
def __contains__(self, value):
for v in self:
if v == value:
return True
return False
def __reversed__(self):
for i in reversed(range(len(self))):
yield self[i]
def index(self, value):
for i, v in enumerate(self):
if v == value:
return i
raise ValueError
def count(self, value):
return sum(1 for v in self if v == value)
Sequence.register(tuple)
Sequence.register(str)
Sequence.register(range)
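# Hedged illustration (not part of this module): a read-only Sequence only
# needs __getitem__ and __len__; iteration, containment, index() and count()
# then come from the mixin methods above. A minimal sketch:
class _ExampleEvens(Sequence):
    """The first n even numbers: 0, 2, 4, ..."""
    def __init__(self, n):
        self._n = n
    def __len__(self):
        return self._n
    def __getitem__(self, index):
        if not 0 <= index < self._n:
            raise IndexError(index)
        return 2 * index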
class ByteString(Sequence):
"""This unifies bytes and bytearray.
XXX Should add all their methods.
"""
__slots__ = ()
ByteString.register(bytes)
ByteString.register(bytearray)
class MutableSequence(Sequence):
__slots__ = ()
@abstractmethod
def __setitem__(self, index, value):
raise IndexError
@abstractmethod
def __delitem__(self, index):
raise IndexError
@abstractmethod
def insert(self, index, value):
raise IndexError
def append(self, value):
self.insert(len(self), value)
def clear(self):
try:
while True:
self.pop()
except IndexError:
pass
def reverse(self):
n = len(self)
for i in range(n//2):
self[i], self[n-i-1] = self[n-i-1], self[i]
def extend(self, values):
for v in values:
self.append(v)
def pop(self, index=-1):
v = self[index]
del self[index]
return v
def remove(self, value):
del self[self.index(value)]
def __iadd__(self, values):
self.extend(values)
return self
MutableSequence.register(list)
MutableSequence.register(bytearray) # Multiply inheriting, see ByteString
|
agpl-3.0
|
xindus40223115/2015cda_g1
|
static/Brython3.1.0-20150301-090019/Lib/site-packages/pygame/image.py
|
603
|
19603
|
#!/usr/bin/env python
'''Pygame module for image transfer.
The image module contains functions for loading and saving pictures, as
well as transferring Surfaces to formats usable by other packages.
Note that there is no Image class; an image is loaded as a
Surface object. The Surface class allows manipulation (drawing lines,
setting pixels, capturing regions, etc.).
The image module is a required dependency of Pygame, but it only optionally
supports any extended file formats. By default it can only load uncompressed
BMP images. When built with full image support, the pygame.image.load()
function can support the following formats.
* JPG
* PNG
* GIF (non animated)
* BMP
* PCX
* TGA (uncompressed)
* TIF
* LBM (and PBM)
* PBM (and PGM, PPM)
* XPM
Saving images only supports a limited set of formats. You can save
to the following formats.
* BMP
* TGA
* PNG
* JPEG
PNG, JPEG saving new in pygame 1.8.
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
from browser import html, window
from javascript import JSConstructor, console
#import os.path
import os
import re
#from SDL import *
import pygame.surface
#try:
# #from SDL.image import *
# #_have_SDL_image = True
#except ImportError:
_have_SDL_image = False
def load_extended(file, namehint=''):
'''Load new image from a file, using SDL.image.
:see: `load`
:Parameters:
`file` : str or file-like object
Image file or filename to load.
`namehint` : str
Optional file extension.
:rtype: `Surface`
'''
if not _have_SDL_image:
raise NotImplementedError('load_extended requires SDL.image')
if not hasattr(file, 'read'):
surf = IMG_Load(file)
else:
if not namehint and hasattr(file, 'name'):
namehint = file.name
namehint = os.path.splitext(namehint)[1]
rw = SDL_RWFromObject(file)
# XXX Differ from pygame: don't freesrc when we didn't allocate it
surf = IMG_LoadTyped_RW(rw, 0, namehint)
return pygame.surface.Surface(surf=surf)
def load_basic(file, namehint=''):
'''Load BMP image from a file.
:see: `load`
:Parameters:
`file` : str or file-like object
Image file or filename to load.
`namehint` : str
Ignored, for compatibility.
:rtype: `Surface`
'''
#if not hasattr(file, 'read'):
# surf = SDL_LoadBMP(file)
#else:
# rw = SDL_RWFromObject(file)
# # XXX Differ from pygame: don't freesrc when we didn't allocate it
# surf = SDL_LoadBMP_RW(rw, 0)
_img=JSConstructor(window.Image)()
_img.src=file
_img.canvas=html.CANVAS()
def img_onload(*args):
#http://www.jaypan.com/tutorial/javascript-passing-arguments-anonymous-functions-without-firing-function
        #the onload fires very slowly, so variables get messed up and we have
        #to use args[0].path[0] to figure out the correct image
console.log(args)
if hasattr(args[0], 'target'): # Firefox
this=args[0].target
else: #chrome
this=args[0].path[0]
this.canvas.width=this.width
this.canvas.height=this.height
this.canvas.getContext('2d').drawImage(this,0,0)
#this.loaded=True
_img.onload=img_onload
return pygame.surface.Surface(surf=_img.canvas)
def load(file, namehint=''):
'''Load a new image from a file.
Pygame will automatically determine the image type (e.g., GIF or bitmap)
and create a new Surface object from the data. In some cases it will need
to know the file extension (e.g., GIF images should end in ".gif"). If
you pass a raw file-like object, you may also want to pass the original
filename as the namehint argument.
The returned Surface will contain the same color format, colorkey and
alpha transparency as the file it came from. You will often want to call
Surface.convert() with no arguments, to create a copy that will draw more
quickly on the screen.
For alpha transparency, like in .png images use the convert_alpha() method
after loading so that the image has per pixel transparency.
Pygame may not always be built to support all image formats. At minimum it
will support uncompressed BMP. If pygame.image.get_extended() returns
'True', you should be able to load most images (including png, jpg and gif).
You should use os.path.join() for compatibility, e.g.::
asurf = pygame.image.load(os.path.join('data', 'bla.png'))
This function calls `load_extended` if SDL.image is available, otherwise
`load_basic`.
:Parameters:
`file` : str or file-like object
Image file or filename to load.
`namehint` : str
Optional file extension.
:rtype: `Surface`
'''
#if _have_SDL_image:
# return load_extended(file, namehint)
#else:
return load_basic(file, namehint)
def save(surface, file):
'''Save an image to disk.
This will save your Surface as either a BMP, TGA, PNG, or JPEG image. If
the filename extension is unrecognized it will default to TGA. Both TGA,
and BMP file formats create uncompressed files.
:note: Only BMP is currently implemented.
:Parameters:
`surface` : `Surface`
Surface containing image data to save.
`file` : str or file-like object
File or filename to save to.
'''
#todo brython..
#for now, lets just pass
pass
if surface._surf.flags & SDL_OPENGL:
surf = _get_opengl_surface(surface._surf)
else:
surface._prep()
surf = surface._surf
if hasattr(file, 'write'):
# TODO TGA not BMP save
rw = SDL_RWFromObject(file)
# XXX Differ from pygame: don't freesrc when we didn't allocate it
SDL_SaveBMP_RW(surf, rw, 0)
else:
fileext = os.path.splitext(file)[1].lower()
if fileext == '.bmp':
SDL_SaveBMP(surf, file)
elif fileext in ('.jpg', '.jpeg'):
raise pygame.base.error('No support for jpg compiled in.') # TODO
elif fileext == '.png':
raise pygame.base.error('No support for png compiled in.') # TODO
else:
raise NotImplementedError('TODO: TGA support')
if surface._surf.flags & SDL_OPENGL:
SDL_FreeSurface(surf)
else:
surface._unprep()
def get_extended():
'''Test if extended image formats can be loaded.
If pygame is built with extended image formats this function will return
True. It is still not possible to determine which formats will be
available, but generally you will be able to load them all.
:rtype: bool
'''
return _have_SDL_image
def tostring(surface, format, flipped=False):
'''Transfer image to string buffer.
Creates a string that can be transferred with the 'fromstring' method in
other Python imaging packages. Some Python image packages prefer their
images in bottom-to-top format (PyOpenGL for example). If you pass True
for the flipped argument, the string buffer will be vertically flipped.
The format argument is a string of one of the following values. Note that
only 8bit Surfaces can use the "P" format. The other formats will work for
any Surface. Also note that other Python image packages support more
formats than Pygame.
* P, 8bit palettized Surfaces
* RGB, 24bit image
* RGBX, 32bit image with alpha channel derived from color key
* RGBA, 32bit image with an alpha channel
* ARGB, 32bit image with alpha channel first
:Parameters:
`surface` : `Surface`
Surface containing data to convert.
`format` : str
One of 'P', 'RGB', 'RGBX', 'RGBA' or 'ARGB'
`flipped` : bool
If True, data is ordered from bottom row to top.
:rtype: str
'''
#brython
return pygame.surface.Surface.toDataURL('image/%s' % format)
surf = surface._surf
if surf.flags & SDL_OPENGL:
surf = _get_opengl_surface(surf)
result = None
rows = []
pitch = surf.pitch
w = surf.w
h = surf.h
if flipped:
h_range = range(surf.h - 1, -1, -1)
else:
h_range = range(surf.h)
if format == 'P':
# The only case for creating palette data.
if surf.format.BytesPerPixel != 1:
raise ValueError('Can only create "P" format data with 8bit Surfaces')
surface.lock()
pixels = surf.pixels.to_string()
surface.unlock()
if pitch == w:
result = pixels # easy exit
else:
flipped = False # Flipping taken care of by h_range
for y in h_range:
rows.append(pixels[y*pitch:y*pitch + w])
elif surf.format.BytesPerPixel == len(format) and format != 'RGBX':
# No conversion required?
# This is an optimisation; could also use the default case.
if format == 'RGBA':
Rmask = SDL_SwapLE32(0x000000ff)
Gmask = SDL_SwapLE32(0x0000ff00)
Bmask = SDL_SwapLE32(0x00ff0000)
Amask = SDL_SwapLE32(0xff000000)
elif format == 'ARGB':
Amask = SDL_SwapLE32(0x000000ff)
Rmask = SDL_SwapLE32(0x0000ff00)
Gmask = SDL_SwapLE32(0x00ff0000)
Bmask = SDL_SwapLE32(0xff000000)
elif format == 'RGB':
if SDL_BYTEORDER == SDL_LIL_ENDIAN:
Rmask = 0x000000ff
Gmask = 0x0000ff00
Bmask = 0x00ff0000
else:
Rmask = 0x00ff0000
Gmask = 0x0000ff00
Bmask = 0x000000ff
Amask = surf.format.Amask # ignore
if surf.format.Rmask == Rmask and \
surf.format.Gmask == Gmask and \
surf.format.Bmask == Bmask and \
surf.format.Amask == Amask and \
pitch == w * surf.format.BytesPerPixel:
# Pixel data is already in required format, simply memcpy will
# work fast.
surface.lock()
result = surf.pixels.to_string()
surface.unlock()
elif surf.format.BytesPerPixel == 3 and \
(format in ('RGBA', 'ARGB') or \
format == 'RGBX' and not surf.flags & SDL_SRCCOLORKEY):
# Optimised conversion from RGB to RGBA or ARGB.
if surf.format.Rmask == SDL_SwapLE32(0x000000ff) and \
surf.format.Gmask == SDL_SwapLE32(0x0000ff00) and \
surf.format.Bmask == SDL_SwapLE32(0x00ff0000) and \
pitch == w * surf.format.BytesPerPixel:
surface.lock()
result = surf.pixels.to_string()
surface.unlock()
# Insert in empty alpha byte
alpha = chr(0xff)
result = alpha.join(re.findall('...', result, re.DOTALL))
if format == 'ARGB':
result = alpha + result
else:
result += alpha
elif surf.format.BytesPerPixel == 4 and format == 'RGB':
# Optimised conversion from RGBA or ARGB to RGB.
# This is an optimisation; could also use the default case.
if surf.format.Rmask == SDL_SwapLE32(0x000000ff):
# Internal format is RGBA
Gmask = SDL_SwapLE32(0x0000ff00)
Bmask = SDL_SwapLE32(0x00ff0000)
pattern = '(...).'
elif surf.format.Rmask == SDL_SwapLE32(0x0000ff00):
# Internal format is ARGB
Gmask = SDL_SwapLE32(0x00ff0000)
Bmask = SDL_SwapLE32(0xff000000)
pattern = '.(...)'
else:
# Internal format is something else, give up.
pattern = None
if pattern and \
surf.format.Gmask == Gmask and \
surf.format.Bmask == Bmask and \
pitch == w * surf.format.BytesPerPixel:
surface.lock()
result = surf.pixels.to_string()
surface.unlock()
# Squeeze out the alpha byte
result = ''.join(re.findall(pattern, result, re.DOTALL))
if not result and not rows:
# Default case, works for any conversion, but is slow.
surface.lock()
if surf.format.BytesPerPixel == 1:
palette = surf.format.palette.colors
if surf.flags & SDL_SRCCOLORKEY and not Amask and format == 'RGBX':
colorkey = surf.format.colorkey
pixels = [(palette[c].r, palette[c].g, palette[c].b,
(c != colorkey) * 0xff) \
for c in surf.pixels]
else:
pixels = [(palette[c].r, palette[c].g, palette[c].b, 255) \
for c in surf.pixels]
elif surf.format.BytesPerPixel == 3:
raise NotImplementedError('TODO')
else:
Rmask = surf.format.Rmask
Gmask = surf.format.Gmask
Bmask = surf.format.Bmask
Amask = surf.format.Amask
Rshift = surf.format.Rshift
Gshift = surf.format.Gshift
Bshift = surf.format.Bshift
Ashift = surf.format.Ashift
Rloss = surf.format.Rloss
Gloss = surf.format.Gloss
Bloss = surf.format.Bloss
Aloss = surf.format.Aloss
if surf.flags & SDL_SRCCOLORKEY and not Amask and format == 'RGBX':
colorkey = surf.format.colorkey
pixels = [( ((c & Rmask) >> Rshift) << Rloss,
((c & Gmask) >> Gshift) << Gloss,
((c & Bmask) >> Bshift) << Bloss,
(c != colorkey) * 0xff ) \
for c in surf.pixels]
else:
pixels = [( ((c & Rmask) >> Rshift) << Rloss,
((c & Gmask) >> Gshift) << Gloss,
((c & Bmask) >> Bshift) << Bloss,
((c & Amask) >> Ashift) << Aloss ) \
for c in surf.pixels]
surface.unlock()
pitch /= surf.format.BytesPerPixel
flipped = False # Flipping taken care of by h_range
if format == 'RGB':
for y in h_range:
rows.append(''.join([ chr(c[0]) + chr(c[1]) + chr(c[2]) \
for c in pixels[y*pitch:y*pitch + w] ]))
elif format in ('RGBA', 'RGBX'):
for y in h_range:
rows.append(''.join([ chr(c[0]) + chr(c[1]) + chr(c[2]) + \
chr(c[3]) \
for c in pixels[y*pitch:y*pitch + w] ]))
elif format == 'ARGB':
for y in h_range:
rows.append(''.join([ chr(c[3]) + chr(c[1]) + chr(c[2]) + \
chr(c[0]) \
for c in pixels[y*pitch:y*pitch + w] ]))
if surface._surf.flags & SDL_OPENGL:
SDL_FreeSurface(surf)
# Is pixel data already one big string?
if result:
if flipped:
# Split it into rows so it can be flipped vertically.
rows = re.findall('.' * w * len(format), result, re.DOTALL)
else:
return result
if flipped:
rows.reverse()
return ''.join(rows)
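# Hedged usage sketch (surf, width and height are placeholders): a typical
# round trip to another imaging package pins down an explicit pixel format,
# e.g.
#
#   data = pygame.image.tostring(surf, 'RGBA')
#   copy = pygame.image.fromstring(data, (width, height), 'RGBA')
#
# where (width, height) must match the surface the data came from, since
# fromstring() below checks the buffer length against size and format.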
def fromstring(string, size, format, flipped=False):
'''Create new Surface from a string buffer.
This function takes arguments similar to pygame.image.tostring(). The size
argument is a pair of numbers representing the width and height. Once the
new Surface is created you can destroy the string buffer.
    The size and format arguments must describe exactly as much data as the
    passed string buffer contains, otherwise an exception will be raised.
:Parameters:
`string` : str
String containing image data.
`size` : (int, int)
Width, height of the image.
`format` : str
One of 'P', 'RGB', 'RGBA' or 'ARGB'
`flipped` : bool
If True, data is ordered from bottom row to top.
:rtype: `Surface`
'''
_img = html.IMG(width=size[0], height=size[1])
_img.src=string
_canvas=html.CANVAS(width=size[0], height=size[1])
_ctx=_canvas.getContext('2d')
_ctx.drawImage(_img,0,0)
return pygame.surface.Surface(surf=_canvas)
width, height = size
if format == 'P':
Rmask = 0
Gmask = 0
Bmask = 0
Amask = 0
depth = 8
pitch = width
elif format == 'RGB':
if SDL_BYTEORDER == SDL_LIL_ENDIAN:
Rmask = 0x000000ff
Gmask = 0x0000ff00
Bmask = 0x00ff0000
else:
Rmask = 0x00ff0000
Gmask = 0x0000ff00
Bmask = 0x000000ff
Amask = 0x00000000
depth = 24
pitch = width * 3
elif format in ('RGBA', 'RGBX'):
if SDL_BYTEORDER == SDL_LIL_ENDIAN:
Rmask = 0x000000ff
Gmask = 0x0000ff00
Bmask = 0x00ff0000
Amask = 0xff000000
else:
Rmask = 0xff000000
Gmask = 0x00ff0000
Bmask = 0x0000ff00
Amask = 0x000000ff
if format == 'RGBX':
Amask = 0x00000000
depth = 32
pitch = width * 4
elif format == 'ARGB':
if SDL_BYTEORDER == SDL_LIL_ENDIAN:
Rmask = 0x0000ff00
Gmask = 0x00ff0000
Bmask = 0xff000000
Amask = 0x000000ff
else:
Rmask = 0x00ff0000
Gmask = 0x0000ff00
Bmask = 0x000000ff
Amask = 0xff000000
depth = 32
pitch = width * 4
if len(string) != pitch * height:
raise ValueError('String length does not equal format and resolution size')
if flipped:
string = ''.join([string[y*pitch:y*pitch+pitch] \
for y in range(height - 1, -1, -1)])
surf = SDL_CreateRGBSurfaceFrom(string, width, height, depth, pitch,
Rmask, Gmask, Bmask, Amask)
return pygame.surface.Surface(surf=surf)
def frombuffer(string, size, format):
'''Create a new Surface that shares data inside a string buffer.
Create a new Surface that shares pixel data directly from the string buffer.
This method takes the same arguments as pygame.image.fromstring(), but is
unable to vertically flip the source data.
:note: In pygame-ctypes, this function is identical to `fromstring`.
:Parameters:
`string` : str
String containing image data.
`size` : (int, int)
Width, height of the image.
`format` : str
One of 'P', 'RGB', 'RGBA', 'RGBX' or 'ARGB'
:rtype: `Surface`
'''
return fromstring(string, size, format)
def _get_opengl_surface(surf):
import OpenGL.GL
data = OpenGL.GL.glReadPixels(0, 0, surf.w, surf.h,
OpenGL.GL.GL_RGB, OpenGL.GL.GL_UNSIGNED_BYTE)
if SDL_BYTEORDER == SDL_LIL_ENDIAN:
Rmask = 0x000000ff
Gmask = 0x0000ff00
Bmask = 0x00ff0000
else:
Rmask = 0x00ff0000
Gmask = 0x0000ff00
Bmask = 0x000000ff
# Flip vertically
pitch = surf.w * 3
data = ''.join([data[y*pitch:y*pitch+pitch] \
for y in range(surf.h - 1, -1, -1)])
newsurf = SDL_CreateRGBSurfaceFrom(data, surf.w, surf.h, 24, pitch,
Rmask, Gmask, Bmask, 0)
return newsurf
|
gpl-3.0
|
tpsatish95/Python-Workshop
|
Python Environment Setup/Alternate/1. Python/1. Installer/Python-3.4.0(Linux)/Lib/unittest/runner.py
|
195
|
7484
|
"""Running tests"""
import sys
import time
import warnings
from . import result
from .signals import registerResult
__unittest = True
class _WritelnDecorator(object):
"""Used to decorate file-like objects with a handy 'writeln' method"""
def __init__(self,stream):
self.stream = stream
def __getattr__(self, attr):
if attr in ('stream', '__getstate__'):
raise AttributeError(attr)
return getattr(self.stream,attr)
def writeln(self, arg=None):
if arg:
self.write(arg)
self.write('\n') # text-mode streams translate to \r\n if needed
class TextTestResult(result.TestResult):
"""A test result class that can print formatted text results to a stream.
Used by TextTestRunner.
"""
separator1 = '=' * 70
separator2 = '-' * 70
def __init__(self, stream, descriptions, verbosity):
super(TextTestResult, self).__init__(stream, descriptions, verbosity)
self.stream = stream
self.showAll = verbosity > 1
self.dots = verbosity == 1
self.descriptions = descriptions
def getDescription(self, test):
doc_first_line = test.shortDescription()
if self.descriptions and doc_first_line:
return '\n'.join((str(test), doc_first_line))
else:
return str(test)
def startTest(self, test):
super(TextTestResult, self).startTest(test)
if self.showAll:
self.stream.write(self.getDescription(test))
self.stream.write(" ... ")
self.stream.flush()
def addSuccess(self, test):
super(TextTestResult, self).addSuccess(test)
if self.showAll:
self.stream.writeln("ok")
elif self.dots:
self.stream.write('.')
self.stream.flush()
def addError(self, test, err):
super(TextTestResult, self).addError(test, err)
if self.showAll:
self.stream.writeln("ERROR")
elif self.dots:
self.stream.write('E')
self.stream.flush()
def addFailure(self, test, err):
super(TextTestResult, self).addFailure(test, err)
if self.showAll:
self.stream.writeln("FAIL")
elif self.dots:
self.stream.write('F')
self.stream.flush()
def addSkip(self, test, reason):
super(TextTestResult, self).addSkip(test, reason)
if self.showAll:
self.stream.writeln("skipped {0!r}".format(reason))
elif self.dots:
self.stream.write("s")
self.stream.flush()
def addExpectedFailure(self, test, err):
super(TextTestResult, self).addExpectedFailure(test, err)
if self.showAll:
self.stream.writeln("expected failure")
elif self.dots:
self.stream.write("x")
self.stream.flush()
def addUnexpectedSuccess(self, test):
super(TextTestResult, self).addUnexpectedSuccess(test)
if self.showAll:
self.stream.writeln("unexpected success")
elif self.dots:
self.stream.write("u")
self.stream.flush()
def printErrors(self):
if self.dots or self.showAll:
self.stream.writeln()
self.printErrorList('ERROR', self.errors)
self.printErrorList('FAIL', self.failures)
def printErrorList(self, flavour, errors):
for test, err in errors:
self.stream.writeln(self.separator1)
self.stream.writeln("%s: %s" % (flavour,self.getDescription(test)))
self.stream.writeln(self.separator2)
self.stream.writeln("%s" % err)
class TextTestRunner(object):
"""A test runner class that displays results in textual form.
It prints out the names of tests as they are run, errors as they
occur, and a summary of the results at the end of the test run.
"""
resultclass = TextTestResult
def __init__(self, stream=None, descriptions=True, verbosity=1,
failfast=False, buffer=False, resultclass=None, warnings=None):
if stream is None:
stream = sys.stderr
self.stream = _WritelnDecorator(stream)
self.descriptions = descriptions
self.verbosity = verbosity
self.failfast = failfast
self.buffer = buffer
self.warnings = warnings
if resultclass is not None:
self.resultclass = resultclass
def _makeResult(self):
return self.resultclass(self.stream, self.descriptions, self.verbosity)
def run(self, test):
"Run the given test case or test suite."
result = self._makeResult()
registerResult(result)
result.failfast = self.failfast
result.buffer = self.buffer
with warnings.catch_warnings():
if self.warnings:
# if self.warnings is set, use it to filter all the warnings
warnings.simplefilter(self.warnings)
# if the filter is 'default' or 'always', special-case the
# warnings from the deprecated unittest methods to show them
# no more than once per module, because they can be fairly
# noisy. The -Wd and -Wa flags can be used to bypass this
# only when self.warnings is None.
if self.warnings in ['default', 'always']:
warnings.filterwarnings('module',
category=DeprecationWarning,
message=r'Please use assert\w+ instead.')
startTime = time.time()
startTestRun = getattr(result, 'startTestRun', None)
if startTestRun is not None:
startTestRun()
try:
test(result)
finally:
stopTestRun = getattr(result, 'stopTestRun', None)
if stopTestRun is not None:
stopTestRun()
stopTime = time.time()
timeTaken = stopTime - startTime
result.printErrors()
if hasattr(result, 'separator2'):
self.stream.writeln(result.separator2)
run = result.testsRun
self.stream.writeln("Ran %d test%s in %.3fs" %
(run, run != 1 and "s" or "", timeTaken))
self.stream.writeln()
expectedFails = unexpectedSuccesses = skipped = 0
try:
results = map(len, (result.expectedFailures,
result.unexpectedSuccesses,
result.skipped))
except AttributeError:
pass
else:
expectedFails, unexpectedSuccesses, skipped = results
infos = []
if not result.wasSuccessful():
self.stream.write("FAILED")
failed, errored = len(result.failures), len(result.errors)
if failed:
infos.append("failures=%d" % failed)
if errored:
infos.append("errors=%d" % errored)
else:
self.stream.write("OK")
if skipped:
infos.append("skipped=%d" % skipped)
if expectedFails:
infos.append("expected failures=%d" % expectedFails)
if unexpectedSuccesses:
infos.append("unexpected successes=%d" % unexpectedSuccesses)
if infos:
self.stream.writeln(" (%s)" % (", ".join(infos),))
else:
self.stream.write("\n")
return result
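# Hedged usage sketch (illustration only, not part of the stdlib module):
# running a throwaway test case through TextTestRunner; verbosity=2 makes
# TextTestResult print one line per test instead of a dot.
if __name__ == '__main__':
    import unittest
    class _ExampleTest(unittest.TestCase):
        def test_addition(self):
            self.assertEqual(1 + 1, 2)
    suite = unittest.defaultTestLoader.loadTestsFromTestCase(_ExampleTest)
    TextTestRunner(verbosity=2).run(suite)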
|
apache-2.0
|
fgallina/gpycomplete
|
gpycomplete/helpers.py
|
1
|
3526
|
# This file is part of gpycomplete.
# gpycomplete is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# gpycomplete is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with gpycomplete. If not, see <http://www.gnu.org/licenses/>.
# gpycomplete is written from scratch by Fabian Ezequiel Gallina
# <fgallina at gnu dot org dot ar> but it is somehow based on the
# original pycomplete package from the http://python-mode.sf.net.
# gpycomplete allows inline completion and help for the python
# programming language within GNU/Emacs
import pydoc
import types
import inspect
import context
def get_signature(obj):
"""Returns the signature of the given object.
Inspired by the original pycomplete package.
"""
# FIXME - make this function less ugly
paren = obj.find("(")
if paren != -1:
obj = obj[:paren]
context_dict = 'subprogram_globals'
if obj not in context.get_context():
context_dict = 'helper_globals'
if not context.cimport(obj, context_dict):
return "no signature for " + obj
try:
obj = context.eval_code(obj, context_dict)
except:
return "no signature for " + obj
sig = ""
# This part is extracted from the pycomplete.py file
if type(obj) in (types.ClassType, types.TypeType):
obj = _find_constructor(obj)
elif type(obj) == types.MethodType:
obj = obj.im_func
if type(obj) in [types.FunctionType, types.LambdaType]:
(args, varargs, varkw, defaults) = inspect.getargspec(obj)
sig = ('%s: %s' % (obj.__name__,
inspect.formatargspec(args, varargs, varkw,
defaults)))
doc = getattr(obj, '__doc__', '')
if doc and not sig:
doc = doc.lstrip()
pos = doc.find('\n')
if pos < 0 or pos > 70:
pos = 70
sig = doc[:pos]
return sig
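def _signature_sketch():
    # Hedged standalone sketch (not part of gpycomplete): the same
    # inspect.getargspec/formatargspec pair used above, applied to a
    # throwaway function to show the "name: (args)" string that
    # get_signature builds for plain functions.
    def sample(a, b=2, *rest, **options):
        pass
    args, varargs, varkw, defaults = inspect.getargspec(sample)
    return '%s: %s' % (sample.__name__,
                       inspect.formatargspec(args, varargs, varkw, defaults))
# _signature_sketch() == 'sample: (a, b=2, *rest, **options)'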
def _find_constructor(class_ob):
# This part is extracted from the pycomplete.py file
# Given a class object, return a function object used for the
# constructor (ie, __init__() ) or None if we can't find one.
try:
return class_ob.__init__.im_func
except AttributeError:
for base in class_ob.__bases__:
rc = _find_constructor(base)
if rc is not None: return rc
return None
def get_help(obj):
"""Returns the help of the given object.
Inspired by the original pycomplete package.
"""
paren = obj.rfind("(")
if paren != -1:
obj = obj[:paren]
if obj.endswith("(") or obj.endswith("."):
obj = obj[:-1]
found = False
pobj = None
context_dict = 'subprogram_globals'
if obj not in context.get_context():
context_dict = 'helper_globals'
found = context.cimport(obj, context_dict)
else:
pobj = context.eval_code(obj)
if obj not in context.subcontext_globals and found:
pobj = context.eval_code(obj, context_dict)
if not pobj:
return "no help string for " + obj
obj = context.eval_code(obj)
return pydoc.getdoc(obj)
|
gpl-3.0
|
rooshilp/CMPUT410W15-project
|
testenv/lib/python2.7/site-packages/django/db/backends/postgresql_psycopg2/schema.py
|
36
|
3619
|
from django.db.backends.schema import BaseDatabaseSchemaEditor
class DatabaseSchemaEditor(BaseDatabaseSchemaEditor):
sql_create_sequence = "CREATE SEQUENCE %(sequence)s"
sql_delete_sequence = "DROP SEQUENCE IF EXISTS %(sequence)s CASCADE"
sql_set_sequence_max = "SELECT setval('%(sequence)s', MAX(%(column)s)) FROM %(table)s"
sql_create_varchar_index = "CREATE INDEX %(name)s ON %(table)s (%(columns)s varchar_pattern_ops)%(extra)s"
sql_create_text_index = "CREATE INDEX %(name)s ON %(table)s (%(columns)s text_pattern_ops)%(extra)s"
def quote_value(self, value):
# Inner import so backend fails nicely if it's not present
import psycopg2
return psycopg2.extensions.adapt(value)
def _model_indexes_sql(self, model):
output = super(DatabaseSchemaEditor, self)._model_indexes_sql(model)
if not model._meta.managed or model._meta.proxy or model._meta.swapped:
return output
for field in model._meta.local_fields:
db_type = field.db_type(connection=self.connection)
if db_type is not None and (field.db_index or field.unique):
# Fields with database column types of `varchar` and `text` need
# a second index that specifies their operator class, which is
# needed when performing correct LIKE queries outside the
# C locale. See #12234.
if db_type.startswith('varchar'):
output.append(self._create_index_sql(
model, [field], suffix='_like', sql=self.sql_create_varchar_index))
elif db_type.startswith('text'):
output.append(self._create_index_sql(
model, [field], suffix='_like', sql=self.sql_create_text_index))
return output
def _alter_column_type_sql(self, table, column, type):
"""
Makes ALTER TYPE with SERIAL make sense.
"""
if type.lower() == "serial":
sequence_name = "%s_%s_seq" % (table, column)
return (
(
self.sql_alter_column_type % {
"column": self.quote_name(column),
"type": "integer",
},
[],
),
[
(
self.sql_delete_sequence % {
"sequence": sequence_name,
},
[],
),
(
self.sql_create_sequence % {
"sequence": sequence_name,
},
[],
),
(
self.sql_alter_column % {
"table": table,
"changes": self.sql_alter_column_default % {
"column": column,
"default": "nextval('%s')" % sequence_name,
}
},
[],
),
(
self.sql_set_sequence_max % {
"table": table,
"column": column,
"sequence": sequence_name,
},
[],
),
],
)
else:
return super(DatabaseSchemaEditor, self)._alter_column_type_sql(table, column, type)
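# Hedged illustration (not part of Django): formatting the sequence templates
# defined on the class above for a hypothetical "myapp_item"."id" column, to
# show the statements a serial-column alteration is assembled from.
if __name__ == '__main__':
    sequence = "%s_%s_seq" % ("myapp_item", "id")
    print(DatabaseSchemaEditor.sql_create_sequence % {"sequence": sequence})
    print(DatabaseSchemaEditor.sql_set_sequence_max % {
        "table": "myapp_item",
        "column": "id",
        "sequence": sequence,
    })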
|
gpl-2.0
|
drbild/boto
|
tests/unit/glacier/test_job.py
|
114
|
3711
|
# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from boto.compat import StringIO
from tests.compat import mock, unittest
from boto.glacier.job import Job
from boto.glacier.layer1 import Layer1
from boto.glacier.response import GlacierResponse
from boto.glacier.exceptions import TreeHashDoesNotMatchError
class TestJob(unittest.TestCase):
def setUp(self):
self.api = mock.Mock(spec=Layer1)
self.vault = mock.Mock()
self.vault.layer1 = self.api
self.job = Job(self.vault)
def test_get_job_validate_checksum_success(self):
response = GlacierResponse(mock.Mock(), None)
response['TreeHash'] = 'tree_hash'
self.api.get_job_output.return_value = response
with mock.patch('boto.glacier.job.tree_hash_from_str') as t:
t.return_value = 'tree_hash'
self.job.get_output(byte_range=(1, 1024), validate_checksum=True)
def test_get_job_validation_fails(self):
response = GlacierResponse(mock.Mock(), None)
response['TreeHash'] = 'tree_hash'
self.api.get_job_output.return_value = response
with mock.patch('boto.glacier.job.tree_hash_from_str') as t:
t.return_value = 'BAD_TREE_HASH_VALUE'
with self.assertRaises(TreeHashDoesNotMatchError):
# With validate_checksum set to True, this call fails.
self.job.get_output(byte_range=(1, 1024), validate_checksum=True)
# With validate_checksum set to False, this call succeeds.
self.job.get_output(byte_range=(1, 1024), validate_checksum=False)
def test_download_to_fileobj(self):
http_response = mock.Mock(read=mock.Mock(return_value='xyz'))
response = GlacierResponse(http_response, None)
response['TreeHash'] = 'tree_hash'
self.api.get_job_output.return_value = response
fileobj = StringIO()
self.job.archive_size = 3
with mock.patch('boto.glacier.job.tree_hash_from_str') as t:
t.return_value = 'tree_hash'
self.job.download_to_fileobj(fileobj)
fileobj.seek(0)
self.assertEqual(http_response.read.return_value, fileobj.read())
def test_calc_num_chunks(self):
self.job.archive_size = 0
self.assertEqual(self.job._calc_num_chunks(self.job.DefaultPartSize), 0)
self.job.archive_size = 1
self.assertEqual(self.job._calc_num_chunks(self.job.DefaultPartSize), 1)
self.job.archive_size = self.job.DefaultPartSize + 1
self.assertEqual(self.job._calc_num_chunks(self.job.DefaultPartSize), 2)
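# Hedged sketch (not boto's implementation): the values asserted in
# test_calc_num_chunks above are consistent with plain ceiling division of
# the archive size by the part size, with an empty archive needing no chunks.
def _expected_chunk_count(archive_size, part_size):
    if archive_size == 0:
        return 0
    return (archive_size + part_size - 1) // part_size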
if __name__ == '__main__':
unittest.main()
|
mit
|
DimStar77/osc
|
tests/test_repairwc.py
|
12
|
12468
|
import osc.core
import osc.oscerr
import os
import sys
from common import GET, PUT, POST, DELETE, OscTestCase
from xml.etree import cElementTree as ET
FIXTURES_DIR = os.path.join(os.getcwd(), 'repairwc_fixtures')
def suite():
import unittest
return unittest.makeSuite(TestRepairWC)
class TestRepairWC(OscTestCase):
def _get_fixtures_dir(self):
return FIXTURES_DIR
def __assertNotRaises(self, exception, meth, *args, **kwargs):
try:
meth(*args, **kwargs)
except exception:
self.fail('%s raised' % exception.__name__)
def test_working_empty(self):
"""consistent, empty working copy"""
self._change_to_pkg('working_empty')
self.__assertNotRaises(osc.oscerr.WorkingCopyInconsistent, osc.core.Package, '.')
def test_working_nonempty(self):
"""
consistent, non-empty working copy. One file is in conflict,
one file is marked for deletion and one file has state 'A'
"""
self._change_to_pkg('working_nonempty')
self.__assertNotRaises(osc.oscerr.WorkingCopyInconsistent, osc.core.Package, '.')
def test_buildfiles(self):
"""
wc has a _buildconfig_prj_arch and a _buildinfo_prj_arch.xml in the storedir
"""
self._change_to_pkg('buildfiles')
self.__assertNotRaises(osc.oscerr.WorkingCopyInconsistent, osc.core.Package, '.')
@GET('http://localhost/source/osctest/simple1/foo?rev=1', text='This is a simple test.\n')
def test_simple1(self):
"""a file is marked for deletion but storefile doesn't exist"""
self._change_to_pkg('simple1')
self.assertRaises(osc.oscerr.WorkingCopyInconsistent, osc.core.Package, '.')
p = osc.core.Package('.', wc_check=False)
p.wc_repair()
self.assertTrue(os.path.exists(os.path.join('.osc', 'foo')))
self._check_deletelist('foo\n')
self._check_status(p, 'foo', 'D')
self._check_status(p, 'nochange', 'M')
self._check_status(p, 'merge', ' ')
self._check_status(p, 'toadd1', '?')
# additional cleanup check
self.__assertNotRaises(osc.oscerr.WorkingCopyInconsistent, osc.core.Package, '.')
def test_simple2(self):
"""a file "somefile" exists in the storedir which isn't tracked"""
self._change_to_pkg('simple2')
self.assertRaises(osc.oscerr.WorkingCopyInconsistent, osc.core.Package, '.')
p = osc.core.Package('.', wc_check=False)
p.wc_repair()
self.assertFalse(os.path.exists(os.path.join('.osc', 'somefile')))
self._check_deletelist('foo\n')
self._check_status(p, 'foo', 'D')
self._check_status(p, 'nochange', 'M')
self._check_status(p, 'merge', ' ')
self._check_status(p, 'toadd1', '?')
# additional cleanup check
self.__assertNotRaises(osc.oscerr.WorkingCopyInconsistent, osc.core.Package, '.')
def test_simple3(self):
"""toadd1 has state 'A' and a file .osc/toadd1 exists"""
self._change_to_pkg('simple3')
self.assertRaises(osc.oscerr.WorkingCopyInconsistent, osc.core.Package, '.')
p = osc.core.Package('.', wc_check=False)
p.wc_repair()
self.assertFalse(os.path.exists(os.path.join('.osc', 'toadd1')))
self._check_deletelist('foo\n')
self._check_status(p, 'foo', 'D')
self._check_status(p, 'nochange', 'M')
self._check_status(p, 'merge', ' ')
self._check_addlist('toadd1\n')
self._check_status(p, 'toadd1', 'A')
# additional cleanup check
self.__assertNotRaises(osc.oscerr.WorkingCopyInconsistent, osc.core.Package, '.')
def test_simple4(self):
"""a file is listed in _to_be_deleted but isn't present in _files"""
self._change_to_pkg('simple4')
self.assertRaises(osc.oscerr.WorkingCopyInconsistent, osc.core.Package, '.')
p = osc.core.Package('.', wc_check=False)
p.wc_repair()
self._check_deletelist('foo\n')
self._check_status(p, 'foo', 'D')
self._check_status(p, 'nochange', 'M')
self._check_status(p, 'merge', ' ')
self._check_status(p, 'toadd1', '?')
# additional cleanup check
self.__assertNotRaises(osc.oscerr.WorkingCopyInconsistent, osc.core.Package, '.')
def test_simple5(self):
"""a file is listed in _in_conflict but isn't present in _files"""
self._change_to_pkg('simple5')
self.assertRaises(osc.oscerr.WorkingCopyInconsistent, osc.core.Package, '.')
p = osc.core.Package('.', wc_check=False)
p.wc_repair()
self.assertFalse(os.path.exists(os.path.join('.osc', '_in_conflict')))
self._check_deletelist('foo\n')
self._check_status(p, 'foo', 'D')
self._check_status(p, 'nochange', 'M')
self._check_status(p, 'merge', ' ')
self._check_status(p, 'toadd1', '?')
# additional cleanup check
self.__assertNotRaises(osc.oscerr.WorkingCopyInconsistent, osc.core.Package, '.')
@GET('http://localhost/source/osctest/simple6/foo?rev=1', text='This is a simple test.\n')
def test_simple6(self):
"""
a file is listed in _to_be_deleted and is present
in _files but the storefile is missing
"""
self._change_to_pkg('simple6')
self.assertRaises(osc.oscerr.WorkingCopyInconsistent, osc.core.Package, '.')
p = osc.core.Package('.', wc_check=False)
p.wc_repair()
self.assertTrue(os.path.exists(os.path.join('.osc', 'foo')))
self._check_deletelist('foo\n')
self._check_status(p, 'foo', 'D')
self._check_status(p, 'nochange', 'M')
self._check_status(p, 'merge', ' ')
self._check_status(p, 'toadd1', '?')
# additional cleanup check
self.__assertNotRaises(osc.oscerr.WorkingCopyInconsistent, osc.core.Package, '.')
def test_simple7(self):
"""files marked as skipped don't exist in the storedir"""
self._change_to_pkg('simple7')
self.__assertNotRaises(osc.oscerr.WorkingCopyInconsistent, osc.core.Package, '.')
def test_simple8(self):
"""
a file is marked as skipped but the skipped file exists in the storedir
"""
self._change_to_pkg('simple8')
self.assertRaises(osc.oscerr.WorkingCopyInconsistent, osc.core.Package, '.')
p = osc.core.Package('.', wc_check=False)
p.wc_repair()
self.assertFalse(os.path.exists(os.path.join('.osc', 'skipped')))
self._check_deletelist('foo\n')
self._check_status(p, 'foo', 'D')
self._check_status(p, 'nochange', 'M')
self._check_status(p, 'merge', ' ')
self._check_status(p, 'toadd1', '?')
self._check_status(p, 'skipped', 'S')
# additional cleanup check
self.__assertNotRaises(osc.oscerr.WorkingCopyInconsistent, osc.core.Package, '.')
@GET('http://localhost/source/osctest/multiple/merge?rev=1', text='Is it\npossible to\nmerge this file?I hope so...\n')
@GET('http://localhost/source/osctest/multiple/nochange?rev=1', text='This file didn\'t change.\n')
def test_multiple(self):
"""
a storefile is missing, a file is listed in _to_be_deleted
but is not present in _files, a file is listed in _in_conflict
but the storefile is missing and a file exists in the storedir
but is not present in _files
"""
self._change_to_pkg('multiple')
self.assertRaises(osc.oscerr.WorkingCopyInconsistent, osc.core.Package, '.')
p = osc.core.Package('.', wc_check=False)
p.wc_repair()
self.assertTrue(os.path.exists(os.path.join('.osc', 'foo')))
self.assertFalse(os.path.exists(os.path.join('.osc', 'unknown_file')))
self._check_deletelist('foo\n')
self._check_status(p, 'foo', 'D')
self._check_status(p, 'nochange', 'C')
self._check_status(p, 'merge', ' ')
self._check_status(p, 'foobar', 'A')
self._check_status(p, 'toadd1', '?')
# additional cleanup check
self.__assertNotRaises(osc.oscerr.WorkingCopyInconsistent, osc.core.Package, '.')
def test_noapiurl(self):
"""the package wc has no _apiurl file"""
self._change_to_pkg('noapiurl')
p = osc.core.Package('.', wc_check=False)
p.wc_repair('http://localhost')
self.assertTrue(os.path.exists(os.path.join('.osc', '_apiurl')))
self.assertEqual(open(os.path.join('.osc', '_apiurl')).read(), 'http://localhost\n')
self.assertEqual(p.apiurl, 'http://localhost')
def test_invalidapiurl(self):
"""the package wc has an invalid apiurl file (invalid url format)"""
self._change_to_pkg('invalid_apiurl')
p = osc.core.Package('.', wc_check=False)
p.wc_repair('http://localhost')
self.assertTrue(os.path.exists(os.path.join('.osc', '_apiurl')))
self.assertEqual(open(os.path.join('.osc', '_apiurl')).read(), 'http://localhost\n')
self.assertEqual(p.apiurl, 'http://localhost')
def test_invalidapiurl_param(self):
"""pass an invalid apiurl to wc_repair"""
try:
from urllib.error import URLError
except ImportError:
from urllib2 import URLError
self._change_to_pkg('invalid_apiurl')
p = osc.core.Package('.', wc_check=False)
self.assertRaises(URLError, p.wc_repair, 'http:/localhost')
self.assertRaises(URLError, p.wc_repair, 'invalid')
def test_noapiurlNotExistingApiurl(self):
"""the package wc has no _apiurl file and no apiurl is passed to repairwc"""
self._change_to_pkg('noapiurl')
self.assertRaises(osc.oscerr.WorkingCopyInconsistent, osc.core.Package, '.')
p = osc.core.Package('.', wc_check=False)
self.assertRaises(osc.oscerr.WorkingCopyInconsistent, p.wc_repair)
self.assertFalse(os.path.exists(os.path.join('.osc', '_apiurl')))
def test_project_noapiurl(self):
"""the project wc has no _apiurl file"""
import shutil
prj_dir = os.path.join(self.tmpdir, 'prj_noapiurl')
shutil.copytree(os.path.join(self._get_fixtures_dir(), 'prj_noapiurl'), prj_dir)
storedir = os.path.join(prj_dir, osc.core.store)
self.assertRaises(osc.oscerr.WorkingCopyInconsistent, osc.core.Project, prj_dir, getPackageList=False)
prj = osc.core.Project(prj_dir, wc_check=False, getPackageList=False)
prj.wc_repair('http://localhost')
self.assertTrue(os.path.exists(os.path.join(storedir, '_apiurl')))
self.assertTrue(os.path.exists(os.path.join(storedir, '_apiurl')))
self.assertEqual(open(os.path.join(storedir, '_apiurl'), 'r').read(), 'http://localhost\n')
def test_project_invalidapiurl(self):
"""the project wc has an invalid _apiurl file (invalid url format)"""
import shutil
prj_dir = os.path.join(self.tmpdir, 'prj_invalidapiurl')
shutil.copytree(os.path.join(self._get_fixtures_dir(), 'prj_invalidapiurl'), prj_dir)
storedir = os.path.join(prj_dir, osc.core.store)
self.assertRaises(osc.oscerr.WorkingCopyInconsistent, osc.core.Project, prj_dir, getPackageList=False)
prj = osc.core.Project(prj_dir, wc_check=False, getPackageList=False)
prj.wc_repair('http://localhost')
self.assertTrue(os.path.exists(os.path.join(storedir, '_apiurl')))
self.assertTrue(os.path.exists(os.path.join(storedir, '_apiurl')))
self.assertEqual(open(os.path.join(storedir, '_apiurl'), 'r').read(), 'http://localhost\n')
def test_project_invalidapiurl_param(self):
"""pass an invalid apiurl to wc_repair"""
import shutil
try:
from urllib.error import URLError
except ImportError:
from urllib2 import URLError
prj_dir = os.path.join(self.tmpdir, 'prj_invalidapiurl')
shutil.copytree(os.path.join(self._get_fixtures_dir(), 'prj_invalidapiurl'), prj_dir)
storedir = os.path.join(prj_dir, osc.core.store)
self.assertRaises(osc.oscerr.WorkingCopyInconsistent, osc.core.Project, prj_dir, getPackageList=False)
prj = osc.core.Project(prj_dir, wc_check=False, getPackageList=False)
self.assertRaises(URLError, prj.wc_repair, 'http:/localhost')
self.assertRaises(URLError, prj.wc_repair, 'invalid')
if __name__ == '__main__':
import unittest
unittest.main()
|
gpl-2.0
|
albertrdixon/CouchPotatoServer
|
libs/enzyme/mpeg.py
|
180
|
30553
|
# -*- coding: utf-8 -*-
# enzyme - Video metadata parser
# Copyright 2011-2012 Antoine Bertin <[email protected]>
# Copyright 2003-2006 Thomas Schueppel <[email protected]>
# Copyright 2003-2006 Dirk Meyer <[email protected]>
#
# This file is part of enzyme.
#
# enzyme is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# enzyme is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with enzyme. If not, see <http://www.gnu.org/licenses/>.
__all__ = ['Parser']
import os
import struct
import logging
import stat
from exceptions import ParseError
import core
# get logging object
log = logging.getLogger(__name__)
##------------------------------------------------------------------------
## START_CODE
##
## Start Codes, with 'slice' occupying 0x01..0xAF
##------------------------------------------------------------------------
START_CODE = {
0x00 : 'picture_start_code',
0xB0 : 'reserved',
0xB1 : 'reserved',
0xB2 : 'user_data_start_code',
0xB3 : 'sequence_header_code',
0xB4 : 'sequence_error_code',
0xB5 : 'extension_start_code',
0xB6 : 'reserved',
0xB7 : 'sequence end',
0xB8 : 'group of pictures',
}
for i in range(0x01, 0xAF):
START_CODE[i] = 'slice_start_code'
##------------------------------------------------------------------------
## START CODES
##------------------------------------------------------------------------
PICTURE = 0x00
USERDATA = 0xB2
SEQ_HEAD = 0xB3
SEQ_ERR = 0xB4
EXT_START = 0xB5
SEQ_END = 0xB7
GOP = 0xB8
SEQ_START_CODE = 0xB3
PACK_PKT = 0xBA
SYS_PKT = 0xBB
PADDING_PKT = 0xBE
AUDIO_PKT = 0xC0
VIDEO_PKT = 0xE0
PRIVATE_STREAM1 = 0xBD
PRIVATE_STREAM2 = 0xBf
TS_PACKET_LENGTH = 188
TS_SYNC = 0x47
##------------------------------------------------------------------------
## FRAME_RATE
##
## A lookup table of all the standard frame rates. Some rates adhere to
## a particular profile that ensures compatibility with VLSI capabilities
## of the early to mid 1990s.
##
## CPB
## Constrained Parameters Bitstreams, an MPEG-1 set of sampling and
## bitstream parameters designed to normalize decoder computational
## complexity, buffer size, and memory bandwidth while still addressing
## the widest possible range of applications.
##
## Main Level
## MPEG-2 Video Main Profile and Main Level is analogous to MPEG-1's
## CPB, with sampling limits at CCIR 601 parameters (720x480x30 Hz or
## 720x576x24 Hz).
##
##------------------------------------------------------------------------
FRAME_RATE = [
0,
24000.0 / 1001, ## 3-2 pulldown NTSC (CPB/Main Level)
24, ## Film (CPB/Main Level)
25, ## PAL/SECAM or 625/60 video
30000.0 / 1001, ## NTSC (CPB/Main Level)
30, ## drop-frame NTSC or component 525/60 (CPB/Main Level)
50, ## double-rate PAL
60000.0 / 1001, ## double-rate NTSC
60, ## double-rate, drop-frame NTSC/component 525/60 video
]
##------------------------------------------------------------------------
## ASPECT_RATIO -- INCOMPLETE?
##
## This lookup table maps the header aspect ratio index to a float value.
## These are just the defined ratios for CPB I believe. As I understand
## it, a stream that doesn't adhere to one of these aspect ratios is
## technically considered non-compliant.
##------------------------------------------------------------------------
ASPECT_RATIO = (None, # Forbidden
1.0, # 1/1 (VGA)
4.0 / 3, # 4/3 (TV)
16.0 / 9, # 16/9 (Widescreen)
2.21 # (Cinema)
)
class MPEG(core.AVContainer):
"""
Parser for various MPEG files. This includes MPEG-1 and MPEG-2
program streams, elementary streams and transport streams. The
reported length differs from the length reported by most video
players, but the length provided here is correct. An MPEG file has
no additional metadata such as a title; only codecs, length and
resolution are reported back.
"""
def __init__(self, file):
core.AVContainer.__init__(self)
self.sequence_header_offset = 0
self.mpeg_version = 2
# detect TS (fast scan)
if not self.isTS(file):
# detect system mpeg (many infos)
if not self.isMPEG(file):
# detect PES
if not self.isPES(file):
# Maybe it's MPEG-ES
if self.isES(file):
# If isES() succeeds, we needn't do anything further.
return
if file.name.lower().endswith('mpeg') or \
file.name.lower().endswith('mpg'):
# This has to be an mpeg file. It could be a bad
# recording from an ivtv based hardware encoder with
# some bytes missing at the beginning.
# Do some more digging...
if not self.isMPEG(file, force=True) or \
not self.video or not self.audio:
# does not look like an mpeg at all
raise ParseError()
else:
# no mpeg at all
raise ParseError()
self.mime = 'video/mpeg'
if not self.video:
self.video.append(core.VideoStream())
if self.sequence_header_offset <= 0:
return
self.progressive(file)
for vi in self.video:
vi.width, vi.height = self.dxy(file)
vi.fps, vi.aspect = self.framerate_aspect(file)
vi.bitrate = self.bitrate(file)
if self.length:
vi.length = self.length
if not self.type:
self.type = 'MPEG Video'
# set fourcc codec for video and audio
vc, ac = 'MP2V', 'MP2A'
if self.mpeg_version == 1:
vc, ac = 'MPEG', 0x0050
for v in self.video:
v.codec = vc
for a in self.audio:
if not a.codec:
a.codec = ac
def dxy(self, file):
"""
get width and height of the video
"""
file.seek(self.sequence_header_offset + 4, 0)
v = file.read(4)
x = struct.unpack('>H', v[:2])[0] >> 4
y = struct.unpack('>H', v[1:3])[0] & 0x0FFF
return (x, y)
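# Hedged worked example (not part of the parser): for a 720x576 sequence
# header the three size bytes are 0x2D 0x02 0x40, so
# struct.unpack('>H', '\x2d\x02')[0] >> 4 == 720 and
# struct.unpack('>H', '\x02\x40')[0] & 0x0FFF == 576.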
def framerate_aspect(self, file):
"""
read framerate and aspect ratio
"""
file.seek(self.sequence_header_offset + 7, 0)
v = struct.unpack('>B', file.read(1))[0]
try:
fps = FRAME_RATE[v & 0xf]
except IndexError:
fps = None
if v >> 4 < len(ASPECT_RATIO):
aspect = ASPECT_RATIO[v >> 4]
else:
aspect = None
return (fps, aspect)
def progressive(self, file):
"""
Try to find out with brute force if the mpeg is interlaced or not.
Search for the Sequence_Extension in the extension header (01B5)
"""
file.seek(0)
buffer = ''
count = 0
while 1:
if len(buffer) < 1000:
count += 1
if count > 1000:
break
buffer += file.read(1024)
if len(buffer) < 1000:
break
pos = buffer.find('\x00\x00\x01\xb5')
if pos == -1 or len(buffer) - pos < 5:
buffer = buffer[-10:]
continue
ext = (ord(buffer[pos + 4]) >> 4)
if ext == 8:
pass
elif ext == 1:
if (ord(buffer[pos + 5]) >> 3) & 1:
self._set('progressive', True)
else:
self._set('interlaced', True)
return True
else:
log.debug(u'ext: %r' % ext)
buffer = buffer[pos + 4:]
return False
##------------------------------------------------------------------------
## bitrate()
##
## From the MPEG-2.2 spec:
##
## bit_rate -- This is a 30-bit integer. The lower 18 bits of the
## integer are in bit_rate_value and the upper 12 bits are in
## bit_rate_extension. The 30-bit integer specifies the bitrate of the
## bitstream measured in units of 400 bits/second, rounded upwards.
## The value zero is forbidden.
##
## So ignoring all the variable bitrate stuff for now, this 30 bit integer
## multiplied times 400 bits/sec should give the rate in bits/sec.
##
## TODO: Variable bitrates? I need one that implements this.
##
## Continued from the MPEG-2.2 spec:
##
## If the bitstream is a constant bitrate stream, the bitrate specified
## is the actual rate of operation of the VBV specified in annex C. If
## the bitstream is a variable bitrate stream, the STD specifications in
## ISO/IEC 13818-1 supersede the VBV, and the bitrate specified here is
## used to dimension the transport stream STD (2.4.2 in ITU-T Rec. xxx |
## ISO/IEC 13818-1), or the program stream STD (2.4.5 in ITU-T Rec. xxx |
## ISO/IEC 13818-1).
##
## If the bitstream is not a constant rate bitstream the vbv_delay
## field shall have the value FFFF in hexadecimal.
##
## Given the value encoded in the bitrate field, the bitstream shall be
## generated so that the video encoding and the worst case multiplex
## jitter do not cause STD buffer overflow or underflow.
##
##
##------------------------------------------------------------------------
## Some parts in the code are based on mpgtx (mpgtx.sf.net)
def bitrate(self, file):
"""
read the bitrate (most of the time broken)
"""
file.seek(self.sequence_header_offset + 8, 0)
t, b = struct.unpack('>HB', file.read(3))
vrate = t << 2 | b >> 6
return vrate * 400
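# Hedged worked example (not part of the parser): for hypothetical header
# bytes t=0x186A and b=0x00, bitrate() computes
# (0x186A << 2 | 0x00 >> 6) == 25000, and 25000 * 400 == 10000000 bits/sec,
# i.e. a 10 Mbit/s constant-rate stream.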
def ReadSCRMpeg2(self, buffer):
"""
read SCR (timestamp) for MPEG2 at the buffer beginning (6 Bytes)
"""
if len(buffer) < 6:
return None
highbit = (ord(buffer[0]) & 0x20) >> 5
low4Bytes = ((long(ord(buffer[0])) & 0x18) >> 3) << 30
low4Bytes |= (ord(buffer[0]) & 0x03) << 28
low4Bytes |= ord(buffer[1]) << 20
low4Bytes |= (ord(buffer[2]) & 0xF8) << 12
low4Bytes |= (ord(buffer[2]) & 0x03) << 13
low4Bytes |= ord(buffer[3]) << 5
low4Bytes |= (ord(buffer[4])) >> 3
sys_clock_ref = (ord(buffer[4]) & 0x3) << 7
sys_clock_ref |= (ord(buffer[5]) >> 1)
return (long(highbit * (1 << 16) * (1 << 16)) + low4Bytes) / 90000
def ReadSCRMpeg1(self, buffer):
"""
read SCR (timestamp) for MPEG1 at the buffer beginning (5 Bytes)
"""
if len(buffer) < 5:
return None
highbit = (ord(buffer[0]) >> 3) & 0x01
low4Bytes = ((long(ord(buffer[0])) >> 1) & 0x03) << 30
low4Bytes |= ord(buffer[1]) << 22
low4Bytes |= (ord(buffer[2]) >> 1) << 15
low4Bytes |= ord(buffer[3]) << 7
low4Bytes |= ord(buffer[4]) >> 1
return (long(highbit) * (1 << 16) * (1 << 16) + low4Bytes) / 90000
def ReadPTS(self, buffer):
"""
read PTS (PES timestamp) at the buffer beginning (5 Bytes)
"""
high = ((ord(buffer[0]) & 0xF) >> 1)
med = (ord(buffer[1]) << 7) + (ord(buffer[2]) >> 1)
low = (ord(buffer[3]) << 7) + (ord(buffer[4]) >> 1)
return ((long(high) << 30) + (med << 15) + low) / 90000
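# Hedged worked example (not part of the parser): the decoded 33-bit value
# counts ticks of the 90 kHz MPEG system clock, so dividing by 90000 yields
# seconds; e.g. 5400000 ticks correspond to a timestamp of 60 seconds.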
def ReadHeader(self, buffer, offset):
"""
Handle MPEG header in buffer on position offset
Return None on error, new offset or 0 if the new offset can't be scanned
"""
if buffer[offset:offset + 3] != '\x00\x00\x01':
return None
id = ord(buffer[offset + 3])
if id == PADDING_PKT:
return offset + (ord(buffer[offset + 4]) << 8) + \
ord(buffer[offset + 5]) + 6
if id == PACK_PKT:
if ord(buffer[offset + 4]) & 0xF0 == 0x20:
self.type = 'MPEG-1 Video'
self.get_time = self.ReadSCRMpeg1
self.mpeg_version = 1
return offset + 12
elif (ord(buffer[offset + 4]) & 0xC0) == 0x40:
self.type = 'MPEG-2 Video'
self.get_time = self.ReadSCRMpeg2
return offset + (ord(buffer[offset + 13]) & 0x07) + 14
else:
# I have no idea what just happened, but for some DVB
# recordings done with mencoder this points to a
# PACK_PKT describing something odd. Returning 0 here
# (let's hope there are no extensions in the header)
# fixes it.
return 0
if 0xC0 <= id <= 0xDF:
# code for audio stream
for a in self.audio:
if a.id == id:
break
else:
self.audio.append(core.AudioStream())
self.audio[-1]._set('id', id)
return 0
if 0xE0 <= id <= 0xEF:
# code for video stream
for v in self.video:
if v.id == id:
break
else:
self.video.append(core.VideoStream())
self.video[-1]._set('id', id)
return 0
if id == SEQ_HEAD:
# sequence header, remember that position for later use
self.sequence_header_offset = offset
return 0
if id in [PRIVATE_STREAM1, PRIVATE_STREAM2]:
# private stream. we don't know, but maybe we can guess later
add = ord(buffer[offset + 8])
# if (ord(buffer[offset+6]) & 4) or 1:
# id = ord(buffer[offset+10+add])
if buffer[offset + 11 + add:offset + 15 + add].find('\x0b\x77') != -1:
# AC3 stream
for a in self.audio:
if a.id == id:
break
else:
self.audio.append(core.AudioStream())
self.audio[-1]._set('id', id)
self.audio[-1].codec = 0x2000 # AC3
return 0
if id == SYS_PKT:
return 0
if id == EXT_START:
return 0
return 0
# Normal MPEG (VCD, SVCD) ========================================
def isMPEG(self, file, force=False):
"""
This MPEG starts with a sequence of 0x00 followed by a PACK Header
http://dvd.sourceforge.net/dvdinfo/packhdr.html
"""
file.seek(0, 0)
buffer = file.read(10000)
offset = 0
# seek until the 0 byte stop
while offset < len(buffer) - 100 and buffer[offset] == '\0':
offset += 1
offset -= 2
# test for mpeg header 0x00 0x00 0x01
header = '\x00\x00\x01%s' % chr(PACK_PKT)
if offset < 0 or not buffer[offset:offset + 4] == header:
if not force:
return 0
# brute force and try to find the pack header in the first
# 10000 bytes somehow
offset = buffer.find(header)
if offset < 0:
return 0
# scan the 100000 bytes of data
buffer += file.read(100000)
# scan first header, to get basic info about
# how to read a timestamp
self.ReadHeader(buffer, offset)
# store first timestamp
self.start = self.get_time(buffer[offset + 4:])
while len(buffer) > offset + 1000 and \
buffer[offset:offset + 3] == '\x00\x00\x01':
# read the mpeg header
new_offset = self.ReadHeader(buffer, offset)
# header scanning detected error, this is no mpeg
if new_offset == None:
return 0
if new_offset:
# we have a new offset
offset = new_offset
# skip padding 0 before a new header
while len(buffer) > offset + 10 and \
not ord(buffer[offset + 2]):
offset += 1
else:
# seek to new header by brute force
offset += buffer[offset + 4:].find('\x00\x00\x01') + 4
# fill in values for support functions:
self.__seek_size__ = 1000000
self.__sample_size__ = 10000
self.__search__ = self._find_timer_
self.filename = file.name
# get length of the file
self.length = self.get_length()
return 1
def _find_timer_(self, buffer):
"""
Return position of timer in buffer or None if not found.
This function is valid for 'normal' mpeg files
"""
pos = buffer.find('\x00\x00\x01%s' % chr(PACK_PKT))
if pos == -1:
return None
return pos + 4
# PES ============================================================
def ReadPESHeader(self, offset, buffer, id=0):
"""
Parse a PES header.
Since it starts with 0x00 0x00 0x01 like 'normal' mpegs, this
function will return (0, None) when it is not a PES header, or
(packet length, timestamp position (maybe None))
http://dvd.sourceforge.net/dvdinfo/pes-hdr.html
"""
if not buffer[0:3] == '\x00\x00\x01':
return 0, None
packet_length = (ord(buffer[4]) << 8) + ord(buffer[5]) + 6
align = ord(buffer[6]) & 4
header_length = ord(buffer[8])
# PES ID (starting with 001)
if ord(buffer[3]) & 0xE0 == 0xC0:
id = id or ord(buffer[3]) & 0x1F
for a in self.audio:
if a.id == id:
break
else:
self.audio.append(core.AudioStream())
self.audio[-1]._set('id', id)
elif ord(buffer[3]) & 0xF0 == 0xE0:
id = id or ord(buffer[3]) & 0xF
for v in self.video:
if v.id == id:
break
else:
self.video.append(core.VideoStream())
self.video[-1]._set('id', id)
# new mpeg starting
if buffer[header_length + 9:header_length + 13] == \
'\x00\x00\x01\xB3' and not self.sequence_header_offset:
# yes, remember offset for later use
self.sequence_header_offset = offset + header_length + 9
elif ord(buffer[3]) == 189 or ord(buffer[3]) == 191:
# private stream. we don't know, but maybe we can guess later
id = id or ord(buffer[3]) & 0xF
if align and \
buffer[header_length + 9:header_length + 11] == '\x0b\x77':
# AC3 stream
for a in self.audio:
if a.id == id:
break
else:
self.audio.append(core.AudioStream())
self.audio[-1]._set('id', id)
self.audio[-1].codec = 0x2000 # AC3
else:
# unknown content
pass
ptsdts = ord(buffer[7]) >> 6
if ptsdts and ptsdts == ord(buffer[9]) >> 4:
if ord(buffer[9]) >> 4 != ptsdts:
log.warning(u'WARNING: bad PTS/DTS, please contact us')
return packet_length, None
# timestamp = self.ReadPTS(buffer[9:14])
high = ((ord(buffer[9]) & 0xF) >> 1)
med = (ord(buffer[10]) << 7) + (ord(buffer[11]) >> 1)
low = (ord(buffer[12]) << 7) + (ord(buffer[13]) >> 1)
return packet_length, 9
return packet_length, None
def isPES(self, file):
log.info(u'trying mpeg-pes scan')
file.seek(0, 0)
buffer = file.read(3)
# header (also valid for all mpegs)
if not buffer == '\x00\x00\x01':
return 0
self.sequence_header_offset = 0
buffer += file.read(10000)
offset = 0
while offset + 1000 < len(buffer):
pos, timestamp = self.ReadPESHeader(offset, buffer[offset:])
if not pos:
return 0
if timestamp != None and not hasattr(self, 'start'):
self.get_time = self.ReadPTS
bpos = buffer[offset + timestamp:offset + timestamp + 5]
self.start = self.get_time(bpos)
if self.sequence_header_offset and hasattr(self, 'start'):
# we have all the information we need
break
offset += pos
if offset + 1000 < len(buffer) and len(buffer) < 1000000 or 1:
# looks like a pes, read more
buffer += file.read(10000)
if not self.video and not self.audio:
# no video and no audio?
return 0
self.type = 'MPEG-PES'
# fill in values for support functions:
self.__seek_size__ = 10000000 # 10 MB
self.__sample_size__ = 500000 # 500 k scanning
self.__search__ = self._find_timer_PES_
self.filename = file.name
# get length of the file
self.length = self.get_length()
return 1
def _find_timer_PES_(self, buffer):
"""
Return position of timer in buffer or -1 if not found.
This function is valid for PES files
"""
pos = buffer.find('\x00\x00\x01')
offset = 0
if pos == -1 or offset + 1000 >= len(buffer):
return None
retpos = -1
ackcount = 0
while offset + 1000 < len(buffer):
pos, timestamp = self.ReadPESHeader(offset, buffer[offset:])
if timestamp != None and retpos == -1:
retpos = offset + timestamp
if pos == 0:
# Oops, that was an mpeg header, not a PES header
offset += buffer[offset:].find('\x00\x00\x01')
retpos = -1
ackcount = 0
else:
offset += pos
if retpos != -1:
ackcount += 1
if ackcount > 10:
# looks ok to me
return retpos
return None
# Elementary Stream ===============================================
def isES(self, file):
file.seek(0, 0)
try:
header = struct.unpack('>LL', file.read(8))
except (struct.error, IOError):
return False
if header[0] != 0x1B3:
return False
# Is an mpeg video elementary stream
self.mime = 'video/mpeg'
video = core.VideoStream()
video.width = header[1] >> 20
video.height = (header[1] >> 8) & 0xfff
if header[1] & 0xf < len(FRAME_RATE):
video.fps = FRAME_RATE[header[1] & 0xf]
if (header[1] >> 4) & 0xf < len(ASPECT_RATIO):
# FIXME: Empirically the aspect looks like PAR rather than DAR
video.aspect = ASPECT_RATIO[(header[1] >> 4) & 0xf]
self.video.append(video)
return True
# Transport Stream ===============================================
def isTS(self, file):
file.seek(0, 0)
buffer = file.read(TS_PACKET_LENGTH * 2)
c = 0
while c + TS_PACKET_LENGTH < len(buffer):
if ord(buffer[c]) == ord(buffer[c + TS_PACKET_LENGTH]) == TS_SYNC:
break
c += 1
else:
return 0
buffer += file.read(10000)
self.type = 'MPEG-TS'
while c + TS_PACKET_LENGTH < len(buffer):
start = ord(buffer[c + 1]) & 0x40
# maybe load more into the buffer
if c + 2 * TS_PACKET_LENGTH > len(buffer) and c < 500000:
buffer += file.read(10000)
# wait until the ts payload contains a payload header
if not start:
c += TS_PACKET_LENGTH
continue
tsid = ((ord(buffer[c + 1]) & 0x3F) << 8) + ord(buffer[c + 2])
adapt = (ord(buffer[c + 3]) & 0x30) >> 4
offset = 4
if adapt & 0x02:
# meta info present, skip it for now
adapt_len = ord(buffer[c + offset])
offset += adapt_len + 1
if not ord(buffer[c + 1]) & 0x40:
# no new pes or psi in stream payload starting
pass
elif adapt & 0x01:
# PES
timestamp = self.ReadPESHeader(c + offset, buffer[c + offset:],
tsid)[1]
if timestamp != None:
if not hasattr(self, 'start'):
self.get_time = self.ReadPTS
timestamp = c + offset + timestamp
self.start = self.get_time(buffer[timestamp:timestamp + 5])
elif not hasattr(self, 'audio_ok'):
timestamp = c + offset + timestamp
start = self.get_time(buffer[timestamp:timestamp + 5])
if start is not None and self.start is not None and \
abs(start - self.start) < 10:
# looks ok
self.audio_ok = True
else:
# timestamp broken
del self.start
log.warning(u'Timestamp error, correcting')
if hasattr(self, 'start') and self.start and \
self.sequence_header_offset and self.video and self.audio:
break
c += TS_PACKET_LENGTH
if not self.sequence_header_offset:
return 0
# fill in values for support functions:
self.__seek_size__ = 10000000 # 10 MB
self.__sample_size__ = 100000 # 100 k scanning
self.__search__ = self._find_timer_TS_
self.filename = file.name
# get length of the file
self.length = self.get_length()
return 1
def _find_timer_TS_(self, buffer):
c = 0
while c + TS_PACKET_LENGTH < len(buffer):
if ord(buffer[c]) == ord(buffer[c + TS_PACKET_LENGTH]) == TS_SYNC:
break
c += 1
else:
return None
while c + TS_PACKET_LENGTH < len(buffer):
start = ord(buffer[c + 1]) & 0x40
if not start:
c += TS_PACKET_LENGTH
continue
tsid = ((ord(buffer[c + 1]) & 0x3F) << 8) + ord(buffer[c + 2])
adapt = (ord(buffer[c + 3]) & 0x30) >> 4
offset = 4
if adapt & 0x02:
# meta info present, skip it for now
offset += ord(buffer[c + offset]) + 1
if adapt & 0x01:
timestamp = self.ReadPESHeader(c + offset, buffer[c + offset:], tsid)[1]
if timestamp is None:
# this should not happen
log.error(u'bad TS')
return None
return c + offset + timestamp
c += TS_PACKET_LENGTH
return None
# Support functions ==============================================
def get_endpos(self):
"""
get the last timestamp of the mpeg, return None if this is not possible
"""
if not hasattr(self, 'filename') or not hasattr(self, 'start'):
return None
length = os.stat(self.filename)[stat.ST_SIZE]
if length < self.__sample_size__:
return
file = open(self.filename)
file.seek(length - self.__sample_size__)
buffer = file.read(self.__sample_size__)
end = None
while 1:
pos = self.__search__(buffer)
if pos == None:
break
end = self.get_time(buffer[pos:]) or end
buffer = buffer[pos + 100:]
file.close()
return end
def get_length(self):
"""
get the length in seconds, return None if this is not possible
"""
end = self.get_endpos()
if end == None or self.start == None:
return None
if self.start > end:
return int(((long(1) << 33) - 1) / 90000) - self.start + end
return end - self.start
def seek(self, end_time):
"""
Return the byte position in the file where the time position
is 'end_time' seconds. Return 0 if this is not possible
"""
if not hasattr(self, 'filename') or not hasattr(self, 'start'):
return 0
file = open(self.filename)
seek_to = 0
while 1:
file.seek(self.__seek_size__, 1)
buffer = file.read(self.__sample_size__)
if len(buffer) < 10000:
break
pos = self.__search__(buffer)
if pos != None:
# found something
nt = self.get_time(buffer[pos:])
if nt is not None and nt >= end_time:
# too much, break
break
# that wasn't enough
seek_to = file.tell()
file.close()
return seek_to
def __scan__(self):
"""
scan file for timestamps (may take a long time)
"""
if not hasattr(self, 'filename') or not hasattr(self, 'start'):
return 0
file = open(self.filename)
log.debug(u'scanning file...')
while 1:
file.seek(self.__seek_size__ * 10, 1)
buffer = file.read(self.__sample_size__)
if len(buffer) < 10000:
break
pos = self.__search__(buffer)
if pos == None:
continue
log.debug(u'buffer position: %r' % self.get_time(buffer[pos:]))
file.close()
log.debug(u'done scanning file')
Parser = MPEG
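# Hedged usage sketch (illustration only; assumes the enzyme package layout
# implied by the imports above and a local file named 'sample.mpg'):
if __name__ == '__main__':
    f = open('sample.mpg', 'rb')
    try:
        info = Parser(f)
        print('%s, %s seconds, %d video stream(s)' %
              (info.type, info.length, len(info.video)))
    finally:
        f.close()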
|
gpl-3.0
|
trotterdylan/grumpy
|
lib/random_test.py
|
7
|
2609
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import _random
import random
import weetest
def TestGrumpyRandom():
assert len(_random._gorandom(5)) == 5
assert _random._int_bit_length(0) == 0
assert _random._int_bit_length(1) == 1
assert _random._int_bit_length(8) == 4
assert _random._int_bit_length(256) == 9
assert _random._int_from_bytes([1, 0, 0, 0]) == 1
assert _random._int_from_bytes([0, 0, 0, 0]) == 0
assert _random._int_from_bytes([255, 255, 0, 0]) == 65535
assert _random._int_from_bytes([0, 0, 0, 1]) == 16777216
r = _random.GrumpyRandom()
assert 0.0 <= r.random() < 1.0
assert 0 <= r.getrandbits(1) <= 1
assert 0 <= r.getrandbits(2) <= 3
assert 0 <= r.getrandbits(8) <= 255
assert 0 <= r._randbelow(1) < 1
assert 0 <= r._randbelow(3) < 3
assert 0 <= r._randbelow(1000) < 1000
def TestSeed():
random.seed()
try:
random.seed("wrongtype")
except TypeError:
pass
else:
raise AssertionError("TypeError not raised")
def TestRandom():
a = random.random()
b = random.random()
c = random.random()
assert isinstance(a, float)
assert 0.0 <= a < 1.0
assert not a == b == c
def TestRandomUniform():
for _ in range(10):
a = random.uniform(0, 1000)
assert isinstance(a, float)
assert 0 <= a <= 1000
def TestRandomInt():
for _ in range(10):
a = random.randint(0, 1000000)
assert isinstance(a, int)
assert 0 <= a <= 1000000
b = random.randint(1, 1)
assert b == 1
try:
c = random.randint(0.1, 3)
except ValueError:
pass
else:
raise AssertionError("ValueError not raised")
try:
d = random.randint(4, 3)
except ValueError:
pass
else:
raise AssertionError("ValueError not raised")
def TestRandomChoice():
seq = [i*2 for i in range(5)]
for i in range(10):
item = random.choice(seq)
item_idx = item/2
assert seq[item_idx] == item
try:
random.choice([])
except IndexError:
pass
else:
raise AssertionError("IndexError not raised")
if __name__ == '__main__':
weetest.RunTests()
|
apache-2.0
|
yourcelf/btb
|
scanblog/btb/tests/live/mod_upload_tests.py
|
2
|
2033
|
import os
import time
from .base import BtbLiveServerTestCase, NoSuchElementException
from django.conf import settings
from scanning.models import Scan
from django.contrib.auth.models import User
class TestModUploads(BtbLiveServerTestCase):
def setUp(self):
super(TestModUploads, self).setUp()
self.applied_tasks = []
def tearDown(self):
super(TestModUploads, self).tearDown()
for s in Scan.objects.all():
s.full_delete()
def upload_file(self, path, number_of_scans):
assert os.path.exists(path)
assert Scan.objects.count() == 0
b = self.selenium
b.get(self.url("/"))
b.find_element_by_link_text("Upload").click()
self.assertEquals(self.css("h1").text, "Upload Scans")
el = self.css("#id_file")
el.send_keys(path)
self.css(".upload-submit").submit()
self.wait(lambda b: not b.current_url.startswith(self.url("/scanning/add")))
while b.current_url.startswith(self.url("/moderation/wait")):
try:
el = self.css(".error")
assert False, el.text
except NoSuchElementException:
time.sleep(1)
if number_of_scans == 1:
self.wait(lambda b: self.css("h1").text == "Split Scan")
self.assertTrue(b.current_url.startswith(self.url("/moderation/#/process")))
else:
time.sleep(5)
self.assertEquals(b.current_url, self.url("/moderation/"))
self.wait(lambda b: len(self.csss(".open-scans .scan")) == number_of_scans, 60)
for s in Scan.objects.all():
s.full_delete()
def test_upload(self):
self.sign_in("testmod", "testmod")
for filename, count in [
("unixzip.zip", 2),
("maczip.zip", 2),
("ex-req-post-photo.pdf", 1)]:
self.upload_file(
os.path.join(settings.MEDIA_ROOT, "test", "src", filename),
count
)
|
agpl-3.0
|
sameetb-cuelogic/edx-platform-test
|
common/djangoapps/util/tests/test_authentication.py
|
87
|
2512
|
"""Tests for util.authentication module."""
from mock import patch
from django.conf import settings
from rest_framework import permissions
from rest_framework.compat import patterns, url
from rest_framework.tests import test_authentication
from provider import scope, constants
from unittest import skipUnless
from ..authentication import OAuth2AuthenticationAllowInactiveUser
class OAuth2AuthAllowInactiveUserDebug(OAuth2AuthenticationAllowInactiveUser):
"""
A debug class analogous to the OAuth2AuthenticationDebug class that tests
the OAuth2 flow with the access token sent in a query param."""
allow_query_params_token = True
# The following patch overrides the URL patterns for the MockView class used in
# rest_framework.tests.test_authentication so that the corresponding AllowInactiveUser
# classes are tested instead.
@skipUnless(settings.FEATURES.get('ENABLE_OAUTH2_PROVIDER'), 'OAuth2 not enabled')
@patch.object(
test_authentication,
'urlpatterns',
patterns(
'',
url(
r'^oauth2-test/$',
test_authentication.MockView.as_view(authentication_classes=[OAuth2AuthenticationAllowInactiveUser])
),
url(
r'^oauth2-test-debug/$',
test_authentication.MockView.as_view(authentication_classes=[OAuth2AuthAllowInactiveUserDebug])
),
url(
r'^oauth2-with-scope-test/$',
test_authentication.MockView.as_view(
authentication_classes=[OAuth2AuthenticationAllowInactiveUser],
permission_classes=[permissions.TokenHasReadWriteScope]
)
)
)
)
class OAuth2AuthenticationAllowInactiveUserTestCase(test_authentication.OAuth2Tests):
"""
Tests the OAuth2AuthenticationAllowInactiveUser class by running all the existing tests in
OAuth2Tests but with the is_active flag on the user set to False.
"""
def setUp(self):
super(OAuth2AuthenticationAllowInactiveUserTestCase, self).setUp()
# set the user's is_active flag to False.
self.user.is_active = False
self.user.save()
# Override the SCOPE_NAME_DICT setting for tests for oauth2-with-scope-test. This is
# needed to support READ and WRITE scopes as they currently aren't supported by the
# edx-auth2-provider, and their scope values collide with other scopes defined in the
# edx-auth2-provider.
scope.SCOPE_NAME_DICT = {'read': constants.READ, 'write': constants.WRITE}
|
agpl-3.0
|
octocoin-project/octocoin
|
qa/rpc-tests/rest.py
|
2
|
3259
|
#!/usr/bin/env python2
# Copyright (c) 2014 The Octocoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test REST interface
#
from test_framework import BitcoinTestFramework
from util import *
import json
try:
import http.client as httplib
except ImportError:
import httplib
try:
import urllib.parse as urlparse
except ImportError:
import urlparse
def http_get_call(host, port, path, response_object = 0):
conn = httplib.HTTPConnection(host, port)
conn.request('GET', path)
if response_object:
return conn.getresponse()
return conn.getresponse().read()
class RESTTest (BitcoinTestFramework):
FORMAT_SEPARATOR = "."
def run_test(self):
url = urlparse.urlparse(self.nodes[0].url)
bb_hash = self.nodes[0].getbestblockhash()
# check binary format
response = http_get_call(url.hostname, url.port, '/rest/block/'+bb_hash+self.FORMAT_SEPARATOR+"bin", True)
assert_equal(response.status, 200)
assert_greater_than(int(response.getheader('content-length')), 10)
# check json format
json_string = http_get_call(url.hostname, url.port, '/rest/block/'+bb_hash+self.FORMAT_SEPARATOR+'json')
json_obj = json.loads(json_string)
assert_equal(json_obj['hash'], bb_hash)
# do tx test
tx_hash = json_obj['tx'][0]['txid']
json_string = http_get_call(url.hostname, url.port, '/rest/tx/'+tx_hash+self.FORMAT_SEPARATOR+"json")
json_obj = json.loads(json_string)
assert_equal(json_obj['txid'], tx_hash)
# check hex format response
hex_response = http_get_call(url.hostname, url.port, '/rest/tx/'+tx_hash+self.FORMAT_SEPARATOR+"hex", True)
assert_equal(hex_response.status, 200)
assert_greater_than(int(hex_response.getheader('content-length')), 10)
# check block tx details
# let's make 3 tx and mine them on node 1
txs = []
txs.append(self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 11))
txs.append(self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 11))
txs.append(self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 11))
self.sync_all()
# now mine the transactions
newblockhash = self.nodes[1].setgenerate(True, 1)
self.sync_all()
#check if the 3 tx show up in the new block
json_string = http_get_call(url.hostname, url.port, '/rest/block/'+newblockhash[0]+self.FORMAT_SEPARATOR+'json')
json_obj = json.loads(json_string)
for tx in json_obj['tx']:
if not 'coinbase' in tx['vin'][0]: #exclude coinbase
assert_equal(tx['txid'] in txs, True)
#check the same but without tx details
json_string = http_get_call(url.hostname, url.port, '/rest/block/notxdetails/'+newblockhash[0]+self.FORMAT_SEPARATOR+'json')
json_obj = json.loads(json_string)
for tx in txs:
assert_equal(tx in json_obj['tx'], True)
if __name__ == '__main__':
RESTTest ().main ()
|
mit
|
songmonit/CTTMSONLINE_V8
|
addons/account/wizard/account_use_model.py
|
341
|
3361
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import fields, osv
from openerp.tools.translate import _
class account_use_model(osv.osv_memory):
_name = 'account.use.model'
_description = 'Use model'
_columns = {
'model': fields.many2many('account.model', 'account_use_model_relation', 'account_id', 'model_id', 'Account Model'),
}
    def view_init(self, cr, uid, fields_list, context=None):
account_model_obj = self.pool.get('account.model')
if context is None:
context = {}
if context.get('active_ids',False):
data_model = account_model_obj.browse(cr, uid, context['active_ids'])
for model in data_model:
for line in model.lines_id:
if line.date_maturity == 'partner':
if not line.partner_id:
raise osv.except_osv(_('Error!'), _("Maturity date of entry line generated by model line '%s' is based on partner payment term!"\
"\nPlease define partner on it!")%line.name)
pass
def create_entries(self, cr, uid, ids, context=None):
account_model_obj = self.pool.get('account.model')
mod_obj = self.pool.get('ir.model.data')
if context is None:
context = {}
data = self.read(cr, uid, ids, context=context)[0]
record_id = context and context.get('model_line', False) or False
if record_id:
model_ids = data['model']
else:
model_ids = context['active_ids']
move_ids = account_model_obj.generate(cr, uid, model_ids, context=context)
context = dict(context, move_ids=move_ids)
model_data_ids = mod_obj.search(cr, uid,[('model','=','ir.ui.view'),('name','=','view_move_form')], context=context)
resource_id = mod_obj.read(cr, uid, model_data_ids, fields=['res_id'], context=context)[0]['res_id']
return {
'domain': "[('id','in', ["+','.join(map(str,context['move_ids']))+"])]",
'name': 'Entries',
'view_type': 'form',
'view_mode': 'tree,form',
'res_model': 'account.move',
'views': [(False,'tree'),(resource_id,'form')],
'type': 'ir.actions.act_window',
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
xujun10110/golismero
|
tools/sqlmap/thirdparty/chardet/euckrprober.py
|
236
|
1672
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from mbcharsetprober import MultiByteCharSetProber
from codingstatemachine import CodingStateMachine
from chardistribution import EUCKRDistributionAnalysis
from mbcssm import EUCKRSMModel
class EUCKRProber(MultiByteCharSetProber):
def __init__(self):
MultiByteCharSetProber.__init__(self)
self._mCodingSM = CodingStateMachine(EUCKRSMModel)
self._mDistributionAnalyzer = EUCKRDistributionAnalysis()
self.reset()
def get_charset_name(self):
return "EUC-KR"
|
gpl-2.0
|
mtougeron/python-openstacksdk
|
openstack/tests/unit/metric/v1/test_metric.py
|
3
|
2949
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import testtools
from openstack.metric.v1 import metric
EXAMPLE = {
'id': '31bbd62e-b144-11e4-983c-bf9dbe7e25e6',
'archive_policy_name': 'low',
'created_by_user_id': '41bbd62e-b144-11e4-983c-bf9dbe7e25e6',
'created_by_project_id': '51bbd62e-b144-11e4-983c-bf9dbe7e25e6',
'resource_id': None,
'name': None,
}
EXAMPLE_AP = {
'id': '31bbd62e-b144-11e4-983c-bf9dbe7e25e6',
'archive_policy': {
'name': "foobar",
},
'created_by_user_id': '41bbd62e-b144-11e4-983c-bf9dbe7e25e6',
'created_by_project_id': '51bbd62e-b144-11e4-983c-bf9dbe7e25e6',
'resource_id': "61bbd62e-b144-11e4-983c-bf9dbe7e25e6",
'name': "foobaz",
}
class TestMetric(testtools.TestCase):
def setUp(self):
super(TestMetric, self).setUp()
self.resp = mock.Mock()
self.resp.body = ''
self.sess = mock.Mock()
self.sess.put = mock.MagicMock()
self.sess.put.return_value = self.resp
def test_basic(self):
m = metric.Metric()
self.assertIsNone(m.resource_key)
self.assertIsNone(m.resources_key)
self.assertEqual('/metric', m.base_path)
self.assertEqual('metric', m.service.service_type)
self.assertTrue(m.allow_create)
self.assertTrue(m.allow_retrieve)
self.assertFalse(m.allow_update)
self.assertTrue(m.allow_delete)
self.assertTrue(m.allow_list)
def test_make_it(self):
m = metric.Metric(EXAMPLE)
self.assertEqual(EXAMPLE['id'], m.id)
self.assertEqual(EXAMPLE['archive_policy_name'], m.archive_policy_name)
self.assertEqual(EXAMPLE['created_by_user_id'], m.created_by_user_id)
self.assertEqual(EXAMPLE['created_by_project_id'],
m.created_by_project_id)
self.assertEqual(EXAMPLE['resource_id'], m.resource_id)
self.assertEqual(EXAMPLE['name'], m.name)
m = metric.Metric(EXAMPLE_AP)
self.assertEqual(EXAMPLE_AP['id'], m.id)
self.assertEqual(EXAMPLE_AP['archive_policy'], m.archive_policy)
self.assertEqual(EXAMPLE_AP['created_by_user_id'],
m.created_by_user_id)
self.assertEqual(EXAMPLE_AP['created_by_project_id'],
m.created_by_project_id)
self.assertEqual(EXAMPLE_AP['resource_id'], m.resource_id)
self.assertEqual(EXAMPLE_AP['name'], m.name)
|
apache-2.0
|
JVillella/tensorflow
|
tensorflow/contrib/boosted_trees/python/kernel_tests/ensemble_optimizer_ops_test.py
|
27
|
14406
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the GTFlow ensemble optimization ops.
The tests cover:
- Adding a newly built tree to an existing ensemble
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.boosted_trees.proto import tree_config_pb2
from tensorflow.contrib.boosted_trees.python.ops import ensemble_optimizer_ops
from tensorflow.contrib.boosted_trees.python.ops import model_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import resources
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
def _append_to_leaf(leaf, class_id, weight):
"""Helper method for building tree leaves.
Appends weight contributions for the given class index to a leaf node.
Args:
leaf: leaf node to append to, int
class_id: class Id for the weight update, int
weight: weight contribution value, float
"""
leaf.sparse_vector.index.append(class_id)
leaf.sparse_vector.value.append(weight)
class EnsembleOptimizerOpsTest(test_util.TensorFlowTestCase):
def setUp(self):
"""Create an ensemble of 2 trees."""
super(EnsembleOptimizerOpsTest, self).setUp()
self._tree_ensemble = tree_config_pb2.DecisionTreeEnsembleConfig()
# First tree.
tree_1 = self._tree_ensemble.trees.add()
_append_to_leaf(tree_1.nodes.add().leaf, 0, 0.4)
_append_to_leaf(tree_1.nodes.add().leaf, 1, 0.6)
# Second tree.
tree_2 = self._tree_ensemble.trees.add()
_append_to_leaf(tree_2.nodes.add().leaf, 0, 1)
_append_to_leaf(tree_2.nodes.add().leaf, 1, 0)
self._tree_ensemble.tree_weights.append(1.0)
self._tree_ensemble.tree_weights.append(1.0)
meta_1 = self._tree_ensemble.tree_metadata.add()
meta_1.num_tree_weight_updates = 2
meta_2 = self._tree_ensemble.tree_metadata.add()
meta_2.num_tree_weight_updates = 3
# Ensemble to be added.
self._ensemble_to_add = tree_config_pb2.DecisionTreeEnsembleConfig()
self._tree_to_add = self._ensemble_to_add.trees.add()
_append_to_leaf(self._tree_to_add.nodes.add().leaf, 0, 0.3)
_append_to_leaf(self._tree_to_add.nodes.add().leaf, 1, 0.7)
def testWithEmptyEnsemble(self):
with self.test_session():
# Create an empty ensemble.
tree_ensemble_handle = model_ops.tree_ensemble_variable(
stamp_token=0, tree_ensemble_config="", name="empty")
# Create zero feature importance.
feature_usage_counts = variables.Variable(
initial_value=array_ops.zeros([1], dtypes.int64),
name="feature_usage_counts",
trainable=False)
feature_gains = variables.Variable(
initial_value=array_ops.zeros([1], dtypes.float32),
name="feature_gains",
trainable=False)
resources.initialize_resources(resources.shared_resources()).run()
variables.initialize_all_variables().run()
with ops.control_dependencies([
ensemble_optimizer_ops.add_trees_to_ensemble(
tree_ensemble_handle,
self._ensemble_to_add.SerializeToString(),
feature_usage_counts, [2],
feature_gains, [0.4], [[]],
learning_rate=1.0)
]):
result = model_ops.tree_ensemble_serialize(tree_ensemble_handle)[1]
# Output.
output_ensemble = tree_config_pb2.DecisionTreeEnsembleConfig()
output_ensemble.ParseFromString(result.eval())
self.assertProtoEquals(self._tree_to_add, output_ensemble.trees[0])
self.assertEqual(1, len(output_ensemble.trees))
self.assertAllEqual([1.0], output_ensemble.tree_weights)
self.assertEqual(1,
output_ensemble.tree_metadata[0].num_tree_weight_updates)
self.assertAllEqual([2], feature_usage_counts.eval())
self.assertArrayNear([0.4], feature_gains.eval(), 1e-6)
def testWithExistingEnsemble(self):
with self.test_session():
# Create existing tree ensemble.
tree_ensemble_handle = model_ops.tree_ensemble_variable(
stamp_token=0,
tree_ensemble_config=self._tree_ensemble.SerializeToString(),
name="existing")
# Create non-zero feature importance.
feature_usage_counts = variables.Variable(
initial_value=np.array([0, 4, 1], np.int64),
name="feature_usage_counts",
trainable=False)
feature_gains = variables.Variable(
initial_value=np.array([0.0, 0.3, 0.05], np.float32),
name="feature_gains",
trainable=False)
resources.initialize_resources(resources.shared_resources()).run()
variables.initialize_all_variables().run()
output_ensemble = tree_config_pb2.DecisionTreeEnsembleConfig()
with ops.control_dependencies([
ensemble_optimizer_ops.add_trees_to_ensemble(
tree_ensemble_handle,
self._ensemble_to_add.SerializeToString(),
feature_usage_counts, [1, 2, 0],
feature_gains, [0.02, 0.1, 0.0], [[], []],
learning_rate=1)
]):
output_ensemble.ParseFromString(
model_ops.tree_ensemble_serialize(tree_ensemble_handle)[1].eval())
# Output.
self.assertEqual(3, len(output_ensemble.trees))
self.assertProtoEquals(self._tree_to_add, output_ensemble.trees[2])
self.assertAllEqual([1.0, 1.0, 1.0], output_ensemble.tree_weights)
self.assertEqual(2,
output_ensemble.tree_metadata[0].num_tree_weight_updates)
self.assertEqual(3,
output_ensemble.tree_metadata[1].num_tree_weight_updates)
self.assertEqual(1,
output_ensemble.tree_metadata[2].num_tree_weight_updates)
self.assertAllEqual([1, 6, 1], feature_usage_counts.eval())
self.assertArrayNear([0.02, 0.4, 0.05], feature_gains.eval(), 1e-6)
def testWithExistingEnsembleAndDropout(self):
with self.test_session():
tree_ensemble = tree_config_pb2.DecisionTreeEnsembleConfig()
# Add 10 trees with some weights.
for i in range(0, 10):
tree = tree_ensemble.trees.add()
_append_to_leaf(tree.nodes.add().leaf, 0, -0.4)
tree_ensemble.tree_weights.append(i + 1)
meta = tree_ensemble.tree_metadata.add()
meta.num_tree_weight_updates = 1
tree_ensemble_handle = model_ops.tree_ensemble_variable(
stamp_token=0,
tree_ensemble_config=tree_ensemble.SerializeToString(),
name="existing")
# Create non-zero feature importance.
feature_usage_counts = variables.Variable(
initial_value=np.array([2, 3], np.int64),
name="feature_usage_counts",
trainable=False)
feature_gains = variables.Variable(
initial_value=np.array([0.0, 0.3], np.float32),
name="feature_gains",
trainable=False)
resources.initialize_resources(resources.shared_resources()).run()
variables.initialize_all_variables().run()
dropped = [1, 6, 8]
dropped_original_weights = [2.0, 7.0, 9.0]
output_ensemble = tree_config_pb2.DecisionTreeEnsembleConfig()
with ops.control_dependencies([
ensemble_optimizer_ops.add_trees_to_ensemble(
tree_ensemble_handle,
self._ensemble_to_add.SerializeToString(),
feature_usage_counts, [1, 2],
feature_gains, [0.5, 0.3], [dropped, dropped_original_weights],
learning_rate=0.1)
]):
output_ensemble.ParseFromString(
model_ops.tree_ensemble_serialize(tree_ensemble_handle)[1].eval())
# Output.
self.assertEqual(11, len(output_ensemble.trees))
self.assertProtoEquals(self._tree_to_add, output_ensemble.trees[10])
self.assertAllClose(4.5, output_ensemble.tree_weights[10])
self.assertAllClose([1., 1.5, 3., 4., 5., 6., 5.25, 8., 6.75, 10., 4.5],
output_ensemble.tree_weights)
self.assertEqual(1,
output_ensemble.tree_metadata[0].num_tree_weight_updates)
self.assertEqual(2,
output_ensemble.tree_metadata[1].num_tree_weight_updates)
self.assertEqual(1,
output_ensemble.tree_metadata[2].num_tree_weight_updates)
self.assertEqual(1,
output_ensemble.tree_metadata[3].num_tree_weight_updates)
self.assertEqual(1,
output_ensemble.tree_metadata[4].num_tree_weight_updates)
self.assertEqual(1,
output_ensemble.tree_metadata[5].num_tree_weight_updates)
self.assertEqual(2,
output_ensemble.tree_metadata[6].num_tree_weight_updates)
self.assertEqual(1,
output_ensemble.tree_metadata[7].num_tree_weight_updates)
self.assertEqual(2,
output_ensemble.tree_metadata[8].num_tree_weight_updates)
self.assertEqual(1,
output_ensemble.tree_metadata[9].num_tree_weight_updates)
self.assertEqual(
1, output_ensemble.tree_metadata[10].num_tree_weight_updates)
self.assertAllEqual([3, 5], feature_usage_counts.eval())
self.assertArrayNear([0.05, 0.33], feature_gains.eval(), 1e-6)
def testWithEmptyEnsembleAndShrinkage(self):
with self.test_session():
# Add shrinkage config.
learning_rate = 0.0001
tree_ensemble = tree_config_pb2.DecisionTreeEnsembleConfig()
tree_ensemble_handle = model_ops.tree_ensemble_variable(
stamp_token=0,
tree_ensemble_config=tree_ensemble.SerializeToString(),
name="existing")
# Create zero feature importance.
feature_usage_counts = variables.Variable(
initial_value=np.array([0, 0], np.int64),
name="feature_usage_counts",
trainable=False)
feature_gains = variables.Variable(
initial_value=np.array([0.0, 0.0], np.float32),
name="feature_gains",
trainable=False)
resources.initialize_resources(resources.shared_resources()).run()
variables.initialize_all_variables().run()
output_ensemble = tree_config_pb2.DecisionTreeEnsembleConfig()
with ops.control_dependencies([
ensemble_optimizer_ops.add_trees_to_ensemble(
tree_ensemble_handle,
self._ensemble_to_add.SerializeToString(),
feature_usage_counts, [1, 2],
feature_gains, [0.5, 0.3], [[], []],
learning_rate=learning_rate)
]):
output_ensemble.ParseFromString(
model_ops.tree_ensemble_serialize(tree_ensemble_handle)[1].eval())
# New tree is added with shrinkage weight.
self.assertAllClose([learning_rate], output_ensemble.tree_weights)
self.assertEqual(1,
output_ensemble.tree_metadata[0].num_tree_weight_updates)
self.assertAllEqual([1, 2], feature_usage_counts.eval())
self.assertArrayNear([0.5 * learning_rate, 0.3 * learning_rate],
feature_gains.eval(), 1e-6)
def testWithExistingEnsembleAndShrinkage(self):
with self.test_session():
# Add shrinkage config.
learning_rate = 0.0001
tree_ensemble = tree_config_pb2.DecisionTreeEnsembleConfig()
# Add 10 trees with some weights.
for i in range(0, 5):
tree = tree_ensemble.trees.add()
_append_to_leaf(tree.nodes.add().leaf, 0, -0.4)
tree_ensemble.tree_weights.append(i + 1)
meta = tree_ensemble.tree_metadata.add()
meta.num_tree_weight_updates = 1
tree_ensemble_handle = model_ops.tree_ensemble_variable(
stamp_token=0,
tree_ensemble_config=tree_ensemble.SerializeToString(),
name="existing")
# Create non-zero feature importance.
feature_usage_counts = variables.Variable(
initial_value=np.array([4, 7], np.int64),
name="feature_usage_counts",
trainable=False)
feature_gains = variables.Variable(
initial_value=np.array([0.2, 0.8], np.float32),
name="feature_gains",
trainable=False)
resources.initialize_resources(resources.shared_resources()).run()
variables.initialize_all_variables().run()
output_ensemble = tree_config_pb2.DecisionTreeEnsembleConfig()
with ops.control_dependencies([
ensemble_optimizer_ops.add_trees_to_ensemble(
tree_ensemble_handle,
self._ensemble_to_add.SerializeToString(),
feature_usage_counts, [1, 2],
feature_gains, [0.5, 0.3], [[], []],
learning_rate=learning_rate)
]):
output_ensemble.ParseFromString(
model_ops.tree_ensemble_serialize(tree_ensemble_handle)[1].eval())
# The weights of previous trees stayed the same, new tree (LAST) is added
# with shrinkage weight.
self.assertAllClose([1.0, 2.0, 3.0, 4.0, 5.0, learning_rate],
output_ensemble.tree_weights)
      # Check that all numbers of updates are equal to 1 (i.e. no old tree
      # weight got adjusted).
for i in range(0, 6):
self.assertEqual(
1, output_ensemble.tree_metadata[i].num_tree_weight_updates)
# Ensure feature importance was aggregated correctly.
self.assertAllEqual([5, 9], feature_usage_counts.eval())
self.assertArrayNear(
[0.2 + 0.5 * learning_rate, 0.8 + 0.3 * learning_rate],
feature_gains.eval(), 1e-6)
if __name__ == "__main__":
googletest.main()
|
apache-2.0
|
rg3915/spark
|
spark/courses/migrations/0001_initial.py
|
1
|
2869
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2017-05-30 01:55
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('authentication', '0003_auto_20170512_2009'),
]
operations = [
migrations.CreateModel(
name='Classe',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('team', models.CharField(max_length=50, verbose_name='turma')),
('date_initial', models.DateField(blank=True, null=True, verbose_name='data inicial')),
('date_final', models.DateField(blank=True, null=True, verbose_name='data final')),
('short_description', models.CharField(blank=True, max_length=50, null=True, verbose_name='breve descrição')),
('description', models.TextField(blank=True, null=True, verbose_name='descrição')),
('slug', models.SlugField(blank=True, null=True, unique=True)),
],
options={
'ordering': ('team',),
'verbose_name_plural': 'turmas',
'verbose_name': 'turma',
},
),
migrations.CreateModel(
name='ClasseDetail',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('classe', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='classedetail', to='courses.Classe')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='classedet_user', to='authentication.Profile', verbose_name='aluno')),
],
options={
'ordering': ('classe',),
'verbose_name_plural': 'detalhes',
'verbose_name': 'detalhe',
},
),
migrations.CreateModel(
name='Course',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50, unique=True, verbose_name='nome')),
('slug', models.SlugField(blank=True, null=True, unique=True)),
],
options={
'ordering': ('name',),
'verbose_name_plural': 'cursos',
'verbose_name': 'curso',
},
),
migrations.AddField(
model_name='classe',
name='course',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='classe_course', to='courses.Course', verbose_name='curso'),
),
]
|
mit
|
hpcugent/easybuild-framework
|
easybuild/toolchains/__init__.py
|
1
|
1244
|
##
# Copyright 2012-2019 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# https://github.com/easybuilders/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
Declaration of toolchains namespace.
:author: Stijn De Weirdt (Ghent University)
:author: Kenneth Hoste (Ghent University)
"""
import pkg_resources
pkg_resources.declare_namespace(__name__)
|
gpl-2.0
|
molguin92/MoulinetteBackend
|
moulinette/homework/models.py
|
1
|
6446
|
from datetime import datetime
from moulinette import db
# This file includes all the definitions for the homework model in the
# database. Any change here must then be applied to the database using the
# migrate.py file in the root folder of this project.
class Homework(db.Model):
"""
Class Homework represents the homework table in the database.
Contains all the information pertaining to a specific homework
assignment, such as name, description and a list of items to be completed.
"""
__tablename__ = 'homework'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String, unique=True, nullable=False)
description = db.Column(db.Text, default='')
created = db.Column(db.DateTime, default=datetime.now)
updated = db.Column(db.DateTime, default=datetime.now,
onupdate=datetime.now)
items = db.relationship('Item', backref='homework')
active = db.Column(db.Boolean, nullable=False)
def __init__(self, name, description=''):
"""
Constructor for a Homework instance. Requires a name,
and optionally, a description.
:param name: The name of the homework assignment.
:param description: Description of the assignment.
"""
self.name = name
self.description = description
self.active = True
def add_item(self, name, description=''):
"""
Adds a homework item/problem/question to this homework assignment,
and returns it for editing and chaining method calls.
:param name: Name of the item or problem.
:param description: Description of the item.
:return: An Item object.
"""
i = Item(self.id, name, description)
db.session.add(i)
db.session.commit()
return i
def activate(self):
"""
Sets this homework to active.
"""
self.active = True
def deactivate(self):
"""
Sets this homework to inactive.
"""
self.active = False
class Item(db.Model):
"""
Class Item represents a homework item or problem in the database.
It contains information such as its parent homework, name, description
and a set of tests.
"""
__tablename__ = 'item'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String, nullable=False)
description = db.Column(db.Text, default='')
created = db.Column(db.DateTime, default=datetime.now)
updated = db.Column(db.DateTime, default=datetime.now,
onupdate=datetime.now)
homework_id = db.Column(db.Integer, db.ForeignKey('homework.id'),
nullable=False)
tests = db.relationship('Test', backref='item')
def __init__(self, homework_id, name, description=''):
"""
Constructs a new Item instance, taking the parent Homework id,
a name and optional description.
:param homework_id: Parent Homework id.
:param name: Name of the homework item.
:param description: Description of the task to be accomplished.
"""
self.name = name
self.description = description
self.homework_id = homework_id
def add_test(self, description, tinput, toutput, timeout=10):
"""
Adds a Test to this Item. Returns the Test for chaining method calls.
:param description: Description of the new test.
:param tinput: Input given to the program.
:param toutput: Expected output, for verifying correctness.
:return: A Test object.
"""
t = Test(self.id, tinput, toutput, description, timeout)
db.session.add(t)
db.session.commit()
return t
class Test(db.Model):
"""
Class Test represents a particular test for a specific homework Item.
Contains a reference to its parent Item object, as well as a description,
an input to be handed to the program created by the student and an
expected, correct output for validating the correctness of the program.
"""
__tablename__ = 'test'
id = db.Column(db.Integer, primary_key=True)
description = db.Column(db.String)
created = db.Column(db.DateTime, default=datetime.now)
updated = db.Column(db.DateTime, default=datetime.now,
onupdate=datetime.now)
timeout = db.Column(db.Integer, default=10)
stdin = db.Column(db.Text)
stdout = db.Column(db.Text)
item_id = db.Column(db.Integer, db.ForeignKey('item.id'), nullable=False)
def __init__(self, item_id, stdin, stdout, description='', timeout=10):
"""
Constructs a new Test instance, taking the parent Item id, an input
for the program to be tested and a correct output, along with an
optional description, as parameters.
:param item_id: Parent Item id.
:param stdin: The input to be handed to the tested program.
:param stdout: The expected correct output.
:param description: Optional description of this test case.
"""
self.item_id = item_id
self.stdin = stdin
self.stdout = stdout
self.description = description
self.timeout = timeout if timeout else 10
def get_input_output(self):
"""
Returns a tuple containing the input and output for this test case.
:return: Tuple (input, output)
"""
return self.stdin, self.stdout
def validate(self, out):
"""
Compares the given string to the expected correct output of this
        test case. If the outputs do not match, it raises one of three different
exceptions:
Exception ExcessiveOutput: Given string is too long.
Exception MissingOutput: Given string is too short.
        Exception WrongOutput: Lengths match, but the output differs.
:param out: A string to compare to the correct output.
"""
outlines = self.stdout.strip().split("\n")
testlines = out.strip().split("\n")
if len(outlines) > len(testlines):
raise ExcessiveOutput()
elif len(outlines) < len(testlines):
raise MissingOutput()
for i in range(len(outlines)):
if outlines[i] != testlines[i]:
raise WrongOutput()
class MissingOutput(Exception):
pass
class ExcessiveOutput(Exception):
pass
class WrongOutput(Exception):
pass
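# Illustrative sketch (assumed usage, not part of the original module): the
# intended flow chains the helpers above -- create a Homework, attach an Item,
# attach a Test, then validate a program's output against it. Committing to the
# database requires a configured application context, so this is a sketch only.
def _example_homework_flow(program_output):
    hw = Homework('Assignment 1', 'Warm-up exercises')
    db.session.add(hw)
    db.session.commit()
    item = hw.add_item('Problem 1', 'Echo the input back')
    test = item.add_test('simple echo', 'hello', 'hello')
    # raises MissingOutput / ExcessiveOutput / WrongOutput on mismatch
    test.validate(program_output)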
|
bsd-3-clause
|
yprez/python-social-auth
|
social/apps/pyramid_app/models.py
|
67
|
2119
|
"""Pyramid SQLAlchemy ORM models for Social Auth"""
from sqlalchemy import Column, Integer, String, ForeignKey
from sqlalchemy.orm import relationship, backref
from social.utils import setting_name, module_member
from social.storage.sqlalchemy_orm import SQLAlchemyUserMixin, \
SQLAlchemyAssociationMixin, \
SQLAlchemyNonceMixin, \
SQLAlchemyCodeMixin, \
BaseSQLAlchemyStorage
class PyramidStorage(BaseSQLAlchemyStorage):
user = None
nonce = None
association = None
def init_social(config, Base, session):
if hasattr(config, 'registry'):
config = config.registry.settings
UID_LENGTH = config.get(setting_name('UID_LENGTH'), 255)
User = module_member(config[setting_name('USER_MODEL')])
app_session = session
class _AppSession(object):
@classmethod
def _session(cls):
return app_session
class UserSocialAuth(_AppSession, Base, SQLAlchemyUserMixin):
"""Social Auth association model"""
uid = Column(String(UID_LENGTH))
user_id = Column(Integer, ForeignKey(User.id),
nullable=False, index=True)
user = relationship(User, backref=backref('social_auth',
lazy='dynamic'))
@classmethod
def username_max_length(cls):
return User.__table__.columns.get('username').type.length
@classmethod
def user_model(cls):
return User
class Nonce(_AppSession, Base, SQLAlchemyNonceMixin):
"""One use numbers"""
pass
class Association(_AppSession, Base, SQLAlchemyAssociationMixin):
"""OpenId account association"""
pass
class Code(_AppSession, Base, SQLAlchemyCodeMixin):
pass
# Set the references in the storage class
PyramidStorage.user = UserSocialAuth
PyramidStorage.nonce = Nonce
PyramidStorage.association = Association
PyramidStorage.code = Code
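# Illustrative wiring sketch (assumed usage, not part of the original module):
# init_social is typically called once at application startup with the Pyramid
# Configurator (or its settings dict), the declarative Base and a session
# factory. The "myapp.models" import path is a placeholder.
def _example_includeme(config):
    from myapp.models import Base, DBSession  # hypothetical application module
    init_social(config, Base, DBSession)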
|
bsd-3-clause
|
crcresearch/osf.io
|
api/search/serializers.py
|
9
|
1868
|
from api.base.serializers import (
JSONAPISerializer
)
from api.base.utils import absolute_reverse
from api.files.serializers import FileSerializer
from api.nodes.serializers import NodeSerializer
from api.registrations.serializers import RegistrationSerializer
from api.users.serializers import UserSerializer
from api.institutions.serializers import InstitutionSerializer
from osf.models import AbstractNode
from osf.models import OSFUser
from osf.models import BaseFileNode
from osf.models import Institution
class SearchSerializer(JSONAPISerializer):
def to_representation(self, data, envelope='data'):
if isinstance(data, AbstractNode):
if data.is_registration:
serializer = RegistrationSerializer(data, context=self.context)
return RegistrationSerializer.to_representation(serializer, data)
serializer = NodeSerializer(data, context=self.context)
return NodeSerializer.to_representation(serializer, data)
if isinstance(data, OSFUser):
serializer = UserSerializer(data, context=self.context)
return UserSerializer.to_representation(serializer, data)
if isinstance(data, BaseFileNode):
serializer = FileSerializer(data, context=self.context)
return FileSerializer.to_representation(serializer, data)
if isinstance(data, Institution):
serializer = InstitutionSerializer(data, context=self.context)
return InstitutionSerializer.to_representation(serializer, data)
return None
def get_absolute_url(self, obj):
return absolute_reverse(
view_name='search:search-search',
kwargs={
'version': self.context['request'].parser_context['kwargs']['version']
}
)
class Meta:
type_ = 'search'
|
apache-2.0
|
jonnor/qt-creator
|
src/libs/3rdparty/botan/wrappers/boost-python/nisttest.py
|
14
|
1898
|
#!/usr/bin/python
import sys, os, botan
from os.path import join
def validate(ca_certs, certs, crls, ee_certs):
store = botan.X509_Store()
for cert in certs:
if cert not in ee_certs:
store.add_cert(botan.X509_Certificate(cert), cert in ca_certs)
for crl in crls:
r = store.add_crl(botan.X509_CRL(crl))
if r != botan.verify_result.verified:
return r
for ee in ee_certs:
r = store.validate(botan.X509_Certificate(ee))
if r != botan.verify_result.verified:
return r
return botan.verify_result.verified
def run_test(files, rootdir, testname, expected):
crls = [join(rootdir,x) for x in files if x.endswith(".crl")]
certs = [join(rootdir,x) for x in files if x.endswith(".crt")]
end_entity = [x for x in certs if x.find("end.crt") != -1]
ca_certs = [x for x in certs if x.find("root.crt") != -1]
print "%s..." % testname,
result = validate(ca_certs, certs, crls, end_entity)
result = repr(result).replace('botan._botan.verify_result.', '')
if result != expected:
print "FAILED: got %s, expected %s" % (result, expected)
else:
print "passed"
def main():
def load_results(file):
results = {}
for line in open(file, 'r'):
line = line[0:line.find('#')].strip()
if line:
test,result = line.split(' ')
results[test] = result
return results
results = load_results('results.txt')
for root, dirs, files in os.walk('../../checks/nist_tests/tests'):
if files:
thistest = root[root.rfind('/')+1:]
if thistest in results:
run_test(files, root, thistest, results[thistest])
else:
print "%s... skipping - no expected result set" % thistest
if __name__ == "__main__":
sys.exit(main())
|
lgpl-2.1
|
iedparis8/django-crispy-forms
|
crispy_forms/bootstrap.py
|
10
|
11977
|
import warnings
from random import randint
from django.template import Context, Template
from django.template.loader import render_to_string
from django.template.defaultfilters import slugify
from .compatibility import text_type
from .layout import LayoutObject, Field, Div
from .utils import render_field, flatatt, TEMPLATE_PACK
class PrependedAppendedText(Field):
template = "%s/layout/prepended_appended_text.html" % TEMPLATE_PACK
def __init__(self, field, prepended_text=None, appended_text=None, *args, **kwargs):
self.field = field
self.appended_text = appended_text
self.prepended_text = prepended_text
if 'active' in kwargs:
self.active = kwargs.pop('active')
self.input_size = None
css_class = kwargs.get('css_class', '')
if css_class.find('input-lg') != -1: self.input_size = 'input-lg'
if css_class.find('input-sm') != -1: self.input_size = 'input-sm'
super(PrependedAppendedText, self).__init__(field, *args, **kwargs)
def render(self, form, form_style, context, template_pack=TEMPLATE_PACK):
context.update({'crispy_appended_text': self.appended_text,
'crispy_prepended_text': self.prepended_text,
'input_size' : self.input_size,
'active': getattr(self, "active", False)})
return render_field(self.field, form, form_style, context, template=self.template, attrs=self.attrs, template_pack=template_pack)
class AppendedPrependedText(PrependedAppendedText):
def __init__(self, *args, **kwargs):
warnings.warn("AppendedPrependedText has been renamed to PrependedAppendedText, \
it will be removed in 1.3.0", PendingDeprecationWarning)
super(AppendedPrependedText, self).__init__(*args, **kwargs)
class AppendedText(PrependedAppendedText):
def __init__(self, field, text, *args, **kwargs):
kwargs.pop('appended_text', None)
kwargs.pop('prepended_text', None)
self.text = text
super(AppendedText, self).__init__(field, appended_text=text, **kwargs)
class PrependedText(PrependedAppendedText):
def __init__(self, field, text, *args, **kwargs):
kwargs.pop('appended_text', None)
kwargs.pop('prepended_text', None)
self.text = text
super(PrependedText, self).__init__(field, prepended_text=text, **kwargs)
class FormActions(LayoutObject):
"""
Bootstrap layout object. It wraps fields in a <div class="form-actions">
Example::
FormActions(
            HTML('<span style="display: hidden;">Information Saved</span>'),
Submit('Save', 'Save', css_class='btn-primary')
)
"""
template = "%s/layout/formactions.html" % TEMPLATE_PACK
def __init__(self, *fields, **kwargs):
self.fields = list(fields)
self.template = kwargs.pop('template', self.template)
self.attrs = kwargs
if 'css_class' in self.attrs:
self.attrs['class'] = self.attrs.pop('css_class')
def render(self, form, form_style, context, template_pack=TEMPLATE_PACK):
html = u''
for field in self.fields:
html += render_field(field, form, form_style, context, template_pack=template_pack)
return render_to_string(self.template, Context({'formactions': self, 'fields_output': html}))
def flat_attrs(self):
return flatatt(self.attrs)
class InlineCheckboxes(Field):
"""
Layout object for rendering checkboxes inline::
InlineCheckboxes('field_name')
"""
template = "%s/layout/checkboxselectmultiple_inline.html" % TEMPLATE_PACK
def render(self, form, form_style, context, template_pack=TEMPLATE_PACK):
context.update({'inline_class': 'inline'})
return super(InlineCheckboxes, self).render(form, form_style, context)
class InlineRadios(Field):
"""
Layout object for rendering radiobuttons inline::
InlineRadios('field_name')
"""
template = "%s/layout/radioselect_inline.html" % TEMPLATE_PACK
def render(self, form, form_style, context, template_pack=TEMPLATE_PACK):
context.update({'inline_class': 'inline'})
return super(InlineRadios, self).render(form, form_style, context)
class FieldWithButtons(Div):
template = '%s/layout/field_with_buttons.html' % TEMPLATE_PACK
def render(self, form, form_style, context):
# We first render the buttons
buttons = ''
for field in self.fields[1:]:
buttons += render_field(
field, form, form_style, context,
'%s/layout/field.html' % TEMPLATE_PACK, layout_object=self
)
context.update({'div': self, 'buttons': buttons})
if isinstance(self.fields[0], Field):
# FieldWithButtons(Field('field_name'), StrictButton("go"))
# We render the field passing its name and attributes
return render_field(
self.fields[0][0], form, form_style, context,
self.template, attrs=self.fields[0].attrs
)
else:
return render_field(self.fields[0], form, form_style, context, self.template)
class StrictButton(object):
"""
    Layout object for rendering an HTML button::
Button("button content", css_class="extra")
"""
template = '%s/layout/button.html' % TEMPLATE_PACK
field_classes = 'btn'
def __init__(self, content, **kwargs):
self.content = content
self.template = kwargs.pop('template', self.template)
kwargs.setdefault('type', 'button')
# We turn css_id and css_class into id and class
if 'css_id' in kwargs:
kwargs['id'] = kwargs.pop('css_id')
kwargs['class'] = self.field_classes
if 'css_class' in kwargs:
kwargs['class'] += " %s" % kwargs.pop('css_class')
self.flat_attrs = flatatt(kwargs)
def render(self, form, form_style, context):
self.content = Template(text_type(self.content)).render(context)
return render_to_string(self.template, Context({'button': self}))
class Container(Div):
"""
Base class used for `Tab` and `AccordionGroup`, represents a basic container concept
"""
css_class = ""
def __init__(self, name, *fields, **kwargs):
super(Container, self).__init__(*fields, **kwargs)
self.template = kwargs.pop('template', self.template)
self.name = name
self.active = kwargs.pop("active", False)
if not self.css_id:
self.css_id = slugify(self.name)
def __contains__(self, field_name):
"""
check if field_name is contained within tab.
"""
return field_name in map(lambda pointer: pointer[1], self.get_field_names())
def render(self, form, form_style, context):
if self.active:
if not 'active' in self.css_class:
self.css_class += ' active'
else:
self.css_class = self.css_class.replace('active', '')
return super(Container, self).render(form, form_style, context)
class ContainerHolder(Div):
"""
Base class used for `TabHolder` and `Accordion`, groups containers
"""
def first_container_with_errors(self, errors):
"""
Returns the first container with errors, otherwise returns the first one
"""
for tab in self.fields:
errors_here = any(error in tab for error in errors)
if errors_here:
return tab
return self.fields[0]
class Tab(Container):
"""
Tab object. It wraps fields in a div whose default class is "tab-pane" and
takes a name as first argument. Example::
Tab('tab_name', 'form_field_1', 'form_field_2', 'form_field_3')
"""
css_class = 'tab-pane'
link_template = '%s/layout/tab-link.html' % TEMPLATE_PACK
def render_link(self):
"""
Render the link for the tab-pane. It must be called after render so css_class is updated
with active if needed.
"""
return render_to_string(self.link_template, Context({'link': self}))
class TabHolder(ContainerHolder):
"""
TabHolder object. It wraps Tab objects in a container. Requires bootstrap-tab.js::
TabHolder(
Tab('form_field_1', 'form_field_2'),
Tab('form_field_3')
)
"""
template = '%s/layout/tab.html' % TEMPLATE_PACK
def render(self, form, form_style, context, template_pack=TEMPLATE_PACK):
links, content = '', ''
for tab in self.fields:
tab.active = False
# The first tab with errors will be active
self.first_container_with_errors(form.errors.keys()).active = True
for tab in self.fields:
content += render_field(
tab, form, form_style, context, template_pack=template_pack
)
links += tab.render_link()
return render_to_string(self.template, Context({
'tabs': self, 'links': links, 'content': content
}))
class AccordionGroup(Container):
"""
Accordion Group (pane) object. It wraps given fields inside an accordion
tab. It takes accordion tab name as first argument::
AccordionGroup("group name", "form_field_1", "form_field_2")
"""
template = "%s/accordion-group.html" % TEMPLATE_PACK
data_parent = "" # accordion parent div id.
class Accordion(ContainerHolder):
"""
Accordion menu object. It wraps `AccordionGroup` objects in a container::
Accordion(
AccordionGroup("group name", "form_field_1", "form_field_2"),
AccordionGroup("another group name", "form_field")
)
"""
template = "%s/accordion.html" % TEMPLATE_PACK
def render(self, form, form_style, context, template_pack=TEMPLATE_PACK):
content = ''
# accordion group needs the parent div id to set `data-parent` (I don't
# know why). This needs to be a unique id
if not self.css_id:
self.css_id = "-".join(["accordion", text_type(randint(1000, 9999))])
        # the first group with errors, or else the first group, will be visible; others will be collapsed
self.first_container_with_errors(form.errors.keys()).active = True
for group in self.fields:
group.data_parent = self.css_id
content += render_field(
group, form, form_style, context, template_pack=template_pack
)
return render_to_string(
self.template,
Context({'accordion': self, 'content': content})
)
class Alert(Div):
"""
`Alert` generates markup in the form of an alert dialog
    Alert(content="<strong>Warning!</strong> Best check yo self, you're not looking too good.")
"""
template = "bootstrap/layout/alert.html"
css_class = "alert"
def __init__(self, content, dismiss=True, block=False, **kwargs):
fields = []
if block:
self.css_class += ' alert-block'
Div.__init__(self, *fields, **kwargs)
self.template = kwargs.pop('template', self.template)
self.content = content
self.dismiss = dismiss
def render(self, form, form_style, context):
return render_to_string(
self.template,
Context({'alert': self, 'content': self.content, 'dismiss': self.dismiss
}))
class UneditableField(Field):
"""
Layout object for rendering fields as uneditable in bootstrap
Example::
UneditableField('field_name', css_class="input-xlarge")
"""
template = "%s/layout/uneditable_input.html" % TEMPLATE_PACK
def __init__(self, field, *args, **kwargs):
self.attrs = {'class': 'uneditable-input'}
super(UneditableField, self).__init__(field, *args, **kwargs)
class InlineField(Field):
template = "%s/layout/inline_field.html" % TEMPLATE_PACK
|
mit
|
kaushik94/sympy
|
sympy/functions/elementary/hyperbolic.py
|
1
|
48081
|
from __future__ import print_function, division
from sympy.core import S, sympify, cacheit, pi, I, Rational
from sympy.core.add import Add
from sympy.core.function import Function, ArgumentIndexError, _coeff_isneg
from sympy.functions.combinatorial.factorials import factorial, RisingFactorial
from sympy.functions.elementary.exponential import exp, log, match_real_imag
from sympy.functions.elementary.miscellaneous import sqrt
from sympy.functions.elementary.integers import floor
from sympy import pi, Eq
from sympy.logic import Or, And
from sympy.core.logic import fuzzy_or, fuzzy_and, fuzzy_bool
def _rewrite_hyperbolics_as_exp(expr):
expr = sympify(expr)
return expr.xreplace({h: h.rewrite(exp)
for h in expr.atoms(HyperbolicFunction)})
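# Illustrative only (assumed behaviour): the helper above rewrites every
# hyperbolic function in an expression through its exponential form, e.g.
# sinh(x) + 1 becomes exp(x)/2 - exp(-x)/2 + 1.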
###############################################################################
########################### HYPERBOLIC FUNCTIONS ##############################
###############################################################################
class HyperbolicFunction(Function):
"""
Base class for hyperbolic functions.
See Also
========
sinh, cosh, tanh, coth
"""
unbranched = True
def _peeloff_ipi(arg):
"""
Split ARG into two parts, a "rest" and a multiple of I*pi/2.
This assumes ARG to be an Add.
The multiple of I*pi returned in the second position is always a Rational.
Examples
========
>>> from sympy.functions.elementary.hyperbolic import _peeloff_ipi as peel
>>> from sympy import pi, I
>>> from sympy.abc import x, y
>>> peel(x + I*pi/2)
(x, I*pi/2)
>>> peel(x + I*2*pi/3 + I*pi*y)
(x + I*pi*y + I*pi/6, I*pi/2)
"""
for a in Add.make_args(arg):
if a == S.Pi*S.ImaginaryUnit:
K = S.One
break
elif a.is_Mul:
K, p = a.as_two_terms()
if p == S.Pi*S.ImaginaryUnit and K.is_Rational:
break
else:
return arg, S.Zero
m1 = (K % S.Half)*S.Pi*S.ImaginaryUnit
m2 = K*S.Pi*S.ImaginaryUnit - m1
return arg - m2, m2
class sinh(HyperbolicFunction):
r"""
The hyperbolic sine function, `\frac{e^x - e^{-x}}{2}`.
* sinh(x) -> Returns the hyperbolic sine of x
See Also
========
cosh, tanh, asinh
"""
def fdiff(self, argindex=1):
"""
Returns the first derivative of this function.
"""
if argindex == 1:
return cosh(self.args[0])
else:
raise ArgumentIndexError(self, argindex)
def inverse(self, argindex=1):
"""
Returns the inverse of this function.
"""
return asinh
@classmethod
def eval(cls, arg):
from sympy import sin
arg = sympify(arg)
if arg.is_Number:
if arg is S.NaN:
return S.NaN
elif arg is S.Infinity:
return S.Infinity
elif arg is S.NegativeInfinity:
return S.NegativeInfinity
elif arg.is_zero:
return S.Zero
elif arg.is_negative:
return -cls(-arg)
else:
if arg is S.ComplexInfinity:
return S.NaN
i_coeff = arg.as_coefficient(S.ImaginaryUnit)
if i_coeff is not None:
return S.ImaginaryUnit * sin(i_coeff)
else:
if _coeff_isneg(arg):
return -cls(-arg)
if arg.is_Add:
x, m = _peeloff_ipi(arg)
if m:
return sinh(m)*cosh(x) + cosh(m)*sinh(x)
if arg.is_zero:
return S.Zero
if arg.func == asinh:
return arg.args[0]
if arg.func == acosh:
x = arg.args[0]
return sqrt(x - 1) * sqrt(x + 1)
if arg.func == atanh:
x = arg.args[0]
return x/sqrt(1 - x**2)
if arg.func == acoth:
x = arg.args[0]
return 1/(sqrt(x - 1) * sqrt(x + 1))
@staticmethod
@cacheit
def taylor_term(n, x, *previous_terms):
"""
Returns the next term in the Taylor series expansion.
"""
if n < 0 or n % 2 == 0:
return S.Zero
else:
x = sympify(x)
if len(previous_terms) > 2:
p = previous_terms[-2]
return p * x**2 / (n*(n - 1))
else:
return x**(n) / factorial(n)
def _eval_conjugate(self):
return self.func(self.args[0].conjugate())
def as_real_imag(self, deep=True, **hints):
"""
Returns this function as a complex coordinate.
"""
from sympy import cos, sin
if self.args[0].is_extended_real:
if deep:
hints['complex'] = False
return (self.expand(deep, **hints), S.Zero)
else:
return (self, S.Zero)
if deep:
re, im = self.args[0].expand(deep, **hints).as_real_imag()
else:
re, im = self.args[0].as_real_imag()
return (sinh(re)*cos(im), cosh(re)*sin(im))
def _eval_expand_complex(self, deep=True, **hints):
re_part, im_part = self.as_real_imag(deep=deep, **hints)
return re_part + im_part*S.ImaginaryUnit
def _eval_expand_trig(self, deep=True, **hints):
if deep:
arg = self.args[0].expand(deep, **hints)
else:
arg = self.args[0]
x = None
if arg.is_Add: # TODO, implement more if deep stuff here
x, y = arg.as_two_terms()
else:
coeff, terms = arg.as_coeff_Mul(rational=True)
if coeff is not S.One and coeff.is_Integer and terms is not S.One:
x = terms
y = (coeff - 1)*x
if x is not None:
return (sinh(x)*cosh(y) + sinh(y)*cosh(x)).expand(trig=True)
return sinh(arg)
def _eval_rewrite_as_tractable(self, arg, **kwargs):
return (exp(arg) - exp(-arg)) / 2
def _eval_rewrite_as_exp(self, arg, **kwargs):
return (exp(arg) - exp(-arg)) / 2
def _eval_rewrite_as_cosh(self, arg, **kwargs):
return -S.ImaginaryUnit*cosh(arg + S.Pi*S.ImaginaryUnit/2)
def _eval_rewrite_as_tanh(self, arg, **kwargs):
tanh_half = tanh(S.Half*arg)
return 2*tanh_half/(1 - tanh_half**2)
def _eval_rewrite_as_coth(self, arg, **kwargs):
coth_half = coth(S.Half*arg)
return 2*coth_half/(coth_half**2 - 1)
def _eval_as_leading_term(self, x):
from sympy import Order
arg = self.args[0].as_leading_term(x)
if x in arg.free_symbols and Order(1, x).contains(arg):
return arg
else:
return self.func(arg)
def _eval_is_real(self):
arg = self.args[0]
if arg.is_real:
return True
# if `im` is of the form n*pi
# else, check if it is a number
re, im = arg.as_real_imag()
return (im%pi).is_zero
def _eval_is_extended_real(self):
if self.args[0].is_extended_real:
return True
def _eval_is_positive(self):
if self.args[0].is_extended_real:
return self.args[0].is_positive
def _eval_is_negative(self):
if self.args[0].is_extended_real:
return self.args[0].is_negative
def _eval_is_finite(self):
arg = self.args[0]
return arg.is_finite
def _eval_is_zero(self):
arg = self.args[0]
if arg.is_zero:
return True
class cosh(HyperbolicFunction):
r"""
The hyperbolic cosine function, `\frac{e^x + e^{-x}}{2}`.
* cosh(x) -> Returns the hyperbolic cosine of x
See Also
========
sinh, tanh, acosh
"""
def fdiff(self, argindex=1):
if argindex == 1:
return sinh(self.args[0])
else:
raise ArgumentIndexError(self, argindex)
@classmethod
def eval(cls, arg):
from sympy import cos
arg = sympify(arg)
if arg.is_Number:
if arg is S.NaN:
return S.NaN
elif arg is S.Infinity:
return S.Infinity
elif arg is S.NegativeInfinity:
return S.Infinity
elif arg.is_zero:
return S.One
elif arg.is_negative:
return cls(-arg)
else:
if arg is S.ComplexInfinity:
return S.NaN
i_coeff = arg.as_coefficient(S.ImaginaryUnit)
if i_coeff is not None:
return cos(i_coeff)
else:
if _coeff_isneg(arg):
return cls(-arg)
if arg.is_Add:
x, m = _peeloff_ipi(arg)
if m:
return cosh(m)*cosh(x) + sinh(m)*sinh(x)
if arg.is_zero:
return S.One
if arg.func == asinh:
return sqrt(1 + arg.args[0]**2)
if arg.func == acosh:
return arg.args[0]
if arg.func == atanh:
return 1/sqrt(1 - arg.args[0]**2)
if arg.func == acoth:
x = arg.args[0]
return x/(sqrt(x - 1) * sqrt(x + 1))
@staticmethod
@cacheit
def taylor_term(n, x, *previous_terms):
if n < 0 or n % 2 == 1:
return S.Zero
else:
x = sympify(x)
if len(previous_terms) > 2:
p = previous_terms[-2]
return p * x**2 / (n*(n - 1))
else:
return x**(n)/factorial(n)
def _eval_conjugate(self):
return self.func(self.args[0].conjugate())
def as_real_imag(self, deep=True, **hints):
from sympy import cos, sin
if self.args[0].is_extended_real:
if deep:
hints['complex'] = False
return (self.expand(deep, **hints), S.Zero)
else:
return (self, S.Zero)
if deep:
re, im = self.args[0].expand(deep, **hints).as_real_imag()
else:
re, im = self.args[0].as_real_imag()
return (cosh(re)*cos(im), sinh(re)*sin(im))
def _eval_expand_complex(self, deep=True, **hints):
re_part, im_part = self.as_real_imag(deep=deep, **hints)
return re_part + im_part*S.ImaginaryUnit
def _eval_expand_trig(self, deep=True, **hints):
if deep:
arg = self.args[0].expand(deep, **hints)
else:
arg = self.args[0]
x = None
if arg.is_Add: # TODO, implement more if deep stuff here
x, y = arg.as_two_terms()
else:
coeff, terms = arg.as_coeff_Mul(rational=True)
if coeff is not S.One and coeff.is_Integer and terms is not S.One:
x = terms
y = (coeff - 1)*x
if x is not None:
return (cosh(x)*cosh(y) + sinh(x)*sinh(y)).expand(trig=True)
return cosh(arg)
def _eval_rewrite_as_tractable(self, arg, **kwargs):
return (exp(arg) + exp(-arg)) / 2
def _eval_rewrite_as_exp(self, arg, **kwargs):
return (exp(arg) + exp(-arg)) / 2
def _eval_rewrite_as_sinh(self, arg, **kwargs):
return -S.ImaginaryUnit*sinh(arg + S.Pi*S.ImaginaryUnit/2)
def _eval_rewrite_as_tanh(self, arg, **kwargs):
tanh_half = tanh(S.Half*arg)**2
return (1 + tanh_half)/(1 - tanh_half)
def _eval_rewrite_as_coth(self, arg, **kwargs):
coth_half = coth(S.Half*arg)**2
return (coth_half + 1)/(coth_half - 1)
def _eval_as_leading_term(self, x):
from sympy import Order
arg = self.args[0].as_leading_term(x)
if x in arg.free_symbols and Order(1, x).contains(arg):
return S.One
else:
return self.func(arg)
def _eval_is_real(self):
arg = self.args[0]
# `cosh(x)` is real for real OR purely imaginary `x`
if arg.is_real or arg.is_imaginary:
return True
# cosh(a+ib) = cos(b)*cosh(a) + i*sin(b)*sinh(a)
# the imaginary part can be an expression like n*pi
# if not, check if the imaginary part is a number
re, im = arg.as_real_imag()
return (im%pi).is_zero
def _eval_is_positive(self):
# cosh(x+I*y) = cos(y)*cosh(x) + I*sin(y)*sinh(x)
# cosh(z) is positive iff it is real and the real part is positive.
# So we need sin(y)*sinh(x) = 0 which gives x=0 or y=n*pi
# Case 1 (y=n*pi): cosh(z) = (-1)**n * cosh(x) -> positive for n even
# Case 2 (x=0): cosh(z) = cos(y) -> positive when cos(y) is positive
z = self.args[0]
x, y = z.as_real_imag()
ymod = y % (2*pi)
yzero = ymod.is_zero
# shortcut if ymod is zero
if yzero:
return True
xzero = x.is_zero
# shortcut x is not zero
if xzero is False:
return yzero
return fuzzy_or([
# Case 1:
yzero,
# Case 2:
fuzzy_and([
xzero,
fuzzy_or([ymod < pi/2, ymod > 3*pi/2])
])
])
def _eval_is_nonnegative(self):
z = self.args[0]
x, y = z.as_real_imag()
ymod = y % (2*pi)
yzero = ymod.is_zero
# shortcut if ymod is zero
if yzero:
return True
xzero = x.is_zero
# shortcut x is not zero
if xzero is False:
return yzero
return fuzzy_or([
# Case 1:
yzero,
# Case 2:
fuzzy_and([
xzero,
fuzzy_or([ymod <= pi/2, ymod >= 3*pi/2])
])
])
def _eval_is_finite(self):
arg = self.args[0]
return arg.is_finite
class tanh(HyperbolicFunction):
r"""
The hyperbolic tangent function, `\frac{\sinh(x)}{\cosh(x)}`.
* tanh(x) -> Returns the hyperbolic tangent of x
See Also
========
sinh, cosh, atanh
"""
def fdiff(self, argindex=1):
if argindex == 1:
return S.One - tanh(self.args[0])**2
else:
raise ArgumentIndexError(self, argindex)
def inverse(self, argindex=1):
"""
Returns the inverse of this function.
"""
return atanh
@classmethod
def eval(cls, arg):
from sympy import tan
arg = sympify(arg)
if arg.is_Number:
if arg is S.NaN:
return S.NaN
elif arg is S.Infinity:
return S.One
elif arg is S.NegativeInfinity:
return S.NegativeOne
elif arg.is_zero:
return S.Zero
elif arg.is_negative:
return -cls(-arg)
else:
if arg is S.ComplexInfinity:
return S.NaN
i_coeff = arg.as_coefficient(S.ImaginaryUnit)
if i_coeff is not None:
if _coeff_isneg(i_coeff):
return -S.ImaginaryUnit * tan(-i_coeff)
return S.ImaginaryUnit * tan(i_coeff)
else:
if _coeff_isneg(arg):
return -cls(-arg)
if arg.is_Add:
x, m = _peeloff_ipi(arg)
if m:
tanhm = tanh(m)
if tanhm is S.ComplexInfinity:
return coth(x)
else: # tanhm == 0
return tanh(x)
if arg.is_zero:
return S.Zero
if arg.func == asinh:
x = arg.args[0]
return x/sqrt(1 + x**2)
if arg.func == acosh:
x = arg.args[0]
return sqrt(x - 1) * sqrt(x + 1) / x
if arg.func == atanh:
return arg.args[0]
if arg.func == acoth:
return 1/arg.args[0]
@staticmethod
@cacheit
def taylor_term(n, x, *previous_terms):
from sympy import bernoulli
if n < 0 or n % 2 == 0:
return S.Zero
else:
x = sympify(x)
a = 2**(n + 1)
B = bernoulli(n + 1)
F = factorial(n + 1)
return a*(a - 1) * B/F * x**n
def _eval_conjugate(self):
return self.func(self.args[0].conjugate())
def as_real_imag(self, deep=True, **hints):
from sympy import cos, sin
if self.args[0].is_extended_real:
if deep:
hints['complex'] = False
return (self.expand(deep, **hints), S.Zero)
else:
return (self, S.Zero)
if deep:
re, im = self.args[0].expand(deep, **hints).as_real_imag()
else:
re, im = self.args[0].as_real_imag()
denom = sinh(re)**2 + cos(im)**2
return (sinh(re)*cosh(re)/denom, sin(im)*cos(im)/denom)
def _eval_rewrite_as_tractable(self, arg, **kwargs):
neg_exp, pos_exp = exp(-arg), exp(arg)
return (pos_exp - neg_exp)/(pos_exp + neg_exp)
def _eval_rewrite_as_exp(self, arg, **kwargs):
neg_exp, pos_exp = exp(-arg), exp(arg)
return (pos_exp - neg_exp)/(pos_exp + neg_exp)
def _eval_rewrite_as_sinh(self, arg, **kwargs):
return S.ImaginaryUnit*sinh(arg)/sinh(S.Pi*S.ImaginaryUnit/2 - arg)
def _eval_rewrite_as_cosh(self, arg, **kwargs):
return S.ImaginaryUnit*cosh(S.Pi*S.ImaginaryUnit/2 - arg)/cosh(arg)
def _eval_rewrite_as_coth(self, arg, **kwargs):
return 1/coth(arg)
def _eval_as_leading_term(self, x):
from sympy import Order
arg = self.args[0].as_leading_term(x)
if x in arg.free_symbols and Order(1, x).contains(arg):
return arg
else:
return self.func(arg)
def _eval_is_real(self):
from sympy import cos, sinh
arg = self.args[0]
if arg.is_real:
return True
re, im = arg.as_real_imag()
# if denom = 0, tanh(arg) = zoo
if re == 0 and im % pi == pi/2:
return None
# check if im is of the form n*pi/2 to make sin(2*im) = 0
# if not, im could be a number, return False in that case
return (im % (pi/2)).is_zero
def _eval_is_extended_real(self):
if self.args[0].is_extended_real:
return True
def _eval_is_positive(self):
if self.args[0].is_extended_real:
return self.args[0].is_positive
def _eval_is_negative(self):
if self.args[0].is_extended_real:
return self.args[0].is_negative
def _eval_is_finite(self):
from sympy import sinh, cos
arg = self.args[0]
re, im = arg.as_real_imag()
denom = cos(im)**2 + sinh(re)**2
if denom == 0:
return False
elif denom.is_number:
return True
if arg.is_extended_real:
return True
def _eval_is_zero(self):
arg = self.args[0]
if arg.is_zero:
return True
class coth(HyperbolicFunction):
r"""
The hyperbolic cotangent function, `\frac{\cosh(x)}{\sinh(x)}`.
* coth(x) -> Returns the hyperbolic cotangent of x
"""
def fdiff(self, argindex=1):
if argindex == 1:
return -1/sinh(self.args[0])**2
else:
raise ArgumentIndexError(self, argindex)
def inverse(self, argindex=1):
"""
Returns the inverse of this function.
"""
return acoth
@classmethod
def eval(cls, arg):
from sympy import cot
arg = sympify(arg)
if arg.is_Number:
if arg is S.NaN:
return S.NaN
elif arg is S.Infinity:
return S.One
elif arg is S.NegativeInfinity:
return S.NegativeOne
elif arg.is_zero:
return S.ComplexInfinity
elif arg.is_negative:
return -cls(-arg)
else:
if arg is S.ComplexInfinity:
return S.NaN
i_coeff = arg.as_coefficient(S.ImaginaryUnit)
if i_coeff is not None:
if _coeff_isneg(i_coeff):
return S.ImaginaryUnit * cot(-i_coeff)
return -S.ImaginaryUnit * cot(i_coeff)
else:
if _coeff_isneg(arg):
return -cls(-arg)
if arg.is_Add:
x, m = _peeloff_ipi(arg)
if m:
cothm = coth(m)
if cothm is S.ComplexInfinity:
return coth(x)
else: # cothm == 0
return tanh(x)
if arg.is_zero:
return S.ComplexInfinity
if arg.func == asinh:
x = arg.args[0]
return sqrt(1 + x**2)/x
if arg.func == acosh:
x = arg.args[0]
return x/(sqrt(x - 1) * sqrt(x + 1))
if arg.func == atanh:
return 1/arg.args[0]
if arg.func == acoth:
return arg.args[0]
@staticmethod
@cacheit
def taylor_term(n, x, *previous_terms):
from sympy import bernoulli
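        # Laurent coefficients of coth about 0: 1/x plus, for odd n,
        # 2**(n + 1)*B(n + 1)/(n + 1)!, so that coth(x) = 1/x + x/3 - x**3/45 + ...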
if n == 0:
return 1 / sympify(x)
elif n < 0 or n % 2 == 0:
return S.Zero
else:
x = sympify(x)
B = bernoulli(n + 1)
F = factorial(n + 1)
return 2**(n + 1) * B/F * x**n
def _eval_conjugate(self):
return self.func(self.args[0].conjugate())
def as_real_imag(self, deep=True, **hints):
from sympy import cos, sin
if self.args[0].is_extended_real:
if deep:
hints['complex'] = False
return (self.expand(deep, **hints), S.Zero)
else:
return (self, S.Zero)
if deep:
re, im = self.args[0].expand(deep, **hints).as_real_imag()
else:
re, im = self.args[0].as_real_imag()
denom = sinh(re)**2 + sin(im)**2
return (sinh(re)*cosh(re)/denom, -sin(im)*cos(im)/denom)
def _eval_rewrite_as_tractable(self, arg, **kwargs):
neg_exp, pos_exp = exp(-arg), exp(arg)
return (pos_exp + neg_exp)/(pos_exp - neg_exp)
def _eval_rewrite_as_exp(self, arg, **kwargs):
neg_exp, pos_exp = exp(-arg), exp(arg)
return (pos_exp + neg_exp)/(pos_exp - neg_exp)
def _eval_rewrite_as_sinh(self, arg, **kwargs):
return -S.ImaginaryUnit*sinh(S.Pi*S.ImaginaryUnit/2 - arg)/sinh(arg)
def _eval_rewrite_as_cosh(self, arg, **kwargs):
return -S.ImaginaryUnit*cosh(arg)/cosh(S.Pi*S.ImaginaryUnit/2 - arg)
def _eval_rewrite_as_tanh(self, arg, **kwargs):
return 1/tanh(arg)
def _eval_is_positive(self):
if self.args[0].is_extended_real:
return self.args[0].is_positive
def _eval_is_negative(self):
if self.args[0].is_extended_real:
return self.args[0].is_negative
def _eval_as_leading_term(self, x):
from sympy import Order
arg = self.args[0].as_leading_term(x)
if x in arg.free_symbols and Order(1, x).contains(arg):
return 1/arg
else:
return self.func(arg)
class ReciprocalHyperbolicFunction(HyperbolicFunction):
"""Base class for reciprocal functions of hyperbolic functions. """
    # To be defined in subclasses
_reciprocal_of = None
_is_even = None
_is_odd = None
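    # Delegation sketch: csch sets _reciprocal_of = sinh, so e.g. csch(x).rewrite(exp)
    # is computed as 1/sinh(x).rewrite(exp), i.e. 2/(exp(x) - exp(-x)). The helpers
    # below invert the reciprocal's result and return None when the underlying
    # rewrite left the expression unchanged.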
@classmethod
def eval(cls, arg):
if arg.could_extract_minus_sign():
if cls._is_even:
return cls(-arg)
if cls._is_odd:
return -cls(-arg)
t = cls._reciprocal_of.eval(arg)
if hasattr(arg, 'inverse') and arg.inverse() == cls:
return arg.args[0]
return 1/t if t is not None else t
def _call_reciprocal(self, method_name, *args, **kwargs):
# Calls method_name on _reciprocal_of
o = self._reciprocal_of(self.args[0])
return getattr(o, method_name)(*args, **kwargs)
def _calculate_reciprocal(self, method_name, *args, **kwargs):
# If calling method_name on _reciprocal_of returns a value != None
# then return the reciprocal of that value
t = self._call_reciprocal(method_name, *args, **kwargs)
return 1/t if t is not None else t
def _rewrite_reciprocal(self, method_name, arg):
# Special handling for rewrite functions. If reciprocal rewrite returns
# unmodified expression, then return None
t = self._call_reciprocal(method_name, arg)
if t is not None and t != self._reciprocal_of(arg):
return 1/t
def _eval_rewrite_as_exp(self, arg, **kwargs):
return self._rewrite_reciprocal("_eval_rewrite_as_exp", arg)
def _eval_rewrite_as_tractable(self, arg, **kwargs):
return self._rewrite_reciprocal("_eval_rewrite_as_tractable", arg)
def _eval_rewrite_as_tanh(self, arg, **kwargs):
return self._rewrite_reciprocal("_eval_rewrite_as_tanh", arg)
def _eval_rewrite_as_coth(self, arg, **kwargs):
return self._rewrite_reciprocal("_eval_rewrite_as_coth", arg)
def as_real_imag(self, deep = True, **hints):
return (1 / self._reciprocal_of(self.args[0])).as_real_imag(deep, **hints)
def _eval_conjugate(self):
return self.func(self.args[0].conjugate())
def _eval_expand_complex(self, deep=True, **hints):
re_part, im_part = self.as_real_imag(deep=True, **hints)
return re_part + S.ImaginaryUnit*im_part
def _eval_as_leading_term(self, x):
return (1/self._reciprocal_of(self.args[0]))._eval_as_leading_term(x)
def _eval_is_extended_real(self):
return self._reciprocal_of(self.args[0]).is_extended_real
def _eval_is_finite(self):
return (1/self._reciprocal_of(self.args[0])).is_finite
class csch(ReciprocalHyperbolicFunction):
r"""
The hyperbolic cosecant function, `\frac{2}{e^x - e^{-x}}`
* csch(x) -> Returns the hyperbolic cosecant of x
See Also
========
sinh, cosh, tanh, sech, asinh, acosh
"""
_reciprocal_of = sinh
_is_odd = True
def fdiff(self, argindex=1):
"""
Returns the first derivative of this function
"""
if argindex == 1:
return -coth(self.args[0]) * csch(self.args[0])
else:
raise ArgumentIndexError(self, argindex)
@staticmethod
@cacheit
def taylor_term(n, x, *previous_terms):
"""
Returns the next term in the Taylor series expansion
"""
from sympy import bernoulli
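        # Laurent coefficients of csch about 0: 1/x plus, for odd n,
        # 2*(1 - 2**n)*B(n + 1)/(n + 1)!, so that csch(x) = 1/x - x/6 + 7*x**3/360 - ...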
if n == 0:
return 1/sympify(x)
elif n < 0 or n % 2 == 0:
return S.Zero
else:
x = sympify(x)
B = bernoulli(n + 1)
F = factorial(n + 1)
return 2 * (1 - 2**n) * B/F * x**n
def _eval_rewrite_as_cosh(self, arg, **kwargs):
return S.ImaginaryUnit / cosh(arg + S.ImaginaryUnit * S.Pi / 2)
def _eval_is_positive(self):
if self.args[0].is_extended_real:
return self.args[0].is_positive
def _eval_is_negative(self):
if self.args[0].is_extended_real:
return self.args[0].is_negative
def _sage_(self):
import sage.all as sage
return sage.csch(self.args[0]._sage_())
class sech(ReciprocalHyperbolicFunction):
r"""
The hyperbolic secant function, `\frac{2}{e^x + e^{-x}}`
* sech(x) -> Returns the hyperbolic secant of x
See Also
========
sinh, cosh, tanh, coth, csch, asinh, acosh
"""
_reciprocal_of = cosh
_is_even = True
def fdiff(self, argindex=1):
if argindex == 1:
return - tanh(self.args[0])*sech(self.args[0])
else:
raise ArgumentIndexError(self, argindex)
@staticmethod
@cacheit
def taylor_term(n, x, *previous_terms):
from sympy.functions.combinatorial.numbers import euler
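        # Maclaurin coefficients of sech: euler(n)/n! for even n (odd-order terms
        # vanish), e.g. sech(x) = 1 - x**2/2 + 5*x**4/24 - ...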
if n < 0 or n % 2 == 1:
return S.Zero
else:
x = sympify(x)
return euler(n) / factorial(n) * x**(n)
def _eval_rewrite_as_sinh(self, arg, **kwargs):
return S.ImaginaryUnit / sinh(arg + S.ImaginaryUnit * S.Pi /2)
def _eval_is_positive(self):
if self.args[0].is_extended_real:
return True
def _sage_(self):
import sage.all as sage
return sage.sech(self.args[0]._sage_())
###############################################################################
############################# HYPERBOLIC INVERSES #############################
###############################################################################
class InverseHyperbolicFunction(Function):
"""Base class for inverse hyperbolic functions."""
pass
class asinh(InverseHyperbolicFunction):
"""
The inverse hyperbolic sine function.
* asinh(x) -> Returns the inverse hyperbolic sine of x
See Also
========
acosh, atanh, sinh
"""
def fdiff(self, argindex=1):
if argindex == 1:
return 1/sqrt(self.args[0]**2 + 1)
else:
raise ArgumentIndexError(self, argindex)
@classmethod
def eval(cls, arg):
from sympy import asin
arg = sympify(arg)
if arg.is_Number:
if arg is S.NaN:
return S.NaN
elif arg is S.Infinity:
return S.Infinity
elif arg is S.NegativeInfinity:
return S.NegativeInfinity
elif arg.is_zero:
return S.Zero
elif arg is S.One:
return log(sqrt(2) + 1)
elif arg is S.NegativeOne:
return log(sqrt(2) - 1)
elif arg.is_negative:
return -cls(-arg)
else:
if arg is S.ComplexInfinity:
return S.ComplexInfinity
if arg.is_zero:
return S.Zero
i_coeff = arg.as_coefficient(S.ImaginaryUnit)
if i_coeff is not None:
return S.ImaginaryUnit * asin(i_coeff)
else:
if _coeff_isneg(arg):
return -cls(-arg)
if isinstance(arg, sinh) and arg.args[0].is_number:
z = arg.args[0]
if z.is_real:
return z
r, i = match_real_imag(z)
if r is not None and i is not None:
f = floor((i + pi/2)/pi)
m = z - I*pi*f
even = f.is_even
if even is True:
return m
elif even is False:
return -m
@staticmethod
@cacheit
def taylor_term(n, x, *previous_terms):
if n < 0 or n % 2 == 0:
return S.Zero
else:
x = sympify(x)
if len(previous_terms) >= 2 and n > 2:
p = previous_terms[-2]
return -p * (n - 2)**2/(n*(n - 1)) * x**2
else:
k = (n - 1) // 2
R = RisingFactorial(S.Half, k)
F = factorial(k)
return (-1)**k * R / F * x**n / n
def _eval_as_leading_term(self, x):
from sympy import Order
arg = self.args[0].as_leading_term(x)
if x in arg.free_symbols and Order(1, x).contains(arg):
return arg
else:
return self.func(arg)
def _eval_rewrite_as_log(self, x, **kwargs):
return log(x + sqrt(x**2 + 1))
def inverse(self, argindex=1):
"""
Returns the inverse of this function.
"""
return sinh
def _eval_is_zero(self):
arg = self.args[0]
if arg.is_zero:
return True
class acosh(InverseHyperbolicFunction):
"""
The inverse hyperbolic cosine function.
* acosh(x) -> Returns the inverse hyperbolic cosine of x
See Also
========
asinh, atanh, cosh
"""
def fdiff(self, argindex=1):
if argindex == 1:
return 1/sqrt(self.args[0]**2 - 1)
else:
raise ArgumentIndexError(self, argindex)
@classmethod
def eval(cls, arg):
arg = sympify(arg)
if arg.is_Number:
if arg is S.NaN:
return S.NaN
elif arg is S.Infinity:
return S.Infinity
elif arg is S.NegativeInfinity:
return S.Infinity
elif arg.is_zero:
return S.Pi*S.ImaginaryUnit / 2
elif arg is S.One:
return S.Zero
elif arg is S.NegativeOne:
return S.Pi*S.ImaginaryUnit
if arg.is_number:
cst_table = {
S.ImaginaryUnit: log(S.ImaginaryUnit*(1 + sqrt(2))),
-S.ImaginaryUnit: log(-S.ImaginaryUnit*(1 + sqrt(2))),
S.Half: S.Pi/3,
Rational(-1, 2): S.Pi*Rational(2, 3),
sqrt(2)/2: S.Pi/4,
-sqrt(2)/2: S.Pi*Rational(3, 4),
1/sqrt(2): S.Pi/4,
-1/sqrt(2): S.Pi*Rational(3, 4),
sqrt(3)/2: S.Pi/6,
-sqrt(3)/2: S.Pi*Rational(5, 6),
(sqrt(3) - 1)/sqrt(2**3): S.Pi*Rational(5, 12),
-(sqrt(3) - 1)/sqrt(2**3): S.Pi*Rational(7, 12),
sqrt(2 + sqrt(2))/2: S.Pi/8,
-sqrt(2 + sqrt(2))/2: S.Pi*Rational(7, 8),
sqrt(2 - sqrt(2))/2: S.Pi*Rational(3, 8),
-sqrt(2 - sqrt(2))/2: S.Pi*Rational(5, 8),
(1 + sqrt(3))/(2*sqrt(2)): S.Pi/12,
-(1 + sqrt(3))/(2*sqrt(2)): S.Pi*Rational(11, 12),
(sqrt(5) + 1)/4: S.Pi/5,
-(sqrt(5) + 1)/4: S.Pi*Rational(4, 5)
}
if arg in cst_table:
if arg.is_extended_real:
return cst_table[arg]*S.ImaginaryUnit
return cst_table[arg]
if arg is S.ComplexInfinity:
return S.ComplexInfinity
if arg == S.ImaginaryUnit*S.Infinity:
return S.Infinity + S.ImaginaryUnit*S.Pi/2
if arg == -S.ImaginaryUnit*S.Infinity:
return S.Infinity - S.ImaginaryUnit*S.Pi/2
if arg.is_zero:
return S.Pi*S.ImaginaryUnit*S.Half
if isinstance(arg, cosh) and arg.args[0].is_number:
z = arg.args[0]
if z.is_real:
from sympy.functions.elementary.complexes import Abs
return Abs(z)
r, i = match_real_imag(z)
if r is not None and i is not None:
f = floor(i/pi)
m = z - I*pi*f
even = f.is_even
if even is True:
if r.is_nonnegative:
return m
elif r.is_negative:
return -m
elif even is False:
m -= I*pi
if r.is_nonpositive:
return -m
elif r.is_positive:
return m
@staticmethod
@cacheit
def taylor_term(n, x, *previous_terms):
if n == 0:
return S.Pi*S.ImaginaryUnit / 2
elif n < 0 or n % 2 == 0:
return S.Zero
else:
x = sympify(x)
if len(previous_terms) >= 2 and n > 2:
p = previous_terms[-2]
return p * (n - 2)**2/(n*(n - 1)) * x**2
else:
k = (n - 1) // 2
R = RisingFactorial(S.Half, k)
F = factorial(k)
return -R / F * S.ImaginaryUnit * x**n / n
def _eval_as_leading_term(self, x):
from sympy import Order
arg = self.args[0].as_leading_term(x)
if x in arg.free_symbols and Order(1, x).contains(arg):
return S.ImaginaryUnit*S.Pi/2
else:
return self.func(arg)
def _eval_rewrite_as_log(self, x, **kwargs):
return log(x + sqrt(x + 1) * sqrt(x - 1))
def inverse(self, argindex=1):
"""
Returns the inverse of this function.
"""
return cosh
class atanh(InverseHyperbolicFunction):
"""
The inverse hyperbolic tangent function.
* atanh(x) -> Returns the inverse hyperbolic tangent of x
See Also
========
asinh, acosh, tanh
"""
def fdiff(self, argindex=1):
if argindex == 1:
return 1/(1 - self.args[0]**2)
else:
raise ArgumentIndexError(self, argindex)
@classmethod
def eval(cls, arg):
from sympy import atan
arg = sympify(arg)
if arg.is_Number:
if arg is S.NaN:
return S.NaN
elif arg.is_zero:
return S.Zero
elif arg is S.One:
return S.Infinity
elif arg is S.NegativeOne:
return S.NegativeInfinity
elif arg is S.Infinity:
return -S.ImaginaryUnit * atan(arg)
elif arg is S.NegativeInfinity:
return S.ImaginaryUnit * atan(-arg)
elif arg.is_negative:
return -cls(-arg)
else:
if arg is S.ComplexInfinity:
from sympy.calculus.util import AccumBounds
return S.ImaginaryUnit*AccumBounds(-S.Pi/2, S.Pi/2)
i_coeff = arg.as_coefficient(S.ImaginaryUnit)
if i_coeff is not None:
return S.ImaginaryUnit * atan(i_coeff)
else:
if _coeff_isneg(arg):
return -cls(-arg)
if arg.is_zero:
return S.Zero
if isinstance(arg, tanh) and arg.args[0].is_number:
z = arg.args[0]
if z.is_real:
return z
r, i = match_real_imag(z)
if r is not None and i is not None:
f = floor(2*i/pi)
even = f.is_even
m = z - I*f*pi/2
if even is True:
return m
elif even is False:
return m - I*pi/2
@staticmethod
@cacheit
def taylor_term(n, x, *previous_terms):
if n < 0 or n % 2 == 0:
return S.Zero
else:
x = sympify(x)
return x**n / n
def _eval_as_leading_term(self, x):
from sympy import Order
arg = self.args[0].as_leading_term(x)
if x in arg.free_symbols and Order(1, x).contains(arg):
return arg
else:
return self.func(arg)
def _eval_rewrite_as_log(self, x, **kwargs):
return (log(1 + x) - log(1 - x)) / 2
def _eval_is_zero(self):
arg = self.args[0]
if arg.is_zero:
return True
def inverse(self, argindex=1):
"""
Returns the inverse of this function.
"""
return tanh
class acoth(InverseHyperbolicFunction):
"""
The inverse hyperbolic cotangent function.
* acoth(x) -> Returns the inverse hyperbolic cotangent of x
"""
def fdiff(self, argindex=1):
if argindex == 1:
return 1/(1 - self.args[0]**2)
else:
raise ArgumentIndexError(self, argindex)
@classmethod
def eval(cls, arg):
from sympy import acot
arg = sympify(arg)
if arg.is_Number:
if arg is S.NaN:
return S.NaN
elif arg is S.Infinity:
return S.Zero
elif arg is S.NegativeInfinity:
return S.Zero
elif arg.is_zero:
return S.Pi*S.ImaginaryUnit / 2
elif arg is S.One:
return S.Infinity
elif arg is S.NegativeOne:
return S.NegativeInfinity
elif arg.is_negative:
return -cls(-arg)
else:
if arg is S.ComplexInfinity:
return S.Zero
i_coeff = arg.as_coefficient(S.ImaginaryUnit)
if i_coeff is not None:
return -S.ImaginaryUnit * acot(i_coeff)
else:
if _coeff_isneg(arg):
return -cls(-arg)
if arg.is_zero:
return S.Pi*S.ImaginaryUnit*S.Half
@staticmethod
@cacheit
def taylor_term(n, x, *previous_terms):
if n == 0:
return S.Pi*S.ImaginaryUnit / 2
elif n < 0 or n % 2 == 0:
return S.Zero
else:
x = sympify(x)
return x**n / n
def _eval_as_leading_term(self, x):
from sympy import Order
arg = self.args[0].as_leading_term(x)
if x in arg.free_symbols and Order(1, x).contains(arg):
return S.ImaginaryUnit*S.Pi/2
else:
return self.func(arg)
def _eval_rewrite_as_log(self, x, **kwargs):
return (log(1 + 1/x) - log(1 - 1/x)) / 2
def inverse(self, argindex=1):
"""
Returns the inverse of this function.
"""
return coth
class asech(InverseHyperbolicFunction):
"""
The inverse hyperbolic secant function.
* asech(x) -> Returns the inverse hyperbolic secant of x
Examples
========
>>> from sympy import asech, sqrt, S
>>> from sympy.abc import x
>>> asech(x).diff(x)
-1/(x*sqrt(1 - x**2))
>>> asech(1).diff(x)
0
>>> asech(1)
0
>>> asech(S(2))
I*pi/3
>>> asech(-sqrt(2))
3*I*pi/4
>>> asech((sqrt(6) - sqrt(2)))
I*pi/12
See Also
========
asinh, atanh, cosh, acoth
References
==========
.. [1] https://en.wikipedia.org/wiki/Hyperbolic_function
.. [2] http://dlmf.nist.gov/4.37
.. [3] http://functions.wolfram.com/ElementaryFunctions/ArcSech/
"""
def fdiff(self, argindex=1):
if argindex == 1:
z = self.args[0]
return -1/(z*sqrt(1 - z**2))
else:
raise ArgumentIndexError(self, argindex)
@classmethod
def eval(cls, arg):
arg = sympify(arg)
if arg.is_Number:
if arg is S.NaN:
return S.NaN
elif arg is S.Infinity:
return S.Pi*S.ImaginaryUnit / 2
elif arg is S.NegativeInfinity:
return S.Pi*S.ImaginaryUnit / 2
elif arg.is_zero:
return S.Infinity
elif arg is S.One:
return S.Zero
elif arg is S.NegativeOne:
return S.Pi*S.ImaginaryUnit
if arg.is_number:
cst_table = {
S.ImaginaryUnit: - (S.Pi*S.ImaginaryUnit / 2) + log(1 + sqrt(2)),
-S.ImaginaryUnit: (S.Pi*S.ImaginaryUnit / 2) + log(1 + sqrt(2)),
(sqrt(6) - sqrt(2)): S.Pi / 12,
(sqrt(2) - sqrt(6)): 11*S.Pi / 12,
sqrt(2 - 2/sqrt(5)): S.Pi / 10,
-sqrt(2 - 2/sqrt(5)): 9*S.Pi / 10,
2 / sqrt(2 + sqrt(2)): S.Pi / 8,
-2 / sqrt(2 + sqrt(2)): 7*S.Pi / 8,
2 / sqrt(3): S.Pi / 6,
-2 / sqrt(3): 5*S.Pi / 6,
(sqrt(5) - 1): S.Pi / 5,
(1 - sqrt(5)): 4*S.Pi / 5,
sqrt(2): S.Pi / 4,
-sqrt(2): 3*S.Pi / 4,
sqrt(2 + 2/sqrt(5)): 3*S.Pi / 10,
-sqrt(2 + 2/sqrt(5)): 7*S.Pi / 10,
S(2): S.Pi / 3,
-S(2): 2*S.Pi / 3,
sqrt(2*(2 + sqrt(2))): 3*S.Pi / 8,
-sqrt(2*(2 + sqrt(2))): 5*S.Pi / 8,
(1 + sqrt(5)): 2*S.Pi / 5,
(-1 - sqrt(5)): 3*S.Pi / 5,
(sqrt(6) + sqrt(2)): 5*S.Pi / 12,
(-sqrt(6) - sqrt(2)): 7*S.Pi / 12,
}
if arg in cst_table:
if arg.is_extended_real:
return cst_table[arg]*S.ImaginaryUnit
return cst_table[arg]
if arg is S.ComplexInfinity:
from sympy.calculus.util import AccumBounds
return S.ImaginaryUnit*AccumBounds(-S.Pi/2, S.Pi/2)
if arg.is_zero:
return S.Infinity
@staticmethod
@cacheit
def expansion_term(n, x, *previous_terms):
if n == 0:
return log(2 / x)
elif n < 0 or n % 2 == 1:
return S.Zero
else:
x = sympify(x)
if len(previous_terms) > 2 and n > 2:
p = previous_terms[-2]
return p * (n - 1)**2 // (n // 2)**2 * x**2 / 4
else:
k = n // 2
                R = RisingFactorial(S.Half, k) * n
F = factorial(k) * n // 2 * n // 2
return -1 * R / F * x**n / 4
def inverse(self, argindex=1):
"""
Returns the inverse of this function.
"""
return sech
def _eval_rewrite_as_log(self, arg, **kwargs):
return log(1/arg + sqrt(1/arg - 1) * sqrt(1/arg + 1))
class acsch(InverseHyperbolicFunction):
"""
The inverse hyperbolic cosecant function.
* acsch(x) -> Returns the inverse hyperbolic cosecant of x
Examples
========
>>> from sympy import acsch, sqrt, S
>>> from sympy.abc import x
>>> acsch(x).diff(x)
-1/(x**2*sqrt(1 + x**(-2)))
>>> acsch(1).diff(x)
0
>>> acsch(1)
log(1 + sqrt(2))
>>> acsch(S.ImaginaryUnit)
-I*pi/2
>>> acsch(-2*S.ImaginaryUnit)
I*pi/6
>>> acsch(S.ImaginaryUnit*(sqrt(6) - sqrt(2)))
-5*I*pi/12
References
==========
.. [1] https://en.wikipedia.org/wiki/Hyperbolic_function
.. [2] http://dlmf.nist.gov/4.37
.. [3] http://functions.wolfram.com/ElementaryFunctions/ArcCsch/
"""
def fdiff(self, argindex=1):
if argindex == 1:
z = self.args[0]
return -1/(z**2*sqrt(1 + 1/z**2))
else:
raise ArgumentIndexError(self, argindex)
@classmethod
def eval(cls, arg):
arg = sympify(arg)
if arg.is_Number:
if arg is S.NaN:
return S.NaN
elif arg is S.Infinity:
return S.Zero
elif arg is S.NegativeInfinity:
return S.Zero
elif arg.is_zero:
return S.ComplexInfinity
elif arg is S.One:
return log(1 + sqrt(2))
elif arg is S.NegativeOne:
return - log(1 + sqrt(2))
if arg.is_number:
cst_table = {
S.ImaginaryUnit: -S.Pi / 2,
S.ImaginaryUnit*(sqrt(2) + sqrt(6)): -S.Pi / 12,
S.ImaginaryUnit*(1 + sqrt(5)): -S.Pi / 10,
S.ImaginaryUnit*2 / sqrt(2 - sqrt(2)): -S.Pi / 8,
S.ImaginaryUnit*2: -S.Pi / 6,
S.ImaginaryUnit*sqrt(2 + 2/sqrt(5)): -S.Pi / 5,
S.ImaginaryUnit*sqrt(2): -S.Pi / 4,
S.ImaginaryUnit*(sqrt(5)-1): -3*S.Pi / 10,
S.ImaginaryUnit*2 / sqrt(3): -S.Pi / 3,
S.ImaginaryUnit*2 / sqrt(2 + sqrt(2)): -3*S.Pi / 8,
S.ImaginaryUnit*sqrt(2 - 2/sqrt(5)): -2*S.Pi / 5,
S.ImaginaryUnit*(sqrt(6) - sqrt(2)): -5*S.Pi / 12,
S(2): -S.ImaginaryUnit*log((1+sqrt(5))/2),
}
if arg in cst_table:
return cst_table[arg]*S.ImaginaryUnit
if arg is S.ComplexInfinity:
return S.Zero
if arg.is_zero:
return S.ComplexInfinity
if _coeff_isneg(arg):
return -cls(-arg)
def inverse(self, argindex=1):
"""
Returns the inverse of this function.
"""
return csch
def _eval_rewrite_as_log(self, arg, **kwargs):
return log(1/arg + sqrt(1/arg**2 + 1))
|
bsd-3-clause
|
zzeleznick/zDjango
|
venv/lib/python2.7/site-packages/django/utils/log.py
|
108
|
5012
|
import logging
import traceback
from django.conf import settings
from django.core import mail
from django.core.mail import get_connection
from django.views.debug import ExceptionReporter, get_exception_reporter_filter
# Make sure a NullHandler is available
# This was added in Python 2.7/3.2
try:
from logging import NullHandler
except ImportError:
class NullHandler(logging.Handler):
def emit(self, record):
pass
# Make sure that dictConfig is available
# This was added in Python 2.7/3.2
try:
from logging.config import dictConfig
except ImportError:
from django.utils.dictconfig import dictConfig
getLogger = logging.getLogger
# Default logging for Django. This sends an email to the site admins on every
# HTTP 500 error. Depending on DEBUG, all other log records are either sent to
# the console (DEBUG=True) or discarded by means of the NullHandler (DEBUG=False).
DEFAULT_LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse',
},
'require_debug_true': {
'()': 'django.utils.log.RequireDebugTrue',
},
},
'handlers': {
'console': {
'level': 'INFO',
'filters': ['require_debug_true'],
'class': 'logging.StreamHandler',
},
'null': {
'class': 'django.utils.log.NullHandler',
},
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django': {
'handlers': ['console'],
},
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': False,
},
'django.security': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': False,
},
'py.warnings': {
'handlers': ['console'],
},
}
}
class AdminEmailHandler(logging.Handler):
"""An exception log handler that emails log entries to site admins.
If the request is passed as the first argument to the log record,
request data will be provided in the email report.
"""
def __init__(self, include_html=False, email_backend=None):
logging.Handler.__init__(self)
self.include_html = include_html
self.email_backend = email_backend
def emit(self, record):
try:
request = record.request
subject = '%s (%s IP): %s' % (
record.levelname,
('internal' if request.META.get('REMOTE_ADDR') in settings.INTERNAL_IPS
else 'EXTERNAL'),
record.getMessage()
)
filter = get_exception_reporter_filter(request)
request_repr = filter.get_request_repr(request)
except Exception:
subject = '%s: %s' % (
record.levelname,
record.getMessage()
)
request = None
request_repr = "Request repr() unavailable."
subject = self.format_subject(subject)
if record.exc_info:
exc_info = record.exc_info
stack_trace = '\n'.join(traceback.format_exception(*record.exc_info))
else:
exc_info = (None, record.getMessage(), None)
stack_trace = 'No stack trace available'
message = "%s\n\n%s" % (stack_trace, request_repr)
reporter = ExceptionReporter(request, is_email=True, *exc_info)
html_message = reporter.get_traceback_html() if self.include_html else None
mail.mail_admins(subject, message, fail_silently=True,
html_message=html_message,
connection=self.connection())
def connection(self):
return get_connection(backend=self.email_backend, fail_silently=True)
def format_subject(self, subject):
"""
Escape CR and LF characters, and limit length.
RFC 2822's hard limit is 998 characters per line. So, minus "Subject: "
the actual subject must be no longer than 989 characters.
"""
formatted_subject = subject.replace('\n', '\\n').replace('\r', '\\r')
return formatted_subject[:989]
class CallbackFilter(logging.Filter):
"""
A logging filter that checks the return value of a given callable (which
takes the record-to-be-logged as its only parameter) to decide whether to
log a record.
"""
def __init__(self, callback):
self.callback = callback
def filter(self, record):
if self.callback(record):
return 1
return 0
class RequireDebugFalse(logging.Filter):
def filter(self, record):
return not settings.DEBUG
class RequireDebugTrue(logging.Filter):
def filter(self, record):
return settings.DEBUG
|
mit
|
837468220/python-for-android
|
python-modules/twisted/twisted/news/test/test_nntp.py
|
55
|
3417
|
# Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
from twisted.trial import unittest
from twisted.news import database
from twisted.news import nntp
from twisted.protocols import loopback
ALL_GROUPS = ('alt.test.nntp', 0, 1, 'y'),
GROUP = ('0', '1', '0', 'alt.test.nntp', 'group', 'selected')
SUBSCRIPTIONS = ['alt.test.nntp', 'news.testgroup']
POST_STRING = """Path: not-for-mail
From: <[email protected]>
Subject: a test
Newsgroups: alt.test.nntp
Organization:
Summary:
Keywords:
User-Agent: tin/1.4.5-20010409 ("One More Nightmare") (UNIX) (Linux/2.4.17 (i686))
this is a test
.
..
...
lala
moo
--
"One World, one Web, one Program." - Microsoft(R) promotional ad
"Ein Volk, ein Reich, ein Fuhrer." - Adolf Hitler
--
10:56pm up 4 days, 4:42, 1 user, load average: 0.08, 0.08, 0.12
"""
class TestNNTPClient(nntp.NNTPClient):
def __init__(self):
nntp.NNTPClient.__init__(self)
def assertEquals(self, foo, bar):
if foo != bar: raise AssertionError("%r != %r!" % (foo, bar))
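    # Callback chain exercised by the loopback test below:
    #   connectionMade -> fetchSubscriptions -> gotSubscriptions -> fetchGroups
    #   -> gotAllGroups -> fetchGroup('alt.test.nntp') -> gotGroup -> postArticle
    #   -> postedOk -> fetchArticle(1) -> gotArticle -> loseConnection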
def connectionMade(self):
nntp.NNTPClient.connectionMade(self)
self.fetchSubscriptions()
def gotSubscriptions(self, subscriptions):
self.assertEquals(len(subscriptions), len(SUBSCRIPTIONS))
for s in subscriptions:
assert s in SUBSCRIPTIONS
self.fetchGroups()
def gotAllGroups(self, info):
self.assertEquals(len(info), len(ALL_GROUPS))
self.assertEquals(info[0], ALL_GROUPS[0])
self.fetchGroup('alt.test.nntp')
def getAllGroupsFailed(self, error):
raise AssertionError("fetchGroups() failed: %s" % (error,))
def gotGroup(self, info):
self.assertEquals(len(info), 6)
self.assertEquals(info, GROUP)
self.postArticle(POST_STRING)
def getSubscriptionsFailed(self, error):
raise AssertionError("fetchSubscriptions() failed: %s" % (error,))
def getGroupFailed(self, error):
raise AssertionError("fetchGroup() failed: %s" % (error,))
def postFailed(self, error):
raise AssertionError("postArticle() failed: %s" % (error,))
def postedOk(self):
self.fetchArticle(1)
def gotArticle(self, info):
origBody = POST_STRING.split('\n\n')[1]
newBody = info.split('\n\n', 1)[1]
self.assertEquals(origBody, newBody)
# We're done
self.transport.loseConnection()
def getArticleFailed(self, error):
raise AssertionError("fetchArticle() failed: %s" % (error,))
class NNTPTestCase(unittest.TestCase):
def setUp(self):
self.server = nntp.NNTPServer()
self.server.factory = self
self.backend = database.NewsShelf(None, 'news.db')
self.backend.addGroup('alt.test.nntp', 'y')
for s in SUBSCRIPTIONS:
self.backend.addSubscription(s)
self.client = TestNNTPClient()
def testLoopback(self):
return loopback.loopbackAsync(self.server, self.client)
# XXX This test is woefully incomplete. It tests the single
# most common code path and nothing else. Expand it and the
# test fairy will leave you a surprise.
# reactor.iterate(1) # fetchGroups()
# reactor.iterate(1) # fetchGroup()
# reactor.iterate(1) # postArticle()
|
apache-2.0
|
suyouxin/android_external_skia
|
make.py
|
81
|
5539
|
# Copyright 2011 Google Inc.
#
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# "Makefile" replacement to build skia for Windows.
# More info at https://sites.google.com/site/skiadocs/
#
# Some usage examples:
# make clean
# make tests
# make bench BUILDTYPE=Release
# make gm GYP_DEFINES=skia_scalar=fixed BUILDTYPE=Release
# make all
import os
import shutil
import sys
BUILDTYPE = 'Debug'
# special targets
TARGET_ALL = 'all'
TARGET_CLEAN = 'clean'
TARGET_DEFAULT = 'most'
TARGET_GYP = 'gyp'
SCRIPT_DIR = os.path.abspath(os.path.dirname(__file__))
OUT_SUBDIR = os.environ.get('SKIA_OUT', 'out')
GYP_SUBDIR = 'gyp'
# Simple functions that report what they are doing, and exit(1) on failure.
def cd(path):
print '> cd %s' % path
if not os.path.isdir(path):
print 'directory %s does not exist' % path
sys.exit(1)
os.chdir(path)
def rmtree(path):
print '> rmtree %s' % path
shutil.rmtree(path, ignore_errors=True)
def runcommand(command):
print '> %s' % command
if os.system(command):
sys.exit(1)
def MakeClean():
"""Cross-platform "make clean" operation."""
cd(SCRIPT_DIR)
rmtree(OUT_SUBDIR)
def CheckWindowsEnvironment():
"""For Windows: check environment variables needed for command-line build.
If those environment variables are missing, try to set them.
If environment variables can be set up, this function returns; otherwise,
it displays an error message and exits.
"""
# If we already have the proper environment variables, nothing to do here.
try:
env_DevEnvDir = os.environ['DevEnvDir']
return # found it, so we are done
except KeyError:
pass # go on and run the rest of this function
print ('\nCould not find Visual Studio environment variables.'
'\nPerhaps you have not yet run vcvars32.bat as described at'
'\nhttp://msdn.microsoft.com/en-us/library/f2ccy3wt.aspx ?')
found_path = None
try:
possible_path = os.path.abspath(os.path.join(
os.environ['VS100COMNTOOLS'], os.path.pardir, os.path.pardir,
'VC', 'bin', 'vcvars32.bat'))
if os.path.exists(possible_path):
found_path = possible_path
except KeyError:
pass
if found_path:
print '\nIt looks like you can run that script at:\n%s' % found_path
else:
print '\nUnable to find vcvars32.bat on your system.'
sys.exit(1)
def MakeWindows(targets):
"""For Windows: build as appropriate for the command line arguments.
parameters:
targets: build targets as a list of strings
"""
if os.environ.get('CHROME_HEADLESS', '0') != '1':
# TODO(epoger): I'm not sure if this is needed for ninja builds.
CheckWindowsEnvironment()
# Run gyp_skia to prepare Visual Studio projects.
cd(SCRIPT_DIR)
runcommand('python gyp_skia')
# We already built the gypfiles...
while TARGET_GYP in targets:
targets.remove(TARGET_GYP)
# And call ninja to do the work!
if targets:
runcommand('ninja -C %s %s' % (
os.path.join(OUT_SUBDIR, BUILDTYPE), ' '.join(targets)))
def Make(args):
"""Main function.
parameters:
args: command line arguments as a list of strings
"""
# handle any variable-setting parameters or special targets
global BUILDTYPE
# if no targets were specified at all, make default target
if not args:
args = [TARGET_DEFAULT]
targets = []
for arg in args:
# If user requests "make all", chain to our explicitly-declared "everything"
# target. See https://code.google.com/p/skia/issues/detail?id=932 ("gyp
# automatically creates "all" target on some build flavors but not others")
if arg == TARGET_ALL:
targets.append('everything')
elif arg == TARGET_CLEAN:
MakeClean()
elif arg.startswith('BUILDTYPE='):
BUILDTYPE = arg[10:]
elif arg.startswith('GYP_DEFINES='):
os.environ['GYP_DEFINES'] = arg[12:]
else:
targets.append(arg)
# if there are no remaining targets, we're done
if not targets:
sys.exit(0)
# dispatch to appropriate Make<Platform>() variant.
if os.name == 'nt':
MakeWindows(targets)
sys.exit(0)
elif os.name == 'posix':
if sys.platform == 'darwin':
print 'Mac developers should not run this script; see ' \
'https://sites.google.com/site/skiadocs/user-documentation/quick-start-guides/mac'
sys.exit(1)
elif sys.platform == 'cygwin':
print 'Windows development on Cygwin is not currently supported; see ' \
'https://sites.google.com/site/skiadocs/user-documentation/quick-start-guides/windows'
sys.exit(1)
else:
print 'Unix developers should not run this script; see ' \
'https://sites.google.com/site/skiadocs/user-documentation/quick-start-guides/linux'
sys.exit(1)
else:
print 'unknown platform (os.name=%s, sys.platform=%s); see %s' % (
os.name, sys.platform, 'https://sites.google.com/site/skiadocs/')
sys.exit(1)
sys.exit(0)
# main()
Make(sys.argv[1:])
|
bsd-3-clause
|
javipalanca/Django-facebook
|
django_facebook/signals.py
|
27
|
1132
|
from django.dispatch import Signal
# Sent right after user is created
facebook_user_registered = Signal(providing_args=['user', 'facebook_data'])
# Sent after user is created, before profile is updated with data from Facebook
facebook_pre_update = Signal(
providing_args=['user', 'profile', 'facebook_data'])
facebook_post_update = Signal(
providing_args=['user', 'profile', 'facebook_data'])
# Sent after storing the friends from graph to db
facebook_post_store_friends = Signal(
providing_args=['user', 'friends', 'current_friends', 'inserted_friends'])
# Sent after storing the likes from graph to db
facebook_post_store_likes = Signal(
providing_args=['user', 'likes', 'current_likes', 'inserted_likes'])
# Some signals for compatibility with Django Registration
# A new user has registered.
user_registered = Signal(providing_args=['user', 'request'])
# A user has activated his or her account.
user_activated = Signal(providing_args=['user', 'request'])
# Run when the token extend finished
facebook_token_extend_finished = Signal(
providing_args=['user', 'profile', 'token_changed', 'old_token'])
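# Connecting a receiver (sketch; the handler name below is an assumption):
#
#   from django.dispatch import receiver
#
#   @receiver(facebook_user_registered)
#   def handle_facebook_registration(sender, user, facebook_data, **kwargs):
#       ...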
|
bsd-3-clause
|
antmicro/distant-bes
|
distantbes/cli.py
|
1
|
2632
|
from distantbes import Invocation
from distantbes.enums import EXIT_CODES
from time import sleep
import argparse
H = [
"gRPC endpoint of the Build Event Service",
"gRPC endpoint of the Content Addressable Storage",
"force localhost in File message",
"build log file",
"one or more artifact files",
"build title/command",
"sleep N seconds before finishing the build",
"print available status codes and exit",
"status code of the build",
"test log file"
]
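# Example invocation (sketch; endpoint and file names are assumptions):
#   python -m distantbes.cli --bes-backend localhost:1985 --stdout-file build.log \
#       --artifacts out/a.bin out/b.bin --command "my build" --status_code 0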
def main_func(args):
if args.print_status_codes:
for v in range(0, len(EXIT_CODES)):
print("{}\t{}".format(str(v), EXIT_CODES[v]))
return None
success = not bool(args.status_code)
i = Invocation(
grpc_bes_url=args.bes_backend,
grpc_cas_url=args.cas_backend,
command=args.command,
force_localhost_in_cas_msg=args.force_cas_localhost,
)
i.open()
if args.stdout_file is not None:
with open(args.stdout_file, "r") as f:
i.add_stdout(f.read())
if args.artifacts is not None or args.test_log_file is not None:
i.announce_target("default")
i.finalize_target_upload_files(i.targets[0], success=success, artifacts=args.artifacts)
if args.test_log_file is not None:
with open(args.test_log_file, "r") as f:
i.add_test_to_target(i.targets[0], logstr=f.read())
i.add_build_metrics(1,1,1,1,1)
if args.sleep is not None:
sleep(args.sleep)
i.close(status_code=args.status_code)
print(i.invocation_id)
def get_parser():
parser = argparse.ArgumentParser()
parser.add_argument('--bes-backend', type=str, default="localhost:1985", help=H[0])
parser.add_argument('--cas-backend', type=str, default="localhost:1985", help=H[1])
parser.add_argument('--force-cas-localhost', action="store_true", help=H[2])
parser.add_argument('--stdout-file', type=str, help=H[3])
parser.add_argument('--artifacts', nargs="+", help=H[4])
parser.add_argument('--test-log-file', type=str, help=H[9])
parser.add_argument('--command', type=str, default="distant-bes", help=H[5])
parser.add_argument('--sleep', type=int, help=H[6])
parser.add_argument('--print-status-codes', action="store_true", help=H[7])
parser.add_argument('--status_code', type=int, default=0, help=H[8])
parser.set_defaults(func=main_func)
return parser
def main():
parser = get_parser()
args = parser.parse_args()
args.func(args)
if __name__ == '__main__':
main()
|
apache-2.0
|
wbond/subversion
|
build/generator/gen_win.py
|
2
|
60381
|
#
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
#
#
# gen_win.py -- base class for generating windows projects
#
import os
try:
# Python >=2.5
from hashlib import md5 as hashlib_md5
except ImportError:
# Python <2.5
from md5 import md5 as hashlib_md5
import sys
import fnmatch
import re
import subprocess
import glob
import string
import generator.swig.header_wrappers
import generator.swig.checkout_swig_header
import generator.swig.external_runtime
if sys.version_info[0] >= 3:
# Python >=3.0
from io import StringIO
else:
# Python <3.0
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
import gen_base
import ezt
class GeneratorBase(gen_base.GeneratorBase):
"""This intermediate base class exists to be instantiated by win-tests.py,
in order to obtain information from build.conf and library paths without
actually doing any generation."""
_extension_map = {
('exe', 'target'): '.exe',
('exe', 'object'): '.obj',
('lib', 'target'): '.dll',
('lib', 'object'): '.obj',
('pyd', 'target'): '.pyd',
('pyd', 'object'): '.obj',
}
def parse_options(self, options):
self.apr_path = 'apr'
self.apr_util_path = 'apr-util'
self.apr_iconv_path = 'apr-iconv'
self.serf_path = None
self.serf_lib = None
self.bdb_path = 'db4-win32'
self.without_neon = False
self.neon_path = 'neon'
self.neon_ver = 25005
self.httpd_path = None
self.libintl_path = None
self.zlib_path = 'zlib'
self.openssl_path = None
self.jdk_path = None
self.junit_path = None
self.swig_path = None
self.vs_version = '2002'
self.sln_version = '7.00'
self.vcproj_version = '7.00'
self.vcproj_extension = '.vcproj'
self.sqlite_path = 'sqlite-amalgamation'
self.skip_sections = { 'mod_dav_svn': None,
'mod_authz_svn': None,
'mod_dontdothat' : None,
'libsvn_auth_kwallet': None,
'libsvn_auth_gnome_keyring': None }
# Instrumentation options
self.disable_shared = None
self.static_apr = None
self.instrument_apr_pools = None
self.instrument_purify_quantify = None
self.configure_apr_util = None
self.sasl_path = None
# NLS options
self.enable_nls = None
# ML (assembler) is disabled by default; use --enable-ml to detect
self.enable_ml = None
for opt, val in options:
if opt == '--with-berkeley-db':
self.bdb_path = val
elif opt == '--with-apr':
self.apr_path = val
elif opt == '--with-apr-util':
self.apr_util_path = val
elif opt == '--with-apr-iconv':
self.apr_iconv_path = val
elif opt == '--with-serf':
self.serf_path = val
elif opt == '--with-neon':
self.neon_path = val
elif opt == '--without-neon':
self.without_neon = True
elif opt == '--with-httpd':
self.httpd_path = val
del self.skip_sections['mod_dav_svn']
del self.skip_sections['mod_authz_svn']
del self.skip_sections['mod_dontdothat']
elif opt == '--with-libintl':
self.libintl_path = val
self.enable_nls = 1
elif opt == '--with-jdk':
self.jdk_path = val
elif opt == '--with-junit':
self.junit_path = val
elif opt == '--with-zlib':
self.zlib_path = val
elif opt == '--with-swig':
self.swig_path = val
elif opt == '--with-sqlite':
self.sqlite_path = val
elif opt == '--with-sasl':
self.sasl_path = val
elif opt == '--with-openssl':
self.openssl_path = val
elif opt == '--enable-purify':
self.instrument_purify_quantify = 1
self.instrument_apr_pools = 1
elif opt == '--enable-quantify':
self.instrument_purify_quantify = 1
elif opt == '--enable-pool-debug':
self.instrument_apr_pools = 1
elif opt == '--enable-nls':
self.enable_nls = 1
elif opt == '--enable-bdb-in-apr-util':
self.configure_apr_util = 1
elif opt == '--enable-ml':
self.enable_ml = 1
elif opt == '--disable-shared':
self.disable_shared = 1
elif opt == '--with-static-apr':
self.static_apr = 1
elif opt == '--vsnet-version':
if val == '2002' or re.match('7(\.\d+)?', val):
self.vs_version = '2002'
self.sln_version = '7.00'
self.vcproj_version = '7.00'
self.vcproj_extension = '.vcproj'
elif val == '2003' or re.match('8(\.\d+)?', val):
self.vs_version = '2003'
self.sln_version = '8.00'
self.vcproj_version = '7.10'
self.vcproj_extension = '.vcproj'
elif val == '2005' or re.match('9(\.\d+)?', val):
self.vs_version = '2005'
self.sln_version = '9.00'
self.vcproj_version = '8.00'
self.vcproj_extension = '.vcproj'
elif val == '2008' or re.match('10(\.\d+)?', val):
self.vs_version = '2008'
self.sln_version = '10.00'
self.vcproj_version = '9.00'
self.vcproj_extension = '.vcproj'
elif val == '2010':
self.vs_version = '2010'
self.sln_version = '11.00'
self.vcproj_version = '10.0'
self.vcproj_extension = '.vcxproj'
else:
print('WARNING: Unknown VS.NET version "%s",'
' assuming "%s"\n' % (val, '7.00'))
def __init__(self, fname, verfname, options):
# parse (and save) the options that were passed to us
self.parse_options(options)
# Initialize parent
gen_base.GeneratorBase.__init__(self, fname, verfname, options)
# Find Berkeley DB
self._find_bdb()
def _find_bdb(self):
"Find the Berkeley DB library and version"
for ver in ("48", "47", "46", "45", "44", "43", "42", "41", "40"):
lib = "libdb" + ver
path = os.path.join(self.bdb_path, "lib")
if os.path.exists(os.path.join(path, lib + ".lib")):
self.bdb_lib = lib
break
else:
self.bdb_lib = None
class WinGeneratorBase(GeneratorBase):
"Base class for all Windows project files generators"
def __init__(self, fname, verfname, options, subdir):
"""
    Do some Windows-specific setup: build the list of platforms and
    configurations, and create the necessary paths.
"""
# Initialize parent
GeneratorBase.__init__(self, fname, verfname, options)
if self.bdb_lib is not None:
print("Found %s.lib in %s\n" % (self.bdb_lib, self.bdb_path))
else:
print("BDB not found, BDB fs will not be built\n")
if subdir == 'vcnet-vcproj':
print('Generating for Visual Studio %s\n' % self.vs_version)
# Find the right Ruby include and libraries dirs and
# library name to link SWIG bindings with
self._find_ruby()
# Find the right Perl library name to link SWIG bindings with
self._find_perl()
# Find the right Python include and libraries dirs for SWIG bindings
self._find_python()
# Find the installed SWIG version to adjust swig options
self._find_swig()
# Find the installed Java Development Kit
self._find_jdk()
# Find APR and APR-util version
self._find_apr()
self._find_apr_util()
# Find Sqlite
self._find_sqlite()
# Look for ZLib and ML
if self.zlib_path:
self._find_zlib()
self._find_ml()
# Find neon version
if self.neon_path:
self._find_neon()
# Find serf and its dependencies
if self.serf_path:
self._find_serf()
#Make some files for the installer so that we don't need to
#require sed or some other command to do it
### GJS: don't do this right now
if 0:
buf = open(os.path.join("packages","win32-innosetup","svn.iss.in"), 'rb').read()
buf = buf.replace("@VERSION@", "0.16.1+").replace("@RELEASE@", "4365")
buf = buf.replace("@DBBINDLL@", self.dbbindll)
svnissrel = os.path.join("packages","win32-innosetup","svn.iss.release")
svnissdeb = os.path.join("packages","win32-innosetup","svn.iss.debug")
if self.write_file_if_changed(svnissrel, buf.replace("@CONFIG@", "Release")):
print('Wrote %s' % svnissrel)
if self.write_file_if_changed(svnissdeb, buf.replace("@CONFIG@", "Debug")):
print('Wrote %s' % svnissdeb)
#Make the project files directory if it doesn't exist
#TODO win32 might not be the best path as win64 stuff will go here too
self.projfilesdir=os.path.join("build","win32",subdir)
self.rootpath = self.find_rootpath()
if not os.path.exists(self.projfilesdir):
os.makedirs(self.projfilesdir)
# Generate the build_zlib.bat file
if self.zlib_path:
data = {'zlib_path': os.path.abspath(self.zlib_path),
'zlib_version': self.zlib_version,
'use_ml': self.have_ml and 1 or None}
bat = os.path.join(self.projfilesdir, 'build_zlib.bat')
self.write_with_template(bat, 'templates/build_zlib.ezt', data)
# Generate the build_locale.bat file
pofiles = []
if self.enable_nls:
for po in os.listdir(os.path.join('subversion', 'po')):
if fnmatch.fnmatch(po, '*.po'):
pofiles.append(POFile(po[:-3]))
data = {'pofiles': pofiles}
self.write_with_template(os.path.join(self.projfilesdir,
'build_locale.bat'),
'templates/build_locale.ezt', data)
#Here we can add additional platforms to compile for
self.platforms = ['Win32']
# VC 2002 and VC 2003 only allow a single platform per project file
if subdir == 'vcnet-vcproj':
if self.vcproj_version != '7.00' and self.vcproj_version != '7.10':
self.platforms = ['Win32','x64']
#Here we can add additional modes to compile for
self.configs = ['Debug','Release']
if self.swig_libdir:
# Generate SWIG header wrappers and external runtime
for swig in (generator.swig.header_wrappers,
generator.swig.checkout_swig_header,
generator.swig.external_runtime):
swig.Generator(self.conf, self.swig_exe).write()
else:
print("%s not found; skipping SWIG file generation..." % self.swig_exe)
def find_rootpath(self):
"Gets the root path as understand by the project system"
return ".." + "\\.." * self.projfilesdir.count(os.sep) + "\\"
def makeguid(self, data):
"Generate a windows style GUID"
### blah. this function can generate invalid GUIDs. leave it for now,
### but we need to fix it. we can wrap the apr UUID functions, or
### implement this from scratch using the algorithms described in
### http://www.webdav.org/specs/draft-leach-uuids-guids-01.txt
myhash = hashlib_md5(data).hexdigest()
guid = ("{%s-%s-%s-%s-%s}" % (myhash[0:8], myhash[8:12],
myhash[12:16], myhash[16:20],
myhash[20:32])).upper()
return guid
def path(self, *paths):
"""Convert build path to msvc path and prepend root"""
return self.rootpath + msvc_path_join(*list(map(msvc_path, paths)))
def apath(self, path, *paths):
"""Convert build path to msvc path and prepend root if not absolute"""
### On Unix, os.path.isabs won't do the right thing if "item"
### contains backslashes or drive letters
if os.path.isabs(path):
return msvc_path_join(msvc_path(path), *list(map(msvc_path, paths)))
else:
return self.rootpath + msvc_path_join(msvc_path(path),
*list(map(msvc_path, paths)))
def get_install_targets(self):
"Generate the list of targets"
# Get list of targets to generate project files for
install_targets = self.graph.get_all_sources(gen_base.DT_INSTALL) \
+ self.projects
# Don't create projects for scripts
install_targets = [x for x in install_targets if not isinstance(x, gen_base.TargetScript)]
# Drop the libsvn_fs_base target and tests if we don't have BDB
if not self.bdb_lib:
install_targets = [x for x in install_targets if x.name != 'libsvn_fs_base']
install_targets = [x for x in install_targets if not (isinstance(x, gen_base.TargetExe)
and x.install == 'bdb-test')]
# Drop the serf target if we don't have both serf and openssl
if not self.serf_lib:
install_targets = [x for x in install_targets if x.name != 'serf']
install_targets = [x for x in install_targets if x.name != 'libsvn_ra_serf']
if self.without_neon:
install_targets = [x for x in install_targets if x.name != 'neon']
install_targets = [x for x in install_targets if x.name != 'libsvn_ra_neon']
# Drop the swig targets if we don't have swig
if not self.swig_path and not self.swig_libdir:
install_targets = [x for x in install_targets
if not (isinstance(x, gen_base.TargetSWIG)
or isinstance(x, gen_base.TargetSWIGLib)
or isinstance(x, gen_base.TargetSWIGProject))]
dll_targets = []
for target in install_targets:
if isinstance(target, gen_base.TargetLib):
if target.msvc_fake:
install_targets.append(self.create_fake_target(target))
if target.msvc_export:
if self.disable_shared:
target.msvc_static = True
else:
dll_targets.append(self.create_dll_target(target))
install_targets.extend(dll_targets)
for target in install_targets:
target.project_guid = self.makeguid(target.name)
# sort these for output stability, to watch out for regressions.
install_targets.sort(key = lambda t: t.name)
return install_targets
def create_fake_target(self, dep):
"Return a new target which depends on another target but builds nothing"
section = gen_base.TargetProject.Section(gen_base.TargetProject,
dep.name + "_fake",
{'path': 'build/win32'}, self)
section.create_targets()
section.target.msvc_name = dep.msvc_name and dep.msvc_name + "_fake"
self.graph.add(gen_base.DT_LINK, section.target.name, dep)
dep.msvc_fake = section.target
return section.target
def create_dll_target(self, dep):
"Return a dynamic library that depends on a static library"
target = gen_base.TargetLib(dep.name,
{ 'path' : dep.path,
'msvc-name' : dep.name + "_dll" },
self)
target.msvc_export = dep.msvc_export
# move the description from the static library target to the dll.
target.desc = dep.desc
dep.desc = None
# The dependency should now be static.
dep.msvc_export = None
dep.msvc_static = True
# Remove the 'lib' prefix, so that the static library will be called
# svn_foo.lib
dep.name = dep.name[3:]
# However, its name should still be 'libsvn_foo' in Visual Studio
dep.msvc_name = target.name
# We renamed dep, so right now it has no dependencies. Because target has
# dep's old dependencies, transfer them over to dep.
deps = self.graph.deps[gen_base.DT_LINK]
deps[dep.name] = deps[target.name]
for key in deps.keys():
# Link everything except tests against the dll. Tests need to be linked
# against the static libraries because they sometimes access internal
# library functions.
if dep in deps[key] and key.find("test") == -1:
deps[key].remove(dep)
deps[key].append(target)
# The dll has exactly one dependency, the static library.
deps[target.name] = [ dep ]
return target
def get_configs(self, target):
"Get the list of configurations for the project"
configs = [ ]
for cfg in self.configs:
configs.append(
ProjectItem(name=cfg,
lower=cfg.lower(),
defines=self.get_win_defines(target, cfg),
libdirs=self.get_win_lib_dirs(target, cfg),
libs=self.get_win_libs(target, cfg),
))
return configs
def get_proj_sources(self, quote_path, target):
"Get the list of source files for each project"
sources = [ ]
javac_exe = "javac"
javah_exe = "javah"
jar_exe = "jar"
if self.jdk_path:
javac_exe = os.path.join(self.jdk_path, "bin", javac_exe)
javah_exe = os.path.join(self.jdk_path, "bin", javah_exe)
jar_exe = os.path.join(self.jdk_path, "bin", jar_exe)
if not isinstance(target, gen_base.TargetProject):
for source, object, reldir in self.get_win_sources(target):
cbuild = None
ctarget = None
cdesc = None
if isinstance(target, gen_base.TargetJavaHeaders):
classes = self.path(target.classes)
if self.junit_path is not None:
classes = "%s;%s" % (classes, self.junit_path)
headers = self.path(target.headers)
classname = target.package + "." + source.class_name
cbuild = "%s -verbose -force -classpath %s -d %s %s" \
% (self.quote(javah_exe), self.quote(classes),
self.quote(headers), classname)
ctarget = self.path(object.filename_win)
cdesc = "Generating %s" % (object.filename_win)
elif isinstance(target, gen_base.TargetJavaClasses):
classes = targetdir = self.path(target.classes)
if self.junit_path is not None:
classes = "%s;%s" % (classes, self.junit_path)
sourcepath = self.path(source.sourcepath)
cbuild = "%s -g -target 1.5 -source 1.5 -classpath %s -d %s " \
"-sourcepath %s $(InputPath)" \
% tuple(map(self.quote, (javac_exe, classes,
targetdir, sourcepath)))
ctarget = self.path(object.filename)
cdesc = "Compiling %s" % (source)
rsrc = self.path(str(source))
if quote_path and '-' in rsrc:
rsrc = '"%s"' % rsrc
sources.append(ProjectItem(path=rsrc, reldir=reldir, user_deps=[],
custom_build=cbuild, custom_target=ctarget,
custom_desc=cdesc,
extension=os.path.splitext(rsrc)[1]))
if isinstance(target, gen_base.TargetJavaClasses) and target.jar:
classdir = self.path(target.classes)
jarfile = msvc_path_join(classdir, target.jar)
cbuild = "%s cf %s -C %s %s" \
% (self.quote(jar_exe), jarfile, classdir,
" ".join(target.packages))
deps = [x.custom_target for x in sources]
sources.append(ProjectItem(path='makejar', reldir='', user_deps=deps,
custom_build=cbuild, custom_target=jarfile,
extension=''))
if isinstance(target, gen_base.TargetSWIG):
swig_options = self.swig.opts[target.lang].split()
swig_options.append('-DWIN32')
swig_deps = []
for include_dir in self.get_win_includes(target):
swig_options.append("-I%s" % self.quote(include_dir))
for obj in self.graph.get_sources(gen_base.DT_LINK, target.name):
if isinstance(obj, gen_base.SWIGObject):
for cobj in self.graph.get_sources(gen_base.DT_OBJECT, obj):
if isinstance(cobj, gen_base.SWIGObject):
csrc = self.path(cobj.filename)
cout = csrc
# included header files that the generated c file depends on
user_deps = swig_deps[:]
for iobj in self.graph.get_sources(gen_base.DT_SWIG_C, cobj):
isrc = self.path(str(iobj))
if not isinstance(iobj, gen_base.SWIGSource):
user_deps.append(isrc)
continue
cbuild = '%s %s -o %s $(InputPath)' \
% (self.swig_exe, " ".join(swig_options), cout)
cdesc = 'Generating %s' % cout
sources.append(ProjectItem(path=isrc, reldir=None,
custom_build=cbuild,
custom_target=csrc,
custom_desc=cdesc,
user_deps=user_deps,
extension=''))
def_file = self.get_def_file(target)
if def_file is not None:
gsrc = self.path("build/generator/extractor.py")
deps = [self.path('build.conf')]
for header in target.msvc_export:
deps.append(self.path('subversion/include', header))
cbuild = "%s $(InputPath) %s > %s" \
% (self.quote(sys.executable), " ".join(deps), def_file)
cdesc = 'Generating %s ' % def_file
sources.append(ProjectItem(path=gsrc, reldir=None,
custom_build=cbuild,
custom_target=def_file,
custom_desc=cdesc,
user_deps=deps,
extension=''))
sources.append(ProjectItem(path=def_file, reldir=None,
custom_build=None, user_deps=[],
extension=''))
sources.sort(key = lambda x: x.path)
return sources
def get_output_name(self, target):
if isinstance(target, gen_base.TargetExe):
return target.name + '.exe'
elif isinstance(target, gen_base.TargetJava):
### This target file is not actually built, but we need it to keep
### the VC Express build happy.
return target.name
elif isinstance(target, gen_base.TargetApacheMod):
return target.name + '.so'
elif isinstance(target, gen_base.TargetLib):
if target.msvc_static:
return '%s-%d.lib' % (target.name, self.version)
else:
return os.path.basename(target.filename)
elif isinstance(target, gen_base.TargetProject):
### Since this target type doesn't produce any output, we shouldn't
### need to specify an output filename. But to keep the VC.NET template
### happy for now we have to return something
return target.name + '.exe'
elif isinstance(target, gen_base.TargetI18N):
return target.name
def get_output_pdb(self, target):
name = self.get_output_name(target)
name = os.path.splitext(name)
return name[0] + '.pdb'
def get_output_dir(self, target):
if isinstance(target, gen_base.TargetJavaHeaders):
return msvc_path("../" + target.headers)
elif isinstance(target, gen_base.TargetJavaClasses):
return msvc_path("../" + target.classes)
else:
return msvc_path(target.path)
def get_intermediate_dir(self, target):
if isinstance(target, gen_base.TargetSWIG):
return msvc_path_join(msvc_path(target.path), target.name)
else:
return self.get_output_dir(target)
def get_def_file(self, target):
if isinstance(target, gen_base.TargetLib) and target.msvc_export \
and not self.disable_shared:
return target.name + ".def"
return None
def gen_proj_names(self, install_targets):
"Generate project file names for the targets"
# Generate project file names for the targets: replace dashes with
# underscores and replace *-test with test_* (so that the test
    # programs are visually separate from the rest of the projects)
for target in install_targets:
if target.msvc_name:
target.proj_name = target.msvc_name
continue
name = target.name
pos = name.find('-test')
if pos >= 0:
proj_name = 'test_' + name[:pos].replace('-', '_')
elif isinstance(target, gen_base.TargetSWIG):
proj_name = 'swig_' + name.replace('-', '_')
else:
proj_name = name.replace('-', '_')
target.proj_name = proj_name
def get_external_project(self, target, proj_ext):
if not ((isinstance(target, gen_base.TargetLinked)
or isinstance(target, gen_base.TargetI18N))
and target.external_project):
return None
if target.external_project[:5] == 'neon/':
path = self.neon_path + target.external_project[4:]
elif target.external_project[:5] == 'serf/' and self.serf_lib:
path = self.serf_path + target.external_project[4:]
elif target.external_project.find('/') != -1:
path = target.external_project
else:
path = os.path.join(self.projfilesdir, target.external_project)
return "%s.%s" % (gen_base.native_path(path), proj_ext)
def adjust_win_depends(self, target, name):
"Handle special dependencies if needed"
if name == '__CONFIG__':
depends = []
else:
depends = self.sections['__CONFIG__'].get_dep_targets(target)
depends.extend(self.get_win_depends(target, FILTER_PROJECTS))
# Make the default target generate the .mo files, too
if self.enable_nls and name == '__ALL__':
depends.extend(self.sections['locale'].get_targets())
# Build ZLib as a dependency of Neon or Serf if we have it
if self.zlib_path and (name == 'neon' or name == 'serf'):
depends.extend(self.sections['zlib'].get_targets())
# To set the correct build order of the JavaHL targets, the javahl-javah
# and libsvnjavahl targets are defined with extra dependencies in build.conf
# like this:
# add-deps = $(javahl_javah_DEPS) $(javahl_java_DEPS)
#
# This section parses those dependencies and adds them to the dependency list
# for this target.
if name.startswith('javahl') or name == 'libsvnjavahl':
      for dep in re.findall(r'\$\(([^\)]*)_DEPS\)', target.add_deps):
dep = dep.replace('_', '-')
depends.extend(self.sections[dep].get_targets())
return depends
def get_win_depends(self, target, mode):
"""Return the list of dependencies for target"""
dep_dict = {}
if isinstance(target, gen_base.TargetLib) and target.msvc_static:
self.get_static_win_depends(target, dep_dict)
else:
self.get_linked_win_depends(target, dep_dict)
deps = []
if mode == FILTER_PROJECTS:
for dep, (is_proj, is_lib, is_static) in dep_dict.items():
if is_proj:
deps.append(dep)
elif mode == FILTER_LIBS:
for dep, (is_proj, is_lib, is_static) in dep_dict.items():
if is_static or (is_lib and not is_proj):
deps.append(dep)
else:
raise NotImplementedError
deps.sort(key = lambda d: d.name)
return deps
def get_direct_depends(self, target):
"""Read target dependencies from graph
return value is list of (dependency, (is_project, is_lib, is_static)) tuples
"""
deps = []
for dep in self.graph.get_sources(gen_base.DT_LINK, target.name):
if not isinstance(dep, gen_base.Target):
continue
is_project = hasattr(dep, 'proj_name')
is_lib = isinstance(dep, gen_base.TargetLib)
is_static = is_lib and dep.msvc_static
deps.append((dep, (is_project, is_lib, is_static)))
for dep in self.graph.get_sources(gen_base.DT_NONLIB, target.name):
is_project = hasattr(dep, 'proj_name')
is_lib = isinstance(dep, gen_base.TargetLib)
is_static = is_lib and dep.msvc_static
deps.append((dep, (is_project, is_lib, is_static)))
return deps
def get_static_win_depends(self, target, deps):
"""Find project dependencies for a static library project"""
for dep, dep_kind in self.get_direct_depends(target):
is_proj, is_lib, is_static = dep_kind
# recurse for projectless targets
if not is_proj:
self.get_static_win_depends(dep, deps)
# Only add project dependencies on non-library projects. If we added
# project dependencies on libraries, MSVC would copy those libraries
# into the static archive. This would waste space and lead to linker
# warnings about multiply defined symbols. Instead, the library
# dependencies get added to any DLLs or EXEs that depend on this static
# library (see get_linked_win_depends() implementation).
if not is_lib:
deps[dep] = dep_kind
# a static library can depend on another library through a fake project
elif dep.msvc_fake:
deps[dep.msvc_fake] = dep_kind
def get_linked_win_depends(self, target, deps, static_recurse=0):
"""Find project dependencies for a DLL or EXE project"""
direct_deps = self.get_direct_depends(target)
for dep, dep_kind in direct_deps:
is_proj, is_lib, is_static = dep_kind
# add all top level dependencies
if not static_recurse or is_lib:
# We need to guard against linking both a static and a dynamic library
# into a project (this is mainly a concern for tests). To do this, for
# every dll dependency we first check to see if its corresponding
# static library is already in the list of dependencies. If it is,
# we don't add the dll to the list.
if is_lib and dep.msvc_export and not self.disable_shared:
static_dep = self.graph.get_sources(gen_base.DT_LINK, dep.name)[0]
if static_dep in deps:
continue
deps[dep] = dep_kind
# add any libraries that static library dependencies depend on
for dep, dep_kind in direct_deps:
is_proj, is_lib, is_static = dep_kind
# recurse for projectless dependencies
if not is_proj:
self.get_linked_win_depends(dep, deps, 0)
# also recurse into static library dependencies
elif is_static:
self.get_linked_win_depends(dep, deps, 1)
def get_win_defines(self, target, cfg):
"Return the list of defines for target"
fakedefines = ["WIN32","_WINDOWS","alloca=_alloca",
"_CRT_SECURE_NO_DEPRECATE=",
"_CRT_NONSTDC_NO_DEPRECATE=",
"_CRT_SECURE_NO_WARNINGS="]
if self.sqlite_inline:
fakedefines.append("SVN_SQLITE_INLINE")
if isinstance(target, gen_base.TargetApacheMod):
if target.name == 'mod_dav_svn':
fakedefines.extend(["AP_DECLARE_EXPORT"])
if target.name.find('ruby') == -1:
fakedefines.append("snprintf=_snprintf")
if isinstance(target, gen_base.TargetSWIG):
fakedefines.append("SWIG_GLOBAL")
      # Expect rb_errinfo() to be available in Ruby 1.9+,
# rather than ruby_errinfo.
if (self.ruby_major_version > 1 or self.ruby_minor_version > 8):
fakedefines.extend(["HAVE_RB_ERRINFO"])
if cfg == 'Debug':
fakedefines.extend(["_DEBUG","SVN_DEBUG"])
elif cfg == 'Release':
fakedefines.append("NDEBUG")
if self.static_apr:
fakedefines.extend(["APR_DECLARE_STATIC", "APU_DECLARE_STATIC"])
# XXX: Check if db is present, and if so, let apr-util know
# XXX: This is a hack until the apr build system is improved to
# XXX: know these things for itself.
if self.bdb_lib:
fakedefines.append("APU_HAVE_DB=1")
fakedefines.append("SVN_LIBSVN_FS_LINKS_FS_BASE=1")
# check if they wanted nls
if self.enable_nls:
fakedefines.append("ENABLE_NLS")
# check for neon 0.26.x or newer
if self.neon_ver >= 26000:
fakedefines.append("SVN_NEON_0_26=1")
# check for neon 0.27.x or newer
if self.neon_ver >= 27000:
fakedefines.append("SVN_NEON_0_27=1")
# check for neon 0.28.x or newer
if self.neon_ver >= 28000:
fakedefines.append("SVN_NEON_0_28=1")
if self.serf_lib:
fakedefines.append("SVN_HAVE_SERF")
fakedefines.append("SVN_LIBSVN_CLIENT_LINKS_RA_SERF")
if self.neon_lib:
fakedefines.append("SVN_HAVE_NEON")
fakedefines.append("SVN_LIBSVN_CLIENT_LINKS_RA_NEON")
# check we have sasl
if self.sasl_path:
fakedefines.append("SVN_HAVE_SASL")
if target.name.endswith('svn_subr'):
fakedefines.append("SVN_USE_WIN32_CRASHHANDLER")
# use static linking to Expat
fakedefines.append("XML_STATIC")
return fakedefines
def get_win_includes(self, target):
"Return the list of include directories for target"
fakeincludes = [ self.path("subversion/include"),
self.path("subversion"),
self.apath(self.apr_path, "include"),
self.apath(self.apr_util_path, "include") ]
if target.name == 'mod_authz_svn':
fakeincludes.extend([ self.apath(self.httpd_path, "modules/aaa") ])
if isinstance(target, gen_base.TargetApacheMod):
fakeincludes.extend([ self.apath(self.apr_util_path, "xml/expat/lib"),
self.apath(self.httpd_path, "include"),
self.apath(self.bdb_path, "include") ])
elif isinstance(target, gen_base.TargetSWIG):
util_includes = "subversion/bindings/swig/%s/libsvn_swig_%s" \
% (target.lang,
gen_base.lang_utillib_suffix[target.lang])
fakeincludes.extend([ self.path("subversion/bindings/swig"),
self.path("subversion/bindings/swig/proxy"),
self.path("subversion/bindings/swig/include"),
self.path(util_includes) ])
else:
fakeincludes.extend([ self.apath(self.apr_util_path, "xml/expat/lib"),
self.apath(self.neon_path, "src"),
self.path("subversion/bindings/swig/proxy"),
self.apath(self.bdb_path, "include") ])
if self.libintl_path:
fakeincludes.append(self.apath(self.libintl_path, 'inc'))
if self.serf_lib:
fakeincludes.append(self.apath(self.serf_path))
if self.swig_libdir \
and (isinstance(target, gen_base.TargetSWIG)
or isinstance(target, gen_base.TargetSWIGLib)):
if self.swig_vernum >= 103028:
fakeincludes.append(self.apath(self.swig_libdir, target.lang))
if target.lang == 'perl':
# At least swigwin 1.3.38+ uses perl5 as directory name. Just add it
# to the list to make sure we don't break old versions
fakeincludes.append(self.apath(self.swig_libdir, 'perl5'))
else:
fakeincludes.append(self.swig_libdir)
if target.lang == "perl":
fakeincludes.extend(self.perl_includes)
if target.lang == "python":
fakeincludes.extend(self.python_includes)
if target.lang == "ruby":
fakeincludes.extend(self.ruby_includes)
fakeincludes.append(self.apath(self.zlib_path))
if self.sqlite_inline:
fakeincludes.append(self.apath(self.sqlite_path))
else:
fakeincludes.append(self.apath(self.sqlite_path, 'inc'))
if self.sasl_path:
fakeincludes.append(self.apath(self.sasl_path, 'include'))
if target.name == "libsvnjavahl" and self.jdk_path:
fakeincludes.append(os.path.join(self.jdk_path, 'include'))
fakeincludes.append(os.path.join(self.jdk_path, 'include', 'win32'))
return fakeincludes
def get_win_lib_dirs(self, target, cfg):
"Return the list of library directories for target"
libcfg = cfg.replace("Debug", "LibD").replace("Release", "LibR")
fakelibdirs = [ self.apath(self.bdb_path, "lib"),
self.apath(self.neon_path),
self.apath(self.zlib_path),
]
if not self.sqlite_inline:
fakelibdirs.append(self.apath(self.sqlite_path, "lib"))
if self.sasl_path:
fakelibdirs.append(self.apath(self.sasl_path, "lib"))
if self.serf_lib:
fakelibdirs.append(self.apath(msvc_path_join(self.serf_path, cfg)))
fakelibdirs.append(self.apath(self.apr_path, cfg))
fakelibdirs.append(self.apath(self.apr_util_path, cfg))
fakelibdirs.append(self.apath(self.apr_util_path, 'xml', 'expat',
'lib', libcfg))
if isinstance(target, gen_base.TargetApacheMod):
fakelibdirs.append(self.apath(self.httpd_path, cfg))
if target.name == 'mod_dav_svn':
fakelibdirs.append(self.apath(self.httpd_path, "modules/dav/main",
cfg))
if self.swig_libdir \
and (isinstance(target, gen_base.TargetSWIG)
or isinstance(target, gen_base.TargetSWIGLib)):
if target.lang == "perl" and self.perl_libdir:
fakelibdirs.append(self.perl_libdir)
if target.lang == "python" and self.python_libdir:
fakelibdirs.append(self.python_libdir)
if target.lang == "ruby" and self.ruby_libdir:
fakelibdirs.append(self.ruby_libdir)
return fakelibdirs
def get_win_libs(self, target, cfg):
"Return the list of external libraries needed for target"
dblib = None
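    # Debug builds link the 'd'-suffixed variants of the Berkeley DB and neon
    # import libraries (e.g. a hypothetical 'libdb44' becomes 'libdb44d.lib').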
if self.bdb_lib:
dblib = self.bdb_lib+(cfg == 'Debug' and 'd.lib' or '.lib')
if self.neon_lib:
neonlib = self.neon_lib+(cfg == 'Debug' and 'd.lib' or '.lib')
if self.serf_lib:
if self.serf_ver_maj == 1:
serflib = 'serf-1.lib'
else:
serflib = 'serf.lib'
zlib = (cfg == 'Debug' and 'zlibstatD.lib' or 'zlibstat.lib')
sasllib = None
if self.sasl_path:
sasllib = 'libsasl.lib'
if not isinstance(target, gen_base.TargetLinked):
return []
if isinstance(target, gen_base.TargetLib) and target.msvc_static:
return []
nondeplibs = target.msvc_libs[:]
nondeplibs.append(zlib)
if self.enable_nls:
if self.libintl_path:
nondeplibs.append(self.apath(self.libintl_path,
'lib', 'intl3_svn.lib'))
else:
nondeplibs.append('intl3_svn.lib')
if isinstance(target, gen_base.TargetExe):
nondeplibs.append('setargv.obj')
if ((isinstance(target, gen_base.TargetSWIG)
or isinstance(target, gen_base.TargetSWIGLib))
and target.lang == 'perl'):
nondeplibs.append(self.perl_lib)
if ((isinstance(target, gen_base.TargetSWIG)
or isinstance(target, gen_base.TargetSWIGLib))
and target.lang == 'ruby'):
nondeplibs.append(self.ruby_lib)
for dep in self.get_win_depends(target, FILTER_LIBS):
nondeplibs.extend(dep.msvc_libs)
if dep.external_lib == '$(SVN_DB_LIBS)':
nondeplibs.append(dblib)
if dep.external_lib == '$(SVN_SQLITE_LIBS)' and not self.sqlite_inline:
nondeplibs.append('sqlite3.lib')
if self.neon_lib and dep.external_lib == '$(NEON_LIBS)':
nondeplibs.append(neonlib)
if self.serf_lib and dep.external_lib == '$(SVN_SERF_LIBS)':
nondeplibs.append(serflib)
if dep.external_lib == '$(SVN_SASL_LIBS)':
nondeplibs.append(sasllib)
if dep.external_lib == '$(SVN_APR_LIBS)':
nondeplibs.append(self.apr_lib)
if dep.external_lib == '$(SVN_APRUTIL_LIBS)':
nondeplibs.append(self.aprutil_lib)
if dep.external_lib == '$(SVN_XML_LIBS)':
nondeplibs.append('xml.lib')
return gen_base.unique(nondeplibs)
def get_win_sources(self, target, reldir_prefix=''):
"Return the list of source files that need to be compliled for target"
sources = { }
for obj in self.graph.get_sources(gen_base.DT_LINK, target.name):
if isinstance(obj, gen_base.Target):
continue
for src in self.graph.get_sources(gen_base.DT_OBJECT, obj):
if isinstance(src, gen_base.SourceFile):
if reldir_prefix:
if src.reldir:
reldir = reldir_prefix + '\\' + src.reldir
else:
reldir = reldir_prefix
else:
reldir = src.reldir
else:
reldir = ''
sources[src] = src, obj, reldir
return list(sources.values())
def write_file_if_changed(self, fname, new_contents):
"""Rewrite the file if new_contents are different than its current content.
If you have your windows projects open and generate the projects
it's not a small thing for windows to re-read all projects so
only update those that have changed.
"""
try:
old_contents = open(fname, 'rb').read()
except IOError:
old_contents = None
if old_contents != new_contents:
open(fname, 'wb').write(new_contents)
print("Wrote: %s" % fname)
def write_with_template(self, fname, tname, data):
fout = StringIO()
template = ezt.Template(compress_whitespace = 0)
template.parse_file(os.path.join('build', 'generator', tname))
template.generate(fout, data)
self.write_file_if_changed(fname, fout.getvalue())
def write_zlib_project_file(self, name):
if not self.zlib_path:
return
zlib_path = os.path.abspath(self.zlib_path)
self.move_proj_file(self.projfilesdir, name,
(('zlib_path', zlib_path),
('zlib_sources',
glob.glob(os.path.join(zlib_path, '*.c'))
+ glob.glob(os.path.join(zlib_path,
'contrib/masmx86/*.c'))
+ glob.glob(os.path.join(zlib_path,
'contrib/masmx86/*.asm'))),
('zlib_headers',
glob.glob(os.path.join(zlib_path, '*.h'))),
('zlib_version', self.zlib_version),
('project_guid', self.makeguid('zlib')),
('use_ml', self.have_ml and 1 or None),
))
def write_neon_project_file(self, name):
if self.without_neon:
return
neon_path = os.path.abspath(self.neon_path)
self.move_proj_file(self.neon_path, name,
(('neon_sources',
glob.glob(os.path.join(neon_path, 'src', '*.c'))),
('neon_headers',
glob.glob(os.path.join(neon_path, 'src', '*.h'))),
('expat_path',
os.path.join(os.path.abspath(self.apr_util_path),
'xml', 'expat', 'lib')),
('zlib_path', self.zlib_path
and os.path.abspath(self.zlib_path)),
('openssl_path',
self.openssl_path
and os.path.abspath(self.openssl_path)),
('project_guid', self.makeguid('neon')),
))
def write_serf_project_file(self, name):
if not self.serf_lib:
return
serf_path = os.path.abspath(self.serf_path)
if self.serf_ver_maj == 1:
serflib = 'serf-1.lib'
else:
serflib = 'serf.lib'
self.move_proj_file(self.serf_path, name,
(('serf_sources',
glob.glob(os.path.join(serf_path, '*.c'))
+ glob.glob(os.path.join(serf_path, 'auth', '*.c'))
+ glob.glob(os.path.join(serf_path, 'buckets',
'*.c'))),
('serf_headers',
glob.glob(os.path.join(serf_path, '*.h'))
+ glob.glob(os.path.join(serf_path, 'auth', '*.h'))
+ glob.glob(os.path.join(serf_path, 'buckets',
'*.h'))),
('zlib_path', self.zlib_path
and os.path.abspath(self.zlib_path)),
('openssl_path',
self.openssl_path
and os.path.abspath(self.openssl_path)),
('apr_path', os.path.abspath(self.apr_path)),
('apr_util_path', os.path.abspath(self.apr_util_path)),
('project_guid', self.makeguid('serf')),
('apr_static', self.static_apr),
('serf_lib', serflib),
))
def move_proj_file(self, path, name, params=()):
### Move our slightly templatized pre-built project files into place --
### these projects include zlib, neon, serf, locale, config, etc.
dest_file = os.path.join(path, name)
source_template = os.path.join('templates', name + '.ezt')
data = {
'version' : self.vcproj_version,
'configs' : self.configs,
'platforms' : self.platforms
}
for key, val in params:
data[key] = val
self.write_with_template(dest_file, source_template, data)
def write(self):
"Override me when creating a new project type"
raise NotImplementedError
def _find_perl(self):
"Find the right perl library name to link swig bindings with"
self.perl_includes = []
self.perl_libdir = None
fp = os.popen('perl -MConfig -e ' + escape_shell_arg(
'print "$Config{PERL_REVISION}$Config{PERL_VERSION}"'), 'r')
try:
line = fp.readline()
if line:
msg = 'Found installed perl version number.'
self.perl_lib = 'perl' + line.rstrip() + '.lib'
else:
msg = 'Could not detect perl version.'
self.perl_lib = 'perl56.lib'
print('%s\n Perl bindings will be linked with %s\n'
% (msg, self.perl_lib))
finally:
fp.close()
fp = os.popen('perl -MConfig -e ' + escape_shell_arg(
'print $Config{archlib}'), 'r')
try:
line = fp.readline()
if line:
self.perl_libdir = os.path.join(line, 'CORE')
self.perl_includes = [os.path.join(line, 'CORE')]
finally:
fp.close()
def _find_ruby(self):
"Find the right Ruby library name to link swig bindings with"
self.ruby_includes = []
self.ruby_libdir = None
self.ruby_version = None
self.ruby_major_version = None
self.ruby_minor_version = None
proc = os.popen('ruby -rrbconfig -e ' + escape_shell_arg(
"puts Config::CONFIG['ruby_version'];"
"puts Config::CONFIG['LIBRUBY'];"
"puts Config::CONFIG['archdir'];"
"puts Config::CONFIG['libdir'];"), 'r')
try:
rubyver = proc.readline()[:-1]
if rubyver:
self.ruby_version = rubyver
        self.ruby_major_version = int(self.ruby_version[0])
        self.ruby_minor_version = int(self.ruby_version[2])
libruby = proc.readline()[:-1]
if libruby:
msg = 'Found installed ruby %s' % rubyver
self.ruby_lib = libruby
self.ruby_includes.append(proc.readline()[:-1])
self.ruby_libdir = proc.readline()[:-1]
else:
msg = 'Could not detect Ruby version, assuming 1.8.'
self.ruby_version = "1.8"
self.ruby_major_version = 1
self.ruby_minor_version = 8
self.ruby_lib = 'msvcrt-ruby18.lib'
print('%s\n Ruby bindings will be linked with %s\n'
% (msg, self.ruby_lib))
finally:
proc.close()
def _find_python(self):
"Find the appropriate options for creating SWIG-based Python modules"
self.python_includes = []
self.python_libdir = ""
try:
from distutils import sysconfig
inc = sysconfig.get_python_inc()
plat = sysconfig.get_python_inc(plat_specific=1)
self.python_includes.append(inc)
if inc != plat:
self.python_includes.append(plat)
self.python_libdir = self.apath(sysconfig.PREFIX, "libs")
except ImportError:
pass
def _find_jdk(self):
if not self.jdk_path:
jdk_ver = None
try:
try:
# Python >=3.0
import winreg
except ImportError:
# Python <3.0
import _winreg as winreg
key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE,
r"SOFTWARE\JavaSoft\Java Development Kit")
# Find the newest JDK version.
num_values = winreg.QueryInfoKey(key)[1]
for i in range(num_values):
(name, value, key_type) = winreg.EnumValue(key, i)
if name == "CurrentVersion":
jdk_ver = value
break
# Find the JDK path.
if jdk_ver is not None:
key = winreg.OpenKey(key, jdk_ver)
num_values = winreg.QueryInfoKey(key)[1]
for i in range(num_values):
(name, value, key_type) = winreg.EnumValue(key, i)
if name == "JavaHome":
self.jdk_path = value
break
winreg.CloseKey(key)
except (ImportError, EnvironmentError):
pass
if self.jdk_path:
print("Found JDK version %s in %s\n" % (jdk_ver, self.jdk_path))
else:
print("Using JDK in %s\n" % (self.jdk_path))
def _find_swig(self):
    # Require at least SWIG 1.3.24; if SWIG is not found at all, assume 1.3.25.
default_version = '1.3.25'
minimum_version = '1.3.24'
vernum = 103025
minimum_vernum = 103024
libdir = ''
if self.swig_path is not None:
self.swig_exe = os.path.abspath(os.path.join(self.swig_path, 'swig'))
else:
self.swig_exe = 'swig'
try:
outfp = subprocess.Popen([self.swig_exe, '-version'], stdout=subprocess.PIPE, universal_newlines=True).stdout
txt = outfp.read()
if txt:
vermatch = re.compile(r'^SWIG\ Version\ (\d+)\.(\d+)\.(\d+)$', re.M) \
.search(txt)
else:
vermatch = None
if vermatch:
version = tuple(map(int, vermatch.groups()))
# build/ac-macros/swig.m4 explains the next incantation
vernum = int('%d%02d%03d' % version)
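        # e.g. a reported version of 1.3.29 yields vernum 103029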
print('Found installed SWIG version %d.%d.%d\n' % version)
if vernum < minimum_vernum:
print('WARNING: Subversion requires version %s\n'
% minimum_version)
libdir = self._find_swig_libdir()
else:
print('Could not find installed SWIG,'
' assuming version %s\n' % default_version)
self.swig_libdir = ''
outfp.close()
except OSError:
print('Could not find installed SWIG,'
' assuming version %s\n' % default_version)
self.swig_libdir = ''
self.swig_vernum = vernum
self.swig_libdir = libdir
def _find_swig_libdir(self):
fp = os.popen(self.swig_exe + ' -swiglib', 'r')
try:
libdir = fp.readline().rstrip()
if libdir:
print('Using SWIG library directory %s\n' % libdir)
return libdir
else:
print('WARNING: could not find SWIG library directory\n')
finally:
fp.close()
return ''
def _find_ml(self):
"Check if the ML assembler is in the path"
if not self.enable_ml:
self.have_ml = 0
return
fp = os.popen('ml /help', 'r')
try:
line = fp.readline()
if line:
msg = 'Found ML, ZLib build will use ASM sources'
self.have_ml = 1
else:
msg = 'Could not find ML, ZLib build will not use ASM sources'
self.have_ml = 0
print('%s\n' % (msg,))
finally:
fp.close()
def _find_neon(self):
"Find the neon version"
msg = 'WARNING: Unable to determine neon version\n'
if self.without_neon:
self.neon_lib = None
msg = 'Not attempting to find neon\n'
else:
try:
self.neon_lib = "libneon"
fp = open(os.path.join(self.neon_path, '.version'))
txt = fp.read()
vermatch = re.compile(r'(\d+)\.(\d+)\.(\d+)$', re.M) \
.search(txt)
if vermatch:
version = tuple(map(int, vermatch.groups()))
# build/ac-macros/swig.m4 explains the next incantation
self.neon_ver = int('%d%02d%03d' % version)
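          # e.g. a .version file of 0.28.3 yields neon_ver 28003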
msg = 'Found neon version %d.%d.%d\n' % version
if self.neon_ver < 25005:
msg = 'WARNING: Neon version 0.25.5 or higher is required'
except:
msg = 'WARNING: Error while determining neon version\n'
self.neon_lib = None
print(msg)
def _get_serf_version(self):
"Retrieves the serf version from serf.h"
# shouldn't be called unless serf is there
assert self.serf_path and os.path.exists(self.serf_path)
self.serf_ver_maj = None
self.serf_ver_min = None
self.serf_ver_patch = None
# serf.h should be present
if not os.path.exists(os.path.join(self.serf_path, 'serf.h')):
return None, None, None
txt = open(os.path.join(self.serf_path, 'serf.h')).read()
maj_match = re.search(r'SERF_MAJOR_VERSION\s+(\d+)', txt)
min_match = re.search(r'SERF_MINOR_VERSION\s+(\d+)', txt)
patch_match = re.search(r'SERF_PATCH_VERSION\s+(\d+)', txt)
if maj_match:
self.serf_ver_maj = int(maj_match.group(1))
if min_match:
self.serf_ver_min = int(min_match.group(1))
if patch_match:
self.serf_ver_patch = int(patch_match.group(1))
return self.serf_ver_maj, self.serf_ver_min, self.serf_ver_patch
def _find_serf(self):
"Check if serf and its dependencies are available"
minimal_serf_version = (0, 3, 0)
self.serf_lib = None
if self.serf_path and os.path.exists(self.serf_path):
if self.openssl_path and os.path.exists(self.openssl_path):
self.serf_lib = 'serf'
version = self._get_serf_version()
if None in version:
        msg = 'Unknown serf version found, but will try to build ' \
'ra_serf.\n'
else:
self.serf_ver = '.'.join(str(v) for v in version)
if version < minimal_serf_version:
self.serf_lib = None
msg = 'Found serf %s, but >= %s is required. ra_serf will not be built.\n' % \
(self.serf_ver, '.'.join(str(v) for v in minimal_serf_version))
else:
msg = 'Found serf version %s\n' % self.serf_ver
print(msg)
else:
print('openssl not found, ra_serf will not be built\n')
else:
print('serf not found, ra_serf will not be built\n')
def _find_apr(self):
"Find the APR library and version"
version_file_path = os.path.join(self.apr_path, 'include',
'apr_version.h')
if not os.path.exists(version_file_path):
sys.stderr.write("ERROR: '%s' not found.\n" % version_file_path);
sys.stderr.write("Use '--with-apr' option to configure APR location.\n");
sys.exit(1)
fp = open(version_file_path)
txt = fp.read()
fp.close()
vermatch = re.search(r'^\s*#define\s+APR_MAJOR_VERSION\s+(\d+)', txt, re.M)
major_ver = int(vermatch.group(1))
suffix = ''
if major_ver > 0:
suffix = '-%d' % major_ver
if self.static_apr:
self.apr_lib = 'apr%s.lib' % suffix
else:
self.apr_lib = 'libapr%s.lib' % suffix
def _find_apr_util(self):
"Find the APR-util library and version"
version_file_path = os.path.join(self.apr_util_path, 'include',
'apu_version.h')
if not os.path.exists(version_file_path):
sys.stderr.write("ERROR: '%s' not found.\n" % version_file_path);
sys.stderr.write("Use '--with-apr-util' option to configure APR-Util location.\n");
sys.exit(1)
fp = open(version_file_path)
txt = fp.read()
fp.close()
vermatch = re.search(r'^\s*#define\s+APU_MAJOR_VERSION\s+(\d+)', txt, re.M)
major_ver = int(vermatch.group(1))
suffix = ''
if major_ver > 0:
suffix = '-%d' % major_ver
if self.static_apr:
self.aprutil_lib = 'aprutil%s.lib' % suffix
else:
self.aprutil_lib = 'libaprutil%s.lib' % suffix
def _find_sqlite(self):
"Find the Sqlite library and version"
header_file = os.path.join(self.sqlite_path, 'inc', 'sqlite3.h')
# First check for compiled version of SQLite.
if os.path.exists(header_file):
# Compiled SQLite seems found, check for sqlite3.lib file.
lib_file = os.path.join(self.sqlite_path, 'lib', 'sqlite3.lib')
if not os.path.exists(lib_file):
sys.stderr.write("ERROR: '%s' not found.\n" % lib_file)
sys.stderr.write("Use '--with-sqlite' option to configure sqlite location.\n");
sys.exit(1)
self.sqlite_inline = False
else:
# Compiled SQLite not found. Try amalgamation version.
amalg_file = os.path.join(self.sqlite_path, 'sqlite3.c')
if not os.path.exists(amalg_file):
sys.stderr.write("ERROR: SQLite not found in '%s' directory.\n" % self.sqlite_path)
sys.stderr.write("Use '--with-sqlite' option to configure sqlite location.\n");
sys.exit(1)
header_file = os.path.join(self.sqlite_path, 'sqlite3.h')
self.sqlite_inline = True
fp = open(header_file)
txt = fp.read()
fp.close()
vermatch = re.search(r'^\s*#define\s+SQLITE_VERSION\s+"(\d+)\.(\d+)\.(\d+)(?:\.\d)?"', txt, re.M)
version = tuple(map(int, vermatch.groups()))
self.sqlite_version = '%d.%d.%d' % version
msg = 'Found SQLite version %s\n'
major, minor, patch = version
if major < 3 or (major == 3 and minor < 6) \
or (major == 3 and minor == 6 and patch < 18):
sys.stderr.write("ERROR: SQLite 3.6.18 or higher is required "
"(%s found)\n" % self.sqlite_version);
sys.exit(1)
else:
print(msg % self.sqlite_version)
def _find_zlib(self):
"Find the ZLib library and version"
if not self.zlib_path:
self.zlib_version = '1'
return
header_file = os.path.join(self.zlib_path, 'zlib.h')
if not os.path.exists(header_file):
self.zlib_version = '1'
return
fp = open(header_file)
txt = fp.read()
fp.close()
vermatch = re.search(r'^\s*#define\s+ZLIB_VERSION\s+"(\d+)\.(\d+)\.(\d+)(?:\.\d)?"', txt, re.M)
version = tuple(map(int, vermatch.groups()))
self.zlib_version = '%d.%d.%d' % version
msg = 'Found ZLib version %s\n'
print(msg % self.zlib_version)
class ProjectItem:
"A generic item class for holding sources info, config info, etc for a project"
def __init__(self, **kw):
vars(self).update(kw)
# ============================================================================
# This is a cut-down and modified version of code from:
# subversion/subversion/bindings/swig/python/svn/core.py
#
if sys.platform == "win32":
_escape_shell_arg_re = re.compile(r'(\\+)(\"|$)')
def escape_shell_arg(arg):
# The (very strange) parsing rules used by the C runtime library are
# described at:
# http://msdn.microsoft.com/library/en-us/vclang/html/_pluslang_Parsing_C.2b2b_.Command.2d.Line_Arguments.asp
# double up slashes, but only if they are followed by a quote character
arg = re.sub(_escape_shell_arg_re, r'\1\1\2', arg)
# surround by quotes and escape quotes inside
arg = '"' + arg.replace('"', '"^""') + '"'
return arg
else:
def escape_shell_arg(str):
return "'" + str.replace("'", "'\\''") + "'"
# ============================================================================
FILTER_LIBS = 1
FILTER_PROJECTS = 2
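# Modes for get_win_depends(): FILTER_PROJECTS selects dependencies that have
# their own project files (build-order dependencies); FILTER_LIBS selects the
# library dependencies whose .lib files must be passed to the linker.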
class POFile:
"Item class for holding po file info"
def __init__(self, base):
self.po = base + '.po'
self.spo = base + '.spo'
self.mo = base + '.mo'
# MSVC paths always use backslashes regardless of current platform
def msvc_path(path):
"""Convert a build path to an msvc path"""
return path.replace('/', '\\')
def msvc_path_join(*path_parts):
"""Join path components into an msvc path"""
return '\\'.join(path_parts)
|
apache-2.0
|
b0noI/AIF2
|
src/test/integration/python/threshold_p_for_first_filter_separator_character.py
|
3
|
50964
|
# data collected by PropertyBasedSettingsTest.experimentWith_threshold_p_for_first_filter_separator_character
data = [
{"value": 0.000000, "errors": 55},
{"value": 0.000500, "errors": 55},
{"value": 0.001000, "errors": 55},
{"value": 0.001500, "errors": 54},
{"value": 0.002000, "errors": 54},
{"value": 0.002500, "errors": 54},
{"value": 0.003000, "errors": 53},
{"value": 0.003500, "errors": 53},
{"value": 0.004000, "errors": 53},
{"value": 0.004500, "errors": 53},
{"value": 0.005000, "errors": 53},
{"value": 0.005500, "errors": 53},
{"value": 0.006000, "errors": 53},
{"value": 0.006500, "errors": 53},
{"value": 0.007000, "errors": 53},
{"value": 0.007500, "errors": 53},
{"value": 0.008000, "errors": 53},
{"value": 0.008500, "errors": 53},
{"value": 0.009000, "errors": 53},
{"value": 0.009500, "errors": 53},
{"value": 0.010000, "errors": 53},
{"value": 0.010500, "errors": 53},
{"value": 0.011000, "errors": 53},
{"value": 0.011500, "errors": 53},
{"value": 0.012000, "errors": 53},
{"value": 0.012500, "errors": 53},
{"value": 0.013000, "errors": 53},
{"value": 0.013500, "errors": 53},
{"value": 0.014000, "errors": 53},
{"value": 0.014500, "errors": 53},
{"value": 0.015000, "errors": 53},
{"value": 0.015500, "errors": 53},
{"value": 0.016000, "errors": 53},
{"value": 0.016500, "errors": 53},
{"value": 0.017000, "errors": 53},
{"value": 0.017500, "errors": 53},
{"value": 0.018000, "errors": 53},
{"value": 0.018500, "errors": 53},
{"value": 0.019000, "errors": 53},
{"value": 0.019500, "errors": 53},
{"value": 0.020000, "errors": 53},
{"value": 0.020500, "errors": 53},
{"value": 0.021000, "errors": 53},
{"value": 0.021500, "errors": 53},
{"value": 0.022000, "errors": 53},
{"value": 0.022500, "errors": 53},
{"value": 0.023000, "errors": 53},
{"value": 0.023500, "errors": 53},
{"value": 0.024000, "errors": 53},
{"value": 0.024500, "errors": 53},
{"value": 0.025000, "errors": 53},
{"value": 0.025500, "errors": 53},
{"value": 0.026000, "errors": 53},
{"value": 0.026500, "errors": 53},
{"value": 0.027000, "errors": 53},
{"value": 0.027500, "errors": 53},
{"value": 0.028000, "errors": 53},
{"value": 0.028500, "errors": 53},
{"value": 0.029000, "errors": 53},
{"value": 0.029500, "errors": 53},
{"value": 0.030000, "errors": 53},
{"value": 0.030500, "errors": 53},
{"value": 0.031000, "errors": 53},
{"value": 0.031500, "errors": 53},
{"value": 0.032000, "errors": 53},
{"value": 0.032500, "errors": 53},
{"value": 0.033000, "errors": 53},
{"value": 0.033500, "errors": 53},
{"value": 0.034000, "errors": 53},
{"value": 0.034500, "errors": 53},
{"value": 0.035000, "errors": 53},
{"value": 0.035500, "errors": 53},
{"value": 0.036000, "errors": 53},
{"value": 0.036500, "errors": 53},
{"value": 0.037000, "errors": 53},
{"value": 0.037500, "errors": 53},
{"value": 0.038000, "errors": 53},
{"value": 0.038500, "errors": 53},
{"value": 0.039000, "errors": 53},
{"value": 0.039500, "errors": 53},
{"value": 0.040000, "errors": 53},
{"value": 0.040500, "errors": 53},
{"value": 0.041000, "errors": 53},
{"value": 0.041500, "errors": 53},
{"value": 0.042000, "errors": 53},
{"value": 0.042500, "errors": 53},
{"value": 0.043000, "errors": 53},
{"value": 0.043500, "errors": 53},
{"value": 0.044000, "errors": 53},
{"value": 0.044500, "errors": 53},
{"value": 0.045000, "errors": 53},
{"value": 0.045500, "errors": 53},
{"value": 0.046000, "errors": 53},
{"value": 0.046500, "errors": 53},
{"value": 0.047000, "errors": 53},
{"value": 0.047500, "errors": 53},
{"value": 0.048000, "errors": 53},
{"value": 0.048500, "errors": 53},
{"value": 0.049000, "errors": 53},
{"value": 0.049500, "errors": 53},
{"value": 0.050000, "errors": 53},
{"value": 0.050500, "errors": 53},
{"value": 0.051000, "errors": 53},
{"value": 0.051500, "errors": 53},
{"value": 0.052000, "errors": 53},
{"value": 0.052500, "errors": 53},
{"value": 0.053000, "errors": 53},
{"value": 0.053500, "errors": 53},
{"value": 0.054000, "errors": 53},
{"value": 0.054500, "errors": 53},
{"value": 0.055000, "errors": 53},
{"value": 0.055500, "errors": 53},
{"value": 0.056000, "errors": 53},
{"value": 0.056500, "errors": 53},
{"value": 0.057000, "errors": 53},
{"value": 0.057500, "errors": 53},
{"value": 0.058000, "errors": 53},
{"value": 0.058500, "errors": 53},
{"value": 0.059000, "errors": 53},
{"value": 0.059500, "errors": 53},
{"value": 0.060000, "errors": 53},
{"value": 0.060500, "errors": 53},
{"value": 0.061000, "errors": 53},
{"value": 0.061500, "errors": 53},
{"value": 0.062000, "errors": 53},
{"value": 0.062500, "errors": 53},
{"value": 0.063000, "errors": 53},
{"value": 0.063500, "errors": 53},
{"value": 0.064000, "errors": 53},
{"value": 0.064500, "errors": 53},
{"value": 0.065000, "errors": 53},
{"value": 0.065500, "errors": 53},
{"value": 0.066000, "errors": 53},
{"value": 0.066500, "errors": 53},
{"value": 0.067000, "errors": 53},
{"value": 0.067500, "errors": 53},
{"value": 0.068000, "errors": 53},
{"value": 0.068500, "errors": 53},
{"value": 0.069000, "errors": 53},
{"value": 0.069500, "errors": 53},
{"value": 0.070000, "errors": 53},
{"value": 0.070500, "errors": 53},
{"value": 0.071000, "errors": 53},
{"value": 0.071500, "errors": 53},
{"value": 0.072000, "errors": 53},
{"value": 0.072500, "errors": 53},
{"value": 0.073000, "errors": 53},
{"value": 0.073500, "errors": 53},
{"value": 0.074000, "errors": 53},
{"value": 0.074500, "errors": 53},
{"value": 0.075000, "errors": 53},
{"value": 0.075500, "errors": 53},
{"value": 0.076000, "errors": 53},
{"value": 0.076500, "errors": 53},
{"value": 0.077000, "errors": 53},
{"value": 0.077500, "errors": 53},
{"value": 0.078000, "errors": 53},
{"value": 0.078500, "errors": 53},
{"value": 0.079000, "errors": 53},
{"value": 0.079500, "errors": 53},
{"value": 0.080000, "errors": 53},
{"value": 0.080500, "errors": 53},
{"value": 0.081000, "errors": 53},
{"value": 0.081500, "errors": 53},
{"value": 0.082000, "errors": 53},
{"value": 0.082500, "errors": 53},
{"value": 0.083000, "errors": 53},
{"value": 0.083500, "errors": 53},
{"value": 0.084000, "errors": 53},
{"value": 0.084500, "errors": 53},
{"value": 0.085000, "errors": 53},
{"value": 0.085500, "errors": 53},
{"value": 0.086000, "errors": 53},
{"value": 0.086500, "errors": 53},
{"value": 0.087000, "errors": 53},
{"value": 0.087500, "errors": 53},
{"value": 0.088000, "errors": 53},
{"value": 0.088500, "errors": 53},
{"value": 0.089000, "errors": 53},
{"value": 0.089500, "errors": 53},
{"value": 0.090000, "errors": 55},
{"value": 0.090500, "errors": 55},
{"value": 0.091000, "errors": 55},
{"value": 0.091500, "errors": 55},
{"value": 0.092000, "errors": 55},
{"value": 0.092500, "errors": 55},
{"value": 0.093000, "errors": 55},
{"value": 0.093500, "errors": 55},
{"value": 0.094000, "errors": 55},
{"value": 0.094500, "errors": 55},
{"value": 0.095000, "errors": 55},
{"value": 0.095500, "errors": 55},
{"value": 0.096000, "errors": 55},
{"value": 0.096500, "errors": 55},
{"value": 0.097000, "errors": 55},
{"value": 0.097500, "errors": 55},
{"value": 0.098000, "errors": 54},
{"value": 0.098500, "errors": 54},
{"value": 0.099000, "errors": 54},
{"value": 0.099500, "errors": 54},
{"value": 0.100000, "errors": 54},
{"value": 0.100500, "errors": 54},
{"value": 0.101000, "errors": 54},
{"value": 0.101500, "errors": 54},
{"value": 0.102000, "errors": 54},
{"value": 0.102500, "errors": 54},
{"value": 0.103000, "errors": 54},
{"value": 0.103500, "errors": 54},
{"value": 0.104000, "errors": 54},
{"value": 0.104500, "errors": 54},
{"value": 0.105000, "errors": 54},
{"value": 0.105500, "errors": 54},
{"value": 0.106000, "errors": 54},
{"value": 0.106500, "errors": 54},
{"value": 0.107000, "errors": 54},
{"value": 0.107500, "errors": 54},
{"value": 0.108000, "errors": 54},
{"value": 0.108500, "errors": 54},
{"value": 0.109000, "errors": 54},
{"value": 0.109500, "errors": 54},
{"value": 0.110000, "errors": 54},
{"value": 0.110500, "errors": 54},
{"value": 0.111000, "errors": 54},
{"value": 0.111500, "errors": 54},
{"value": 0.112000, "errors": 54},
{"value": 0.112500, "errors": 54},
{"value": 0.113000, "errors": 54},
{"value": 0.113500, "errors": 57},
{"value": 0.114000, "errors": 57},
{"value": 0.114500, "errors": 57},
{"value": 0.115000, "errors": 57},
{"value": 0.115500, "errors": 60},
{"value": 0.116000, "errors": 63},
{"value": 0.116500, "errors": 63},
{"value": 0.117000, "errors": 65},
{"value": 0.117500, "errors": 67},
{"value": 0.118000, "errors": 67},
{"value": 0.118500, "errors": 67},
{"value": 0.119000, "errors": 69},
{"value": 0.119500, "errors": 69},
{"value": 0.120000, "errors": 73},
{"value": 0.120500, "errors": 75},
{"value": 0.121000, "errors": 77},
{"value": 0.121500, "errors": 81},
{"value": 0.122000, "errors": 83},
{"value": 0.122500, "errors": 87},
{"value": 0.123000, "errors": 89},
{"value": 0.123500, "errors": 89},
{"value": 0.124000, "errors": 92},
{"value": 0.124500, "errors": 92},
{"value": 0.125000, "errors": 92},
{"value": 0.125500, "errors": 95},
{"value": 0.126000, "errors": 95},
{"value": 0.126500, "errors": 98},
{"value": 0.127000, "errors": 98},
{"value": 0.127500, "errors": 101},
{"value": 0.128000, "errors": 101},
{"value": 0.128500, "errors": 101},
{"value": 0.129000, "errors": 103},
{"value": 0.129500, "errors": 103},
{"value": 0.130000, "errors": 103},
{"value": 0.130500, "errors": 103},
{"value": 0.131000, "errors": 105},
{"value": 0.131500, "errors": 107},
{"value": 0.132000, "errors": 107},
{"value": 0.132500, "errors": 109},
{"value": 0.133000, "errors": 109},
{"value": 0.133500, "errors": 109},
{"value": 0.134000, "errors": 109},
{"value": 0.134500, "errors": 109},
{"value": 0.135000, "errors": 112},
{"value": 0.135500, "errors": 118},
{"value": 0.136000, "errors": 119},
{"value": 0.136500, "errors": 122},
{"value": 0.137000, "errors": 122},
{"value": 0.137500, "errors": 125},
{"value": 0.138000, "errors": 127},
{"value": 0.138500, "errors": 127},
{"value": 0.139000, "errors": 132},
{"value": 0.139500, "errors": 138},
{"value": 0.140000, "errors": 138},
{"value": 0.140500, "errors": 140},
{"value": 0.141000, "errors": 140},
{"value": 0.141500, "errors": 140},
{"value": 0.142000, "errors": 144},
{"value": 0.142500, "errors": 144},
{"value": 0.143000, "errors": 147},
{"value": 0.143500, "errors": 153},
{"value": 0.144000, "errors": 155},
{"value": 0.144500, "errors": 154},
{"value": 0.145000, "errors": 158},
{"value": 0.145500, "errors": 171},
{"value": 0.146000, "errors": 177},
{"value": 0.146500, "errors": 180},
{"value": 0.147000, "errors": 186},
{"value": 0.147500, "errors": 188},
{"value": 0.148000, "errors": 194},
{"value": 0.148500, "errors": 196},
{"value": 0.149000, "errors": 208},
{"value": 0.149500, "errors": 209},
{"value": 0.150000, "errors": 215},
{"value": 0.150500, "errors": 225},
{"value": 0.151000, "errors": 233},
{"value": 0.151500, "errors": 254},
{"value": 0.152000, "errors": 261},
{"value": 0.152500, "errors": 270},
{"value": 0.153000, "errors": 279},
{"value": 0.153500, "errors": 284},
{"value": 0.154000, "errors": 294},
{"value": 0.154500, "errors": 297},
{"value": 0.155000, "errors": 301},
{"value": 0.155500, "errors": 315},
{"value": 0.156000, "errors": 324},
{"value": 0.156500, "errors": 326},
{"value": 0.157000, "errors": 334},
{"value": 0.157500, "errors": 341},
{"value": 0.158000, "errors": 346},
{"value": 0.158500, "errors": 354},
{"value": 0.159000, "errors": 365},
{"value": 0.159500, "errors": 371},
{"value": 0.160000, "errors": 388},
{"value": 0.160500, "errors": 400},
{"value": 0.161000, "errors": 412},
{"value": 0.161500, "errors": 414},
{"value": 0.162000, "errors": 419},
{"value": 0.162500, "errors": 428},
{"value": 0.163000, "errors": 429},
{"value": 0.163500, "errors": 434},
{"value": 0.164000, "errors": 438},
{"value": 0.164500, "errors": 449},
{"value": 0.165000, "errors": 452},
{"value": 0.165500, "errors": 462},
{"value": 0.166000, "errors": 475},
{"value": 0.166500, "errors": 478},
{"value": 0.167000, "errors": 478},
{"value": 0.167500, "errors": 478},
{"value": 0.168000, "errors": 488},
{"value": 0.168500, "errors": 492},
{"value": 0.169000, "errors": 498},
{"value": 0.169500, "errors": 504},
{"value": 0.170000, "errors": 509},
{"value": 0.170500, "errors": 521},
{"value": 0.171000, "errors": 525},
{"value": 0.171500, "errors": 530},
{"value": 0.172000, "errors": 534},
{"value": 0.172500, "errors": 549},
{"value": 0.173000, "errors": 559},
{"value": 0.173500, "errors": 565},
{"value": 0.174000, "errors": 570},
{"value": 0.174500, "errors": 575},
{"value": 0.175000, "errors": 579},
{"value": 0.175500, "errors": 587},
{"value": 0.176000, "errors": 588},
{"value": 0.176500, "errors": 594},
{"value": 0.177000, "errors": 600},
{"value": 0.177500, "errors": 606},
{"value": 0.178000, "errors": 623},
{"value": 0.178500, "errors": 627},
{"value": 0.179000, "errors": 637},
{"value": 0.179500, "errors": 643},
{"value": 0.180000, "errors": 643},
{"value": 0.180500, "errors": 648},
{"value": 0.181000, "errors": 650},
{"value": 0.181500, "errors": 651},
{"value": 0.182000, "errors": 656},
{"value": 0.182500, "errors": 665},
{"value": 0.183000, "errors": 666},
{"value": 0.183500, "errors": 669},
{"value": 0.184000, "errors": 673},
{"value": 0.184500, "errors": 675},
{"value": 0.185000, "errors": 677},
{"value": 0.185500, "errors": 679},
{"value": 0.186000, "errors": 680},
{"value": 0.186500, "errors": 686},
{"value": 0.187000, "errors": 686},
{"value": 0.187500, "errors": 688},
{"value": 0.188000, "errors": 691},
{"value": 0.188500, "errors": 693},
{"value": 0.189000, "errors": 701},
{"value": 0.189500, "errors": 701},
{"value": 0.190000, "errors": 704},
{"value": 0.190500, "errors": 704},
{"value": 0.191000, "errors": 707},
{"value": 0.191500, "errors": 707},
{"value": 0.192000, "errors": 709},
{"value": 0.192500, "errors": 711},
{"value": 0.193000, "errors": 717},
{"value": 0.193500, "errors": 717},
{"value": 0.194000, "errors": 719},
{"value": 0.194500, "errors": 720},
{"value": 0.195000, "errors": 721},
{"value": 0.195500, "errors": 721},
{"value": 0.196000, "errors": 721},
{"value": 0.196500, "errors": 721},
{"value": 0.197000, "errors": 721},
{"value": 0.197500, "errors": 721},
{"value": 0.198000, "errors": 724},
{"value": 0.198500, "errors": 724},
{"value": 0.199000, "errors": 724},
{"value": 0.199500, "errors": 726},
{"value": 0.200000, "errors": 726},
{"value": 0.200500, "errors": 726},
{"value": 0.201000, "errors": 730},
{"value": 0.201500, "errors": 735},
{"value": 0.202000, "errors": 735},
{"value": 0.202500, "errors": 735},
{"value": 0.203000, "errors": 735},
{"value": 0.203500, "errors": 736},
{"value": 0.204000, "errors": 736},
{"value": 0.204500, "errors": 736},
{"value": 0.205000, "errors": 736},
{"value": 0.205500, "errors": 736},
{"value": 0.206000, "errors": 736},
{"value": 0.206500, "errors": 738},
{"value": 0.207000, "errors": 738},
{"value": 0.207500, "errors": 738},
{"value": 0.208000, "errors": 738},
{"value": 0.208500, "errors": 738},
{"value": 0.209000, "errors": 739},
{"value": 0.209500, "errors": 740},
{"value": 0.210000, "errors": 743},
{"value": 0.210500, "errors": 743},
{"value": 0.211000, "errors": 745},
{"value": 0.211500, "errors": 745},
{"value": 0.212000, "errors": 745},
{"value": 0.212500, "errors": 747},
{"value": 0.213000, "errors": 747},
{"value": 0.213500, "errors": 747},
{"value": 0.214000, "errors": 747},
{"value": 0.214500, "errors": 747},
{"value": 0.215000, "errors": 747},
{"value": 0.215500, "errors": 747},
{"value": 0.216000, "errors": 747},
{"value": 0.216500, "errors": 747},
{"value": 0.217000, "errors": 747},
{"value": 0.217500, "errors": 749},
{"value": 0.218000, "errors": 749},
{"value": 0.218500, "errors": 749},
{"value": 0.219000, "errors": 749},
{"value": 0.219500, "errors": 749},
{"value": 0.220000, "errors": 750},
{"value": 0.220500, "errors": 750},
{"value": 0.221000, "errors": 750},
{"value": 0.221500, "errors": 750},
{"value": 0.222000, "errors": 750},
{"value": 0.222500, "errors": 750},
{"value": 0.223000, "errors": 750},
{"value": 0.223500, "errors": 750},
{"value": 0.224000, "errors": 750},
{"value": 0.224500, "errors": 750},
{"value": 0.225000, "errors": 750},
{"value": 0.225500, "errors": 750},
{"value": 0.226000, "errors": 750},
{"value": 0.226500, "errors": 750},
{"value": 0.227000, "errors": 750},
{"value": 0.227500, "errors": 750},
{"value": 0.228000, "errors": 750},
{"value": 0.228500, "errors": 750},
{"value": 0.229000, "errors": 750},
{"value": 0.229500, "errors": 750},
{"value": 0.230000, "errors": 750},
{"value": 0.230500, "errors": 750},
{"value": 0.231000, "errors": 750},
{"value": 0.231500, "errors": 750},
{"value": 0.232000, "errors": 750},
{"value": 0.232500, "errors": 750},
{"value": 0.233000, "errors": 750},
{"value": 0.233500, "errors": 750},
{"value": 0.234000, "errors": 750},
{"value": 0.234500, "errors": 751},
{"value": 0.235000, "errors": 751},
{"value": 0.235500, "errors": 751},
{"value": 0.236000, "errors": 751},
{"value": 0.236500, "errors": 751},
{"value": 0.237000, "errors": 751},
{"value": 0.237500, "errors": 751},
{"value": 0.238000, "errors": 751},
{"value": 0.238500, "errors": 752},
{"value": 0.239000, "errors": 752},
{"value": 0.239500, "errors": 752},
{"value": 0.240000, "errors": 754},
{"value": 0.240500, "errors": 754},
{"value": 0.241000, "errors": 754},
{"value": 0.241500, "errors": 754},
{"value": 0.242000, "errors": 754},
{"value": 0.242500, "errors": 754},
{"value": 0.243000, "errors": 754},
{"value": 0.243500, "errors": 754},
{"value": 0.244000, "errors": 754},
{"value": 0.244500, "errors": 754},
{"value": 0.245000, "errors": 754},
{"value": 0.245500, "errors": 754},
{"value": 0.246000, "errors": 754},
{"value": 0.246500, "errors": 754},
{"value": 0.247000, "errors": 754},
{"value": 0.247500, "errors": 754},
{"value": 0.248000, "errors": 754},
{"value": 0.248500, "errors": 754},
{"value": 0.249000, "errors": 754},
{"value": 0.249500, "errors": 754},
{"value": 0.250000, "errors": 754},
{"value": 0.250500, "errors": 754},
{"value": 0.251000, "errors": 754},
{"value": 0.251500, "errors": 754},
{"value": 0.252000, "errors": 754},
{"value": 0.252500, "errors": 754},
{"value": 0.253000, "errors": 754},
{"value": 0.253500, "errors": 754},
{"value": 0.254000, "errors": 754},
{"value": 0.254500, "errors": 754},
{"value": 0.255000, "errors": 754},
{"value": 0.255500, "errors": 754},
{"value": 0.256000, "errors": 754},
{"value": 0.256500, "errors": 754},
{"value": 0.257000, "errors": 754},
{"value": 0.257500, "errors": 754},
{"value": 0.258000, "errors": 754},
{"value": 0.258500, "errors": 754},
{"value": 0.259000, "errors": 754},
{"value": 0.259500, "errors": 754},
{"value": 0.260000, "errors": 754},
{"value": 0.260500, "errors": 754},
{"value": 0.261000, "errors": 754},
{"value": 0.261500, "errors": 754},
{"value": 0.262000, "errors": 754},
{"value": 0.262500, "errors": 754},
{"value": 0.263000, "errors": 754},
{"value": 0.263500, "errors": 754},
{"value": 0.264000, "errors": 754},
{"value": 0.264500, "errors": 754},
{"value": 0.265000, "errors": 754},
{"value": 0.265500, "errors": 754},
{"value": 0.266000, "errors": 754},
{"value": 0.266500, "errors": 754},
{"value": 0.267000, "errors": 754},
{"value": 0.267500, "errors": 754},
{"value": 0.268000, "errors": 754},
{"value": 0.268500, "errors": 754},
{"value": 0.269000, "errors": 754},
{"value": 0.269500, "errors": 754},
{"value": 0.270000, "errors": 754},
{"value": 0.270500, "errors": 754},
{"value": 0.271000, "errors": 754},
{"value": 0.271500, "errors": 754},
{"value": 0.272000, "errors": 754},
{"value": 0.272500, "errors": 754},
{"value": 0.273000, "errors": 754},
{"value": 0.273500, "errors": 754},
{"value": 0.274000, "errors": 754},
{"value": 0.274500, "errors": 754},
{"value": 0.275000, "errors": 754},
{"value": 0.275500, "errors": 754},
{"value": 0.276000, "errors": 754},
{"value": 0.276500, "errors": 754},
{"value": 0.277000, "errors": 754},
{"value": 0.277500, "errors": 754},
{"value": 0.278000, "errors": 754},
{"value": 0.278500, "errors": 754},
{"value": 0.279000, "errors": 754},
{"value": 0.279500, "errors": 754},
{"value": 0.280000, "errors": 754},
{"value": 0.280500, "errors": 754},
{"value": 0.281000, "errors": 754},
{"value": 0.281500, "errors": 754},
{"value": 0.282000, "errors": 754},
{"value": 0.282500, "errors": 754},
{"value": 0.283000, "errors": 753},
{"value": 0.283500, "errors": 753},
{"value": 0.284000, "errors": 753},
{"value": 0.284500, "errors": 753},
{"value": 0.285000, "errors": 753},
{"value": 0.285500, "errors": 753},
{"value": 0.286000, "errors": 753},
{"value": 0.286500, "errors": 753},
{"value": 0.287000, "errors": 753},
{"value": 0.287500, "errors": 753},
{"value": 0.288000, "errors": 753},
{"value": 0.288500, "errors": 753},
{"value": 0.289000, "errors": 753},
{"value": 0.289500, "errors": 753},
{"value": 0.290000, "errors": 753},
{"value": 0.290500, "errors": 753},
{"value": 0.291000, "errors": 753},
{"value": 0.291500, "errors": 753},
{"value": 0.292000, "errors": 753},
{"value": 0.292500, "errors": 753},
{"value": 0.293000, "errors": 753},
{"value": 0.293500, "errors": 753},
{"value": 0.294000, "errors": 753},
{"value": 0.294500, "errors": 753},
{"value": 0.295000, "errors": 753},
{"value": 0.295500, "errors": 753},
{"value": 0.296000, "errors": 753},
{"value": 0.296500, "errors": 753},
{"value": 0.297000, "errors": 753},
{"value": 0.297500, "errors": 753},
{"value": 0.298000, "errors": 753},
{"value": 0.298500, "errors": 753},
{"value": 0.299000, "errors": 753},
{"value": 0.299500, "errors": 753},
{"value": 0.300000, "errors": 753},
{"value": 0.300500, "errors": 753},
{"value": 0.301000, "errors": 753},
{"value": 0.301500, "errors": 753},
{"value": 0.302000, "errors": 753},
{"value": 0.302500, "errors": 753},
{"value": 0.303000, "errors": 753},
{"value": 0.303500, "errors": 753},
{"value": 0.304000, "errors": 753},
{"value": 0.304500, "errors": 753},
{"value": 0.305000, "errors": 753},
{"value": 0.305500, "errors": 753},
{"value": 0.306000, "errors": 753},
{"value": 0.306500, "errors": 752},
{"value": 0.307000, "errors": 752},
{"value": 0.307500, "errors": 752},
{"value": 0.308000, "errors": 752},
{"value": 0.308500, "errors": 752},
{"value": 0.309000, "errors": 752},
{"value": 0.309500, "errors": 752},
{"value": 0.310000, "errors": 752},
{"value": 0.310500, "errors": 752},
{"value": 0.311000, "errors": 752},
{"value": 0.311500, "errors": 752},
{"value": 0.312000, "errors": 752},
{"value": 0.312500, "errors": 752},
{"value": 0.313000, "errors": 752},
{"value": 0.313500, "errors": 752},
{"value": 0.314000, "errors": 752},
{"value": 0.314500, "errors": 752},
{"value": 0.315000, "errors": 752},
{"value": 0.315500, "errors": 752},
{"value": 0.316000, "errors": 752},
{"value": 0.316500, "errors": 752},
{"value": 0.317000, "errors": 752},
{"value": 0.317500, "errors": 752},
{"value": 0.318000, "errors": 752},
{"value": 0.318500, "errors": 752},
{"value": 0.319000, "errors": 752},
{"value": 0.319500, "errors": 752},
{"value": 0.320000, "errors": 752},
{"value": 0.320500, "errors": 752},
{"value": 0.321000, "errors": 752},
{"value": 0.321500, "errors": 752},
{"value": 0.322000, "errors": 752},
{"value": 0.322500, "errors": 752},
{"value": 0.323000, "errors": 752},
{"value": 0.323500, "errors": 752},
{"value": 0.324000, "errors": 752},
{"value": 0.324500, "errors": 752},
{"value": 0.325000, "errors": 752},
{"value": 0.325500, "errors": 752},
{"value": 0.326000, "errors": 752},
{"value": 0.326500, "errors": 752},
{"value": 0.327000, "errors": 752},
{"value": 0.327500, "errors": 752},
{"value": 0.328000, "errors": 752},
{"value": 0.328500, "errors": 752},
{"value": 0.329000, "errors": 752},
{"value": 0.329500, "errors": 752},
{"value": 0.330000, "errors": 752},
{"value": 0.330500, "errors": 752},
{"value": 0.331000, "errors": 752},
{"value": 0.331500, "errors": 752},
{"value": 0.332000, "errors": 752},
{"value": 0.332500, "errors": 752},
{"value": 0.333000, "errors": 752},
{"value": 0.333500, "errors": 752},
{"value": 0.334000, "errors": 752},
{"value": 0.334500, "errors": 752},
{"value": 0.335000, "errors": 752},
{"value": 0.335500, "errors": 752},
{"value": 0.336000, "errors": 752},
{"value": 0.336500, "errors": 752},
{"value": 0.337000, "errors": 752},
{"value": 0.337500, "errors": 752},
{"value": 0.338000, "errors": 752},
{"value": 0.338500, "errors": 752},
{"value": 0.339000, "errors": 752},
{"value": 0.339500, "errors": 752},
{"value": 0.340000, "errors": 752},
{"value": 0.340500, "errors": 752},
{"value": 0.341000, "errors": 752},
{"value": 0.341500, "errors": 752},
{"value": 0.342000, "errors": 752},
{"value": 0.342500, "errors": 752},
{"value": 0.343000, "errors": 751},
{"value": 0.343500, "errors": 751},
{"value": 0.344000, "errors": 751},
{"value": 0.344500, "errors": 751},
{"value": 0.345000, "errors": 751},
{"value": 0.345500, "errors": 751},
{"value": 0.346000, "errors": 751},
{"value": 0.346500, "errors": 751},
{"value": 0.347000, "errors": 751},
{"value": 0.347500, "errors": 751},
{"value": 0.348000, "errors": 751},
{"value": 0.348500, "errors": 751},
{"value": 0.349000, "errors": 751},
{"value": 0.349500, "errors": 751},
{"value": 0.350000, "errors": 751},
{"value": 0.350500, "errors": 751},
{"value": 0.351000, "errors": 751},
{"value": 0.351500, "errors": 751},
{"value": 0.352000, "errors": 751},
{"value": 0.352500, "errors": 751},
{"value": 0.353000, "errors": 751},
{"value": 0.353500, "errors": 751},
{"value": 0.354000, "errors": 751},
{"value": 0.354500, "errors": 751},
{"value": 0.355000, "errors": 751},
{"value": 0.355500, "errors": 751},
{"value": 0.356000, "errors": 751},
{"value": 0.356500, "errors": 751},
{"value": 0.357000, "errors": 751},
{"value": 0.357500, "errors": 751},
{"value": 0.358000, "errors": 751},
{"value": 0.358500, "errors": 751},
{"value": 0.359000, "errors": 751},
{"value": 0.359500, "errors": 751},
{"value": 0.360000, "errors": 751},
{"value": 0.360500, "errors": 751},
{"value": 0.361000, "errors": 751},
{"value": 0.361500, "errors": 751},
{"value": 0.362000, "errors": 751},
{"value": 0.362500, "errors": 751},
{"value": 0.363000, "errors": 751},
{"value": 0.363500, "errors": 751},
{"value": 0.364000, "errors": 751},
{"value": 0.364500, "errors": 751},
{"value": 0.365000, "errors": 751},
{"value": 0.365500, "errors": 751},
{"value": 0.366000, "errors": 751},
{"value": 0.366500, "errors": 751},
{"value": 0.367000, "errors": 751},
{"value": 0.367500, "errors": 751},
{"value": 0.368000, "errors": 751},
{"value": 0.368500, "errors": 751},
{"value": 0.369000, "errors": 751},
{"value": 0.369500, "errors": 751},
{"value": 0.370000, "errors": 751},
{"value": 0.370500, "errors": 751},
{"value": 0.371000, "errors": 751},
{"value": 0.371500, "errors": 751},
{"value": 0.372000, "errors": 751},
{"value": 0.372500, "errors": 751},
{"value": 0.373000, "errors": 751},
{"value": 0.373500, "errors": 751},
{"value": 0.374000, "errors": 751},
{"value": 0.374500, "errors": 751},
{"value": 0.375000, "errors": 751},
{"value": 0.375500, "errors": 751},
{"value": 0.376000, "errors": 751},
{"value": 0.376500, "errors": 751},
{"value": 0.377000, "errors": 751},
{"value": 0.377500, "errors": 751},
{"value": 0.378000, "errors": 751},
{"value": 0.378500, "errors": 751},
{"value": 0.379000, "errors": 751},
{"value": 0.379500, "errors": 751},
{"value": 0.380000, "errors": 751},
{"value": 0.380500, "errors": 751},
{"value": 0.381000, "errors": 751},
{"value": 0.381500, "errors": 751},
{"value": 0.382000, "errors": 751},
{"value": 0.382500, "errors": 751},
{"value": 0.383000, "errors": 751},
{"value": 0.383500, "errors": 751},
{"value": 0.384000, "errors": 751},
{"value": 0.384500, "errors": 751},
{"value": 0.385000, "errors": 751},
{"value": 0.385500, "errors": 751},
{"value": 0.386000, "errors": 751},
{"value": 0.386500, "errors": 751},
{"value": 0.387000, "errors": 751},
{"value": 0.387500, "errors": 751},
{"value": 0.388000, "errors": 751},
{"value": 0.388500, "errors": 751},
{"value": 0.389000, "errors": 751},
{"value": 0.389500, "errors": 751},
{"value": 0.390000, "errors": 751},
{"value": 0.390500, "errors": 751},
{"value": 0.391000, "errors": 751},
{"value": 0.391500, "errors": 751},
{"value": 0.392000, "errors": 751},
{"value": 0.392500, "errors": 751},
{"value": 0.393000, "errors": 751},
{"value": 0.393500, "errors": 751},
{"value": 0.394000, "errors": 751},
{"value": 0.394500, "errors": 751},
{"value": 0.395000, "errors": 751},
{"value": 0.395500, "errors": 751},
{"value": 0.396000, "errors": 751},
{"value": 0.396500, "errors": 751},
{"value": 0.397000, "errors": 751},
{"value": 0.397500, "errors": 751},
{"value": 0.398000, "errors": 751},
{"value": 0.398500, "errors": 751},
{"value": 0.399000, "errors": 751},
{"value": 0.399500, "errors": 751},
{"value": 0.400000, "errors": 751},
{"value": 0.400500, "errors": 751},
{"value": 0.401000, "errors": 751},
{"value": 0.401500, "errors": 751},
{"value": 0.402000, "errors": 751},
{"value": 0.402500, "errors": 751},
{"value": 0.403000, "errors": 751},
{"value": 0.403500, "errors": 751},
{"value": 0.404000, "errors": 751},
{"value": 0.404500, "errors": 751},
{"value": 0.405000, "errors": 751},
{"value": 0.405500, "errors": 751},
{"value": 0.406000, "errors": 751},
{"value": 0.406500, "errors": 751},
{"value": 0.407000, "errors": 751},
{"value": 0.407500, "errors": 751},
{"value": 0.408000, "errors": 751},
{"value": 0.408500, "errors": 751},
{"value": 0.409000, "errors": 751},
{"value": 0.409500, "errors": 751},
{"value": 0.410000, "errors": 751},
{"value": 0.410500, "errors": 751},
{"value": 0.411000, "errors": 751},
{"value": 0.411500, "errors": 751},
{"value": 0.412000, "errors": 751},
{"value": 0.412500, "errors": 751},
{"value": 0.413000, "errors": 751},
{"value": 0.413500, "errors": 751},
{"value": 0.414000, "errors": 751},
{"value": 0.414500, "errors": 751},
{"value": 0.415000, "errors": 751},
{"value": 0.415500, "errors": 751},
{"value": 0.416000, "errors": 751},
{"value": 0.416500, "errors": 751},
{"value": 0.417000, "errors": 751},
{"value": 0.417500, "errors": 751},
{"value": 0.418000, "errors": 751},
{"value": 0.418500, "errors": 751},
{"value": 0.419000, "errors": 751},
{"value": 0.419500, "errors": 751},
{"value": 0.420000, "errors": 751},
{"value": 0.420500, "errors": 751},
{"value": 0.421000, "errors": 751},
{"value": 0.421500, "errors": 751},
{"value": 0.422000, "errors": 751},
{"value": 0.422500, "errors": 751},
{"value": 0.423000, "errors": 751},
{"value": 0.423500, "errors": 751},
{"value": 0.424000, "errors": 751},
{"value": 0.424500, "errors": 751},
{"value": 0.425000, "errors": 751},
{"value": 0.425500, "errors": 751},
{"value": 0.426000, "errors": 751},
{"value": 0.426500, "errors": 751},
{"value": 0.427000, "errors": 751},
{"value": 0.427500, "errors": 751},
{"value": 0.428000, "errors": 751},
{"value": 0.428500, "errors": 751},
{"value": 0.429000, "errors": 751},
{"value": 0.429500, "errors": 751},
{"value": 0.430000, "errors": 751},
{"value": 0.430500, "errors": 751},
{"value": 0.431000, "errors": 751},
{"value": 0.431500, "errors": 751},
{"value": 0.432000, "errors": 751},
{"value": 0.432500, "errors": 751},
{"value": 0.433000, "errors": 751},
{"value": 0.433500, "errors": 751},
{"value": 0.434000, "errors": 751},
{"value": 0.434500, "errors": 751},
{"value": 0.435000, "errors": 751},
{"value": 0.435500, "errors": 751},
{"value": 0.436000, "errors": 751},
{"value": 0.436500, "errors": 751},
{"value": 0.437000, "errors": 751},
{"value": 0.437500, "errors": 751},
{"value": 0.438000, "errors": 751},
{"value": 0.438500, "errors": 751},
{"value": 0.439000, "errors": 751},
{"value": 0.439500, "errors": 751},
{"value": 0.440000, "errors": 751},
{"value": 0.440500, "errors": 751},
{"value": 0.441000, "errors": 751},
{"value": 0.441500, "errors": 751},
{"value": 0.442000, "errors": 751},
{"value": 0.442500, "errors": 751},
{"value": 0.443000, "errors": 751},
{"value": 0.443500, "errors": 751},
{"value": 0.444000, "errors": 751},
{"value": 0.444500, "errors": 751},
{"value": 0.445000, "errors": 751},
{"value": 0.445500, "errors": 751},
{"value": 0.446000, "errors": 751},
{"value": 0.446500, "errors": 751},
{"value": 0.447000, "errors": 751},
{"value": 0.447500, "errors": 751},
{"value": 0.448000, "errors": 751},
{"value": 0.448500, "errors": 751},
{"value": 0.449000, "errors": 751},
{"value": 0.449500, "errors": 751},
{"value": 0.450000, "errors": 751},
{"value": 0.450500, "errors": 751},
{"value": 0.451000, "errors": 751},
{"value": 0.451500, "errors": 751},
{"value": 0.452000, "errors": 751},
{"value": 0.452500, "errors": 751},
{"value": 0.453000, "errors": 751},
{"value": 0.453500, "errors": 751},
{"value": 0.454000, "errors": 751},
{"value": 0.454500, "errors": 751},
{"value": 0.455000, "errors": 751},
{"value": 0.455500, "errors": 751},
{"value": 0.456000, "errors": 751},
{"value": 0.456500, "errors": 751},
{"value": 0.457000, "errors": 751},
{"value": 0.457500, "errors": 751},
{"value": 0.458000, "errors": 751},
{"value": 0.458500, "errors": 751},
{"value": 0.459000, "errors": 750},
{"value": 0.459500, "errors": 750},
{"value": 0.460000, "errors": 750},
{"value": 0.460500, "errors": 750},
{"value": 0.461000, "errors": 750},
{"value": 0.461500, "errors": 750},
{"value": 0.462000, "errors": 750},
{"value": 0.462500, "errors": 750},
{"value": 0.463000, "errors": 750},
{"value": 0.463500, "errors": 750},
{"value": 0.464000, "errors": 750},
{"value": 0.464500, "errors": 750},
{"value": 0.465000, "errors": 750},
{"value": 0.465500, "errors": 750},
{"value": 0.466000, "errors": 750},
{"value": 0.466500, "errors": 750},
{"value": 0.467000, "errors": 750},
{"value": 0.467500, "errors": 750},
{"value": 0.468000, "errors": 750},
{"value": 0.468500, "errors": 750},
{"value": 0.469000, "errors": 750},
{"value": 0.469500, "errors": 750},
{"value": 0.470000, "errors": 750},
{"value": 0.470500, "errors": 750},
{"value": 0.471000, "errors": 750},
{"value": 0.471500, "errors": 750},
{"value": 0.472000, "errors": 750},
{"value": 0.472500, "errors": 750},
{"value": 0.473000, "errors": 750},
{"value": 0.473500, "errors": 750},
{"value": 0.474000, "errors": 750},
{"value": 0.474500, "errors": 750},
{"value": 0.475000, "errors": 750},
{"value": 0.475500, "errors": 750},
{"value": 0.476000, "errors": 750},
{"value": 0.476500, "errors": 750},
{"value": 0.477000, "errors": 750},
{"value": 0.477500, "errors": 750},
{"value": 0.478000, "errors": 750},
{"value": 0.478500, "errors": 750},
{"value": 0.479000, "errors": 750},
{"value": 0.479500, "errors": 749},
{"value": 0.480000, "errors": 749},
{"value": 0.480500, "errors": 749},
{"value": 0.481000, "errors": 749},
{"value": 0.481500, "errors": 749},
{"value": 0.482000, "errors": 749},
{"value": 0.482500, "errors": 749},
{"value": 0.483000, "errors": 749},
{"value": 0.483500, "errors": 749},
{"value": 0.484000, "errors": 749},
{"value": 0.484500, "errors": 749},
{"value": 0.485000, "errors": 749},
{"value": 0.485500, "errors": 749},
{"value": 0.486000, "errors": 749},
{"value": 0.486500, "errors": 749},
{"value": 0.487000, "errors": 749},
{"value": 0.487500, "errors": 749},
{"value": 0.488000, "errors": 749},
{"value": 0.488500, "errors": 749},
{"value": 0.489000, "errors": 749},
{"value": 0.489500, "errors": 749},
{"value": 0.490000, "errors": 749},
{"value": 0.490500, "errors": 749},
{"value": 0.491000, "errors": 749},
{"value": 0.491500, "errors": 749},
{"value": 0.492000, "errors": 749},
{"value": 0.492500, "errors": 749},
{"value": 0.493000, "errors": 749},
{"value": 0.493500, "errors": 749},
{"value": 0.494000, "errors": 749},
{"value": 0.494500, "errors": 749},
{"value": 0.495000, "errors": 749},
{"value": 0.495500, "errors": 749},
{"value": 0.496000, "errors": 749},
{"value": 0.496500, "errors": 749},
{"value": 0.497000, "errors": 749},
{"value": 0.497500, "errors": 749},
{"value": 0.498000, "errors": 749},
{"value": 0.498500, "errors": 749},
{"value": 0.499000, "errors": 749},
{"value": 0.499500, "errors": 749},
{"value": 0.500000, "errors": 749},
{"value": 0.500500, "errors": 749},
{"value": 0.501000, "errors": 749},
{"value": 0.501500, "errors": 749},
{"value": 0.502000, "errors": 749},
{"value": 0.502500, "errors": 749},
{"value": 0.503000, "errors": 749},
{"value": 0.503500, "errors": 749},
{"value": 0.504000, "errors": 749},
{"value": 0.504500, "errors": 749},
{"value": 0.505000, "errors": 749},
{"value": 0.505500, "errors": 749},
{"value": 0.506000, "errors": 749},
{"value": 0.506500, "errors": 749},
{"value": 0.507000, "errors": 749},
{"value": 0.507500, "errors": 749},
{"value": 0.508000, "errors": 749},
{"value": 0.508500, "errors": 749},
{"value": 0.509000, "errors": 749},
{"value": 0.509500, "errors": 749},
{"value": 0.510000, "errors": 749},
{"value": 0.510500, "errors": 749},
{"value": 0.511000, "errors": 749},
{"value": 0.511500, "errors": 749},
{"value": 0.512000, "errors": 749},
{"value": 0.512500, "errors": 749},
{"value": 0.513000, "errors": 749},
{"value": 0.513500, "errors": 749},
{"value": 0.514000, "errors": 749},
{"value": 0.514500, "errors": 749},
{"value": 0.515000, "errors": 749},
{"value": 0.515500, "errors": 749},
{"value": 0.516000, "errors": 749},
{"value": 0.516500, "errors": 749},
{"value": 0.517000, "errors": 749},
{"value": 0.517500, "errors": 749},
{"value": 0.518000, "errors": 749},
{"value": 0.518500, "errors": 749},
{"value": 0.519000, "errors": 749},
{"value": 0.519500, "errors": 749},
{"value": 0.520000, "errors": 749},
{"value": 0.520500, "errors": 749},
{"value": 0.521000, "errors": 749},
{"value": 0.521500, "errors": 749},
{"value": 0.522000, "errors": 749},
{"value": 0.522500, "errors": 749},
{"value": 0.523000, "errors": 749},
{"value": 0.523500, "errors": 749},
{"value": 0.524000, "errors": 749},
{"value": 0.524500, "errors": 749},
{"value": 0.525000, "errors": 749},
{"value": 0.525500, "errors": 749},
{"value": 0.526000, "errors": 749},
{"value": 0.526500, "errors": 749},
{"value": 0.527000, "errors": 749},
{"value": 0.527500, "errors": 749},
{"value": 0.528000, "errors": 749},
{"value": 0.528500, "errors": 749},
{"value": 0.529000, "errors": 749},
{"value": 0.529500, "errors": 749},
{"value": 0.530000, "errors": 749},
{"value": 0.530500, "errors": 749},
{"value": 0.531000, "errors": 749},
{"value": 0.531500, "errors": 749},
{"value": 0.532000, "errors": 749},
{"value": 0.532500, "errors": 749},
{"value": 0.533000, "errors": 749},
{"value": 0.533500, "errors": 749},
{"value": 0.534000, "errors": 749},
{"value": 0.534500, "errors": 749},
{"value": 0.535000, "errors": 749},
{"value": 0.535500, "errors": 749},
{"value": 0.536000, "errors": 749},
{"value": 0.536500, "errors": 749},
{"value": 0.537000, "errors": 749},
{"value": 0.537500, "errors": 749},
{"value": 0.538000, "errors": 748},
{"value": 0.538500, "errors": 748},
{"value": 0.539000, "errors": 748},
{"value": 0.539500, "errors": 748},
{"value": 0.540000, "errors": 748},
{"value": 0.540500, "errors": 748},
{"value": 0.541000, "errors": 748},
{"value": 0.541500, "errors": 748},
{"value": 0.542000, "errors": 748},
{"value": 0.542500, "errors": 748},
{"value": 0.543000, "errors": 748},
{"value": 0.543500, "errors": 748},
{"value": 0.544000, "errors": 748},
{"value": 0.544500, "errors": 748},
{"value": 0.545000, "errors": 748},
{"value": 0.545500, "errors": 748},
{"value": 0.546000, "errors": 748},
{"value": 0.546500, "errors": 748},
{"value": 0.547000, "errors": 748},
{"value": 0.547500, "errors": 748},
{"value": 0.548000, "errors": 748},
{"value": 0.548500, "errors": 748},
{"value": 0.549000, "errors": 748},
{"value": 0.549500, "errors": 748},
{"value": 0.550000, "errors": 748},
{"value": 0.550500, "errors": 748},
{"value": 0.551000, "errors": 748},
{"value": 0.551500, "errors": 748},
{"value": 0.552000, "errors": 748},
{"value": 0.552500, "errors": 748},
{"value": 0.553000, "errors": 748},
{"value": 0.553500, "errors": 748},
{"value": 0.554000, "errors": 748},
{"value": 0.554500, "errors": 748},
{"value": 0.555000, "errors": 748},
{"value": 0.555500, "errors": 748},
{"value": 0.556000, "errors": 748},
{"value": 0.556500, "errors": 748},
{"value": 0.557000, "errors": 748},
{"value": 0.557500, "errors": 748},
{"value": 0.558000, "errors": 748},
{"value": 0.558500, "errors": 748},
{"value": 0.559000, "errors": 748},
{"value": 0.559500, "errors": 748},
{"value": 0.560000, "errors": 748},
{"value": 0.560500, "errors": 748},
{"value": 0.561000, "errors": 748},
{"value": 0.561500, "errors": 748},
{"value": 0.562000, "errors": 748},
{"value": 0.562500, "errors": 748},
{"value": 0.563000, "errors": 748},
{"value": 0.563500, "errors": 748},
{"value": 0.564000, "errors": 748},
{"value": 0.564500, "errors": 748},
{"value": 0.565000, "errors": 748},
{"value": 0.565500, "errors": 748},
{"value": 0.566000, "errors": 748},
{"value": 0.566500, "errors": 748},
{"value": 0.567000, "errors": 748},
{"value": 0.567500, "errors": 748},
{"value": 0.568000, "errors": 748},
{"value": 0.568500, "errors": 748},
{"value": 0.569000, "errors": 748},
{"value": 0.569500, "errors": 748},
{"value": 0.570000, "errors": 748},
{"value": 0.570500, "errors": 748},
{"value": 0.571000, "errors": 748},
{"value": 0.571500, "errors": 748},
{"value": 0.572000, "errors": 747},
{"value": 0.572500, "errors": 747},
{"value": 0.573000, "errors": 747},
{"value": 0.573500, "errors": 747},
{"value": 0.574000, "errors": 747},
{"value": 0.574500, "errors": 747},
{"value": 0.575000, "errors": 747},
{"value": 0.575500, "errors": 747},
{"value": 0.576000, "errors": 747},
{"value": 0.576500, "errors": 747},
{"value": 0.577000, "errors": 747},
{"value": 0.577500, "errors": 747},
{"value": 0.578000, "errors": 747},
{"value": 0.578500, "errors": 747},
{"value": 0.579000, "errors": 747},
{"value": 0.579500, "errors": 747},
{"value": 0.580000, "errors": 747},
{"value": 0.580500, "errors": 747},
{"value": 0.581000, "errors": 747},
{"value": 0.581500, "errors": 747},
{"value": 0.582000, "errors": 747},
{"value": 0.582500, "errors": 747},
{"value": 0.583000, "errors": 747},
{"value": 0.583500, "errors": 747},
{"value": 0.584000, "errors": 747},
{"value": 0.584500, "errors": 747},
{"value": 0.585000, "errors": 747},
{"value": 0.585500, "errors": 747},
{"value": 0.586000, "errors": 747},
{"value": 0.586500, "errors": 747},
{"value": 0.587000, "errors": 747},
{"value": 0.587500, "errors": 747},
{"value": 0.588000, "errors": 747},
{"value": 0.588500, "errors": 747},
{"value": 0.589000, "errors": 747},
{"value": 0.589500, "errors": 747},
{"value": 0.590000, "errors": 747},
{"value": 0.590500, "errors": 747},
{"value": 0.591000, "errors": 747},
{"value": 0.591500, "errors": 747},
{"value": 0.592000, "errors": 747},
{"value": 0.592500, "errors": 747},
{"value": 0.593000, "errors": 747},
{"value": 0.593500, "errors": 747},
{"value": 0.594000, "errors": 747},
{"value": 0.594500, "errors": 747},
{"value": 0.595000, "errors": 747},
{"value": 0.595500, "errors": 747},
{"value": 0.596000, "errors": 747},
{"value": 0.596500, "errors": 747},
{"value": 0.597000, "errors": 747},
{"value": 0.597500, "errors": 747},
{"value": 0.598000, "errors": 747},
{"value": 0.598500, "errors": 747},
{"value": 0.599000, "errors": 747},
{"value": 0.599500, "errors": 747},
{"value": 0.600000, "errors": 747},
{"value": 0.600500, "errors": 747},
{"value": 0.601000, "errors": 747},
{"value": 0.601500, "errors": 747},
{"value": 0.602000, "errors": 747},
{"value": 0.602500, "errors": 747},
{"value": 0.603000, "errors": 747},
{"value": 0.603500, "errors": 747},
{"value": 0.604000, "errors": 747},
{"value": 0.604500, "errors": 747},
{"value": 0.605000, "errors": 747},
{"value": 0.605500, "errors": 747},
{"value": 0.606000, "errors": 747},
{"value": 0.606500, "errors": 747},
{"value": 0.607000, "errors": 747},
{"value": 0.607500, "errors": 747},
{"value": 0.608000, "errors": 747},
{"value": 0.608500, "errors": 747},
{"value": 0.609000, "errors": 747},
{"value": 0.609500, "errors": 747},
{"value": 0.610000, "errors": 747},
{"value": 0.610500, "errors": 747},
{"value": 0.611000, "errors": 747},
{"value": 0.611500, "errors": 747},
{"value": 0.612000, "errors": 747},
{"value": 0.612500, "errors": 747},
{"value": 0.613000, "errors": 747},
{"value": 0.613500, "errors": 747},
{"value": 0.614000, "errors": 747},
{"value": 0.614500, "errors": 747},
{"value": 0.615000, "errors": 747},
{"value": 0.615500, "errors": 747},
{"value": 0.616000, "errors": 747},
{"value": 0.616500, "errors": 747},
{"value": 0.617000, "errors": 747},
{"value": 0.617500, "errors": 747},
{"value": 0.618000, "errors": 747},
{"value": 0.618500, "errors": 747},
{"value": 0.619000, "errors": 747},
{"value": 0.619500, "errors": 747},
{"value": 0.620000, "errors": 747},
{"value": 0.620500, "errors": 747},
{"value": 0.621000, "errors": 747},
{"value": 0.621500, "errors": 747},
{"value": 0.622000, "errors": 747},
{"value": 0.622500, "errors": 747},
{"value": 0.623000, "errors": 747},
{"value": 0.623500, "errors": 747},
{"value": 0.624000, "errors": 747},
{"value": 0.624500, "errors": 747},
{"value": 0.625000, "errors": 747},
{"value": 0.625500, "errors": 747},
{"value": 0.626000, "errors": 747},
{"value": 0.626500, "errors": 747},
{"value": 0.627000, "errors": 747},
{"value": 0.627500, "errors": 747},
{"value": 0.628000, "errors": 747},
{"value": 0.628500, "errors": 747},
{"value": 0.629000, "errors": 747},
{"value": 0.629500, "errors": 747},
{"value": 0.630000, "errors": 747},
{"value": 0.630500, "errors": 747},
{"value": 0.631000, "errors": 747},
{"value": 0.631500, "errors": 747},
{"value": 0.632000, "errors": 747},
{"value": 0.632500, "errors": 747},
{"value": 0.633000, "errors": 747},
{"value": 0.633500, "errors": 747},
{"value": 0.634000, "errors": 747},
{"value": 0.634500, "errors": 747},
{"value": 0.635000, "errors": 747},
{"value": 0.635500, "errors": 747},
{"value": 0.636000, "errors": 747},
{"value": 0.636500, "errors": 747},
{"value": 0.637000, "errors": 747},
{"value": 0.637500, "errors": 747},
{"value": 0.638000, "errors": 747},
{"value": 0.638500, "errors": 747},
{"value": 0.639000, "errors": 747},
{"value": 0.639500, "errors": 747},
{"value": 0.640000, "errors": 747},
{"value": 0.640500, "errors": 747},
{"value": 0.641000, "errors": 747},
{"value": 0.641500, "errors": 747},
{"value": 0.642000, "errors": 747},
{"value": 0.642500, "errors": 747},
{"value": 0.643000, "errors": 747},
{"value": 0.643500, "errors": 747},
{"value": 0.644000, "errors": 747},
{"value": 0.644500, "errors": 747},
{"value": 0.645000, "errors": 747},
{"value": 0.645500, "errors": 747},
{"value": 0.646000, "errors": 747},
{"value": 0.646500, "errors": 747},
{"value": 0.647000, "errors": 747},
{"value": 0.647500, "errors": 747},
{"value": 0.648000, "errors": 747},
{"value": 0.648500, "errors": 747},
{"value": 0.649000, "errors": 747},
{"value": 0.649500, "errors": 747},
{"value": 0.650000, "errors": 747},
{"value": 0.650500, "errors": 747},
{"value": 0.651000, "errors": 747},
{"value": 0.651500, "errors": 747},
{"value": 0.652000, "errors": 747},
{"value": 0.652500, "errors": 747},
{"value": 0.653000, "errors": 747},
{"value": 0.653500, "errors": 747},
{"value": 0.654000, "errors": 747},
{"value": 0.654500, "errors": 747},
{"value": 0.655000, "errors": 747},
{"value": 0.655500, "errors": 747},
{"value": 0.656000, "errors": 747},
{"value": 0.656500, "errors": 747},
{"value": 0.657000, "errors": 747},
{"value": 0.657500, "errors": 747},
{"value": 0.658000, "errors": 747},
{"value": 0.658500, "errors": 747},
{"value": 0.659000, "errors": 747},
{"value": 0.659500, "errors": 747},
{"value": 0.660000, "errors": 747},
{"value": 0.660500, "errors": 747},
{"value": 0.661000, "errors": 747},
{"value": 0.661500, "errors": 747},
{"value": 0.662000, "errors": 747},
{"value": 0.662500, "errors": 747},
{"value": 0.663000, "errors": 747},
{"value": 0.663500, "errors": 747},
{"value": 0.664000, "errors": 747},
{"value": 0.664500, "errors": 747},
{"value": 0.665000, "errors": 747},
{"value": 0.665500, "errors": 747},
{"value": 0.666000, "errors": 747},
{"value": 0.666500, "errors": 747},
{"value": 0.667000, "errors": 747},
{"value": 0.667500, "errors": 747},
{"value": 0.668000, "errors": 747},
{"value": 0.668500, "errors": 747},
{"value": 0.669000, "errors": 747},
{"value": 0.669500, "errors": 747},
{"value": 0.670000, "errors": 747},
{"value": 0.670500, "errors": 747},
{"value": 0.671000, "errors": 747},
{"value": 0.671500, "errors": 747},
{"value": 0.672000, "errors": 747},
{"value": 0.672500, "errors": 747},
{"value": 0.673000, "errors": 747},
{"value": 0.673500, "errors": 747},
{"value": 0.674000, "errors": 747},
{"value": 0.674500, "errors": 747},
{"value": 0.675000, "errors": 747},
{"value": 0.675500, "errors": 747},
{"value": 0.676000, "errors": 747},
{"value": 0.676500, "errors": 747},
{"value": 0.677000, "errors": 747},
{"value": 0.677500, "errors": 747},
{"value": 0.678000, "errors": 747},
{"value": 0.678500, "errors": 747},
{"value": 0.679000, "errors": 747},
{"value": 0.679500, "errors": 747},
{"value": 0.680000, "errors": 747},
{"value": 0.680500, "errors": 747},
{"value": 0.681000, "errors": 747},
{"value": 0.681500, "errors": 747},
{"value": 0.682000, "errors": 747},
{"value": 0.682500, "errors": 747},
{"value": 0.683000, "errors": 747},
{"value": 0.683500, "errors": 747},
{"value": 0.684000, "errors": 747},
{"value": 0.684500, "errors": 747},
{"value": 0.685000, "errors": 747},
{"value": 0.685500, "errors": 747},
{"value": 0.686000, "errors": 747},
{"value": 0.686500, "errors": 747},
{"value": 0.687000, "errors": 747},
{"value": 0.687500, "errors": 747},
{"value": 0.688000, "errors": 747},
{"value": 0.688500, "errors": 747},
{"value": 0.689000, "errors": 747},
{"value": 0.689500, "errors": 747},
{"value": 0.690000, "errors": 747},
{"value": 0.690500, "errors": 747},
{"value": 0.691000, "errors": 747},
{"value": 0.691500, "errors": 747},
{"value": 0.692000, "errors": 747},
{"value": 0.692500, "errors": 747},
{"value": 0.693000, "errors": 747},
{"value": 0.693500, "errors": 747},
{"value": 0.694000, "errors": 747},
{"value": 0.694500, "errors": 747},
{"value": 0.695000, "errors": 747},
{"value": 0.695500, "errors": 747},
{"value": 0.696000, "errors": 747},
{"value": 0.696500, "errors": 747},
{"value": 0.697000, "errors": 747},
{"value": 0.697500, "errors": 747},
{"value": 0.698000, "errors": 747},
{"value": 0.698500, "errors": 747},
{"value": 0.699000, "errors": 747},
{"value": 0.699500, "errors": 747},
{"value": 0.700000, "errors": 747},
]
x = []
y = []
for value in data:
    x.append(value["value"])
    y.append(value["errors"])
from pandas import *
d = {"x": x, "y": y}
df = DataFrame(d)
import matplotlib.pyplot as plt
from pandas.tools.rplot import *
plt.plot(x, y, 'ro')
plt.ylabel('errors')
plt.xlabel('threshold_p_for_first_filter_separator_character')
plt.title('threshold_p_for_first_filter_separator_character vs errors count')
# Fit a degree-4 polynomial to the (threshold, error count) points and overlay it.
# Polynomial is assumed to be defined earlier in this script.
polynomial = Polynomial(x, y, 4)
new_x = []
new_y = []
current_x = 0.
while current_x < 0.62:
    new_x.append(current_x)
    new_y.append(polynomial.getval(current_x))
    current_x += 0.00005
plt.plot(new_x, new_y, 'ro')
print(polynomial.getval(0.))
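# A minimal alternative sketch, kept commented out so the flow above is unchanged:
# numpy.polyfit could stand in for the custom Polynomial class assumed earlier in
# this script (the degree, 4, mirrors the call above).
#
#   import numpy as np
#   coeffs = np.polyfit(x, y, 4)      # least-squares fit of a degree-4 polynomial
#   fitted = np.poly1d(coeffs)        # callable polynomial
#   xs = np.arange(0., 0.62, 0.00005)
#   plt.plot(xs, fitted(xs), 'b-')    # overlay the fitted curve
#   plt.show()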
|
mit
|
tony/kivy
|
kivy/core/text/markup.py
|
16
|
32053
|
'''
Text Markup
===========
.. versionadded:: 1.1.0
We provide a simple text markup for inline text styling. The syntax looks the
same as `BBCode <http://en.wikipedia.org/wiki/BBCode>`_.
A tag is defined as ``[tag]``, and should have a corresponding
``[/tag]`` closing tag. For example::
[b]Hello [color=ff0000]world[/color][/b]
The following tags are available:
``[b][/b]``
Activate bold text
``[i][/i]``
Activate italic text
``[u][/u]``
Underlined text
``[s][/s]``
Strikethrough text
``[font=<str>][/font]``
Change the font
``[size=<size>][/size]``
    Change the font size. <size> should be an integer, optionally with a
    unit (e.g. ``16sp``)
``[color=#<color>][/color]``
Change the text color
``[ref=<str>][/ref]``
    Add an interactive zone. The reference + all the word boxes inside the
    reference will be available in :attr:`MarkupLabel.refs`
``[anchor=<str>]``
Put an anchor in the text. You can get the position of your anchor within
the text with :attr:`MarkupLabel.anchors`
``[sub][/sub]``
Display the text at a subscript position relative to the text before it.
``[sup][/sup]``
Display the text at a superscript position relative to the text before it.
If you need to escape the markup from the current text, use
:func:`kivy.utils.escape_markup`.
'''
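# A small, hypothetical usage sketch (not part of this module; it assumes a
# running Kivy application and uses kivy.uix.label.Label, whose ``markup``
# property routes the text through this parser):
#
#   from kivy.uix.label import Label
#   lbl = Label(text='[b]Hello [color=ff0000]world[/color][/b]', markup=True)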
__all__ = ('MarkupLabel', )
import re
from kivy.properties import dpi2px
from kivy.parser import parse_color
from kivy.logger import Logger
from kivy.core.text import Label, LabelBase
from kivy.core.text.text_layout import layout_text, LayoutWord, LayoutLine
from copy import copy
from functools import partial
# We need to do this trick when documentation is generated
MarkupLabelBase = Label
if Label is None:
MarkupLabelBase = LabelBase
class MarkupLabel(MarkupLabelBase):
'''Markup text label.
    See module documentation for more information.
'''
def __init__(self, *largs, **kwargs):
self._style_stack = {}
self._refs = {}
self._anchors = {}
super(MarkupLabel, self).__init__(*largs, **kwargs)
self._internal_size = 0, 0
self._cached_lines = []
@property
def refs(self):
'''Get the bounding box of all the ``[ref=...]``::
{ 'refA': ((x1, y1, x2, y2), (x1, y1, x2, y2)), ... }
'''
return self._refs
@property
def anchors(self):
'''Get the position of all the ``[anchor=...]``::
{ 'anchorA': (x, y), 'anchorB': (x, y), ... }
'''
return self._anchors
@property
def markup(self):
        '''Return the text with all the markup split into tokens::
            >>> MarkupLabel('[b]Hello world[/b]').markup
            >>> ('[b]', 'Hello world', '[/b]')
        '''
        s = re.split(r'(\[.*?\])', self.label)
s = [x for x in s if x != '']
return s
def _push_style(self, k):
if k not in self._style_stack:
self._style_stack[k] = []
self._style_stack[k].append(self.options[k])
def _pop_style(self, k):
if k not in self._style_stack or len(self._style_stack[k]) == 0:
Logger.warning('Label: pop style stack without push')
return
v = self._style_stack[k].pop()
self.options[k] = v
def render(self, real=False):
options = copy(self.options)
if not real:
ret = self._pre_render()
else:
ret = self._render_real()
self.options = options
return ret
def _pre_render(self):
# split markup, words, and lines
# result: list of word with position and width/height
# during the first pass, we don't care about h/valign
self._cached_lines = lines = []
self._refs = {}
self._anchors = {}
clipped = False
w = h = 0
uw, uh = self.text_size
spush = self._push_style
spop = self._pop_style
opts = options = self.options
options['_ref'] = None
options['_anchor'] = None
options['script'] = 'normal'
shorten = options['shorten']
        # if shorten, then don't split lines to fit uw, because they will be
        # flattened later when shortening; lines that were broken mid-word
        # would otherwise gain a space mid-word when the lines are joined
uw_temp = None if shorten else uw
xpad = options['padding_x']
uhh = (None if uh is not None and options['valign'] != 'top' or
options['shorten'] else uh)
options['strip'] = options['strip'] or options['halign'] == 'justify'
for item in self.markup:
if item == '[b]':
spush('bold')
options['bold'] = True
self.resolve_font_name()
elif item == '[/b]':
spop('bold')
self.resolve_font_name()
elif item == '[i]':
spush('italic')
options['italic'] = True
self.resolve_font_name()
elif item == '[/i]':
spop('italic')
self.resolve_font_name()
elif item == '[u]':
spush('underline')
options['underline'] = True
self.resolve_font_name()
elif item == '[/u]':
spop('underline')
self.resolve_font_name()
elif item == '[s]':
spush('strikethrough')
options['strikethrough'] = True
self.resolve_font_name()
elif item == '[/s]':
spop('strikethrough')
self.resolve_font_name()
elif item[:6] == '[size=':
item = item[6:-1]
try:
if item[-2:] in ('px', 'pt', 'in', 'cm', 'mm', 'dp', 'sp'):
size = dpi2px(item[:-2], item[-2:])
else:
size = int(item)
except ValueError:
raise
size = options['font_size']
spush('font_size')
options['font_size'] = size
elif item == '[/size]':
spop('font_size')
elif item[:7] == '[color=':
color = parse_color(item[7:-1])
spush('color')
options['color'] = color
elif item == '[/color]':
spop('color')
elif item[:6] == '[font=':
fontname = item[6:-1]
spush('font_name')
options['font_name'] = fontname
self.resolve_font_name()
elif item == '[/font]':
spop('font_name')
self.resolve_font_name()
elif item[:5] == '[sub]':
spush('font_size')
spush('script')
options['font_size'] = options['font_size'] * .5
options['script'] = 'subscript'
elif item == '[/sub]':
spop('font_size')
spop('script')
elif item[:5] == '[sup]':
spush('font_size')
spush('script')
options['font_size'] = options['font_size'] * .5
options['script'] = 'superscript'
elif item == '[/sup]':
spop('font_size')
spop('script')
elif item[:5] == '[ref=':
ref = item[5:-1]
spush('_ref')
options['_ref'] = ref
elif item == '[/ref]':
spop('_ref')
elif not clipped and item[:8] == '[anchor=':
options['_anchor'] = item[8:-1]
elif not clipped:
item = item.replace('&bl;', '[').replace(
'&br;', ']').replace('&', '&')
opts = copy(options)
extents = self.get_cached_extents()
opts['space_width'] = extents(' ')[0]
w, h, clipped = layout_text(
item, lines, (w, h), (uw_temp, uhh),
opts, extents,
append_down=True,
complete=False
)
if len(lines): # remove any trailing spaces from the last line
old_opts = self.options
self.options = copy(opts)
w, h, clipped = layout_text(
'', lines, (w, h), (uw_temp, uhh),
self.options, self.get_cached_extents(),
append_down=True,
complete=True
)
self.options = old_opts
self.is_shortened = False
if shorten:
options['_ref'] = None # no refs for you!
options['_anchor'] = None
w, h, lines = self.shorten_post(lines, w, h)
self._cached_lines = lines
# when valign is not top, for markup we layout everything (text_size[1]
# is temporarily set to None) and after layout cut to size if too tall
elif uh != uhh and h > uh and len(lines) > 1:
if options['valign'] == 'bottom':
i = 0
while i < len(lines) - 1 and h > uh:
h -= lines[i].h
i += 1
del lines[:i]
else: # middle
i = 0
top = int(h / 2. + uh / 2.) # remove extra top portion
while i < len(lines) - 1 and h > top:
h -= lines[i].h
i += 1
del lines[:i]
i = len(lines) - 1 # remove remaining bottom portion
while i and h > uh:
h -= lines[i].h
i -= 1
del lines[i + 1:]
# now justify the text
if options['halign'] == 'justify' and uw is not None:
# XXX: update refs to justified pos
# when justify, each line should've been stripped already
split = partial(re.split, re.compile('( +)'))
uww = uw - 2 * xpad
chr = type(self.text)
space = chr(' ')
empty = chr('')
for i in range(len(lines)):
line = lines[i]
words = line.words
# if there's nothing to justify, we're done
if (not line.w or int(uww - line.w) <= 0 or not len(words) or
line.is_last_line):
continue
done = False
parts = [None, ] * len(words) # contains words split by space
idxs = [None, ] * len(words) # indices of the space in parts
# break each word into spaces and add spaces until it's full
# do first round of split in case we don't need to split all
for w in range(len(words)):
word = words[w]
sw = word.options['space_width']
p = parts[w] = split(word.text)
idxs[w] = [v for v in range(len(p)) if
p[v].startswith(' ')]
# now we have the indices of the spaces in split list
for k in idxs[w]:
# try to add single space at each space
if line.w + sw > uww:
done = True
break
line.w += sw
word.lw += sw
p[k] += space
if done:
break
# there's not a single space in the line?
if not any(idxs):
continue
# now keep adding spaces to already split words until done
while not done:
for w in range(len(words)):
if not idxs[w]:
continue
word = words[w]
sw = word.options['space_width']
p = parts[w]
for k in idxs[w]:
# try to add single space at each space
if line.w + sw > uww:
done = True
break
line.w += sw
word.lw += sw
p[k] += space
if done:
break
# if not completely full, push last words to right edge
diff = int(uww - line.w)
if diff > 0:
# find the last word that had a space
for w in range(len(words) - 1, -1, -1):
if not idxs[w]:
continue
break
old_opts = self.options
self.options = word.options
word = words[w]
# split that word into left/right and push right till uww
l_text = empty.join(parts[w][:idxs[w][-1]])
r_text = empty.join(parts[w][idxs[w][-1]:])
left = LayoutWord(
word.options,
self.get_extents(l_text)[0],
word.lh,
l_text
)
right = LayoutWord(
word.options,
self.get_extents(r_text)[0],
word.lh,
r_text
)
left.lw = max(left.lw, word.lw + diff - right.lw)
self.options = old_opts
# now put words back together with right/left inserted
for k in range(len(words)):
if idxs[k]:
words[k].text = empty.join(parts[k])
words[w] = right
words.insert(w, left)
else:
for k in range(len(words)):
if idxs[k]:
words[k].text = empty.join(parts[k])
line.w = uww
w = max(w, uww)
self._internal_size = w, h
if uw:
w = uw
if uh:
h = uh
if h > 1 and w < 2:
w = 2
if w < 1:
w = 1
if h < 1:
h = 1
return int(w), int(h)
def render_lines(self, lines, options, render_text, y, size):
xpad = options['padding_x']
w = size[0]
halign = options['halign']
refs = self._refs
anchors = self._anchors
for layout_line in lines: # for plain label each line has only one str
lw, lh = layout_line.w, layout_line.h
x = xpad
if halign == 'center':
x = int((w - lw) / 2.)
elif halign == 'right':
x = max(0, int(w - lw - xpad))
layout_line.x = x
layout_line.y = y
psp = pph = 0
for word in layout_line.words:
options = self.options = word.options
# the word height is not scaled by line_height, only lh was
wh = options['line_height'] * word.lh
# calculate sub/super script pos
if options['script'] == 'superscript':
script_pos = max(0, psp if psp else self.get_descent())
psp = script_pos
pph = wh
elif options['script'] == 'subscript':
script_pos = min(lh - wh, ((psp + pph) - wh)
if pph else (lh - wh))
pph = wh
psp = script_pos
else:
script_pos = (lh - wh) / 1.25
psp = pph = 0
if len(word.text):
render_text(word.text, x, y + script_pos)
# should we record refs ?
ref = options['_ref']
if ref is not None:
if ref not in refs:
refs[ref] = []
refs[ref].append((x, y, x + word.lw, y + wh))
# Should we record anchors?
anchor = options['_anchor']
if anchor is not None:
if anchor not in anchors:
anchors[anchor] = (x, y)
x += word.lw
y += lh
return y
def shorten_post(self, lines, w, h, margin=2):
''' Shortens the text to a single line according to the label options.
This function operates on a text that has already been laid out because
for markup, parts of text can have different size and options.
If :attr:`text_size` [0] is None, the lines are returned unchanged.
Otherwise, the lines are converted to a single line fitting within the
constrained width, :attr:`text_size` [0].
:params:
`lines`: list of `LayoutLine` instances describing the text.
`w`: int, the width of the text in lines, including padding.
`h`: int, the height of the text in lines, including padding.
            `margin`: int, the additional space left on the sides. This is in
            addition to :attr:`padding_x`.
        :returns:
            3-tuple of (w, h, lines), where w and h are similar to the input
            and contain the resulting width / height of the text, including
            padding. lines is a list containing a single `LayoutLine`, which
            contains the words for the line.
'''
def n(line, c):
''' A function similar to text.find, except it's an iterator that
returns successive occurrences of string c in list line. line is
not a string, but a list of LayoutWord instances that we walk
from left to right returning the indices of c in the words as we
encounter them. Note that the options can be different among the
words.
:returns:
3-tuple: the index of the word in line, the index of the
occurrence in word, and the extents (width) of the combined
words until this occurrence, not including the occurrence char.
If no more are found it returns (-1, -1, total_w) where total_w
is the full width of all the words.
'''
total_w = 0
for w in range(len(line)):
word = line[w]
if not word.lw:
continue
f = partial(word.text.find, c)
i = f()
while i != -1:
self.options = word.options
yield w, i, total_w + self.get_extents(word.text[:i])[0]
i = f(i + 1)
self.options = word.options
total_w += self.get_extents(word.text)[0]
yield -1, -1, total_w # this should never be reached, really
def p(line, c):
''' Similar to the `n` function, except it returns occurrences of c
from right to left in the list, line, similar to rfind.
'''
total_w = 0
offset = 0 if len(c) else 1
for w in range(len(line) - 1, -1, -1):
word = line[w]
if not word.lw:
continue
f = partial(word.text.rfind, c)
i = f()
while i != -1:
self.options = word.options
yield (w, i, total_w +
self.get_extents(word.text[i + 1:])[0])
if i:
i = f(0, i - offset)
else:
if not c:
self.options = word.options
yield (w, -1, total_w +
self.get_extents(word.text)[0])
break
self.options = word.options
total_w += self.get_extents(word.text)[0]
yield -1, -1, total_w # this should never be reached, really
def n_restricted(line, uw, c):
''' Similar to the function `n`, except it only returns the first
occurrence and it's not an iterator. Furthermore, if the first
occurrence doesn't fit within width uw, it returns the index of
whatever amount of text will still fit in uw.
:returns:
similar to the function `n`, except it's a 4-tuple, with the
last element a boolean, indicating if we had to clip the text
to fit in uw (True) or if the whole text until the first
occurrence fitted in uw (False).
'''
total_w = 0
if not len(line):
                return 0, 0, 0, False  # empty line: nothing found, not clipped
for w in range(len(line)):
word = line[w]
f = partial(word.text.find, c)
self.options = word.options
extents = self.get_cached_extents()
i = f()
if i != -1:
ww = extents(word.text[:i])[0]
if i != -1 and total_w + ww <= uw: # found and it fits
return w, i, total_w + ww, False
elif i == -1:
ww = extents(word.text)[0]
if total_w + ww <= uw: # wasn't found and all fits
total_w += ww
continue
i = len(word.text)
# now just find whatever amount of the word does fit
e = 0
while e != i and total_w + extents(word.text[:e])[0] <= uw:
e += 1
e = max(0, e - 1)
return w, e, total_w + extents(word.text[:e])[0], True
return -1, -1, total_w, False
def p_restricted(line, uw, c):
''' Similar to `n_restricted`, except it returns the first
occurrence starting from the right, like `p`.
'''
total_w = 0
if not len(line):
                return 0, 0, 0, False  # empty line: nothing found, not clipped
for w in range(len(line) - 1, -1, -1):
word = line[w]
f = partial(word.text.rfind, c)
self.options = word.options
extents = self.get_cached_extents()
i = f()
if i != -1:
ww = extents(word.text[i + 1:])[0]
if i != -1 and total_w + ww <= uw: # found and it fits
return w, i, total_w + ww, False
elif i == -1:
ww = extents(word.text)[0]
if total_w + ww <= uw: # wasn't found and all fits
total_w += ww
continue
# now just find whatever amount of the word does fit
s = len(word.text) - 1
while s >= 0 and total_w + extents(word.text[s:])[0] <= uw:
s -= 1
return w, s, total_w + extents(word.text[s + 1:])[0], True
return -1, -1, total_w, False
textwidth = self.get_cached_extents()
uw = self.text_size[0]
if uw is None:
return w, h, lines
old_opts = copy(self.options)
uw = max(0, int(uw - old_opts['padding_x'] * 2 - margin))
chr = type(self.text)
ssize = textwidth(' ')
c = old_opts['split_str']
line_height = old_opts['line_height']
xpad, ypad = old_opts['padding_x'], old_opts['padding_y']
dir = old_opts['shorten_from'][0]
# flatten lines into single line
line = []
last_w = 0
for l in range(len(lines)):
# concatenate (non-empty) inside lines with a space
this_line = lines[l]
if last_w and this_line.w and not this_line.line_wrap:
line.append(LayoutWord(old_opts, ssize[0], ssize[1], chr(' ')))
last_w = this_line.w or last_w
for word in this_line.words:
if word.lw:
line.append(word)
# if that fits, just return the flattened line
lw = sum([word.lw for word in line])
if lw <= uw:
lh = max([word.lh for word in line] + [0]) * line_height
self.is_shortened = False
return (
lw + 2 * xpad,
lh + 2 * ypad,
[LayoutLine(0, 0, lw, lh, 1, 0, line)]
)
elps_opts = copy(old_opts)
if 'ellipsis_options' in old_opts:
elps_opts.update(old_opts['ellipsis_options'])
# Set new opts for ellipsis
self.options = elps_opts
# find the size of ellipsis that'll fit
elps_s = textwidth('...')
if elps_s[0] > uw: # even ellipsis didn't fit...
self.is_shortened = True
s = textwidth('..')
if s[0] <= uw:
return (
s[0] + 2 * xpad,
s[1] * line_height + 2 * ypad,
[LayoutLine(
0, 0, s[0], s[1], 1, 0,
[LayoutWord(old_opts, s[0], s[1], '..')])]
)
else:
s = textwidth('.')
return (
s[0] + 2 * xpad,
s[1] * line_height + 2 * ypad,
[LayoutLine(
0, 0, s[0], s[1], 1, 0,
[LayoutWord(old_opts, s[0], s[1], '.')])]
)
elps = LayoutWord(elps_opts, elps_s[0], elps_s[1], '...')
uw -= elps_s[0]
# Restore old opts
self.options = old_opts
# now find the first left and right words that fit
w1, e1, l1, clipped1 = n_restricted(line, uw, c)
w2, s2, l2, clipped2 = p_restricted(line, uw, c)
if dir != 'l': # center or right
line1 = None
if clipped1 or clipped2 or l1 + l2 > uw:
# if either was clipped or both don't fit, just take first
if len(c):
self.options = old_opts
old_opts['split_str'] = ''
res = self.shorten_post(lines, w, h, margin)
self.options['split_str'] = c
self.is_shortened = True
return res
line1 = line[:w1]
last_word = line[w1]
last_text = last_word.text[:e1]
self.options = last_word.options
s = self.get_extents(last_text)
line1.append(LayoutWord(last_word.options, s[0], s[1],
last_text))
elif (w1, e1) == (-1, -1): # this shouldn't occur
line1 = line
if line1:
line1.append(elps)
lw = sum([word.lw for word in line1])
lh = max([word.lh for word in line1]) * line_height
self.options = old_opts
self.is_shortened = True
return (
lw + 2 * xpad,
lh + 2 * ypad,
[LayoutLine(0, 0, lw, lh, 1, 0, line1)]
)
# now we know that both the first and last word fit, and that
            # there's at least one instance of the split_str in the line
if (w1, e1) != (w2, s2): # more than one split_str
if dir == 'r':
f = n(line, c) # iterator
assert next(f)[:-1] == (w1, e1) # first word should match
ww1, ee1, l1 = next(f)
while l2 + l1 <= uw:
w1, e1 = ww1, ee1
ww1, ee1, l1 = next(f)
if (w1, e1) == (w2, s2):
break
else: # center
f = n(line, c) # iterator
f_inv = p(line, c) # iterator
assert next(f)[:-1] == (w1, e1)
assert next(f_inv)[:-1] == (w2, s2)
while True:
if l1 <= l2:
ww1, ee1, l1 = next(f) # hypothesize that next fit
if l2 + l1 > uw:
break
w1, e1 = ww1, ee1
if (w1, e1) == (w2, s2):
break
else:
ww2, ss2, l2 = next(f_inv)
if l2 + l1 > uw:
break
w2, s2 = ww2, ss2
if (w1, e1) == (w2, s2):
break
else: # left
line1 = [elps]
if clipped1 or clipped2 or l1 + l2 > uw:
# if either was clipped or both don't fit, just take last
if len(c):
self.options = old_opts
old_opts['split_str'] = ''
res = self.shorten_post(lines, w, h, margin)
self.options['split_str'] = c
self.is_shortened = True
return res
first_word = line[w2]
first_text = first_word.text[s2 + 1:]
self.options = first_word.options
s = self.get_extents(first_text)
line1.append(LayoutWord(first_word.options, s[0], s[1],
first_text))
line1.extend(line[w2 + 1:])
elif (w1, e1) == (-1, -1): # this shouldn't occur
line1 = line
if len(line1) != 1:
lw = sum([word.lw for word in line1])
lh = max([word.lh for word in line1]) * line_height
self.options = old_opts
self.is_shortened = True
return (
lw + 2 * xpad,
lh + 2 * ypad,
[LayoutLine(0, 0, lw, lh, 1, 0, line1)]
)
# now we know that both the first and last word fit, and that
            # there's at least one instance of the split_str in the line
if (w1, e1) != (w2, s2): # more than one split_str
f_inv = p(line, c) # iterator
assert next(f_inv)[:-1] == (w2, s2) # last word should match
ww2, ss2, l2 = next(f_inv)
while l2 + l1 <= uw:
w2, s2 = ww2, ss2
ww2, ss2, l2 = next(f_inv)
if (w1, e1) == (w2, s2):
break
# now add back the left half
line1 = line[:w1]
last_word = line[w1]
last_text = last_word.text[:e1]
self.options = last_word.options
s = self.get_extents(last_text)
if len(last_text):
line1.append(LayoutWord(last_word.options, s[0], s[1], last_text))
line1.append(elps)
# now add back the right half
first_word = line[w2]
first_text = first_word.text[s2 + 1:]
self.options = first_word.options
s = self.get_extents(first_text)
if len(first_text):
line1.append(LayoutWord(first_word.options, s[0], s[1],
first_text))
line1.extend(line[w2 + 1:])
lw = sum([word.lw for word in line1])
lh = max([word.lh for word in line1]) * line_height
self.options = old_opts
if uw < lw:
self.is_shortened = True
return (
lw + 2 * xpad,
lh + 2 * ypad,
[LayoutLine(0, 0, lw, lh, 1, 0, line1)]
)
|
mit
|
tumblr/collectd
|
contrib/collectd_unixsock.py
|
13
|
8066
|
#-*- coding: ISO-8859-1 -*-
# collect.py: the python collectd-unixsock module.
#
# Requires collectd to be configured with the unixsock plugin, like so:
#
# LoadPlugin unixsock
# <Plugin unixsock>
# SocketFile "/var/run/collectd-unixsock"
# SocketPerms "0775"
# </Plugin>
#
# Copyright (C) 2008 Clay Loveless <[email protected]>
#
# This software is provided 'as-is', without any express or implied
# warranty. In no event will the author be held liable for any damages
# arising from the use of this software.
#
# Permission is granted to anyone to use this software for any purpose,
# including commercial applications, and to alter it and redistribute it
# freely, subject to the following restrictions:
#
# 1. The origin of this software must not be misrepresented; you must not
# claim that you wrote the original software. If you use this software
# in a product, an acknowledgment in the product documentation would be
# appreciated but is not required.
# 2. Altered source versions must be plainly marked as such, and must not be
# misrepresented as being the original software.
# 3. This notice may not be removed or altered from any source distribution.
import socket
import sys
class Collectd():
def __init__(self, path='/var/run/collectd-unixsock', noisy=False):
self.noisy = noisy
self.path = path
self._sock = self._connect()
def flush(self, timeout=None, plugins=[], identifiers=[]):
"""Send a FLUSH command.
Full documentation:
http://collectd.org/wiki/index.php/Plain_text_protocol#FLUSH
"""
# have to pass at least one plugin or identifier
if not plugins and not identifiers:
return None
args = []
if timeout:
args.append("timeout=%s" % timeout)
if plugins:
plugin_args = map(lambda x: "plugin=%s" % x, plugins)
args.extend(plugin_args)
if identifiers:
identifier_args = map(lambda x: "identifier=%s" % x, identifiers)
args.extend(identifier_args)
return self._cmd('FLUSH %s' % ' '.join(args))
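    # Hypothetical usage sketch (assumes a reachable collectd unixsock; the
    # plugin names are illustrative):
    #   c = Collectd()
    #   c.flush(timeout=10, plugins=['df', 'load'])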
def getthreshold(self, identifier):
"""Send a GETTHRESHOLD command.
Full documentation:
http://collectd.org/wiki/index.php/Plain_text_protocol#GETTHRESHOLD
"""
numvalues = self._cmd('GETTHRESHOLD "%s"' % identifier)
lines = []
if not numvalues or numvalues < 0:
raise KeyError("Identifier '%s' not found" % identifier)
lines = self._readlines(numvalues)
return lines
def getval(self, identifier, flush_after=True):
"""Send a GETVAL command.
Also flushes the identifier if flush_after is True.
Full documentation:
http://collectd.org/wiki/index.php/Plain_text_protocol#GETVAL
"""
numvalues = self._cmd('GETVAL "%s"' % identifier)
lines = []
if not numvalues or numvalues < 0:
raise KeyError("Identifier '%s' not found" % identifier)
lines = self._readlines(numvalues)
if flush_after:
self.flush(identifiers=[identifier])
return lines
def listval(self):
"""Send a LISTVAL command.
Full documentation:
http://collectd.org/wiki/index.php/Plain_text_protocol#LISTVAL
"""
numvalues = self._cmd('LISTVAL')
lines = []
if numvalues:
lines = self._readlines(numvalues)
return lines
def putnotif(self, message, options={}):
"""Send a PUTNOTIF command.
Options must be passed as a Python dictionary. Example:
options={'severity': 'failure', 'host': 'example.com'}
Full documentation:
http://collectd.org/wiki/index.php/Plain_text_protocol#PUTNOTIF
"""
args = []
if options:
options_args = map(lambda x: "%s=%s" % (x, options[x]), options)
args.extend(options_args)
args.append('message="%s"' % message)
return self._cmd('PUTNOTIF %s' % ' '.join(args))
def putval(self, identifier, values, options={}):
"""Send a PUTVAL command.
Options must be passed as a Python dictionary. Example:
options={'interval': 10}
Full documentation:
http://collectd.org/wiki/index.php/Plain_text_protocol#PUTVAL
"""
args = []
args.append('"%s"' % identifier)
if options:
options_args = map(lambda x: "%s=%s" % (x, options[x]), options)
args.extend(options_args)
values = map(str, values)
args.append(':'.join(values))
return self._cmd('PUTVAL %s' % ' '.join(args))
def _cmd(self, c):
try:
return self._cmdattempt(c)
except socket.error, (errno, errstr):
sys.stderr.write("[error] Sending to socket failed: [%d] %s\n"
% (errno, errstr))
self._sock = self._connect()
return self._cmdattempt(c)
def _cmdattempt(self, c):
if self.noisy:
print "[send] %s" % c
if not self._sock:
sys.stderr.write("[error] Socket unavailable. Can not send.")
return False
self._sock.send(c + "\n")
status_message = self._readline()
if self.noisy:
            print "[receive] %s" % status_message
if not status_message:
return None
code, message = status_message.split(' ', 1)
if int(code):
return int(code)
return False
def _connect(self):
try:
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sock.connect(self.path)
if self.noisy:
print "[socket] connected to %s" % self.path
return sock
except socket.error, (errno, errstr):
sys.stderr.write("[error] Connecting to socket failed: [%d] %s"
% (errno, errstr))
return None
def _readline(self):
"""Read single line from socket"""
if not self._sock:
sys.stderr.write("[error] Socket unavailable. Can not read.")
return None
try:
data = ''
buf = []
recv = self._sock.recv
while data != "\n":
data = recv(1)
if not data:
break
if data != "\n":
buf.append(data)
return ''.join(buf)
except socket.error, (errno, errstr):
sys.stderr.write("[error] Reading from socket failed: [%d] %s"
% (errno, errstr))
self._sock = self._connect()
return None
def _readlines(self, sizehint=0):
"""Read multiple lines from socket"""
total = 0
list = []
while True:
line = self._readline()
if not line:
break
list.append(line)
total = len(list)
if sizehint and total >= sizehint:
break
return list
def __del__(self):
if not self._sock:
return
try:
self._sock.close()
except socket.error, (errno, errstr):
sys.stderr.write("[error] Closing socket failed: [%d] %s"
% (errno, errstr))
if __name__ == '__main__':
"""Collect values from socket and dump to STDOUT"""
c = Collectd('/var/run/collectd-unixsock', noisy=True)
list = c.listval()
for val in list:
stamp, identifier = val.split()
print "\n%s" % identifier
print "\tUpdate time: %s" % stamp
values = c.getval(identifier)
print "\tValue list: %s" % ', '.join(values)
# don't fetch thresholds by default because collectd will crash
        # if there is no threshold for the given identifier
#thresholds = c.getthreshold(identifier)
#print "\tThresholds: %s" % ', '.join(thresholds)
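# Hypothetical PUTVAL sketch (identifier and values are illustrative only and
# assume a matching collectd "load" type definition):
#
#   c = Collectd('/var/run/collectd-unixsock')
#   c.putval('myhost/load/load', ['N', 0.1, 0.2, 0.3], options={'interval': 10})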
|
gpl-2.0
|
flgiordano/netcash
|
+/google-cloud-sdk/lib/surface/deployment_manager/resources/describe.py
|
1
|
3185
|
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""resources describe command."""
from googlecloudsdk.api_lib.deployment_manager import dm_v2_util
from googlecloudsdk.calliope import base
from googlecloudsdk.calliope import exceptions
from googlecloudsdk.core import log
from googlecloudsdk.core import properties
from googlecloudsdk.core import resource_printer
from googlecloudsdk.third_party.apitools.base.py import exceptions as apitools_exceptions
class Describe(base.Command):
"""Provide information about a resource.
This command prints out all available details about a resource.
"""
detailed_help = {
'DESCRIPTION': '{description}',
'EXAMPLES': """\
To display information about a resource, run:
$ {command} --deployment my-deployment my-resource-name
""",
}
@staticmethod
def Args(parser):
"""Args is called by calliope to gather arguments for this command.
Args:
parser: An argparse parser that you can use to add arguments that go
on the command line after this command. Positional arguments are
allowed.
"""
parser.add_argument('resource', help='Resource name.')
def Run(self, args):
"""Run 'resources describe'.
Args:
args: argparse.Namespace, The arguments that this command was invoked
with.
Returns:
The requested resource.
Raises:
HttpException: An http error response was received while executing api
request.
"""
client = self.context['deploymentmanager-client']
messages = self.context['deploymentmanager-messages']
project = properties.VALUES.core.project.Get(required=True)
try:
return client.resources.Get(
messages.DeploymentmanagerResourcesGetRequest(
project=project,
deployment=args.deployment,
resource=args.resource
)
)
except apitools_exceptions.HttpError as error:
raise exceptions.HttpException(dm_v2_util.GetError(error))
def Display(self, unused_args, result):
"""Display prints information about what just happened to stdout.
Args:
unused_args: The same as the args in Run.
result: a Resource object to display.
Raises:
ValueError: if result is None or not a Resource
"""
messages = self.context['deploymentmanager-messages']
if not isinstance(result, messages.Resource):
raise ValueError('result must be a Resource')
resource_printer.Print(resources=result,
print_format=unused_args.format or 'yaml',
out=log.out)
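# Hypothetical invocation mirroring the EXAMPLES entry above (resource and
# deployment names are illustrative; the surface name assumes this command is
# registered under the deployment-manager group):
#   $ gcloud deployment-manager resources describe my-resource-name \
#       --deployment my-deployment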
|
bsd-3-clause
|
vivekmishra1991/scikit-learn
|
sklearn/metrics/classification.py
|
95
|
67713
|
"""Metrics to assess performance on classification task given classe prediction
Functions named as ``*_score`` return a scalar value to maximize: the higher
the better
Functions named as ``*_error`` or ``*_loss`` return a scalar value to minimize:
the lower the better
"""
# Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Olivier Grisel <[email protected]>
# Arnaud Joly <[email protected]>
# Jochen Wersdorfer <[email protected]>
# Lars Buitinck <[email protected]>
# Joel Nothman <[email protected]>
# Noel Dawe <[email protected]>
# Jatin Shah <[email protected]>
# Saurabh Jha <[email protected]>
# License: BSD 3 clause
from __future__ import division
import warnings
import numpy as np
from scipy.sparse import coo_matrix
from scipy.sparse import csr_matrix
from scipy.spatial.distance import hamming as sp_hamming
from ..preprocessing import LabelBinarizer, label_binarize
from ..preprocessing import LabelEncoder
from ..utils import check_array
from ..utils import check_consistent_length
from ..utils import column_or_1d
from ..utils.multiclass import unique_labels
from ..utils.multiclass import type_of_target
from ..utils.validation import _num_samples
from ..utils.sparsefuncs import count_nonzero
from ..utils.fixes import bincount
from .base import UndefinedMetricWarning
def _check_targets(y_true, y_pred):
"""Check that y_true and y_pred belong to the same classification task
This converts multiclass or binary types to a common shape, and raises a
ValueError for a mix of multilabel and multiclass targets, a mix of
multilabel formats, for the presence of continuous-valued or multioutput
targets, or for targets of different lengths.
Column vectors are squeezed to 1d, while multilabel formats are returned
as CSR sparse label indicators.
Parameters
----------
y_true : array-like
y_pred : array-like
Returns
-------
type_true : one of {'multilabel-indicator', 'multiclass', 'binary'}
The type of the true target data, as output by
``utils.multiclass.type_of_target``
y_true : array or indicator matrix
y_pred : array or indicator matrix
"""
check_consistent_length(y_true, y_pred)
type_true = type_of_target(y_true)
type_pred = type_of_target(y_pred)
y_type = set([type_true, type_pred])
if y_type == set(["binary", "multiclass"]):
y_type = set(["multiclass"])
if len(y_type) > 1:
raise ValueError("Can't handle mix of {0} and {1}"
"".format(type_true, type_pred))
    # We can't have more than one value on y_type => the set is no longer needed
y_type = y_type.pop()
# No metrics support "multiclass-multioutput" format
if (y_type not in ["binary", "multiclass", "multilabel-indicator"]):
raise ValueError("{0} is not supported".format(y_type))
if y_type in ["binary", "multiclass"]:
y_true = column_or_1d(y_true)
y_pred = column_or_1d(y_pred)
if y_type.startswith('multilabel'):
y_true = csr_matrix(y_true)
y_pred = csr_matrix(y_pred)
y_type = 'multilabel-indicator'
return y_type, y_true, y_pred
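# Illustrative (assumed) behaviour of _check_targets:
#   _check_targets([0, 1, 1], [0, 1, 2]) -> ('multiclass', ...) because binary
#   and multiclass inputs are promoted to a common 'multiclass' type, whereas
#   mixing a label-indicator matrix with a class vector raises ValueError.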
def _weighted_sum(sample_score, sample_weight, normalize=False):
if normalize:
return np.average(sample_score, weights=sample_weight)
elif sample_weight is not None:
return np.dot(sample_score, sample_weight)
else:
return sample_score.sum()
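# Worked example (assumed inputs) for _weighted_sum:
#   sample_score=[1, 0, 1], sample_weight=[2, 1, 1]
#   normalize=True  -> np.average(..., weights=...) == (2 + 0 + 1) / 4 == 0.75
#   normalize=False -> np.dot(...) == 3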
def accuracy_score(y_true, y_pred, normalize=True, sample_weight=None):
"""Accuracy classification score.
In multilabel classification, this function computes subset accuracy:
the set of labels predicted for a sample must *exactly* match the
corresponding set of labels in y_true.
Read more in the :ref:`User Guide <accuracy_score>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_pred : 1d array-like, or label indicator array / sparse matrix
Predicted labels, as returned by a classifier.
normalize : bool, optional (default=True)
If ``False``, return the number of correctly classified samples.
Otherwise, return the fraction of correctly classified samples.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
score : float
        If ``normalize == True``, return the fraction of correctly classified
        samples (float), else return the number of correctly classified
        samples (int).
The best performance is 1 with ``normalize == True`` and the number
of samples with ``normalize == False``.
See also
--------
jaccard_similarity_score, hamming_loss, zero_one_loss
Notes
-----
In binary and multiclass classification, this function is equal
to the ``jaccard_similarity_score`` function.
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import accuracy_score
>>> y_pred = [0, 2, 1, 3]
>>> y_true = [0, 1, 2, 3]
>>> accuracy_score(y_true, y_pred)
0.5
>>> accuracy_score(y_true, y_pred, normalize=False)
2
In the multilabel case with binary label indicators:
>>> accuracy_score(np.array([[0, 1], [1, 1]]), np.ones((2, 2)))
0.5
"""
# Compute accuracy for each possible representation
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if y_type.startswith('multilabel'):
differing_labels = count_nonzero(y_true - y_pred, axis=1)
score = differing_labels == 0
else:
score = y_true == y_pred
return _weighted_sum(score, sample_weight, normalize)
def confusion_matrix(y_true, y_pred, labels=None):
"""Compute confusion matrix to evaluate the accuracy of a classification
By definition a confusion matrix :math:`C` is such that :math:`C_{i, j}`
is equal to the number of observations known to be in group :math:`i` but
predicted to be in group :math:`j`.
Read more in the :ref:`User Guide <confusion_matrix>`.
Parameters
----------
y_true : array, shape = [n_samples]
Ground truth (correct) target values.
y_pred : array, shape = [n_samples]
Estimated targets as returned by a classifier.
labels : array, shape = [n_classes], optional
List of labels to index the matrix. This may be used to reorder
or select a subset of labels.
If none is given, those that appear at least once
in ``y_true`` or ``y_pred`` are used in sorted order.
Returns
-------
C : array, shape = [n_classes, n_classes]
Confusion matrix
References
----------
.. [1] `Wikipedia entry for the Confusion matrix
<http://en.wikipedia.org/wiki/Confusion_matrix>`_
Examples
--------
>>> from sklearn.metrics import confusion_matrix
>>> y_true = [2, 0, 2, 2, 0, 1]
>>> y_pred = [0, 0, 2, 2, 0, 2]
>>> confusion_matrix(y_true, y_pred)
array([[2, 0, 0],
[0, 0, 1],
[1, 0, 2]])
>>> y_true = ["cat", "ant", "cat", "cat", "ant", "bird"]
>>> y_pred = ["ant", "ant", "cat", "cat", "ant", "cat"]
>>> confusion_matrix(y_true, y_pred, labels=["ant", "bird", "cat"])
array([[2, 0, 0],
[0, 0, 1],
[1, 0, 2]])
"""
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if y_type not in ("binary", "multiclass"):
raise ValueError("%s is not supported" % y_type)
if labels is None:
labels = unique_labels(y_true, y_pred)
else:
labels = np.asarray(labels)
n_labels = labels.size
label_to_ind = dict((y, x) for x, y in enumerate(labels))
# convert yt, yp into index
y_pred = np.array([label_to_ind.get(x, n_labels + 1) for x in y_pred])
y_true = np.array([label_to_ind.get(x, n_labels + 1) for x in y_true])
# intersect y_pred, y_true with labels, eliminate items not in labels
ind = np.logical_and(y_pred < n_labels, y_true < n_labels)
y_pred = y_pred[ind]
y_true = y_true[ind]
CM = coo_matrix((np.ones(y_true.shape[0], dtype=np.int), (y_true, y_pred)),
shape=(n_labels, n_labels)
).toarray()
return CM
def cohen_kappa_score(y1, y2, labels=None):
"""Cohen's kappa: a statistic that measures inter-annotator agreement.
This function computes Cohen's kappa [1], a score that expresses the level
of agreement between two annotators on a classification problem. It is
defined as
.. math::
\kappa = (p_o - p_e) / (1 - p_e)
where :math:`p_o` is the empirical probability of agreement on the label
assigned to any sample (the observed agreement ratio), and :math:`p_e` is
the expected agreement when both annotators assign labels randomly.
:math:`p_e` is estimated using a per-annotator empirical prior over the
class labels [2].
Parameters
----------
y1 : array, shape = [n_samples]
Labels assigned by the first annotator.
y2 : array, shape = [n_samples]
Labels assigned by the second annotator. The kappa statistic is
symmetric, so swapping ``y1`` and ``y2`` doesn't change the value.
labels : array, shape = [n_classes], optional
List of labels to index the matrix. This may be used to select a
subset of labels. If None, all labels that appear at least once in
``y1`` or ``y2`` are used.
Returns
-------
kappa : float
The kappa statistic, which is a number between -1 and 1. The maximum
value means complete agreement; zero or lower means chance agreement.
References
----------
.. [1] J. Cohen (1960). "A coefficient of agreement for nominal scales".
Educational and Psychological Measurement 20(1):37-46.
doi:10.1177/001316446002000104.
.. [2] R. Artstein and M. Poesio (2008). "Inter-coder agreement for
computational linguistics". Computational Linguistic 34(4):555-596.
"""
confusion = confusion_matrix(y1, y2, labels=labels)
P = confusion / float(confusion.sum())
p_observed = np.trace(P)
p_expected = np.dot(P.sum(axis=0), P.sum(axis=1))
return (p_observed - p_expected) / (1 - p_expected)
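# --- Illustrative sketch, not part of the original module ---
# The docstring above has no Examples section, so this hedged sketch spells
# out the kappa computation from the confusion matrix, mirroring the code:
# observed agreement is the trace of the normalised matrix, expected
# agreement the dot product of its marginals.
def _demo_cohen_kappa():
    y1 = [0, 0, 1, 1, 2, 2]
    y2 = [0, 0, 1, 2, 2, 2]
    confusion = confusion_matrix(y1, y2)
    P = confusion / float(confusion.sum())
    p_observed = np.trace(P)                           # fraction of agreements
    p_expected = np.dot(P.sum(axis=0), P.sum(axis=1))  # chance agreement
    kappa_by_hand = (p_observed - p_expected) / (1 - p_expected)
    assert np.isclose(kappa_by_hand, cohen_kappa_score(y1, y2))
    return kappa_by_hand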
def jaccard_similarity_score(y_true, y_pred, normalize=True,
sample_weight=None):
"""Jaccard similarity coefficient score
The Jaccard index [1], or Jaccard similarity coefficient, defined as
the size of the intersection divided by the size of the union of two label
    sets, is used to compare the set of predicted labels for a sample to the
corresponding set of labels in ``y_true``.
Read more in the :ref:`User Guide <jaccard_similarity_score>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_pred : 1d array-like, or label indicator array / sparse matrix
Predicted labels, as returned by a classifier.
normalize : bool, optional (default=True)
        If ``False``, return the sum of the Jaccard similarity coefficient
        over the sample set. Otherwise, return the average Jaccard
        similarity coefficient.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
score : float
If ``normalize == True``, return the average Jaccard similarity
coefficient, else it returns the sum of the Jaccard similarity
coefficient over the sample set.
The best performance is 1 with ``normalize == True`` and the number
of samples with ``normalize == False``.
See also
--------
accuracy_score, hamming_loss, zero_one_loss
Notes
-----
In binary and multiclass classification, this function is equivalent
to the ``accuracy_score``. It differs in the multilabel classification
problem.
References
----------
.. [1] `Wikipedia entry for the Jaccard index
<http://en.wikipedia.org/wiki/Jaccard_index>`_
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import jaccard_similarity_score
>>> y_pred = [0, 2, 1, 3]
>>> y_true = [0, 1, 2, 3]
>>> jaccard_similarity_score(y_true, y_pred)
0.5
>>> jaccard_similarity_score(y_true, y_pred, normalize=False)
2
In the multilabel case with binary label indicators:
>>> jaccard_similarity_score(np.array([[0, 1], [1, 1]]),\
np.ones((2, 2)))
0.75
"""
# Compute accuracy for each possible representation
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if y_type.startswith('multilabel'):
with np.errstate(divide='ignore', invalid='ignore'):
# oddly, we may get an "invalid" rather than a "divide" error here
pred_or_true = count_nonzero(y_true + y_pred, axis=1)
pred_and_true = count_nonzero(y_true.multiply(y_pred), axis=1)
score = pred_and_true / pred_or_true
            # If there is no label, it results in a NaN instead; we set
            # the Jaccard score to 1: lim_{x->0} x/x = 1
# Note with py2.6 and np 1.3: we can't check safely for nan.
score[pred_or_true == 0.0] = 1.0
else:
score = y_true == y_pred
return _weighted_sum(score, sample_weight, normalize)
def matthews_corrcoef(y_true, y_pred):
"""Compute the Matthews correlation coefficient (MCC) for binary classes
The Matthews correlation coefficient is used in machine learning as a
measure of the quality of binary (two-class) classifications. It takes into
account true and false positives and negatives and is generally regarded as
a balanced measure which can be used even if the classes are of very
different sizes. The MCC is in essence a correlation coefficient value
between -1 and +1. A coefficient of +1 represents a perfect prediction, 0
an average random prediction and -1 an inverse prediction. The statistic
is also known as the phi coefficient. [source: Wikipedia]
Only in the binary case does this relate to information about true and
false positives and negatives. See references below.
Read more in the :ref:`User Guide <matthews_corrcoef>`.
Parameters
----------
y_true : array, shape = [n_samples]
Ground truth (correct) target values.
y_pred : array, shape = [n_samples]
Estimated targets as returned by a classifier.
Returns
-------
mcc : float
The Matthews correlation coefficient (+1 represents a perfect
        prediction, 0 an average random prediction and -1 an inverse
prediction).
References
----------
.. [1] `Baldi, Brunak, Chauvin, Andersen and Nielsen, (2000). Assessing the
accuracy of prediction algorithms for classification: an overview
<http://dx.doi.org/10.1093/bioinformatics/16.5.412>`_
.. [2] `Wikipedia entry for the Matthews Correlation Coefficient
<http://en.wikipedia.org/wiki/Matthews_correlation_coefficient>`_
Examples
--------
>>> from sklearn.metrics import matthews_corrcoef
>>> y_true = [+1, +1, +1, -1]
>>> y_pred = [+1, -1, +1, +1]
>>> matthews_corrcoef(y_true, y_pred) # doctest: +ELLIPSIS
-0.33...
"""
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if y_type != "binary":
raise ValueError("%s is not supported" % y_type)
lb = LabelEncoder()
lb.fit(np.hstack([y_true, y_pred]))
y_true = lb.transform(y_true)
y_pred = lb.transform(y_pred)
with np.errstate(invalid='ignore'):
mcc = np.corrcoef(y_true, y_pred)[0, 1]
if np.isnan(mcc):
return 0.
else:
return mcc
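# --- Illustrative sketch, not part of the original module ---
# A hedged check that the correlation-based implementation above agrees with
# the textbook confusion-matrix form of the MCC,
#     (tp * tn - fp * fn) / sqrt((tp + fp)(tp + fn)(tn + fp)(tn + fn)).
def _demo_matthews_corrcoef():
    y_true = np.array([1, 1, 1, 0, 0, 0, 1, 0])
    y_pred = np.array([1, 1, 0, 0, 0, 1, 1, 0])
    tp = np.sum((y_true == 1) & (y_pred == 1))
    tn = np.sum((y_true == 0) & (y_pred == 0))
    fp = np.sum((y_true == 0) & (y_pred == 1))
    fn = np.sum((y_true == 1) & (y_pred == 0))
    denom = np.sqrt(float((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn)))
    mcc_by_hand = (tp * tn - fp * fn) / denom
    assert np.isclose(mcc_by_hand, matthews_corrcoef(y_true, y_pred))
    return mcc_by_hand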
def zero_one_loss(y_true, y_pred, normalize=True, sample_weight=None):
"""Zero-one classification loss.
If normalize is ``True``, return the fraction of misclassifications
(float), else it returns the number of misclassifications (int). The best
performance is 0.
Read more in the :ref:`User Guide <zero_one_loss>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_pred : 1d array-like, or label indicator array / sparse matrix
Predicted labels, as returned by a classifier.
normalize : bool, optional (default=True)
If ``False``, return the number of misclassifications.
Otherwise, return the fraction of misclassifications.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
loss : float or int,
If ``normalize == True``, return the fraction of misclassifications
(float), else it returns the number of misclassifications (int).
Notes
-----
In multilabel classification, the zero_one_loss function corresponds to
the subset zero-one loss: for each sample, the entire set of labels must be
correctly predicted, otherwise the loss for that sample is equal to one.
See also
--------
accuracy_score, hamming_loss, jaccard_similarity_score
Examples
--------
>>> from sklearn.metrics import zero_one_loss
>>> y_pred = [1, 2, 3, 4]
>>> y_true = [2, 2, 3, 4]
>>> zero_one_loss(y_true, y_pred)
0.25
>>> zero_one_loss(y_true, y_pred, normalize=False)
1
In the multilabel case with binary label indicators:
>>> zero_one_loss(np.array([[0, 1], [1, 1]]), np.ones((2, 2)))
0.5
"""
score = accuracy_score(y_true, y_pred,
normalize=normalize,
sample_weight=sample_weight)
if normalize:
return 1 - score
else:
if sample_weight is not None:
n_samples = np.sum(sample_weight)
else:
n_samples = _num_samples(y_true)
return n_samples - score
def f1_score(y_true, y_pred, labels=None, pos_label=1, average='binary',
sample_weight=None):
"""Compute the F1 score, also known as balanced F-score or F-measure
The F1 score can be interpreted as a weighted average of the precision and
recall, where an F1 score reaches its best value at 1 and worst score at 0.
The relative contribution of precision and recall to the F1 score are
equal. The formula for the F1 score is::
F1 = 2 * (precision * recall) / (precision + recall)
In the multi-class and multi-label case, this is the weighted average of
the F1 score of each class.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
pos_label : str or int, 1 by default
The class to report if ``average='binary'``. Until version 0.18 it is
necessary to set ``pos_label=None`` if seeking to use another averaging
method over binary targets.
average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \
'weighted']
This parameter is required for multiclass/multilabel targets.
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
Note that if ``pos_label`` is given in binary classification with
`average != 'binary'`, only that positive class is reported. This
behavior is deprecated and will change in version 0.18.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
f1_score : float or array of float, shape = [n_unique_labels]
F1 score of the positive class in binary classification or weighted
average of the F1 scores of each class for the multiclass task.
References
----------
.. [1] `Wikipedia entry for the F1-score
<http://en.wikipedia.org/wiki/F1_score>`_
Examples
--------
>>> from sklearn.metrics import f1_score
>>> y_true = [0, 1, 2, 0, 1, 2]
>>> y_pred = [0, 2, 1, 0, 0, 1]
>>> f1_score(y_true, y_pred, average='macro') # doctest: +ELLIPSIS
0.26...
>>> f1_score(y_true, y_pred, average='micro') # doctest: +ELLIPSIS
0.33...
>>> f1_score(y_true, y_pred, average='weighted') # doctest: +ELLIPSIS
0.26...
>>> f1_score(y_true, y_pred, average=None)
array([ 0.8, 0. , 0. ])
"""
return fbeta_score(y_true, y_pred, 1, labels=labels,
pos_label=pos_label, average=average,
sample_weight=sample_weight)
def fbeta_score(y_true, y_pred, beta, labels=None, pos_label=1,
average='binary', sample_weight=None):
"""Compute the F-beta score
The F-beta score is the weighted harmonic mean of precision and recall,
reaching its optimal value at 1 and its worst value at 0.
The `beta` parameter determines the weight of precision in the combined
score. ``beta < 1`` lends more weight to precision, while ``beta > 1``
favors recall (``beta -> 0`` considers only precision, ``beta -> inf``
only recall).
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
    beta : float
Weight of precision in harmonic mean.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
pos_label : str or int, 1 by default
The class to report if ``average='binary'``. Until version 0.18 it is
necessary to set ``pos_label=None`` if seeking to use another averaging
method over binary targets.
average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \
'weighted']
This parameter is required for multiclass/multilabel targets.
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
Note that if ``pos_label`` is given in binary classification with
`average != 'binary'`, only that positive class is reported. This
behavior is deprecated and will change in version 0.18.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
fbeta_score : float (if average is not None) or array of float, shape =\
[n_unique_labels]
F-beta score of the positive class in binary classification or weighted
average of the F-beta score of each class for the multiclass task.
References
----------
.. [1] R. Baeza-Yates and B. Ribeiro-Neto (2011).
Modern Information Retrieval. Addison Wesley, pp. 327-328.
.. [2] `Wikipedia entry for the F1-score
<http://en.wikipedia.org/wiki/F1_score>`_
Examples
--------
>>> from sklearn.metrics import fbeta_score
>>> y_true = [0, 1, 2, 0, 1, 2]
>>> y_pred = [0, 2, 1, 0, 0, 1]
>>> fbeta_score(y_true, y_pred, average='macro', beta=0.5)
... # doctest: +ELLIPSIS
0.23...
>>> fbeta_score(y_true, y_pred, average='micro', beta=0.5)
... # doctest: +ELLIPSIS
0.33...
>>> fbeta_score(y_true, y_pred, average='weighted', beta=0.5)
... # doctest: +ELLIPSIS
0.23...
>>> fbeta_score(y_true, y_pred, average=None, beta=0.5)
... # doctest: +ELLIPSIS
array([ 0.71..., 0. , 0. ])
"""
_, _, f, _ = precision_recall_fscore_support(y_true, y_pred,
beta=beta,
labels=labels,
pos_label=pos_label,
average=average,
warn_for=('f-score',),
sample_weight=sample_weight)
return f
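# --- Illustrative sketch, not part of the original module ---
# A hedged illustration of the F-beta definition as the weighted harmonic
# mean of precision and recall, (1 + beta**2) * p * r / (beta**2 * p + r).
# ``precision_score`` and ``recall_score`` are defined further down in this
# module; the helper name ``_demo_fbeta_from_precision_recall`` is hypothetical.
def _demo_fbeta_from_precision_recall():
    y_true = [0, 1, 1, 0, 1, 1]
    y_pred = [0, 1, 0, 1, 1, 1]
    beta = 0.5
    p = precision_score(y_true, y_pred)
    r = recall_score(y_true, y_pred)
    fbeta_by_hand = (1 + beta ** 2) * p * r / (beta ** 2 * p + r)
    assert np.isclose(fbeta_by_hand, fbeta_score(y_true, y_pred, beta=beta))
    return fbeta_by_hand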
def _prf_divide(numerator, denominator, metric, modifier, average, warn_for):
"""Performs division and handles divide-by-zero.
On zero-division, sets the corresponding result elements to zero
and raises a warning.
The metric, modifier and average arguments are used only for determining
an appropriate warning.
"""
result = numerator / denominator
mask = denominator == 0.0
if not np.any(mask):
return result
# remove infs
result[mask] = 0.0
# build appropriate warning
# E.g. "Precision and F-score are ill-defined and being set to 0.0 in
# labels with no predicted samples"
axis0 = 'sample'
axis1 = 'label'
if average == 'samples':
axis0, axis1 = axis1, axis0
if metric in warn_for and 'f-score' in warn_for:
msg_start = '{0} and F-score are'.format(metric.title())
elif metric in warn_for:
msg_start = '{0} is'.format(metric.title())
elif 'f-score' in warn_for:
msg_start = 'F-score is'
else:
return result
msg = ('{0} ill-defined and being set to 0.0 {{0}} '
'no {1} {2}s.'.format(msg_start, modifier, axis0))
if len(mask) == 1:
msg = msg.format('due to')
else:
msg = msg.format('in {0}s with'.format(axis1))
warnings.warn(msg, UndefinedMetricWarning, stacklevel=2)
return result
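# --- Illustrative sketch, not part of the original module ---
# A hedged example of the zero-division handling in ``_prf_divide``: where the
# denominator is zero the result is forced to 0.0 and an UndefinedMetricWarning
# is emitted (here, precision for a label that is never predicted).
def _demo_prf_divide():
    tp_sum = np.array([2.0, 0.0])
    pred_sum = np.array([4.0, 0.0])  # second label has no predicted samples
    with np.errstate(divide='ignore', invalid='ignore'):
        precision = _prf_divide(tp_sum, pred_sum, 'precision', 'predicted',
                                None, ('precision',))
    return precision  # array([0.5, 0.0]), plus a warning for the second label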
def precision_recall_fscore_support(y_true, y_pred, beta=1.0, labels=None,
pos_label=1, average=None,
warn_for=('precision', 'recall',
'f-score'),
sample_weight=None):
"""Compute precision, recall, F-measure and support for each class
The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number of
true positives and ``fp`` the number of false positives. The precision is
intuitively the ability of the classifier not to label as positive a sample
that is negative.
The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of
true positives and ``fn`` the number of false negatives. The recall is
intuitively the ability of the classifier to find all the positive samples.
The F-beta score can be interpreted as a weighted harmonic mean of
the precision and recall, where an F-beta score reaches its best
value at 1 and worst score at 0.
The F-beta score weights recall more than precision by a factor of
``beta``. ``beta == 1.0`` means recall and precision are equally important.
The support is the number of occurrences of each class in ``y_true``.
If ``pos_label is None`` and in binary classification, this function
returns the average precision, recall and F-measure if ``average``
is one of ``'micro'``, ``'macro'``, ``'weighted'`` or ``'samples'``.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
beta : float, 1.0 by default
The strength of recall versus precision in the F-score.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
pos_label : str or int, 1 by default
The class to report if ``average='binary'``. Until version 0.18 it is
necessary to set ``pos_label=None`` if seeking to use another averaging
method over binary targets.
average : string, [None (default), 'binary', 'micro', 'macro', 'samples', \
'weighted']
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
Note that if ``pos_label`` is given in binary classification with
`average != 'binary'`, only that positive class is reported. This
behavior is deprecated and will change in version 0.18.
warn_for : tuple or set, for internal use
This determines which warnings will be made in the case that this
function is being used to return only one of its metrics.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
precision: float (if average is not None) or array of float, shape =\
[n_unique_labels]
    recall: float (if average is not None) or array of float, shape =\
[n_unique_labels]
fbeta_score: float (if average is not None) or array of float, shape =\
[n_unique_labels]
support: int (if average is not None) or array of int, shape =\
[n_unique_labels]
The number of occurrences of each label in ``y_true``.
References
----------
.. [1] `Wikipedia entry for the Precision and recall
<http://en.wikipedia.org/wiki/Precision_and_recall>`_
.. [2] `Wikipedia entry for the F1-score
<http://en.wikipedia.org/wiki/F1_score>`_
.. [3] `Discriminative Methods for Multi-labeled Classification Advances
in Knowledge Discovery and Data Mining (2004), pp. 22-30 by Shantanu
Godbole, Sunita Sarawagi
<http://www.godbole.net/shantanu/pubs/multilabelsvm-pakdd04.pdf>`
Examples
--------
>>> from sklearn.metrics import precision_recall_fscore_support
>>> y_true = np.array(['cat', 'dog', 'pig', 'cat', 'dog', 'pig'])
>>> y_pred = np.array(['cat', 'pig', 'dog', 'cat', 'cat', 'dog'])
>>> precision_recall_fscore_support(y_true, y_pred, average='macro')
... # doctest: +ELLIPSIS
(0.22..., 0.33..., 0.26..., None)
>>> precision_recall_fscore_support(y_true, y_pred, average='micro')
... # doctest: +ELLIPSIS
(0.33..., 0.33..., 0.33..., None)
>>> precision_recall_fscore_support(y_true, y_pred, average='weighted')
... # doctest: +ELLIPSIS
(0.22..., 0.33..., 0.26..., None)
It is possible to compute per-label precisions, recalls, F1-scores and
supports instead of averaging:
>>> precision_recall_fscore_support(y_true, y_pred, average=None,
... labels=['pig', 'dog', 'cat'])
... # doctest: +ELLIPSIS,+NORMALIZE_WHITESPACE
(array([ 0. , 0. , 0.66...]),
array([ 0., 0., 1.]),
array([ 0. , 0. , 0.8]),
array([2, 2, 2]))
"""
average_options = (None, 'micro', 'macro', 'weighted', 'samples')
if average not in average_options and average != 'binary':
raise ValueError('average has to be one of ' +
str(average_options))
if beta <= 0:
raise ValueError("beta should be >0 in the F-beta score")
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
present_labels = unique_labels(y_true, y_pred)
if average == 'binary' and (y_type != 'binary' or pos_label is None):
warnings.warn('The default `weighted` averaging is deprecated, '
'and from version 0.18, use of precision, recall or '
'F-score with multiclass or multilabel data or '
'pos_label=None will result in an exception. '
'Please set an explicit value for `average`, one of '
'%s. In cross validation use, for instance, '
'scoring="f1_weighted" instead of scoring="f1".'
% str(average_options), DeprecationWarning, stacklevel=2)
average = 'weighted'
if y_type == 'binary' and pos_label is not None and average is not None:
if average != 'binary':
warnings.warn('From version 0.18, binary input will not be '
'handled specially when using averaged '
'precision/recall/F-score. '
'Please use average=\'binary\' to report only the '
'positive class performance.', DeprecationWarning)
if labels is None or len(labels) <= 2:
if pos_label not in present_labels:
if len(present_labels) < 2:
# Only negative labels
return (0., 0., 0., 0)
else:
raise ValueError("pos_label=%r is not a valid label: %r" %
(pos_label, present_labels))
labels = [pos_label]
if labels is None:
labels = present_labels
n_labels = None
else:
n_labels = len(labels)
labels = np.hstack([labels, np.setdiff1d(present_labels, labels,
assume_unique=True)])
### Calculate tp_sum, pred_sum, true_sum ###
if y_type.startswith('multilabel'):
sum_axis = 1 if average == 'samples' else 0
# All labels are index integers for multilabel.
# Select labels:
if not np.all(labels == present_labels):
if np.max(labels) > np.max(present_labels):
raise ValueError('All labels must be in [0, n labels). '
'Got %d > %d' %
(np.max(labels), np.max(present_labels)))
if np.min(labels) < 0:
raise ValueError('All labels must be in [0, n labels). '
'Got %d < 0' % np.min(labels))
y_true = y_true[:, labels[:n_labels]]
y_pred = y_pred[:, labels[:n_labels]]
# calculate weighted counts
true_and_pred = y_true.multiply(y_pred)
tp_sum = count_nonzero(true_and_pred, axis=sum_axis,
sample_weight=sample_weight)
pred_sum = count_nonzero(y_pred, axis=sum_axis,
sample_weight=sample_weight)
true_sum = count_nonzero(y_true, axis=sum_axis,
sample_weight=sample_weight)
elif average == 'samples':
raise ValueError("Sample-based precision, recall, fscore is "
"not meaningful outside multilabel "
"classification. See the accuracy_score instead.")
else:
le = LabelEncoder()
le.fit(labels)
y_true = le.transform(y_true)
y_pred = le.transform(y_pred)
sorted_labels = le.classes_
# labels are now from 0 to len(labels) - 1 -> use bincount
tp = y_true == y_pred
tp_bins = y_true[tp]
if sample_weight is not None:
tp_bins_weights = np.asarray(sample_weight)[tp]
else:
tp_bins_weights = None
if len(tp_bins):
tp_sum = bincount(tp_bins, weights=tp_bins_weights,
minlength=len(labels))
else:
# Pathological case
true_sum = pred_sum = tp_sum = np.zeros(len(labels))
if len(y_pred):
pred_sum = bincount(y_pred, weights=sample_weight,
minlength=len(labels))
if len(y_true):
true_sum = bincount(y_true, weights=sample_weight,
minlength=len(labels))
# Retain only selected labels
indices = np.searchsorted(sorted_labels, labels[:n_labels])
tp_sum = tp_sum[indices]
true_sum = true_sum[indices]
pred_sum = pred_sum[indices]
if average == 'micro':
tp_sum = np.array([tp_sum.sum()])
pred_sum = np.array([pred_sum.sum()])
true_sum = np.array([true_sum.sum()])
### Finally, we have all our sufficient statistics. Divide! ###
beta2 = beta ** 2
with np.errstate(divide='ignore', invalid='ignore'):
# Divide, and on zero-division, set scores to 0 and warn:
# Oddly, we may get an "invalid" rather than a "divide" error
# here.
precision = _prf_divide(tp_sum, pred_sum,
'precision', 'predicted', average, warn_for)
recall = _prf_divide(tp_sum, true_sum,
'recall', 'true', average, warn_for)
# Don't need to warn for F: either P or R warned, or tp == 0 where pos
# and true are nonzero, in which case, F is well-defined and zero
f_score = ((1 + beta2) * precision * recall /
(beta2 * precision + recall))
f_score[tp_sum == 0] = 0.0
## Average the results ##
if average == 'weighted':
weights = true_sum
if weights.sum() == 0:
return 0, 0, 0, None
elif average == 'samples':
weights = sample_weight
else:
weights = None
if average is not None:
assert average != 'binary' or len(precision) == 1
precision = np.average(precision, weights=weights)
recall = np.average(recall, weights=weights)
f_score = np.average(f_score, weights=weights)
true_sum = None # return no support
return precision, recall, f_score, true_sum
def precision_score(y_true, y_pred, labels=None, pos_label=1,
average='binary', sample_weight=None):
"""Compute the precision
The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number of
true positives and ``fp`` the number of false positives. The precision is
intuitively the ability of the classifier not to label as positive a sample
that is negative.
The best value is 1 and the worst value is 0.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
pos_label : str or int, 1 by default
The class to report if ``average='binary'``. Until version 0.18 it is
necessary to set ``pos_label=None`` if seeking to use another averaging
method over binary targets.
average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \
'weighted']
This parameter is required for multiclass/multilabel targets.
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
Note that if ``pos_label`` is given in binary classification with
`average != 'binary'`, only that positive class is reported. This
behavior is deprecated and will change in version 0.18.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
precision : float (if average is not None) or array of float, shape =\
[n_unique_labels]
Precision of the positive class in binary classification or weighted
average of the precision of each class for the multiclass task.
Examples
--------
>>> from sklearn.metrics import precision_score
>>> y_true = [0, 1, 2, 0, 1, 2]
>>> y_pred = [0, 2, 1, 0, 0, 1]
>>> precision_score(y_true, y_pred, average='macro') # doctest: +ELLIPSIS
0.22...
>>> precision_score(y_true, y_pred, average='micro') # doctest: +ELLIPSIS
0.33...
>>> precision_score(y_true, y_pred, average='weighted')
... # doctest: +ELLIPSIS
0.22...
>>> precision_score(y_true, y_pred, average=None) # doctest: +ELLIPSIS
array([ 0.66..., 0. , 0. ])
"""
p, _, _, _ = precision_recall_fscore_support(y_true, y_pred,
labels=labels,
pos_label=pos_label,
average=average,
warn_for=('precision',),
sample_weight=sample_weight)
return p
def recall_score(y_true, y_pred, labels=None, pos_label=1, average='binary',
sample_weight=None):
"""Compute the recall
The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of
true positives and ``fn`` the number of false negatives. The recall is
intuitively the ability of the classifier to find all the positive samples.
The best value is 1 and the worst value is 0.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
pos_label : str or int, 1 by default
The class to report if ``average='binary'``. Until version 0.18 it is
necessary to set ``pos_label=None`` if seeking to use another averaging
method over binary targets.
average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \
'weighted']
This parameter is required for multiclass/multilabel targets.
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
Note that if ``pos_label`` is given in binary classification with
`average != 'binary'`, only that positive class is reported. This
behavior is deprecated and will change in version 0.18.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
recall : float (if average is not None) or array of float, shape =\
[n_unique_labels]
Recall of the positive class in binary classification or weighted
average of the recall of each class for the multiclass task.
Examples
--------
>>> from sklearn.metrics import recall_score
>>> y_true = [0, 1, 2, 0, 1, 2]
>>> y_pred = [0, 2, 1, 0, 0, 1]
>>> recall_score(y_true, y_pred, average='macro') # doctest: +ELLIPSIS
0.33...
>>> recall_score(y_true, y_pred, average='micro') # doctest: +ELLIPSIS
0.33...
>>> recall_score(y_true, y_pred, average='weighted') # doctest: +ELLIPSIS
0.33...
>>> recall_score(y_true, y_pred, average=None)
array([ 1., 0., 0.])
"""
_, r, _, _ = precision_recall_fscore_support(y_true, y_pred,
labels=labels,
pos_label=pos_label,
average=average,
warn_for=('recall',),
sample_weight=sample_weight)
return r
def classification_report(y_true, y_pred, labels=None, target_names=None,
sample_weight=None, digits=2):
"""Build a text report showing the main classification metrics
Read more in the :ref:`User Guide <classification_report>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
labels : array, shape = [n_labels]
Optional list of label indices to include in the report.
target_names : list of strings
Optional display names matching the labels (same order).
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
digits : int
Number of digits for formatting output floating point values
Returns
-------
report : string
Text summary of the precision, recall, F1 score for each class.
Examples
--------
>>> from sklearn.metrics import classification_report
>>> y_true = [0, 1, 2, 2, 2]
>>> y_pred = [0, 0, 2, 2, 1]
>>> target_names = ['class 0', 'class 1', 'class 2']
>>> print(classification_report(y_true, y_pred, target_names=target_names))
precision recall f1-score support
<BLANKLINE>
class 0 0.50 1.00 0.67 1
class 1 0.00 0.00 0.00 1
class 2 1.00 0.67 0.80 3
<BLANKLINE>
avg / total 0.70 0.60 0.61 5
<BLANKLINE>
"""
if labels is None:
labels = unique_labels(y_true, y_pred)
else:
labels = np.asarray(labels)
last_line_heading = 'avg / total'
if target_names is None:
width = len(last_line_heading)
target_names = ['%s' % l for l in labels]
else:
width = max(len(cn) for cn in target_names)
width = max(width, len(last_line_heading), digits)
headers = ["precision", "recall", "f1-score", "support"]
fmt = '%% %ds' % width # first column: class name
fmt += ' '
fmt += ' '.join(['% 9s' for _ in headers])
fmt += '\n'
headers = [""] + headers
report = fmt % tuple(headers)
report += '\n'
p, r, f1, s = precision_recall_fscore_support(y_true, y_pred,
labels=labels,
average=None,
sample_weight=sample_weight)
for i, label in enumerate(labels):
values = [target_names[i]]
for v in (p[i], r[i], f1[i]):
values += ["{0:0.{1}f}".format(v, digits)]
values += ["{0}".format(s[i])]
report += fmt % tuple(values)
report += '\n'
# compute averages
values = [last_line_heading]
for v in (np.average(p, weights=s),
np.average(r, weights=s),
np.average(f1, weights=s)):
values += ["{0:0.{1}f}".format(v, digits)]
values += ['{0}'.format(np.sum(s))]
report += fmt % tuple(values)
return report
def hamming_loss(y_true, y_pred, classes=None):
"""Compute the average Hamming loss.
The Hamming loss is the fraction of labels that are incorrectly predicted.
Read more in the :ref:`User Guide <hamming_loss>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_pred : 1d array-like, or label indicator array / sparse matrix
Predicted labels, as returned by a classifier.
classes : array, shape = [n_labels], optional
Integer array of labels.
Returns
-------
loss : float or int,
Return the average Hamming loss between element of ``y_true`` and
``y_pred``.
See Also
--------
accuracy_score, jaccard_similarity_score, zero_one_loss
Notes
-----
    In multiclass classification, the Hamming loss corresponds to the Hamming
    distance between ``y_true`` and ``y_pred`` which is equivalent to the
    subset ``zero_one_loss`` function.
    In multilabel classification, the Hamming loss is different from the
    subset zero-one loss. The zero-one loss considers the entire set of labels
    for a given sample incorrect if it does not entirely match the true set of
    labels. Hamming loss is more forgiving in that it penalizes the individual
    labels.
    The Hamming loss is upper bounded by the subset zero-one loss. When
    normalized over samples, the Hamming loss is always between 0 and 1.
References
----------
.. [1] Grigorios Tsoumakas, Ioannis Katakis. Multi-Label Classification:
An Overview. International Journal of Data Warehousing & Mining,
3(3), 1-13, July-September 2007.
.. [2] `Wikipedia entry on the Hamming distance
<http://en.wikipedia.org/wiki/Hamming_distance>`_
Examples
--------
>>> from sklearn.metrics import hamming_loss
>>> y_pred = [1, 2, 3, 4]
>>> y_true = [2, 2, 3, 4]
>>> hamming_loss(y_true, y_pred)
0.25
In the multilabel case with binary label indicators:
>>> hamming_loss(np.array([[0, 1], [1, 1]]), np.zeros((2, 2)))
0.75
"""
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if classes is None:
classes = unique_labels(y_true, y_pred)
else:
classes = np.asarray(classes)
if y_type.startswith('multilabel'):
n_differences = count_nonzero(y_true - y_pred)
return (n_differences / (y_true.shape[0] * len(classes)))
elif y_type in ["binary", "multiclass"]:
return sp_hamming(y_true, y_pred)
else:
raise ValueError("{0} is not supported".format(y_type))
def log_loss(y_true, y_pred, eps=1e-15, normalize=True, sample_weight=None):
"""Log loss, aka logistic loss or cross-entropy loss.
This is the loss function used in (multinomial) logistic regression
and extensions of it such as neural networks, defined as the negative
log-likelihood of the true labels given a probabilistic classifier's
predictions. For a single sample with true label yt in {0,1} and
estimated probability yp that yt = 1, the log loss is
-log P(yt|yp) = -(yt log(yp) + (1 - yt) log(1 - yp))
Read more in the :ref:`User Guide <log_loss>`.
Parameters
----------
y_true : array-like or label indicator matrix
Ground truth (correct) labels for n_samples samples.
y_pred : array-like of float, shape = (n_samples, n_classes)
Predicted probabilities, as returned by a classifier's
predict_proba method.
eps : float
Log loss is undefined for p=0 or p=1, so probabilities are
clipped to max(eps, min(1 - eps, p)).
normalize : bool, optional (default=True)
If true, return the mean loss per sample.
Otherwise, return the sum of the per-sample losses.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
loss : float
Examples
--------
>>> log_loss(["spam", "ham", "ham", "spam"], # doctest: +ELLIPSIS
... [[.1, .9], [.9, .1], [.8, .2], [.35, .65]])
0.21616...
References
----------
C.M. Bishop (2006). Pattern Recognition and Machine Learning. Springer,
p. 209.
Notes
-----
The logarithm used is the natural logarithm (base-e).
"""
lb = LabelBinarizer()
T = lb.fit_transform(y_true)
if T.shape[1] == 1:
T = np.append(1 - T, T, axis=1)
# Clipping
Y = np.clip(y_pred, eps, 1 - eps)
# This happens in cases when elements in y_pred have type "str".
if not isinstance(Y, np.ndarray):
raise ValueError("y_pred should be an array of floats.")
# If y_pred is of single dimension, assume y_true to be binary
# and then check.
if Y.ndim == 1:
Y = Y[:, np.newaxis]
if Y.shape[1] == 1:
Y = np.append(1 - Y, Y, axis=1)
# Check if dimensions are consistent.
check_consistent_length(T, Y)
T = check_array(T)
Y = check_array(Y)
if T.shape[1] != Y.shape[1]:
raise ValueError("y_true and y_pred have different number of classes "
"%d, %d" % (T.shape[1], Y.shape[1]))
# Renormalize
Y /= Y.sum(axis=1)[:, np.newaxis]
loss = -(T * np.log(Y)).sum(axis=1)
return _weighted_sum(loss, sample_weight, normalize)
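# --- Illustrative sketch, not part of the original module ---
# A hedged, spelled-out version of the binary log loss formula quoted above,
#     -(yt * log(yp) + (1 - yt) * log(1 - yp)),
# averaged over samples and compared against ``log_loss``.
def _demo_log_loss():
    y_true = np.array([1, 0, 0, 1])
    y_prob = np.array([0.9, 0.1, 0.2, 0.65])   # estimated P(yt = 1) per sample
    per_sample = -(y_true * np.log(y_prob) + (1 - y_true) * np.log(1 - y_prob))
    by_hand = per_sample.mean()
    packed = np.column_stack([1 - y_prob, y_prob])  # predict_proba-style input
    assert np.isclose(by_hand, log_loss(y_true, packed))
    return by_hand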
def hinge_loss(y_true, pred_decision, labels=None, sample_weight=None):
"""Average hinge loss (non-regularized)
In binary class case, assuming labels in y_true are encoded with +1 and -1,
when a prediction mistake is made, ``margin = y_true * pred_decision`` is
always negative (since the signs disagree), implying ``1 - margin`` is
always greater than 1. The cumulated hinge loss is therefore an upper
bound of the number of mistakes made by the classifier.
In multiclass case, the function expects that either all the labels are
included in y_true or an optional labels argument is provided which
    contains all the labels. The multiclass margin is calculated according
to Crammer-Singer's method. As in the binary case, the cumulated hinge loss
is an upper bound of the number of mistakes made by the classifier.
Read more in the :ref:`User Guide <hinge_loss>`.
Parameters
----------
y_true : array, shape = [n_samples]
True target, consisting of integers of two values. The positive label
must be greater than the negative label.
pred_decision : array, shape = [n_samples] or [n_samples, n_classes]
Predicted decisions, as output by decision_function (floats).
labels : array, optional, default None
Contains all the labels for the problem. Used in multiclass hinge loss.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
loss : float
References
----------
.. [1] `Wikipedia entry on the Hinge loss
<http://en.wikipedia.org/wiki/Hinge_loss>`_
.. [2] Koby Crammer, Yoram Singer. On the Algorithmic
Implementation of Multiclass Kernel-based Vector
Machines. Journal of Machine Learning Research 2,
(2001), 265-292
.. [3] `L1 AND L2 Regularization for Multiclass Hinge Loss Models
by Robert C. Moore, John DeNero.
<http://www.ttic.edu/sigml/symposium2011/papers/
Moore+DeNero_Regularization.pdf>`_
Examples
--------
>>> from sklearn import svm
>>> from sklearn.metrics import hinge_loss
>>> X = [[0], [1]]
>>> y = [-1, 1]
>>> est = svm.LinearSVC(random_state=0)
>>> est.fit(X, y)
LinearSVC(C=1.0, class_weight=None, dual=True, fit_intercept=True,
intercept_scaling=1, loss='squared_hinge', max_iter=1000,
multi_class='ovr', penalty='l2', random_state=0, tol=0.0001,
verbose=0)
>>> pred_decision = est.decision_function([[-2], [3], [0.5]])
>>> pred_decision # doctest: +ELLIPSIS
array([-2.18..., 2.36..., 0.09...])
>>> hinge_loss([-1, 1, 1], pred_decision) # doctest: +ELLIPSIS
0.30...
In the multiclass case:
>>> X = np.array([[0], [1], [2], [3]])
>>> Y = np.array([0, 1, 2, 3])
>>> labels = np.array([0, 1, 2, 3])
>>> est = svm.LinearSVC()
>>> est.fit(X, Y)
LinearSVC(C=1.0, class_weight=None, dual=True, fit_intercept=True,
intercept_scaling=1, loss='squared_hinge', max_iter=1000,
multi_class='ovr', penalty='l2', random_state=None, tol=0.0001,
verbose=0)
>>> pred_decision = est.decision_function([[-1], [2], [3]])
>>> y_true = [0, 2, 3]
>>> hinge_loss(y_true, pred_decision, labels) #doctest: +ELLIPSIS
0.56...
"""
check_consistent_length(y_true, pred_decision, sample_weight)
pred_decision = check_array(pred_decision, ensure_2d=False)
y_true = column_or_1d(y_true)
y_true_unique = np.unique(y_true)
if y_true_unique.size > 2:
if (labels is None and pred_decision.ndim > 1 and
(np.size(y_true_unique) != pred_decision.shape[1])):
raise ValueError("Please include all labels in y_true "
"or pass labels as third argument")
if labels is None:
labels = y_true_unique
le = LabelEncoder()
le.fit(labels)
y_true = le.transform(y_true)
mask = np.ones_like(pred_decision, dtype=bool)
mask[np.arange(y_true.shape[0]), y_true] = False
margin = pred_decision[~mask]
margin -= np.max(pred_decision[mask].reshape(y_true.shape[0], -1),
axis=1)
else:
# Handles binary class case
# this code assumes that positive and negative labels
# are encoded as +1 and -1 respectively
pred_decision = column_or_1d(pred_decision)
pred_decision = np.ravel(pred_decision)
lbin = LabelBinarizer(neg_label=-1)
y_true = lbin.fit_transform(y_true)[:, 0]
try:
margin = y_true * pred_decision
except TypeError:
raise TypeError("pred_decision should be an array of floats.")
losses = 1 - margin
# The hinge_loss doesn't penalize good enough predictions.
losses[losses <= 0] = 0
return np.average(losses, weights=sample_weight)
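# --- Illustrative sketch, not part of the original module ---
# A hedged walk-through of the binary case described above: with labels
# encoded as +1/-1, the per-sample loss is max(0, 1 - y_true * pred_decision).
def _demo_binary_hinge_loss():
    y_true = np.array([-1, 1, 1])
    pred_decision = np.array([-2.2, 2.4, 0.1])
    margins = y_true * pred_decision        # positive when the sign is right
    losses = np.maximum(0, 1 - margins)     # only small or negative margins count
    by_hand = losses.mean()
    assert np.isclose(by_hand, hinge_loss(y_true, pred_decision))
    return by_hand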
def _check_binary_probabilistic_predictions(y_true, y_prob):
"""Check that y_true is binary and y_prob contains valid probabilities"""
check_consistent_length(y_true, y_prob)
labels = np.unique(y_true)
if len(labels) != 2:
raise ValueError("Only binary classification is supported. "
"Provided labels %s." % labels)
if y_prob.max() > 1:
raise ValueError("y_prob contains values greater than 1.")
if y_prob.min() < 0:
raise ValueError("y_prob contains values less than 0.")
return label_binarize(y_true, labels)[:, 0]
def brier_score_loss(y_true, y_prob, sample_weight=None, pos_label=None):
"""Compute the Brier score.
The smaller the Brier score, the better, hence the naming with "loss".
    Across all items in a set of N predictions, the Brier score measures the
mean squared difference between (1) the predicted probability assigned
to the possible outcomes for item i, and (2) the actual outcome.
Therefore, the lower the Brier score is for a set of predictions, the
better the predictions are calibrated. Note that the Brier score always
takes on a value between zero and one, since this is the largest
possible difference between a predicted probability (which must be
between zero and one) and the actual outcome (which can take on values
of only 0 and 1).
The Brier score is appropriate for binary and categorical outcomes that
can be structured as true or false, but is inappropriate for ordinal
variables which can take on three or more values (this is because the
Brier score assumes that all possible outcomes are equivalently
"distant" from one another). Which label is considered to be the positive
label is controlled via the parameter pos_label, which defaults to 1.
Read more in the :ref:`User Guide <calibration>`.
Parameters
----------
y_true : array, shape (n_samples,)
True targets.
y_prob : array, shape (n_samples,)
Probabilities of the positive class.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
pos_label : int (default: None)
Label of the positive class. If None, the maximum label is used as
positive class
Returns
-------
score : float
Brier score
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import brier_score_loss
>>> y_true = np.array([0, 1, 1, 0])
>>> y_true_categorical = np.array(["spam", "ham", "ham", "spam"])
>>> y_prob = np.array([0.1, 0.9, 0.8, 0.3])
>>> brier_score_loss(y_true, y_prob) # doctest: +ELLIPSIS
0.037...
>>> brier_score_loss(y_true, 1-y_prob, pos_label=0) # doctest: +ELLIPSIS
0.037...
>>> brier_score_loss(y_true_categorical, y_prob, \
pos_label="ham") # doctest: +ELLIPSIS
0.037...
>>> brier_score_loss(y_true, np.array(y_prob) > 0.5)
0.0
References
----------
http://en.wikipedia.org/wiki/Brier_score
"""
y_true = column_or_1d(y_true)
y_prob = column_or_1d(y_prob)
if pos_label is None:
pos_label = y_true.max()
y_true = np.array(y_true == pos_label, int)
y_true = _check_binary_probabilistic_predictions(y_true, y_prob)
return np.average((y_true - y_prob) ** 2, weights=sample_weight)
|
bsd-3-clause
|
HeraclesHX/scikit-learn
|
sklearn/cluster/tests/test_dbscan.py
|
114
|
11393
|
"""
Tests for DBSCAN clustering algorithm
"""
import pickle
import numpy as np
from scipy.spatial import distance
from scipy import sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_not_in
from sklearn.cluster.dbscan_ import DBSCAN
from sklearn.cluster.dbscan_ import dbscan
from sklearn.cluster.tests.common import generate_clustered_data
from sklearn.metrics.pairwise import pairwise_distances
n_clusters = 3
X = generate_clustered_data(n_clusters=n_clusters)
def test_dbscan_similarity():
# Tests the DBSCAN algorithm with a similarity array.
# Parameters chosen specifically for this task.
eps = 0.15
min_samples = 10
# Compute similarities
D = distance.squareform(distance.pdist(X))
D /= np.max(D)
# Compute DBSCAN
core_samples, labels = dbscan(D, metric="precomputed", eps=eps,
min_samples=min_samples)
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - (1 if -1 in labels else 0)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(metric="precomputed", eps=eps, min_samples=min_samples)
labels = db.fit(D).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
def test_dbscan_feature():
# Tests the DBSCAN algorithm with a feature vector array.
# Parameters chosen specifically for this task.
# Different eps to other test, because distance is not normalised.
eps = 0.8
min_samples = 10
metric = 'euclidean'
# Compute DBSCAN
# parameters chosen for task
core_samples, labels = dbscan(X, metric=metric, eps=eps,
min_samples=min_samples)
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(metric=metric, eps=eps, min_samples=min_samples)
labels = db.fit(X).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
def test_dbscan_sparse():
core_sparse, labels_sparse = dbscan(sparse.lil_matrix(X), eps=.8,
min_samples=10)
core_dense, labels_dense = dbscan(X, eps=.8, min_samples=10)
assert_array_equal(core_dense, core_sparse)
assert_array_equal(labels_dense, labels_sparse)
def test_dbscan_no_core_samples():
rng = np.random.RandomState(0)
X = rng.rand(40, 10)
X[X < .8] = 0
for X_ in [X, sparse.csr_matrix(X)]:
db = DBSCAN(min_samples=6).fit(X_)
assert_array_equal(db.components_, np.empty((0, X_.shape[1])))
assert_array_equal(db.labels_, -1)
assert_equal(db.core_sample_indices_.shape, (0,))
def test_dbscan_callable():
# Tests the DBSCAN algorithm with a callable metric.
# Parameters chosen specifically for this task.
# Different eps to other test, because distance is not normalised.
eps = 0.8
min_samples = 10
# metric is the function reference, not the string key.
metric = distance.euclidean
# Compute DBSCAN
# parameters chosen for task
core_samples, labels = dbscan(X, metric=metric, eps=eps,
min_samples=min_samples,
algorithm='ball_tree')
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(metric=metric, eps=eps, min_samples=min_samples,
algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
def test_dbscan_balltree():
# Tests the DBSCAN algorithm with balltree for neighbor calculation.
eps = 0.8
min_samples = 10
D = pairwise_distances(X)
core_samples, labels = dbscan(D, metric="precomputed", eps=eps,
min_samples=min_samples)
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(p=2.0, eps=eps, min_samples=min_samples, algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
db = DBSCAN(p=2.0, eps=eps, min_samples=min_samples, algorithm='kd_tree')
labels = db.fit(X).labels_
n_clusters_3 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_3, n_clusters)
db = DBSCAN(p=1.0, eps=eps, min_samples=min_samples, algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_4 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_4, n_clusters)
db = DBSCAN(leaf_size=20, eps=eps, min_samples=min_samples,
algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_5 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_5, n_clusters)
def test_input_validation():
# DBSCAN.fit should accept a list of lists.
X = [[1., 2.], [3., 4.]]
DBSCAN().fit(X) # must not raise exception
def test_dbscan_badargs():
# Test bad argument values: these should all raise ValueErrors
assert_raises(ValueError,
dbscan,
X, eps=-1.0)
assert_raises(ValueError,
dbscan,
X, algorithm='blah')
assert_raises(ValueError,
dbscan,
X, metric='blah')
assert_raises(ValueError,
dbscan,
X, leaf_size=-1)
assert_raises(ValueError,
dbscan,
X, p=-1)
def test_pickle():
obj = DBSCAN()
s = pickle.dumps(obj)
assert_equal(type(pickle.loads(s)), obj.__class__)
def test_boundaries():
# ensure min_samples is inclusive of core point
core, _ = dbscan([[0], [1]], eps=2, min_samples=2)
assert_in(0, core)
# ensure eps is inclusive of circumference
core, _ = dbscan([[0], [1], [1]], eps=1, min_samples=2)
assert_in(0, core)
core, _ = dbscan([[0], [1], [1]], eps=.99, min_samples=2)
assert_not_in(0, core)
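# Editor's hedged sketch of the boundary semantics checked above: a point counts
# itself toward min_samples, and neighbours at a distance of exactly eps are
# still inside the neighbourhood.
def _boundary_semantics_sketch():
    from sklearn.cluster import dbscan
    # Two points exactly 1 apart: with eps=1 they are mutual neighbours, so
    # min_samples=2 makes both of them core samples.
    core, _ = dbscan([[0], [1]], eps=1, min_samples=2)
    assert list(core) == [0, 1]
    # Shrinking eps just below the separation leaves no core samples at all.
    core, _ = dbscan([[0], [1]], eps=0.99, min_samples=2)
    assert len(core) == 0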
def test_weighted_dbscan():
# ensure sample_weight is validated
assert_raises(ValueError, dbscan, [[0], [1]], sample_weight=[2])
assert_raises(ValueError, dbscan, [[0], [1]], sample_weight=[2, 3, 4])
# ensure sample_weight has an effect
assert_array_equal([], dbscan([[0], [1]], sample_weight=None,
min_samples=6)[0])
assert_array_equal([], dbscan([[0], [1]], sample_weight=[5, 5],
min_samples=6)[0])
assert_array_equal([0], dbscan([[0], [1]], sample_weight=[6, 5],
min_samples=6)[0])
assert_array_equal([0, 1], dbscan([[0], [1]], sample_weight=[6, 6],
min_samples=6)[0])
# points within eps of each other:
assert_array_equal([0, 1], dbscan([[0], [1]], eps=1.5,
sample_weight=[5, 1], min_samples=6)[0])
# and effect of non-positive and non-integer sample_weight:
assert_array_equal([], dbscan([[0], [1]], sample_weight=[5, 0],
eps=1.5, min_samples=6)[0])
assert_array_equal([0, 1], dbscan([[0], [1]], sample_weight=[5.9, 0.1],
eps=1.5, min_samples=6)[0])
assert_array_equal([0, 1], dbscan([[0], [1]], sample_weight=[6, 0],
eps=1.5, min_samples=6)[0])
assert_array_equal([], dbscan([[0], [1]], sample_weight=[6, -1],
eps=1.5, min_samples=6)[0])
# for non-negative sample_weight, cores should be identical to repetition
rng = np.random.RandomState(42)
sample_weight = rng.randint(0, 5, X.shape[0])
core1, label1 = dbscan(X, sample_weight=sample_weight)
assert_equal(len(label1), len(X))
X_repeated = np.repeat(X, sample_weight, axis=0)
core_repeated, label_repeated = dbscan(X_repeated)
core_repeated_mask = np.zeros(X_repeated.shape[0], dtype=bool)
core_repeated_mask[core_repeated] = True
core_mask = np.zeros(X.shape[0], dtype=bool)
core_mask[core1] = True
assert_array_equal(np.repeat(core_mask, sample_weight), core_repeated_mask)
# sample_weight should work with precomputed distance matrix
D = pairwise_distances(X)
core3, label3 = dbscan(D, sample_weight=sample_weight,
metric='precomputed')
assert_array_equal(core1, core3)
assert_array_equal(label1, label3)
# sample_weight should work with estimator
est = DBSCAN().fit(X, sample_weight=sample_weight)
core4 = est.core_sample_indices_
label4 = est.labels_
assert_array_equal(core1, core4)
assert_array_equal(label1, label4)
est = DBSCAN()
label5 = est.fit_predict(X, sample_weight=sample_weight)
core5 = est.core_sample_indices_
assert_array_equal(core1, core5)
assert_array_equal(label1, label5)
assert_array_equal(label1, est.labels_)
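# Editor's hedged sketch (values illustrative): an integer sample_weight of w
# behaves like repeating that point w times when counting mass toward
# min_samples, which is what the repetition comparison above relies on.
def _sample_weight_equivalence_sketch():
    import numpy as np
    from sklearn.cluster import dbscan
    points = np.array([[0.0], [1.0]])
    weights = np.array([3, 3])
    core_weighted, _ = dbscan(points, eps=1.5, min_samples=6,
                              sample_weight=weights)
    core_repeated, _ = dbscan(np.repeat(points, weights, axis=0),
                              eps=1.5, min_samples=6)
    assert list(core_weighted) == [0, 1]   # both weighted points are core
    assert len(core_repeated) == 6         # all six repeated points are core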
def test_dbscan_core_samples_toy():
X = [[0], [2], [3], [4], [6], [8], [10]]
n_samples = len(X)
for algorithm in ['brute', 'kd_tree', 'ball_tree']:
# Degenerate case: every sample is a core sample, either with its own
# cluster or including other close core samples.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
min_samples=1)
assert_array_equal(core_samples, np.arange(n_samples))
assert_array_equal(labels, [0, 1, 1, 1, 2, 3, 4])
# With eps=1 and min_samples=2 only the 3 samples from the denser area
# are core samples. All other points are isolated and considered noise.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
min_samples=2)
assert_array_equal(core_samples, [1, 2, 3])
assert_array_equal(labels, [-1, 0, 0, 0, -1, -1, -1])
# Only the sample in the middle of the dense area is core. Its two
# neighbors are edge samples. Remaining samples are noise.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
min_samples=3)
assert_array_equal(core_samples, [2])
assert_array_equal(labels, [-1, 0, 0, 0, -1, -1, -1])
# It's no longer possible to extract core samples with eps=1:
# everything is noise.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
min_samples=4)
assert_array_equal(core_samples, [])
assert_array_equal(labels, -np.ones(n_samples))
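# Editor's hedged recap of the min_samples=3 case above, using the estimator
# API: the lone core sample sits in the middle of the dense run, its two
# neighbours are border points of the same cluster, and everything else is noise.
def _core_border_noise_sketch():
    from sklearn.cluster import DBSCAN
    db = DBSCAN(eps=1, min_samples=3).fit([[0], [2], [3], [4], [6], [8], [10]])
    assert list(db.core_sample_indices_) == [2]
    # Border points share the core sample's label but are not core themselves.
    border = [i for i, lbl in enumerate(db.labels_)
              if lbl != -1 and i not in db.core_sample_indices_]
    assert border == [1, 3]
    assert list(db.labels_) == [-1, 0, 0, 0, -1, -1, -1]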
def test_dbscan_precomputed_metric_with_degenerate_input_arrays():
# see https://github.com/scikit-learn/scikit-learn/issues/4641 for
# more details
X = np.ones((10, 2))
labels = DBSCAN(eps=0.5, metric='precomputed').fit(X).labels_
assert_equal(len(set(labels)), 1)
X = np.zeros((10, 2))
labels = DBSCAN(eps=0.5, metric='precomputed').fit(X).labels_
assert_equal(len(set(labels)), 1)
|
bsd-3-clause
|
Jroque/MITMf
|
plugins/upsidedownternet.py
|
21
|
2362
|
# Copyright (c) 2014-2016 Marcello Salvati
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
# USA
#
from cStringIO import StringIO
from plugins.plugin import Plugin
from PIL import Image, ImageFile
class Upsidedownternet(Plugin):
name = "Upsidedownternet"
optname = "upsidedownternet"
desc = 'Flips images 180 degrees'
version = "0.1"
def initialize(self, options):
self.options = options
def responseheaders(self, response, request):
'''Kill the image skipping that's in place for speed reasons'''
if request.isImageRequest:
request.isImageRequest = False
request.isImage = True
self.imageType = response.headers['content-type'].split('/')[1].upper()
def response(self, response, request, data):
try:
isImage = getattr(request, 'isImage')
except AttributeError:
isImage = False
if isImage:
try:
                # Parsing the raw bytes incrementally with ImageFile.Parser()
                # handles more images than wrapping them in a file-like object.
p = ImageFile.Parser()
p.feed(data)
im = p.close()
im = im.transpose(Image.ROTATE_180)
output = StringIO()
im.save(output, format=self.imageType)
data = output.getvalue()
output.close()
self.clientlog.info("Flipped image", extra=request.clientInfo)
except Exception as e:
self.clientlog.info("Error: {}".format(e), extra=request.clientInfo)
return {'response': response, 'request': request, 'data': data}
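# --- Editor's hedged sketch of the transform this plugin performs, outside the
# MITMf proxy machinery: feed raw image bytes to PIL's incremental parser,
# rotate the image 180 degrees and re-serialise it. The function name and
# arguments are illustrative only.
def _flip_image_sketch(raw_bytes, image_type="PNG"):
    from cStringIO import StringIO
    from PIL import Image, ImageFile
    parser = ImageFile.Parser()
    parser.feed(raw_bytes)                # incremental parse, as in response()
    im = parser.close()
    flipped = im.transpose(Image.ROTATE_180)
    out = StringIO()
    flipped.save(out, format=image_type)  # keep the original content-type format
    return out.getvalue()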
|
gpl-3.0
|