repo_name (string, 5-100 chars) | ref (string, 12-67 chars) | path (string, 4-244 chars) | copies (string, 1-8 chars) | content (string, 0-1.05M chars, nullable)
---|---|---|---|---
sodhancha/Godown | refs/heads/master | invoices/tests.py | 24123 | from django.test import TestCase
# Create your tests here.
|
jwlawson/tensorflow | refs/heads/master | tensorflow/contrib/py2tf/convert/__init__.py | 3 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Code converters used by Py2TF."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# TODO(mdan): Define a base transformer class that can recognize skip_processing
|
amenonsen/ansible | refs/heads/devel | lib/ansible/modules/cloud/amazon/aws_config_recorder.py | 12 | #!/usr/bin/python
# Copyright: (c) 2018, Aaron Smith <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: aws_config_recorder
short_description: Manage AWS Config Recorders
description:
- Module manages AWS Config configuration recorder settings
version_added: "2.6"
requirements: [ 'botocore', 'boto3' ]
author:
- "Aaron Smith (@slapula)"
options:
name:
description:
- The name of the AWS Config resource.
required: true
state:
description:
- Whether the configuration recorder should be present or absent.
default: present
choices: ['present', 'absent']
role_arn:
description:
- Amazon Resource Name (ARN) of the IAM role used to describe the AWS resources associated with the account.
- Required when state=present
recording_group:
description:
- Specifies the types of AWS resources for which AWS Config records configuration changes.
- Required when state=present
suboptions:
all_supported:
description:
- Specifies whether AWS Config records configuration changes for every supported type of regional resource.
- If you set this option to `true`, when AWS Config adds support for a new type of regional resource, it starts
recording resources of that type automatically.
- If you set this option to `true`, you cannot enumerate a list of `resource_types`.
include_global_types:
description:
- Specifies whether AWS Config includes all supported types of global resources (for example, IAM resources)
with the resources that it records.
- Before you can set this option to `true`, you must set the `all_supported` option to `true`.
- If you set this option to `true`, when AWS Config adds support for a new type of global resource, it starts recording
resources of that type automatically.
- The configuration details for any global resource are the same in all regions. To prevent duplicate configuration items,
you should consider customizing AWS Config in only one region to record global resources.
resource_types:
description:
- A list that specifies the types of AWS resources for which AWS Config records configuration changes (for example,
`AWS::EC2::Instance` or `AWS::CloudTrail::Trail`).
- Before you can set this option, you must set the `all_supported` option to `false`.
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
- name: Create Configuration Recorder for AWS Config
aws_config_recorder:
name: test_configuration_recorder
state: present
role_arn: 'arn:aws:iam::123456789012:role/AwsConfigRecorder'
recording_group:
all_supported: true
include_global_types: true
'''
RETURN = '''#'''
try:
import botocore
from botocore.exceptions import BotoCoreError, ClientError
except ImportError:
pass # handled by AnsibleAWSModule
from ansible.module_utils.aws.core import AnsibleAWSModule, is_boto3_error_code
from ansible.module_utils.ec2 import boto3_conn, get_aws_connection_info, AWSRetry
from ansible.module_utils.ec2 import camel_dict_to_snake_dict, boto3_tag_list_to_ansible_dict
def resource_exists(client, module, params):
try:
recorder = client.describe_configuration_recorders(
ConfigurationRecorderNames=[params['name']]
)
return recorder['ConfigurationRecorders'][0]
except is_boto3_error_code('NoSuchConfigurationRecorderException'):
return
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
module.fail_json_aws(e)
def create_resource(client, module, params, result):
try:
response = client.put_configuration_recorder(
ConfigurationRecorder=params
)
result['changed'] = True
result['recorder'] = camel_dict_to_snake_dict(resource_exists(client, module, params))
return result
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Couldn't create AWS Config configuration recorder")
def update_resource(client, module, params, result):
current_params = client.describe_configuration_recorders(
ConfigurationRecorderNames=[params['name']]
)
if params != current_params['ConfigurationRecorders'][0]:
try:
response = client.put_configuration_recorder(
ConfigurationRecorder=params
)
result['changed'] = True
result['recorder'] = camel_dict_to_snake_dict(resource_exists(client, module, params))
return result
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Couldn't update AWS Config configuration recorder")
def delete_resource(client, module, params, result):
try:
response = client.delete_configuration_recorder(
ConfigurationRecorderName=params['name']
)
result['changed'] = True
return result
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Couldn't delete AWS Config configuration recorder")
def main():
module = AnsibleAWSModule(
argument_spec={
'name': dict(type='str', required=True),
'state': dict(type='str', choices=['present', 'absent'], default='present'),
'role_arn': dict(type='str'),
'recording_group': dict(type='dict'),
},
supports_check_mode=False,
required_if=[
('state', 'present', ['role_arn', 'recording_group']),
],
)
result = {
'changed': False
}
name = module.params.get('name')
state = module.params.get('state')
params = {}
if name:
params['name'] = name
if module.params.get('role_arn'):
params['roleARN'] = module.params.get('role_arn')
if module.params.get('recording_group'):
params['recordingGroup'] = {}
if module.params.get('recording_group').get('all_supported') is not None:
params['recordingGroup'].update({
'allSupported': module.params.get('recording_group').get('all_supported')
})
if module.params.get('recording_group').get('include_global_types') is not None:
params['recordingGroup'].update({
'includeGlobalResourceTypes': module.params.get('recording_group').get('include_global_types')
})
if module.params.get('recording_group').get('resource_types'):
params['recordingGroup'].update({
'resourceTypes': module.params.get('recording_group').get('resource_types')
})
else:
params['recordingGroup'].update({
'resourceTypes': []
})
client = module.client('config', retry_decorator=AWSRetry.jittered_backoff())
resource_status = resource_exists(client, module, params)
if state == 'present':
if not resource_status:
create_resource(client, module, params, result)
if resource_status:
update_resource(client, module, params, result)
if state == 'absent':
if resource_status:
delete_resource(client, module, params, result)
module.exit_json(changed=result['changed'])
if __name__ == '__main__':
main()
|
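For context, the module above is essentially an idempotency wrapper around two boto3 Config-service calls. A minimal sketch of the equivalent direct calls (not part of the module; the recorder name and role ARN are the placeholder values from its EXAMPLES block):
import boto3
client = boto3.client('config')
client.put_configuration_recorder(
    ConfigurationRecorder={
        'name': 'test_configuration_recorder',
        'roleARN': 'arn:aws:iam::123456789012:role/AwsConfigRecorder',
        'recordingGroup': {
            'allSupported': True,
            'includeGlobalResourceTypes': True,
            'resourceTypes': [],
        },
    }
)
# Read the recorder back, as resource_exists() does above.
print(client.describe_configuration_recorders(
    ConfigurationRecorderNames=['test_configuration_recorder']))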
linearhub/titanium_mobile | refs/heads/master | node_modules/ioslib/node_modules/node-ios-device/node_modules/node-gyp/gyp/pylib/gyp/common.py | 366 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import with_statement
import collections
import errno
import filecmp
import os.path
import re
import tempfile
import sys
# A minimal memoizing decorator. It'll blow up if the args aren't immutable,
# among other "problems".
class memoize(object):
def __init__(self, func):
self.func = func
self.cache = {}
def __call__(self, *args):
try:
return self.cache[args]
except KeyError:
result = self.func(*args)
self.cache[args] = result
return result
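# Example (illustrative) of the decorator above: repeated calls with the same
# argument tuple are served from self.cache.
#
#   @memoize
#   def Fib(n):
#     return n if n < 2 else Fib(n - 1) + Fib(n - 2)
#
#   Fib(80)   # each distinct n is computed only once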
class GypError(Exception):
"""Error class representing an error, which is to be presented
to the user. The main entry point will catch and display this.
"""
pass
def ExceptionAppend(e, msg):
"""Append a message to the given exception's message."""
if not e.args:
e.args = (msg,)
elif len(e.args) == 1:
e.args = (str(e.args[0]) + ' ' + msg,)
else:
e.args = (str(e.args[0]) + ' ' + msg,) + e.args[1:]
def FindQualifiedTargets(target, qualified_list):
"""
Given a list of qualified targets, return the qualified targets for the
specified |target|.
"""
return [t for t in qualified_list if ParseQualifiedTarget(t)[1] == target]
def ParseQualifiedTarget(target):
# Splits a qualified target into a build file, target name and toolset.
# NOTE: rsplit is used to disambiguate the Windows drive letter separator.
target_split = target.rsplit(':', 1)
if len(target_split) == 2:
[build_file, target] = target_split
else:
build_file = None
target_split = target.rsplit('#', 1)
if len(target_split) == 2:
[target, toolset] = target_split
else:
toolset = None
return [build_file, target, toolset]
def ResolveTarget(build_file, target, toolset):
# This function resolves a target into a canonical form:
# - a fully defined build file, either absolute or relative to the current
# directory
# - a target name
# - a toolset
#
# build_file is the file relative to which 'target' is defined.
# target is the qualified target.
# toolset is the default toolset for that target.
[parsed_build_file, target, parsed_toolset] = ParseQualifiedTarget(target)
if parsed_build_file:
if build_file:
# If a relative path, parsed_build_file is relative to the directory
# containing build_file. If build_file is not in the current directory,
# parsed_build_file is not a usable path as-is. Resolve it by
# interpreting it as relative to build_file. If parsed_build_file is
# absolute, it is usable as a path regardless of the current directory,
# and os.path.join will return it as-is.
build_file = os.path.normpath(os.path.join(os.path.dirname(build_file),
parsed_build_file))
# Further (to handle cases like ../cwd), make it relative to cwd.
if not os.path.isabs(build_file):
build_file = RelativePath(build_file, '.')
else:
build_file = parsed_build_file
if parsed_toolset:
toolset = parsed_toolset
return [build_file, target, toolset]
def BuildFile(fully_qualified_target):
# Extracts the build file from the fully qualified target.
return ParseQualifiedTarget(fully_qualified_target)[0]
def GetEnvironFallback(var_list, default):
"""Look up a key in the environment, with fallback to secondary keys
and finally falling back to a default value."""
for var in var_list:
if var in os.environ:
return os.environ[var]
return default
def QualifiedTarget(build_file, target, toolset):
# "Qualified" means the file that a target was defined in and the target
# name, separated by a colon, suffixed by a # and the toolset name:
# /path/to/file.gyp:target_name#toolset
fully_qualified = build_file + ':' + target
if toolset:
fully_qualified = fully_qualified + '#' + toolset
return fully_qualified
@memoize
def RelativePath(path, relative_to):
# Assuming both |path| and |relative_to| are relative to the current
# directory, returns a relative path that identifies path relative to
# relative_to.
# Convert to normalized (and therefore absolute paths).
path = os.path.realpath(path)
relative_to = os.path.realpath(relative_to)
# On Windows, we can't create a relative path to a different drive, so just
# use the absolute path.
if sys.platform == 'win32':
if (os.path.splitdrive(path)[0].lower() !=
os.path.splitdrive(relative_to)[0].lower()):
return path
# Split the paths into components.
path_split = path.split(os.path.sep)
relative_to_split = relative_to.split(os.path.sep)
# Determine how much of the prefix the two paths share.
prefix_len = len(os.path.commonprefix([path_split, relative_to_split]))
# Put enough ".." components to back up out of relative_to to the common
# prefix, and then append the part of path_split after the common prefix.
relative_split = [os.path.pardir] * (len(relative_to_split) - prefix_len) + \
path_split[prefix_len:]
if len(relative_split) == 0:
# The paths were the same.
return ''
# Turn it back into a string and we're done.
return os.path.join(*relative_split)
@memoize
def InvertRelativePath(path, toplevel_dir=None):
"""Given a path like foo/bar that is relative to toplevel_dir, return
the inverse relative path back to the toplevel_dir.
E.g. os.path.normpath(os.path.join(path, InvertRelativePath(path)))
should always produce the empty string, unless the path contains symlinks.
"""
if not path:
return path
toplevel_dir = '.' if toplevel_dir is None else toplevel_dir
return RelativePath(toplevel_dir, os.path.join(toplevel_dir, path))
def FixIfRelativePath(path, relative_to):
# Like RelativePath but returns |path| unchanged if it is absolute.
if os.path.isabs(path):
return path
return RelativePath(path, relative_to)
def UnrelativePath(path, relative_to):
# Assuming that |relative_to| is relative to the current directory, and |path|
# is a path relative to the dirname of |relative_to|, returns a path that
# identifies |path| relative to the current directory.
rel_dir = os.path.dirname(relative_to)
return os.path.normpath(os.path.join(rel_dir, path))
# re objects used by EncodePOSIXShellArgument. See IEEE 1003.1 XCU.2.2 at
# http://www.opengroup.org/onlinepubs/009695399/utilities/xcu_chap02.html#tag_02_02
# and the documentation for various shells.
# _quote is a pattern that should match any argument that needs to be quoted
# with double-quotes by EncodePOSIXShellArgument. It matches the following
# characters appearing anywhere in an argument:
# \t, \n, space parameter separators
# # comments
# $ expansions (quoted to always expand within one argument)
# % called out by IEEE 1003.1 XCU.2.2
# & job control
# ' quoting
# (, ) subshell execution
# *, ?, [ pathname expansion
# ; command delimiter
# <, >, | redirection
# = assignment
# {, } brace expansion (bash)
# ~ tilde expansion
# It also matches the empty string, because "" (or '') is the only way to
# represent an empty string literal argument to a POSIX shell.
#
# This does not match the characters in _escape, because those need to be
# backslash-escaped regardless of whether they appear in a double-quoted
# string.
_quote = re.compile('[\t\n #$%&\'()*;<=>?[{|}~]|^$')
# _escape is a pattern that should match any character that needs to be
# escaped with a backslash, whether or not the argument matched the _quote
# pattern. _escape is used with re.sub to backslash anything in _escape's
# first match group, hence the (parentheses) in the regular expression.
#
# _escape matches the following characters appearing anywhere in an argument:
# " to prevent POSIX shells from interpreting this character for quoting
# \ to prevent POSIX shells from interpreting this character for escaping
# ` to prevent POSIX shells from interpreting this character for command
# substitution
# Missing from this list is $, because the desired behavior of
# EncodePOSIXShellArgument is to permit parameter (variable) expansion.
#
# Also missing from this list is !, which bash will interpret as the history
# expansion character when history is enabled. bash does not enable history
# by default in non-interactive shells, so this is not thought to be a problem.
# ! was omitted from this list because bash interprets "\!" as a literal string
# including the backslash character (avoiding history expansion but retaining
# the backslash), which would not be correct for argument encoding. Handling
# this case properly would also be problematic because bash allows the history
# character to be changed with the histchars shell variable. Fortunately,
# as history is not enabled in non-interactive shells and
# EncodePOSIXShellArgument is only expected to encode for non-interactive
# shells, there is no room for error here by ignoring !.
_escape = re.compile(r'(["\\`])')
def EncodePOSIXShellArgument(argument):
"""Encodes |argument| suitably for consumption by POSIX shells.
argument may be quoted and escaped as necessary to ensure that POSIX shells
treat the returned value as a literal representing the argument passed to
this function. Parameter (variable) expansions beginning with $ are allowed
to remain intact without escaping the $, to allow the argument to contain
references to variables to be expanded by the shell.
"""
if not isinstance(argument, str):
argument = str(argument)
if _quote.search(argument):
quote = '"'
else:
quote = ''
encoded = quote + re.sub(_escape, r'\\\1', argument) + quote
return encoded
def EncodePOSIXShellList(list):
"""Encodes |list| suitably for consumption by POSIX shells.
Returns EncodePOSIXShellArgument for each item in list, and joins them
together using the space character as an argument separator.
"""
encoded_arguments = []
for argument in list:
encoded_arguments.append(EncodePOSIXShellArgument(argument))
return ' '.join(encoded_arguments)
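# For example (illustrative), EncodePOSIXShellList(['echo', 'hello world', '$PWD'])
# returns 'echo "hello world" "$PWD"': the space and the $ trigger double-quoting,
# while the $ itself is left unescaped so the shell can still expand it.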
def DeepDependencyTargets(target_dicts, roots):
"""Returns the recursive list of target dependencies."""
dependencies = set()
pending = set(roots)
while pending:
# Pluck out one.
r = pending.pop()
# Skip if visited already.
if r in dependencies:
continue
# Add it.
dependencies.add(r)
# Add its children.
spec = target_dicts[r]
pending.update(set(spec.get('dependencies', [])))
pending.update(set(spec.get('dependencies_original', [])))
return list(dependencies - set(roots))
def BuildFileTargets(target_list, build_file):
"""From a target_list, returns the subset from the specified build_file.
"""
return [p for p in target_list if BuildFile(p) == build_file]
def AllTargets(target_list, target_dicts, build_file):
"""Returns all targets (direct and dependencies) for the specified build_file.
"""
bftargets = BuildFileTargets(target_list, build_file)
deptargets = DeepDependencyTargets(target_dicts, bftargets)
return bftargets + deptargets
def WriteOnDiff(filename):
"""Write to a file only if the new contents differ.
Arguments:
filename: name of the file to potentially write to.
Returns:
A file like object which will write to temporary file and only overwrite
the target if it differs (on close).
"""
class Writer(object):
"""Wrapper around file which only covers the target if it differs."""
def __init__(self):
# Pick temporary file.
tmp_fd, self.tmp_path = tempfile.mkstemp(
suffix='.tmp',
prefix=os.path.split(filename)[1] + '.gyp.',
dir=os.path.split(filename)[0])
try:
self.tmp_file = os.fdopen(tmp_fd, 'wb')
except Exception:
# Don't leave turds behind.
os.unlink(self.tmp_path)
raise
def __getattr__(self, attrname):
# Delegate everything else to self.tmp_file
return getattr(self.tmp_file, attrname)
def close(self):
try:
# Close tmp file.
self.tmp_file.close()
# Determine if different.
same = False
try:
same = filecmp.cmp(self.tmp_path, filename, False)
except OSError, e:
if e.errno != errno.ENOENT:
raise
if same:
# The new file is identical to the old one, just get rid of the new
# one.
os.unlink(self.tmp_path)
else:
# The new file is different from the old one, or there is no old one.
# Rename the new file to the permanent name.
#
# tempfile.mkstemp uses an overly restrictive mode, resulting in a
# file that can only be read by the owner, regardless of the umask.
# There's no reason to not respect the umask here, which means that
# an extra hoop is required to fetch it and reset the new file's mode.
#
# No way to get the umask without setting a new one? Set a safe one
# and then set it back to the old value.
umask = os.umask(077)
os.umask(umask)
os.chmod(self.tmp_path, 0666 & ~umask)
if sys.platform == 'win32' and os.path.exists(filename):
# NOTE: on windows (but not cygwin) rename will not replace an
# existing file, so it must be preceded with a remove. Sadly there
# is no way to make the switch atomic.
os.remove(filename)
os.rename(self.tmp_path, filename)
except Exception:
# Don't leave turds behind.
os.unlink(self.tmp_path)
raise
return Writer()
def EnsureDirExists(path):
"""Make sure the directory for |path| exists."""
try:
os.makedirs(os.path.dirname(path))
except OSError:
pass
def GetFlavor(params):
"""Returns |params.flavor| if it's set, the system's default flavor else."""
flavors = {
'cygwin': 'win',
'win32': 'win',
'darwin': 'mac',
}
if 'flavor' in params:
return params['flavor']
if sys.platform in flavors:
return flavors[sys.platform]
if sys.platform.startswith('sunos'):
return 'solaris'
if sys.platform.startswith('freebsd'):
return 'freebsd'
if sys.platform.startswith('openbsd'):
return 'openbsd'
if sys.platform.startswith('aix'):
return 'aix'
return 'linux'
def CopyTool(flavor, out_path):
"""Finds (flock|mac|win)_tool.gyp in the gyp directory and copies it
to |out_path|."""
# aix and solaris just need flock emulation. mac and win use more complicated
# support scripts.
prefix = {
'aix': 'flock',
'solaris': 'flock',
'mac': 'mac',
'win': 'win'
}.get(flavor, None)
if not prefix:
return
# Slurp input file.
source_path = os.path.join(
os.path.dirname(os.path.abspath(__file__)), '%s_tool.py' % prefix)
with open(source_path) as source_file:
source = source_file.readlines()
# Add header and write it out.
tool_path = os.path.join(out_path, 'gyp-%s-tool' % prefix)
with open(tool_path, 'w') as tool_file:
tool_file.write(
''.join([source[0], '# Generated by gyp. Do not edit.\n'] + source[1:]))
# Make file executable.
os.chmod(tool_path, 0755)
# From Alex Martelli,
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/52560
# ASPN: Python Cookbook: Remove duplicates from a sequence
# First comment, dated 2001/10/13.
# (Also in the printed Python Cookbook.)
def uniquer(seq, idfun=None):
if idfun is None:
idfun = lambda x: x
seen = {}
result = []
for item in seq:
marker = idfun(item)
if marker in seen: continue
seen[marker] = 1
result.append(item)
return result
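# For example (illustrative), uniquer(['a', 'b', 'a', 'c']) returns
# ['a', 'b', 'c']: first occurrences win and the original order is preserved.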
# Based on http://code.activestate.com/recipes/576694/.
class OrderedSet(collections.MutableSet):
def __init__(self, iterable=None):
self.end = end = []
end += [None, end, end] # sentinel node for doubly linked list
self.map = {} # key --> [key, prev, next]
if iterable is not None:
self |= iterable
def __len__(self):
return len(self.map)
def __contains__(self, key):
return key in self.map
def add(self, key):
if key not in self.map:
end = self.end
curr = end[1]
curr[2] = end[1] = self.map[key] = [key, curr, end]
def discard(self, key):
if key in self.map:
key, prev_item, next_item = self.map.pop(key)
prev_item[2] = next_item
next_item[1] = prev_item
def __iter__(self):
end = self.end
curr = end[2]
while curr is not end:
yield curr[0]
curr = curr[2]
def __reversed__(self):
end = self.end
curr = end[1]
while curr is not end:
yield curr[0]
curr = curr[1]
# The second argument is an addition that causes a pylint warning.
def pop(self, last=True): # pylint: disable=W0221
if not self:
raise KeyError('set is empty')
key = self.end[1][0] if last else self.end[2][0]
self.discard(key)
return key
def __repr__(self):
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, list(self))
def __eq__(self, other):
if isinstance(other, OrderedSet):
return len(self) == len(other) and list(self) == list(other)
return set(self) == set(other)
# Extensions to the recipe.
def update(self, iterable):
for i in iterable:
if i not in self:
self.add(i)
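# Example (illustrative): unlike the built-in set, insertion order is preserved,
# e.g. list(OrderedSet(['b', 'a', 'b', 'c'])) == ['b', 'a', 'c'].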
class CycleError(Exception):
"""An exception raised when an unexpected cycle is detected."""
def __init__(self, nodes):
self.nodes = nodes
def __str__(self):
return 'CycleError: cycle involving: ' + str(self.nodes)
def TopologicallySorted(graph, get_edges):
r"""Topologically sort based on a user provided edge definition.
Args:
graph: A list of node names.
get_edges: A function mapping from node name to a hashable collection
of node names which this node has outgoing edges to.
Returns:
A list containing all of the node in graph in topological order.
It is assumed that calling get_edges once for each node and caching is
cheaper than repeatedly calling get_edges.
Raises:
CycleError in the event of a cycle.
Example:
graph = {'a': '$(b) $(c)', 'b': 'hi', 'c': '$(b)'}
def GetEdges(node):
return re.findall(r'\$\(([^)]+)\)', graph[node])
print TopologicallySorted(graph.keys(), GetEdges)
==>
['a', 'c', 'b']
"""
get_edges = memoize(get_edges)
visited = set()
visiting = set()
ordered_nodes = []
def Visit(node):
if node in visiting:
raise CycleError(visiting)
if node in visited:
return
visited.add(node)
visiting.add(node)
for neighbor in get_edges(node):
Visit(neighbor)
visiting.remove(node)
ordered_nodes.insert(0, node)
for node in sorted(graph):
Visit(node)
return ordered_nodes
def CrossCompileRequested():
# TODO: figure out how to not build extra host objects in the
# non-cross-compile case when this is enabled, and enable unconditionally.
return (os.environ.get('GYP_CROSSCOMPILE') or
os.environ.get('AR_host') or
os.environ.get('CC_host') or
os.environ.get('CXX_host') or
os.environ.get('AR_target') or
os.environ.get('CC_target') or
os.environ.get('CXX_target'))
|
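A short illustrative driver for two of the helpers above, assuming gyp/pylib is on sys.path so that gyp.common is importable; the example paths are placeholders and the exact RelativePath results depend on the current directory and symlinks.
import gyp.common as common
# Relative path from the directory named by the second argument to the first path.
print(common.RelativePath('out/Debug/obj', 'out'))      # e.g. 'Debug/obj'
print(common.RelativePath('src/foo.cc', 'out/Debug'))   # e.g. '../../src/foo.cc'
# Topological sort: each node is listed before the nodes it points at.
graph = {'a': ['b', 'c'], 'b': [], 'c': ['b']}
print(common.TopologicallySorted(list(graph), lambda node: graph[node]))  # ['a', 'c', 'b']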
remenska/iSDM | refs/heads/master | iSDM/model.py | 1 |
"""
A module for preparing all environmental layers used for fitting a model into a single dataframe in a convenient format.
.. moduleauthor:: Daniela Remenska <[email protected]>
"""
from enum import Enum
from iSDM.environment import RasterEnvironmentalLayer, VectorEnvironmentalLayer
import pandas as pd
import numpy as np
from rasterio.transform import Affine
import gc
import logging
logger = logging.getLogger('iSDM.model')
logger.setLevel(logging.DEBUG)
class Algorithm(Enum):
GAM = 1
GLM = 2
MAXENT = 3
class Evaluation(Enum):
ROC = 1
KAPPA = 2
TSS = 3
AUC = 4
class Model(object):
"""
Model
A class for encapsulating the data preparation functionality for fitting a model. Makes sure all layers are at a consistent
resolution, global scale. Constructs the "base" dataframe and allows adding environmental layers data to this dataframe incrementally.
:ivar pixel_size: The size of the pixel in degrees, i.e., the resolution to use for rasterizing.
:ivar x_res: The number of pixels in the global-scale raster, along the x-axis.
:ivar y_res: The number of pixels in the global-scale raster, along the y-axis.
:ivar base_layer: A "base" raster environmental layer to use for deducing the world coordinates used as index in the base dataframe. \
All burned pixels (value==1) will be converted to global-scale latitude/longitude coordinates (the index).
:vartype base_layer: RasterEnvironmentalLayer
:ivar base_dataframe: A base dataframe containing latitude/longitude columns as an index. Further environmental layers added \
to the model will be merged with this base dataframe. Therefore, it is expected to contain all the necessary latitude/longitude combinations \
that may show up in any further environmental layers. If raster_data is not provided as a base (see below), then all world coordinates \
(at a particular resolution) are taken into account.
:vartype base_dataframe: pandas.DataFrame
"""
def __init__(self, pixel_size, raster_data=None, **kwargs):
logger.info("Preparing a base dataframe...")
x_min, y_min, x_max, y_max = -180, -90, 180, 90 # global
self.pixel_size = pixel_size
self.x_res = int((x_max - x_min) / self.pixel_size)
self.y_res = int((y_max - y_min) / self.pixel_size)
self.base_layer = RasterEnvironmentalLayer()
self.base_layer.raster_affine = Affine(pixel_size, 0.0, x_min, 0.0, -pixel_size, y_max)
logger.info("Base layer: Computing world coordinates...")
if raster_data is not None:
all_coordinates = self.base_layer.pixel_to_world_coordinates(raster_data=raster_data)
else:
all_coordinates = self.base_layer.pixel_to_world_coordinates(raster_data=np.ones((self.y_res, self.x_res), dtype=np.uint8))
# self.base_dataframe = pd.DataFrame([all_coordinates[0], all_coordinates[1]]).T
# self.base_dataframe.columns = ['decimallatitude', 'decimallongitude']
self.base_dataframe = pd.DataFrame(columns=['decimallatitude', 'decimallongitude'])
self.base_dataframe['decimallatitude'] = np.array(all_coordinates[0])
self.base_dataframe['decimallongitude'] = np.array(all_coordinates[1])
self.base_dataframe.set_index(['decimallatitude', 'decimallongitude'], inplace=True, drop=True)
del all_coordinates
gc.collect()
def cross_validate(self, percentage, random_seed):
# Needs to be implemented
pass
def evaluate_performance(self, method=Evaluation.ROC, **kwargs):
self.method = method
def fit(self):
pass
def add_environmental_layer(self, layer, discard_threshold=None, discard_nodata_value=True):
"""
Adds an environmental layer (Raster or Vector) by converting its values to latitude/longitude decimals, and merging with the
base dataframe. If the added layer is already raster, the first band is expected to contain all the (pixel) data values.
The resolution of the provided raster needs to match the Model resolution. The pixels are then converted to world coordinates,
and merged with the base dataframe. If the environmental layer is vector, then it is first rasterized, and the logic proceeds
the same way as with a raster layer.
:param EnvironmentalLayer layer: The environmental layer to be added to the model dataframe.
:param int discard_threshold: Optional pixel value to use for discarding layer pixels below a certain value, before adding the layer.
:param bool discard_nodata_value: Optionally filter out "nodata" pixel values from the raster, when converting the layer \
pixels to world coordinates.
"""
logger.info("Loading environmental layer from %s " % layer.file_path)
if isinstance(layer, RasterEnvironmentalLayer):
layer_reader = layer.load_data()
layer_data = layer_reader.read(1)
if layer_data.shape != (self.y_res, self.x_res):
logger.error("The layer is not at the proper resolution! Layer shape:%s " % (layer_data.shape, ))
return
logger.info("Computing world coordinates...")
if discard_nodata_value:
logger.info("Filtering out no_data pixels.")
layer_data = np.where(layer_data != layer_reader.nodata, layer_data, np.nan)
if discard_threshold is not None:
logger.info("Discarding values below %s " % discard_threshold)
layer_data = np.where(layer_data > discard_threshold, layer_data, np.nan)
layer_coordinates = layer.pixel_to_world_coordinates(raster_data=layer_data,
filter_no_data_value=discard_nodata_value,
no_data_value=layer_reader.nodata)
logger.info("Constructing dataframe for %s ..." % layer.name_layer)
# layer_dataframe = pd.DataFrame([layer_coordinates[0], layer_coordinates[1]]).T
# layer_dataframe.columns = ['decimallatitude', 'decimallongitude']
layer_dataframe = pd.DataFrame(columns=['decimallatitude', 'decimallongitude'])
layer_dataframe['decimallatitude'] = np.array(layer_coordinates[0])
layer_dataframe['decimallongitude'] = np.array(layer_coordinates[1])
del layer_coordinates
gc.collect()
flattened_data = layer_data.reshape(np.product(layer_data.shape))
# if discard_threshold:
# logger.info("Ignoring values below %s " % discard_threshold)
# flattened_data = flattened_data[flattened_data > discard_threshold]
layer_dataframe[layer.name_layer] = flattened_data[~np.isnan(flattened_data)]
layer_dataframe.set_index(['decimallatitude', 'decimallongitude'], inplace=True, drop=True)
logger.info("Shape of layer_dataframe: % s " % (layer_dataframe.shape, ))
logger.info("Finished constructing dataframe for %s ..." % layer.name_layer)
logger.info("Merging with base dataframe...")
self.base_dataframe = pd.merge(self.base_dataframe, layer_dataframe,
how='left',
left_index=True,
right_index=True)
logger.info("Shape of base_dataframe: %s " % (self.base_dataframe.shape, ))
del layer_dataframe
gc.collect()
elif isinstance(layer, VectorEnvironmentalLayer):
layer.load_data()
# if not hasattr(layer, 'pixel_size'):
# logger.error("Please provide a pixel size to use for rasterizing the vector layer.")
# return
if not hasattr(layer, 'raster_file'):
logger.error("Please provide a target raster_file location.")
return
if not hasattr(layer, 'classifier_column'):
logger.info("No classifier column specified; will rasterize everything in one band...")
layer.set_classifier(classifier_column=None)
raster_data = layer.rasterize(raster_file=layer.get_raster_file(), pixel_size=self.pixel_size, classifier_column=layer.get_classifier())
logger.info("Raster data has shape: %s " % (raster_data.shape,))
self.base_dataframe[layer.name_layer] = np.nan
for idx, band in enumerate(raster_data):
logger.info("Computing world coordinates for band number %s " % idx)
band_coordinates = self.base_layer.pixel_to_world_coordinates(raster_data=band)
logger.info("Constructing dataframe for band number %s " % idx)
# band_dataframe = pd.DataFrame([band_coordinates[0], band_coordinates[1]]).T
# band_dataframe.columns = ['decimallatitude', 'decimallongitude']
band_dataframe = pd.DataFrame(columns=['decimallatitude', 'decimallongitude'])
band_dataframe['decimallatitude'] = np.array(band_coordinates[0])
band_dataframe['decimallongitude'] = np.array(band_coordinates[1])
band_dataframe[layer.name_layer] = idx + 1
band_dataframe.set_index(['decimallatitude', 'decimallongitude'], inplace=True, drop=True)
logger.info("Finished constructing dataframe for band number %s " % idx)
logger.info("Merging with base dataframe...")
self.base_dataframe.update(band_dataframe)  # much faster than combine_first
logger.info("Shape of base_dataframe: %s " % (self.base_dataframe.shape, ))
logger.info("Finished processing band number %s " % idx)
del band_dataframe
gc.collect()
def get_base_dataframe(self):
return self.base_dataframe
|
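A hypothetical usage sketch for the Model class above. The RasterEnvironmentalLayer constructor keywords (file_path, name_layer) and the GeoTIFF path are assumptions made for illustration; pixel_size, add_environmental_layer and get_base_dataframe are taken from the code itself.
from iSDM.model import Model
from iSDM.environment import RasterEnvironmentalLayer
model = Model(pixel_size=0.5)    # 0.5-degree global grid, i.e. 720 x 360 pixels
temperature = RasterEnvironmentalLayer(file_path="./data/mean_temp.tif",
                                       name_layer="mean_temp")
model.add_environmental_layer(temperature, discard_nodata_value=True)
df = model.get_base_dataframe()  # indexed by (decimallatitude, decimallongitude)
print(df.head())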
jennolsen84/PyTables | refs/heads/develop | doc/sphinxext/apigen.py | 12 | """Attempt to generate templates for module reference with Sphinx
XXX - we exclude extension modules
To include extension modules, first identify them as valid in the
``_uri2path`` method, then handle them in the ``_parse_module`` script.
We get functions and classes by parsing the text of .py files.
Alternatively we could import the modules for discovery, and we'd have
to do that for extension modules. This would involve changing the
``_parse_module`` method to work via import and introspection, and
might involve changing ``discover_modules`` (which determines which
files are modules, and therefore which module URIs will be passed to
``_parse_module``).
NOTE: this is a modified version of a script originally shipped with the
PyMVPA project, which we've adapted for NIPY use. PyMVPA is an MIT-licensed
project."""
# Stdlib imports
import os
import re
# Functions and classes
class ApiDocWriter(object):
''' Class for automatic detection and parsing of API docs
to Sphinx-parsable reST format'''
# only separating first two levels
rst_section_levels = ['*', '=', '-', '~', '^']
def __init__(self,
package_name,
rst_extension='.rst',
package_skip_patterns=None,
module_skip_patterns=None,
):
''' Initialize package for parsing
Parameters
----------
package_name : string
Name of the top-level package. *package_name* must be the
name of an importable package
rst_extension : string, optional
Extension for reST files, default '.rst'
package_skip_patterns : None or sequence of {strings, regexps}
Sequence of strings giving URIs of packages to be excluded
Operates on the package path, starting at (including) the
first dot in the package path, after *package_name* - so,
if *package_name* is ``sphinx``, then ``sphinx.util`` will
result in ``.util`` being passed for searching by these
regexps. If is None, gives default. Default is:
['\.tests$']
module_skip_patterns : None or sequence
Sequence of strings giving URIs of modules to be excluded
Operates on the module name including preceding URI path,
back to the first dot after *package_name*. For example
``sphinx.util.console`` results in the string to search of
``.util.console``
If is None, gives default. Default is:
['\.setup$', '\._']
'''
if package_skip_patterns is None:
package_skip_patterns = ['\\.tests$']
if module_skip_patterns is None:
module_skip_patterns = ['\\.setup$', '\\._']
self.package_name = package_name
self.rst_extension = rst_extension
self.package_skip_patterns = package_skip_patterns
self.module_skip_patterns = module_skip_patterns
def get_package_name(self):
return self._package_name
def set_package_name(self, package_name):
''' Set package_name
>>> docwriter = ApiDocWriter('sphinx')
>>> import sphinx
>>> docwriter.root_path == sphinx.__path__[0]
True
>>> docwriter.package_name = 'docutils'
>>> import docutils
>>> docwriter.root_path == docutils.__path__[0]
True
'''
# It's also possible to imagine caching the module parsing here
self._package_name = package_name
self.root_module = __import__(package_name)
self.root_path = self.root_module.__path__[0]
self.written_modules = None
package_name = property(get_package_name, set_package_name, None,
'get/set package_name')
def _get_object_name(self, line):
''' Get second token in line
>>> docwriter = ApiDocWriter('sphinx')
>>> docwriter._get_object_name(" def func(): ")
'func'
>>> docwriter._get_object_name(" class Klass(object): ")
'Klass'
>>> docwriter._get_object_name(" class Klass: ")
'Klass'
'''
name = line.split()[1].split('(')[0].strip()
# in case we have classes which are not derived from object
# ie. old style classes
return name.rstrip(':')
def _uri2path(self, uri):
''' Convert uri to absolute filepath
Parameters
----------
uri : string
URI of python module to return path for
Returns
-------
path : None or string
Returns None if there is no valid path for this URI
Otherwise returns absolute file system path for URI
Examples
--------
>>> docwriter = ApiDocWriter('sphinx')
>>> import sphinx
>>> modpath = sphinx.__path__[0]
>>> res = docwriter._uri2path('sphinx.builder')
>>> res == os.path.join(modpath, 'builder.py')
True
>>> res = docwriter._uri2path('sphinx')
>>> res == os.path.join(modpath, '__init__.py')
True
>>> docwriter._uri2path('sphinx.does_not_exist')
'''
if uri == self.package_name:
return os.path.join(self.root_path, '__init__.py')
path = uri.replace('.', os.path.sep)
path = path.replace(self.package_name + os.path.sep, '')
path = os.path.join(self.root_path, path)
# XXX maybe check for extensions as well?
if os.path.exists(path + '.py'): # file
path += '.py'
elif os.path.exists(os.path.join(path, '__init__.py')):
path = os.path.join(path, '__init__.py')
else:
return None
return path
def _path2uri(self, dirpath):
''' Convert directory path to uri '''
relpath = dirpath.replace(self.root_path, self.package_name)
if relpath.startswith(os.path.sep):
relpath = relpath[1:]
return relpath.replace(os.path.sep, '.')
def _parse_module(self, uri):
''' Parse module defined in *uri* '''
filename = self._uri2path(uri)
if filename is None:
# nothing that we could handle here.
return ([], [])
f = open(filename, 'rt')
functions, classes = self._parse_lines(f)
f.close()
return functions, classes
def _parse_lines(self, linesource):
''' Parse lines of text for functions and classes '''
functions = []
classes = []
for line in linesource:
if line.startswith('def ') and line.count('('):
# exclude private stuff
name = self._get_object_name(line)
if not name.startswith('_'):
functions.append(name)
elif line.startswith('class '):
# exclude private stuff
name = self._get_object_name(line)
if not name.startswith('_'):
classes.append(name)
else:
pass
functions.sort()
classes.sort()
return functions, classes
def generate_api_doc(self, uri):
'''Make autodoc documentation template string for a module
Parameters
----------
uri : string
python location of module - e.g 'sphinx.builder'
Returns
-------
S : string
Contents of API doc
'''
# get the names of all classes and functions
functions, classes = self._parse_module(uri)
if not len(functions) and not len(classes):
print 'WARNING: Empty -', uri # dbg
return ''
# Make a shorter version of the uri that omits the package name for
# titles
uri_short = re.sub(r'^%s\.' % self.package_name, '', uri)
ad = '.. AUTO-GENERATED FILE -- DO NOT EDIT!\n\n'
chap_title = uri_short
ad += (chap_title+'\n'+ self.rst_section_levels[1] * len(chap_title)
+ '\n\n')
# Set the chapter title to read 'module' for all modules except for the
# main packages
if '.' in uri:
title = 'Module: :mod:`' + uri_short + '`'
else:
title = ':mod:`' + uri_short + '`'
ad += title + '\n' + self.rst_section_levels[2] * len(title)
if len(classes):
ad += '\nInheritance diagram for ``%s``:\n\n' % uri
ad += '.. inheritance-diagram:: %s \n' % uri
ad += ' :parts: 3\n'
ad += '\n.. automodule:: ' + uri + '\n'
ad += '\n.. currentmodule:: ' + uri + '\n'
multi_class = len(classes) > 1
multi_fx = len(functions) > 1
if multi_class:
ad += '\n' + 'Classes' + '\n' + \
self.rst_section_levels[2] * 7 + '\n'
elif len(classes) and multi_fx:
ad += '\n' + 'Class' + '\n' + \
self.rst_section_levels[2] * 5 + '\n'
for c in classes:
ad += '\n:class:`' + c + '`\n' \
+ self.rst_section_levels[multi_class + 2 ] * \
(len(c)+9) + '\n\n'
ad += '\n.. autoclass:: ' + c + '\n'
# must NOT exclude from index to keep cross-refs working
ad += ' :members:\n' \
' :undoc-members:\n' \
' :show-inheritance:\n' \
' :inherited-members:\n' \
'\n' \
' .. automethod:: __init__\n'
if multi_fx:
ad += '\n' + 'Functions' + '\n' + \
self.rst_section_levels[2] * 9 + '\n\n'
elif len(functions) and multi_class:
ad += '\n' + 'Function' + '\n' + \
self.rst_section_levels[2] * 8 + '\n\n'
for f in functions:
# must NOT exclude from index to keep cross-refs working
ad += '\n.. autofunction:: ' + uri + '.' + f + '\n\n'
return ad
def _survives_exclude(self, matchstr, match_type):
''' Returns True if *matchstr* does not match patterns
``self.package_name`` removed from front of string if present
Examples
--------
>>> dw = ApiDocWriter('sphinx')
>>> dw._survives_exclude('sphinx.okpkg', 'package')
True
>>> dw.package_skip_patterns.append('^\\.badpkg$')
>>> dw._survives_exclude('sphinx.badpkg', 'package')
False
>>> dw._survives_exclude('sphinx.badpkg', 'module')
True
>>> dw._survives_exclude('sphinx.badmod', 'module')
True
>>> dw.module_skip_patterns.append('^\\.badmod$')
>>> dw._survives_exclude('sphinx.badmod', 'module')
False
'''
if match_type == 'module':
patterns = self.module_skip_patterns
elif match_type == 'package':
patterns = self.package_skip_patterns
else:
raise ValueError('Cannot interpret match type "%s"'
% match_type)
# Match to URI without package name
L = len(self.package_name)
if matchstr[:L] == self.package_name:
matchstr = matchstr[L:]
for pat in patterns:
try:
pat.search
except AttributeError:
pat = re.compile(pat)
if pat.search(matchstr):
return False
return True
def discover_modules(self):
''' Return module sequence discovered from ``self.package_name``
Parameters
----------
None
Returns
-------
mods : sequence
Sequence of module names within ``self.package_name``
Examples
--------
>>> dw = ApiDocWriter('sphinx')
>>> mods = dw.discover_modules()
>>> 'sphinx.util' in mods
True
>>> dw.package_skip_patterns.append('\.util$')
>>> 'sphinx.util' in dw.discover_modules()
False
>>>
'''
modules = [self.package_name]
# raw directory parsing
for dirpath, dirnames, filenames in os.walk(self.root_path):
# Check directory names for packages
root_uri = self._path2uri(os.path.join(self.root_path,
dirpath))
for dirname in dirnames[:]: # copy list - we modify inplace
package_uri = '.'.join((root_uri, dirname))
if (self._uri2path(package_uri) and
self._survives_exclude(package_uri, 'package')):
modules.append(package_uri)
else:
dirnames.remove(dirname)
# Check filenames for modules
for filename in filenames:
module_name = filename[:-3]
module_uri = '.'.join((root_uri, module_name))
if (self._uri2path(module_uri) and
self._survives_exclude(module_uri, 'module')):
modules.append(module_uri)
return sorted(modules)
def write_modules_api(self, modules, outdir):
# write the list
written_modules = []
for m in modules:
api_str = self.generate_api_doc(m)
if not api_str:
continue
# write out to file
outfile = os.path.join(outdir,
m + self.rst_extension)
fileobj = open(outfile, 'wt')
fileobj.write(api_str)
fileobj.close()
written_modules.append(m)
self.written_modules = written_modules
def write_api_docs(self, outdir):
"""Generate API reST files.
Parameters
----------
outdir : string
Directory name in which to store files
We create automatic filenames for each module
Returns
-------
None
Notes
-----
Sets self.written_modules to list of written modules
"""
if not os.path.exists(outdir):
os.mkdir(outdir)
# compose list of modules
modules = self.discover_modules()
self.write_modules_api(modules, outdir)
def write_index(self, outdir, froot='gen', relative_to=None):
"""Make a reST API index file from written files
Parameters
----------
path : string
Filename to write index to
outdir : string
Directory to which to write generated index file
froot : string, optional
root (filename without extension) of filename to write to
Defaults to 'gen'. We add ``self.rst_extension``.
relative_to : string
path to which written filenames are relative. This
component of the written file path will be removed from
outdir, in the generated index. Default is None, meaning,
leave path as it is.
"""
if self.written_modules is None:
raise ValueError('No modules written')
# Get full filename path
path = os.path.join(outdir, froot+self.rst_extension)
# Path written into index is relative to rootpath
if relative_to is not None:
relpath = outdir.replace(relative_to + os.path.sep, '')
else:
relpath = outdir
idx = open(path, 'wt')
w = idx.write
w('.. AUTO-GENERATED FILE -- DO NOT EDIT!\n\n')
w('.. toctree::\n\n')
for f in self.written_modules:
w(' %s\n' % os.path.join(relpath, f))
idx.close()
|
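A minimal sketch of how the writer above is typically driven from a documentation build script (Python 2 era, like apigen itself); the package name 'tables' and the output directories are placeholders.
from apigen import ApiDocWriter
docwriter = ApiDocWriter('tables', rst_extension='.rst')
docwriter.package_skip_patterns += ['\\.tests$']
docwriter.write_api_docs('source/generated')
docwriter.write_index('source/generated', froot='gen', relative_to='source')
print('%d API files written' % len(docwriter.written_modules))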
charlesccychen/incubator-beam | refs/heads/master | sdks/python/apache_beam/io/restriction_trackers_test.py | 5 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Unit tests for the range_trackers module."""
from __future__ import absolute_import
import logging
import unittest
from apache_beam.io.restriction_trackers import OffsetRange
from apache_beam.io.restriction_trackers import OffsetRestrictionTracker
class OffsetRangeTest(unittest.TestCase):
def test_create(self):
OffsetRange(0, 10)
OffsetRange(10, 10)
OffsetRange(10, 100)
with self.assertRaises(ValueError):
OffsetRange(10, 9)
def test_split_respects_desired_num_splits(self):
range = OffsetRange(10, 100)
splits = list(range.split(desired_num_offsets_per_split=25))
self.assertEqual(4, len(splits))
self.assertIn(OffsetRange(10, 35), splits)
self.assertIn(OffsetRange(35, 60), splits)
self.assertIn(OffsetRange(60, 85), splits)
self.assertIn(OffsetRange(85, 100), splits)
def test_split_respects_min_num_splits(self):
range = OffsetRange(10, 100)
splits = list(range.split(desired_num_offsets_per_split=5,
min_num_offsets_per_split=25))
self.assertEqual(3, len(splits))
self.assertIn(OffsetRange(10, 35), splits)
self.assertIn(OffsetRange(35, 60), splits)
self.assertIn(OffsetRange(60, 100), splits)
def test_split_no_small_split_at_end(self):
range = OffsetRange(10, 90)
splits = list(range.split(desired_num_offsets_per_split=25))
self.assertEqual(3, len(splits))
self.assertIn(OffsetRange(10, 35), splits)
self.assertIn(OffsetRange(35, 60), splits)
self.assertIn(OffsetRange(60, 90), splits)
class OffsetRestrictionTrackerTest(unittest.TestCase):
def test_try_claim(self):
tracker = OffsetRestrictionTracker(100, 200)
self.assertEqual((100, 200), tracker.current_restriction())
self.assertTrue(tracker.try_claim(100))
self.assertTrue(tracker.try_claim(150))
self.assertTrue(tracker.try_claim(199))
self.assertFalse(tracker.try_claim(200))
def test_checkpoint_unstarted(self):
tracker = OffsetRestrictionTracker(100, 200)
checkpoint = tracker.checkpoint()
self.assertEqual((100, 100), tracker.current_restriction())
self.assertEqual((100, 200), checkpoint)
def test_checkpoint_just_started(self):
tracker = OffsetRestrictionTracker(100, 200)
self.assertTrue(tracker.try_claim(100))
checkpoint = tracker.checkpoint()
self.assertEqual((100, 101), tracker.current_restriction())
self.assertEqual((101, 200), checkpoint)
def test_checkpoint_regular(self):
tracker = OffsetRestrictionTracker(100, 200)
self.assertTrue(tracker.try_claim(105))
self.assertTrue(tracker.try_claim(110))
checkpoint = tracker.checkpoint()
self.assertEqual((100, 111), tracker.current_restriction())
self.assertEqual((111, 200), checkpoint)
def test_checkpoint_claimed_last(self):
tracker = OffsetRestrictionTracker(100, 200)
self.assertTrue(tracker.try_claim(105))
self.assertTrue(tracker.try_claim(110))
self.assertTrue(tracker.try_claim(199))
checkpoint = tracker.checkpoint()
self.assertEqual((100, 200), tracker.current_restriction())
self.assertEqual((200, 200), checkpoint)
def test_checkpoint_after_failed_claim(self):
tracker = OffsetRestrictionTracker(100, 200)
self.assertTrue(tracker.try_claim(105))
self.assertTrue(tracker.try_claim(110))
self.assertTrue(tracker.try_claim(160))
self.assertFalse(tracker.try_claim(240))
checkpoint = tracker.checkpoint()
self.assertEqual((100, 161), tracker.current_restriction())
self.assertEqual((161, 200), checkpoint)
def test_non_monotonic_claim(self):
with self.assertRaises(ValueError):
tracker = OffsetRestrictionTracker(100, 200)
self.assertTrue(tracker.try_claim(105))
self.assertTrue(tracker.try_claim(110))
self.assertTrue(tracker.try_claim(103))
def test_claim_before_starting_range(self):
with self.assertRaises(ValueError):
tracker = OffsetRestrictionTracker(100, 200)
tracker.try_claim(90)
def test_check_done_after_try_claim_past_end_of_range(self):
tracker = OffsetRestrictionTracker(100, 200)
self.assertTrue(tracker.try_claim(150))
self.assertTrue(tracker.try_claim(175))
self.assertFalse(tracker.try_claim(220))
tracker.check_done()
def test_check_done_after_try_claim_at_end_of_range(self):
tracker = OffsetRestrictionTracker(100, 200)
self.assertTrue(tracker.try_claim(150))
self.assertTrue(tracker.try_claim(175))
self.assertFalse(tracker.try_claim(200))
tracker.check_done()
def test_check_done_after_try_claim_right_before_end_of_range(self):
tracker = OffsetRestrictionTracker(100, 200)
self.assertTrue(tracker.try_claim(150))
self.assertTrue(tracker.try_claim(175))
self.assertTrue(tracker.try_claim(199))
tracker.check_done()
def test_check_done_when_not_done(self):
tracker = OffsetRestrictionTracker(100, 200)
self.assertTrue(tracker.try_claim(150))
self.assertTrue(tracker.try_claim(175))
with self.assertRaises(ValueError):
tracker.check_done()
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
unittest.main()
|
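To make the checkpoint semantics exercised above concrete, here is the test_checkpoint_regular scenario as a standalone walk-through (it assumes apache_beam is installed):
from apache_beam.io.restriction_trackers import OffsetRestrictionTracker
tracker = OffsetRestrictionTracker(100, 200)
tracker.try_claim(105)                   # True: 105 lies inside [100, 200)
tracker.try_claim(110)                   # True
residual = tracker.checkpoint()          # (111, 200): the unprocessed remainder
primary = tracker.current_restriction()  # (100, 111): what this worker keeps
print(primary)
print(residual)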
phihag/youtube-dl | refs/heads/master | youtube_dl/extractor/nzz.py | 50 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
extract_attributes,
)
class NZZIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?nzz\.ch/(?:[^/]+/)*[^/?#]+-ld\.(?P<id>\d+)'
_TEST = {
'url': 'http://www.nzz.ch/zuerich/gymizyte/gymizyte-schreiben-schueler-heute-noch-diktate-ld.9153',
'info_dict': {
'id': '9153',
},
'playlist_mincount': 6,
}
def _real_extract(self, url):
page_id = self._match_id(url)
webpage = self._download_webpage(url, page_id)
entries = []
for player_element in re.findall(r'(<[^>]+class="kalturaPlayer"[^>]*>)', webpage):
player_params = extract_attributes(player_element)
if player_params.get('data-type') not in ('kaltura_singleArticle',):
self.report_warning('Unsupported player type')
continue
entry_id = player_params['data-id']
entries.append(self.url_result(
'kaltura:1750922:' + entry_id, 'Kaltura', entry_id))
return self.playlist_result(entries, page_id)
|
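A hedged usage sketch: rather than instantiating NZZIE directly, the extractor is normally reached through youtube-dl's public API. The URL is the one from the _TEST block above; the options dict is illustrative.
from __future__ import unicode_literals
import youtube_dl
url = ('http://www.nzz.ch/zuerich/gymizyte/'
       'gymizyte-schreiben-schueler-heute-noch-diktate-ld.9153')
with youtube_dl.YoutubeDL({'quiet': True}) as ydl:
    info = ydl.extract_info(url, download=False)
    print('%d entries in playlist %s' % (len(info.get('entries', [])), info.get('id')))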
gemmaan/moviesenal | refs/heads/master | Hasil/Lib/site-packages/pip/_vendor/requests/adapters.py | 356 | # -*- coding: utf-8 -*-
"""
requests.adapters
~~~~~~~~~~~~~~~~~
This module contains the transport adapters that Requests uses to define
and maintain connections.
"""
import os.path
import socket
from .models import Response
from .packages.urllib3.poolmanager import PoolManager, proxy_from_url
from .packages.urllib3.response import HTTPResponse
from .packages.urllib3.util import Timeout as TimeoutSauce
from .packages.urllib3.util.retry import Retry
from .compat import urlparse, basestring
from .utils import (DEFAULT_CA_BUNDLE_PATH, get_encoding_from_headers,
prepend_scheme_if_needed, get_auth_from_url, urldefragauth,
select_proxy, to_native_string)
from .structures import CaseInsensitiveDict
from .packages.urllib3.exceptions import ClosedPoolError
from .packages.urllib3.exceptions import ConnectTimeoutError
from .packages.urllib3.exceptions import HTTPError as _HTTPError
from .packages.urllib3.exceptions import MaxRetryError
from .packages.urllib3.exceptions import NewConnectionError
from .packages.urllib3.exceptions import ProxyError as _ProxyError
from .packages.urllib3.exceptions import ProtocolError
from .packages.urllib3.exceptions import ReadTimeoutError
from .packages.urllib3.exceptions import SSLError as _SSLError
from .packages.urllib3.exceptions import ResponseError
from .cookies import extract_cookies_to_jar
from .exceptions import (ConnectionError, ConnectTimeout, ReadTimeout, SSLError,
ProxyError, RetryError, InvalidSchema)
from .auth import _basic_auth_str
try:
from .packages.urllib3.contrib.socks import SOCKSProxyManager
except ImportError:
def SOCKSProxyManager(*args, **kwargs):
raise InvalidSchema("Missing dependencies for SOCKS support.")
DEFAULT_POOLBLOCK = False
DEFAULT_POOLSIZE = 10
DEFAULT_RETRIES = 0
DEFAULT_POOL_TIMEOUT = None
class BaseAdapter(object):
"""The Base Transport Adapter"""
def __init__(self):
super(BaseAdapter, self).__init__()
def send(self, request, stream=False, timeout=None, verify=True,
cert=None, proxies=None):
"""Sends PreparedRequest object. Returns Response object.
:param request: The :class:`PreparedRequest <PreparedRequest>` being sent.
:param stream: (optional) Whether to stream the request content.
:param timeout: (optional) How long to wait for the server to send
data before giving up, as a float, or a :ref:`(connect timeout,
read timeout) <timeouts>` tuple.
:type timeout: float or tuple
:param verify: (optional) Whether to verify SSL certificates.
:param cert: (optional) Any user-provided SSL certificate to be trusted.
:param proxies: (optional) The proxies dictionary to apply to the request.
"""
raise NotImplementedError
def close(self):
"""Cleans up adapter specific items."""
raise NotImplementedError
class HTTPAdapter(BaseAdapter):
"""The built-in HTTP Adapter for urllib3.
Provides a general-case interface for Requests sessions to contact HTTP and
HTTPS urls by implementing the Transport Adapter interface. This class will
usually be created by the :class:`Session <Session>` class under the
covers.
:param pool_connections: The number of urllib3 connection pools to cache.
:param pool_maxsize: The maximum number of connections to save in the pool.
:param max_retries: The maximum number of retries each connection
should attempt. Note, this applies only to failed DNS lookups, socket
connections and connection timeouts, never to requests where data has
made it to the server. By default, Requests does not retry failed
connections. If you need granular control over the conditions under
which we retry a request, import urllib3's ``Retry`` class and pass
that instead.
:param pool_block: Whether the connection pool should block for connections.
Usage::
>>> import requests
>>> s = requests.Session()
>>> a = requests.adapters.HTTPAdapter(max_retries=3)
>>> s.mount('http://', a)
"""
__attrs__ = ['max_retries', 'config', '_pool_connections', '_pool_maxsize',
'_pool_block']
def __init__(self, pool_connections=DEFAULT_POOLSIZE,
pool_maxsize=DEFAULT_POOLSIZE, max_retries=DEFAULT_RETRIES,
pool_block=DEFAULT_POOLBLOCK):
if max_retries == DEFAULT_RETRIES:
self.max_retries = Retry(0, read=False)
else:
self.max_retries = Retry.from_int(max_retries)
self.config = {}
self.proxy_manager = {}
super(HTTPAdapter, self).__init__()
self._pool_connections = pool_connections
self._pool_maxsize = pool_maxsize
self._pool_block = pool_block
self.init_poolmanager(pool_connections, pool_maxsize, block=pool_block)
def __getstate__(self):
return dict((attr, getattr(self, attr, None)) for attr in
self.__attrs__)
def __setstate__(self, state):
# Can't handle by adding 'proxy_manager' to self.__attrs__ because
# self.poolmanager uses a lambda function, which isn't pickleable.
self.proxy_manager = {}
self.config = {}
for attr, value in state.items():
setattr(self, attr, value)
self.init_poolmanager(self._pool_connections, self._pool_maxsize,
block=self._pool_block)
def init_poolmanager(self, connections, maxsize, block=DEFAULT_POOLBLOCK, **pool_kwargs):
"""Initializes a urllib3 PoolManager.
This method should not be called from user code, and is only
exposed for use when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param connections: The number of urllib3 connection pools to cache.
:param maxsize: The maximum number of connections to save in the pool.
:param block: Block when no free connections are available.
:param pool_kwargs: Extra keyword arguments used to initialize the Pool Manager.
"""
# save these values for pickling
self._pool_connections = connections
self._pool_maxsize = maxsize
self._pool_block = block
self.poolmanager = PoolManager(num_pools=connections, maxsize=maxsize,
block=block, strict=True, **pool_kwargs)
def proxy_manager_for(self, proxy, **proxy_kwargs):
"""Return urllib3 ProxyManager for the given proxy.
This method should not be called from user code, and is only
exposed for use when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param proxy: The proxy to return a urllib3 ProxyManager for.
:param proxy_kwargs: Extra keyword arguments used to configure the Proxy Manager.
:returns: ProxyManager
:rtype: requests.packages.urllib3.ProxyManager
"""
if proxy in self.proxy_manager:
manager = self.proxy_manager[proxy]
elif proxy.lower().startswith('socks'):
username, password = get_auth_from_url(proxy)
manager = self.proxy_manager[proxy] = SOCKSProxyManager(
proxy,
username=username,
password=password,
num_pools=self._pool_connections,
maxsize=self._pool_maxsize,
block=self._pool_block,
**proxy_kwargs
)
else:
proxy_headers = self.proxy_headers(proxy)
manager = self.proxy_manager[proxy] = proxy_from_url(
proxy,
proxy_headers=proxy_headers,
num_pools=self._pool_connections,
maxsize=self._pool_maxsize,
block=self._pool_block,
**proxy_kwargs)
return manager
def cert_verify(self, conn, url, verify, cert):
"""Verify a SSL certificate. This method should not be called from user
code, and is only exposed for use when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param conn: The urllib3 connection object associated with the cert.
:param url: The requested URL.
:param verify: Whether we should actually verify the certificate.
:param cert: The SSL certificate to verify.
"""
if url.lower().startswith('https') and verify:
cert_loc = None
# Allow self-specified cert location.
if verify is not True:
cert_loc = verify
if not cert_loc:
cert_loc = DEFAULT_CA_BUNDLE_PATH
if not cert_loc:
raise Exception("Could not find a suitable SSL CA certificate bundle.")
conn.cert_reqs = 'CERT_REQUIRED'
if not os.path.isdir(cert_loc):
conn.ca_certs = cert_loc
else:
conn.ca_cert_dir = cert_loc
else:
conn.cert_reqs = 'CERT_NONE'
conn.ca_certs = None
conn.ca_cert_dir = None
if cert:
if not isinstance(cert, basestring):
conn.cert_file = cert[0]
conn.key_file = cert[1]
else:
conn.cert_file = cert
def build_response(self, req, resp):
"""Builds a :class:`Response <requests.Response>` object from a urllib3
response. This should not be called from user code, and is only exposed
for use when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`
:param req: The :class:`PreparedRequest <PreparedRequest>` used to generate the response.
:param resp: The urllib3 response object.
:rtype: requests.Response
"""
response = Response()
# Fallback to None if there's no status_code, for whatever reason.
response.status_code = getattr(resp, 'status', None)
# Make headers case-insensitive.
response.headers = CaseInsensitiveDict(getattr(resp, 'headers', {}))
# Set encoding.
response.encoding = get_encoding_from_headers(response.headers)
response.raw = resp
response.reason = response.raw.reason
if isinstance(req.url, bytes):
response.url = req.url.decode('utf-8')
else:
response.url = req.url
# Add new cookies from the server.
extract_cookies_to_jar(response.cookies, req, resp)
# Give the Response some context.
response.request = req
response.connection = self
return response
def get_connection(self, url, proxies=None):
"""Returns a urllib3 connection for the given URL. This should not be
called from user code, and is only exposed for use when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param url: The URL to connect to.
:param proxies: (optional) A Requests-style dictionary of proxies used on this request.
:rtype: requests.packages.urllib3.ConnectionPool
"""
proxy = select_proxy(url, proxies)
if proxy:
proxy = prepend_scheme_if_needed(proxy, 'http')
proxy_manager = self.proxy_manager_for(proxy)
conn = proxy_manager.connection_from_url(url)
else:
# Only scheme should be lower case
parsed = urlparse(url)
url = parsed.geturl()
conn = self.poolmanager.connection_from_url(url)
return conn
def close(self):
"""Disposes of any internal state.
Currently, this closes the PoolManager and any active ProxyManager,
which closes any pooled connections.
"""
self.poolmanager.clear()
for proxy in self.proxy_manager.values():
proxy.clear()
def request_url(self, request, proxies):
"""Obtain the url to use when making the final request.
        If the message is being sent through an HTTP proxy, the full URL has to
be used. Otherwise, we should only use the path portion of the URL.
This should not be called from user code, and is only exposed for use
when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param request: The :class:`PreparedRequest <PreparedRequest>` being sent.
:param proxies: A dictionary of schemes or schemes and hosts to proxy URLs.
:rtype: str
"""
proxy = select_proxy(request.url, proxies)
scheme = urlparse(request.url).scheme
is_proxied_http_request = (proxy and scheme != 'https')
using_socks_proxy = False
if proxy:
proxy_scheme = urlparse(proxy).scheme.lower()
using_socks_proxy = proxy_scheme.startswith('socks')
url = request.path_url
if is_proxied_http_request and not using_socks_proxy:
url = urldefragauth(request.url)
return url
def add_headers(self, request, **kwargs):
"""Add any headers needed by the connection. As of v2.0 this does
nothing by default, but is left for overriding by users that subclass
the :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
This should not be called from user code, and is only exposed for use
when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param request: The :class:`PreparedRequest <PreparedRequest>` to add headers to.
:param kwargs: The keyword arguments from the call to send().
"""
pass
def proxy_headers(self, proxy):
"""Returns a dictionary of the headers to add to any request sent
through a proxy. This works with urllib3 magic to ensure that they are
correctly sent to the proxy, rather than in a tunnelled request if
CONNECT is being used.
This should not be called from user code, and is only exposed for use
when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
        :param proxy: The URL of the proxy being used for this request.
:rtype: dict
"""
headers = {}
username, password = get_auth_from_url(proxy)
if username and password:
headers['Proxy-Authorization'] = _basic_auth_str(username,
password)
return headers
def send(self, request, stream=False, timeout=None, verify=True, cert=None, proxies=None):
"""Sends PreparedRequest object. Returns Response object.
:param request: The :class:`PreparedRequest <PreparedRequest>` being sent.
:param stream: (optional) Whether to stream the request content.
:param timeout: (optional) How long to wait for the server to send
data before giving up, as a float, or a :ref:`(connect timeout,
read timeout) <timeouts>` tuple.
:type timeout: float or tuple
:param verify: (optional) Whether to verify SSL certificates.
:param cert: (optional) Any user-provided SSL certificate to be trusted.
:param proxies: (optional) The proxies dictionary to apply to the request.
:rtype: requests.Response
"""
conn = self.get_connection(request.url, proxies)
self.cert_verify(conn, request.url, verify, cert)
url = self.request_url(request, proxies)
self.add_headers(request)
chunked = not (request.body is None or 'Content-Length' in request.headers)
if isinstance(timeout, tuple):
try:
connect, read = timeout
timeout = TimeoutSauce(connect=connect, read=read)
except ValueError as e:
# this may raise a string formatting error.
err = ("Invalid timeout {0}. Pass a (connect, read) "
"timeout tuple, or a single float to set "
"both timeouts to the same value".format(timeout))
raise ValueError(err)
else:
timeout = TimeoutSauce(connect=timeout, read=timeout)
try:
if not chunked:
resp = conn.urlopen(
method=request.method,
url=url,
body=request.body,
headers=request.headers,
redirect=False,
assert_same_host=False,
preload_content=False,
decode_content=False,
retries=self.max_retries,
timeout=timeout
)
            # Send the request with a chunked body.
else:
if hasattr(conn, 'proxy_pool'):
conn = conn.proxy_pool
low_conn = conn._get_conn(timeout=DEFAULT_POOL_TIMEOUT)
try:
low_conn.putrequest(request.method,
url,
skip_accept_encoding=True)
for header, value in request.headers.items():
low_conn.putheader(header, value)
low_conn.endheaders()
for i in request.body:
low_conn.send(hex(len(i))[2:].encode('utf-8'))
low_conn.send(b'\r\n')
low_conn.send(i)
low_conn.send(b'\r\n')
low_conn.send(b'0\r\n\r\n')
# Receive the response from the server
try:
# For Python 2.7+ versions, use buffering of HTTP
# responses
r = low_conn.getresponse(buffering=True)
except TypeError:
# For compatibility with Python 2.6 versions and back
r = low_conn.getresponse()
resp = HTTPResponse.from_httplib(
r,
pool=conn,
connection=low_conn,
preload_content=False,
decode_content=False
)
except:
# If we hit any problems here, clean up the connection.
# Then, reraise so that we can handle the actual exception.
low_conn.close()
raise
except (ProtocolError, socket.error) as err:
raise ConnectionError(err, request=request)
except MaxRetryError as e:
if isinstance(e.reason, ConnectTimeoutError):
# TODO: Remove this in 3.0.0: see #2811
if not isinstance(e.reason, NewConnectionError):
raise ConnectTimeout(e, request=request)
if isinstance(e.reason, ResponseError):
raise RetryError(e, request=request)
if isinstance(e.reason, _ProxyError):
raise ProxyError(e, request=request)
raise ConnectionError(e, request=request)
except ClosedPoolError as e:
raise ConnectionError(e, request=request)
except _ProxyError as e:
raise ProxyError(e)
except (_SSLError, _HTTPError) as e:
if isinstance(e, _SSLError):
raise SSLError(e, request=request)
elif isinstance(e, ReadTimeoutError):
raise ReadTimeout(e, request=request)
else:
raise
return self.build_response(request, resp)
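# Hedged usage sketch (not part of the original module): how a caller might
# mount this adapter on a Session to get a custom retry policy and a
# (connect, read) timeout tuple. Assumes the public ``requests`` package is
# importable; the URL and numeric values below are illustrative placeholders.
#
#     >>> import requests
#     >>> from requests.adapters import HTTPAdapter
#     >>> s = requests.Session()
#     >>> s.mount('https://', HTTPAdapter(max_retries=3))
#     >>> r = s.get('https://example.org', timeout=(3.05, 27))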
|
vlachoudis/sl4a | refs/heads/master | python-build/python-libs/gdata/src/gdata/tlslite/integration/SMTP_TLS.py | 319 | """TLS Lite + smtplib."""
from smtplib import SMTP
from gdata.tlslite.TLSConnection import TLSConnection
from gdata.tlslite.integration.ClientHelper import ClientHelper
class SMTP_TLS(SMTP):
"""This class extends L{smtplib.SMTP} with TLS support."""
def starttls(self,
username=None, password=None, sharedKey=None,
certChain=None, privateKey=None,
cryptoID=None, protocol=None,
x509Fingerprint=None,
x509TrustList=None, x509CommonName=None,
settings=None):
"""Puts the connection to the SMTP server into TLS mode.
If the server supports TLS, this will encrypt the rest of the SMTP
session.
For client authentication, use one of these argument
combinations:
- username, password (SRP)
- username, sharedKey (shared-key)
- certChain, privateKey (certificate)
For server authentication, you can either rely on the
implicit mutual authentication performed by SRP or
shared-keys, or you can do certificate-based server
authentication with one of these argument combinations:
- cryptoID[, protocol] (requires cryptoIDlib)
- x509Fingerprint
- x509TrustList[, x509CommonName] (requires cryptlib_py)
Certificate-based server authentication is compatible with
SRP or certificate-based client authentication. It is
not compatible with shared-keys.
The caller should be prepared to handle TLS-specific
exceptions. See the client handshake functions in
L{tlslite.TLSConnection.TLSConnection} for details on which
exceptions might be raised.
@type username: str
@param username: SRP or shared-key username. Requires the
'password' or 'sharedKey' argument.
@type password: str
@param password: SRP password for mutual authentication.
Requires the 'username' argument.
@type sharedKey: str
@param sharedKey: Shared key for mutual authentication.
Requires the 'username' argument.
@type certChain: L{tlslite.X509CertChain.X509CertChain} or
L{cryptoIDlib.CertChain.CertChain}
@param certChain: Certificate chain for client authentication.
Requires the 'privateKey' argument. Excludes the SRP or
shared-key related arguments.
@type privateKey: L{tlslite.utils.RSAKey.RSAKey}
@param privateKey: Private key for client authentication.
Requires the 'certChain' argument. Excludes the SRP or
shared-key related arguments.
@type cryptoID: str
@param cryptoID: cryptoID for server authentication. Mutually
exclusive with the 'x509...' arguments.
@type protocol: str
@param protocol: cryptoID protocol URI for server
authentication. Requires the 'cryptoID' argument.
@type x509Fingerprint: str
@param x509Fingerprint: Hex-encoded X.509 fingerprint for
server authentication. Mutually exclusive with the 'cryptoID'
and 'x509TrustList' arguments.
@type x509TrustList: list of L{tlslite.X509.X509}
@param x509TrustList: A list of trusted root certificates. The
other party must present a certificate chain which extends to
one of these root certificates. The cryptlib_py module must be
installed to use this parameter. Mutually exclusive with the
'cryptoID' and 'x509Fingerprint' arguments.
@type x509CommonName: str
@param x509CommonName: The end-entity certificate's 'CN' field
must match this value. For a web server, this is typically a
server name such as 'www.amazon.com'. Mutually exclusive with
the 'cryptoID' and 'x509Fingerprint' arguments. Requires the
'x509TrustList' argument.
@type settings: L{tlslite.HandshakeSettings.HandshakeSettings}
@param settings: Various settings which can be used to control
the ciphersuites, certificate types, and SSL/TLS versions
offered by the client.
"""
(resp, reply) = self.docmd("STARTTLS")
if resp == 220:
helper = ClientHelper(
username, password, sharedKey,
certChain, privateKey,
cryptoID, protocol,
x509Fingerprint,
x509TrustList, x509CommonName,
settings)
conn = TLSConnection(self.sock)
conn.closeSocket = True
helper._handshake(conn)
self.sock = conn
self.file = conn.makefile('rb')
return (resp, reply)
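# Hedged usage sketch (not part of the original module): upgrading an SMTP
# session to TLS with certificate-based server authentication via an X.509
# fingerprint, as described in the starttls docstring above. The host name,
# fingerprint and addresses below are placeholders.
#
#     >>> smtp = SMTP_TLS('smtp.example.com')
#     >>> smtp.starttls(x509Fingerprint='0123456789abcdef0123456789abcdef01234567')
#     >>> smtp.sendmail('[email protected]', '[email protected]',
#     ...               'Subject: test\r\n\r\nHello over TLS')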
|
hottwaj/django | refs/heads/master | tests/staticfiles_tests/cases.py | 270 | # -*- encoding: utf-8 -*-
from __future__ import unicode_literals
import codecs
import os
import shutil
import tempfile
from django.conf import settings
from django.core.management import call_command
from django.template import Context, Template
from django.test import SimpleTestCase, override_settings
from django.utils import six
from django.utils.encoding import force_text
from .settings import TEST_SETTINGS
class BaseStaticFilesTestCase(object):
"""
Test case with a couple utility assertions.
"""
def assertFileContains(self, filepath, text):
self.assertIn(
text,
self._get_file(force_text(filepath)),
"'%s' not in '%s'" % (text, filepath),
)
def assertFileNotFound(self, filepath):
self.assertRaises(IOError, self._get_file, filepath)
def render_template(self, template, **kwargs):
if isinstance(template, six.string_types):
template = Template(template)
return template.render(Context(kwargs)).strip()
def static_template_snippet(self, path, asvar=False):
if asvar:
return "{%% load static from staticfiles %%}{%% static '%s' as var %%}{{ var }}" % path
return "{%% load static from staticfiles %%}{%% static '%s' %%}" % path
def assertStaticRenders(self, path, result, asvar=False, **kwargs):
template = self.static_template_snippet(path, asvar)
self.assertEqual(self.render_template(template, **kwargs), result)
def assertStaticRaises(self, exc, path, result, asvar=False, **kwargs):
self.assertRaises(exc, self.assertStaticRenders, path, result, **kwargs)
@override_settings(**TEST_SETTINGS)
class StaticFilesTestCase(BaseStaticFilesTestCase, SimpleTestCase):
pass
class BaseCollectionTestCase(BaseStaticFilesTestCase):
"""
Tests shared by all file finding features (collectstatic,
findstatic, and static serve view).
This relies on the asserts defined in BaseStaticFilesTestCase, but
is separated because some test cases need those asserts without
all these tests.
"""
def setUp(self):
super(BaseCollectionTestCase, self).setUp()
temp_dir = tempfile.mkdtemp()
# Override the STATIC_ROOT for all tests from setUp to tearDown
# rather than as a context manager
self.patched_settings = self.settings(STATIC_ROOT=temp_dir)
self.patched_settings.enable()
self.run_collectstatic()
# Same comment as in runtests.teardown.
self.addCleanup(shutil.rmtree, six.text_type(temp_dir))
def tearDown(self):
self.patched_settings.disable()
super(BaseCollectionTestCase, self).tearDown()
def run_collectstatic(self, **kwargs):
call_command('collectstatic', interactive=False, verbosity=0,
ignore_patterns=['*.ignoreme'], **kwargs)
def _get_file(self, filepath):
assert filepath, 'filepath is empty.'
filepath = os.path.join(settings.STATIC_ROOT, filepath)
with codecs.open(filepath, "r", "utf-8") as f:
return f.read()
class CollectionTestCase(BaseCollectionTestCase, StaticFilesTestCase):
pass
class TestDefaults(object):
"""
A few standard test cases.
"""
def test_staticfiles_dirs(self):
"""
Can find a file in a STATICFILES_DIRS directory.
"""
self.assertFileContains('test.txt', 'Can we find')
self.assertFileContains(os.path.join('prefix', 'test.txt'), 'Prefix')
def test_staticfiles_dirs_subdir(self):
"""
Can find a file in a subdirectory of a STATICFILES_DIRS
directory.
"""
self.assertFileContains('subdir/test.txt', 'Can we find')
def test_staticfiles_dirs_priority(self):
"""
File in STATICFILES_DIRS has priority over file in app.
"""
self.assertFileContains('test/file.txt', 'STATICFILES_DIRS')
def test_app_files(self):
"""
Can find a file in an app static/ directory.
"""
self.assertFileContains('test/file1.txt', 'file1 in the app dir')
def test_nonascii_filenames(self):
"""
Can find a file with non-ASCII character in an app static/ directory.
"""
self.assertFileContains('test/⊗.txt', '⊗ in the app dir')
def test_camelcase_filenames(self):
"""
Can find a file with capital letters.
"""
self.assertFileContains('test/camelCase.txt', 'camelCase')
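# Hedged usage sketch (not part of the original module): TestDefaults is a
# mixin of standard checks; a test module would typically combine it with one
# of the concrete base classes defined above. The class name is illustrative.
#
#     >>> class TestCollectedDefaults(TestDefaults, CollectionTestCase):
#     ...     pass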
|
edgarli/proj8 | refs/heads/master | env/lib/python3.4/site-packages/pip/commands/install.py | 187 | from __future__ import absolute_import
import logging
import operator
import os
import tempfile
import shutil
import warnings
try:
import wheel
except ImportError:
wheel = None
from pip.req import RequirementSet
from pip.basecommand import RequirementCommand
from pip.locations import virtualenv_no_global, distutils_scheme
from pip.index import PackageFinder
from pip.exceptions import (
InstallationError, CommandError, PreviousBuildDirError,
)
from pip import cmdoptions
from pip.utils import ensure_dir
from pip.utils.build import BuildDirectory
from pip.utils.deprecation import RemovedInPip8Warning
from pip.utils.filesystem import check_path_owner
from pip.wheel import WheelCache, WheelBuilder
logger = logging.getLogger(__name__)
class InstallCommand(RequirementCommand):
"""
Install packages from:
- PyPI (and other indexes) using requirement specifiers.
- VCS project urls.
- Local project directories.
- Local or remote source archives.
pip also supports installing from "requirements files", which provide
an easy way to specify a whole environment to be installed.
"""
name = 'install'
usage = """
%prog [options] <requirement specifier> [package-index-options] ...
%prog [options] -r <requirements file> [package-index-options] ...
%prog [options] [-e] <vcs project url> ...
%prog [options] [-e] <local project path> ...
%prog [options] <archive url/path> ..."""
summary = 'Install packages.'
def __init__(self, *args, **kw):
super(InstallCommand, self).__init__(*args, **kw)
cmd_opts = self.cmd_opts
cmd_opts.add_option(cmdoptions.constraints())
cmd_opts.add_option(cmdoptions.editable())
cmd_opts.add_option(cmdoptions.requirements())
cmd_opts.add_option(cmdoptions.build_dir())
cmd_opts.add_option(
'-t', '--target',
dest='target_dir',
metavar='dir',
default=None,
help='Install packages into <dir>. '
'By default this will not replace existing files/folders in '
'<dir>. Use --upgrade to replace existing packages in <dir> '
'with new versions.'
)
cmd_opts.add_option(
'-d', '--download', '--download-dir', '--download-directory',
dest='download_dir',
metavar='dir',
default=None,
help=("Download packages into <dir> instead of installing them, "
"regardless of what's already installed."),
)
cmd_opts.add_option(cmdoptions.download_cache())
cmd_opts.add_option(cmdoptions.src())
cmd_opts.add_option(
'-U', '--upgrade',
dest='upgrade',
action='store_true',
help='Upgrade all specified packages to the newest available '
'version. This process is recursive regardless of whether '
'a dependency is already satisfied.'
)
cmd_opts.add_option(
'--force-reinstall',
dest='force_reinstall',
action='store_true',
help='When upgrading, reinstall all packages even if they are '
'already up-to-date.')
cmd_opts.add_option(
'-I', '--ignore-installed',
dest='ignore_installed',
action='store_true',
help='Ignore the installed packages (reinstalling instead).')
cmd_opts.add_option(cmdoptions.no_deps())
cmd_opts.add_option(cmdoptions.install_options())
cmd_opts.add_option(cmdoptions.global_options())
cmd_opts.add_option(
'--user',
dest='use_user_site',
action='store_true',
help="Install to the Python user install directory for your "
"platform. Typically ~/.local/, or %APPDATA%\Python on "
"Windows. (See the Python documentation for site.USER_BASE "
"for full details.)")
cmd_opts.add_option(
'--egg',
dest='as_egg',
action='store_true',
help="Install packages as eggs, not 'flat', like pip normally "
"does. This option is not about installing *from* eggs. "
"(WARNING: Because this option overrides pip's normal install"
" logic, requirements files may not behave as expected.)")
cmd_opts.add_option(
'--root',
dest='root_path',
metavar='dir',
default=None,
help="Install everything relative to this alternate root "
"directory.")
cmd_opts.add_option(
"--compile",
action="store_true",
dest="compile",
default=True,
help="Compile py files to pyc",
)
cmd_opts.add_option(
"--no-compile",
action="store_false",
dest="compile",
help="Do not compile py files to pyc",
)
cmd_opts.add_option(cmdoptions.use_wheel())
cmd_opts.add_option(cmdoptions.no_use_wheel())
cmd_opts.add_option(cmdoptions.no_binary())
cmd_opts.add_option(cmdoptions.only_binary())
cmd_opts.add_option(
'--pre',
action='store_true',
default=False,
help="Include pre-release and development versions. By default, "
"pip only finds stable versions.")
cmd_opts.add_option(cmdoptions.no_clean())
index_opts = cmdoptions.make_option_group(
cmdoptions.index_group,
self.parser,
)
self.parser.insert_option_group(0, index_opts)
self.parser.insert_option_group(0, cmd_opts)
def _build_package_finder(self, options, index_urls, session):
"""
Create a package finder appropriate to this install command.
This method is meant to be overridden by subclasses, not
called directly.
"""
return PackageFinder(
find_links=options.find_links,
format_control=options.format_control,
index_urls=index_urls,
allow_external=options.allow_external,
allow_unverified=options.allow_unverified,
allow_all_external=options.allow_all_external,
trusted_hosts=options.trusted_hosts,
allow_all_prereleases=options.pre,
process_dependency_links=options.process_dependency_links,
session=session,
)
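    # Hedged sketch (not part of the original module): the docstring above says
    # this hook exists for subclasses; an illustrative override could, for
    # example, force pre-release versions on. The subclass name is hypothetical.
    #
    #     >>> class PreReleaseInstallCommand(InstallCommand):
    #     ...     def _build_package_finder(self, options, index_urls, session):
    #     ...         options.pre = True
    #     ...         return super(PreReleaseInstallCommand, self)._build_package_finder(
    #     ...             options, index_urls, session)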
def run(self, options, args):
cmdoptions.resolve_wheel_no_use_binary(options)
cmdoptions.check_install_build_global(options)
if options.download_dir:
options.ignore_installed = True
if options.build_dir:
options.build_dir = os.path.abspath(options.build_dir)
options.src_dir = os.path.abspath(options.src_dir)
install_options = options.install_options or []
if options.use_user_site:
if virtualenv_no_global():
raise InstallationError(
"Can not perform a '--user' install. User site-packages "
"are not visible in this virtualenv."
)
install_options.append('--user')
install_options.append('--prefix=')
temp_target_dir = None
if options.target_dir:
options.ignore_installed = True
temp_target_dir = tempfile.mkdtemp()
options.target_dir = os.path.abspath(options.target_dir)
if (os.path.exists(options.target_dir) and not
os.path.isdir(options.target_dir)):
raise CommandError(
"Target path exists but is not a directory, will not "
"continue."
)
install_options.append('--home=' + temp_target_dir)
global_options = options.global_options or []
index_urls = [options.index_url] + options.extra_index_urls
if options.no_index:
logger.info('Ignoring indexes: %s', ','.join(index_urls))
index_urls = []
if options.download_cache:
warnings.warn(
"--download-cache has been deprecated and will be removed in "
"the future. Pip now automatically uses and configures its "
"cache.",
RemovedInPip8Warning,
)
with self._build_session(options) as session:
finder = self._build_package_finder(options, index_urls, session)
build_delete = (not (options.no_clean or options.build_dir))
wheel_cache = WheelCache(options.cache_dir, options.format_control)
if options.cache_dir and not check_path_owner(options.cache_dir):
logger.warning(
"The directory '%s' or its parent directory is not owned "
"by the current user and caching wheels has been "
"disabled. check the permissions and owner of that "
"directory. If executing pip with sudo, you may want "
"sudo's -H flag.",
options.cache_dir,
)
options.cache_dir = None
with BuildDirectory(options.build_dir,
delete=build_delete) as build_dir:
requirement_set = RequirementSet(
build_dir=build_dir,
src_dir=options.src_dir,
download_dir=options.download_dir,
upgrade=options.upgrade,
as_egg=options.as_egg,
ignore_installed=options.ignore_installed,
ignore_dependencies=options.ignore_dependencies,
force_reinstall=options.force_reinstall,
use_user_site=options.use_user_site,
target_dir=temp_target_dir,
session=session,
pycompile=options.compile,
isolated=options.isolated_mode,
wheel_cache=wheel_cache,
)
self.populate_requirement_set(
requirement_set, args, options, finder, session, self.name,
wheel_cache
)
if not requirement_set.has_requirements:
return
try:
if (options.download_dir or not wheel or not
options.cache_dir):
# on -d don't do complex things like building
# wheels, and don't try to build wheels when wheel is
# not installed.
requirement_set.prepare_files(finder)
else:
# build wheels before install.
wb = WheelBuilder(
requirement_set,
finder,
build_options=[],
global_options=[],
)
# Ignore the result: a failed wheel will be
# installed from the sdist/vcs whatever.
wb.build(autobuilding=True)
if not options.download_dir:
requirement_set.install(
install_options,
global_options,
root=options.root_path,
)
reqs = sorted(
requirement_set.successfully_installed,
key=operator.attrgetter('name'))
items = []
for req in reqs:
item = req.name
try:
if hasattr(req, 'installed_version'):
if req.installed_version:
item += '-' + req.installed_version
except Exception:
pass
items.append(item)
installed = ' '.join(items)
if installed:
logger.info('Successfully installed %s', installed)
else:
downloaded = ' '.join([
req.name
for req in requirement_set.successfully_downloaded
])
if downloaded:
logger.info(
'Successfully downloaded %s', downloaded
)
except PreviousBuildDirError:
options.no_clean = True
raise
finally:
# Clean up
if not options.no_clean:
requirement_set.cleanup_files()
if options.target_dir:
ensure_dir(options.target_dir)
lib_dir = distutils_scheme('', home=temp_target_dir)['purelib']
for item in os.listdir(lib_dir):
target_item_dir = os.path.join(options.target_dir, item)
if os.path.exists(target_item_dir):
if not options.upgrade:
logger.warning(
'Target directory %s already exists. Specify '
'--upgrade to force replacement.',
target_item_dir
)
continue
if os.path.islink(target_item_dir):
logger.warning(
'Target directory %s already exists and is '
'a link. Pip will not automatically replace '
'links, please remove if replacement is '
'desired.',
target_item_dir
)
continue
if os.path.isdir(target_item_dir):
shutil.rmtree(target_item_dir)
else:
os.remove(target_item_dir)
shutil.move(
os.path.join(lib_dir, item),
target_item_dir
)
shutil.rmtree(temp_target_dir)
return requirement_set
|
jdegraaf/espresso | refs/heads/master | testsuite/python/magnetostaticInteractions.py | 6 | #
# Copyright (C) 2013,2014 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Tests particle property setters/getters
import unittest as ut
import espressomd
import numpy as np
from espressomd.magnetostatics import *
class MagnetostaticsInteractionsTests(ut.TestCase):
# Handle to espresso system
system=espressomd.System()
def paramsMatch(self,inParams,outParams):
"""Check, if the parameters set and gotten back match.
Only check keys present in inParams.
"""
# Delete the bjerrum_length from outparams, because
# it is calculated from the prefactor in some cases
# and therefore not necessarily in inparams.
if "bjerrum_length" in outParams:
            del outParams["bjerrum_length"]
for k in inParams.keys():
if k not in outParams:
print(k,"missing from returned parameters")
return False
if outParams[k]!=inParams[k]:
print("Mismatch in parameter ",k,inParams[k],outParams[k])
return False
return True
def setUp(self):
self.system.box_l = 10,10,10
self.system.part[0].pos =0,0,0
self.system.part[1].pos =0.1,0.1,0.1
self.system.part[0].dip =1.3,2.1,-6
self.system.part[1].dip =4.3,4.1,7.5
def generateTestForMagnetostaticInteraction(_interClass,_params):
"""Generates test cases for checking interaction parameters set and gotten back
from Es actually match. Only keys which are present in _params are checked
1st: Interaction parameters as dictionary, i.e., {"k"=1.,"r_0"=0.
2nd: Name of the interaction property to set (i.e. "P3M")
"""
params=_params
interClass=_interClass
def func(self):
# This code is run at the execution of the generated function.
# It will use the state of the variables in the outer function,
# which was there, when the outer function was called
# set Parameter
Inter=interClass(**params)
Inter.validateParams()
Inter._setParamsInEsCore()
# Read them out again
outParams=Inter.getParams()
            self.assertTrue(self.paramsMatch(params,outParams), "Mismatch of parameters.\nParameters set "+params.__str__()+" vs. output parameters "+outParams.__str__())
return func
test_DP3M=generateTestForMagnetostaticInteraction(DipolarP3M,dict(prefactor = 1.0,\
epsilon = 0.0,\
inter = 1000,\
mesh_off = [0.5,0.5,0.5],\
r_cut = 2.4,\
mesh = [8,8,8],\
cao = 1,\
alpha = 12,\
accuracy = 0.01))
test_DdsCpu=generateTestForMagnetostaticInteraction(DipolarDirectSumCpu,dict(prefactor = 3.4))
test_DdsRCpu=generateTestForMagnetostaticInteraction(DipolarDirectSumWithReplicaCpu,dict(prefactor = 3.4,n_replica=2))
if __name__ == "__main__":
print("Features: ",espressomd.features())
ut.main()
|
Programmica/pygtk-tutorial | refs/heads/master | examples/assistant.py | 1 | #!/usr/bin/env python
import gtk
class Assistant(gtk.Assistant):
def __init__(self):
gtk.Assistant.__init__(self)
self.set_title("Assistant")
self.set_default_size(400, -1)
self.connect("apply", self.on_apply_clicked)
self.connect("close", self.on_close_clicked)
self.connect("cancel", self.on_cancel_clicked)
box = gtk.VBox()
self.append_page(box)
self.set_page_type(box, gtk.ASSISTANT_PAGE_INTRO)
self.set_page_title(box, "Page 1: Introduction")
label = gtk.Label("An 'Intro' page is the first page of an Assistant. It is used to provide information about what configuration settings need to be configured. The introduction page only has a 'Continue' button.")
label.set_line_wrap(True)
box.pack_start(label, True, True, 0)
self.set_page_complete(box, True)
box = gtk.VBox()
self.append_page(box)
self.set_page_type(box, gtk.ASSISTANT_PAGE_CONTENT)
self.set_page_title(box, "Page 2: Content")
label = gtk.Label("The 'Content' page provides a place where widgets can be positioned. This allows the user to configure a variety of options as needed. The page contains a 'Continue' button to move onto other pages, and a 'Go Back' button to return to the previous page if necessary.")
label.set_line_wrap(True)
box.pack_start(label, True, True, 0)
self.set_page_complete(box, True)
self.complete = gtk.VBox()
self.append_page(self.complete)
self.set_page_type(self.complete, gtk.ASSISTANT_PAGE_PROGRESS)
self.set_page_title(self.complete, "Page 3: Progress")
label = gtk.Label("A 'Progress' page is used to prevent changing pages within the Assistant before a long-running process has completed. The 'Continue' button will be marked as insensitive until the process has finished. Once finished, the button will become sensitive.")
label.set_line_wrap(True)
self.complete.pack_start(label, True, True, 0)
checkbutton = gtk.CheckButton(label="Mark page as complete")
checkbutton.connect("toggled", self.on_complete_toggled)
self.complete.pack_start(checkbutton, False, False, 0)
box = gtk.VBox()
self.append_page(box)
self.set_page_type(box, gtk.ASSISTANT_PAGE_CONFIRM)
self.set_page_title(box, "Page 4: Confirm")
label = gtk.Label("The 'Confirm' page may be set as the final page in the Assistant, however this depends on what the Assistant does. This page provides an 'Apply' button to explicitly set the changes, or a 'Go Back' button to correct any mistakes.")
label.set_line_wrap(True)
box.pack_start(label, True, True, 0)
self.set_page_complete(box, True)
box = gtk.VBox()
self.append_page(box)
self.set_page_type(box, gtk.ASSISTANT_PAGE_SUMMARY)
self.set_page_title(box, "Page 5: Summary")
label = gtk.Label("A 'Summary' should be set as the final page of the Assistant if used however this depends on the purpose of your Assistant. It provides information on the changes that have been made during the configuration or details of what the user should do next. On this page only a Close button is displayed. Once at the Summary page, the user cannot return to any other page.")
label.set_line_wrap(True)
box.pack_start(label, True, True, 0)
self.set_page_complete(box, True)
def on_apply_clicked(self, *args):
print "The 'Apply' button has been clicked"
def on_close_clicked(self, *args):
print "The 'Close' button has been clicked"
gtk.main_quit()
def on_cancel_clicked(self, *args):
print "The Assistant has been cancelled."
gtk.main_quit()
def on_complete_toggled(self, checkbutton):
assistant.set_page_complete(self.complete, checkbutton.get_active())
assistant = Assistant()
assistant.show_all()
gtk.main()
|
MiCHiLU/python-oauth2 | refs/heads/six | oauth2/__init__.py | 1 | """
The MIT License
Copyright (c) 2007-2010 Leah Culver, Joe Stump, Mark Paschal, Vic Fryzel
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import base64
import urllib
import time
import random
import urlparse
import hmac
import binascii
import httplib2
import hashlib
import _version
__version__ = _version.__version__
OAUTH_VERSION = '1.0' # Hi Blaine!
HTTP_METHOD = 'GET'
SIGNATURE_METHOD = 'PLAINTEXT'
class Error(RuntimeError):
"""Generic exception class."""
def __init__(self, message='OAuth error occurred.'):
self._message = message
@property
def message(self):
"""A hack to get around the deprecation errors in 2.6."""
return self._message
def __str__(self):
return self._message
class MissingSignature(Error):
pass
def build_authenticate_header(realm=''):
"""Optional WWW-Authenticate header (401 error)"""
return {'WWW-Authenticate': 'OAuth realm="%s"' % realm}
def build_xoauth_string(url, consumer, token=None):
"""Build an XOAUTH string for use in SMTP/IMPA authentication."""
request = Request.from_consumer_and_token(consumer, token,
"GET", url)
signing_method = SignatureMethod_HMAC_SHA1()
request.sign_request(signing_method, consumer, token)
params = []
for k, v in sorted(request.iteritems()):
if v is not None:
params.append('%s="%s"' % (k, escape(v)))
return "%s %s %s" % ("GET", url, ','.join(params))
def to_unicode(s):
""" Convert to unicode, raise exception with instructive error
message if s is not unicode, ascii, or utf-8. """
if not isinstance(s, unicode):
if not isinstance(s, str):
raise TypeError('You are required to pass either unicode or string here, not: %r (%s)' % (type(s), s))
try:
s = s.decode('utf-8')
except UnicodeDecodeError, le:
raise TypeError('You are required to pass either a unicode object or a utf-8 string here. You passed a Python string object which contained non-utf-8: %r. The UnicodeDecodeError that resulted from attempting to interpret it as utf-8 was: %s' % (s, le,))
return s
def to_utf8(s):
return to_unicode(s).encode('utf-8')
def to_unicode_if_string(s):
if isinstance(s, basestring):
return to_unicode(s)
else:
return s
def to_utf8_if_string(s):
if isinstance(s, basestring):
return to_utf8(s)
else:
return s
def to_unicode_optional_iterator(x):
"""
Raise TypeError if x is a str containing non-utf8 bytes or if x is
an iterable which contains such a str.
"""
if isinstance(x, basestring):
return to_unicode(x)
try:
l = list(x)
except TypeError, e:
assert 'is not iterable' in str(e)
return x
else:
return [ to_unicode(e) for e in l ]
def to_utf8_optional_iterator(x):
"""
    Raise TypeError if x is a str containing non-utf8 bytes or if x is
    an iterable which contains such a str.
"""
if isinstance(x, basestring):
return to_utf8(x)
try:
l = list(x)
except TypeError, e:
assert 'is not iterable' in str(e)
return x
else:
return [ to_utf8_if_string(e) for e in l ]
def escape(s):
"""Escape a URL including any /."""
return urllib.quote(s.encode('utf-8'), safe='~')
def generate_timestamp():
"""Get seconds since epoch (UTC)."""
return int(time.time())
def generate_nonce(length=8):
"""Generate pseudorandom number."""
return ''.join([str(random.randint(0, 9)) for i in range(length)])
def generate_verifier(length=8):
"""Generate pseudorandom number."""
return ''.join([str(random.randint(0, 9)) for i in range(length)])
class Consumer(object):
"""A consumer of OAuth-protected services.
The OAuth consumer is a "third-party" service that wants to access
protected resources from an OAuth service provider on behalf of an end
user. It's kind of the OAuth client.
Usually a consumer must be registered with the service provider by the
developer of the consumer software. As part of that process, the service
provider gives the consumer a *key* and a *secret* with which the consumer
software can identify itself to the service. The consumer will include its
key in each request to identify itself, but will use its secret only when
signing requests, to prove that the request is from that particular
registered consumer.
Once registered, the consumer can then use its consumer credentials to ask
the service provider for a request token, kicking off the OAuth
authorization process.
"""
key = None
secret = None
def __init__(self, key, secret):
self.key = key
self.secret = secret
if self.key is None or self.secret is None:
raise ValueError("Key and secret must be set.")
def __str__(self):
data = {'oauth_consumer_key': self.key,
'oauth_consumer_secret': self.secret}
return urllib.urlencode(data)
class Token(object):
"""An OAuth credential used to request authorization or a protected
resource.
Tokens in OAuth comprise a *key* and a *secret*. The key is included in
requests to identify the token being used, but the secret is used only in
the signature, to prove that the requester is who the server gave the
token to.
When first negotiating the authorization, the consumer asks for a *request
token* that the live user authorizes with the service provider. The
consumer then exchanges the request token for an *access token* that can
be used to access protected resources.
"""
key = None
secret = None
callback = None
callback_confirmed = None
verifier = None
def __init__(self, key, secret):
self.key = key
self.secret = secret
if self.key is None or self.secret is None:
raise ValueError("Key and secret must be set.")
def set_callback(self, callback):
self.callback = callback
self.callback_confirmed = 'true'
def set_verifier(self, verifier=None):
if verifier is not None:
self.verifier = verifier
else:
self.verifier = generate_verifier()
def get_callback_url(self):
if self.callback and self.verifier:
# Append the oauth_verifier.
parts = urlparse.urlparse(self.callback)
scheme, netloc, path, params, query, fragment = parts[:6]
if query:
query = '%s&oauth_verifier=%s' % (query, self.verifier)
else:
query = 'oauth_verifier=%s' % self.verifier
return urlparse.urlunparse((scheme, netloc, path, params,
query, fragment))
return self.callback
def to_string(self):
"""Returns this token as a plain string, suitable for storage.
The resulting string includes the token's secret, so you should never
send or store this string where a third party can read it.
"""
data = {
'oauth_token': self.key,
'oauth_token_secret': self.secret,
}
if self.callback_confirmed is not None:
data['oauth_callback_confirmed'] = self.callback_confirmed
return urllib.urlencode(data)
@staticmethod
def from_string(s):
"""Deserializes a token from a string like one returned by
`to_string()`."""
if not len(s):
raise ValueError("Invalid parameter string.")
params = urlparse.parse_qs(s, keep_blank_values=False)
if not len(params):
raise ValueError("Invalid parameter string.")
try:
key = params['oauth_token'][0]
except Exception:
raise ValueError("'oauth_token' not found in OAuth request.")
try:
secret = params['oauth_token_secret'][0]
except Exception:
raise ValueError("'oauth_token_secret' not found in "
"OAuth request.")
token = Token(key, secret)
try:
token.callback_confirmed = params['oauth_callback_confirmed'][0]
except KeyError:
pass # 1.0, no callback confirmed.
return token
def __str__(self):
return self.to_string()
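# Hedged usage sketch (not part of the original module): round-tripping a token
# through its serialized form. The key and secret values are placeholders.
#
#     >>> token = Token('request-key', 'request-secret')
#     >>> serialized = token.to_string()
#     >>> restored = Token.from_string(serialized)
#     >>> restored.key == token.key
#     True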
def setter(attr):
name = attr.__name__
def getter(self):
try:
return self.__dict__[name]
except KeyError:
raise AttributeError(name)
def deleter(self):
del self.__dict__[name]
return property(getter, attr, deleter)
class Request(dict):
"""The parameters and information for an HTTP request, suitable for
authorizing with OAuth credentials.
When a consumer wants to access a service's protected resources, it does
so using a signed HTTP request identifying itself (the consumer) with its
key, and providing an access token authorized by the end user to access
those resources.
"""
version = OAUTH_VERSION
def __init__(self, method=HTTP_METHOD, url=None, parameters=None,
body='', is_form_encoded=False):
if url is not None:
self.url = to_unicode(url)
self.method = method
if parameters is not None:
for k, v in parameters.iteritems():
k = to_unicode(k)
v = to_unicode_optional_iterator(v)
self[k] = v
self.body = body
self.is_form_encoded = is_form_encoded
@setter
def url(self, value):
self.__dict__['url'] = value
if value is not None:
scheme, netloc, path, params, query, fragment = urlparse.urlparse(value)
# Exclude default port numbers.
if scheme == 'http' and netloc[-3:] == ':80':
netloc = netloc[:-3]
elif scheme == 'https' and netloc[-4:] == ':443':
netloc = netloc[:-4]
if scheme not in ('http', 'https'):
raise ValueError("Unsupported URL %s (%s)." % (value, scheme))
# Normalized URL excludes params, query, and fragment.
self.normalized_url = urlparse.urlunparse((scheme, netloc, path, None, None, None))
else:
self.normalized_url = None
self.__dict__['url'] = None
@setter
def method(self, value):
self.__dict__['method'] = value.upper()
def _get_timestamp_nonce(self):
return self['oauth_timestamp'], self['oauth_nonce']
def get_nonoauth_parameters(self):
"""Get any non-OAuth parameters."""
return dict([(k, v) for k, v in self.iteritems()
if not k.startswith('oauth_')])
def to_header(self, realm=''):
"""Serialize as a header for an HTTPAuth request."""
oauth_params = ((k, v) for k, v in self.items()
if k.startswith('oauth_'))
stringy_params = ((k, escape(str(v))) for k, v in oauth_params)
header_params = ('%s="%s"' % (k, v) for k, v in stringy_params)
params_header = ', '.join(header_params)
auth_header = 'OAuth realm="%s"' % realm
if params_header:
auth_header = "%s, %s" % (auth_header, params_header)
return {'Authorization': auth_header}
def to_postdata(self):
"""Serialize as post data for a POST request."""
d = {}
for k, v in self.iteritems():
d[k.encode('utf-8')] = to_utf8_optional_iterator(v)
# tell urlencode to deal with sequence values and map them correctly
# to resulting querystring. for example self["k"] = ["v1", "v2"] will
# result in 'k=v1&k=v2' and not k=%5B%27v1%27%2C+%27v2%27%5D
return urllib.urlencode(d, True).replace('+', '%20')
def to_url(self):
"""Serialize as a URL for a GET request."""
base_url = urlparse.urlparse(self.url)
try:
query = base_url.query
except AttributeError:
# must be python <2.5
query = base_url[4]
query = urlparse.parse_qs(query)
for k, v in self.items():
query.setdefault(k, []).append(v)
try:
scheme = base_url.scheme
netloc = base_url.netloc
path = base_url.path
params = base_url.params
fragment = base_url.fragment
except AttributeError:
# must be python <2.5
scheme = base_url[0]
netloc = base_url[1]
path = base_url[2]
params = base_url[3]
fragment = base_url[5]
url = (scheme, netloc, path, params,
urllib.urlencode(query, True), fragment)
return urlparse.urlunparse(url)
def get_parameter(self, parameter):
ret = self.get(parameter)
if ret is None:
raise Error('Parameter not found: %s' % parameter)
return ret
def get_normalized_parameters(self):
"""Return a string that contains the parameters that must be signed."""
items = []
for key, value in self.iteritems():
if key == 'oauth_signature':
continue
# 1.0a/9.1.1 states that kvp must be sorted by key, then by value,
# so we unpack sequence values into multiple items for sorting.
if isinstance(value, basestring):
items.append((to_utf8_if_string(key), to_utf8(value)))
else:
try:
value = list(value)
except TypeError, e:
assert 'is not iterable' in str(e)
items.append((to_utf8_if_string(key), to_utf8_if_string(value)))
else:
items.extend((to_utf8_if_string(key), to_utf8_if_string(item)) for item in value)
# Include any query string parameters from the provided URL
query = urlparse.urlparse(self.url)[4]
url_items = self._split_url_string(query).items()
url_items = [(to_utf8(k), to_utf8(v)) for k, v in url_items if k != 'oauth_signature' ]
items.extend(url_items)
items.sort()
encoded_str = urllib.urlencode(items)
# Encode signature parameters per Oauth Core 1.0 protocol
# spec draft 7, section 3.6
# (http://tools.ietf.org/html/draft-hammer-oauth-07#section-3.6)
# Spaces must be encoded with "%20" instead of "+"
return encoded_str.replace('+', '%20').replace('%7E', '~')
def sign_request(self, signature_method, consumer, token):
"""Set the signature parameter to the result of sign."""
if not self.is_form_encoded:
# according to
# http://oauth.googlecode.com/svn/spec/ext/body_hash/1.0/oauth-bodyhash.html
# section 4.1.1 "OAuth Consumers MUST NOT include an
# oauth_body_hash parameter on requests with form-encoded
# request bodies."
self['oauth_body_hash'] = base64.b64encode(hashlib.sha1(self.body).digest())
if 'oauth_consumer_key' not in self:
self['oauth_consumer_key'] = consumer.key
if token and 'oauth_token' not in self:
self['oauth_token'] = token.key
self['oauth_signature_method'] = signature_method.name
self['oauth_signature'] = signature_method.sign(self, consumer, token)
@classmethod
def make_timestamp(cls):
"""Get seconds since epoch (UTC)."""
return str(int(time.time()))
@classmethod
def make_nonce(cls):
"""Generate pseudorandom number."""
return str(random.randint(0, 100000000))
@classmethod
def from_request(cls, http_method, http_url, headers=None, parameters=None,
query_string=None):
"""Combines multiple parameter sources."""
if parameters is None:
parameters = {}
# Headers
if headers and 'Authorization' in headers:
auth_header = headers['Authorization']
# Check that the authorization header is OAuth.
if auth_header[:6] == 'OAuth ':
auth_header = auth_header[6:]
try:
# Get the parameters from the header.
header_params = cls._split_header(auth_header)
parameters.update(header_params)
except:
raise Error('Unable to parse OAuth parameters from '
'Authorization header.')
# GET or POST query string.
if query_string:
query_params = cls._split_url_string(query_string)
parameters.update(query_params)
# URL parameters.
param_str = urlparse.urlparse(http_url)[4] # query
url_params = cls._split_url_string(param_str)
parameters.update(url_params)
if parameters:
return cls(http_method, http_url, parameters)
return None
@classmethod
def from_consumer_and_token(cls, consumer, token=None,
http_method=HTTP_METHOD, http_url=None, parameters=None,
body='', is_form_encoded=False):
if not parameters:
parameters = {}
defaults = {
'oauth_consumer_key': consumer.key,
'oauth_timestamp': cls.make_timestamp(),
'oauth_nonce': cls.make_nonce(),
'oauth_version': cls.version,
}
defaults.update(parameters)
parameters = defaults
if token:
parameters['oauth_token'] = token.key
if token.verifier:
parameters['oauth_verifier'] = token.verifier
return Request(http_method, http_url, parameters, body=body,
is_form_encoded=is_form_encoded)
@classmethod
def from_token_and_callback(cls, token, callback=None,
http_method=HTTP_METHOD, http_url=None, parameters=None):
if not parameters:
parameters = {}
parameters['oauth_token'] = token.key
if callback:
parameters['oauth_callback'] = callback
return cls(http_method, http_url, parameters)
@staticmethod
def _split_header(header):
"""Turn Authorization: header into parameters."""
params = {}
parts = header.split(',')
for param in parts:
# Ignore realm parameter.
if param.find('realm') > -1:
continue
# Remove whitespace.
param = param.strip()
# Split key-value.
param_parts = param.split('=', 1)
# Remove quotes and unescape the value.
params[param_parts[0]] = urllib.unquote(param_parts[1].strip('\"'))
return params
@staticmethod
def _split_url_string(param_str):
"""Turn URL string into parameters."""
parameters = urlparse.parse_qs(param_str.encode('utf-8'), keep_blank_values=True)
for k, v in parameters.iteritems():
parameters[k] = urllib.unquote(v[0])
return parameters
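# Hedged usage sketch (not part of the original module): building and signing a
# two-legged request by hand, then serializing it as an Authorization header.
# The key, secret and URL values are placeholders; SignatureMethod_HMAC_SHA1 is
# defined further down in this module.
#
#     >>> consumer = Consumer('my-key', 'my-secret')
#     >>> req = Request.from_consumer_and_token(
#     ...     consumer, http_method='GET',
#     ...     http_url='https://api.example.com/resource')
#     >>> req.sign_request(SignatureMethod_HMAC_SHA1(), consumer, None)
#     >>> headers = req.to_header()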
class Client(httplib2.Http):
"""OAuthClient is a worker to attempt to execute a request."""
def __init__(self, consumer, token=None, cache=None, timeout=None,
proxy_info=None):
if consumer is not None and not isinstance(consumer, Consumer):
raise ValueError("Invalid consumer.")
if token is not None and not isinstance(token, Token):
raise ValueError("Invalid token.")
self.consumer = consumer
self.token = token
self.method = SignatureMethod_HMAC_SHA1()
httplib2.Http.__init__(self, cache=cache, timeout=timeout, proxy_info=proxy_info)
def set_signature_method(self, method):
if not isinstance(method, SignatureMethod):
raise ValueError("Invalid signature method.")
self.method = method
def request(self, uri, method="GET", body='', headers=None,
redirections=httplib2.DEFAULT_MAX_REDIRECTS, connection_type=None):
DEFAULT_POST_CONTENT_TYPE = 'application/x-www-form-urlencoded'
if not isinstance(headers, dict):
headers = {}
if method == "POST":
headers['Content-Type'] = headers.get('Content-Type',
DEFAULT_POST_CONTENT_TYPE)
is_form_encoded = \
headers.get('Content-Type') == 'application/x-www-form-urlencoded'
if is_form_encoded and body:
parameters = urlparse.parse_qs(body)
else:
parameters = None
req = Request.from_consumer_and_token(self.consumer,
token=self.token, http_method=method, http_url=uri,
parameters=parameters, body=body, is_form_encoded=is_form_encoded)
req.sign_request(self.method, self.consumer, self.token)
schema, rest = urllib.splittype(uri)
if rest.startswith('//'):
hierpart = '//'
else:
hierpart = ''
host, rest = urllib.splithost(rest)
realm = schema + ':' + hierpart + host
if is_form_encoded:
body = req.to_postdata()
elif method == "GET":
uri = req.to_url()
else:
headers.update(req.to_header(realm=realm))
return httplib2.Http.request(self, uri, method=method, body=body,
headers=headers, redirections=redirections,
connection_type=connection_type)
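# Hedged usage sketch (not part of the original module): issuing a signed
# two-legged request with the Client above (HMAC-SHA1 is the default signature
# method). The key, secret and URL are placeholders.
#
#     >>> consumer = Consumer('my-key', 'my-secret')
#     >>> client = Client(consumer)
#     >>> resp, content = client.request('https://api.example.com/resource', 'GET')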
class Server(object):
"""A skeletal implementation of a service provider, providing protected
resources to requests from authorized consumers.
This class implements the logic to check requests for authorization. You
can use it with your web server or web framework to protect certain
resources with OAuth.
"""
timestamp_threshold = 300 # In seconds, five minutes.
version = OAUTH_VERSION
signature_methods = None
def __init__(self, signature_methods=None):
self.signature_methods = signature_methods or {}
def add_signature_method(self, signature_method):
self.signature_methods[signature_method.name] = signature_method
return self.signature_methods
def verify_request(self, request, consumer, token):
"""Verifies an api call and checks all the parameters."""
self._check_version(request)
self._check_signature(request, consumer, token)
parameters = request.get_nonoauth_parameters()
return parameters
def build_authenticate_header(self, realm=''):
"""Optional support for the authenticate header."""
return {'WWW-Authenticate': 'OAuth realm="%s"' % realm}
def _check_version(self, request):
"""Verify the correct version of the request for this server."""
version = self._get_version(request)
if version and version != self.version:
raise Error('OAuth version %s not supported.' % str(version))
def _get_version(self, request):
"""Return the version of the request for this server."""
try:
version = request.get_parameter('oauth_version')
except:
version = OAUTH_VERSION
return version
def _get_signature_method(self, request):
"""Figure out the signature with some defaults."""
try:
signature_method = request.get_parameter('oauth_signature_method')
except:
signature_method = SIGNATURE_METHOD
try:
# Get the signature method object.
signature_method = self.signature_methods[signature_method]
except:
signature_method_names = ', '.join(self.signature_methods.keys())
raise Error('Signature method %s not supported try one of the following: %s' % (signature_method, signature_method_names))
return signature_method
def _check_signature(self, request, consumer, token):
timestamp, nonce = request._get_timestamp_nonce()
self._check_timestamp(timestamp)
signature_method = self._get_signature_method(request)
try:
signature = request.get_parameter('oauth_signature')
except:
raise MissingSignature('Missing oauth_signature.')
# Validate the signature.
valid = signature_method.check(request, consumer, token, signature)
if not valid:
key, base = signature_method.signing_base(request, consumer, token)
raise Error('Invalid signature. Expected signature base '
'string: %s' % base)
def _check_timestamp(self, timestamp):
"""Verify that timestamp is recentish."""
timestamp = int(timestamp)
now = int(time.time())
lapsed = now - timestamp
if lapsed > self.timestamp_threshold:
            raise Error('Expired timestamp: given %d and now %d differ by '
                        'more than the threshold of %d seconds' %
                        (timestamp, now, self.timestamp_threshold))
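# A minimal provider-side sketch showing how the Server class above is meant
# to be used. The `request`, `consumer` and `token` arguments are assumed to
# come from the provider's HTTP layer and its own storage; this helper is
# illustrative only and is not called anywhere in this module.
def _example_verify_request(request, consumer, token):
    server = Server()
    server.add_signature_method(SignatureMethod_HMAC_SHA1())
    server.add_signature_method(SignatureMethod_PLAINTEXT())
    try:
        # Returns the non-oauth parameters if the signature checks out.
        return server.verify_request(request, consumer, token)
    except Error:
        return None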
class SignatureMethod(object):
"""A way of signing requests.
The OAuth protocol lets consumers and service providers pick a way to sign
requests. This interface shows the methods expected by the other `oauth`
modules for signing requests. Subclass it and implement its methods to
provide a new way to sign requests.
"""
def signing_base(self, request, consumer, token):
"""Calculates the string that needs to be signed.
This method returns a 2-tuple containing the starting key for the
signing and the message to be signed. The latter may be used in error
messages to help clients debug their software.
"""
raise NotImplementedError
def sign(self, request, consumer, token):
"""Returns the signature for the given request, based on the consumer
and token also provided.
You should use your implementation of `signing_base()` to build the
message to sign. Otherwise it may be less useful for debugging.
"""
raise NotImplementedError
def check(self, request, consumer, token, signature):
"""Returns whether the given signature is the correct signature for
the given consumer and token signing the given request."""
built = self.sign(request, consumer, token)
return built == signature
class SignatureMethod_HMAC_SHA1(SignatureMethod):
name = 'HMAC-SHA1'
def signing_base(self, request, consumer, token):
if not hasattr(request, 'normalized_url') or request.normalized_url is None:
raise ValueError("Base URL for request is not set.")
sig = (
escape(request.method),
escape(request.normalized_url),
escape(request.get_normalized_parameters()),
)
key = '%s&' % escape(consumer.secret)
if token:
key += escape(token.secret)
raw = '&'.join(sig)
return key, raw
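    # For reference (per the OAuth 1.0 spec): the base string built above is
    #   <HTTP-METHOD>&<percent-encoded URL>&<percent-encoded normalized params>
    # and the HMAC key is "<consumer secret>&<token secret>", with the token
    # part left empty for two-legged (consumer-only) requests.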
def sign(self, request, consumer, token):
"""Builds the base signature string."""
key, raw = self.signing_base(request, consumer, token)
hashed = hmac.new(key, raw, hashlib.sha1)
# Calculate the digest base 64.
return binascii.b2a_base64(hashed.digest())[:-1]
class SignatureMethod_PLAINTEXT(SignatureMethod):
name = 'PLAINTEXT'
def signing_base(self, request, consumer, token):
"""Concatenates the consumer key and secret with the token's
secret."""
sig = '%s&' % escape(consumer.secret)
if token:
sig = sig + escape(token.secret)
return sig, sig
def sign(self, request, consumer, token):
key, raw = self.signing_base(request, consumer, token)
return raw
|
manodeep/Corrfunc | refs/heads/master | Corrfunc/theory/DDrppi.py | 1 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Python wrapper around the C extension for the pair counter in
``theory/DDrppi/``. This wrapper is in :py:mod:`Corrfunc.theory.DDrppi`
"""
from __future__ import (division, print_function, absolute_import,
unicode_literals)
__author__ = ('Manodeep Sinha')
__all__ = ('DDrppi', )
def DDrppi(autocorr, nthreads, pimax, binfile, X1, Y1, Z1, weights1=None,
periodic=True, X2=None, Y2=None, Z2=None, weights2=None,
verbose=False, boxsize=0.0, output_rpavg=False,
xbin_refine_factor=2, ybin_refine_factor=2,
zbin_refine_factor=1, max_cells_per_dim=100,
copy_particles=True, enable_min_sep_opt=True,
c_api_timer=False, isa=r'fastest', weight_type=None):
"""
Calculate the 3-D pair-counts corresponding to the real-space correlation
function, :math:`\\xi(r_p, \pi)` or :math:`\\wp(r_p)`. Pairs which are
separated by less than the ``rp`` bins (specified in ``binfile``) in the
X-Y plane, and less than ``pimax`` in the Z-dimension are
counted.
If ``weights`` are provided, the mean pair weight is stored in the
``"weightavg"`` field of the results array. The raw pair counts in the
``"npairs"`` field are not weighted. The weighting scheme depends on
``weight_type``.
    .. note:: This module only returns pair counts and not the actual
       correlation function :math:`\\xi(r_p, \pi)` or :math:`wp(r_p)`. See the
utilities :py:mod:`Corrfunc.utils.convert_3d_counts_to_cf` and
:py:mod:`Corrfunc.utils.convert_rp_pi_counts_to_wp` for computing
:math:`\\xi(r_p, \pi)` and :math:`wp(r_p)` respectively from the
pair counts.
Parameters
-----------
autocorr: boolean, required
Boolean flag for auto/cross-correlation. If autocorr is set to 1,
then the second set of particle positions are not required.
nthreads: integer
The number of OpenMP threads to use. Has no effect if OpenMP was not
enabled during library compilation.
pimax: double
A double-precision value for the maximum separation along
the Z-dimension.
        Distances along the :math:`\\pi` direction are binned with unit
        depth. For instance, if ``pimax=40``, then 40 bins will be created
        along the ``pi`` direction.
Note: Only pairs with ``0 <= dz < pimax`` are counted (no equality).
binfile: string or an list/array of floats
For string input: filename specifying the ``rp`` bins for
``DDrppi``. The file should contain white-space separated values
of (rpmin, rpmax) for each ``rp`` wanted. The bins need to be
contiguous and sorted in increasing order (smallest bins come first).
For array-like input: A sequence of ``rp`` values that provides the
bin-edges. For example,
``np.logspace(np.log10(0.1), np.log10(10.0), 15)`` is a valid
input specifying **14** (logarithmic) bins between 0.1 and 10.0. This
array does not need to be sorted.
X1/Y1/Z1: array-like, real (float/double)
The array of X/Y/Z positions for the first set of points.
Calculations are done in the precision of the supplied arrays.
weights1: array_like, real (float/double), optional
A scalar, or an array of weights of shape (n_weights, n_positions) or
(n_positions,). ``weight_type`` specifies how these weights are used;
results are returned in the ``weightavg`` field. If only one of
weights1 and weights2 is specified, the other will be set to uniform
weights.
X2/Y2/Z2: array-like, real (float/double)
Array of XYZ positions for the second set of points. *Must* be the same
precision as the X1/Y1/Z1 arrays. Only required when ``autocorr==0``.
weights2: array-like, real (float/double), optional
Same as weights1, but for the second set of positions
periodic: boolean
Boolean flag to indicate periodic boundary conditions.
verbose: boolean (default false)
Boolean flag to control output of informational messages
boxsize: double
The side-length of the cube in the cosmological simulation.
Present to facilitate exact calculations for periodic wrapping.
If boxsize is not supplied, then the wrapping is done based on
the maximum difference within each dimension of the X/Y/Z arrays.
output_rpavg: boolean (default false)
Boolean flag to output the average ``rp`` for each bin. Code will
run slower if you set this flag.
Note: If you are calculating in single-precision, ``rpavg`` will
suffer from numerical loss of precision and can not be trusted. If
you need accurate ``rpavg`` values, then pass in double precision
arrays for the particle positions.
(xyz)bin_refine_factor: integer, default is (2,2,1); typically within [1-3]
Controls the refinement on the cell sizes. Can have up to a 20% impact
on runtime.
max_cells_per_dim: integer, default is 100, typical values in [50-300]
Controls the maximum number of cells per dimension. Total number of
cells can be up to (max_cells_per_dim)^3. Only increase if ``rpmax`` is
too small relative to the boxsize (and increasing helps the runtime).
copy_particles: boolean (default True)
Boolean flag to make a copy of the particle positions
If set to False, the particles will be re-ordered in-place
.. versionadded:: 2.3.0
enable_min_sep_opt: boolean (default true)
Boolean flag to allow optimizations based on min. separation between
pairs of cells. Here to allow for comparison studies.
.. versionadded:: 2.3.0
c_api_timer: boolean (default false)
Boolean flag to measure actual time spent in the C libraries. Here
to allow for benchmarking and scaling studies.
isa: string (default ``fastest``)
Controls the runtime dispatch for the instruction set to use. Options
are: [``fastest``, ``avx512f``, ``avx``, ``sse42``, ``fallback``]
Setting isa to ``fastest`` will pick the fastest available instruction
set on the current computer. However, if you set ``isa`` to, say,
``avx`` and ``avx`` is not available on the computer, then the code
will revert to using ``fallback`` (even though ``sse42`` might be
available). Unless you are benchmarking the different instruction
sets, you should always leave ``isa`` to the default value. And if
you *are* benchmarking, then the string supplied here gets translated
into an ``enum`` for the instruction set defined in ``utils/defs.h``.
weight_type: string, optional. Default: None.
The type of weighting to apply. One of ["pair_product", None].
Returns
--------
results: Numpy structured array
A numpy structured array containing [rpmin, rpmax, rpavg, pimax,
npairs, weightavg] for each radial bin specified in the ``binfile``.
If ``output_rpavg`` is not set, then ``rpavg`` will be set to 0.0 for
all bins; similarly for ``weightavg``. ``npairs`` contains the number
of pairs in that bin and can be used to compute :math:`\\xi(r_p, \pi)`
by combining with (DR, RR) counts.
api_time: float, optional
Only returned if ``c_api_timer`` is set. ``api_time`` measures only
the time spent within the C library and ignores all python overhead.
Example
--------
>>> from __future__ import print_function
>>> import numpy as np
>>> from os.path import dirname, abspath, join as pjoin
>>> import Corrfunc
>>> from Corrfunc.theory.DDrppi import DDrppi
>>> binfile = pjoin(dirname(abspath(Corrfunc.__file__)),
... "../theory/tests/", "bins")
>>> N = 10000
>>> boxsize = 420.0
>>> nthreads = 4
>>> autocorr = 1
>>> pimax = 40.0
>>> seed = 42
>>> np.random.seed(seed)
>>> X = np.random.uniform(0, boxsize, N)
>>> Y = np.random.uniform(0, boxsize, N)
>>> Z = np.random.uniform(0, boxsize, N)
>>> weights = np.ones_like(X)
>>> results = DDrppi(autocorr, nthreads, pimax, binfile,
... X, Y, Z, weights1=weights, weight_type='pair_product', output_rpavg=True)
>>> for r in results[519:]: print("{0:10.6f} {1:10.6f} {2:10.6f} {3:10.1f}"
... " {4:10d} {5:10.6f}".format(r['rmin'], r['rmax'],
... r['rpavg'], r['pimax'], r['npairs'], r['weightavg']))
... # doctest: +NORMALIZE_WHITESPACE
11.756000 16.753600 14.379250 40.0 1150 1.000000
16.753600 23.875500 20.449131 1.0 2604 1.000000
16.753600 23.875500 20.604834 2.0 2370 1.000000
16.753600 23.875500 20.523989 3.0 2428 1.000000
16.753600 23.875500 20.475181 4.0 2462 1.000000
16.753600 23.875500 20.458005 5.0 2532 1.000000
16.753600 23.875500 20.537162 6.0 2522 1.000000
16.753600 23.875500 20.443087 7.0 2422 1.000000
16.753600 23.875500 20.474580 8.0 2360 1.000000
16.753600 23.875500 20.420360 9.0 2512 1.000000
16.753600 23.875500 20.478355 10.0 2472 1.000000
16.753600 23.875500 20.485268 11.0 2406 1.000000
16.753600 23.875500 20.372985 12.0 2420 1.000000
16.753600 23.875500 20.647998 13.0 2378 1.000000
16.753600 23.875500 20.556208 14.0 2420 1.000000
16.753600 23.875500 20.527992 15.0 2462 1.000000
16.753600 23.875500 20.581017 16.0 2380 1.000000
16.753600 23.875500 20.491819 17.0 2346 1.000000
16.753600 23.875500 20.534440 18.0 2496 1.000000
16.753600 23.875500 20.529129 19.0 2512 1.000000
16.753600 23.875500 20.501946 20.0 2500 1.000000
16.753600 23.875500 20.513349 21.0 2544 1.000000
16.753600 23.875500 20.471915 22.0 2430 1.000000
16.753600 23.875500 20.450651 23.0 2354 1.000000
16.753600 23.875500 20.550753 24.0 2460 1.000000
16.753600 23.875500 20.540262 25.0 2490 1.000000
16.753600 23.875500 20.559572 26.0 2350 1.000000
16.753600 23.875500 20.534245 27.0 2382 1.000000
16.753600 23.875500 20.511302 28.0 2508 1.000000
16.753600 23.875500 20.491632 29.0 2456 1.000000
16.753600 23.875500 20.592493 30.0 2386 1.000000
16.753600 23.875500 20.506234 31.0 2484 1.000000
16.753600 23.875500 20.482109 32.0 2538 1.000000
16.753600 23.875500 20.518463 33.0 2544 1.000000
16.753600 23.875500 20.482515 34.0 2534 1.000000
16.753600 23.875500 20.503124 35.0 2382 1.000000
16.753600 23.875500 20.471307 36.0 2356 1.000000
16.753600 23.875500 20.384231 37.0 2554 1.000000
16.753600 23.875500 20.454012 38.0 2458 1.000000
16.753600 23.875500 20.585543 39.0 2394 1.000000
16.753600 23.875500 20.504965 40.0 2500 1.000000
"""
try:
from Corrfunc._countpairs import countpairs_rp_pi as DDrppi_extn
except ImportError:
msg = "Could not import the C extension for the 3-D "\
"real-space pair counter."
raise ImportError(msg)
import numpy as np
from Corrfunc.utils import translate_isa_string_to_enum,\
return_file_with_rbins, convert_to_native_endian,\
sys_pipes, process_weights
from future.utils import bytes_to_native_str
if not autocorr:
if X2 is None or Y2 is None or Z2 is None:
msg = "Must pass valid arrays for X2/Y2/Z2 for "\
"computing cross-correlation"
raise ValueError(msg)
else:
# TODO: is this needed?
X2 = np.empty(1)
Y2 = np.empty(1)
Z2 = np.empty(1)
weights1, weights2 = process_weights(weights1, weights2, X1, X2, weight_type, autocorr)
# Ensure all input arrays are native endian
X1, Y1, Z1, weights1, X2, Y2, Z2, weights2 = [
convert_to_native_endian(arr, warn=True) for arr in
[X1, Y1, Z1, weights1, X2, Y2, Z2, weights2]]
# Passing None parameters breaks the parsing code, so avoid this
kwargs = {}
for k in ['weights1', 'weights2', 'weight_type', 'X2', 'Y2', 'Z2']:
v = locals()[k]
if v is not None:
kwargs[k] = v
integer_isa = translate_isa_string_to_enum(isa)
rbinfile, delete_after_use = return_file_with_rbins(binfile)
with sys_pipes():
extn_results = DDrppi_extn(autocorr, nthreads,
pimax, rbinfile,
X1, Y1, Z1,
periodic=periodic,
verbose=verbose,
boxsize=boxsize,
output_rpavg=output_rpavg,
xbin_refine_factor=xbin_refine_factor,
ybin_refine_factor=ybin_refine_factor,
zbin_refine_factor=zbin_refine_factor,
max_cells_per_dim=max_cells_per_dim,
copy_particles=copy_particles,
enable_min_sep_opt=enable_min_sep_opt,
c_api_timer=c_api_timer,
isa=integer_isa, **kwargs)
if extn_results is None:
msg = "RuntimeError occurred"
raise RuntimeError(msg)
else:
extn_results, api_time = extn_results
if delete_after_use:
import os
os.remove(rbinfile)
results_dtype = np.dtype([(bytes_to_native_str(b'rmin'), np.float64),
(bytes_to_native_str(b'rmax'), np.float64),
(bytes_to_native_str(b'rpavg'), np.float64),
(bytes_to_native_str(b'pimax'), np.float64),
(bytes_to_native_str(b'npairs'), np.uint64),
(bytes_to_native_str(b'weightavg'), np.float64),
])
results = np.array(extn_results, dtype=results_dtype)
if not c_api_timer:
return results
else:
return results, api_time
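# Illustrative sketch (not called anywhere): turning pair counts from this
# routine into the projected correlation function wp(rp) with the helper named
# in the docstring. The argument order shown for
# ``convert_rp_pi_counts_to_wp`` is an assumption -- check ``Corrfunc.utils``
# before relying on it. ND/NR are the numbers of data/random points and
# DD/DR/RR are results arrays returned by this pair counter.
def _example_wp_from_counts(ND, NR, DD, DR, RR, nrpbins, pimax):
    from Corrfunc.utils import convert_rp_pi_counts_to_wp
    # For an autocorrelation, D1D2=DD, D1R2=D2R1=DR and R1R2=RR
    return convert_rp_pi_counts_to_wp(ND, ND, NR, NR, DD, DR, DR, RR,
                                      nrpbins, pimax)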
if __name__ == '__main__':
import doctest
doctest.testmod()
|
AOSPU/external_chromium_org | refs/heads/android-5.0/py3 | content/shell/tools/breakpad_integration_test.py | 16 | #!/usr/bin/env python
#
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Integration test for breakpad in content shell.
This test checks that content shell and breakpad are correctly hooked up, as
well as that the tools can symbolize a stack trace."""
import glob
import optparse
import os
import shutil
import subprocess
import sys
import tempfile
CONCURRENT_TASKS=4
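# Example invocation (hypothetical paths):
#   breakpad_integration_test.py --build-dir=out/Release \
#       --binary=out/Release/content_shell --verbose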
def main():
parser = optparse.OptionParser()
parser.add_option('', '--build-dir', default='',
help='The build output directory.')
parser.add_option('', '--binary', default='',
help='The path of the binary to generate symbols for.')
parser.add_option('', '--no-symbols', default=False, action='store_true',
help='Symbols are not expected to work.')
parser.add_option('-j', '--jobs', default=CONCURRENT_TASKS, action='store',
type='int', help='Number of parallel tasks to run.')
parser.add_option('-v', '--verbose', action='store_true',
help='Print verbose status output.')
(options, _) = parser.parse_args()
if not options.build_dir:
print "Required option --build-dir missing."
return 1
if not options.binary:
print "Required option --binary missing."
return 1
if not os.access(options.binary, os.X_OK):
print "Cannot find %s." % options.binary
return 1
failure = ''
# Create a temporary directory to store the crash dumps and symbols in.
crash_dir = tempfile.mkdtemp()
try:
print "# Generate symbols."
breakpad_tools_dir = os.path.join(
os.path.dirname(__file__), '..', '..', '..',
'components', 'breakpad', 'tools')
generate_symbols = os.path.join(
breakpad_tools_dir, 'generate_breakpad_symbols.py')
symbols_dir = os.path.join(crash_dir, 'symbols')
cmd = [generate_symbols,
'--build-dir=%s' % options.build_dir,
'--binary=%s' % options.binary,
'--symbols-dir=%s' % symbols_dir,
'--jobs=%d' % options.jobs]
if options.verbose:
cmd.append('--verbose')
print ' '.join(cmd)
failure = 'Failed to run generate_breakpad_symbols.py.'
subprocess.check_call(cmd)
print "# Run content_shell and make it crash."
cmd = [options.binary,
'--dump-render-tree',
'chrome://crash',
'--enable-crash-reporter',
'--crash-dumps-dir=%s' % crash_dir]
if options.verbose:
print ' '.join(cmd)
failure = 'Failed to run content_shell.'
if options.verbose:
subprocess.check_call(cmd)
else:
with open(os.devnull, 'w') as devnull:
subprocess.check_call(cmd, stdout=devnull, stderr=devnull)
print "# Retrieve crash dump."
dmp_files = glob.glob(os.path.join(crash_dir, '*.dmp'))
failure = 'Expected 1 crash dump, found %d.' % len(dmp_files)
if len(dmp_files) != 1:
raise Exception(failure)
dmp_file = dmp_files[0]
minidump = os.path.join(crash_dir, 'minidump')
dmp_to_minidump = os.path.join(breakpad_tools_dir, 'dmp2minidump.py')
cmd = [dmp_to_minidump, dmp_file, minidump]
if options.verbose:
print ' '.join(cmd)
failure = 'Failed to run dmp_to_minidump.'
subprocess.check_call(cmd)
print "# Symbolize crash dump."
minidump_stackwalk = os.path.join(options.build_dir, 'minidump_stackwalk')
cmd = [minidump_stackwalk, minidump, symbols_dir]
if options.verbose:
print ' '.join(cmd)
failure = 'Failed to run minidump_stackwalk.'
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stack = proc.communicate()[0]
# Check whether the stack contains a CrashIntentionally symbol.
found_symbol = 'CrashIntentionally' in stack
if options.no_symbols:
if found_symbol:
if options.verbose:
print stack
failure = 'Found unexpected reference to CrashIntentionally in stack'
raise Exception(failure)
else:
if not found_symbol:
if options.verbose:
print stack
failure = 'Could not find reference to CrashIntentionally in stack.'
raise Exception(failure)
except:
print "FAIL: %s" % failure
return 1
else:
print "PASS: Breakpad integration test ran successfully."
return 0
finally:
try:
shutil.rmtree(crash_dir)
except:
print 'Failed to delete temp directory "%s".' % crash_dir
if '__main__' == __name__:
sys.exit(main())
|
Bitergia/allura | refs/heads/master | ForgeSVN/forgesvn/templates/__init__.py | 12133432 | |
flwh/KK_mt6589_iq451 | refs/heads/master | prebuilts/python/darwin-x86/2.7.5/lib/python2.7/sqlite3/test/__init__.py | 12133432 | |
simodalla/newage | refs/heads/master | newage/tests/__init__.py | 16 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals, absolute_import
|
alqfahad/odoo | refs/heads/8.0 | addons/l10n_pe/__openerp__.py | 260 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2011 Cubic ERP - Teradata SAC (<http://cubicerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Peru Localization Chart Account',
'version': '1.0',
'description': """
Peruvian accounting chart and tax localization. According the PCGE 2010.
========================================================================
Plan contable peruano e impuestos de acuerdo a disposiciones vigentes de la
SUNAT 2011 (PCGE 2010).
""",
'author': ['Cubic ERP'],
'website': 'http://cubicERP.com',
'category': 'Localization/Account Charts',
'depends': ['account_chart'],
'data':[
'account_tax_code.xml',
'l10n_pe_chart.xml',
'account_tax.xml',
'l10n_pe_wizard.xml',
],
'demo': [],
'active': False,
'installable': True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
dssg/wikienergy | refs/heads/master | disaggregator/build/pandas/pandas/core/generic.py | 1 | # pylint: disable=W0231,E1101
import warnings
import operator
import weakref
import gc
import numpy as np
import pandas.lib as lib
import pandas as pd
from pandas.core.base import PandasObject
from pandas.core.index import (Index, MultiIndex, _ensure_index,
InvalidIndexError)
import pandas.core.indexing as indexing
from pandas.tseries.index import DatetimeIndex
from pandas.tseries.period import PeriodIndex
from pandas.core.internals import BlockManager
import pandas.core.common as com
import pandas.core.datetools as datetools
from pandas import compat
from pandas.compat import map, zip, lrange, string_types, isidentifier, lmap
from pandas.core.common import (isnull, notnull, is_list_like,
_values_from_object, _maybe_promote,
_maybe_box_datetimelike, ABCSeries,
SettingWithCopyError, SettingWithCopyWarning)
import pandas.core.nanops as nanops
from pandas.util.decorators import Appender, Substitution, deprecate_kwarg
from pandas.core import config
# goal is to be able to define the docs close to function, while still being
# able to share
_shared_docs = dict()
_shared_doc_kwargs = dict(axes='keywords for axes',
klass='NDFrame',
axes_single_arg='int or labels for object',
args_transpose='axes to permute (int or label for'
' object)')
def is_dictlike(x):
return isinstance(x, (dict, com.ABCSeries))
def _single_replace(self, to_replace, method, inplace, limit):
if self.ndim != 1:
raise TypeError('cannot replace {0} with method {1} on a {2}'
.format(to_replace, method, type(self).__name__))
orig_dtype = self.dtype
result = self if inplace else self.copy()
fill_f = com._get_fill_func(method)
mask = com.mask_missing(result.values, to_replace)
values = fill_f(result.values, limit=limit, mask=mask)
if values.dtype == orig_dtype and inplace:
return
result = pd.Series(values, index=self.index,
dtype=self.dtype).__finalize__(self)
if inplace:
self._update_inplace(result._data)
return
return result
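# Illustrative note: _single_replace backs Series.replace when a fill
# ``method`` is supplied for a scalar/list ``to_replace``, e.g. (hypothetical)
#   s.replace(0, method='pad')
# which fills each matching 0 from the preceding values instead of a constant.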
class NDFrame(PandasObject):
"""
N-dimensional analogue of DataFrame. Store multi-dimensional in a
size-mutable, labeled data structure
Parameters
----------
data : BlockManager
axes : list
copy : boolean, default False
"""
_internal_names = ['_data', '_cacher', '_item_cache', '_cache',
'is_copy', 'str', '_subtyp', '_index', '_default_kind',
'_default_fill_value','__array_struct__','__array_interface__']
_internal_names_set = set(_internal_names)
_metadata = []
is_copy = None
def __init__(self, data, axes=None, copy=False, dtype=None,
fastpath=False):
if not fastpath:
if dtype is not None:
data = data.astype(dtype)
elif copy:
data = data.copy()
if axes is not None:
for i, ax in enumerate(axes):
data = data.reindex_axis(ax, axis=i)
object.__setattr__(self, 'is_copy', None)
object.__setattr__(self, '_data', data)
object.__setattr__(self, '_item_cache', {})
def _validate_dtype(self, dtype):
""" validate the passed dtype """
if dtype is not None:
dtype = com._coerce_to_dtype(dtype)
# a compound dtype
if dtype.kind == 'V':
raise NotImplementedError("compound dtypes are not implemented"
"in the {0} constructor"
.format(self.__class__.__name__))
return dtype
def _init_mgr(self, mgr, axes=None, dtype=None, copy=False):
""" passed a manager and a axes dict """
for a, axe in axes.items():
if axe is not None:
mgr = mgr.reindex_axis(
axe, axis=self._get_block_manager_axis(a), copy=False)
# do not copy BlockManager unless explicitly done
if copy and dtype is None:
mgr = mgr.copy()
elif dtype is not None:
# avoid copy if we can
if len(mgr.blocks) > 1 or mgr.blocks[0].values.dtype != dtype:
mgr = mgr.astype(dtype=dtype)
return mgr
#----------------------------------------------------------------------
# Construction
@property
def _constructor(self):
raise NotImplementedError
def __unicode__(self):
# unicode representation based upon iterating over self
# (since, by definition, `PandasContainers` are iterable)
prepr = '[%s]' % ','.join(map(com.pprint_thing, self))
return '%s(%s)' % (self.__class__.__name__, prepr)
def _local_dir(self):
""" add the string-like attributes from the info_axis """
return [c for c in self._info_axis
if isinstance(c, string_types) and isidentifier(c)]
@property
def _constructor_sliced(self):
raise NotImplementedError
#----------------------------------------------------------------------
# Axis
@classmethod
def _setup_axes(
cls, axes, info_axis=None, stat_axis=None, aliases=None, slicers=None,
axes_are_reversed=False, build_axes=True, ns=None):
""" provide axes setup for the major PandasObjects
axes : the names of the axes in order (lowest to highest)
info_axis_num : the axis of the selector dimension (int)
stat_axis_num : the number of axis for the default stats (int)
aliases : other names for a single axis (dict)
slicers : how axes slice to others (dict)
axes_are_reversed : boolean whether to treat passed axes as
reversed (DataFrame)
build_axes : setup the axis properties (default True)
"""
cls._AXIS_ORDERS = axes
cls._AXIS_NUMBERS = dict((a, i) for i, a in enumerate(axes))
cls._AXIS_LEN = len(axes)
cls._AXIS_ALIASES = aliases or dict()
cls._AXIS_IALIASES = dict((v, k)
for k, v in cls._AXIS_ALIASES.items())
cls._AXIS_NAMES = dict(enumerate(axes))
cls._AXIS_SLICEMAP = slicers or None
cls._AXIS_REVERSED = axes_are_reversed
# typ
setattr(cls, '_typ', cls.__name__.lower())
# indexing support
cls._ix = None
if info_axis is not None:
cls._info_axis_number = info_axis
cls._info_axis_name = axes[info_axis]
if stat_axis is not None:
cls._stat_axis_number = stat_axis
cls._stat_axis_name = axes[stat_axis]
# setup the actual axis
if build_axes:
def set_axis(a, i):
setattr(cls, a, lib.AxisProperty(i))
cls._internal_names_set.add(a)
if axes_are_reversed:
m = cls._AXIS_LEN - 1
for i, a in cls._AXIS_NAMES.items():
set_axis(a, m - i)
else:
for i, a in cls._AXIS_NAMES.items():
set_axis(a, i)
# addtl parms
if isinstance(ns, dict):
for k, v in ns.items():
setattr(cls, k, v)
def _construct_axes_dict(self, axes=None, **kwargs):
""" return an axes dictionary for myself """
d = dict([(a, self._get_axis(a)) for a in (axes or self._AXIS_ORDERS)])
d.update(kwargs)
return d
@staticmethod
def _construct_axes_dict_from(self, axes, **kwargs):
""" return an axes dictionary for the passed axes """
d = dict([(a, ax) for a, ax in zip(self._AXIS_ORDERS, axes)])
d.update(kwargs)
return d
def _construct_axes_dict_for_slice(self, axes=None, **kwargs):
""" return an axes dictionary for myself """
d = dict([(self._AXIS_SLICEMAP[a], self._get_axis(a))
for a in (axes or self._AXIS_ORDERS)])
d.update(kwargs)
return d
def _construct_axes_from_arguments(self, args, kwargs, require_all=False):
""" construct and returns axes if supplied in args/kwargs
if require_all, raise if all axis arguments are not supplied
return a tuple of (axes, kwargs) """
# construct the args
args = list(args)
for a in self._AXIS_ORDERS:
# if we have an alias for this axis
alias = self._AXIS_IALIASES.get(a)
if alias is not None:
if a in kwargs:
if alias in kwargs:
raise TypeError(
"arguments are mutually exclusive for [%s,%s]" %
(a, alias)
)
continue
if alias in kwargs:
kwargs[a] = kwargs.pop(alias)
continue
# look for a argument by position
if a not in kwargs:
try:
kwargs[a] = args.pop(0)
except (IndexError):
if require_all:
raise TypeError(
"not enough/duplicate arguments specified!")
axes = dict([(a, kwargs.get(a)) for a in self._AXIS_ORDERS])
return axes, kwargs
@classmethod
def _from_axes(cls, data, axes, **kwargs):
# for construction from BlockManager
if isinstance(data, BlockManager):
return cls(data, **kwargs)
else:
if cls._AXIS_REVERSED:
axes = axes[::-1]
d = cls._construct_axes_dict_from(cls, axes, copy=False)
d.update(kwargs)
return cls(data, **d)
def _get_axis_number(self, axis):
axis = self._AXIS_ALIASES.get(axis, axis)
if com.is_integer(axis):
if axis in self._AXIS_NAMES:
return axis
else:
try:
return self._AXIS_NUMBERS[axis]
except:
pass
raise ValueError('No axis named {0} for object type {1}'
.format(axis, type(self)))
def _get_axis_name(self, axis):
axis = self._AXIS_ALIASES.get(axis, axis)
if isinstance(axis, string_types):
if axis in self._AXIS_NUMBERS:
return axis
else:
try:
return self._AXIS_NAMES[axis]
except:
pass
raise ValueError('No axis named {0} for object type {1}'
.format(axis, type(self)))
def _get_axis(self, axis):
name = self._get_axis_name(axis)
return getattr(self, name)
def _get_block_manager_axis(self, axis):
""" map the axis to the block_manager axis """
axis = self._get_axis_number(axis)
if self._AXIS_REVERSED:
m = self._AXIS_LEN - 1
return m - axis
return axis
def _get_axis_resolvers(self, axis):
# index or columns
axis_index = getattr(self, axis)
d = dict()
prefix = axis[0]
for i, name in enumerate(axis_index.names):
if name is not None:
key = level = name
else:
# prefix with 'i' or 'c' depending on the input axis
# e.g., you must do ilevel_0 for the 0th level of an unnamed
                # multiindex
key = '{prefix}level_{i}'.format(prefix=prefix, i=i)
level = i
level_values = axis_index.get_level_values(level)
s = level_values.to_series()
s.index = axis_index
d[key] = s
# put the index/columns itself in the dict
if isinstance(axis_index, MultiIndex):
dindex = axis_index
else:
dindex = axis_index.to_series()
d[axis] = dindex
return d
def _get_index_resolvers(self):
d = {}
for axis_name in self._AXIS_ORDERS:
d.update(self._get_axis_resolvers(axis_name))
return d
@property
def _info_axis(self):
return getattr(self, self._info_axis_name)
@property
def _stat_axis(self):
return getattr(self, self._stat_axis_name)
@property
def shape(self):
"tuple of axis dimensions"
return tuple(len(self._get_axis(a)) for a in self._AXIS_ORDERS)
@property
def axes(self):
"index(es) of the NDFrame"
# we do it this way because if we have reversed axes, then
# the block manager shows then reversed
return [self._get_axis(a) for a in self._AXIS_ORDERS]
@property
def ndim(self):
"Number of axes / array dimensions"
return self._data.ndim
@property
def size(self):
"number of elements in the NDFrame"
return np.prod(self.shape)
def _expand_axes(self, key):
new_axes = []
for k, ax in zip(key, self.axes):
if k not in ax:
if type(k) != ax.dtype.type:
ax = ax.astype('O')
new_axes.append(ax.insert(len(ax), k))
else:
new_axes.append(ax)
return new_axes
def set_axis(self, axis, labels):
""" public verson of axis assignment """
setattr(self,self._get_axis_name(axis),labels)
def _set_axis(self, axis, labels):
self._data.set_axis(axis, labels)
self._clear_item_cache()
_shared_docs['transpose'] = """
Permute the dimensions of the %(klass)s
Parameters
----------
args : %(args_transpose)s
copy : boolean, default False
Make a copy of the underlying data. Mixed-dtype data will
always result in a copy
Examples
--------
>>> p.transpose(2, 0, 1)
>>> p.transpose(2, 0, 1, copy=True)
Returns
-------
y : same as input
"""
@Appender(_shared_docs['transpose'] % _shared_doc_kwargs)
def transpose(self, *args, **kwargs):
# construct the args
axes, kwargs = self._construct_axes_from_arguments(
args, kwargs, require_all=True)
axes_names = tuple([self._get_axis_name(axes[a])
for a in self._AXIS_ORDERS])
axes_numbers = tuple([self._get_axis_number(axes[a])
for a in self._AXIS_ORDERS])
# we must have unique axes
if len(axes) != len(set(axes)):
raise ValueError('Must specify %s unique axes' % self._AXIS_LEN)
new_axes = self._construct_axes_dict_from(
self, [self._get_axis(x) for x in axes_names])
new_values = self.values.transpose(axes_numbers)
if kwargs.get('copy') or (len(args) and args[-1]):
new_values = new_values.copy()
return self._constructor(new_values, **new_axes).__finalize__(self)
def swapaxes(self, axis1, axis2, copy=True):
"""
Interchange axes and swap values axes appropriately
Returns
-------
y : same as input
"""
i = self._get_axis_number(axis1)
j = self._get_axis_number(axis2)
if i == j:
if copy:
return self.copy()
return self
mapping = {i: j, j: i}
new_axes = (self._get_axis(mapping.get(k, k))
for k in range(self._AXIS_LEN))
new_values = self.values.swapaxes(i, j)
if copy:
new_values = new_values.copy()
return self._constructor(new_values, *new_axes).__finalize__(self)
def pop(self, item):
"""
Return item and drop from frame. Raise KeyError if not found.
"""
result = self[item]
del self[item]
return result
def squeeze(self):
""" squeeze length 1 dimensions """
try:
return self.ix[tuple([slice(None) if len(a) > 1 else a[0]
for a in self.axes])]
except:
return self
def swaplevel(self, i, j, axis=0):
"""
Swap levels i and j in a MultiIndex on a particular axis
Parameters
----------
i, j : int, string (can be mixed)
Level of index to be swapped. Can pass level name as string.
Returns
-------
swapped : type of caller (new object)
"""
axis = self._get_axis_number(axis)
result = self.copy()
labels = result._data.axes[axis]
result._data.set_axis(axis, labels.swaplevel(i, j))
return result
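    # Illustrative (hypothetical frame): for a DataFrame whose index is a
    # MultiIndex with levels named 'first' and 'second',
    #   df.swaplevel('first', 'second')
    # exchanges the two index levels without reordering the rows.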
#----------------------------------------------------------------------
# Rename
# TODO: define separate funcs for DataFrame, Series and Panel so you can
# get completion on keyword arguments.
_shared_docs['rename'] = """
    Alter axes using input function or functions. Function / dict values must be
unique (1-to-1). Labels not contained in a dict / Series will be left
as-is.
Parameters
----------
%(axes)s : dict-like or function, optional
Transformation to apply to that axis values
copy : boolean, default True
Also copy underlying data
inplace : boolean, default False
Whether to return a new %(klass)s. If True then value of copy is
ignored.
Returns
-------
renamed : %(klass)s (new object)
"""
@Appender(_shared_docs['rename'] % dict(axes='axes keywords for this'
' object', klass='NDFrame'))
def rename(self, *args, **kwargs):
axes, kwargs = self._construct_axes_from_arguments(args, kwargs)
copy = kwargs.get('copy', True)
inplace = kwargs.get('inplace', False)
if (com._count_not_none(*axes.values()) == 0):
raise TypeError('must pass an index to rename')
# renamer function if passed a dict
def _get_rename_function(mapper):
if isinstance(mapper, (dict, ABCSeries)):
def f(x):
if x in mapper:
return mapper[x]
else:
return x
else:
f = mapper
return f
self._consolidate_inplace()
result = self if inplace else self.copy(deep=copy)
# start in the axis order to eliminate too many copies
for axis in lrange(self._AXIS_LEN):
v = axes.get(self._AXIS_NAMES[axis])
if v is None:
continue
f = _get_rename_function(v)
baxis = self._get_block_manager_axis(axis)
result._data = result._data.rename_axis(f, axis=baxis, copy=copy)
result._clear_item_cache()
if inplace:
self._update_inplace(result._data)
else:
return result.__finalize__(self)
rename.__doc__ = _shared_docs['rename']
def rename_axis(self, mapper, axis=0, copy=True, inplace=False):
"""
Alter index and / or columns using input function or functions.
Function / dict values must be unique (1-to-1). Labels not contained in
a dict / Series will be left as-is.
Parameters
----------
mapper : dict-like or function, optional
axis : int or string, default 0
copy : boolean, default True
Also copy underlying data
inplace : boolean, default False
Returns
-------
renamed : type of caller
"""
axis = self._get_axis_name(axis)
d = {'copy': copy, 'inplace': inplace}
d[axis] = mapper
return self.rename(**d)
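    # Illustrative: df.rename_axis(str.upper) applies the mapper to the row
    # labels (axis=0), and df.rename_axis(str.upper, axis=1) to the column
    # labels; the underlying data is left untouched.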
#----------------------------------------------------------------------
# Comparisons
def _indexed_same(self, other):
return all([self._get_axis(a).equals(other._get_axis(a))
for a in self._AXIS_ORDERS])
def __neg__(self):
values = _values_from_object(self)
if values.dtype == np.bool_:
arr = operator.inv(values)
else:
arr = operator.neg(values)
return self.__array_wrap__(arr)
def __invert__(self):
try:
arr = operator.inv(_values_from_object(self))
return self.__array_wrap__(arr)
except:
# inv fails with 0 len
if not np.prod(self.shape):
return self
raise
def equals(self, other):
"""
Determines if two NDFrame objects contain the same elements. NaNs in the
same location are considered equal.
"""
if not isinstance(other, self._constructor):
return False
return self._data.equals(other._data)
#----------------------------------------------------------------------
# Iteration
def __hash__(self):
raise TypeError('{0!r} objects are mutable, thus they cannot be'
' hashed'.format(self.__class__.__name__))
def __iter__(self):
"""
        Iterate over the info axis
"""
return iter(self._info_axis)
# can we get a better explanation of this?
def keys(self):
"""Get the 'info axis' (see Indexing for more)
This is index for Series, columns for DataFrame and major_axis for
Panel."""
return self._info_axis
def iteritems(self):
"""Iterate over (label, values) on info axis
This is index for Series, columns for DataFrame, major_axis for Panel,
and so on.
"""
for h in self._info_axis:
yield h, self[h]
# originally used to get around 2to3's changes to iteritems.
# Now unnecessary. Sidenote: don't want to deprecate this for a while,
# otherwise libraries that use 2to3 will have issues.
def iterkv(self, *args, **kwargs):
"iteritems alias used to get around 2to3. Deprecated"
warnings.warn("iterkv is deprecated and will be removed in a future "
"release, use ``iteritems`` instead.",
DeprecationWarning)
return self.iteritems(*args, **kwargs)
def __len__(self):
"""Returns length of info axis """
return len(self._info_axis)
def __contains__(self, key):
"""True if the key is in the info axis """
return key in self._info_axis
@property
def empty(self):
"True if NDFrame is entirely empty [no items]"
return not all(len(self._get_axis(a)) > 0 for a in self._AXIS_ORDERS)
def __nonzero__(self):
raise ValueError("The truth value of a {0} is ambiguous. "
"Use a.empty, a.bool(), a.item(), a.any() or a.all()."
.format(self.__class__.__name__))
__bool__ = __nonzero__
def bool(self):
""" Return the bool of a single element PandasObject
This must be a boolean scalar value, either True or False
Raise a ValueError if the PandasObject does not have exactly
1 element, or that element is not boolean """
v = self.squeeze()
if isinstance(v, (bool, np.bool_)):
return bool(v)
elif np.isscalar(v):
raise ValueError("bool cannot act on a non-boolean single element "
"{0}".format(self.__class__.__name__))
self.__nonzero__()
def __abs__(self):
return self.abs()
#----------------------------------------------------------------------
# Array Interface
def __array__(self, dtype=None):
return _values_from_object(self)
def __array_wrap__(self, result, context=None):
d = self._construct_axes_dict(self._AXIS_ORDERS, copy=False)
return self._constructor(result, **d).__finalize__(self)
    # ideally we would define this to avoid the getattr checks, but
    # it is slower
#@property
#def __array_interface__(self):
# """ provide numpy array interface method """
# values = self.values
# return dict(typestr=values.dtype.str,shape=values.shape,data=values)
def to_dense(self):
"Return dense representation of NDFrame (as opposed to sparse)"
# compat
return self
#----------------------------------------------------------------------
# Picklability
def __getstate__(self):
return self._data
def __setstate__(self, state):
if isinstance(state, BlockManager):
self._data = state
elif isinstance(state, dict):
typ = state.get('_typ')
if typ is not None:
# set in the order of internal names
# to avoid definitional recursion
# e.g. say fill_value needing _data to be
# defined
meta = set(self._internal_names + self._metadata)
for k in list(meta):
if k in state:
v = state[k]
object.__setattr__(self, k, v)
for k, v in state.items():
if k not in meta:
object.__setattr__(self, k, v)
else:
self._unpickle_series_compat(state)
elif isinstance(state[0], dict):
if len(state) == 5:
self._unpickle_sparse_frame_compat(state)
else:
self._unpickle_frame_compat(state)
elif len(state) == 4:
self._unpickle_panel_compat(state)
elif len(state) == 2:
self._unpickle_series_compat(state)
else: # pragma: no cover
# old pickling format, for compatibility
self._unpickle_matrix_compat(state)
self._item_cache = {}
#----------------------------------------------------------------------
# IO
#----------------------------------------------------------------------
# I/O Methods
def to_json(self, path_or_buf=None, orient=None, date_format='epoch',
double_precision=10, force_ascii=True, date_unit='ms',
default_handler=None):
"""
Convert the object to a JSON string.
Note NaN's and None will be converted to null and datetime objects
will be converted to UNIX timestamps.
Parameters
----------
path_or_buf : the path or buffer to write the result string
if this is None, return a StringIO of the converted string
orient : string
* Series
- default is 'index'
- allowed values are: {'split','records','index'}
* DataFrame
- default is 'columns'
- allowed values are:
{'split','records','index','columns','values'}
* The format of the JSON string
- split : dict like
{index -> [index], columns -> [columns], data -> [values]}
- records : list like
[{column -> value}, ... , {column -> value}]
- index : dict like {index -> {column -> value}}
- columns : dict like {column -> {index -> value}}
- values : just the values array
date_format : {'epoch', 'iso'}
            Type of date conversion. ``epoch`` = epoch milliseconds,
            ``iso`` = ISO8601; default is ``epoch``.
double_precision : The number of decimal places to use when encoding
floating point values, default 10.
force_ascii : force encoded string to be ASCII, default True.
date_unit : string, default 'ms' (milliseconds)
The time unit to encode to, governs timestamp and ISO8601
precision. One of 's', 'ms', 'us', 'ns' for second, millisecond,
microsecond, and nanosecond respectively.
default_handler : callable, default None
Handler to call if object cannot otherwise be converted to a
suitable format for JSON. Should receive a single argument which is
the object to convert and return a serialisable object.
Returns
-------
same type as input object with filtered info axis
"""
from pandas.io import json
return json.to_json(
path_or_buf=path_or_buf,
obj=self, orient=orient,
date_format=date_format,
double_precision=double_precision,
force_ascii=force_ascii,
date_unit=date_unit,
default_handler=default_handler)
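    # Illustrative: df.to_json(orient='records', date_format='iso') serializes
    # the frame as a list of row records with ISO8601 timestamps; pass
    # path_or_buf to write the result to a file instead of returning it.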
def to_hdf(self, path_or_buf, key, **kwargs):
""" activate the HDFStore
Parameters
----------
path_or_buf : the path (string) or buffer to put the store
key : string
indentifier for the group in the store
mode : optional, {'a', 'w', 'r', 'r+'}, default 'a'
``'r'``
Read-only; no data can be modified.
``'w'``
Write; a new file is created (an existing file with the same
name would be deleted).
``'a'``
Append; an existing file is opened for reading and writing,
and if the file does not exist it is created.
``'r+'``
It is similar to ``'a'``, but the file must already exist.
format : 'fixed(f)|table(t)', default is 'fixed'
fixed(f) : Fixed format
Fast writing/reading. Not-appendable, nor searchable
table(t) : Table format
Write as a PyTables Table structure which may perform
worse but allow more flexible operations like searching
/ selecting subsets of the data
append : boolean, default False
            For Table formats, append the input data to the existing table
complevel : int, 1-9, default 0
If a complib is specified compression will be applied
where possible
complib : {'zlib', 'bzip2', 'lzo', 'blosc', None}, default None
If complevel is > 0 apply compression to objects written
in the store wherever possible
fletcher32 : bool, default False
If applying compression use the fletcher32 checksum
"""
from pandas.io import pytables
return pytables.to_hdf(path_or_buf, key, self, **kwargs)
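    # Illustrative (hypothetical file name):
    #   df.to_hdf('store.h5', 'df', mode='w', format='table')
    # writes ``df`` under the key 'df' as a queryable PyTables table; it can
    # be read back with pd.read_hdf('store.h5', 'df').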
def to_msgpack(self, path_or_buf=None, **kwargs):
"""
msgpack (serialize) object to input file path
THIS IS AN EXPERIMENTAL LIBRARY and the storage format
may not be stable until a future release.
Parameters
----------
path : string File path, buffer-like, or None
if None, return generated string
append : boolean whether to append to an existing msgpack
(default is False)
compress : type of compressor (zlib or blosc), default to None (no
compression)
"""
from pandas.io import packers
return packers.to_msgpack(path_or_buf, self, **kwargs)
def to_sql(self, name, con, flavor='sqlite', schema=None, if_exists='fail',
index=True, index_label=None, chunksize=None, dtype=None):
"""
Write records stored in a DataFrame to a SQL database.
Parameters
----------
name : string
Name of SQL table
con : SQLAlchemy engine or DBAPI2 connection (legacy mode)
Using SQLAlchemy makes it possible to use any DB supported by that
library.
If a DBAPI2 object, only sqlite3 is supported.
flavor : {'sqlite', 'mysql'}, default 'sqlite'
The flavor of SQL to use. Ignored when using SQLAlchemy engine.
'mysql' is deprecated and will be removed in future versions, but it
will be further supported through SQLAlchemy engines.
schema : string, default None
Specify the schema (if database flavor supports this). If None, use
default schema.
if_exists : {'fail', 'replace', 'append'}, default 'fail'
- fail: If table exists, do nothing.
- replace: If table exists, drop it, recreate it, and insert data.
- append: If table exists, insert data. Create if does not exist.
index : boolean, default True
Write DataFrame index as a column.
index_label : string or sequence, default None
Column label for index column(s). If None is given (default) and
`index` is True, then the index names are used.
A sequence should be given if the DataFrame uses MultiIndex.
chunksize : int, default None
If not None, then rows will be written in batches of this size at a
time. If None, all rows will be written at once.
dtype : dict of column name to SQL type, default None
            Optionally specify the datatype for columns. The SQL type should
be a SQLAlchemy type, or a string for sqlite3 fallback connection.
"""
from pandas.io import sql
sql.to_sql(
self, name, con, flavor=flavor, schema=schema, if_exists=if_exists,
index=index, index_label=index_label, chunksize=chunksize,
dtype=dtype)
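    # Illustrative sketch using the sqlite3 fallback with an in-memory DB:
    #   import sqlite3
    #   con = sqlite3.connect(':memory:')
    #   df.to_sql('my_table', con, if_exists='replace', index=False)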
def to_pickle(self, path):
"""
Pickle (serialize) object to input file path
Parameters
----------
path : string
File path
"""
from pandas.io.pickle import to_pickle
return to_pickle(self, path)
def save(self, path): # TODO remove in 0.14
"Deprecated. Use to_pickle instead"
import warnings
from pandas.io.pickle import to_pickle
warnings.warn("save is deprecated, use to_pickle", FutureWarning)
return to_pickle(self, path)
def load(self, path): # TODO remove in 0.14
"Deprecated. Use read_pickle instead."
import warnings
from pandas.io.pickle import read_pickle
warnings.warn("load is deprecated, use pd.read_pickle", FutureWarning)
return read_pickle(path)
def to_clipboard(self, excel=None, sep=None, **kwargs):
"""
Attempt to write text representation of object to the system clipboard
This can be pasted into Excel, for example.
Parameters
----------
excel : boolean, defaults to True
if True, use the provided separator, writing in a csv
format for allowing easy pasting into excel.
if False, write a string representation of the object
to the clipboard
sep : optional, defaults to tab
other keywords are passed to to_csv
Notes
-----
Requirements for your platform
- Linux: xclip, or xsel (with gtk or PyQt4 modules)
- Windows: none
- OS X: none
"""
from pandas.io import clipboard
clipboard.to_clipboard(self, excel=excel, sep=sep, **kwargs)
#----------------------------------------------------------------------
# Fancy Indexing
@classmethod
def _create_indexer(cls, name, indexer):
""" create an indexer like _name in the class """
if getattr(cls, name, None) is None:
iname = '_%s' % name
setattr(cls, iname, None)
def _indexer(self):
i = getattr(self, iname)
if i is None:
i = indexer(self, name)
setattr(self, iname, i)
return i
setattr(cls, name, property(_indexer))
# add to our internal names set
cls._internal_names_set.add(iname)
def get(self, key, default=None):
"""
Get item from object for given key (DataFrame column, Panel slice,
etc.). Returns default value if not found
Parameters
----------
key : object
Returns
-------
value : type of items contained in object
"""
try:
return self[key]
except (KeyError, ValueError, IndexError):
return default
def __getitem__(self, item):
return self._get_item_cache(item)
def _get_item_cache(self, item):
""" return the cached item, item represents a label indexer """
cache = self._item_cache
res = cache.get(item)
if res is None:
values = self._data.get(item)
res = self._box_item_values(item, values)
cache[item] = res
res._set_as_cached(item, self)
# for a chain
res.is_copy = self.is_copy
return res
def _set_as_cached(self, item, cacher):
""" set the _cacher attribute on the calling object with
a weakref to cacher """
self._cacher = (item, weakref.ref(cacher))
def _iget_item_cache(self, item):
""" return the cached item, item represents a positional indexer """
ax = self._info_axis
if ax.is_unique:
lower = self._get_item_cache(ax[item])
else:
lower = self.take(item, axis=self._info_axis_number, convert=True)
return lower
def _box_item_values(self, key, values):
raise NotImplementedError
def _maybe_cache_changed(self, item, value):
"""
the object has called back to us saying
maybe it has changed
numpy < 1.8 has an issue with object arrays and aliasing
GH6026
"""
self._data.set(item, value, check=pd._np_version_under1p8)
@property
def _is_cached(self):
""" boolean : return if I am cached """
return getattr(self, '_cacher', None) is not None
def _get_cacher(self):
""" return my cacher or None """
cacher = getattr(self, '_cacher', None)
if cacher is not None:
cacher = cacher[1]()
return cacher
@property
def _is_view(self):
""" boolean : return if I am a view of another array """
return self._data.is_view
def _maybe_update_cacher(self, clear=False, verify_is_copy=True):
"""
see if we need to update our parent cacher
if clear, then clear our cache
Parameters
----------
clear : boolean, default False
clear the item cache
verify_is_copy : boolean, default True
provide is_copy checks
"""
cacher = getattr(self, '_cacher', None)
if cacher is not None:
ref = cacher[1]()
            # we are trying to reference a dead referent, hence
            # a copy
if ref is None:
del self._cacher
else:
try:
ref._maybe_cache_changed(cacher[0], self)
except:
pass
if verify_is_copy:
self._check_setitem_copy(stacklevel=5, t='referant')
if clear:
self._clear_item_cache()
def _clear_item_cache(self, i=None):
if i is not None:
self._item_cache.pop(i, None)
else:
self._item_cache.clear()
def _slice(self, slobj, axis=0, typ=None):
"""
Construct a slice of this container.
typ parameter is maintained for compatibility with Series slicing.
"""
axis = self._get_block_manager_axis(axis)
result = self._constructor(self._data.get_slice(slobj, axis=axis))
# this could be a view
# but only in a single-dtyped view slicable case
is_copy = axis!=0 or result._is_view
result._set_is_copy(self, copy=is_copy)
return result
def _set_item(self, key, value):
self._data.set(key, value)
self._clear_item_cache()
def _set_is_copy(self, ref=None, copy=True):
if not copy:
self.is_copy = None
else:
if ref is not None:
self.is_copy = weakref.ref(ref)
else:
self.is_copy = None
def _check_is_chained_assignment_possible(self):
"""
        check if we are a view, have a cacher, and are of mixed type;
        if so, then force a setitem_copy check.
        should be called just prior to setting a value.
        returns True if we are a view of a cached object and single-dtyped,
        meaning that the cacher should be updated following the setting.
"""
if self._is_view and self._is_cached:
ref = self._get_cacher()
if ref is not None and ref._is_mixed_type:
self._check_setitem_copy(stacklevel=4, t='referant', force=True)
return True
elif self.is_copy:
self._check_setitem_copy(stacklevel=4, t='referant')
return False
def _check_setitem_copy(self, stacklevel=4, t='setting', force=False):
"""
Parameters
----------
stacklevel : integer, default 4
the level to show of the stack when the error is output
t : string, the type of setting error
force : boolean, default False
if True, then force showing an error
        validate if we are doing a setitem on a chained copy.
If you call this function, be sure to set the stacklevel such that the
user will see the error *at the level of setting*
It is technically possible to figure out that we are setting on
a copy even WITH a multi-dtyped pandas object. In other words, some blocks
may be views while other are not. Currently _is_view will ALWAYS return False
for multi-blocks to avoid having to handle this case.
df = DataFrame(np.arange(0,9), columns=['count'])
df['group'] = 'b'
        # this technically need not raise SettingWithCopy if both are views (which is
        # not generally guaranteed, but is usually True); however, this is in general
        # not a good practice and we recommend using .loc
df.iloc[0:5]['group'] = 'a'
"""
if force or self.is_copy:
value = config.get_option('mode.chained_assignment')
if value is None:
return
            # see if the copy is not actually referred to; if so, then dissolve
# the copy weakref
try:
gc.collect(2)
if not gc.get_referents(self.is_copy()):
self.is_copy = None
return
except:
pass
# a custom message
if isinstance(self.is_copy, string_types):
t = self.is_copy
elif t == 'referant':
t = ("\n"
"A value is trying to be set on a copy of a slice from a "
"DataFrame\n\n"
"See the the caveats in the documentation: "
"http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy")
else:
t = ("\n"
"A value is trying to be set on a copy of a slice from a "
"DataFrame.\n"
"Try using .loc[row_indexer,col_indexer] = value instead\n\n"
"See the the caveats in the documentation: "
"http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy")
if value == 'raise':
raise SettingWithCopyError(t)
elif value == 'warn':
warnings.warn(t, SettingWithCopyWarning, stacklevel=stacklevel)
def __delitem__(self, key):
"""
Delete item
"""
deleted = False
maybe_shortcut = False
if hasattr(self, 'columns') and isinstance(self.columns, MultiIndex):
try:
maybe_shortcut = key not in self.columns._engine
except TypeError:
pass
if maybe_shortcut:
# Allow shorthand to delete all columns whose first len(key)
# elements match key:
if not isinstance(key, tuple):
key = (key,)
for col in self.columns:
if isinstance(col, tuple) and col[:len(key)] == key:
del self[col]
deleted = True
if not deleted:
# If the above loop ran and didn't delete anything because
# there was no match, this call should raise the appropriate
# exception:
self._data.delete(key)
try:
del self._item_cache[key]
except KeyError:
pass
def take(self, indices, axis=0, convert=True, is_copy=True):
"""
Analogous to ndarray.take
Parameters
----------
indices : list / array of ints
axis : int, default 0
convert : translate neg to pos indices (default)
is_copy : mark the returned frame as a copy
Returns
-------
taken : type of caller
"""
new_data = self._data.take(indices,
axis=self._get_block_manager_axis(axis),
convert=True, verify=True)
result = self._constructor(new_data).__finalize__(self)
# maybe set copy if we didn't actually change the index
if is_copy and not result._get_axis(axis).equals(self._get_axis(axis)):
result._set_is_copy(self)
return result
def xs(self, key, axis=0, level=None, copy=None, drop_level=True):
"""
Returns a cross-section (row(s) or column(s)) from the Series/DataFrame.
Defaults to cross-section on the rows (axis=0).
Parameters
----------
key : object
Some label contained in the index, or partially in a MultiIndex
axis : int, default 0
Axis to retrieve cross-section on
level : object, defaults to first n levels (n=1 or len(key))
In case of a key partially contained in a MultiIndex, indicate
which levels are used. Levels can be referred by label or position.
copy : boolean [deprecated]
Whether to make a copy of the data
drop_level : boolean, default True
If False, returns object with same levels as self.
Examples
--------
>>> df
A B C
a 4 5 2
b 4 0 9
c 9 7 3
>>> df.xs('a')
A 4
B 5
C 2
Name: a
>>> df.xs('C', axis=1)
a 2
b 9
c 3
Name: C
>>> df
A B C D
first second third
bar one 1 4 1 8 9
two 1 7 5 5 0
baz one 1 6 6 8 0
three 2 5 3 5 3
>>> df.xs(('baz', 'three'))
A B C D
third
2 5 3 5 3
>>> df.xs('one', level=1)
A B C D
first third
bar 1 4 1 8 9
baz 1 6 6 8 0
>>> df.xs(('baz', 2), level=[0, 'third'])
A B C D
second
three 5 3 5 3
Returns
-------
xs : Series or DataFrame
Notes
-----
xs is only for getting, not setting values.
MultiIndex Slicers is a generic way to get/set values on any level or levels;
it is a superset of xs functionality. See :ref:`MultiIndex Slicers <advanced.mi_slicers>`
"""
if copy is not None:
warnings.warn("copy keyword is deprecated, "
"default is to return a copy or a view if possible")
axis = self._get_axis_number(axis)
labels = self._get_axis(axis)
if level is not None:
loc, new_ax = labels.get_loc_level(key, level=level,
drop_level=drop_level)
# convert to a label indexer if needed
if isinstance(loc, slice):
lev_num = labels._get_level_number(level)
if labels.levels[lev_num].inferred_type == 'integer':
loc = labels[loc]
# create the tuple of the indexer
indexer = [slice(None)] * self.ndim
indexer[axis] = loc
indexer = tuple(indexer)
result = self.ix[indexer]
setattr(result, result._get_axis_name(axis), new_ax)
return result
if axis == 1:
return self[key]
self._consolidate_inplace()
index = self.index
if isinstance(index, MultiIndex):
loc, new_index = self.index.get_loc_level(key,
drop_level=drop_level)
else:
loc = self.index.get_loc(key)
if isinstance(loc, np.ndarray):
if loc.dtype == np.bool_:
inds, = loc.nonzero()
return self.take(inds, axis=axis, convert=False)
else:
return self.take(loc, axis=axis, convert=True)
if not np.isscalar(loc):
new_index = self.index[loc]
if np.isscalar(loc):
from pandas import Series
new_values = self._data.fast_xs(loc)
# may need to box a datelike-scalar
#
# if we encounter an array-like and we only have 1 dim
# that means that there are lists/ndarrays inside the Series!
# so just return them (GH 6394)
if not is_list_like(new_values) or self.ndim == 1:
return _maybe_box_datetimelike(new_values)
result = Series(new_values, index=self.columns,
name=self.index[loc])
else:
result = self.iloc[loc]
result.index = new_index
# this could be a view
# but only in a single-dtyped view slicable case
result._set_is_copy(self, copy=not result._is_view)
return result
_xs = xs
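# Illustrative usage sketch (comment only; not part of the original source).
# `xs` pulls a cross-section by *label*; the frame here is hypothetical.
#   >>> df = DataFrame({'A': [1, 2], 'B': [3, 4]}, index=['x', 'y'])
#   >>> df.xs('x')            # the row labelled 'x', returned as a Series
#   >>> df.xs('B', axis=1)    # the column 'B', returned as a Series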
# TODO: Check if this was clearer in 0.12
def select(self, crit, axis=0):
"""
Return data corresponding to axis labels matching criteria
Parameters
----------
crit : function
To be called on each index (label). Should return True or False
axis : int
Returns
-------
selection : type of caller
"""
axis = self._get_axis_number(axis)
axis_name = self._get_axis_name(axis)
axis_values = self._get_axis(axis)
if len(axis_values) > 0:
new_axis = axis_values[
np.asarray([bool(crit(label)) for label in axis_values])]
else:
new_axis = axis_values
return self.reindex(**{axis_name: new_axis})
def reindex_like(self, other, method=None, copy=True, limit=None):
""" return an object with matching indicies to myself
Parameters
----------
other : Object
method : string or None
copy : boolean, default True
limit : int, default None
Maximum size gap to forward or backward fill
Notes
-----
Like calling s.reindex(index=other.index, columns=other.columns,
method=...)
Returns
-------
reindexed : same as input
"""
d = other._construct_axes_dict(method=method, copy=copy, limit=limit)
return self.reindex(**d)
def drop(self, labels, axis=0, level=None, inplace=False, **kwargs):
"""
Return new object with labels in requested axis removed
Parameters
----------
labels : single label or list-like
axis : int or axis name
level : int or level name, default None
For MultiIndex
inplace : bool, default False
If True, do operation inplace and return None.
Returns
-------
dropped : type of caller
"""
axis = self._get_axis_number(axis)
axis_name = self._get_axis_name(axis)
axis, axis_ = self._get_axis(axis), axis
if axis.is_unique:
if level is not None:
if not isinstance(axis, MultiIndex):
raise AssertionError('axis must be a MultiIndex')
new_axis = axis.drop(labels, level=level)
else:
new_axis = axis.drop(labels)
dropped = self.reindex(**{axis_name: new_axis})
try:
dropped.axes[axis_].set_names(axis.names, inplace=True)
except AttributeError:
pass
result = dropped
else:
labels = com._index_labels_to_array(labels)
if level is not None:
if not isinstance(axis, MultiIndex):
raise AssertionError('axis must be a MultiIndex')
indexer = ~lib.ismember(axis.get_level_values(level).values,
set(labels))
else:
indexer = ~axis.isin(labels)
slicer = [slice(None)] * self.ndim
slicer[self._get_axis_number(axis_name)] = indexer
result = self.ix[tuple(slicer)]
if inplace:
self._update_inplace(result)
else:
return result
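# Illustrative usage sketch (comment only; hypothetical frame, not from the
# original source). `drop` removes labels from either axis.
#   >>> df = DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]}, index=['x', 'y', 'z'])
#   >>> df.drop('y')           # remove the row labelled 'y'
#   >>> df.drop('B', axis=1)   # remove the column 'B'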
def _update_inplace(self, result, verify_is_copy=True):
"""
replace self internals with result.
Parameters
----------
verify_is_copy : boolean, default True
provide is_copy checks
"""
# NOTE: This does *not* call __finalize__ and that's an explicit
# decision that we may revisit in the future.
self._reset_cache()
self._clear_item_cache()
self._data = getattr(result, '_data', result)
self._maybe_update_cacher(verify_is_copy=verify_is_copy)
def add_prefix(self, prefix):
"""
Concatenate prefix string with panel items names.
Parameters
----------
prefix : string
Returns
-------
with_prefix : type of caller
"""
new_data = self._data.add_prefix(prefix)
return self._constructor(new_data).__finalize__(self)
def add_suffix(self, suffix):
"""
Concatenate suffix string with panel items names
Parameters
----------
suffix : string
Returns
-------
with_suffix : type of caller
"""
new_data = self._data.add_suffix(suffix)
return self._constructor(new_data).__finalize__(self)
def sort_index(self, axis=0, ascending=True):
"""
Sort object by labels (along an axis)
Parameters
----------
axis : {0, 1}
Sort index/rows versus columns
ascending : boolean, default True
Sort ascending vs. descending
Returns
-------
sorted_obj : type of caller
"""
axis = self._get_axis_number(axis)
axis_name = self._get_axis_name(axis)
labels = self._get_axis(axis)
sort_index = labels.argsort()
if not ascending:
sort_index = sort_index[::-1]
new_axis = labels.take(sort_index)
return self.reindex(**{axis_name: new_axis})
_shared_docs['reindex'] = """
Conform %(klass)s to new index with optional filling logic, placing
NA/NaN in locations having no value in the previous index. A new object
is produced unless the new index is equivalent to the current one and
copy=False
Parameters
----------
%(axes)s : array-like, optional (can be specified in order, or as
keywords)
New labels / index to conform to. Preferably an Index object to
avoid duplicating data
method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None
Method to use for filling holes in reindexed DataFrame
pad / ffill: propagate last valid observation forward to next valid
backfill / bfill: use NEXT valid observation to fill gap
copy : boolean, default True
Return a new object, even if the passed indexes are the same
level : int or name
Broadcast across a level, matching Index values on the
passed MultiIndex level
fill_value : scalar, default np.NaN
Value to use for missing values. Defaults to NaN, but can be any
"compatible" value
limit : int, default None
Maximum size gap to forward or backward fill
Examples
--------
>>> df.reindex(index=[date1, date2, date3], columns=['A', 'B', 'C'])
Returns
-------
reindexed : %(klass)s
"""
# TODO: Decide if we care about having different examples for different
# kinds
@Appender(_shared_docs['reindex'] % dict(axes="axes", klass="NDFrame"))
def reindex(self, *args, **kwargs):
# construct the args
axes, kwargs = self._construct_axes_from_arguments(args, kwargs)
method = com._clean_fill_method(kwargs.get('method'))
level = kwargs.get('level')
copy = kwargs.get('copy', True)
limit = kwargs.get('limit')
fill_value = kwargs.get('fill_value', np.nan)
self._consolidate_inplace()
# if all axes that are requested to reindex are equal, then only copy
# if indicated must have index names equal here as well as values
if all([self._get_axis(axis).identical(ax)
for axis, ax in axes.items() if ax is not None]):
if copy:
return self.copy()
return self
# check if we are a multi reindex
if self._needs_reindex_multi(axes, method, level):
try:
return self._reindex_multi(axes, copy, fill_value)
except:
pass
# perform the reindex on the axes
return self._reindex_axes(axes, level, limit,
method, fill_value, copy).__finalize__(self)
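# Illustrative usage sketch (comment only; assumes a pandas Series constructor
# is in scope; not part of the original source). Missing labels become NaN
# unless a fill_value or method is given.
#   >>> s = Series([1, 2, 3], index=['a', 'b', 'c'])
#   >>> s.reindex(['c', 'b', 'e'])               # 'e' is filled with NaN
#   >>> s.reindex(['c', 'b', 'e'], fill_value=0) # 'e' is filled with 0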
def _reindex_axes(self, axes, level, limit, method, fill_value, copy):
""" perform the reinxed for all the axes """
obj = self
for a in self._AXIS_ORDERS:
labels = axes[a]
if labels is None:
continue
ax = self._get_axis(a)
new_index, indexer = ax.reindex(
labels, level=level, limit=limit, method=method)
axis = self._get_axis_number(a)
obj = obj._reindex_with_indexers(
{axis: [new_index, indexer]}, method=method,
fill_value=fill_value, limit=limit, copy=copy,
allow_dups=False)
return obj
def _needs_reindex_multi(self, axes, method, level):
""" check if we do need a multi reindex """
return ((com._count_not_none(*axes.values()) == self._AXIS_LEN) and
method is None and level is None and not self._is_mixed_type)
def _reindex_multi(self, axes, copy, fill_value):
return NotImplemented
_shared_docs['reindex_axis'] = (
"""Conform input object to new index with optional filling logic,
placing NA/NaN in locations having no value in the previous index. A
new object is produced unless the new index is equivalent to the
current one and copy=False
Parameters
----------
labels : array-like
New labels / index to conform to. Preferably an Index object to
avoid duplicating data
axis : %(axes_single_arg)s
method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None
Method to use for filling holes in reindexed object.
pad / ffill: propagate last valid observation forward to next valid
backfill / bfill: use NEXT valid observation to fill gap
copy : boolean, default True
Return a new object, even if the passed indexes are the same
level : int or name
Broadcast across a level, matching Index values on the
passed MultiIndex level
limit : int, default None
Maximum size gap to forward or backward fill
Examples
--------
>>> df.reindex_axis(['A', 'B', 'C'], axis=1)
See also
--------
reindex, reindex_like
Returns
-------
reindexed : %(klass)s
""")
@Appender(_shared_docs['reindex_axis'] % _shared_doc_kwargs)
def reindex_axis(self, labels, axis=0, method=None, level=None, copy=True,
limit=None, fill_value=np.nan):
self._consolidate_inplace()
axis_name = self._get_axis_name(axis)
axis_values = self._get_axis(axis_name)
method = com._clean_fill_method(method)
new_index, indexer = axis_values.reindex(labels, method, level,
limit=limit)
return self._reindex_with_indexers(
{axis: [new_index, indexer]}, method=method, fill_value=fill_value,
limit=limit, copy=copy)
def _reindex_with_indexers(self, reindexers, method=None,
fill_value=np.nan, limit=None, copy=False,
allow_dups=False):
""" allow_dups indicates an internal call here """
# reindex doing multiple operations on different axes if indicated
new_data = self._data
for axis in sorted(reindexers.keys()):
index, indexer = reindexers[axis]
baxis = self._get_block_manager_axis(axis)
if index is None:
continue
index = _ensure_index(index)
if indexer is not None:
indexer = com._ensure_int64(indexer)
# TODO: speed up on homogeneous DataFrame objects
new_data = new_data.reindex_indexer(index, indexer, axis=baxis,
fill_value=fill_value,
allow_dups=allow_dups,
copy=copy)
if copy and new_data is self._data:
new_data = new_data.copy()
return self._constructor(new_data).__finalize__(self)
def _reindex_axis(self, new_index, fill_method, axis, copy):
new_data = self._data.reindex_axis(new_index, axis=axis,
method=fill_method, copy=copy)
if new_data is self._data and not copy:
return self
else:
return self._constructor(new_data).__finalize__(self)
def filter(self, items=None, like=None, regex=None, axis=None):
"""
Restrict the info axis to set of items or wildcard
Parameters
----------
items : list-like
List of info axis labels to restrict to (they need not all be present)
like : string
Keep info axis where "arg in col == True"
regex : string (regular expression)
Keep info axis with re.search(regex, col) == True
axis : int or None
The axis to filter on. By default this is the info axis. The "info
axis" is the axis that is used when indexing with ``[]``. For
example, ``df = DataFrame({'a': [1, 2, 3, 4]}); df['a']``. So,
the ``DataFrame`` columns are the info axis.
Notes
-----
Arguments are mutually exclusive, but this is not checked for
"""
import re
if axis is None:
axis = self._info_axis_name
axis_name = self._get_axis_name(axis)
axis_values = self._get_axis(axis_name)
if items is not None:
return self.reindex(**{axis_name: [r for r in items
if r in axis_values]})
elif like:
matchf = lambda x: (like in x if isinstance(x, string_types)
else like in str(x))
return self.select(matchf, axis=axis_name)
elif regex:
matcher = re.compile(regex)
return self.select(lambda x: matcher.search(x) is not None,
axis=axis_name)
else:
raise TypeError('Must pass either `items`, `like`, or `regex`')
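# Illustrative usage sketch (comment only; hypothetical columns, not from the
# original source). The three selection modes are mutually exclusive.
#   >>> df = DataFrame({'one': [1], 'two': [2], 'three': [3]})
#   >>> df.filter(items=['one', 'three'])   # explicit list of columns
#   >>> df.filter(regex='e$')               # columns ending in 'e'
#   >>> df.filter(like='t')                 # columns containing 't'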
def head(self, n=5):
"""
Returns first n rows
"""
l = len(self)
if l == 0 or n == 0:
return self
return self.iloc[:n]
def tail(self, n=5):
"""
Returns last n rows
"""
l = len(self)
if l == 0 or n == 0:
return self
return self.iloc[-n:]
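# Illustrative usage sketch (comment only; hypothetical frame).
#   >>> df = DataFrame({'A': range(10)})
#   >>> df.head()     # first 5 rows (the default n)
#   >>> df.tail(3)    # last 3 rows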
#----------------------------------------------------------------------
# Attribute access
def __finalize__(self, other, method=None, **kwargs):
"""
propagate metadata from other to self
Parameters
----------
other : the object from which to get the attributes that we are going
to propagate
method : optional, a passed method name ; possibly to take different
types of propagation actions based on this
"""
if isinstance(other, NDFrame):
for name in self._metadata:
object.__setattr__(self, name, getattr(other, name, None))
return self
def __getattr__(self, name):
"""After regular attribute access, try looking up the name
This allows simpler access to columns for interactive use.
"""
# Note: obj.x will always call obj.__getattribute__('x') prior to
# calling obj.__getattr__('x').
if name in self._internal_names_set:
return object.__getattribute__(self, name)
elif name in self._metadata:
return object.__getattribute__(self, name)
else:
if name in self._info_axis:
return self[name]
raise AttributeError("'%s' object has no attribute '%s'" %
(type(self).__name__, name))
def __setattr__(self, name, value):
"""After regular attribute access, try setting the name
This allows simpler access to columns for interactive use."""
# first try regular attribute access via __getattribute__, so that
# e.g. ``obj.x`` and ``obj.x = 4`` will always reference/modify
# the same attribute.
try:
object.__getattribute__(self, name)
return object.__setattr__(self, name, value)
except AttributeError:
pass
# if this fails, go on to more involved attribute setting
# (note that this matches __getattr__, above).
if name in self._internal_names_set:
object.__setattr__(self, name, value)
elif name in self._metadata:
object.__setattr__(self, name, value)
else:
try:
existing = getattr(self, name)
if isinstance(existing, Index):
object.__setattr__(self, name, value)
elif name in self._info_axis:
self[name] = value
else:
object.__setattr__(self, name, value)
except (AttributeError, TypeError):
object.__setattr__(self, name, value)
#----------------------------------------------------------------------
# Getting and setting elements
#----------------------------------------------------------------------
# Consolidation of internals
def _consolidate_inplace(self):
f = lambda: self._data.consolidate()
self._data = self._protect_consolidate(f)
def consolidate(self, inplace=False):
"""
Compute NDFrame with "consolidated" internals (data of each dtype
grouped together in a single ndarray). Mainly an internal API function,
but available here to the savvy user
Parameters
----------
inplace : boolean, default False
If False return new object, otherwise modify existing object
Returns
-------
consolidated : type of caller
"""
if inplace:
self._consolidate_inplace()
else:
f = lambda: self._data.consolidate()
cons_data = self._protect_consolidate(f)
if cons_data is self._data:
cons_data = cons_data.copy()
return self._constructor(cons_data).__finalize__(self)
@property
def _is_mixed_type(self):
f = lambda: self._data.is_mixed_type
return self._protect_consolidate(f)
@property
def _is_numeric_mixed_type(self):
f = lambda: self._data.is_numeric_mixed_type
return self._protect_consolidate(f)
@property
def _is_datelike_mixed_type(self):
f = lambda: self._data.is_datelike_mixed_type
return self._protect_consolidate(f)
def _check_inplace_setting(self, value):
""" check whether we allow in-place setting with this type of value """
if self._is_mixed_type:
if not self._is_numeric_mixed_type:
# allow an actual np.nan thru
try:
if np.isnan(value):
return True
except:
pass
raise TypeError(
'Cannot do inplace boolean setting on mixed-types with a non np.nan value')
return True
def _protect_consolidate(self, f):
blocks_before = len(self._data.blocks)
result = f()
if len(self._data.blocks) != blocks_before:
self._clear_item_cache()
return result
def _get_numeric_data(self):
return self._constructor(
self._data.get_numeric_data()).__finalize__(self)
def _get_bool_data(self):
return self._constructor(self._data.get_bool_data()).__finalize__(self)
#----------------------------------------------------------------------
# Internal Interface Methods
def as_matrix(self, columns=None):
"""
Convert the frame to its Numpy-array representation.
Parameters
----------
columns : list, optional, default: None
If None, return all columns, otherwise, returns specified columns.
Returns
-------
values : ndarray
If the caller is heterogeneous and contains booleans or objects,
the result will be of dtype=object. See Notes.
Notes
-----
Return is NOT a Numpy-matrix, rather, a Numpy-array.
The dtype will be a lower-common-denominator dtype (implicit
upcasting); that is to say if the dtypes (even of numeric types)
are mixed, the one that accommodates all will be chosen. Use this
with care if you are not dealing with the blocks.
e.g. If the dtypes are float16 and float32, dtype will be upcast to
float32. If dtypes are int32 and uint8, dtype will be upcast to
int32.
This method is provided for backwards compatibility. Generally,
it is recommended to use '.values'.
See Also
--------
pandas.DataFrame.values
"""
self._consolidate_inplace()
if self._AXIS_REVERSED:
return self._data.as_matrix(columns).T
return self._data.as_matrix(columns)
@property
def values(self):
"""Numpy representation of NDFrame
Notes
-----
The dtype will be a lower-common-denominator dtype (implicit
upcasting); that is to say if the dtypes (even of numeric types)
are mixed, the one that accommodates all will be chosen. Use this
with care if you are not dealing with the blocks.
e.g. If the dtypes are float16 and float32, dtype will be upcast to
float32. If dtypes are int32 and uint8, dtype will be upcast to
int32.
"""
return self.as_matrix()
@property
def _get_values(self):
# compat
return self.as_matrix()
def get_values(self):
""" same as values (but handles sparseness conversions) """
return self.as_matrix()
def get_dtype_counts(self):
""" Return the counts of dtypes in this object """
from pandas import Series
return Series(self._data.get_dtype_counts())
def get_ftype_counts(self):
""" Return the counts of ftypes in this object """
from pandas import Series
return Series(self._data.get_ftype_counts())
@property
def dtypes(self):
""" Return the dtypes in this object """
from pandas import Series
return Series(self._data.get_dtypes(), index=self._info_axis,
dtype=np.object_)
@property
def ftypes(self):
"""
Return the ftypes (indication of sparse/dense and dtype)
in this object.
"""
from pandas import Series
return Series(self._data.get_ftypes(), index=self._info_axis,
dtype=np.object_)
def as_blocks(self):
"""
Convert the frame to a dict of dtype -> Constructor Types that each has
a homogeneous dtype.
NOTE: the dtypes of the blocks WILL BE PRESERVED HERE (unlike in
as_matrix)
Returns
-------
values : a dict of dtype -> Constructor Types
"""
self._consolidate_inplace()
bd = {}
for b in self._data.blocks:
bd.setdefault(str(b.dtype), []).append(b)
result = {}
for dtype, blocks in bd.items():
# Must combine even after consolidation, because there may be
# sparse items which are never consolidated into one block.
combined = self._data.combine(blocks, copy=True)
result[dtype] = self._constructor(combined).__finalize__(self)
return result
@property
def blocks(self):
"Internal property, property synonym for as_blocks()"
return self.as_blocks()
def astype(self, dtype, copy=True, raise_on_error=True):
"""
Cast object to input numpy.dtype
Return a copy when copy = True (be really careful with this!)
Parameters
----------
dtype : numpy.dtype or Python type
raise_on_error : raise on invalid input
Returns
-------
casted : type of caller
"""
mgr = self._data.astype(
dtype=dtype, copy=copy, raise_on_error=raise_on_error)
return self._constructor(mgr).__finalize__(self)
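# Illustrative usage sketch (comment only; hypothetical frame, not part of the
# original source). The whole object is cast to the requested dtype.
#   >>> df = DataFrame({'A': [1, 2], 'B': [3.5, 4.5]})
#   >>> df.astype('float64').dtypes   # both columns become float64
#   >>> df['A'].astype(str)           # element-wise string conversion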
def copy(self, deep=True):
"""
Make a copy of this object
Parameters
----------
deep : boolean or string, default True
Make a deep copy, i.e. also copy data
Returns
-------
copy : type of caller
"""
data = self._data.copy(deep=deep)
return self._constructor(data).__finalize__(self)
def convert_objects(self, convert_dates=True, convert_numeric=False,
convert_timedeltas=True, copy=True):
"""
Attempt to infer better dtype for object columns
Parameters
----------
convert_dates : if True, attempt to soft convert dates, if 'coerce',
force conversion (and non-convertibles get NaT)
convert_numeric : if True attempt to coerce to numbers (including
strings), non-convertibles get NaN
convert_timedeltas : if True, attempt to soft convert timedeltas, if 'coerce',
force conversion (and non-convertibles get NaT)
copy : Boolean, if True, return copy even if no copy is necessary
(e.g. no conversion was done), default is True.
It is meant for internal use, not to be confused with `inplace` kw.
Returns
-------
converted : same type as input object
"""
return self._constructor(
self._data.convert(convert_dates=convert_dates,
convert_numeric=convert_numeric,
convert_timedeltas=convert_timedeltas,
copy=copy)).__finalize__(self)
#----------------------------------------------------------------------
# Filling NA's
def fillna(self, value=None, method=None, axis=0, inplace=False,
limit=None, downcast=None):
"""
Fill NA/NaN values using the specified method
Parameters
----------
method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None
Method to use for filling holes in reindexed Series
pad / ffill: propagate last valid observation forward to next valid
backfill / bfill: use NEXT valid observation to fill gap
value : scalar, dict, Series, or DataFrame
Value to use to fill holes (e.g. 0), alternately a dict/Series/DataFrame of
values specifying which value to use for each index (for a Series) or
column (for a DataFrame). (values not in the dict/Series/DataFrame will not be
filled). This value cannot be a list.
axis : {0, 1}, default 0
* 0: fill column-by-column
* 1: fill row-by-row
inplace : boolean, default False
If True, fill in place. Note: this will modify any
other views on this object, (e.g. a no-copy slice for a column in a
DataFrame).
limit : int, default None
Maximum size gap to forward or backward fill
downcast : dict, default is None
a dict of item->dtype of what to downcast if possible,
or the string 'infer' which will try to downcast to an appropriate
equal type (e.g. float64 to int64 if possible)
See also
--------
reindex, asfreq
Returns
-------
filled : same type as caller
"""
if isinstance(value, (list, tuple)):
raise TypeError('"value" parameter must be a scalar or dict, but '
'you passed a "{0}"'.format(type(value).__name__))
self._consolidate_inplace()
axis = self._get_axis_number(axis)
method = com._clean_fill_method(method)
from pandas import DataFrame
if value is None:
if method is None:
raise ValueError('must specify a fill method or value')
if self._is_mixed_type and axis == 1:
if inplace:
raise NotImplementedError()
result = self.T.fillna(method=method, limit=limit).T
# need to downcast here because of all of the transposes
result._data = result._data.downcast()
return result
# > 3d
if self.ndim > 3:
raise NotImplementedError(
'Cannot fillna with a method for > 3dims'
)
# 3d
elif self.ndim == 3:
# fill in 2d chunks
result = dict([(col, s.fillna(method=method, value=value))
for col, s in compat.iteritems(self)])
return self._constructor.from_dict(result).__finalize__(self)
# 2d or less
method = com._clean_fill_method(method)
new_data = self._data.interpolate(method=method,
axis=axis,
limit=limit,
inplace=inplace,
coerce=True,
downcast=downcast)
else:
if method is not None:
raise ValueError('cannot specify both a fill method and value')
if len(self._get_axis(axis)) == 0:
return self
if self.ndim == 1:
if isinstance(value, (dict, com.ABCSeries)):
from pandas import Series
value = Series(value)
elif not com.is_list_like(value):
pass
else:
raise ValueError("invalid fill value with a %s" % type(value))
new_data = self._data.fillna(value=value,
limit=limit,
inplace=inplace,
downcast=downcast)
elif isinstance(value, (dict, com.ABCSeries)):
if axis == 1:
raise NotImplementedError('Currently only can fill '
'with dict/Series column '
'by column')
result = self if inplace else self.copy()
for k, v in compat.iteritems(value):
if k not in result:
continue
obj = result[k]
obj.fillna(v, limit=limit, inplace=True)
return result
elif not com.is_list_like(value):
new_data = self._data.fillna(value=value,
limit=limit,
inplace=inplace,
downcast=downcast)
elif isinstance(value, DataFrame) and self.ndim == 2:
new_data = self.where(self.notnull(), value)
else:
raise ValueError("invalid fill value with a %s" % type(value))
if inplace:
self._update_inplace(new_data)
else:
return self._constructor(new_data).__finalize__(self)
def ffill(self, axis=0, inplace=False, limit=None, downcast=None):
"Synonym for NDFrame.fillna(method='ffill')"
return self.fillna(method='ffill', axis=axis, inplace=inplace,
limit=limit, downcast=downcast)
def bfill(self, axis=0, inplace=False, limit=None, downcast=None):
"Synonym for NDFrame.fillna(method='bfill')"
return self.fillna(method='bfill', axis=axis, inplace=inplace,
limit=limit, downcast=downcast)
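# Illustrative usage sketch (comment only; hypothetical frame, not part of the
# original source). A scalar, a per-column dict, or a fill method may be used.
#   >>> df = DataFrame({'A': [1, np.nan, 3], 'B': [np.nan, 5, np.nan]})
#   >>> df.fillna(0)                  # scalar fill
#   >>> df.fillna({'A': 0, 'B': -1})  # different value per column
#   >>> df.fillna(method='ffill')     # same as df.ffill()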
def replace(self, to_replace=None, value=None, inplace=False, limit=None,
regex=False, method='pad', axis=None):
"""
Replace values given in 'to_replace' with 'value'.
Parameters
----------
to_replace : str, regex, list, dict, Series, numeric, or None
* str or regex:
- str: string exactly matching `to_replace` will be replaced
with `value`
- regex: regexs matching `to_replace` will be replaced with
`value`
* list of str, regex, or numeric:
- First, if `to_replace` and `value` are both lists, they
**must** be the same length.
- Second, if ``regex=True`` then all of the strings in **both**
lists will be interpreted as regexs otherwise they will match
directly. This doesn't matter much for `value` since there
are only a few possible substitution regexes you can use.
- str and regex rules apply as above.
* dict:
- Nested dictionaries, e.g., {'a': {'b': nan}}, are read as
follows: look in column 'a' for the value 'b' and replace it
with nan. You can nest regular expressions as well. Note that
column names (the top-level dictionary keys in a nested
dictionary) **cannot** be regular expressions.
- Keys map to column names and values map to substitution
values. You can treat this as a special case of passing two
lists except that you are specifying the column to search in.
* None:
- This means that the ``regex`` argument must be a string,
compiled regular expression, or list, dict, ndarray or Series
of such elements. If `value` is also ``None`` then this
**must** be a nested dictionary or ``Series``.
See the examples section for examples of each of these.
value : scalar, dict, list, str, regex, default None
Value to use to fill holes (e.g. 0), alternately a dict of values
specifying which value to use for each column (columns not in the
dict will not be filled). Regular expressions, strings and lists or
dicts of such objects are also allowed.
inplace : boolean, default False
If True, in place. Note: this will modify any
other views on this object (e.g. a column from a DataFrame).
Returns the caller if this is True.
limit : int, default None
Maximum size gap to forward or backward fill
regex : bool or same types as `to_replace`, default False
Whether to interpret `to_replace` and/or `value` as regular
expressions. If this is ``True`` then `to_replace` *must* be a
string. Otherwise, `to_replace` must be ``None`` because this
parameter will be interpreted as a regular expression or a list,
dict, or array of regular expressions.
method : string, optional, {'pad', 'ffill', 'bfill'}
The method to use for replacement, when ``to_replace`` is a
``list``.
See also
--------
NDFrame.reindex
NDFrame.asfreq
NDFrame.fillna
Returns
-------
filled : NDFrame
Raises
------
AssertionError
* If `regex` is not a ``bool`` and `to_replace` is not ``None``.
TypeError
* If `to_replace` is a ``dict`` and `value` is not a ``list``,
``dict``, ``ndarray``, or ``Series``
* If `to_replace` is ``None`` and `regex` is not compilable into a
regular expression or is a list, dict, ndarray, or Series.
ValueError
* If `to_replace` and `value` are ``list`` s or ``ndarray`` s, but
they are not the same length.
Notes
-----
* Regex substitution is performed under the hood with ``re.sub``. The
rules for substitution for ``re.sub`` are the same.
* Regular expressions will only substitute on strings, meaning you
cannot provide, for example, a regular expression matching floating
point numbers and expect the columns in your frame that have a
numeric dtype to be matched. However, if those floating point numbers
*are* strings, then you can do this.
* This method has *a lot* of options. You are encouraged to experiment
and play with this method to gain intuition about how it works.
"""
if not com.is_bool(regex) and to_replace is not None:
raise AssertionError("'to_replace' must be 'None' if 'regex' is "
"not a bool")
if axis is not None:
from warnings import warn
warn('the "axis" argument is deprecated and will be removed in'
'v0.13; this argument has no effect')
self._consolidate_inplace()
if value is None:
# passing a single value that is scalar like
# when value is None (GH5319), for compat
if not is_dictlike(to_replace) and not is_dictlike(regex):
to_replace = [to_replace]
if isinstance(to_replace, (tuple, list)):
return _single_replace(self, to_replace, method, inplace,
limit)
if not is_dictlike(to_replace):
if not is_dictlike(regex):
raise TypeError('If "to_replace" and "value" are both None'
' and "to_replace" is not a list, then '
'regex must be a mapping')
to_replace = regex
regex = True
items = list(compat.iteritems(to_replace))
keys, values = zip(*items)
are_mappings = [is_dictlike(v) for v in values]
if any(are_mappings):
if not all(are_mappings):
raise TypeError("If a nested mapping is passed, all values"
" of the top level mapping must be "
"mappings")
# passed a nested dict/Series
to_rep_dict = {}
value_dict = {}
for k, v in items:
keys, values = zip(*v.items())
if set(keys) & set(values):
raise ValueError("Replacement not allowed with "
"overlapping keys and values")
to_rep_dict[k] = list(keys)
value_dict[k] = list(values)
to_replace, value = to_rep_dict, value_dict
else:
to_replace, value = keys, values
return self.replace(to_replace, value, inplace=inplace,
limit=limit, regex=regex)
else:
# need a non-zero len on all axes
for a in self._AXIS_ORDERS:
if not len(self._get_axis(a)):
return self
new_data = self._data
if is_dictlike(to_replace):
if is_dictlike(value): # {'A' : NA} -> {'A' : 0}
res = self if inplace else self.copy()
for c, src in compat.iteritems(to_replace):
if c in value and c in self:
res[c] = res[c].replace(to_replace=src,
value=value[c],
inplace=False,
regex=regex)
return None if inplace else res
# {'A': NA} -> 0
elif not com.is_list_like(value):
for k, src in compat.iteritems(to_replace):
if k in self:
new_data = new_data.replace(to_replace=src,
value=value,
filter=[k],
inplace=inplace,
regex=regex)
else:
raise TypeError('value argument must be scalar, dict, or '
'Series')
elif com.is_list_like(to_replace): # [NA, ''] -> [0, 'missing']
if com.is_list_like(value):
if len(to_replace) != len(value):
raise ValueError('Replacement lists must match '
'in length. Expecting %d got %d ' %
(len(to_replace), len(value)))
new_data = self._data.replace_list(src_list=to_replace,
dest_list=value,
inplace=inplace,
regex=regex)
else: # [NA, ''] -> 0
new_data = self._data.replace(to_replace=to_replace,
value=value,
inplace=inplace,
regex=regex)
elif to_replace is None:
if not (com.is_re_compilable(regex) or
com.is_list_like(regex) or
is_dictlike(regex)):
raise TypeError("'regex' must be a string or a compiled "
"regular expression or a list or dict of "
"strings or regular expressions, you "
"passed a"
" {0!r}".format(type(regex).__name__))
return self.replace(regex, value, inplace=inplace, limit=limit,
regex=True)
else:
# dest iterable dict-like
if is_dictlike(value): # NA -> {'A' : 0, 'B' : -1}
new_data = self._data
for k, v in compat.iteritems(value):
if k in self:
new_data = new_data.replace(to_replace=to_replace,
value=v,
filter=[k],
inplace=inplace,
regex=regex)
elif not com.is_list_like(value): # NA -> 0
new_data = self._data.replace(to_replace=to_replace, value=value,
inplace=inplace, regex=regex)
else:
msg = ('Invalid "to_replace" type: '
'{0!r}').format(type(to_replace).__name__)
raise TypeError(msg) # pragma: no cover
new_data = new_data.convert(copy=not inplace, convert_numeric=False)
if inplace:
self._update_inplace(new_data)
else:
return self._constructor(new_data).__finalize__(self)
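# Illustrative usage sketch (comment only; hypothetical frame, not part of the
# original source). Each call corresponds to one of the to_replace forms above.
#   >>> df = DataFrame({'A': [0, 1, 2], 'B': ['a', 'b', 'c']})
#   >>> df.replace(0, 5)                        # scalar -> scalar
#   >>> df.replace([0, 1], [10, 100])           # list -> list (same length)
#   >>> df.replace({'A': {0: 42}})              # nested dict: column 'A' only
#   >>> df.replace(regex=r'^b$', value='bee')   # regex match on string values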
def interpolate(self, method='linear', axis=0, limit=None, inplace=False,
downcast=None, **kwargs):
"""
Interpolate values according to different methods.
Parameters
----------
method : {'linear', 'time', 'index', 'values', 'nearest', 'zero',
'slinear', 'quadratic', 'cubic', 'barycentric', 'krogh',
'polynomial', 'spline', 'piecewise_polynomial', 'pchip'}
* 'linear': ignore the index and treat the values as equally
spaced. default
* 'time': interpolation works on daily and higher resolution
data to interpolate given length of interval
* 'index', 'values': use the actual numerical values of the index
* 'nearest', 'zero', 'slinear', 'quadratic', 'cubic',
'barycentric', 'polynomial' is passed to
`scipy.interpolate.interp1d` with the order given. Both
'polynomial' and 'spline' require that you also specify an order
(int), e.g. df.interpolate(method='polynomial', order=4)
* 'krogh', 'piecewise_polynomial', 'spline', and 'pchip' are all
wrappers around the scipy interpolation methods of similar
names. See the scipy documentation for more on their behavior:
http://docs.scipy.org/doc/scipy/reference/interpolate.html#univariate-interpolation
http://docs.scipy.org/doc/scipy/reference/tutorial/interpolate.html
axis : {0, 1}, default 0
* 0: fill column-by-column
* 1: fill row-by-row
limit : int, default None.
Maximum number of consecutive NaNs to fill.
inplace : bool, default False
Update the NDFrame in place if possible.
downcast : optional, 'infer' or None, defaults to None
Downcast dtypes if possible.
Returns
-------
Series or DataFrame of same shape interpolated at the NaNs
See Also
--------
reindex, replace, fillna
Examples
--------
# Filling in NaNs:
>>> s = pd.Series([0, 1, np.nan, 3])
>>> s.interpolate()
0 0
1 1
2 2
3 3
dtype: float64
"""
if self.ndim > 2:
raise NotImplementedError("Interpolate has not been implemented "
"on Panel and Panel 4D objects.")
if axis == 0:
ax = self._info_axis_name
elif axis == 1:
self = self.T
ax = 1
ax = self._get_axis_number(ax)
if self.ndim == 2:
alt_ax = 1 - ax
else:
alt_ax = ax
if isinstance(self.index, MultiIndex) and method != 'linear':
raise ValueError("Only `method=linear` interpolation is supported "
"on MultiIndexes.")
if self._data.get_dtype_counts().get('object') == len(self.T):
raise TypeError("Cannot interpolate with all NaNs.")
# create/use the index
if method == 'linear':
index = np.arange(len(self._get_axis(alt_ax))) # prior default
else:
index = self._get_axis(alt_ax)
if isnull(index).any():
raise NotImplementedError("Interpolation with NaNs in the index "
"has not been implemented. Try filling "
"those NaNs before interpolating.")
new_data = self._data.interpolate(method=method,
axis=ax,
index=index,
values=self,
limit=limit,
inplace=inplace,
downcast=downcast,
**kwargs)
if inplace:
if axis == 1:
self._update_inplace(new_data)
self = self.T
else:
self._update_inplace(new_data)
else:
res = self._constructor(new_data).__finalize__(self)
if axis == 1:
res = res.T
return res
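# Illustrative usage sketch (comment only; hypothetical Series, not part of the
# original source).
#   >>> s = Series([0, 2, np.nan, 8])
#   >>> s.interpolate()                 # linear: the NaN becomes 5.0
#   >>> s.interpolate(method='values')  # use the index values as the x-axis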
#----------------------------------------------------------------------
# Action Methods
def isnull(self):
"""
Return a boolean same-sized object indicating if the values are null
See also
--------
notnull : boolean inverse of isnull
"""
return isnull(self).__finalize__(self)
def notnull(self):
"""Return a boolean same-sized object indicating if the values are
not null
See also
--------
isnull : boolean inverse of notnull
"""
return notnull(self).__finalize__(self)
def clip(self, lower=None, upper=None, out=None):
"""
Trim values at input threshold(s)
Parameters
----------
lower : float, default None
upper : float, default None
Returns
-------
clipped : Series
"""
if out is not None: # pragma: no cover
raise Exception('out argument is not supported yet')
# GH 2747 (arguments were reversed)
if lower is not None and upper is not None:
lower, upper = min(lower, upper), max(lower, upper)
result = self
if lower is not None:
result = result.clip_lower(lower)
if upper is not None:
result = result.clip_upper(upper)
return result
def clip_upper(self, threshold):
"""
Return copy of input with values above given value truncated
See also
--------
clip
Returns
-------
clipped : same type as input
"""
if isnull(threshold):
raise ValueError("Cannot use an NA value as a clip threshold")
return self.where((self <= threshold) | isnull(self), threshold)
def clip_lower(self, threshold):
"""
Return copy of the input with values below given value truncated
See also
--------
clip
Returns
-------
clipped : same type as input
"""
if isnull(threshold):
raise ValueError("Cannot use an NA value as a clip threshold")
return self.where((self >= threshold) | isnull(self), threshold)
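# Illustrative usage sketch (comment only; hypothetical Series).
#   >>> s = Series([-2, 0, 3, 7])
#   >>> s.clip(lower=0, upper=5)   # -> 0, 0, 3, 5
#   >>> s.clip_upper(5)            # only the upper bound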
def groupby(self, by=None, axis=0, level=None, as_index=True, sort=True,
group_keys=True, squeeze=False):
"""
Group series using mapper (dict or key function, apply given function
to group, return result as series) or by a series of columns
Parameters
----------
by : mapping function / list of functions, dict, Series, or tuple /
list of column names.
Called on each element of the object index to determine the groups.
If a dict or Series is passed, the Series or dict VALUES will be
used to determine the groups
axis : int, default 0
level : int, level name, or sequence of such, default None
If the axis is a MultiIndex (hierarchical), group by a particular
level or levels
as_index : boolean, default True
For aggregated output, return object with group labels as the
index. Only relevant for DataFrame input. as_index=False is
effectively "SQL-style" grouped output
sort : boolean, default True
Sort group keys. Get better performance by turning this off
group_keys : boolean, default True
When calling apply, add group keys to index to identify pieces
squeeze : boolean, default False
reduce the dimensionality of the return type if possible,
otherwise return a consistent type
Examples
--------
# DataFrame result
>>> data.groupby(func, axis=0).mean()
# DataFrame result
>>> data.groupby(['col1', 'col2'])['col3'].mean()
# DataFrame with hierarchical index
>>> data.groupby(['col1', 'col2']).mean()
Returns
-------
GroupBy object
"""
from pandas.core.groupby import groupby
if level is None and by is None:
raise TypeError("You have to supply one of 'by' and 'level'")
axis = self._get_axis_number(axis)
return groupby(self, by=by, axis=axis, level=level, as_index=as_index,
sort=sort, group_keys=group_keys, squeeze=squeeze)
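# Illustrative usage sketch (comment only; hypothetical frame, not part of the
# original source).
#   >>> df = DataFrame({'key': ['a', 'b', 'a'], 'val': [1, 2, 3]})
#   >>> df.groupby('key')['val'].mean()   # a -> 2.0, b -> 2.0
#   >>> df.groupby('key').sum()           # aggregate every numeric column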
def asfreq(self, freq, method=None, how=None, normalize=False):
"""
Convert all TimeSeries inside to specified frequency using DateOffset
objects. Optionally provide fill method to pad/backfill missing values.
Parameters
----------
freq : DateOffset object, or string
method : {'backfill', 'bfill', 'pad', 'ffill', None}
Method to use for filling holes in reindexed Series
pad / ffill: propagate last valid observation forward to next valid
backfill / bfill: use NEXT valid observation to fill method
how : {'start', 'end'}, default end
For PeriodIndex only, see PeriodIndex.asfreq
normalize : bool, default False
Whether to reset output index to midnight
Returns
-------
converted : type of caller
"""
from pandas.tseries.resample import asfreq
return asfreq(self, freq, method=method, how=how,
normalize=normalize)
def at_time(self, time, asof=False):
"""
Select values at particular time of day (e.g. 9:30AM)
Parameters
----------
time : datetime.time or string
Returns
-------
values_at_time : type of caller
"""
try:
indexer = self.index.indexer_at_time(time, asof=asof)
return self.take(indexer, convert=False)
except AttributeError:
raise TypeError('Index must be DatetimeIndex')
def between_time(self, start_time, end_time, include_start=True,
include_end=True):
"""
Select values between particular times of the day (e.g., 9:00-9:30 AM)
Parameters
----------
start_time : datetime.time or string
end_time : datetime.time or string
include_start : boolean, default True
include_end : boolean, default True
Returns
-------
values_between_time : type of caller
"""
try:
indexer = self.index.indexer_between_time(
start_time, end_time, include_start=include_start,
include_end=include_end)
return self.take(indexer, convert=False)
except AttributeError:
raise TypeError('Index must be DatetimeIndex')
def resample(self, rule, how=None, axis=0, fill_method=None,
closed=None, label=None, convention='start',
kind=None, loffset=None, limit=None, base=0):
"""
Convenience method for frequency conversion and resampling of regular
time-series data.
Parameters
----------
rule : string
the offset string or object representing target conversion
how : string
method for down- or re-sampling, defaults to 'mean' for
downsampling
axis : int, optional, default 0
fill_method : string, default None
fill_method for upsampling
closed : {'right', 'left'}
Which side of bin interval is closed
label : {'right', 'left'}
Which bin edge label to label bucket with
convention : {'start', 'end', 's', 'e'}
kind : "period"/"timestamp"
loffset : timedelta
Adjust the resampled time labels
limit : int, default None
Maximum size gap to when reindexing with fill_method
base : int, default 0
For frequencies that evenly subdivide 1 day, the "origin" of the
aggregated intervals. For example, for '5min' frequency, base could
range from 0 through 4. Defaults to 0
"""
from pandas.tseries.resample import TimeGrouper
axis = self._get_axis_number(axis)
sampler = TimeGrouper(rule, label=label, closed=closed, how=how,
axis=axis, kind=kind, loffset=loffset,
fill_method=fill_method, convention=convention,
limit=limit, base=base)
return sampler.resample(self).__finalize__(self)
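# Illustrative usage sketch (comment only; assumes `date_range` and `Series`
# are importable from pandas; not part of the original source).
#   >>> ts = Series(range(6), index=date_range('2014-01-01', periods=6, freq='T'))
#   >>> ts.resample('2T', how='sum')              # downsample to 2-minute bins
#   >>> ts.resample('30S', fill_method='ffill')   # upsample and forward-fill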
def first(self, offset):
"""
Convenience method for subsetting initial periods of time series data
based on a date offset
Parameters
----------
offset : string, DateOffset, dateutil.relativedelta
Examples
--------
ts.first('10D') -> First 10 days
Returns
-------
subset : type of caller
"""
from pandas.tseries.frequencies import to_offset
if not isinstance(self.index, DatetimeIndex):
raise NotImplementedError
if len(self.index) == 0:
return self
offset = to_offset(offset)
end_date = end = self.index[0] + offset
# Tick-like, e.g. 3 weeks
if not offset.isAnchored() and hasattr(offset, '_inc'):
if end_date in self.index:
end = self.index.searchsorted(end_date, side='left')
return self.ix[:end]
def last(self, offset):
"""
Convenience method for subsetting final periods of time series data
based on a date offset
Parameters
----------
offset : string, DateOffset, dateutil.relativedelta
Examples
--------
ts.last('5M') -> Last 5 months
Returns
-------
subset : type of caller
"""
from pandas.tseries.frequencies import to_offset
if not isinstance(self.index, DatetimeIndex):
raise NotImplementedError
if len(self.index) == 0:
return self
offset = to_offset(offset)
start_date = start = self.index[-1] - offset
start = self.index.searchsorted(start_date, side='right')
return self.ix[start:]
def align(self, other, join='outer', axis=None, level=None, copy=True,
fill_value=None, method=None, limit=None, fill_axis=0):
"""
Align two objects on their axes with the
specified join method for each axis Index
Parameters
----------
other : DataFrame or Series
join : {'outer', 'inner', 'left', 'right'}, default 'outer'
axis : allowed axis of the other object, default None
Align on index (0), columns (1), or both (None)
level : int or level name, default None
Broadcast across a level, matching Index values on the
passed MultiIndex level
copy : boolean, default True
Always returns new objects. If copy=False and no reindexing is
required then original objects are returned.
fill_value : scalar, default np.NaN
Value to use for missing values. Defaults to NaN, but can be any
"compatible" value
method : str, default None
limit : int, default None
fill_axis : {0, 1}, default 0
Filling axis, method and limit
Returns
-------
(left, right) : (type of input, type of other)
Aligned objects
"""
from pandas import DataFrame, Series
method = com._clean_fill_method(method)
if axis is not None:
axis = self._get_axis_number(axis)
if isinstance(other, DataFrame):
return self._align_frame(other, join=join, axis=axis, level=level,
copy=copy, fill_value=fill_value,
method=method, limit=limit,
fill_axis=fill_axis)
elif isinstance(other, Series):
return self._align_series(other, join=join, axis=axis, level=level,
copy=copy, fill_value=fill_value,
method=method, limit=limit,
fill_axis=fill_axis)
else: # pragma: no cover
raise TypeError('unsupported type: %s' % type(other))
def _align_frame(self, other, join='outer', axis=None, level=None,
copy=True, fill_value=np.nan, method=None, limit=None,
fill_axis=0):
# defaults
join_index, join_columns = None, None
ilidx, iridx = None, None
clidx, cridx = None, None
if axis is None or axis == 0:
if not self.index.equals(other.index):
join_index, ilidx, iridx = \
self.index.join(other.index, how=join, level=level,
return_indexers=True)
if axis is None or axis == 1:
if not self.columns.equals(other.columns):
join_columns, clidx, cridx = \
self.columns.join(other.columns, how=join, level=level,
return_indexers=True)
left = self._reindex_with_indexers({0: [join_index, ilidx],
1: [join_columns, clidx]},
copy=copy, fill_value=fill_value,
allow_dups=True)
right = other._reindex_with_indexers({0: [join_index, iridx],
1: [join_columns, cridx]},
copy=copy, fill_value=fill_value,
allow_dups=True)
if method is not None:
left = left.fillna(axis=fill_axis, method=method, limit=limit)
right = right.fillna(axis=fill_axis, method=method, limit=limit)
return left.__finalize__(self), right.__finalize__(other)
def _align_series(self, other, join='outer', axis=None, level=None,
copy=True, fill_value=None, method=None, limit=None,
fill_axis=0):
from pandas import DataFrame
# series/series compat
if isinstance(self, ABCSeries) and isinstance(other, ABCSeries):
if axis:
raise ValueError('cannot align series to a series other than '
'axis 0')
# equal
if self.index.equals(other.index):
join_index, lidx, ridx = None, None, None
else:
join_index, lidx, ridx = self.index.join(other.index, how=join,
level=level,
return_indexers=True)
left_result = self._reindex_indexer(join_index, lidx, copy)
right_result = other._reindex_indexer(join_index, ridx, copy)
else:
# for join compat if we have an unnamed index, but
# are specifying a level join
other_index = other.index
if level is not None and other.index.name is None:
other_index = other_index.set_names([level])
# one has > 1 ndim
fdata = self._data
if axis == 0:
join_index = self.index
lidx, ridx = None, None
if not self.index.equals(other_index):
join_index, lidx, ridx = self.index.join(
other_index, how=join, return_indexers=True)
if lidx is not None:
fdata = fdata.reindex_indexer(join_index, lidx, axis=1)
elif axis == 1:
join_index = self.columns
lidx, ridx = None, None
if not self.columns.equals(other_index):
join_index, lidx, ridx = \
self.columns.join(other_index, how=join,
return_indexers=True)
if lidx is not None:
fdata = fdata.reindex_indexer(join_index, lidx, axis=0)
else:
raise ValueError('Must specify axis=0 or 1')
if copy and fdata is self._data:
fdata = fdata.copy()
left_result = DataFrame(fdata)
if ridx is None:
right_result = other
else:
right_result = other.reindex(join_index, level=level)
# fill
fill_na = notnull(fill_value) or (method is not None)
if fill_na:
return (left_result.fillna(fill_value, method=method, limit=limit,
axis=fill_axis),
right_result.fillna(fill_value, method=method,
limit=limit))
else:
return (left_result.__finalize__(self),
right_result.__finalize__(other))
def where(self, cond, other=np.nan, inplace=False, axis=None, level=None,
try_cast=False, raise_on_error=True):
"""
Return an object of same shape as self and whose corresponding
entries are from self where cond is True and otherwise are from other.
Parameters
----------
cond : boolean NDFrame or array
other : scalar or NDFrame
inplace : boolean, default False
Whether to perform the operation in place on the data
axis : alignment axis if needed, default None
level : alignment level if needed, default None
try_cast : boolean, default False
try to cast the result back to the input type (if possible),
raise_on_error : boolean, default True
Whether to raise on invalid data types (e.g. trying to where on
strings)
Returns
-------
wh : same type as caller
"""
if isinstance(cond, NDFrame):
cond = cond.reindex(**self._construct_axes_dict())
else:
if not hasattr(cond, 'shape'):
raise ValueError('where requires an ndarray like object for '
'its condition')
if cond.shape != self.shape:
raise ValueError(
'Array conditional must be same shape as self')
cond = self._constructor(cond, **self._construct_axes_dict())
if inplace:
cond = -(cond.fillna(True).astype(bool))
else:
cond = cond.fillna(False).astype(bool)
# try to align
try_quick = True
if hasattr(other, 'align'):
# align with me
if other.ndim <= self.ndim:
_, other = self.align(other, join='left',
axis=axis, level=level,
fill_value=np.nan)
# if we are NOT aligned, raise as we cannot where index
if (axis is None and
not all([other._get_axis(i).equals(ax)
for i, ax in enumerate(self.axes)])):
raise InvalidIndexError
# slice me out of the other
else:
raise NotImplementedError(
"cannot align with a higher dimensional NDFrame"
)
elif is_list_like(other):
if self.ndim == 1:
# try to set the same dtype as ourselves
new_other = np.array(other, dtype=self.dtype)
if not (new_other == np.array(other)).all():
other = np.array(other)
# we can't use our existing dtype
# because of incompatibilities
try_quick = False
else:
other = new_other
else:
other = np.array(other)
if isinstance(other, np.ndarray):
if other.shape != self.shape:
if self.ndim == 1:
icond = cond.values
# GH 2745 / GH 4192
# treat like a scalar
if len(other) == 1:
other = np.array(other[0])
# GH 3235
# match True cond to other
elif len(cond[icond]) == len(other):
# try to not change dtype at first (if try_quick)
if try_quick:
try:
new_other = _values_from_object(self).copy()
new_other[icond] = other
other = new_other
except:
try_quick = False
# otherwise create a new array (either the quick path above failed,
# or try_quick was disabled)
if not try_quick:
dtype, fill_value = _maybe_promote(other.dtype)
new_other = np.empty(len(icond), dtype=dtype)
new_other.fill(fill_value)
com._maybe_upcast_putmask(new_other, icond, other)
other = new_other
else:
raise ValueError(
'Length of replacements must equal series length')
else:
raise ValueError('other must be the same shape as self '
'when an ndarray')
# we are the same shape, so create an actual object for alignment
else:
other = self._constructor(other, **self._construct_axes_dict())
if inplace:
# we may have different type blocks come out of putmask, so
# reconstruct the block manager
self._check_inplace_setting(other)
new_data = self._data.putmask(mask=cond, new=other, align=axis is None,
inplace=True)
self._update_inplace(new_data)
else:
new_data = self._data.where(other=other, cond=cond, align=axis is None,
raise_on_error=raise_on_error,
try_cast=try_cast)
return self._constructor(new_data).__finalize__(self)
def mask(self, cond):
"""
Returns copy whose values are replaced with nan if the
inverted condition is True
Parameters
----------
cond : boolean NDFrame or array
Returns
-------
wh: same as input
"""
return self.where(~cond, np.nan)
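# Illustrative usage sketch (comment only; hypothetical Series, not part of the
# original source). `where` keeps values where cond is True; `mask` is the inverse.
#   >>> s = Series([1, 2, 3, 4])
#   >>> s.where(s > 2)        # values <= 2 become NaN
#   >>> s.where(s > 2, -1)    # values <= 2 become -1
#   >>> s.mask(s > 2)         # values > 2 become NaN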
def shift(self, periods=1, freq=None, axis=0, **kwds):
"""
Shift index by desired number of periods with an optional time freq
Parameters
----------
periods : int
Number of periods to move, can be positive or negative
freq : DateOffset, timedelta, or time rule string, optional
Increment to use from datetools module or time rule (e.g. 'EOM').
See Notes.
Notes
-----
If freq is specified then the index values are shifted but the data
is not realigned. That is, use freq if you would like to extend the
index when shifting and preserve the original data.
Returns
-------
shifted : same type as caller
"""
if periods == 0:
return self
block_axis = self._get_block_manager_axis(axis)
if freq is None and not len(kwds):
new_data = self._data.shift(periods=periods, axis=block_axis)
else:
return self.tshift(periods, freq, **kwds)
return self._constructor(new_data).__finalize__(self)
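# Illustrative usage sketch (comment only; hypothetical Series).
#   >>> s = Series([1, 2, 3])
#   >>> s.shift(1)    # NaN, 1, 2
#   >>> s.shift(-1)   # 2, 3, NaN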
def slice_shift(self, periods=1, axis=0, **kwds):
"""
Equivalent to `shift` without copying data. The shifted data will
not include the dropped periods and the shifted axis will be smaller
than the original.
Parameters
----------
periods : int
Number of periods to move, can be positive or negative
Notes
-----
While the `slice_shift` is faster than `shift`, you may pay for it
later during alignment.
Returns
-------
shifted : same type as caller
"""
if periods == 0:
return self
if periods > 0:
vslicer = slice(None, -periods)
islicer = slice(periods, None)
else:
vslicer = slice(-periods, None)
islicer = slice(None, periods)
new_obj = self._slice(vslicer, axis=axis)
shifted_axis = self._get_axis(axis)[islicer]
new_obj.set_axis(axis, shifted_axis)
return new_obj.__finalize__(self)
def tshift(self, periods=1, freq=None, axis=0, **kwds):
"""
Shift the time index, using the index's frequency if available
Parameters
----------
periods : int
Number of periods to move, can be positive or negative
freq : DateOffset, timedelta, or time rule string, default None
Increment to use from datetools module or time rule (e.g. 'EOM')
axis : int or basestring
Corresponds to the axis that contains the Index
Notes
-----
If freq is not specified then it tries to use the freq or inferred_freq
attributes of the index. If neither of those attributes exists, a
ValueError is raised
Returns
-------
shifted : NDFrame
"""
from pandas.core.datetools import _resolve_offset
index = self._get_axis(axis)
if freq is None:
freq = getattr(index, 'freq', None)
if freq is None:
freq = getattr(index, 'inferred_freq', None)
if freq is None:
msg = 'Freq was not given and was not set in the index'
raise ValueError(msg)
if periods == 0:
return self
offset = _resolve_offset(freq, kwds)
if isinstance(offset, string_types):
offset = datetools.to_offset(offset)
block_axis = self._get_block_manager_axis(axis)
if isinstance(index, PeriodIndex):
orig_offset = datetools.to_offset(index.freq)
if offset == orig_offset:
new_data = self._data.copy()
new_data.axes[block_axis] = index.shift(periods)
else:
msg = ('Given freq %s does not match PeriodIndex freq %s' %
(offset.rule_code, orig_offset.rule_code))
raise ValueError(msg)
else:
new_data = self._data.copy()
new_data.axes[block_axis] = index.shift(periods, offset)
return self._constructor(new_data).__finalize__(self)
def truncate(self, before=None, after=None, axis=None, copy=True):
"""Truncates a sorted NDFrame before and/or after some particular
dates.
Parameters
----------
before : date
Truncate before date
after : date
Truncate after date
axis : the truncation axis, defaults to the stat axis
copy : boolean, default is True,
return a copy of the truncated section
Returns
-------
truncated : type of caller
"""
if axis is None:
axis = self._stat_axis_number
axis = self._get_axis_number(axis)
ax = self._get_axis(axis)
# if we have a date index, convert to dates, otherwise
# treat like a slice
if ax.is_all_dates:
from pandas.tseries.tools import to_datetime
before = to_datetime(before)
after = to_datetime(after)
if before is not None and after is not None:
if before > after:
raise ValueError('Truncate: %s must be after %s' %
(after, before))
slicer = [slice(None, None)] * self._AXIS_LEN
slicer[axis] = slice(before, after)
result = self.ix[tuple(slicer)]
if isinstance(ax, MultiIndex):
setattr(result, self._get_axis_name(axis),
ax.truncate(before, after))
if copy:
result = result.copy()
return result
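    # Illustrative usage sketch (hypothetical DataFrame ``df`` indexed by
    # dates; the bounds are parsed with to_datetime exactly as above):
    #
    #   df.truncate(before='2013-01-02', after='2013-01-04')
    #   # keeps only rows whose index label falls between the two dates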
def tz_convert(self, tz, axis=0, level=None, copy=True):
"""
Convert the axis to target time zone. If it is time zone naive, it
will be localized to the passed time zone.
Parameters
----------
tz : string or pytz.timezone object
axis : the axis to convert
level : int, str, default None
            If axis is a MultiIndex, convert a specific level. Otherwise
must be None
copy : boolean, default True
Also make a copy of the underlying data
Returns
-------
"""
axis = self._get_axis_number(axis)
ax = self._get_axis(axis)
def _tz_convert(ax, tz):
if not hasattr(ax, 'tz_convert'):
if len(ax) > 0:
ax_name = self._get_axis_name(axis)
raise TypeError('%s is not a valid DatetimeIndex or PeriodIndex' %
ax_name)
else:
                    ax = DatetimeIndex([], tz=tz)
else:
ax = ax.tz_convert(tz)
return ax
# if a level is given it must be a MultiIndex level or
# equivalent to the axis name
if isinstance(ax, MultiIndex):
level = ax._get_level_number(level)
new_level = _tz_convert(ax.levels[level], tz)
ax = ax.set_levels(new_level, level=level)
else:
if level not in (None, 0, ax.name):
raise ValueError("The level {0} is not valid".format(level))
ax = _tz_convert(ax, tz)
result = self._constructor(self._data, copy=copy)
        result.set_axis(axis, ax)
return result.__finalize__(self)
@deprecate_kwarg(old_arg_name='infer_dst', new_arg_name='ambiguous',
mapping={True: 'infer', False: 'raise'})
def tz_localize(self, tz, axis=0, level=None, copy=True,
ambiguous='raise'):
"""
Localize tz-naive TimeSeries to target time zone
Parameters
----------
tz : string or pytz.timezone object
axis : the axis to localize
level : int, str, default None
            If axis is a MultiIndex, localize a specific level. Otherwise
must be None
copy : boolean, default True
Also make a copy of the underlying data
ambiguous : 'infer', bool-ndarray, 'NaT', default 'raise'
- 'infer' will attempt to infer fall dst-transition hours based on order
- bool-ndarray where True signifies a DST time, False designates
a non-DST time (note that this flag is only applicable for ambiguous times)
- 'NaT' will return NaT where there are ambiguous times
- 'raise' will raise an AmbiguousTimeError if there are ambiguous times
infer_dst : boolean, default False (DEPRECATED)
Attempt to infer fall dst-transition hours based on order
Returns
-------
"""
axis = self._get_axis_number(axis)
ax = self._get_axis(axis)
def _tz_localize(ax, tz, ambiguous):
if not hasattr(ax, 'tz_localize'):
if len(ax) > 0:
ax_name = self._get_axis_name(axis)
raise TypeError('%s is not a valid DatetimeIndex or PeriodIndex' %
ax_name)
else:
                    ax = DatetimeIndex([], tz=tz)
else:
ax = ax.tz_localize(tz, ambiguous=ambiguous)
return ax
# if a level is given it must be a MultiIndex level or
# equivalent to the axis name
if isinstance(ax, MultiIndex):
level = ax._get_level_number(level)
new_level = _tz_localize(ax.levels[level], tz, ambiguous)
ax = ax.set_levels(new_level, level=level)
else:
if level not in (None, 0, ax.name):
raise ValueError("The level {0} is not valid".format(level))
ax = _tz_localize(ax, tz, ambiguous)
result = self._constructor(self._data, copy=copy)
        result.set_axis(axis, ax)
return result.__finalize__(self)
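    # Illustrative usage sketch (hypothetical Series ``s`` with a tz-naive
    # DatetimeIndex):
    #
    #   s.tz_localize('UTC')                          # attach a time zone
    #   s.tz_localize('US/Eastern', ambiguous='NaT')  # ambiguous DST times -> NaT
    #   s.tz_localize('UTC').tz_convert('US/Eastern') # localize, then convert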
#----------------------------------------------------------------------
# Numeric Methods
def abs(self):
"""
Return an object with absolute value taken. Only applicable to objects
that are all numeric
Returns
-------
abs: type of caller
"""
return np.abs(self)
_shared_docs['describe'] = """
Generate various summary statistics, excluding NaN values.
Parameters
----------
percentile_width : float, deprecated
The ``percentile_width`` argument will be removed in a future
version. Use ``percentiles`` instead.
width of the desired uncertainty interval, default is 50,
which corresponds to lower=25, upper=75
percentiles : array-like, optional
The percentiles to include in the output. Should all
be in the interval [0, 1]. By default `percentiles` is
[.25, .5, .75], returning the 25th, 50th, and 75th percentiles.
include, exclude : list-like, 'all', or None (default)
Specify the form of the returned result. Either:
            - None to both (default). The result will include only numeric-typed
              columns or, if there are none, only categorical columns.
            - A list of dtypes or strings to be included/excluded.
              To select all numeric types use ``numpy.number``. To select
              categorical objects use the ``object`` dtype. See also the
              ``select_dtypes`` documentation, e.g. ``df.describe(include=['O'])``.
- If include is the string 'all', the output column-set will
match the input one.
Returns
-------
summary: %(klass)s of summary statistics
Notes
-----
The output DataFrame index depends on the requested dtypes:
For numeric dtypes, it will include: count, mean, std, min,
max, and lower, 50, and upper percentiles.
For object dtypes (e.g. timestamps or strings), the index
will include the count, unique, most common, and frequency of the
most common. Timestamps also include the first and last items.
For mixed dtypes, the index will be the union of the corresponding
output types. Non-applicable entries will be filled with NaN.
Note that mixed-dtype outputs can only be returned from mixed-dtype
inputs and appropriate use of the include/exclude arguments.
If multiple values have the highest count, then the
`count` and `most common` pair will be arbitrarily chosen from
among those with the highest count.
The include, exclude arguments are ignored for Series.
See also
--------
DataFrame.select_dtypes
"""
@Appender(_shared_docs['describe'] % _shared_doc_kwargs)
def describe(self, percentile_width=None, percentiles=None, include=None, exclude=None ):
if self.ndim >= 3:
msg = "describe is not implemented on on Panel or PanelND objects."
raise NotImplementedError(msg)
if percentile_width is not None and percentiles is not None:
msg = "Cannot specify both 'percentile_width' and 'percentiles.'"
raise ValueError(msg)
if percentiles is not None:
# get them all to be in [0, 1]
percentiles = np.asarray(percentiles)
if (percentiles > 1).any():
percentiles = percentiles / 100.0
msg = ("percentiles should all be in the interval [0, 1]. "
"Try {0} instead.")
raise ValueError(msg.format(list(percentiles)))
else:
# only warn if they change the default
if percentile_width is not None:
do_warn = True
else:
do_warn = False
percentile_width = percentile_width or 50
lb = .5 * (1. - percentile_width / 100.)
ub = 1. - lb
percentiles = np.array([lb, 0.5, ub])
if do_warn:
msg = ("The `percentile_width` keyword is deprecated. "
"Use percentiles={0} instead".format(list(percentiles)))
warnings.warn(msg, FutureWarning)
# median should always be included
if (percentiles != 0.5).all(): # median isn't included
lh = percentiles[percentiles < .5]
uh = percentiles[percentiles > .5]
percentiles = np.hstack([lh, 0.5, uh])
def pretty_name(x):
x *= 100
if x == int(x):
return '%.0f%%' % x
else:
return '%.1f%%' % x
def describe_numeric_1d(series, percentiles):
stat_index = (['count', 'mean', 'std', 'min'] +
[pretty_name(x) for x in percentiles] + ['max'])
d = ([series.count(), series.mean(), series.std(), series.min()] +
[series.quantile(x) for x in percentiles] + [series.max()])
return pd.Series(d, index=stat_index, name=series.name)
def describe_categorical_1d(data):
names = ['count', 'unique']
objcounts = data.value_counts()
            result = [data.count(), len(objcounts[objcounts != 0])]
if result[1] > 0:
top, freq = objcounts.index[0], objcounts.iloc[0]
if data.dtype == object or com.is_categorical_dtype(data.dtype):
names += ['top', 'freq']
result += [top, freq]
elif com.is_datetime64_dtype(data):
asint = data.dropna().values.view('i8')
names += ['top', 'freq', 'first', 'last']
result += [lib.Timestamp(top), freq,
lib.Timestamp(asint.min()),
lib.Timestamp(asint.max())]
return pd.Series(result, index=names, name=data.name)
def describe_1d(data, percentiles):
if com.is_numeric_dtype(data):
return describe_numeric_1d(data, percentiles)
elif com.is_timedelta64_dtype(data):
return describe_numeric_1d(data, percentiles)
else:
return describe_categorical_1d(data)
if self.ndim == 1:
return describe_1d(self, percentiles)
elif (include is None) and (exclude is None):
if len(self._get_numeric_data()._info_axis) > 0:
# when some numerics are found, keep only numerics
data = self.select_dtypes(include=[np.number, np.bool])
else:
data = self
elif include == 'all':
            if exclude is not None:
msg = "exclude must be None when include is 'all'"
raise ValueError(msg)
data = self
else:
data = self.select_dtypes(include=include, exclude=exclude)
ldesc = [describe_1d(s, percentiles) for _, s in data.iteritems()]
# set a convenient order for rows
names = []
ldesc_indexes = sorted([x.index for x in ldesc], key=len)
for idxnames in ldesc_indexes:
for name in idxnames:
if name not in names:
names.append(name)
d = pd.concat(ldesc, join_axes=pd.Index([names]), axis=1)
return d
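    # Illustrative usage sketch (hypothetical DataFrame ``df`` with mixed
    # dtypes, restating the rules implemented above):
    #
    #   df.describe()                      # numeric columns, default percentiles
    #   df.describe(percentiles=[.1, .9])  # custom percentiles; median is added
    #   df.describe(include=['O'])         # object columns: count/unique/top/freq
    #   df.describe(include='all')         # every column; exclude must be None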
_shared_docs['pct_change'] = """
Percent change over given number of periods.
Parameters
----------
periods : int, default 1
Periods to shift for forming percent change
fill_method : str, default 'pad'
How to handle NAs before computing percent changes
limit : int, default None
The number of consecutive NAs to fill before stopping
freq : DateOffset, timedelta, or offset alias string, optional
Increment to use from time series API (e.g. 'M' or BDay())
Returns
-------
chg : %(klass)s
Notes
-----
By default, the percentage change is calculated along the stat
axis: 0, or ``Index``, for ``DataFrame`` and 1, or ``minor`` for
``Panel``. You can change this with the ``axis`` keyword argument.
"""
@Appender(_shared_docs['pct_change'] % _shared_doc_kwargs)
def pct_change(self, periods=1, fill_method='pad', limit=None, freq=None,
**kwds):
# TODO: Not sure if above is correct - need someone to confirm.
axis = self._get_axis_number(kwds.pop('axis', self._stat_axis_name))
if fill_method is None:
data = self
else:
data = self.fillna(method=fill_method, limit=limit)
rs = (data.div(data.shift(periods=periods, freq=freq,
axis=axis, **kwds)) - 1)
if freq is None:
mask = com.isnull(_values_from_object(self))
np.putmask(rs.values, mask, np.nan)
return rs
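    # Illustrative usage sketch (hypothetical Series); pct_change is simply
    # data / data.shift(periods) - 1 after the optional NA fill above:
    #
    #   s = pd.Series([100., 110., 99.])
    #   s.pct_change()   # -> [NaN, 0.10, -0.10]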
def _agg_by_level(self, name, axis=0, level=0, skipna=True, **kwds):
grouped = self.groupby(level=level, axis=axis)
if hasattr(grouped, name) and skipna:
return getattr(grouped, name)(**kwds)
axis = self._get_axis_number(axis)
method = getattr(type(self), name)
applyf = lambda x: method(x, axis=axis, skipna=skipna, **kwds)
return grouped.aggregate(applyf)
@classmethod
def _add_numeric_operations(cls):
""" add the operations to the cls; evaluate the doc strings again """
axis_descr = "{%s}" % ', '.join([
"{0} ({1})".format(a, i) for i, a in enumerate(cls._AXIS_ORDERS)
])
name = (cls._constructor_sliced.__name__
if cls._AXIS_LEN > 1 else 'scalar')
_num_doc = """
%(desc)s
Parameters
----------
axis : """ + axis_descr + """
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result
will be NA
level : int or level name, default None
If the axis is a MultiIndex (hierarchical), count along a
particular level, collapsing into a """ + name + """
numeric_only : boolean, default None
Include only float, int, boolean data. If None, will attempt to use
everything, then use only numeric data
Returns
-------
%(outname)s : """ + name + " or " + cls.__name__ + " (if level specified)\n"
_bool_doc = """
%(desc)s
Parameters
----------
axis : """ + axis_descr + """
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result
will be NA
level : int or level name, default None
If the axis is a MultiIndex (hierarchical), count along a
particular level, collapsing into a """ + name + """
bool_only : boolean, default None
Include only boolean data. If None, will attempt to use everything,
then use only boolean data
Returns
-------
%(outname)s : """ + name + " or " + cls.__name__ + " (if level specified)\n"
_cnum_doc = """
Parameters
----------
axis : """ + axis_descr + """
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result
will be NA
Returns
-------
%(outname)s : """ + name + "\n"
def _make_stat_function(name, desc, f):
@Substitution(outname=name, desc=desc)
@Appender(_num_doc)
def stat_func(self, axis=None, skipna=None, level=None,
numeric_only=None, **kwargs):
if skipna is None:
skipna = True
if axis is None:
axis = self._stat_axis_number
if level is not None:
return self._agg_by_level(name, axis=axis, level=level,
skipna=skipna)
return self._reduce(f, name, axis=axis,
skipna=skipna, numeric_only=numeric_only)
stat_func.__name__ = name
return stat_func
cls.sum = _make_stat_function(
'sum', 'Return the sum of the values for the requested axis',
nanops.nansum)
cls.mean = _make_stat_function(
'mean', 'Return the mean of the values for the requested axis',
nanops.nanmean)
cls.skew = _make_stat_function(
'skew',
'Return unbiased skew over requested axis\nNormalized by N-1',
nanops.nanskew)
cls.kurt = _make_stat_function(
'kurt',
'Return unbiased kurtosis over requested axis\nNormalized by N-1',
nanops.nankurt)
cls.kurtosis = cls.kurt
cls.prod = _make_stat_function(
'prod', 'Return the product of the values for the requested axis',
nanops.nanprod)
cls.product = cls.prod
cls.median = _make_stat_function(
'median', 'Return the median of the values for the requested axis',
nanops.nanmedian)
cls.max = _make_stat_function('max', """
This method returns the maximum of the values in the object. If you
want the *index* of the maximum, use ``idxmax``. This is the
equivalent of the ``numpy.ndarray`` method ``argmax``.""", nanops.nanmax)
cls.min = _make_stat_function('min', """
This method returns the minimum of the values in the object. If you
want the *index* of the minimum, use ``idxmin``. This is the
equivalent of the ``numpy.ndarray`` method ``argmin``.""", nanops.nanmin)
def _make_logical_function(name, desc, f):
@Substitution(outname=name, desc=desc)
@Appender(_bool_doc)
def logical_func(self, axis=None, bool_only=None, skipna=None,
level=None, **kwargs):
if skipna is None:
skipna = True
if axis is None:
axis = self._stat_axis_number
if level is not None:
if bool_only is not None:
raise NotImplementedError(
"Option bool_only is not implemented with option "
"level.")
return self._agg_by_level(name, axis=axis, level=level,
skipna=skipna)
return self._reduce(f, axis=axis, skipna=skipna,
numeric_only=bool_only, filter_type='bool',
name=name)
logical_func.__name__ = name
return logical_func
cls.any = _make_logical_function(
'any', 'Return whether any element is True over requested axis',
nanops.nanany)
cls.all = _make_logical_function(
'all', 'Return whether all elements are True over requested axis',
nanops.nanall)
@Substitution(outname='mad',
desc="Return the mean absolute deviation of the values "
"for the requested axis")
@Appender(_num_doc)
def mad(self, axis=None, skipna=None, level=None, **kwargs):
if skipna is None:
skipna = True
if axis is None:
axis = self._stat_axis_number
if level is not None:
return self._agg_by_level('mad', axis=axis, level=level,
skipna=skipna)
data = self._get_numeric_data()
if axis == 0:
demeaned = data - data.mean(axis=0)
else:
demeaned = data.sub(data.mean(axis=1), axis=0)
return np.abs(demeaned).mean(axis=axis, skipna=skipna)
cls.mad = mad
def _make_stat_function_ddof(name, desc, f):
@Substitution(outname=name, desc=desc)
@Appender(_num_doc)
def stat_func(self, axis=None, skipna=None, level=None, ddof=1,
**kwargs):
if skipna is None:
skipna = True
if axis is None:
axis = self._stat_axis_number
if level is not None:
return self._agg_by_level(name, axis=axis, level=level,
skipna=skipna, ddof=ddof)
return self._reduce(f, name, axis=axis,
skipna=skipna, ddof=ddof)
stat_func.__name__ = name
return stat_func
cls.sem = _make_stat_function_ddof(
'sem',
"Return unbiased standard error of the mean over "
"requested axis.\n\nNormalized by N-1 by default. "
"This can be changed using the ddof argument",
nanops.nansem)
cls.var = _make_stat_function_ddof(
'var',
"Return unbiased variance over requested "
"axis.\n\nNormalized by N-1 by default. "
"This can be changed using the ddof argument",
nanops.nanvar)
cls.std = _make_stat_function_ddof(
'std',
"Return unbiased standard deviation over requested "
"axis.\n\nNormalized by N-1 by default. "
"This can be changed using the ddof argument",
nanops.nanstd)
@Substitution(outname='compounded',
desc="Return the compound percentage of the values for "
"the requested axis")
@Appender(_num_doc)
def compound(self, axis=None, skipna=None, level=None, **kwargs):
if skipna is None:
skipna = True
return (1 + self).prod(axis=axis, skipna=skipna, level=level) - 1
cls.compound = compound
def _make_cum_function(name, accum_func, mask_a, mask_b):
@Substitution(outname=name)
@Appender("Return cumulative {0} over requested axis.".format(name)
+ _cnum_doc)
def func(self, axis=None, dtype=None, out=None, skipna=True,
**kwargs):
if axis is None:
axis = self._stat_axis_number
else:
axis = self._get_axis_number(axis)
y = _values_from_object(self).copy()
if skipna and issubclass(y.dtype.type,
(np.datetime64, np.timedelta64)):
result = accum_func(y, axis)
mask = isnull(self)
np.putmask(result, mask, pd.tslib.iNaT)
elif skipna and not issubclass(y.dtype.type, (np.integer, np.bool_)):
mask = isnull(self)
np.putmask(y, mask, mask_a)
result = accum_func(y, axis)
np.putmask(result, mask, mask_b)
else:
result = accum_func(y, axis)
d = self._construct_axes_dict()
d['copy'] = False
return self._constructor(result, **d).__finalize__(self)
func.__name__ = name
return func
cls.cummin = _make_cum_function(
'min', lambda y, axis: np.minimum.accumulate(y, axis),
np.inf, np.nan)
cls.cumsum = _make_cum_function(
'sum', lambda y, axis: y.cumsum(axis), 0., np.nan)
cls.cumprod = _make_cum_function(
'prod', lambda y, axis: y.cumprod(axis), 1., np.nan)
cls.cummax = _make_cum_function(
'max', lambda y, axis: np.maximum.accumulate(y, axis),
-np.inf, np.nan)
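        # How the cumulative helpers above treat NaN: missing values are first
        # replaced by a neutral element for the accumulator (mask_a: +inf for
        # cummin, 0 for cumsum, 1 for cumprod, -inf for cummax), the
        # accumulation runs, and the originally-missing positions are then
        # restored to NaN (mask_b).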
# install the indexers
for _name, _indexer in indexing.get_indexers_list():
NDFrame._create_indexer(_name, _indexer)
|
jantman/pyobd | refs/heads/master | obd_recorder.py | 9 | #!/usr/bin/env python
import obd_io
import serial
import platform
import obd_sensors
from datetime import datetime
import time
from obd_utils import scanSerial
class OBD_Recorder():
def __init__(self, path, log_items):
self.port = None
self.sensorlist = []
localtime = time.localtime(time.time())
filename = path+"bike-"+str(localtime[0])+"-"+str(localtime[1])+"-"+str(localtime[2])+"-"+str(localtime[3])+"-"+str(localtime[4])+"-"+str(localtime[5])+".log"
self.log_file = open(filename, "w", 128)
self.log_file.write("Time,RPM,MPH,Throttle,Load,Gear\n");
for item in log_items:
self.add_log_item(item)
        self.gear_ratios = [34/13.0, 39/21.0, 36/23.0, 27/20.0, 26/21.0, 25/22.0]
#log_formatter = logging.Formatter('%(asctime)s.%(msecs).03d,%(message)s', "%H:%M:%S")
def connect(self):
portnames = scanSerial()
#portnames = ['COM10']
print portnames
for port in portnames:
self.port = obd_io.OBDPort(port, None, 2, 2)
if(self.port.State == 0):
self.port.close()
self.port = None
else:
break
if(self.port):
print "Connected to "+self.port.port.name
def is_connected(self):
return self.port
def add_log_item(self, item):
for index, e in enumerate(obd_sensors.SENSORS):
if(item == e.shortname):
self.sensorlist.append(index)
print "Logging item: "+e.name
break
def record_data(self):
if(self.port is None):
return None
print "Logging started"
        while True:
localtime = datetime.now()
current_time = str(localtime.hour)+":"+str(localtime.minute)+":"+str(localtime.second)+"."+str(localtime.microsecond)
log_string = current_time
results = {}
for index in self.sensorlist:
(name, value, unit) = self.port.sensor(index)
log_string = log_string + ","+str(value)
                results[obd_sensors.SENSORS[index].shortname] = value
gear = self.calculate_gear(results["rpm"], results["speed"])
log_string = log_string + "," + str(gear)
self.log_file.write(log_string+"\n")
def calculate_gear(self, rpm, speed):
if speed == "" or speed == 0:
return 0
if rpm == "" or rpm == 0:
return 0
rps = rpm/60
mps = (speed*1.609*1000)/3600
        primary_gear = 85/46.0  # street triple
        final_drive = 47/16.0
        tyre_circumference = 1.978  # meters
current_gear_ratio = (rps*tyre_circumference)/(mps*primary_gear*final_drive)
print current_gear_ratio
gear = min((abs(current_gear_ratio - i), i) for i in self.gear_ratios)[1]
return gear
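    # Worked example of the ratio math above (hypothetical numbers): at
    # 6000 rpm and 60 km/h, rps = 100 and mps ~= 26.82, so
    # current_gear_ratio = (100 * 1.978) / (26.82 * (85/46.0) * (47/16.0))
    # ~= 1.36, closest to 27/20.0 = 1.35 in gear_ratios (the 4th gear's
    # ratio). Note that min(...) returns the matching ratio itself, and that
    # float literals keep Python 2 integer division from truncating it.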
logitems = ["rpm", "speed", "throttle_pos", "load"]
o = OBD_Recorder('/home/pi/logs/', logitems)
o.connect()
if not o.is_connected():
print "Not connected"
o.record_data()
|
elshize/qutebrowser | refs/heads/master | qutebrowser/commands/command.py | 3 | # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2014-2018 Florian Bruhin (The Compiler) <[email protected]>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Contains the Command class, a skeleton for a command."""
import inspect
import collections
import traceback
import typing
import attr
from qutebrowser.commands import cmdexc, argparser
from qutebrowser.utils import log, message, docutils, objreg, usertypes
from qutebrowser.utils import debug as debug_utils
from qutebrowser.misc import objects
@attr.s
class ArgInfo:
"""Information about an argument."""
win_id = attr.ib(False)
count = attr.ib(False)
hide = attr.ib(False)
metavar = attr.ib(None)
flag = attr.ib(None)
completion = attr.ib(None)
choices = attr.ib(None)
def __attrs_post_init__(self):
if self.win_id and self.count:
raise TypeError("Argument marked as both count/win_id!")
class Command:
"""Base skeleton for a command.
Attributes:
name: The main name of the command.
maxsplit: The maximum amount of splits to do for the commandline, or
None.
deprecated: False, or a string to describe why a command is deprecated.
desc: The description of the command.
handler: The handler function to call.
debug: Whether this is a debugging command (only shown with --debug).
parser: The ArgumentParser to use to parse this command.
flags_with_args: A list of flags which take an argument.
no_cmd_split: If true, ';;' to split sub-commands is ignored.
backend: Which backend the command works with (or None if it works with
both)
no_replace_variables: Don't replace variables like {url}
modes: The modes the command can be executed in.
_qute_args: The saved data from @cmdutils.argument
_count: The count set for the command.
_instance: The object to bind 'self' to.
_scope: The scope to get _instance for in the object registry.
"""
def __init__(self, *, handler, name, instance=None, maxsplit=None,
modes=None, not_modes=None, debug=False, deprecated=False,
no_cmd_split=False, star_args_optional=False, scope='global',
backend=None, no_replace_variables=False):
if modes is not None and not_modes is not None:
raise ValueError("Only modes or not_modes can be given!")
if modes is not None:
for m in modes:
if not isinstance(m, usertypes.KeyMode):
raise TypeError("Mode {} is no KeyMode member!".format(m))
self.modes = set(modes)
elif not_modes is not None:
for m in not_modes:
if not isinstance(m, usertypes.KeyMode):
raise TypeError("Mode {} is no KeyMode member!".format(m))
self.modes = set(usertypes.KeyMode).difference(not_modes)
else:
self.modes = set(usertypes.KeyMode)
if scope != 'global' and instance is None:
raise ValueError("Setting scope without setting instance makes "
"no sense!")
self.name = name
self.maxsplit = maxsplit
self.deprecated = deprecated
self._instance = instance
self._scope = scope
self._star_args_optional = star_args_optional
self.debug = debug
self.handler = handler
self.no_cmd_split = no_cmd_split
self.backend = backend
self.no_replace_variables = no_replace_variables
self.docparser = docutils.DocstringParser(handler)
self.parser = argparser.ArgumentParser(
name, description=self.docparser.short_desc,
epilog=self.docparser.long_desc)
self.parser.add_argument('-h', '--help', action=argparser.HelpAction,
default=argparser.SUPPRESS, nargs=0,
help=argparser.SUPPRESS)
self._check_func()
self.opt_args = collections.OrderedDict()
self.namespace = None
self._count = None
self.pos_args = []
self.desc = None
self.flags_with_args = []
# This is checked by future @cmdutils.argument calls so they fail
# (as they'd be silently ignored otherwise)
self._qute_args = getattr(self.handler, 'qute_args', {})
self.handler.qute_args = None
self._inspect_func()
def _check_prerequisites(self, win_id):
"""Check if the command is permitted to run currently.
Args:
win_id: The window ID the command is run in.
"""
mode_manager = objreg.get('mode-manager', scope='window',
window=win_id)
self.validate_mode(mode_manager.mode)
if self.backend is not None and objects.backend != self.backend:
raise cmdexc.PrerequisitesError(
"{}: Only available with {} "
"backend.".format(self.name, self.backend.name))
if self.deprecated:
message.warning('{} is deprecated - {}'.format(self.name,
self.deprecated))
def _check_func(self):
"""Make sure the function parameters don't violate any rules."""
signature = inspect.signature(self.handler)
if 'self' in signature.parameters and self._instance is None:
raise TypeError("{} is a class method, but instance was not "
"given!".format(self.name[0]))
elif 'self' not in signature.parameters and self._instance is not None:
raise TypeError("{} is not a class method, but instance was "
"given!".format(self.name[0]))
elif any(param.kind == inspect.Parameter.VAR_KEYWORD
for param in signature.parameters.values()):
raise TypeError("{}: functions with varkw arguments are not "
"supported!".format(self.name[0]))
def get_arg_info(self, param):
"""Get an ArgInfo tuple for the given inspect.Parameter."""
return self._qute_args.get(param.name, ArgInfo())
def get_pos_arg_info(self, pos):
"""Get an ArgInfo tuple for the given positional parameter."""
name = self.pos_args[pos][0]
return self._qute_args.get(name, ArgInfo())
def _inspect_special_param(self, param):
"""Check if the given parameter is a special one.
Args:
param: The inspect.Parameter to handle.
Return:
True if the parameter is special, False otherwise.
"""
arg_info = self.get_arg_info(param)
if arg_info.count:
if param.default is inspect.Parameter.empty:
raise TypeError("{}: handler has count parameter "
"without default!".format(self.name))
return True
elif arg_info.win_id:
return True
return False
def _inspect_func(self):
"""Inspect the function to get useful information from it.
Sets instance attributes (desc, type_conv, name_conv) based on the
information.
Return:
How many user-visible arguments the command has.
"""
signature = inspect.signature(self.handler)
doc = inspect.getdoc(self.handler)
if doc is not None:
self.desc = doc.splitlines()[0].strip()
else:
self.desc = ""
for param in signature.parameters.values():
# https://docs.python.org/3/library/inspect.html#inspect.Parameter.kind
# "Python has no explicit syntax for defining positional-only
# parameters, but many built-in and extension module functions
# (especially those that accept only one or two parameters) accept
# them."
assert param.kind != inspect.Parameter.POSITIONAL_ONLY
if param.name == 'self':
continue
if self._inspect_special_param(param):
continue
if (param.kind == inspect.Parameter.KEYWORD_ONLY and
param.default is inspect.Parameter.empty):
raise TypeError("{}: handler has keyword only argument {!r} "
"without default!".format(
self.name, param.name))
typ = self._get_type(param)
is_bool = typ is bool
kwargs = self._param_to_argparse_kwargs(param, is_bool)
args = self._param_to_argparse_args(param, is_bool)
callsig = debug_utils.format_call(self.parser.add_argument, args,
kwargs, full=False)
log.commands.vdebug('Adding arg {} of type {} -> {}'.format(
param.name, typ, callsig))
self.parser.add_argument(*args, **kwargs)
return signature.parameters.values()
def _param_to_argparse_kwargs(self, param, is_bool):
"""Get argparse keyword arguments for a parameter.
Args:
param: The inspect.Parameter object to get the args for.
is_bool: Whether the parameter is a boolean.
Return:
A kwargs dict.
"""
kwargs = {}
try:
kwargs['help'] = self.docparser.arg_descs[param.name]
except KeyError:
pass
kwargs['dest'] = param.name
arg_info = self.get_arg_info(param)
if is_bool:
kwargs['action'] = 'store_true'
else:
if arg_info.metavar is not None:
kwargs['metavar'] = arg_info.metavar
else:
kwargs['metavar'] = argparser.arg_name(param.name)
if param.kind == inspect.Parameter.VAR_POSITIONAL:
kwargs['nargs'] = '*' if self._star_args_optional else '+'
elif param.kind == inspect.Parameter.KEYWORD_ONLY:
kwargs['default'] = param.default
elif not is_bool and param.default is not inspect.Parameter.empty:
kwargs['default'] = param.default
kwargs['nargs'] = '?'
return kwargs
def _param_to_argparse_args(self, param, is_bool):
"""Get argparse positional arguments for a parameter.
Args:
param: The inspect.Parameter object to get the args for.
is_bool: Whether the parameter is a boolean.
Return:
A list of args.
"""
args = []
name = argparser.arg_name(param.name)
arg_info = self.get_arg_info(param)
if arg_info.flag is not None:
shortname = arg_info.flag
else:
shortname = name[0]
if len(shortname) != 1:
raise ValueError("Flag '{}' of parameter {} (command {}) must be "
"exactly 1 char!".format(shortname, name,
self.name))
if is_bool or param.kind == inspect.Parameter.KEYWORD_ONLY:
long_flag = '--{}'.format(name)
short_flag = '-{}'.format(shortname)
args.append(long_flag)
args.append(short_flag)
self.opt_args[param.name] = long_flag, short_flag
if not is_bool:
self.flags_with_args += [short_flag, long_flag]
else:
if not arg_info.hide:
self.pos_args.append((param.name, name))
return args
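    # Illustrative mapping (hypothetical parameter, following the logic above
    # and assuming argparser.arg_name turns underscores into dashes): a
    # keyword-only handler argument ``max_items=5`` is exposed as the flags
    # ``--max-items`` and ``-m``, while a plain positional argument gets no
    # flags and is recorded in self.pos_args instead.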
def _get_type(self, param):
"""Get the type of an argument from its default value or annotation.
Args:
param: The inspect.Parameter to look at.
"""
arginfo = self.get_arg_info(param)
if param.annotation is not inspect.Parameter.empty:
return param.annotation
elif param.default not in [None, inspect.Parameter.empty]:
return type(param.default)
elif arginfo.count or arginfo.win_id or param.kind in [
inspect.Parameter.VAR_POSITIONAL,
inspect.Parameter.VAR_KEYWORD]:
return None
else:
return str
def _get_self_arg(self, win_id, param, args):
"""Get the self argument for a function call.
Arguments:
win_id: The window id this command should be executed in.
param: The count parameter.
args: The positional argument list. Gets modified directly.
"""
assert param.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD
if self._scope == 'global':
tab_id = None
win_id = None
elif self._scope == 'tab':
tab_id = 'current'
elif self._scope == 'window':
tab_id = None
else:
raise ValueError("Invalid scope {}!".format(self._scope))
obj = objreg.get(self._instance, scope=self._scope, window=win_id,
tab=tab_id)
args.append(obj)
def _get_count_arg(self, param, args, kwargs):
"""Add the count argument to a function call.
Arguments:
param: The count parameter.
args: The positional argument list. Gets modified directly.
kwargs: The keyword argument dict. Gets modified directly.
"""
if param.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD:
if self._count is not None:
args.append(self._count)
else:
args.append(param.default)
elif param.kind == inspect.Parameter.KEYWORD_ONLY:
if self._count is not None:
kwargs[param.name] = self._count
else:
raise TypeError("{}: invalid parameter type {} for argument "
"{!r}!".format(self.name, param.kind, param.name))
def _get_win_id_arg(self, win_id, param, args, kwargs):
"""Add the win_id argument to a function call.
Arguments:
win_id: The window ID to add.
param: The count parameter.
args: The positional argument list. Gets modified directly.
kwargs: The keyword argument dict. Gets modified directly.
"""
if param.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD:
args.append(win_id)
elif param.kind == inspect.Parameter.KEYWORD_ONLY:
kwargs[param.name] = win_id
else:
raise TypeError("{}: invalid parameter type {} for argument "
"{!r}!".format(self.name, param.kind, param.name))
def _get_param_value(self, param):
"""Get the converted value for an inspect.Parameter."""
value = getattr(self.namespace, param.name)
typ = self._get_type(param)
if isinstance(typ, tuple):
raise TypeError("{}: Legacy tuple type annotation!".format(
self.name))
elif getattr(typ, '__origin__', None) is typing.Union or (
# Older Python 3.5 patch versions
# pylint: disable=no-member,useless-suppression
hasattr(typing, 'UnionMeta') and
isinstance(typ, typing.UnionMeta)):
# this is... slightly evil, I know
try:
types = list(typ.__args__)
except AttributeError:
# Older Python 3.5 patch versions
types = list(typ.__union_params__)
# pylint: enable=no-member,useless-suppression
if param.default is not inspect.Parameter.empty:
types.append(type(param.default))
choices = self.get_arg_info(param).choices
value = argparser.multitype_conv(param, types, value,
str_choices=choices)
elif typ is str:
choices = self.get_arg_info(param).choices
value = argparser.type_conv(param, typ, value, str_choices=choices)
elif typ is bool: # no type conversion for flags
assert isinstance(value, bool)
elif typ is None:
pass
else:
value = argparser.type_conv(param, typ, value)
return value
def _get_call_args(self, win_id):
"""Get arguments for a function call.
Args:
win_id: The window id this command should be executed in.
Return:
An (args, kwargs) tuple.
"""
args = []
kwargs = {}
signature = inspect.signature(self.handler)
for i, param in enumerate(signature.parameters.values()):
arg_info = self.get_arg_info(param)
if i == 0 and self._instance is not None:
# Special case for 'self'.
self._get_self_arg(win_id, param, args)
continue
elif arg_info.count:
# Special case for count parameter.
self._get_count_arg(param, args, kwargs)
continue
elif arg_info.win_id:
# Special case for win_id parameter.
self._get_win_id_arg(win_id, param, args, kwargs)
continue
value = self._get_param_value(param)
if param.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD:
args.append(value)
elif param.kind == inspect.Parameter.VAR_POSITIONAL:
if value is not None:
args += value
elif param.kind == inspect.Parameter.KEYWORD_ONLY:
kwargs[param.name] = value
else:
raise TypeError("{}: Invalid parameter type {} for argument "
"'{}'!".format(
self.name, param.kind, param.name))
return args, kwargs
def run(self, win_id, args=None, count=None):
"""Run the command.
Note we don't catch CommandError here as it might happen async.
Args:
win_id: The window ID the command is run in.
args: Arguments to the command.
count: Command repetition count.
"""
dbgout = ["command called:", self.name]
if args:
dbgout.append(str(args))
elif args is None:
args = []
if count is not None:
dbgout.append("(count={})".format(count))
log.commands.debug(' '.join(dbgout))
try:
self.namespace = self.parser.parse_args(args)
except argparser.ArgumentParserError as e:
message.error('{}: {}'.format(self.name, e),
stack=traceback.format_exc())
return
except argparser.ArgumentParserExit as e:
log.commands.debug("argparser exited with status {}: {}".format(
e.status, e))
return
self._count = count
self._check_prerequisites(win_id)
posargs, kwargs = self._get_call_args(win_id)
log.commands.debug('Calling {}'.format(
debug_utils.format_call(self.handler, posargs, kwargs)))
self.handler(*posargs, **kwargs)
def validate_mode(self, mode):
"""Raise cmdexc.PrerequisitesError unless allowed in the given mode.
Args:
mode: The usertypes.KeyMode to check.
"""
if mode not in self.modes:
mode_names = '/'.join(sorted(m.name for m in self.modes))
raise cmdexc.PrerequisitesError(
"{}: This command is only allowed in {} mode, not {}.".format(
self.name, mode_names, mode.name))
def takes_count(self):
"""Return true iff this command can take a count argument."""
        return any(arg.count for arg in self._qute_args.values())
|
rebost/django | refs/heads/master | django/contrib/localflavor/si/forms.py | 8 | """
Slovenian specific form helpers.
"""
from __future__ import absolute_import, unicode_literals
import datetime
import re
from django.contrib.localflavor.si.si_postalcodes import SI_POSTALCODES_CHOICES
from django.core.validators import EMPTY_VALUES
from django.forms import ValidationError
from django.forms.fields import CharField, Select, ChoiceField
from django.utils.translation import ugettext_lazy as _
class SIEMSOField(CharField):
"""A form for validating Slovenian personal identification number.
Additionally stores gender, nationality and birthday to self.info dictionary.
"""
default_error_messages = {
'invalid': _('This field should contain exactly 13 digits.'),
'date': _('The first 7 digits of the EMSO must represent a valid past date.'),
'checksum': _('The EMSO is not valid.'),
}
emso_regex = re.compile('^(\d{2})(\d{2})(\d{3})(\d{2})(\d{3})(\d)$')
def clean(self, value):
super(SIEMSOField, self).clean(value)
if value in EMPTY_VALUES:
return ''
value = value.strip()
m = self.emso_regex.match(value)
if m is None:
raise ValidationError(self.default_error_messages['invalid'])
# Validate EMSO
s = 0
int_values = [int(i) for i in value]
        for a, b in zip(int_values, list(range(7, 1, -1)) * 2):
s += a * b
chk = s % 11
if chk == 0:
K = 0
else:
K = 11 - chk
if K == 10 or int_values[-1] != K:
raise ValidationError(self.default_error_messages['checksum'])
# Extract extra info in the identification number
day, month, year, nationality, gender, chksum = [int(i) for i in m.groups()]
if year < 890:
year += 2000
else:
year += 1000
# validate birthday
try:
birthday = datetime.date(year, month, day)
except ValueError:
raise ValidationError(self.error_messages['date'])
if datetime.date.today() < birthday:
raise ValidationError(self.error_messages['date'])
self.info = {
'gender': gender < 500 and 'male' or 'female',
'birthdate': birthday,
'nationality': nationality,
}
return value
class SITaxNumberField(CharField):
"""Slovenian tax number field.
Valid input is SIXXXXXXXX or XXXXXXXX where X is a number.
"""
default_error_messages = {
'invalid': _('Enter a valid tax number in form SIXXXXXXXX'),
}
sitax_regex = re.compile('^(?:SI)?([1-9]\d{7})$')
def clean(self, value):
super(SITaxNumberField, self).clean(value)
if value in EMPTY_VALUES:
return ''
value = value.strip()
m = self.sitax_regex.match(value)
if m is None:
raise ValidationError(self.default_error_messages['invalid'])
value = m.groups()[0]
# Validate Tax number
s = 0
int_values = [int(i) for i in value]
for a, b in zip(int_values, range(8, 1, -1)):
s += a * b
chk = 11 - (s % 11)
if chk == 10:
chk = 0
if int_values[-1] != chk:
raise ValidationError(self.default_error_messages['invalid'])
return value
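    # Worked checksum example (hypothetical number, not a real tax ID): for
    # "12345679" the first seven digits weighted by 8..2 give
    # 1*8 + 2*7 + 3*6 + 4*5 + 5*4 + 6*3 + 7*2 = 112, 112 % 11 = 2 and
    # 11 - 2 = 9, which matches the final digit, so "SI12345679" validates.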
class SIPostalCodeField(ChoiceField):
"""Slovenian post codes field.
"""
def __init__(self, *args, **kwargs):
kwargs.setdefault('choices', SI_POSTALCODES_CHOICES)
super(SIPostalCodeField, self).__init__(*args, **kwargs)
class SIPostalCodeSelect(Select):
"""A Select widget that uses Slovenian postal codes as its choices.
"""
def __init__(self, attrs=None):
super(SIPostalCodeSelect, self).__init__(attrs,
choices=SI_POSTALCODES_CHOICES)
class SIPhoneNumberField(CharField):
"""Slovenian phone number field.
Phone number must contain at least local area code.
Country code can be present.
Examples:
* +38640XXXXXX
* 0038640XXXXXX
* 040XXXXXX
* 01XXXXXX
* 0590XXXXX
"""
default_error_messages = {
'invalid': _('Enter phone number in form +386XXXXXXXX or 0XXXXXXXX.'),
}
phone_regex = re.compile('^(?:(?:00|\+)386|0)(\d{7,8})$')
def clean(self, value):
super(SIPhoneNumberField, self).clean(value)
if value in EMPTY_VALUES:
return ''
value = value.replace(' ', '').replace('-', '').replace('/', '')
m = self.phone_regex.match(value)
if m is None:
raise ValidationError(self.default_error_messages['invalid'])
return m.groups()[0]
|
mvaled/sentry | refs/heads/master | src/bitfield/compat.py | 3 | from __future__ import absolute_import
__all__ = ("bitand", "bitor")
def bitand(a, b):
return a.bitand(b)
def bitor(a, b):
return a.bitor(b)
|
DaanHoogland/cloudstack | refs/heads/patch-2 | tools/marvin/marvin/settings.py | 8 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# Settings for the XML test runner
# Use this setting to choose between a verbose and a non-verbose output.
TEST_OUTPUT_VERBOSE = 2
# If your test methods contain docstrings, you can display such docstrings
# instead of displaying the test name (ex: module.TestCase.test_method).
# In order to use this feature, you have to enable verbose output by setting
# TEST_OUTPUT_VERBOSE = 2.
TEST_OUTPUT_DESCRIPTIONS = True
|
tangrams/yaml-cpp | refs/heads/master | test/gtest-1.8.0/googletest/test/gtest_env_var_test.py | 343 | #!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Verifies that Google Test correctly parses environment variables."""
__author__ = '[email protected] (Zhanyong Wan)'
import os
import gtest_test_utils
IS_WINDOWS = os.name == 'nt'
IS_LINUX = os.name == 'posix' and os.uname()[0] == 'Linux'
COMMAND = gtest_test_utils.GetTestExecutablePath('gtest_env_var_test_')
environ = os.environ.copy()
def AssertEq(expected, actual):
if expected != actual:
print('Expected: %s' % (expected,))
print(' Actual: %s' % (actual,))
raise AssertionError
def SetEnvVar(env_var, value):
"""Sets the env variable to 'value'; unsets it when 'value' is None."""
if value is not None:
environ[env_var] = value
elif env_var in environ:
del environ[env_var]
def GetFlag(flag):
"""Runs gtest_env_var_test_ and returns its output."""
args = [COMMAND]
if flag is not None:
args += [flag]
return gtest_test_utils.Subprocess(args, env=environ).output
def TestFlag(flag, test_val, default_val):
"""Verifies that the given flag is affected by the corresponding env var."""
env_var = 'GTEST_' + flag.upper()
SetEnvVar(env_var, test_val)
AssertEq(test_val, GetFlag(flag))
SetEnvVar(env_var, None)
AssertEq(default_val, GetFlag(flag))
class GTestEnvVarTest(gtest_test_utils.TestCase):
def testEnvVarAffectsFlag(self):
"""Tests that environment variable should affect the corresponding flag."""
TestFlag('break_on_failure', '1', '0')
TestFlag('color', 'yes', 'auto')
TestFlag('filter', 'FooTest.Bar', '*')
SetEnvVar('XML_OUTPUT_FILE', None) # For 'output' test
TestFlag('output', 'xml:tmp/foo.xml', '')
TestFlag('print_time', '0', '1')
TestFlag('repeat', '999', '1')
TestFlag('throw_on_failure', '1', '0')
TestFlag('death_test_style', 'threadsafe', 'fast')
TestFlag('catch_exceptions', '0', '1')
if IS_LINUX:
TestFlag('death_test_use_fork', '1', '0')
TestFlag('stack_trace_depth', '0', '100')
def testXmlOutputFile(self):
"""Tests that $XML_OUTPUT_FILE affects the output flag."""
SetEnvVar('GTEST_OUTPUT', None)
SetEnvVar('XML_OUTPUT_FILE', 'tmp/bar.xml')
AssertEq('xml:tmp/bar.xml', GetFlag('output'))
def testXmlOutputFileOverride(self):
"""Tests that $XML_OUTPUT_FILE is overridden by $GTEST_OUTPUT"""
SetEnvVar('GTEST_OUTPUT', 'xml:tmp/foo.xml')
SetEnvVar('XML_OUTPUT_FILE', 'tmp/bar.xml')
AssertEq('xml:tmp/foo.xml', GetFlag('output'))
if __name__ == '__main__':
gtest_test_utils.Main()
|
MemeticParadigm/TensorFlow | refs/heads/master | tensorflow/models/rnn/seq2seq.py | 2 | """Library for creating sequence-to-sequence models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.python.platform
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
from tensorflow.models.rnn import linear
from tensorflow.models.rnn import rnn
from tensorflow.models.rnn import rnn_cell
def rnn_decoder(decoder_inputs, initial_state, cell, loop_function=None,
scope=None):
"""RNN decoder for the sequence-to-sequence model.
Args:
decoder_inputs: a list of 2D Tensors [batch_size x cell.input_size].
initial_state: 2D Tensor with shape [batch_size x cell.state_size].
cell: rnn_cell.RNNCell defining the cell function and size.
loop_function: if not None, this function will be applied to i-th output
in order to generate i+1-th input, and decoder_inputs will be ignored,
except for the first element ("GO" symbol). This can be used for decoding,
but also for training to emulate http://arxiv.org/pdf/1506.03099v2.pdf.
Signature -- loop_function(prev, i) = next
* prev is a 2D Tensor of shape [batch_size x cell.output_size],
* i is an integer, the step number (when advanced control is needed),
* next is a 2D Tensor of shape [batch_size x cell.input_size].
scope: VariableScope for the created subgraph; defaults to "rnn_decoder".
Returns:
outputs: A list of the same length as decoder_inputs of 2D Tensors with
shape [batch_size x cell.output_size] containing generated outputs.
states: The state of each cell in each time-step. This is a list with
length len(decoder_inputs) -- one item for each time-step.
Each item is a 2D Tensor of shape [batch_size x cell.state_size].
(Note that in some cases, like basic RNN cell or GRU cell, outputs and
states can be the same. They are different for LSTM cells though.)
"""
with tf.variable_scope(scope or "rnn_decoder"):
states = [initial_state]
outputs = []
prev = None
for i in xrange(len(decoder_inputs)):
inp = decoder_inputs[i]
if loop_function is not None and prev is not None:
with tf.variable_scope("loop_function", reuse=True):
# We do not propagate gradients over the loop function.
inp = tf.stop_gradient(loop_function(prev, i))
if i > 0:
tf.get_variable_scope().reuse_variables()
output, new_state = cell(inp, states[-1])
outputs.append(output)
states.append(new_state)
if loop_function is not None:
prev = tf.stop_gradient(output)
return outputs, states
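# Note on loop_function (restating the contract documented above with a
# hypothetical greedy decoder in mind): loop_function(prev, i) receives the
# previous cell output [batch_size x cell.output_size] and must return the
# next input [batch_size x cell.input_size]; gradients are explicitly stopped
# through it.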
def basic_rnn_seq2seq(
encoder_inputs, decoder_inputs, cell, dtype=tf.float32, scope=None):
"""Basic RNN sequence-to-sequence model.
This model first runs an RNN to encode encoder_inputs into a state vector, and
then runs decoder, initialized with the last encoder state, on decoder_inputs.
Encoder and decoder use the same RNN cell type, but don't share parameters.
Args:
encoder_inputs: a list of 2D Tensors [batch_size x cell.input_size].
decoder_inputs: a list of 2D Tensors [batch_size x cell.input_size].
cell: rnn_cell.RNNCell defining the cell function and size.
dtype: The dtype of the initial state of the RNN cell (default: tf.float32).
scope: VariableScope for the created subgraph; default: "basic_rnn_seq2seq".
Returns:
outputs: A list of the same length as decoder_inputs of 2D Tensors with
shape [batch_size x cell.output_size] containing the generated outputs.
states: The state of each decoder cell in each time-step. This is a list
with length len(decoder_inputs) -- one item for each time-step.
Each item is a 2D Tensor of shape [batch_size x cell.state_size].
"""
with tf.variable_scope(scope or "basic_rnn_seq2seq"):
_, enc_states = rnn.rnn(cell, encoder_inputs, dtype=dtype)
return rnn_decoder(decoder_inputs, enc_states[-1], cell)
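# Illustrative usage sketch (hypothetical shapes; the placeholders, batch_size
# and cell below are assumptions, not part of this module):
#
#   cell = rnn_cell.BasicLSTMCell(128)
#   enc_inp = [tf.placeholder(tf.float32, [batch_size, 128]) for _ in range(8)]
#   dec_inp = [tf.placeholder(tf.float32, [batch_size, 128]) for _ in range(8)]
#   outputs, states = basic_rnn_seq2seq(enc_inp, dec_inp, cell)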
def tied_rnn_seq2seq(encoder_inputs, decoder_inputs, cell,
loop_function=None, dtype=tf.float32, scope=None):
"""RNN sequence-to-sequence model with tied encoder and decoder parameters.
This model first runs an RNN to encode encoder_inputs into a state vector, and
then runs decoder, initialized with the last encoder state, on decoder_inputs.
Encoder and decoder use the same RNN cell and share parameters.
Args:
encoder_inputs: a list of 2D Tensors [batch_size x cell.input_size].
decoder_inputs: a list of 2D Tensors [batch_size x cell.input_size].
cell: rnn_cell.RNNCell defining the cell function and size.
loop_function: if not None, this function will be applied to i-th output
in order to generate i+1-th input, and decoder_inputs will be ignored,
except for the first element ("GO" symbol), see rnn_decoder for details.
dtype: The dtype of the initial state of the rnn cell (default: tf.float32).
scope: VariableScope for the created subgraph; default: "tied_rnn_seq2seq".
Returns:
outputs: A list of the same length as decoder_inputs of 2D Tensors with
shape [batch_size x cell.output_size] containing the generated outputs.
states: The state of each decoder cell in each time-step. This is a list
with length len(decoder_inputs) -- one item for each time-step.
Each item is a 2D Tensor of shape [batch_size x cell.state_size].
"""
with tf.variable_scope("combined_tied_rnn_seq2seq"):
scope = scope or "tied_rnn_seq2seq"
_, enc_states = rnn.rnn(
cell, encoder_inputs, dtype=dtype, scope=scope)
tf.get_variable_scope().reuse_variables()
return rnn_decoder(decoder_inputs, enc_states[-1], cell,
loop_function=loop_function, scope=scope)
def embedding_rnn_decoder(decoder_inputs, initial_state, cell, num_symbols,
output_projection=None, feed_previous=False,
scope=None):
"""RNN decoder with embedding and a pure-decoding option.
Args:
decoder_inputs: a list of 1D batch-sized int32-Tensors (decoder inputs).
initial_state: 2D Tensor [batch_size x cell.state_size].
cell: rnn_cell.RNNCell defining the cell function.
num_symbols: integer, how many symbols come into the embedding.
output_projection: None or a pair (W, B) of output projection weights and
biases; W has shape [cell.output_size x num_symbols] and B has
shape [num_symbols]; if provided and feed_previous=True, each fed
previous output will first be multiplied by W and added B.
feed_previous: Boolean; if True, only the first of decoder_inputs will be
used (the "GO" symbol), and all other decoder inputs will be generated by:
next = embedding_lookup(embedding, argmax(previous_output)),
In effect, this implements a greedy decoder. It can also be used
during training to emulate http://arxiv.org/pdf/1506.03099v2.pdf.
If False, decoder_inputs are used as given (the standard decoder case).
scope: VariableScope for the created subgraph; defaults to
"embedding_rnn_decoder".
Returns:
outputs: A list of the same length as decoder_inputs of 2D Tensors with
shape [batch_size x cell.output_size] containing the generated outputs.
states: The state of each decoder cell in each time-step. This is a list
with length len(decoder_inputs) -- one item for each time-step.
Each item is a 2D Tensor of shape [batch_size x cell.state_size].
Raises:
ValueError: when output_projection has the wrong shape.
"""
if output_projection is not None:
proj_weights = tf.convert_to_tensor(output_projection[0], dtype=tf.float32)
proj_weights.get_shape().assert_is_compatible_with([cell.output_size,
num_symbols])
proj_biases = tf.convert_to_tensor(output_projection[1], dtype=tf.float32)
proj_biases.get_shape().assert_is_compatible_with([num_symbols])
with tf.variable_scope(scope or "embedding_rnn_decoder"):
with tf.device("/cpu:0"):
embedding = tf.get_variable("embedding", [num_symbols, cell.input_size])
def extract_argmax_and_embed(prev, _):
"""Loop_function that extracts the symbol from prev and embeds it."""
if output_projection is not None:
prev = tf.nn.xw_plus_b(prev, output_projection[0], output_projection[1])
prev_symbol = tf.stop_gradient(tf.argmax(prev, 1))
return tf.nn.embedding_lookup(embedding, prev_symbol)
loop_function = None
if feed_previous:
loop_function = extract_argmax_and_embed
emb_inp = [tf.nn.embedding_lookup(embedding, i) for i in decoder_inputs]
return rnn_decoder(emb_inp, initial_state, cell,
loop_function=loop_function)
def embedding_rnn_seq2seq(encoder_inputs, decoder_inputs, cell,
num_encoder_symbols, num_decoder_symbols,
output_projection=None, feed_previous=False,
dtype=tf.float32, scope=None):
"""Embedding RNN sequence-to-sequence model.
This model first embeds encoder_inputs by a newly created embedding (of shape
[num_encoder_symbols x cell.input_size]). Then it runs an RNN to encode
embedded encoder_inputs into a state vector. Next, it embeds decoder_inputs
by another newly created embedding (of shape [num_decoder_symbols x
cell.input_size]). Then it runs RNN decoder, initialized with the last
encoder state, on embedded decoder_inputs.
Args:
encoder_inputs: a list of 2D Tensors [batch_size x cell.input_size].
decoder_inputs: a list of 2D Tensors [batch_size x cell.input_size].
cell: rnn_cell.RNNCell defining the cell function and size.
num_encoder_symbols: integer; number of symbols on the encoder side.
num_decoder_symbols: integer; number of symbols on the decoder side.
output_projection: None or a pair (W, B) of output projection weights and
biases; W has shape [cell.output_size x num_decoder_symbols] and B has
shape [num_decoder_symbols]; if provided and feed_previous=True, each
fed previous output will first be multiplied by W and added B.
feed_previous: Boolean or scalar Boolean Tensor; if True, only the first
of decoder_inputs will be used (the "GO" symbol), and all other decoder
inputs will be taken from previous outputs (as in embedding_rnn_decoder).
If False, decoder_inputs are used as given (the standard decoder case).
dtype: The dtype of the initial state for both the encoder and encoder
rnn cells (default: tf.float32).
scope: VariableScope for the created subgraph; defaults to
"embedding_rnn_seq2seq"
Returns:
outputs: A list of the same length as decoder_inputs of 2D Tensors with
shape [batch_size x num_decoder_symbols] containing the generated outputs.
states: The state of each decoder cell in each time-step. This is a list
with length len(decoder_inputs) -- one item for each time-step.
Each item is a 2D Tensor of shape [batch_size x cell.state_size].
"""
with tf.variable_scope(scope or "embedding_rnn_seq2seq"):
# Encoder.
encoder_cell = rnn_cell.EmbeddingWrapper(cell, num_encoder_symbols)
_, encoder_states = rnn.rnn(encoder_cell, encoder_inputs, dtype=dtype)
# Decoder.
if output_projection is None:
cell = rnn_cell.OutputProjectionWrapper(cell, num_decoder_symbols)
if isinstance(feed_previous, bool):
return embedding_rnn_decoder(decoder_inputs, encoder_states[-1], cell,
num_decoder_symbols, output_projection,
feed_previous)
else: # If feed_previous is a Tensor, we construct 2 graphs and use cond.
outputs1, states1 = embedding_rnn_decoder(
decoder_inputs, encoder_states[-1], cell, num_decoder_symbols,
output_projection, True)
tf.get_variable_scope().reuse_variables()
outputs2, states2 = embedding_rnn_decoder(
decoder_inputs, encoder_states[-1], cell, num_decoder_symbols,
output_projection, False)
outputs = tf.control_flow_ops.cond(feed_previous,
lambda: outputs1, lambda: outputs2)
states = tf.control_flow_ops.cond(feed_previous,
lambda: states1, lambda: states2)
return outputs, states
def embedding_tied_rnn_seq2seq(encoder_inputs, decoder_inputs, cell,
num_symbols, output_projection=None,
feed_previous=False, dtype=tf.float32,
scope=None):
"""Embedding RNN sequence-to-sequence model with tied (shared) parameters.
This model first embeds encoder_inputs by a newly created embedding (of shape
[num_symbols x cell.input_size]). Then it runs an RNN to encode embedded
encoder_inputs into a state vector. Next, it embeds decoder_inputs using
the same embedding. Then it runs RNN decoder, initialized with the last
encoder state, on embedded decoder_inputs.
Args:
encoder_inputs: a list of 2D Tensors [batch_size x cell.input_size].
decoder_inputs: a list of 2D Tensors [batch_size x cell.input_size].
cell: rnn_cell.RNNCell defining the cell function and size.
num_symbols: integer; number of symbols for both encoder and decoder.
output_projection: None or a pair (W, B) of output projection weights and
biases; W has shape [cell.output_size x num_symbols] and B has
shape [num_symbols]; if provided and feed_previous=True, each
fed previous output will first be multiplied by W and added B.
feed_previous: Boolean or scalar Boolean Tensor; if True, only the first
of decoder_inputs will be used (the "GO" symbol), and all other decoder
inputs will be taken from previous outputs (as in embedding_rnn_decoder).
If False, decoder_inputs are used as given (the standard decoder case).
dtype: The dtype to use for the initial RNN states (default: tf.float32).
scope: VariableScope for the created subgraph; defaults to
"embedding_tied_rnn_seq2seq".
Returns:
outputs: A list of the same length as decoder_inputs of 2D Tensors with
shape [batch_size x num_symbols] containing the generated outputs.
states: The state of each decoder cell in each time-step. This is a list
with length len(decoder_inputs) -- one item for each time-step.
Each item is a 2D Tensor of shape [batch_size x cell.state_size].
Raises:
ValueError: when output_projection has the wrong shape.
"""
if output_projection is not None:
proj_weights = tf.convert_to_tensor(output_projection[0], dtype=dtype)
proj_weights.get_shape().assert_is_compatible_with([cell.output_size,
num_symbols])
proj_biases = tf.convert_to_tensor(output_projection[1], dtype=dtype)
proj_biases.get_shape().assert_is_compatible_with([num_symbols])
with tf.variable_scope(scope or "embedding_tied_rnn_seq2seq"):
with tf.device("/cpu:0"):
embedding = tf.get_variable("embedding", [num_symbols, cell.input_size])
emb_encoder_inputs = [tf.nn.embedding_lookup(embedding, x)
for x in encoder_inputs]
emb_decoder_inputs = [tf.nn.embedding_lookup(embedding, x)
for x in decoder_inputs]
def extract_argmax_and_embed(prev, _):
"""Loop_function that extracts the symbol from prev and embeds it."""
if output_projection is not None:
prev = tf.nn.xw_plus_b(prev, output_projection[0], output_projection[1])
prev_symbol = tf.stop_gradient(tf.argmax(prev, 1))
return tf.nn.embedding_lookup(embedding, prev_symbol)
if output_projection is None:
cell = rnn_cell.OutputProjectionWrapper(cell, num_symbols)
if isinstance(feed_previous, bool):
loop_function = extract_argmax_and_embed if feed_previous else None
return tied_rnn_seq2seq(emb_encoder_inputs, emb_decoder_inputs, cell,
loop_function=loop_function, dtype=dtype)
else: # If feed_previous is a Tensor, we construct 2 graphs and use cond.
outputs1, states1 = tied_rnn_seq2seq(
emb_encoder_inputs, emb_decoder_inputs, cell,
loop_function=extract_argmax_and_embed, dtype=dtype)
tf.get_variable_scope().reuse_variables()
outputs2, states2 = tied_rnn_seq2seq(
emb_encoder_inputs, emb_decoder_inputs, cell, dtype=dtype)
outputs = tf.control_flow_ops.cond(feed_previous,
lambda: outputs1, lambda: outputs2)
states = tf.control_flow_ops.cond(feed_previous,
lambda: states1, lambda: states2)
return outputs, states
def attention_decoder(decoder_inputs, initial_state, attention_states, cell,
output_size=None, num_heads=1, loop_function=None,
dtype=tf.float32, scope=None):
"""RNN decoder with attention for the sequence-to-sequence model.
Args:
decoder_inputs: a list of 2D Tensors [batch_size x cell.input_size].
initial_state: 2D Tensor [batch_size x cell.state_size].
attention_states: 3D Tensor [batch_size x attn_length x attn_size].
cell: rnn_cell.RNNCell defining the cell function and size.
output_size: size of the output vectors; if None, we use cell.output_size.
num_heads: number of attention heads that read from attention_states.
loop_function: if not None, this function will be applied to i-th output
in order to generate i+1-th input, and decoder_inputs will be ignored,
except for the first element ("GO" symbol). This can be used for decoding,
but also for training to emulate http://arxiv.org/pdf/1506.03099v2.pdf.
Signature -- loop_function(prev, i) = next
* prev is a 2D Tensor of shape [batch_size x cell.output_size],
* i is an integer, the step number (when advanced control is needed),
* next is a 2D Tensor of shape [batch_size x cell.input_size].
dtype: The dtype to use for the RNN initial state (default: tf.float32).
scope: VariableScope for the created subgraph; default: "attention_decoder".
Returns:
outputs: A list of the same length as decoder_inputs of 2D Tensors of shape
[batch_size x output_size]. These represent the generated outputs.
Output i is computed from input i (which is either i-th decoder_inputs or
loop_function(output {i-1}, i)) as follows. First, we run the cell
on a combination of the input and previous attention masks:
cell_output, new_state = cell(linear(input, prev_attn), prev_state).
Then, we calculate new attention masks:
new_attn = softmax(V^T * tanh(W * attention_states + U * new_state))
and then we calculate the output:
output = linear(cell_output, new_attn).
states: The state of each decoder cell in each time-step. This is a list
with length len(decoder_inputs) -- one item for each time-step.
Each item is a 2D Tensor of shape [batch_size x cell.state_size].
Raises:
ValueError: when num_heads is not positive, there are no inputs, or shapes
of attention_states are not set.
"""
if not decoder_inputs:
raise ValueError("Must provide at least 1 input to attention decoder.")
if num_heads < 1:
raise ValueError("With less than 1 heads, use a non-attention decoder.")
if not attention_states.get_shape()[1:2].is_fully_defined():
raise ValueError("Shape[1] and [2] of attention_states must be known: %s"
% attention_states.get_shape())
if output_size is None:
output_size = cell.output_size
with tf.variable_scope(scope or "attention_decoder"):
batch_size = tf.shape(decoder_inputs[0])[0] # Needed for reshaping.
attn_length = attention_states.get_shape()[1].value
attn_size = attention_states.get_shape()[2].value
# To calculate W1 * h_t we use a 1-by-1 convolution, need to reshape before.
hidden = tf.reshape(attention_states, [-1, attn_length, 1, attn_size])
hidden_features = []
v = []
attention_vec_size = attn_size # Size of query vectors for attention.
for a in xrange(num_heads):
k = tf.get_variable("AttnW_%d" % a, [1, 1, attn_size, attention_vec_size])
hidden_features.append(tf.nn.conv2d(hidden, k, [1, 1, 1, 1], "SAME"))
v.append(tf.get_variable("AttnV_%d" % a, [attention_vec_size]))
states = [initial_state]
def attention(query):
"""Put attention masks on hidden using hidden_features and query."""
ds = [] # Results of attention reads will be stored here.
for a in xrange(num_heads):
with tf.variable_scope("Attention_%d" % a):
y = linear.linear(query, attention_vec_size, True)
y = tf.reshape(y, [-1, 1, 1, attention_vec_size])
# Attention mask is a softmax of v^T * tanh(...).
s = tf.reduce_sum(v[a] * tf.tanh(hidden_features[a] + y), [2, 3])
a = tf.nn.softmax(s)
# Now calculate the attention-weighted vector d.
d = tf.reduce_sum(tf.reshape(a, [-1, attn_length, 1, 1]) * hidden,
[1, 2])
ds.append(tf.reshape(d, [-1, attn_size]))
return ds
outputs = []
prev = None
batch_attn_size = tf.pack([batch_size, attn_size])
attns = [tf.zeros(batch_attn_size, dtype=dtype)
for _ in xrange(num_heads)]
for a in attns: # Ensure the second shape of attention vectors is set.
a.set_shape([None, attn_size])
for i in xrange(len(decoder_inputs)):
if i > 0:
tf.get_variable_scope().reuse_variables()
inp = decoder_inputs[i]
# If loop_function is set, we use it instead of decoder_inputs.
if loop_function is not None and prev is not None:
with tf.variable_scope("loop_function", reuse=True):
inp = tf.stop_gradient(loop_function(prev, i))
# Merge input and previous attentions into one vector of the right size.
x = linear.linear([inp] + attns, cell.input_size, True)
# Run the RNN.
cell_output, new_state = cell(x, states[-1])
states.append(new_state)
# Run the attention mechanism.
attns = attention(new_state)
with tf.variable_scope("AttnOutputProjection"):
output = linear.linear([cell_output] + attns, output_size, True)
if loop_function is not None:
# We do not propagate gradients over the loop function.
prev = tf.stop_gradient(output)
outputs.append(output)
return outputs, states
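# --- Illustrative sketch (added for clarity, not part of the original file) --
# The attention_decoder docstring above defines the attention read as
#   new_attn = softmax(v^T * tanh(W * attention_states + U * new_state)).
# The pure-Python helper below spells out that score computation for a single
# example, using plain lists instead of Tensors; the argument names and shapes
# are illustrative assumptions, not values taken from the graph built above.
def _additive_attention_weights_sketch(keys, query, v):
  """keys: list of vectors (each W*h_j), query: vector (U*state), v: vector.
  Returns the softmax-normalized attention weight for every position j."""
  import math
  scores = [sum(vi * math.tanh(kj + qj) for vi, kj, qj in zip(v, key, query))
            for key in keys]
  peak = max(scores)
  exps = [math.exp(s - peak) for s in scores]
  total = sum(exps)
  return [e / total for e in exps]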
def embedding_attention_decoder(decoder_inputs, initial_state, attention_states,
cell, num_symbols, num_heads=1,
output_size=None, output_projection=None,
feed_previous=False, dtype=tf.float32,
scope=None):
"""RNN decoder with embedding and attention and a pure-decoding option.
Args:
decoder_inputs: a list of 1D batch-sized int32-Tensors (decoder inputs).
initial_state: 2D Tensor [batch_size x cell.state_size].
attention_states: 3D Tensor [batch_size x attn_length x attn_size].
cell: rnn_cell.RNNCell defining the cell function.
num_symbols: integer, how many symbols come into the embedding.
num_heads: number of attention heads that read from attention_states.
output_size: size of the output vectors; if None, use cell.output_size.
output_projection: None or a pair (W, B) of output projection weights and
biases; W has shape [output_size x num_symbols] and B has shape
[num_symbols]; if provided and feed_previous=True, each fed previous
output will first be multiplied by W and added B.
feed_previous: Boolean; if True, only the first of decoder_inputs will be
used (the "GO" symbol), and all other decoder inputs will be generated by:
next = embedding_lookup(embedding, argmax(previous_output)),
In effect, this implements a greedy decoder. It can also be used
during training to emulate http://arxiv.org/pdf/1506.03099v2.pdf.
If False, decoder_inputs are used as given (the standard decoder case).
dtype: The dtype to use for the RNN initial states (default: tf.float32).
scope: VariableScope for the created subgraph; defaults to
"embedding_attention_decoder".
Returns:
outputs: A list of the same length as decoder_inputs of 2D Tensors with
shape [batch_size x output_size] containing the generated outputs.
states: The state of each decoder cell in each time-step. This is a list
with length len(decoder_inputs) -- one item for each time-step.
Each item is a 2D Tensor of shape [batch_size x cell.state_size].
Raises:
ValueError: when output_projection has the wrong shape.
"""
if output_size is None:
output_size = cell.output_size
if output_projection is not None:
proj_weights = tf.convert_to_tensor(output_projection[0], dtype=dtype)
proj_weights.get_shape().assert_is_compatible_with([cell.output_size,
num_symbols])
proj_biases = tf.convert_to_tensor(output_projection[1], dtype=dtype)
proj_biases.get_shape().assert_is_compatible_with([num_symbols])
with tf.variable_scope(scope or "embedding_attention_decoder"):
with tf.device("/cpu:0"):
embedding = tf.get_variable("embedding", [num_symbols, cell.input_size])
def extract_argmax_and_embed(prev, _):
"""Loop_function that extracts the symbol from prev and embeds it."""
if output_projection is not None:
prev = tf.nn.xw_plus_b(prev, output_projection[0], output_projection[1])
prev_symbol = tf.stop_gradient(tf.argmax(prev, 1))
emb_prev = tf.nn.embedding_lookup(embedding, prev_symbol)
return emb_prev
loop_function = None
if feed_previous:
loop_function = extract_argmax_and_embed
emb_inp = [tf.nn.embedding_lookup(embedding, i) for i in decoder_inputs]
return attention_decoder(
emb_inp, initial_state, attention_states, cell, output_size=output_size,
num_heads=num_heads, loop_function=loop_function)
def embedding_attention_seq2seq(encoder_inputs, decoder_inputs, cell,
num_encoder_symbols, num_decoder_symbols,
num_heads=1, output_projection=None,
feed_previous=False, dtype=tf.float32,
scope=None):
"""Embedding sequence-to-sequence model with attention.
This model first embeds encoder_inputs by a newly created embedding (of shape
[num_encoder_symbols x cell.input_size]). Then it runs an RNN to encode
embedded encoder_inputs into a state vector. It keeps the outputs of this
RNN at every step to use for attention later. Next, it embeds decoder_inputs
by another newly created embedding (of shape [num_decoder_symbols x
cell.input_size]). Then it runs attention decoder, initialized with the last
encoder state, on embedded decoder_inputs and attending to encoder outputs.
Args:
encoder_inputs: a list of 2D Tensors [batch_size x cell.input_size].
decoder_inputs: a list of 2D Tensors [batch_size x cell.input_size].
cell: rnn_cell.RNNCell defining the cell function and size.
num_encoder_symbols: integer; number of symbols on the encoder side.
num_decoder_symbols: integer; number of symbols on the decoder side.
num_heads: number of attention heads that read from attention_states.
output_projection: None or a pair (W, B) of output projection weights and
biases; W has shape [cell.output_size x num_decoder_symbols] and B has
shape [num_decoder_symbols]; if provided and feed_previous=True, each
fed previous output will first be multiplied by W and added B.
feed_previous: Boolean or scalar Boolean Tensor; if True, only the first
of decoder_inputs will be used (the "GO" symbol), and all other decoder
inputs will be taken from previous outputs (as in embedding_rnn_decoder).
If False, decoder_inputs are used as given (the standard decoder case).
dtype: The dtype of the initial RNN state (default: tf.float32).
scope: VariableScope for the created subgraph; defaults to
"embedding_attention_seq2seq".
Returns:
outputs: A list of the same length as decoder_inputs of 2D Tensors with
shape [batch_size x num_decoder_symbols] containing the generated outputs.
states: The state of each decoder cell in each time-step. This is a list
with length len(decoder_inputs) -- one item for each time-step.
Each item is a 2D Tensor of shape [batch_size x cell.state_size].
"""
with tf.variable_scope(scope or "embedding_attention_seq2seq"):
# Encoder.
encoder_cell = rnn_cell.EmbeddingWrapper(cell, num_encoder_symbols)
encoder_outputs, encoder_states = rnn.rnn(
encoder_cell, encoder_inputs, dtype=dtype)
# First calculate a concatenation of encoder outputs to put attention on.
top_states = [tf.reshape(e, [-1, 1, cell.output_size])
for e in encoder_outputs]
attention_states = tf.concat(1, top_states)
# Decoder.
output_size = None
if output_projection is None:
cell = rnn_cell.OutputProjectionWrapper(cell, num_decoder_symbols)
output_size = num_decoder_symbols
if isinstance(feed_previous, bool):
return embedding_attention_decoder(
decoder_inputs, encoder_states[-1], attention_states, cell,
num_decoder_symbols, num_heads, output_size, output_projection,
feed_previous)
else: # If feed_previous is a Tensor, we construct 2 graphs and use cond.
outputs1, states1 = embedding_attention_decoder(
decoder_inputs, encoder_states[-1], attention_states, cell,
num_decoder_symbols, num_heads, output_size, output_projection, True)
tf.get_variable_scope().reuse_variables()
outputs2, states2 = embedding_attention_decoder(
decoder_inputs, encoder_states[-1], attention_states, cell,
num_decoder_symbols, num_heads, output_size, output_projection, False)
outputs = tf.control_flow_ops.cond(feed_previous,
lambda: outputs1, lambda: outputs2)
states = tf.control_flow_ops.cond(feed_previous,
lambda: states1, lambda: states2)
return outputs, states
def sequence_loss_by_example(logits, targets, weights, num_decoder_symbols,
average_across_timesteps=True,
softmax_loss_function=None, name=None):
"""Weighted cross-entropy loss for a sequence of logits (per example).
Args:
logits: list of 2D Tensors of shape [batch_size x num_decoder_symbols].
targets: list of 1D batch-sized int32-Tensors of the same length as logits.
weights: list of 1D batch-sized float-Tensors of the same length as logits.
num_decoder_symbols: integer, number of decoder symbols (output classes).
average_across_timesteps: If set, divide the returned cost by the total
label weight.
softmax_loss_function: function (inputs-batch, labels-batch) -> loss-batch
to be used instead of the standard softmax (the default if this is None).
name: optional name for this operation, default: "sequence_loss_by_example".
Returns:
1D batch-sized float Tensor: the log-perplexity for each sequence.
Raises:
ValueError: if len(logits) is different from len(targets) or len(weights).
"""
if len(targets) != len(logits) or len(weights) != len(logits):
raise ValueError("Lengths of logits, weights, and targets must be the same "
"%d, %d, %d." % (len(logits), len(weights), len(targets)))
with tf.op_scope(logits + targets + weights, name,
"sequence_loss_by_example"):
batch_size = tf.shape(targets[0])[0]
log_perp_list = []
length = batch_size * num_decoder_symbols
for i in xrange(len(logits)):
if softmax_loss_function is None:
# TODO(lukaszkaiser): There is no SparseCrossEntropy in TensorFlow, so
# we need to first cast targets into a dense representation, and as
# SparseToDense does not accept batched inputs, we need to do this by
# re-indexing and re-sizing. When TensorFlow adds SparseCrossEntropy,
# rewrite this method.
indices = targets[i] + num_decoder_symbols * tf.range(0, batch_size)
with tf.device("/cpu:0"): # Sparse-to-dense must happen on CPU for now.
dense = tf.sparse_to_dense(indices, tf.expand_dims(length, 0), 1.0,
0.0)
target = tf.reshape(dense, [-1, num_decoder_symbols])
crossent = tf.nn.softmax_cross_entropy_with_logits(
logits[i], target, name="SequenceLoss/CrossEntropy{0}".format(i))
else:
crossent = softmax_loss_function(logits[i], targets[i])
log_perp_list.append(crossent * weights[i])
log_perps = tf.add_n(log_perp_list)
if average_across_timesteps:
total_size = tf.add_n(weights)
total_size += 1e-12 # Just to avoid division by 0 for all-0 weights.
log_perps /= total_size
return log_perps
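# --- Illustrative sketch (added for clarity, not part of the original file) --
# sequence_loss_by_example above sums the per-timestep cross-entropies weighted
# by `weights` and, when average_across_timesteps is set, divides by the total
# label weight. The helper below shows that arithmetic for one sequence with
# plain floats instead of Tensors.
def _weighted_log_perplexity_sketch(crossents, weights, average_across_timesteps=True):
  total = sum(c * w for c, w in zip(crossents, weights))
  if average_across_timesteps:
    total /= sum(weights) + 1e-12  # same guard against all-zero weights
  return total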
def sequence_loss(logits, targets, weights, num_decoder_symbols,
average_across_timesteps=True, average_across_batch=True,
softmax_loss_function=None, name=None):
"""Weighted cross-entropy loss for a sequence of logits, batch-collapsed.
Args:
logits: list of 2D Tensors of shape [batch_size x num_decoder_symbols].
targets: list of 1D batch-sized int32-Tensors of the same length as logits.
weights: list of 1D batch-sized float-Tensors of the same length as logits.
num_decoder_symbols: integer, number of decoder symbols (output classes).
average_across_timesteps: If set, divide the returned cost by the total
label weight.
average_across_batch: If set, divide the returned cost by the batch size.
softmax_loss_function: function (inputs-batch, labels-batch) -> loss-batch
to be used instead of the standard softmax (the default if this is None).
name: optional name for this operation, defaults to "sequence_loss".
Returns:
A scalar float Tensor: the average log-perplexity per symbol (weighted).
Raises:
ValueError: if len(logits) is different from len(targets) or len(weights).
"""
with tf.op_scope(logits + targets + weights, name, "sequence_loss"):
cost = tf.reduce_sum(sequence_loss_by_example(
logits, targets, weights, num_decoder_symbols,
average_across_timesteps=average_across_timesteps,
softmax_loss_function=softmax_loss_function))
if average_across_batch:
batch_size = tf.shape(targets[0])[0]
return cost / tf.cast(batch_size, tf.float32)
else:
return cost
def model_with_buckets(encoder_inputs, decoder_inputs, targets, weights,
buckets, num_decoder_symbols, seq2seq,
softmax_loss_function=None, name=None):
"""Create a sequence-to-sequence model with support for bucketing.
The seq2seq argument is a function that defines a sequence-to-sequence model,
e.g., seq2seq = lambda x, y: basic_rnn_seq2seq(x, y, rnn_cell.GRUCell(24))
Args:
encoder_inputs: a list of Tensors to feed the encoder; first seq2seq input.
decoder_inputs: a list of Tensors to feed the decoder; second seq2seq input.
targets: a list of 1D batch-sized int32-Tensors (desired output sequence).
weights: list of 1D batch-sized float-Tensors to weight the targets.
buckets: a list of pairs of (input size, output size) for each bucket.
num_decoder_symbols: integer, number of decoder symbols (output classes).
seq2seq: a sequence-to-sequence model function; it takes 2 input that
agree with encoder_inputs and decoder_inputs, and returns a pair
consisting of outputs and states (as, e.g., basic_rnn_seq2seq).
softmax_loss_function: function (inputs-batch, labels-batch) -> loss-batch
to be used instead of the standard softmax (the default if this is None).
name: optional name for this operation, defaults to "model_with_buckets".
Returns:
outputs: The outputs for each bucket. Its j'th element consists of a list
of 2D Tensors of shape [batch_size x num_decoder_symbols] (j'th outputs).
losses: List of scalar Tensors, representing losses for each bucket.
Raises:
ValueError: if length of encoder_inputs, targets, or weights is smaller
than the largest (last) bucket.
"""
if len(encoder_inputs) < buckets[-1][0]:
raise ValueError("Length of encoder_inputs (%d) must be at least that of la"
"st bucket (%d)." % (len(encoder_inputs), buckets[-1][0]))
if len(targets) < buckets[-1][1]:
raise ValueError("Length of targets (%d) must be at least that of last"
"bucket (%d)." % (len(targets), buckets[-1][1]))
if len(weights) < buckets[-1][1]:
raise ValueError("Length of weights (%d) must be at least that of last"
"bucket (%d)." % (len(weights), buckets[-1][1]))
all_inputs = encoder_inputs + decoder_inputs + targets + weights
losses = []
outputs = []
with tf.op_scope(all_inputs, name, "model_with_buckets"):
for j in xrange(len(buckets)):
if j > 0:
tf.get_variable_scope().reuse_variables()
bucket_encoder_inputs = [encoder_inputs[i]
for i in xrange(buckets[j][0])]
bucket_decoder_inputs = [decoder_inputs[i]
for i in xrange(buckets[j][1])]
bucket_outputs, _ = seq2seq(bucket_encoder_inputs,
bucket_decoder_inputs)
outputs.append(bucket_outputs)
bucket_targets = [targets[i] for i in xrange(buckets[j][1])]
bucket_weights = [weights[i] for i in xrange(buckets[j][1])]
losses.append(sequence_loss(
outputs[-1], bucket_targets, bucket_weights, num_decoder_symbols,
softmax_loss_function=softmax_loss_function))
return outputs, losses
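# --- Illustrative sketch (added for clarity, not part of the original file) --
# model_with_buckets above builds one unrolled sub-graph per (input size,
# output size) pair. A training script is then expected to route every example
# to a bucket that is large enough before feeding it; that routing rule is not
# part of this file, so the helper below is only a minimal sketch of the step.
def _pick_bucket_sketch(buckets, encoder_len, decoder_len):
  """Return the index of the first bucket that fits the example sizes."""
  for bucket_id, (input_size, output_size) in enumerate(buckets):
    if encoder_len <= input_size and decoder_len <= output_size:
      return bucket_id
  raise ValueError("No bucket is large enough for lengths (%d, %d)."
                   % (encoder_len, decoder_len))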
|
willzhang05/postgrestesting1 | refs/heads/master | postgrestesting1/lib/python3.5/site-packages/django/utils/autoreload.py | 26 | # Autoreloading launcher.
# Borrowed from Peter Hunt and the CherryPy project (http://www.cherrypy.org).
# Some taken from Ian Bicking's Paste (http://pythonpaste.org/).
#
# Portions copyright (c) 2004, CherryPy Team ([email protected])
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the CherryPy Team nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Avoid importing `importlib` from this package.
from __future__ import absolute_import
import os
import signal
import sys
import time
import traceback
from django.apps import apps
from django.conf import settings
from django.core.signals import request_finished
from django.utils import six
from django.utils._os import npath
from django.utils.six.moves import _thread as thread
# This import does nothing, but it's necessary to avoid some race conditions
# in the threading module. See http://code.djangoproject.com/ticket/2330 .
try:
import threading # NOQA
except ImportError:
pass
try:
import termios
except ImportError:
termios = None
USE_INOTIFY = False
try:
# Test whether inotify is enabled and likely to work
import pyinotify
fd = pyinotify.INotifyWrapper.create().inotify_init()
if fd >= 0:
USE_INOTIFY = True
os.close(fd)
except ImportError:
pass
RUN_RELOADER = True
FILE_MODIFIED = 1
I18N_MODIFIED = 2
_mtimes = {}
_win = (sys.platform == "win32")
_exception = None
_error_files = []
_cached_modules = set()
_cached_filenames = []
def gen_filenames(only_new=False):
"""
Returns a list of filenames referenced in sys.modules and translation
files.
"""
# N.B. ``list(...)`` is needed, because this runs in parallel with
# application code which might be mutating ``sys.modules``, and this will
# fail with RuntimeError: cannot mutate dictionary while iterating
global _cached_modules, _cached_filenames
module_values = set(sys.modules.values())
_cached_filenames = clean_files(_cached_filenames)
if _cached_modules == module_values:
# No changes in module list, short-circuit the function
if only_new:
return []
else:
return _cached_filenames + clean_files(_error_files)
new_modules = module_values - _cached_modules
new_filenames = clean_files(
[filename.__file__ for filename in new_modules
if hasattr(filename, '__file__')])
if not _cached_filenames and settings.USE_I18N:
# Add the names of the .mo files that can be generated
# by compilemessages management command to the list of files watched.
basedirs = [os.path.join(os.path.dirname(os.path.dirname(__file__)),
'conf', 'locale'),
'locale']
for app_config in reversed(list(apps.get_app_configs())):
basedirs.append(os.path.join(npath(app_config.path), 'locale'))
basedirs.extend(settings.LOCALE_PATHS)
basedirs = [os.path.abspath(basedir) for basedir in basedirs
if os.path.isdir(basedir)]
for basedir in basedirs:
for dirpath, dirnames, locale_filenames in os.walk(basedir):
for filename in locale_filenames:
if filename.endswith('.mo'):
new_filenames.append(os.path.join(dirpath, filename))
_cached_modules = _cached_modules.union(new_modules)
_cached_filenames += new_filenames
if only_new:
return new_filenames + clean_files(_error_files)
else:
return _cached_filenames + clean_files(_error_files)
def clean_files(filelist):
filenames = []
for filename in filelist:
if not filename:
continue
if filename.endswith(".pyc") or filename.endswith(".pyo"):
filename = filename[:-1]
if filename.endswith("$py.class"):
filename = filename[:-9] + ".py"
if os.path.exists(filename):
filenames.append(filename)
return filenames
def reset_translations():
import gettext
from django.utils.translation import trans_real
gettext._translations = {}
trans_real._translations = {}
trans_real._default = None
trans_real._active = threading.local()
def inotify_code_changed():
"""
Checks for changed code using inotify. After being called
it blocks until a change event has been fired.
"""
class EventHandler(pyinotify.ProcessEvent):
modified_code = None
def process_default(self, event):
if event.path.endswith('.mo'):
EventHandler.modified_code = I18N_MODIFIED
else:
EventHandler.modified_code = FILE_MODIFIED
wm = pyinotify.WatchManager()
notifier = pyinotify.Notifier(wm, EventHandler())
def update_watch(sender=None, **kwargs):
if sender and getattr(sender, 'handles_files', False):
# No need to update watches when request serves files.
# (sender is supposed to be a django.core.handlers.BaseHandler subclass)
return
mask = (
pyinotify.IN_MODIFY |
pyinotify.IN_DELETE |
pyinotify.IN_ATTRIB |
pyinotify.IN_MOVED_FROM |
pyinotify.IN_MOVED_TO |
pyinotify.IN_CREATE |
pyinotify.IN_DELETE_SELF |
pyinotify.IN_MOVE_SELF
)
for path in gen_filenames(only_new=True):
wm.add_watch(path, mask)
# New modules may get imported when a request is processed.
request_finished.connect(update_watch)
# Block until an event happens.
update_watch()
notifier.check_events(timeout=None)
notifier.read_events()
notifier.process_events()
notifier.stop()
# If we are here the code must have changed.
return EventHandler.modified_code
def code_changed():
global _mtimes, _win
for filename in gen_filenames():
stat = os.stat(filename)
mtime = stat.st_mtime
if _win:
mtime -= stat.st_ctime
if filename not in _mtimes:
_mtimes[filename] = mtime
continue
if mtime != _mtimes[filename]:
_mtimes = {}
try:
del _error_files[_error_files.index(filename)]
except ValueError:
pass
return I18N_MODIFIED if filename.endswith('.mo') else FILE_MODIFIED
return False
def check_errors(fn):
def wrapper(*args, **kwargs):
global _exception
try:
fn(*args, **kwargs)
except Exception:
_exception = sys.exc_info()
et, ev, tb = _exception
if getattr(ev, 'filename', None) is None:
# get the filename from the last item in the stack
filename = traceback.extract_tb(tb)[-1][0]
else:
filename = ev.filename
if filename not in _error_files:
_error_files.append(filename)
raise
return wrapper
def raise_last_exception():
global _exception
if _exception is not None:
six.reraise(*_exception)
def ensure_echo_on():
if termios:
fd = sys.stdin
if fd.isatty():
attr_list = termios.tcgetattr(fd)
if not attr_list[3] & termios.ECHO:
attr_list[3] |= termios.ECHO
if hasattr(signal, 'SIGTTOU'):
old_handler = signal.signal(signal.SIGTTOU, signal.SIG_IGN)
else:
old_handler = None
termios.tcsetattr(fd, termios.TCSANOW, attr_list)
if old_handler is not None:
signal.signal(signal.SIGTTOU, old_handler)
def reloader_thread():
ensure_echo_on()
if USE_INOTIFY:
fn = inotify_code_changed
else:
fn = code_changed
while RUN_RELOADER:
change = fn()
if change == FILE_MODIFIED:
sys.exit(3) # force reload
elif change == I18N_MODIFIED:
reset_translations()
time.sleep(1)
def restart_with_reloader():
while True:
args = [sys.executable] + ['-W%s' % o for o in sys.warnoptions] + sys.argv
if sys.platform == "win32":
args = ['"%s"' % arg for arg in args]
new_environ = os.environ.copy()
new_environ["RUN_MAIN"] = 'true'
exit_code = os.spawnve(os.P_WAIT, sys.executable, args, new_environ)
if exit_code != 3:
return exit_code
def python_reloader(main_func, args, kwargs):
if os.environ.get("RUN_MAIN") == "true":
thread.start_new_thread(main_func, args, kwargs)
try:
reloader_thread()
except KeyboardInterrupt:
pass
else:
try:
exit_code = restart_with_reloader()
if exit_code < 0:
os.kill(os.getpid(), -exit_code)
else:
sys.exit(exit_code)
except KeyboardInterrupt:
pass
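# Note (added for clarity, not part of the original Django source): the reload
# protocol above uses two processes. The parent loops in restart_with_reloader(),
# re-spawning the command with RUN_MAIN=true for as long as the child exits with
# code 3. The child runs the actual command in a background thread while
# reloader_thread() watches for changes (via inotify or mtime polling) and calls
# sys.exit(3) when a source file changes, which makes the parent start a fresh
# child with the new code.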
def jython_reloader(main_func, args, kwargs):
from _systemrestart import SystemRestart
thread.start_new_thread(main_func, args)
while True:
if code_changed():
raise SystemRestart
time.sleep(1)
def main(main_func, args=None, kwargs=None):
if args is None:
args = ()
if kwargs is None:
kwargs = {}
if sys.platform.startswith('java'):
reloader = jython_reloader
else:
reloader = python_reloader
wrapped_main_func = check_errors(main_func)
reloader(wrapped_main_func, args, kwargs)
|
jusdng/odoo | refs/heads/8.0 | doc/_extensions/html_domain.py | 254 | # -*- coding: utf-8 -*-
"""
Defines a "raw HTML" domain with a ``div[classes]`` and a number of roles
rendered more or less directly to HTML.
.. warning::
the purpose of this domain is *not* to document HTML or components
"""
from docutils import nodes, utils
from docutils.parsers.rst import Directive, directives
from docutils.parsers.rst.directives.body import LineBlock
import sphinx.roles
from sphinx.domains import Domain
def setup(app):
app.add_domain(HtmlDomain)
app.add_node(div, html=(
lambda self, node: self.body.append(self.starttag(node, 'div')),
lambda self, node: self.body.append('</div>\n')))
app.add_node(address, html=(
lambda self, node: self.body.append(self.starttag(node, 'address')),
lambda self, node: self.body.append('</address>\n')
))
app.add_node(cite, html=(visit_cite, depart_cite))
for name, node in [('mark', mark), ('ins', insert), ('del', delete),
('s', strikethrough), ('u', underline), ('small', small),
('kbd', kbd), ('var', var), ('samp', samp)]:
addnode(app, node, name)
class div(nodes.General, nodes.Element): pass
class Div(Directive):
optional_arguments = 1
final_argument_whitespace = 1
has_content = True
def run(self):
self.assert_has_content()
text = '\n'.join(self.content)
try:
if self.arguments:
classes = directives.class_option(self.arguments[0])
else:
classes = []
except ValueError:
raise self.error(
'Invalid class attribute value for "%s" directive: "%s".'
% (self.name, self.arguments[0]))
node = div(text)
node['classes'].extend(classes)
self.add_name(node)
self.state.nested_parse(self.content, self.content_offset, node)
return [node]
class address(nodes.General, nodes.Element): pass
class Address(LineBlock):
def run(self):
[node] = super(Address, self).run()
ad = address(node.rawsource, *node.children)
return [ad]
class mark(nodes.Inline, nodes.TextElement): pass
class insert(nodes.Inline, nodes.TextElement): pass
class delete(nodes.Inline, nodes.TextElement): pass
class strikethrough(nodes.Inline, nodes.TextElement): pass
class underline(nodes.Inline, nodes.TextElement): pass
class small(nodes.Inline, nodes.TextElement): pass
class kbd(nodes.Inline, nodes.FixedTextElement): pass
class var(nodes.Inline, nodes.FixedTextElement): pass
class samp(nodes.Inline, nodes.FixedTextElement): pass
def makerole(node):
return lambda name, rawtext, text, lineno, inliner, options=None, content=None:\
([node(rawtext.strip(), text.strip())], [])
def addnode(app, node, nodename):
app.add_node(node, html=(
lambda self, n: self.body.append(self.starttag(n, nodename)),
lambda self, n: self.body.append('</%s>' % nodename)
))
def initialism(*args, **kwargs):
nodes, _ = sphinx.roles.abbr_role(*args, **kwargs)
[abbr] = nodes
abbr.attributes.setdefault('classes', []).append('initialism')
return [abbr], []
def cite_role(typ, rawtext, text, lineno, inliner, options=None, content=None):
text = utils.unescape(text)
m = sphinx.roles._abbr_re.search(text)
if m is None:
return [cite(text, text, **(options or {}))], []
content = text[:m.start()].strip()
source = m.group(1)
return [cite(content, content, source=source)], []
class cite(nodes.Inline, nodes.TextElement): pass
def visit_cite(self, node):
attrs = {}
if node.hasattr('source'):
attrs['title'] = node['source']
self.body.append(self.starttag(node, 'cite', '', **attrs))
def depart_cite(self, node):
self.body.append('</cite>')  # close the cite tag opened in visit_cite
class HtmlDomain(Domain):
name = 'h'
label = 'HTML'
directives = {
'div': Div,
'address': Address,
}
roles = {
'mark': makerole(mark),
'ins': makerole(insert),
'del': makerole(delete),
's': makerole(strikethrough),
'u': makerole(underline),
'small': makerole(small),
'initialism': initialism,
'cite': cite_role,
'kbd': makerole(kbd),
'var': makerole(var),
'samp': makerole(samp),
}
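# Illustrative usage note (added for clarity, not part of the original
# extension): once setup() has registered this domain, reST sources can use the
# directive and roles it declares, for example
#
#   .. h:div:: oe_dark grey
#
#   :h:mark:`highlighted text`, :h:kbd:`Ctrl+C`, or
#   :h:initialism:`HTML (HyperText Markup Language)`
#
# The class names and text used here are arbitrary examples.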
|
GunoH/intellij-community | refs/heads/master | python/testData/completion/fStringLikeCompletionInOrdinaryStringLiterals.after.py | 20 | my_expr = 42
s = f'foo{my_expr}' |
jopohl/urh | refs/heads/master | src/urh/util/GenericCRC.py | 1 | import array
import copy
from collections import OrderedDict
from xml.etree import ElementTree as ET
from urh.cythonext import util as c_util
from urh.util import util
class GenericCRC(object):
# https://en.wikipedia.org/wiki/Polynomial_representations_of_cyclic_redundancy_checks
DEFAULT_POLYNOMIALS = OrderedDict([
# x^8 + x^7 + x^6 + x^4 + x^2 + 1
("8_standard", array.array("B", [1,
1, 1, 0, 1, 0, 1, 0, 1])),
# x^16+x^15+x^2+x^0
("16_standard", array.array("B", [1,
1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1])),
# x^16+x^12+x^5+x^0
("16_ccitt", array.array("B", [1,
0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1])),
# x^16+x^13+x^12+x^11+x^10+x^8+x^6+x^5+x^2+x^0
("16_dnp", array.array("B", [1,
0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1])),
# x^8 + x^2 + x + 1
("8_ccitt", array.array("B", [1,
0, 0, 0, 0, 0, 1, 1, 1]))
])
STANDARD_CHECKSUMS = OrderedDict([
# see method guess_standard_parameters_and_datarange for default parameters
# Links:
# - https://en.wikipedia.org/wiki/Cyclic_redundancy_check
# - http://reveng.sourceforge.net/crc-catalogue/1-15.htm
# - https://crccalc.com/
("CRC8 (default)", dict(polynomial="0xD5")),
("CRC8 CCITT", dict(polynomial="0x07")),
("CRC8 Bluetooth", dict(polynomial="0xA7", ref_in=True, ref_out=True)),
("CRC8 DARC", dict(polynomial="0x39", ref_in=True, ref_out=True)),
("CRC8 NRSC-5", dict(polynomial="0x31", start_value=1)),
("CRC16 (default)", dict(polynomial="0x8005", ref_in=True, ref_out=True)),
("CRC16 CCITT", dict(polynomial="0x1021", ref_in=True, ref_out=True)),
("CRC16 NRSC-5", dict(polynomial="0x080B", start_value=1, ref_in=True, ref_out=True)),
("CRC16 CC1101", dict(polynomial="0x8005", start_value=1)),
("CRC16 CDMA2000", dict(polynomial="0xC867", start_value=1)),
("CRC32 (default)", dict(polynomial="0x04C11DB7", start_value=1, final_xor=1, ref_in=True, ref_out=True)),
])
def __init__(self, polynomial="16_standard", start_value=False, final_xor=False, reverse_polynomial=False,
reverse_all=False, little_endian=False, lsb_first=False):
if isinstance(polynomial, str):
self.caption = polynomial
else:
self.caption = ""
self.polynomial = self.choose_polynomial(polynomial)
self.reverse_polynomial = reverse_polynomial
self.reverse_all = reverse_all
self.little_endian = little_endian
self.lsb_first = lsb_first
self.start_value = self.__read_parameter(start_value)
self.final_xor = self.__read_parameter(final_xor)
self.cache = []
self.__cache_bits = 8
def __read_parameter(self, value):
if isinstance(value, bool) or isinstance(value, int):
return array.array('B', [value] * (self.poly_order - 1))
else:
if len(value) == self.poly_order - 1:
return value
else:
return array.array('B', value[0] * (self.poly_order - 1))
def __eq__(self, other):
if not isinstance(other, GenericCRC):
return False
return all(getattr(self, attrib) == getattr(other, attrib) for attrib in (
"polynomial", "reverse_polynomial", "reverse_all", "little_endian", "lsb_first", "start_value",
"final_xor"))
def __hash__(self):
return hash((self.polynomial.tobytes(), self.reverse_polynomial, self.reverse_all, self.little_endian,
self.lsb_first, self.start_value.tobytes(), self.final_xor.tobytes()))
@property
def poly_order(self):
return len(self.polynomial)
@property
def polynomial_as_bit_str(self) -> str:
return "".join("1" if p else "0" for p in self.polynomial)
@property
def polynomial_as_hex_str(self) -> str:
return util.bit2hex(self.polynomial[1:]) # do not show leading one
@property
def polynomial_to_html(self) -> str:
result = ""
for i in range(self.poly_order):
index = self.poly_order - 1 - i
if self.polynomial[i] > 0:
if index > 1:
result += "x<sup>{0}</sup> + ".format(index)
elif index == 1:
result += "x + "
elif index == 0:
result += "1"
result = result.rstrip(" + ")
return result
def set_polynomial_from_hex(self, hex_str: str):
old = self.polynomial
self.polynomial = array.array("B", [1]) + util.hex2bit(hex_str)
if self.polynomial != old:
self.cache = []
self.__cache_bits = 8
def choose_polynomial(self, polynomial):
if isinstance(polynomial, str):
return self.DEFAULT_POLYNOMIALS[polynomial]
elif isinstance(polynomial, int):
return list(self.DEFAULT_POLYNOMIALS.items())[polynomial][1]
else:
return polynomial
def get_parameters(self):
return self.polynomial, self.start_value, self.final_xor, \
self.lsb_first, self.reverse_polynomial, self.reverse_all, self.little_endian
def crc(self, inpt):
result = c_util.crc(array.array("B", inpt),
array.array("B", self.polynomial),
array.array("B", self.start_value),
array.array("B", self.final_xor),
self.lsb_first, self.reverse_polynomial, self.reverse_all, self.little_endian)
return util.number_to_bits(result, self.poly_order - 1)
def cached_crc(self, inpt, bits=8):
if len(self.cache) == 0:
self.calculate_cache(bits)
result = c_util.cached_crc(self.cache,
self.__cache_bits,
array.array("B", inpt),
array.array("B", self.polynomial),
array.array("B", self.start_value),
array.array("B", self.final_xor),
self.lsb_first, self.reverse_polynomial, self.reverse_all, self.little_endian)
return util.number_to_bits(result, self.poly_order - 1)
def calculate_cache(self, bits=8):
if 0 < bits < self.poly_order:
self.__cache_bits = bits
else:
self.__cache_bits = 8 if self.poly_order > 8 else self.poly_order - 1
self.cache = c_util.calculate_cache(array.array("B", self.polynomial), self.reverse_polynomial,
self.__cache_bits)
def get_crc_datarange(self, inpt, vrfy_crc_start):
return c_util.get_crc_datarange(array.array("B", inpt),
array.array("B", self.polynomial),
vrfy_crc_start,
array.array("B", self.start_value),
array.array("B", self.final_xor),
self.lsb_first, self.reverse_polynomial, self.reverse_all, self.little_endian)
def reference_crc(self, inpt):
len_inpt = len(inpt)
if len(self.start_value) < self.poly_order - 1:
return False
crc = copy.copy(self.start_value[0:(self.poly_order - 1)])
for i in range(0, len_inpt + 7, 8):
for j in range(0, 8):
if self.lsb_first:
idx = i + (7 - j)
else:
idx = i + j
if idx >= len_inpt:
break
if crc[0] != inpt[idx]:
crc[0:self.poly_order - 2] = crc[1:self.poly_order - 1] # crc = crc << 1
crc[self.poly_order - 2] = False
for x in range(0, self.poly_order - 1):
if self.reverse_polynomial:
crc[x] ^= self.polynomial[self.poly_order - 1 - x]
else:
crc[x] ^= self.polynomial[x + 1]
else:
crc[0:self.poly_order - 2] = crc[1:self.poly_order - 1] # crc = crc << 1
crc[self.poly_order - 2] = False
for i in range(0, self.poly_order - 1):
if self.final_xor[i]:
crc[i] = not crc[i]
if self.reverse_all:
crc_old = []
for i in range(0, self.poly_order - 1):
crc_old.append(crc[self.poly_order - 2 - i])
crc = crc_old
if self.poly_order - 1 == 16 and self.little_endian:
self.__swap_bytes(crc, 0, 1)
elif self.poly_order - 1 == 32 and self.little_endian:
self.__swap_bytes(crc, 0, 3)
self.__swap_bytes(crc, 1, 2)
elif self.poly_order - 1 == 64 and self.little_endian:
for pos1, pos2 in [(0, 7), (1, 6), (2, 5), (3, 4)]:
self.__swap_bytes(crc, pos1, pos2)
# return crc
return array.array("B", crc)
def calculate(self, bits: array.array):
return self.crc(bits)
@staticmethod
def __swap_bytes(array, pos1: int, pos2: int):
array[pos1 * 8:pos1 * 8 + 8], array[pos2 * 8:pos2 * 8 + 8] = \
array[pos2 * 8: pos2 * 8 + 8], array[pos1 * 8:pos1 * 8 + 8]
@staticmethod
def from_standard_checksum(name: str):
result = GenericCRC()
result.set_individual_parameters(**GenericCRC.STANDARD_CHECKSUMS[name])
result.caption = name
return result
def set_individual_parameters(self, polynomial, start_value=0, final_xor=0, ref_in=False, ref_out=False,
little_endian=False, reverse_polynomial=False):
# Set polynomial from hex or bit array
old = self.polynomial
if isinstance(polynomial, str):
self.set_polynomial_from_hex(polynomial)
else:
self.polynomial = polynomial
# Clear cache if polynomial changes
if self.polynomial != old:
self.cache = []
self.__cache_bits = 8
# Set start value completely or 0000/FFFF
if isinstance(start_value, int):
self.start_value = array.array("B", [start_value] * (self.poly_order - 1))
elif isinstance(start_value, array.array) and len(start_value) == self.poly_order - 1:
self.start_value = start_value
else:
raise ValueError("Invalid start value length")
# Set final xor completely or 0000/FFFF
if isinstance(final_xor, int):
self.final_xor = array.array("B", [final_xor] * (self.poly_order - 1))
elif isinstance(final_xor, array.array) and len(final_xor) == self.poly_order - 1:
self.final_xor = final_xor
else:
raise ValueError("Invalid final xor length")
# Set boolean parameters
old_reverse = self.reverse_polynomial
self.reverse_polynomial = reverse_polynomial
if self.reverse_polynomial != old_reverse:
self.cache = []
self.__cache_bits = 8
self.reverse_all = ref_out
self.little_endian = little_endian
self.lsb_first = ref_in
def set_crc_parameters(self, i):
# Bit 0,1 = Polynomial
val = (i >> 0) & 3
old = self.polynomial
self.polynomial = self.choose_polynomial(val)
poly_order = len(self.polynomial)
if (self.polynomial != old):
self.cache = []
self.__cache_bits = 8
# Bit 2 = Start Value
val = (i >> 2) & 1
self.start_value = [val != 0] * (poly_order - 1)
# Bit 3 = Final XOR
val = (i >> 3) & 1
self.final_xor = [val != 0] * (poly_order - 1)
# Bit 4 = Reverse Polynomial
val = (i >> 4) & 1
old_reverse = self.reverse_polynomial
if val == 0:
self.reverse_polynomial = False
else:
self.reverse_polynomial = True
if (self.reverse_polynomial != old_reverse):
self.cache = []
self.__cache_bits = 8
# Bit 5 = Reverse (all) Result
val = (i >> 5) & 1
if val == 0:
self.reverse_all = False
else:
self.reverse_all = True
# Bit 6 = Little Endian
val = (i >> 6) & 1
if val == 0:
self.little_endian = False
else:
self.little_endian = True
# Bit 7 = Least Significant Bit (LSB) first
val = (i >> 7) & 1
if val == 0:
self.lsb_first = False
else:
self.lsb_first = True
@classmethod
def __initialize_standard_checksums(cls):
for name in cls.STANDARD_CHECKSUMS:
polynomial = cls.STANDARD_CHECKSUMS[name]["polynomial"]
if isinstance(polynomial, str):
polynomial = array.array("B", [1]) + util.hex2bit(polynomial)
cls.STANDARD_CHECKSUMS[name]["polynomial"] = polynomial
n = len(polynomial) - 1
try:
start_val = cls.STANDARD_CHECKSUMS[name]["start_value"]
except KeyError:
start_val = 0
if isinstance(start_val, int):
cls.STANDARD_CHECKSUMS[name]["start_value"] = array.array("B", [start_val] * n)
try:
final_xor = cls.STANDARD_CHECKSUMS[name]["final_xor"]
except KeyError:
final_xor = 0
if isinstance(final_xor, int):
cls.STANDARD_CHECKSUMS[name]["final_xor"] = array.array("B", [final_xor] * n)
def guess_all(self, bits, trash_max=7, ignore_positions: set = None):
"""
:param bits:
:param trash_max:
:param ignore_positions: columns to ignore (e.g. if already another label on them)
:return: a CRC object, data_range_start, data_range_end, crc_start, crc_end
"""
self.__initialize_standard_checksums()
ignore_positions = set() if ignore_positions is None else ignore_positions
for i in range(0, trash_max):
ret = self.guess_standard_parameters_and_datarange(bits, i)
if ret == (0, 0, 0):
continue # nothing found
crc_start, crc_end = len(bits) - i - ret[0].poly_order + 1, len(bits) - i
if not any(i in ignore_positions for i in range(crc_start, crc_end)):
return ret[0], ret[1], ret[2], crc_start, crc_end
return 0, 0, 0, 0, 0
def bruteforce_all(self, inpt, trash_max=7):
polynomial_sizes = [16, 8]
len_input = len(inpt)
for s in polynomial_sizes:
for i in range(len_input - s - trash_max, len_input - s):
ret = self.bruteforce_parameters_and_data_range(inpt, i)
if ret != (0, 0, 0):
return ret[0], ret[1], ret[2], i, i + s
return 0, 0, 0, 0, 0
def guess_standard_parameters(self, inpt, vrfy_crc):
# Tests all standard parameters and return parameter_value (else False), if a valid CRC could be computed.
# Note: vfry_crc is included inpt!
for i in range(0, 2 ** 8):
self.set_crc_parameters(i)
if len(vrfy_crc) == self.poly_order and self.crc(inpt) == vrfy_crc:
return i
return False
def guess_standard_parameters_and_datarange(self, inpt, trash):
"""
Tests standard parameters from dict and return polynomial object, if a valid CRC could be computed
and determines start and end of crc datarange (end is set before crc)
Note: vfry_crc is included inpt!
"""
# Test longer polynomials first, because smaller polynomials have higher risk of false positive
for name, parameters in sorted(self.STANDARD_CHECKSUMS.items(),
key=lambda x: len(x[1]["polynomial"]),
reverse=True):
self.caption = name
data_begin, data_end = c_util.get_crc_datarange(inpt,
parameters["polynomial"],
max(0,
len(inpt) - trash - len(parameters["polynomial"])) + 1,
parameters["start_value"],
parameters["final_xor"],
parameters.get("ref_in", False),
parameters.get("reverse_polynomial", False),
parameters.get("ref_out", False),
parameters.get("little_endian", False))
if (data_begin, data_end) != (0, 0):
self.set_individual_parameters(**parameters)
return self, data_begin, data_end
return 0, 0, 0
def bruteforce_parameters_and_data_range(self, inpt, vrfy_crc_start):
# Tests all standard parameters and return parameter_value (else False), if a valid CRC could be computed
# and determines start and end of crc datarange (end is set before crc)
# Note: vfry_crc is included inpt!
for i in range(0, 2 ** 8):
self.set_crc_parameters(i)
data_begin, data_end = self.get_crc_datarange(inpt, vrfy_crc_start)
if (data_begin, data_end) != (0, 0):
return i, data_begin, data_end
return 0, 0, 0
def reverse_engineer_polynomial(self, dataset, crcset):
# Sets must be of equal size and > 2
setlen = len(dataset)
if setlen != len(crcset) or setlen < 3:
return False
# XOR each data string with every other string and find strings that only differ in one bit
one_bitter = []
one_bitter_crc = []
for i in range(0, setlen):
for j in range(i + 1, setlen):
if len(dataset[i]) == len(dataset[j]) and len(crcset[i]) == len(crcset[j]):
count = 0
tmp = -1
for x in range(0, len(dataset[i])):
if dataset[i][x] != dataset[j][x]:
tmp = x
count += 1
if count > 1:
break
if count == 1:
one_bitter.append(tmp)
tmp_crc = []
for x in range(0, len(crcset[i])):
tmp_crc.append(crcset[i][x] ^ crcset[j][x])
one_bitter_crc.extend([tmp_crc])
# Find two CRCs from one bit sequences with position i and i+1. CRC from one bit sequence with position i+1 must have MSB=1
setlen = len(one_bitter)
for i in range(0, setlen):
for j in range(0, setlen):
if i != j and one_bitter[i] + 1 == one_bitter[j] and one_bitter_crc[j][0] == True:
# Compute Polynomial
polynomial = one_bitter_crc[i].copy()
for x in range(0, len(one_bitter_crc[i]) - 1):
polynomial[x] ^= one_bitter_crc[j][x + 1]
return polynomial
return False
def to_xml(self):
root = ET.Element("crc")
root.set("polynomial", util.convert_bits_to_string(self.polynomial, 0))
root.set("start_value", util.convert_bits_to_string(self.start_value, 0))
root.set("final_xor", util.convert_bits_to_string(self.final_xor, 0))
root.set("ref_in", str(int(self.lsb_first)))
root.set("ref_out", str(int(self.reverse_all)))
return root
@classmethod
def from_xml(cls, tag: ET.Element):
polynomial = tag.get("polynomial", "1010")
start_value = tag.get("start_value", "0000")
final_xor = tag.get("final_xor", "0000")
ref_in = bool(int(tag.get("ref_in", "0")))
ref_out = bool(int(tag.get("ref_out", "0")))
return GenericCRC(polynomial=util.string2bits(polynomial),
start_value=util.string2bits(start_value), final_xor=util.string2bits(final_xor),
lsb_first=ref_in, reverse_all=ref_out)
@staticmethod
def bit2str(inpt):
return "".join(["1" if x else "0" for x in inpt])
@staticmethod
def str2bit(inpt):
return [True if x == "1" else False for x in inpt]
@staticmethod
def int2bit(inpt):
return [True if x == "1" else False for x in '{0:08b}'.format(inpt)]
@staticmethod
def str2arr(inpt):
return array.array("B", GenericCRC.str2bit(inpt))
@staticmethod
def bit2int(inpt):
return int(GenericCRC.bit2str(inpt), 2)
@staticmethod
def hex2str(inpt):
bitstring = bin(int(inpt, base=16))[2:]
return "0" * (4 * len(inpt.lstrip('0x')) - len(bitstring)) + bitstring
|
sivel/ansible-modules-extras | refs/heads/devel | packaging/language/maven_artifact.py | 22 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2014, Chris Schmidt <chris.schmidt () contrastsecurity.com>
#
# Built using https://github.com/hamnis/useful-scripts/blob/master/python/download-maven-artifact
# as a reference and starting point.
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
__author__ = 'cschmidt'
from lxml import etree
import os
import hashlib
import sys
import posixpath
import urlparse
from ansible.module_utils.basic import *
from ansible.module_utils.urls import *
try:
import boto3
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: maven_artifact
short_description: Downloads an Artifact from a Maven Repository
version_added: "2.0"
description:
- Downloads an artifact from a maven repository given the maven coordinates provided to the module. Can retrieve
- snapshots or release versions of the artifact and will resolve the latest available version if one is not
- available.
author: "Chris Schmidt (@chrisisbeef)"
requirements:
- "python >= 2.6"
- lxml
- boto if using a S3 repository (s3://...)
options:
group_id:
description:
- The Maven groupId coordinate
required: true
artifact_id:
description:
- The maven artifactId coordinate
required: true
version:
description:
- The maven version coordinate
required: false
default: latest
classifier:
description:
- The maven classifier coordinate
required: false
default: null
extension:
description:
- The maven type/extension coordinate
required: false
default: jar
repository_url:
description:
- The URL of the Maven Repository to download from.
- Use s3://... if the repository is hosted on Amazon S3, added in version 2.2.
required: false
default: http://repo1.maven.org/maven2
username:
description:
- The username to authenticate as to the Maven Repository. Use the AWS access key if the repository is hosted on S3
required: false
default: null
aliases: [ "aws_secret_key" ]
password:
description:
- The password to authenticate with to the Maven Repository. Use the AWS secret access key if the repository is hosted on S3
required: false
default: null
aliases: [ "aws_secret_access_key" ]
dest:
description:
- The path where the artifact should be written to
required: true
default: false
state:
description:
- The desired state of the artifact
required: true
default: present
choices: [present,absent]
timeout:
description:
- Specifies a timeout in seconds for the connection attempt
required: false
default: 10
version_added: "2.3"
validate_certs:
description:
- If C(no), SSL certificates will not be validated. This should only be set to C(no) when no other option exists.
required: false
default: 'yes'
choices: ['yes', 'no']
version_added: "1.9.3"
'''
EXAMPLES = '''
# Download the latest version of the JUnit framework artifact from Maven Central
- maven_artifact:
group_id: junit
artifact_id: junit
dest: /tmp/junit-latest.jar
# Download JUnit 4.11 from Maven Central
- maven_artifact:
group_id: junit
artifact_id: junit
version: 4.11
dest: /tmp/junit-4.11.jar
# Download an artifact from a private repository requiring authentication
- maven_artifact:
group_id: com.company
artifact_id: library-name
repository_url: 'https://repo.company.com/maven'
username: user
password: pass
dest: /tmp/library-name-latest.jar
# Download a WAR File to the Tomcat webapps directory to be deployed
- maven_artifact:
group_id: com.company
artifact_id: web-app
extension: war
repository_url: 'https://repo.company.com/maven'
dest: /var/lib/tomcat7/webapps/web-app.war
'''
class Artifact(object):
def __init__(self, group_id, artifact_id, version, classifier=None, extension='jar'):
if not group_id:
raise ValueError("group_id must be set")
if not artifact_id:
raise ValueError("artifact_id must be set")
self.group_id = group_id
self.artifact_id = artifact_id
self.version = version
self.classifier = classifier
if not extension:
self.extension = "jar"
else:
self.extension = extension
def is_snapshot(self):
return self.version and self.version.endswith("SNAPSHOT")
def path(self, with_version=True):
base = posixpath.join(self.group_id.replace(".", "/"), self.artifact_id)
if with_version and self.version:
return posixpath.join(base, self.version)
else:
return base
def _generate_filename(self):
if not self.classifier:
return self.artifact_id + "." + self.extension
else:
return self.artifact_id + "-" + self.classifier + "." + self.extension
def get_filename(self, filename=None):
if not filename:
filename = self._generate_filename()
elif os.path.isdir(filename):
filename = os.path.join(filename, self._generate_filename())
return filename
def __str__(self):
if self.classifier:
return "%s:%s:%s:%s:%s" % (self.group_id, self.artifact_id, self.extension, self.classifier, self.version)
elif self.extension != "jar":
return "%s:%s:%s:%s" % (self.group_id, self.artifact_id, self.extension, self.version)
else:
return "%s:%s:%s" % (self.group_id, self.artifact_id, self.version)
@staticmethod
def parse(input):
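        # Accepts coordinate strings of the form groupId:artifactId:version,
        # groupId:artifactId:extension:version, or
        # groupId:artifactId:extension:classifier:version.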
parts = input.split(":")
if len(parts) >= 3:
g = parts[0]
a = parts[1]
v = parts[len(parts) - 1]
t = None
c = None
if len(parts) == 4:
t = parts[2]
if len(parts) == 5:
t = parts[2]
c = parts[3]
return Artifact(g, a, v, c, t)
else:
return None
class MavenDownloader:
def __init__(self, module, base="http://repo1.maven.org/maven2"):
self.module = module
if base.endswith("/"):
base = base.rstrip("/")
self.base = base
self.user_agent = "Maven Artifact Downloader/1.0"
def _find_latest_version_available(self, artifact):
path = "/%s/maven-metadata.xml" % (artifact.path(False))
xml = self._request(self.base + path, "Failed to download maven-metadata.xml", lambda r: etree.parse(r))
v = xml.xpath("/metadata/versioning/versions/version[last()]/text()")
if v:
return v[0]
def find_uri_for_artifact(self, artifact):
if artifact.version == "latest":
artifact.version = self._find_latest_version_available(artifact)
if artifact.is_snapshot():
path = "/%s/maven-metadata.xml" % (artifact.path())
xml = self._request(self.base + path, "Failed to download maven-metadata.xml", lambda r: etree.parse(r))
timestamp = xml.xpath("/metadata/versioning/snapshot/timestamp/text()")[0]
buildNumber = xml.xpath("/metadata/versioning/snapshot/buildNumber/text()")[0]
for snapshotArtifact in xml.xpath("/metadata/versioning/snapshotVersions/snapshotVersion"):
                if (len(snapshotArtifact.xpath("classifier/text()")) > 0
                        and snapshotArtifact.xpath("classifier/text()")[0] == artifact.classifier
                        and len(snapshotArtifact.xpath("extension/text()")) > 0
                        and snapshotArtifact.xpath("extension/text()")[0] == artifact.extension):
return self._uri_for_artifact(artifact, snapshotArtifact.xpath("value/text()")[0])
return self._uri_for_artifact(artifact, artifact.version.replace("SNAPSHOT", timestamp + "-" + buildNumber))
return self._uri_for_artifact(artifact, artifact.version)
def _uri_for_artifact(self, artifact, version=None):
if artifact.is_snapshot() and not version:
raise ValueError("Expected uniqueversion for snapshot artifact " + str(artifact))
elif not artifact.is_snapshot():
version = artifact.version
if artifact.classifier:
return posixpath.join(self.base, artifact.path(), artifact.artifact_id + "-" + version + "-" + artifact.classifier + "." + artifact.extension)
return posixpath.join(self.base, artifact.path(), artifact.artifact_id + "-" + version + "." + artifact.extension)
def _request(self, url, failmsg, f):
url_to_use = url
parsed_url = urlparse(url)
if parsed_url.scheme=='s3':
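            # For s3:// repository URLs, swap in a short-lived presigned HTTPS URL
            # generated with boto3 so the download can go through fetch_url below.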
parsed_url = urlparse(url)
bucket_name = parsed_url.netloc
key_name = parsed_url.path[1:]
client = boto3.client('s3',aws_access_key_id=self.module.params.get('username', ''), aws_secret_access_key=self.module.params.get('password', ''))
url_to_use = client.generate_presigned_url('get_object',Params={'Bucket':bucket_name,'Key':key_name},ExpiresIn=10)
req_timeout = self.module.params.get('timeout')
# Hack to add parameters in the way that fetch_url expects
self.module.params['url_username'] = self.module.params.get('username', '')
self.module.params['url_password'] = self.module.params.get('password', '')
self.module.params['http_agent'] = self.module.params.get('user_agent', None)
response, info = fetch_url(self.module, url_to_use, timeout=req_timeout)
if info['status'] != 200:
raise ValueError(failmsg + " because of " + info['msg'] + "for URL " + url_to_use)
else:
return f(response)
def download(self, artifact, filename=None):
filename = artifact.get_filename(filename)
if not artifact.version or artifact.version == "latest":
artifact = Artifact(artifact.group_id, artifact.artifact_id, self._find_latest_version_available(artifact),
artifact.classifier, artifact.extension)
url = self.find_uri_for_artifact(artifact)
if not self.verify_md5(filename, url + ".md5"):
response = self._request(url, "Failed to download artifact " + str(artifact), lambda r: r)
if response:
                f = open(filename, 'wb')
# f.write(response.read())
self._write_chunks(response, f, report_hook=self.chunk_report)
f.close()
return True
else:
return False
else:
return True
def chunk_report(self, bytes_so_far, chunk_size, total_size):
percent = float(bytes_so_far) / total_size
percent = round(percent * 100, 2)
sys.stdout.write("Downloaded %d of %d bytes (%0.2f%%)\r" %
(bytes_so_far, total_size, percent))
if bytes_so_far >= total_size:
sys.stdout.write('\n')
def _write_chunks(self, response, file, chunk_size=8192, report_hook=None):
total_size = response.info().getheader('Content-Length').strip()
total_size = int(total_size)
bytes_so_far = 0
while 1:
chunk = response.read(chunk_size)
bytes_so_far += len(chunk)
if not chunk:
break
file.write(chunk)
if report_hook:
report_hook(bytes_so_far, chunk_size, total_size)
return bytes_so_far
def verify_md5(self, file, remote_md5):
if not os.path.exists(file):
return False
else:
local_md5 = self._local_md5(file)
remote = self._request(remote_md5, "Failed to download MD5", lambda r: r.read())
return local_md5 == remote
def _local_md5(self, file):
md5 = hashlib.md5()
f = open(file, 'rb')
for chunk in iter(lambda: f.read(8192), ''):
md5.update(chunk)
f.close()
return md5.hexdigest()
def main():
module = AnsibleModule(
argument_spec = dict(
group_id = dict(default=None),
artifact_id = dict(default=None),
version = dict(default="latest"),
classifier = dict(default=None),
extension = dict(default='jar'),
repository_url = dict(default=None),
username = dict(default=None,aliases=['aws_secret_key']),
password = dict(default=None, no_log=True,aliases=['aws_secret_access_key']),
state = dict(default="present", choices=["present","absent"]), # TODO - Implement a "latest" state
timeout = dict(default=10, type='int'),
dest = dict(type="path", default=None),
validate_certs = dict(required=False, default=True, type='bool'),
)
)
try:
parsed_url = urlparse(module.params["repository_url"])
except AttributeError as e:
module.fail_json(msg='url parsing went wrong %s' % e)
if parsed_url.scheme=='s3' and not HAS_BOTO:
module.fail_json(msg='boto3 required for this module, when using s3:// repository URLs')
group_id = module.params["group_id"]
artifact_id = module.params["artifact_id"]
version = module.params["version"]
classifier = module.params["classifier"]
extension = module.params["extension"]
repository_url = module.params["repository_url"]
repository_username = module.params["username"]
repository_password = module.params["password"]
state = module.params["state"]
dest = module.params["dest"]
if not repository_url:
repository_url = "http://repo1.maven.org/maven2"
#downloader = MavenDownloader(module, repository_url, repository_username, repository_password)
downloader = MavenDownloader(module, repository_url)
try:
artifact = Artifact(group_id, artifact_id, version, classifier, extension)
except ValueError as e:
module.fail_json(msg=e.args[0])
prev_state = "absent"
if os.path.isdir(dest):
dest = posixpath.join(dest, artifact_id + "-" + version + "." + extension)
if os.path.lexists(dest) and downloader.verify_md5(dest, downloader.find_uri_for_artifact(artifact) + '.md5'):
prev_state = "present"
else:
path = os.path.dirname(dest)
if not os.path.exists(path):
os.makedirs(path)
if prev_state == "present":
module.exit_json(dest=dest, state=state, changed=False)
try:
if downloader.download(artifact, dest):
module.exit_json(state=state, dest=dest, group_id=group_id, artifact_id=artifact_id, version=version, classifier=classifier, extension=extension, repository_url=repository_url, changed=True)
else:
module.fail_json(msg="Unable to download the artifact")
except ValueError as e:
module.fail_json(msg=e.args[0])
if __name__ == '__main__':
main()
|
snasoft/QtCreatorPluginsPack | refs/heads/master | Bin/3rdParty/vera/bin/lib/distutils/command/bdist_msi.py | 164 | # -*- coding: iso-8859-1 -*-
# Copyright (C) 2005, 2006 Martin von Löwis
# Licensed to PSF under a Contributor Agreement.
# The bdist_wininst command proper
# based on bdist_wininst
"""
Implements the bdist_msi command.
"""
import sys, os
from sysconfig import get_python_version
from distutils.core import Command
from distutils.dir_util import remove_tree
from distutils.version import StrictVersion
from distutils.errors import DistutilsOptionError
from distutils import log
from distutils.util import get_platform
import msilib
from msilib import schema, sequence, text
from msilib import Directory, Feature, Dialog, add_data
class PyDialog(Dialog):
"""Dialog class with a fixed layout: controls at the top, then a ruler,
then a list of buttons: back, next, cancel. Optionally a bitmap at the
left."""
def __init__(self, *args, **kw):
"""Dialog(database, name, x, y, w, h, attributes, title, first,
default, cancel, bitmap=true)"""
Dialog.__init__(self, *args)
ruler = self.h - 36
#if kw.get("bitmap", True):
# self.bitmap("Bitmap", 0, 0, bmwidth, ruler, "PythonWin")
self.line("BottomLine", 0, ruler, self.w, 0)
def title(self, title):
"Set the title text of the dialog at the top."
# name, x, y, w, h, flags=Visible|Enabled|Transparent|NoPrefix,
# text, in VerdanaBold10
self.text("Title", 15, 10, 320, 60, 0x30003,
r"{\VerdanaBold10}%s" % title)
def back(self, title, next, name = "Back", active = 1):
"""Add a back button with a given title, the tab-next button,
its name in the Control table, possibly initially disabled.
Return the button, so that events can be associated"""
if active:
flags = 3 # Visible|Enabled
else:
flags = 1 # Visible
return self.pushbutton(name, 180, self.h-27 , 56, 17, flags, title, next)
def cancel(self, title, next, name = "Cancel", active = 1):
"""Add a cancel button with a given title, the tab-next button,
its name in the Control table, possibly initially disabled.
Return the button, so that events can be associated"""
if active:
flags = 3 # Visible|Enabled
else:
flags = 1 # Visible
return self.pushbutton(name, 304, self.h-27, 56, 17, flags, title, next)
def next(self, title, next, name = "Next", active = 1):
"""Add a Next button with a given title, the tab-next button,
its name in the Control table, possibly initially disabled.
Return the button, so that events can be associated"""
if active:
flags = 3 # Visible|Enabled
else:
flags = 1 # Visible
return self.pushbutton(name, 236, self.h-27, 56, 17, flags, title, next)
def xbutton(self, name, title, next, xpos):
"""Add a button with a given title, the tab-next button,
its name in the Control table, giving its x position; the
y-position is aligned with the other buttons.
Return the button, so that events can be associated"""
return self.pushbutton(name, int(self.w*xpos - 28), self.h-27, 56, 17, 3, title, next)
class bdist_msi (Command):
description = "create a Microsoft Installer (.msi) binary distribution"
user_options = [('bdist-dir=', None,
"temporary directory for creating the distribution"),
('plat-name=', 'p',
"platform name to embed in generated filenames "
"(default: %s)" % get_platform()),
('keep-temp', 'k',
"keep the pseudo-installation tree around after " +
"creating the distribution archive"),
('target-version=', None,
"require a specific python version" +
" on the target system"),
('no-target-compile', 'c',
"do not compile .py to .pyc on the target system"),
('no-target-optimize', 'o',
"do not compile .py to .pyo (optimized)"
"on the target system"),
('dist-dir=', 'd',
"directory to put final built distributions in"),
('skip-build', None,
"skip rebuilding everything (for testing/debugging)"),
('install-script=', None,
"basename of installation script to be run after"
"installation or before deinstallation"),
('pre-install-script=', None,
"Fully qualified filename of a script to be run before "
"any files are installed. This script need not be in the "
"distribution"),
]
boolean_options = ['keep-temp', 'no-target-compile', 'no-target-optimize',
'skip-build']
all_versions = ['2.0', '2.1', '2.2', '2.3', '2.4',
'2.5', '2.6', '2.7', '2.8', '2.9',
'3.0', '3.1', '3.2', '3.3', '3.4',
'3.5', '3.6', '3.7', '3.8', '3.9']
other_version = 'X'
def initialize_options (self):
self.bdist_dir = None
self.plat_name = None
self.keep_temp = 0
self.no_target_compile = 0
self.no_target_optimize = 0
self.target_version = None
self.dist_dir = None
self.skip_build = None
self.install_script = None
self.pre_install_script = None
self.versions = None
def finalize_options (self):
self.set_undefined_options('bdist', ('skip_build', 'skip_build'))
if self.bdist_dir is None:
bdist_base = self.get_finalized_command('bdist').bdist_base
self.bdist_dir = os.path.join(bdist_base, 'msi')
short_version = get_python_version()
if (not self.target_version) and self.distribution.has_ext_modules():
self.target_version = short_version
if self.target_version:
self.versions = [self.target_version]
if not self.skip_build and self.distribution.has_ext_modules()\
and self.target_version != short_version:
raise DistutilsOptionError, \
"target version can only be %s, or the '--skip-build'" \
" option must be specified" % (short_version,)
else:
self.versions = list(self.all_versions)
self.set_undefined_options('bdist',
('dist_dir', 'dist_dir'),
('plat_name', 'plat_name'),
)
if self.pre_install_script:
raise DistutilsOptionError, "the pre-install-script feature is not yet implemented"
if self.install_script:
for script in self.distribution.scripts:
if self.install_script == os.path.basename(script):
break
else:
raise DistutilsOptionError, \
"install_script '%s' not found in scripts" % \
self.install_script
self.install_script_key = None
# finalize_options()
def run (self):
if not self.skip_build:
self.run_command('build')
install = self.reinitialize_command('install', reinit_subcommands=1)
install.prefix = self.bdist_dir
install.skip_build = self.skip_build
install.warn_dir = 0
install_lib = self.reinitialize_command('install_lib')
# we do not want to include pyc or pyo files
install_lib.compile = 0
install_lib.optimize = 0
if self.distribution.has_ext_modules():
# If we are building an installer for a Python version other
# than the one we are currently running, then we need to ensure
# our build_lib reflects the other Python version rather than ours.
# Note that for target_version!=sys.version, we must have skipped the
# build step, so there is no issue with enforcing the build of this
# version.
target_version = self.target_version
if not target_version:
assert self.skip_build, "Should have already checked this"
target_version = sys.version[0:3]
plat_specifier = ".%s-%s" % (self.plat_name, target_version)
build = self.get_finalized_command('build')
build.build_lib = os.path.join(build.build_base,
'lib' + plat_specifier)
log.info("installing to %s", self.bdist_dir)
install.ensure_finalized()
# avoid warning of 'install_lib' about installing
# into a directory not in sys.path
sys.path.insert(0, os.path.join(self.bdist_dir, 'PURELIB'))
install.run()
del sys.path[0]
self.mkpath(self.dist_dir)
fullname = self.distribution.get_fullname()
installer_name = self.get_installer_filename(fullname)
installer_name = os.path.abspath(installer_name)
if os.path.exists(installer_name): os.unlink(installer_name)
metadata = self.distribution.metadata
author = metadata.author
if not author:
author = metadata.maintainer
if not author:
author = "UNKNOWN"
version = metadata.get_version()
# ProductVersion must be strictly numeric
# XXX need to deal with prerelease versions
sversion = "%d.%d.%d" % StrictVersion(version).version
# Prefix ProductName with Python x.y, so that
# it sorts together with the other Python packages
        # in Add/Remove Programs (ARP)
fullname = self.distribution.get_fullname()
if self.target_version:
product_name = "Python %s %s" % (self.target_version, fullname)
else:
product_name = "Python %s" % (fullname)
self.db = msilib.init_database(installer_name, schema,
product_name, msilib.gen_uuid(),
sversion, author)
msilib.add_tables(self.db, sequence)
props = [('DistVersion', version)]
email = metadata.author_email or metadata.maintainer_email
if email:
props.append(("ARPCONTACT", email))
if metadata.url:
props.append(("ARPURLINFOABOUT", metadata.url))
if props:
add_data(self.db, 'Property', props)
self.add_find_python()
self.add_files()
self.add_scripts()
self.add_ui()
self.db.Commit()
if hasattr(self.distribution, 'dist_files'):
tup = 'bdist_msi', self.target_version or 'any', fullname
self.distribution.dist_files.append(tup)
if not self.keep_temp:
remove_tree(self.bdist_dir, dry_run=self.dry_run)
def add_files(self):
db = self.db
cab = msilib.CAB("distfiles")
rootdir = os.path.abspath(self.bdist_dir)
root = Directory(db, cab, None, rootdir, "TARGETDIR", "SourceDir")
f = Feature(db, "Python", "Python", "Everything",
0, 1, directory="TARGETDIR")
items = [(f, root, '')]
for version in self.versions + [self.other_version]:
target = "TARGETDIR" + version
name = default = "Python" + version
desc = "Everything"
if version is self.other_version:
title = "Python from another location"
level = 2
else:
title = "Python %s from registry" % version
level = 1
f = Feature(db, name, title, desc, 1, level, directory=target)
dir = Directory(db, cab, root, rootdir, target, default)
items.append((f, dir, version))
db.Commit()
seen = {}
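        # Walk the installed tree once per feature; a file that was already packed
        # for another Python version is recorded as a DuplicateFile row pointing at
        # the original component instead of being added to the CAB again.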
for feature, dir, version in items:
todo = [dir]
while todo:
dir = todo.pop()
for file in os.listdir(dir.absolute):
afile = os.path.join(dir.absolute, file)
if os.path.isdir(afile):
short = "%s|%s" % (dir.make_short(file), file)
default = file + version
newdir = Directory(db, cab, dir, file, default, short)
todo.append(newdir)
else:
if not dir.component:
dir.start_component(dir.logical, feature, 0)
if afile not in seen:
key = seen[afile] = dir.add_file(file)
if file==self.install_script:
if self.install_script_key:
raise DistutilsOptionError(
"Multiple files with name %s" % file)
self.install_script_key = '[#%s]' % key
else:
key = seen[afile]
add_data(self.db, "DuplicateFile",
[(key + version, dir.component, key, None, dir.logical)])
db.Commit()
cab.commit(db)
def add_find_python(self):
"""Adds code to the installer to compute the location of Python.
Properties PYTHON.MACHINE.X.Y and PYTHON.USER.X.Y will be set from the
registry for each version of Python.
Properties TARGETDIRX.Y will be set from PYTHON.USER.X.Y if defined,
else from PYTHON.MACHINE.X.Y.
Properties PYTHONX.Y will be set to TARGETDIRX.Y\\python.exe"""
start = 402
for ver in self.versions:
install_path = r"SOFTWARE\Python\PythonCore\%s\InstallPath" % ver
machine_reg = "python.machine." + ver
user_reg = "python.user." + ver
machine_prop = "PYTHON.MACHINE." + ver
user_prop = "PYTHON.USER." + ver
machine_action = "PythonFromMachine" + ver
user_action = "PythonFromUser" + ver
exe_action = "PythonExe" + ver
target_dir_prop = "TARGETDIR" + ver
exe_prop = "PYTHON" + ver
if msilib.Win64:
# type: msidbLocatorTypeRawValue + msidbLocatorType64bit
Type = 2+16
else:
Type = 2
add_data(self.db, "RegLocator",
[(machine_reg, 2, install_path, None, Type),
(user_reg, 1, install_path, None, Type)])
add_data(self.db, "AppSearch",
[(machine_prop, machine_reg),
(user_prop, user_reg)])
add_data(self.db, "CustomAction",
[(machine_action, 51+256, target_dir_prop, "[" + machine_prop + "]"),
(user_action, 51+256, target_dir_prop, "[" + user_prop + "]"),
(exe_action, 51+256, exe_prop, "[" + target_dir_prop + "]\\python.exe"),
])
add_data(self.db, "InstallExecuteSequence",
[(machine_action, machine_prop, start),
(user_action, user_prop, start + 1),
(exe_action, None, start + 2),
])
add_data(self.db, "InstallUISequence",
[(machine_action, machine_prop, start),
(user_action, user_prop, start + 1),
(exe_action, None, start + 2),
])
add_data(self.db, "Condition",
[("Python" + ver, 0, "NOT TARGETDIR" + ver)])
start += 4
assert start < 500
def add_scripts(self):
if self.install_script:
start = 6800
for ver in self.versions + [self.other_version]:
install_action = "install_script." + ver
exe_prop = "PYTHON" + ver
add_data(self.db, "CustomAction",
[(install_action, 50, exe_prop, self.install_script_key)])
add_data(self.db, "InstallExecuteSequence",
[(install_action, "&Python%s=3" % ver, start)])
start += 1
# XXX pre-install scripts are currently refused in finalize_options()
# but if this feature is completed, it will also need to add
# entries for each version as the above code does
if self.pre_install_script:
scriptfn = os.path.join(self.bdist_dir, "preinstall.bat")
f = open(scriptfn, "w")
# The batch file will be executed with [PYTHON], so that %1
# is the path to the Python interpreter; %0 will be the path
# of the batch file.
# rem ="""
# %1 %0
# exit
# """
# <actual script>
f.write('rem ="""\n%1 %0\nexit\n"""\n')
f.write(open(self.pre_install_script).read())
f.close()
add_data(self.db, "Binary",
[("PreInstall", msilib.Binary(scriptfn))
])
add_data(self.db, "CustomAction",
[("PreInstall", 2, "PreInstall", None)
])
add_data(self.db, "InstallExecuteSequence",
[("PreInstall", "NOT Installed", 450)])
def add_ui(self):
db = self.db
x = y = 50
w = 370
h = 300
title = "[ProductName] Setup"
# see "Dialog Style Bits"
modal = 3 # visible | modal
modeless = 1 # visible
# UI customization properties
add_data(db, "Property",
# See "DefaultUIFont Property"
[("DefaultUIFont", "DlgFont8"),
# See "ErrorDialog Style Bit"
("ErrorDialog", "ErrorDlg"),
("Progress1", "Install"), # modified in maintenance type dlg
("Progress2", "installs"),
("MaintenanceForm_Action", "Repair"),
# possible values: ALL, JUSTME
("WhichUsers", "ALL")
])
# Fonts, see "TextStyle Table"
add_data(db, "TextStyle",
[("DlgFont8", "Tahoma", 9, None, 0),
("DlgFontBold8", "Tahoma", 8, None, 1), #bold
("VerdanaBold10", "Verdana", 10, None, 1),
("VerdanaRed9", "Verdana", 9, 255, 0),
])
# UI Sequences, see "InstallUISequence Table", "Using a Sequence Table"
# Numbers indicate sequence; see sequence.py for how these action integrate
add_data(db, "InstallUISequence",
[("PrepareDlg", "Not Privileged or Windows9x or Installed", 140),
("WhichUsersDlg", "Privileged and not Windows9x and not Installed", 141),
# In the user interface, assume all-users installation if privileged.
("SelectFeaturesDlg", "Not Installed", 1230),
# XXX no support for resume installations yet
#("ResumeDlg", "Installed AND (RESUME OR Preselected)", 1240),
("MaintenanceTypeDlg", "Installed AND NOT RESUME AND NOT Preselected", 1250),
("ProgressDlg", None, 1280)])
add_data(db, 'ActionText', text.ActionText)
add_data(db, 'UIText', text.UIText)
#####################################################################
# Standard dialogs: FatalError, UserExit, ExitDialog
fatal=PyDialog(db, "FatalError", x, y, w, h, modal, title,
"Finish", "Finish", "Finish")
fatal.title("[ProductName] Installer ended prematurely")
fatal.back("< Back", "Finish", active = 0)
fatal.cancel("Cancel", "Back", active = 0)
fatal.text("Description1", 15, 70, 320, 80, 0x30003,
"[ProductName] setup ended prematurely because of an error. Your system has not been modified. To install this program at a later time, please run the installation again.")
fatal.text("Description2", 15, 155, 320, 20, 0x30003,
"Click the Finish button to exit the Installer.")
c=fatal.next("Finish", "Cancel", name="Finish")
c.event("EndDialog", "Exit")
user_exit=PyDialog(db, "UserExit", x, y, w, h, modal, title,
"Finish", "Finish", "Finish")
user_exit.title("[ProductName] Installer was interrupted")
user_exit.back("< Back", "Finish", active = 0)
user_exit.cancel("Cancel", "Back", active = 0)
user_exit.text("Description1", 15, 70, 320, 80, 0x30003,
"[ProductName] setup was interrupted. Your system has not been modified. "
"To install this program at a later time, please run the installation again.")
user_exit.text("Description2", 15, 155, 320, 20, 0x30003,
"Click the Finish button to exit the Installer.")
c = user_exit.next("Finish", "Cancel", name="Finish")
c.event("EndDialog", "Exit")
exit_dialog = PyDialog(db, "ExitDialog", x, y, w, h, modal, title,
"Finish", "Finish", "Finish")
exit_dialog.title("Completing the [ProductName] Installer")
exit_dialog.back("< Back", "Finish", active = 0)
exit_dialog.cancel("Cancel", "Back", active = 0)
exit_dialog.text("Description", 15, 235, 320, 20, 0x30003,
"Click the Finish button to exit the Installer.")
c = exit_dialog.next("Finish", "Cancel", name="Finish")
c.event("EndDialog", "Return")
#####################################################################
# Required dialog: FilesInUse, ErrorDlg
inuse = PyDialog(db, "FilesInUse",
x, y, w, h,
19, # KeepModeless|Modal|Visible
title,
"Retry", "Retry", "Retry", bitmap=False)
inuse.text("Title", 15, 6, 200, 15, 0x30003,
r"{\DlgFontBold8}Files in Use")
inuse.text("Description", 20, 23, 280, 20, 0x30003,
"Some files that need to be updated are currently in use.")
inuse.text("Text", 20, 55, 330, 50, 3,
"The following applications are using files that need to be updated by this setup. Close these applications and then click Retry to continue the installation or Cancel to exit it.")
inuse.control("List", "ListBox", 20, 107, 330, 130, 7, "FileInUseProcess",
None, None, None)
c=inuse.back("Exit", "Ignore", name="Exit")
c.event("EndDialog", "Exit")
c=inuse.next("Ignore", "Retry", name="Ignore")
c.event("EndDialog", "Ignore")
c=inuse.cancel("Retry", "Exit", name="Retry")
c.event("EndDialog","Retry")
# See "Error Dialog". See "ICE20" for the required names of the controls.
error = Dialog(db, "ErrorDlg",
50, 10, 330, 101,
65543, # Error|Minimize|Modal|Visible
title,
"ErrorText", None, None)
error.text("ErrorText", 50,9,280,48,3, "")
#error.control("ErrorIcon", "Icon", 15, 9, 24, 24, 5242881, None, "py.ico", None, None)
error.pushbutton("N",120,72,81,21,3,"No",None).event("EndDialog","ErrorNo")
error.pushbutton("Y",240,72,81,21,3,"Yes",None).event("EndDialog","ErrorYes")
error.pushbutton("A",0,72,81,21,3,"Abort",None).event("EndDialog","ErrorAbort")
error.pushbutton("C",42,72,81,21,3,"Cancel",None).event("EndDialog","ErrorCancel")
error.pushbutton("I",81,72,81,21,3,"Ignore",None).event("EndDialog","ErrorIgnore")
error.pushbutton("O",159,72,81,21,3,"Ok",None).event("EndDialog","ErrorOk")
error.pushbutton("R",198,72,81,21,3,"Retry",None).event("EndDialog","ErrorRetry")
#####################################################################
# Global "Query Cancel" dialog
cancel = Dialog(db, "CancelDlg", 50, 10, 260, 85, 3, title,
"No", "No", "No")
cancel.text("Text", 48, 15, 194, 30, 3,
"Are you sure you want to cancel [ProductName] installation?")
#cancel.control("Icon", "Icon", 15, 15, 24, 24, 5242881, None,
# "py.ico", None, None)
c=cancel.pushbutton("Yes", 72, 57, 56, 17, 3, "Yes", "No")
c.event("EndDialog", "Exit")
c=cancel.pushbutton("No", 132, 57, 56, 17, 3, "No", "Yes")
c.event("EndDialog", "Return")
#####################################################################
# Global "Wait for costing" dialog
costing = Dialog(db, "WaitForCostingDlg", 50, 10, 260, 85, modal, title,
"Return", "Return", "Return")
costing.text("Text", 48, 15, 194, 30, 3,
"Please wait while the installer finishes determining your disk space requirements.")
c = costing.pushbutton("Return", 102, 57, 56, 17, 3, "Return", None)
c.event("EndDialog", "Exit")
#####################################################################
# Preparation dialog: no user input except cancellation
prep = PyDialog(db, "PrepareDlg", x, y, w, h, modeless, title,
"Cancel", "Cancel", "Cancel")
prep.text("Description", 15, 70, 320, 40, 0x30003,
"Please wait while the Installer prepares to guide you through the installation.")
prep.title("Welcome to the [ProductName] Installer")
c=prep.text("ActionText", 15, 110, 320, 20, 0x30003, "Pondering...")
c.mapping("ActionText", "Text")
c=prep.text("ActionData", 15, 135, 320, 30, 0x30003, None)
c.mapping("ActionData", "Text")
prep.back("Back", None, active=0)
prep.next("Next", None, active=0)
c=prep.cancel("Cancel", None)
c.event("SpawnDialog", "CancelDlg")
#####################################################################
# Feature (Python directory) selection
seldlg = PyDialog(db, "SelectFeaturesDlg", x, y, w, h, modal, title,
"Next", "Next", "Cancel")
seldlg.title("Select Python Installations")
seldlg.text("Hint", 15, 30, 300, 20, 3,
"Select the Python locations where %s should be installed."
% self.distribution.get_fullname())
seldlg.back("< Back", None, active=0)
c = seldlg.next("Next >", "Cancel")
order = 1
c.event("[TARGETDIR]", "[SourceDir]", ordering=order)
for version in self.versions + [self.other_version]:
order += 1
c.event("[TARGETDIR]", "[TARGETDIR%s]" % version,
"FEATURE_SELECTED AND &Python%s=3" % version,
ordering=order)
c.event("SpawnWaitDialog", "WaitForCostingDlg", ordering=order + 1)
c.event("EndDialog", "Return", ordering=order + 2)
c = seldlg.cancel("Cancel", "Features")
c.event("SpawnDialog", "CancelDlg")
c = seldlg.control("Features", "SelectionTree", 15, 60, 300, 120, 3,
"FEATURE", None, "PathEdit", None)
c.event("[FEATURE_SELECTED]", "1")
ver = self.other_version
install_other_cond = "FEATURE_SELECTED AND &Python%s=3" % ver
dont_install_other_cond = "FEATURE_SELECTED AND &Python%s<>3" % ver
c = seldlg.text("Other", 15, 200, 300, 15, 3,
"Provide an alternate Python location")
c.condition("Enable", install_other_cond)
c.condition("Show", install_other_cond)
c.condition("Disable", dont_install_other_cond)
c.condition("Hide", dont_install_other_cond)
c = seldlg.control("PathEdit", "PathEdit", 15, 215, 300, 16, 1,
"TARGETDIR" + ver, None, "Next", None)
c.condition("Enable", install_other_cond)
c.condition("Show", install_other_cond)
c.condition("Disable", dont_install_other_cond)
c.condition("Hide", dont_install_other_cond)
#####################################################################
# Disk cost
cost = PyDialog(db, "DiskCostDlg", x, y, w, h, modal, title,
"OK", "OK", "OK", bitmap=False)
cost.text("Title", 15, 6, 200, 15, 0x30003,
"{\DlgFontBold8}Disk Space Requirements")
cost.text("Description", 20, 20, 280, 20, 0x30003,
"The disk space required for the installation of the selected features.")
cost.text("Text", 20, 53, 330, 60, 3,
"The highlighted volumes (if any) do not have enough disk space "
"available for the currently selected features. You can either "
"remove some files from the highlighted volumes, or choose to "
"install less features onto local drive(s), or select different "
"destination drive(s).")
cost.control("VolumeList", "VolumeCostList", 20, 100, 330, 150, 393223,
None, "{120}{70}{70}{70}{70}", None, None)
cost.xbutton("OK", "Ok", None, 0.5).event("EndDialog", "Return")
#####################################################################
# WhichUsers Dialog. Only available on NT, and for privileged users.
# This must be run before FindRelatedProducts, because that will
# take into account whether the previous installation was per-user
# or per-machine. We currently don't support going back to this
# dialog after "Next" was selected; to support this, we would need to
# find how to reset the ALLUSERS property, and how to re-run
# FindRelatedProducts.
# On Windows9x, the ALLUSERS property is ignored on the command line
# and in the Property table, but installer fails according to the documentation
# if a dialog attempts to set ALLUSERS.
whichusers = PyDialog(db, "WhichUsersDlg", x, y, w, h, modal, title,
"AdminInstall", "Next", "Cancel")
whichusers.title("Select whether to install [ProductName] for all users of this computer.")
# A radio group with two options: allusers, justme
g = whichusers.radiogroup("AdminInstall", 15, 60, 260, 50, 3,
"WhichUsers", "", "Next")
g.add("ALL", 0, 5, 150, 20, "Install for all users")
g.add("JUSTME", 0, 25, 150, 20, "Install just for me")
whichusers.back("Back", None, active=0)
c = whichusers.next("Next >", "Cancel")
c.event("[ALLUSERS]", "1", 'WhichUsers="ALL"', 1)
c.event("EndDialog", "Return", ordering = 2)
c = whichusers.cancel("Cancel", "AdminInstall")
c.event("SpawnDialog", "CancelDlg")
#####################################################################
# Installation Progress dialog (modeless)
progress = PyDialog(db, "ProgressDlg", x, y, w, h, modeless, title,
"Cancel", "Cancel", "Cancel", bitmap=False)
progress.text("Title", 20, 15, 200, 15, 0x30003,
"{\DlgFontBold8}[Progress1] [ProductName]")
progress.text("Text", 35, 65, 300, 30, 3,
"Please wait while the Installer [Progress2] [ProductName]. "
"This may take several minutes.")
progress.text("StatusLabel", 35, 100, 35, 20, 3, "Status:")
c=progress.text("ActionText", 70, 100, w-70, 20, 3, "Pondering...")
c.mapping("ActionText", "Text")
#c=progress.text("ActionData", 35, 140, 300, 20, 3, None)
#c.mapping("ActionData", "Text")
c=progress.control("ProgressBar", "ProgressBar", 35, 120, 300, 10, 65537,
None, "Progress done", None, None)
c.mapping("SetProgress", "Progress")
progress.back("< Back", "Next", active=False)
progress.next("Next >", "Cancel", active=False)
progress.cancel("Cancel", "Back").event("SpawnDialog", "CancelDlg")
###################################################################
# Maintenance type: repair/uninstall
maint = PyDialog(db, "MaintenanceTypeDlg", x, y, w, h, modal, title,
"Next", "Next", "Cancel")
maint.title("Welcome to the [ProductName] Setup Wizard")
maint.text("BodyText", 15, 63, 330, 42, 3,
"Select whether you want to repair or remove [ProductName].")
g=maint.radiogroup("RepairRadioGroup", 15, 108, 330, 60, 3,
"MaintenanceForm_Action", "", "Next")
#g.add("Change", 0, 0, 200, 17, "&Change [ProductName]")
g.add("Repair", 0, 18, 200, 17, "&Repair [ProductName]")
g.add("Remove", 0, 36, 200, 17, "Re&move [ProductName]")
maint.back("< Back", None, active=False)
c=maint.next("Finish", "Cancel")
# Change installation: Change progress dialog to "Change", then ask
# for feature selection
#c.event("[Progress1]", "Change", 'MaintenanceForm_Action="Change"', 1)
#c.event("[Progress2]", "changes", 'MaintenanceForm_Action="Change"', 2)
# Reinstall: Change progress dialog to "Repair", then invoke reinstall
# Also set list of reinstalled features to "ALL"
c.event("[REINSTALL]", "ALL", 'MaintenanceForm_Action="Repair"', 5)
c.event("[Progress1]", "Repairing", 'MaintenanceForm_Action="Repair"', 6)
c.event("[Progress2]", "repairs", 'MaintenanceForm_Action="Repair"', 7)
c.event("Reinstall", "ALL", 'MaintenanceForm_Action="Repair"', 8)
# Uninstall: Change progress to "Remove", then invoke uninstall
# Also set list of removed features to "ALL"
c.event("[REMOVE]", "ALL", 'MaintenanceForm_Action="Remove"', 11)
c.event("[Progress1]", "Removing", 'MaintenanceForm_Action="Remove"', 12)
c.event("[Progress2]", "removes", 'MaintenanceForm_Action="Remove"', 13)
c.event("Remove", "ALL", 'MaintenanceForm_Action="Remove"', 14)
# Close dialog when maintenance action scheduled
c.event("EndDialog", "Return", 'MaintenanceForm_Action<>"Change"', 20)
#c.event("NewDialog", "SelectFeaturesDlg", 'MaintenanceForm_Action="Change"', 21)
maint.cancel("Cancel", "RepairRadioGroup").event("SpawnDialog", "CancelDlg")
def get_installer_filename(self, fullname):
# Factored out to allow overriding in subclasses
if self.target_version:
base_name = "%s.%s-py%s.msi" % (fullname, self.plat_name,
self.target_version)
else:
base_name = "%s.%s.msi" % (fullname, self.plat_name)
installer_name = os.path.join(self.dist_dir, base_name)
return installer_name
|
Emercoin/emcweb | refs/heads/master | engine/emcweb/emcweb/backups/local/__init__.py | 1 | from .client import backup, check_auth
__name__ = 'Local PC'
|
jphber/django-avanzado | refs/heads/master | artists/migrations/0002_track_listen.py | 3 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('artists', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='track',
name='listen',
field=models.PositiveIntegerField(default=0),
preserve_default=True,
),
]
|
dmccloskey/SBaaS_MFA | refs/heads/master | SBaaS_MFA/stage02_isotopomer_analysis_io.py | 1 | # System
import json
# SBaaS
from .stage02_isotopomer_analysis_query import stage02_isotopomer_analysis_query
from SBaaS_base.sbaas_template_io import sbaas_template_io
# Resources
from io_utilities.base_importData import base_importData
from io_utilities.base_exportData import base_exportData
class stage02_isotopomer_analysis_io(stage02_isotopomer_analysis_query,
sbaas_template_io):
def import_data_stage02_isotopomer_analysis_add(self, filename):
'''table adds'''
data = base_importData();
data.read_csv(filename);
data.format_data();
self.add_data_stage02_isotopomer_analysis(data.data);
data.clear_data();
|
kimimj/scrapy | refs/heads/master | scrapy/signalmanager.py | 123 | from __future__ import absolute_import
from pydispatch import dispatcher
from scrapy.utils import signal as _signal
class SignalManager(object):
def __init__(self, sender=dispatcher.Anonymous):
self.sender = sender
def connect(self, receiver, signal, **kwargs):
"""
Connect a receiver function to a signal.
The signal can be any object, although Scrapy comes with some
predefined signals that are documented in the :ref:`topics-signals`
section.
:param receiver: the function to be connected
:type receiver: callable
:param signal: the signal to connect to
:type signal: object
"""
kwargs.setdefault('sender', self.sender)
return dispatcher.connect(receiver, signal, **kwargs)
def disconnect(self, receiver, signal, **kwargs):
"""
Disconnect a receiver function from a signal. This has the
opposite effect of the :meth:`connect` method, and the arguments
are the same.
"""
kwargs.setdefault('sender', self.sender)
return dispatcher.disconnect(receiver, signal, **kwargs)
def send_catch_log(self, signal, **kwargs):
"""
Send a signal, catch exceptions and log them.
The keyword arguments are passed to the signal handlers (connected
through the :meth:`connect` method).
"""
kwargs.setdefault('sender', self.sender)
return _signal.send_catch_log(signal, **kwargs)
def send_catch_log_deferred(self, signal, **kwargs):
"""
Like :meth:`send_catch_log` but supports returning `deferreds`_ from
signal handlers.
        Returns a Deferred that gets fired once all signal handlers'
        deferreds have fired. Send a signal, catch exceptions and log them.
The keyword arguments are passed to the signal handlers (connected
through the :meth:`connect` method).
.. _deferreds: http://twistedmatrix.com/documents/current/core/howto/defer.html
"""
kwargs.setdefault('sender', self.sender)
return _signal.send_catch_log_deferred(signal, **kwargs)
def disconnect_all(self, signal, **kwargs):
"""
Disconnect all receivers from the given signal.
:param signal: the signal to disconnect from
:type signal: object
"""
kwargs.setdefault('sender', self.sender)
return _signal.disconnect_all(signal, **kwargs)
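# Illustrative usage sketch (not part of the upstream module); `spider_opened`
# and `spider` are assumed to exist in the caller's scope:
#
#   from scrapy import signals
#
#   manager = SignalManager()
#   manager.connect(spider_opened, signal=signals.spider_opened)
#   manager.send_catch_log(signals.spider_opened, spider=spider)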
|
HandyCodeJob/mikeandzoey-site | refs/heads/master | src/rsvp/tests.py | 24123 | from django.test import TestCase
# Create your tests here.
|
liuenyan/django-blog | refs/heads/master | blog/tests.py | 24123 | from django.test import TestCase
# Create your tests here.
|
h2oai/h2o | refs/heads/master | scripts/validate_r_cmd_check_output.py | 8 | #!/usr/bin/python
#
# CRAN submissions use the R CMD CHECK --as-cran approach.
# But that unfortunately does not do a good job of flagging errors in a way that can be automated.
#
# This tool goes combs through the output and returns 0 if it's good and nonzero if it's bad.
#
import sys
import os
import re
class Check:
def __init__(self, file_name):
self.file_name = file_name
self.lineno = 0
def parse_error(self, message, s, f):
print("ERROR " + message + " " + self.file_name + " line " + str(self.lineno))
sys.stdout.write(" >>> " + s)
s = f.readline()
while (len(s) > 0):
sys.stdout.write(" >>> " + s)
s = f.readline()
sys.exit(1)
def process(self):
# print("Processing " + self.file_name + "...")
f = open(self.file_name, "r")
allowed_regex_list = [
r"^\* using log directory",
r"^\* using R version",
r"^\* using R version",
r"^\* using platform",
r"^\* using session charset",
r"^\* using option .*",
r"^\* checking .* \.\.\. OK",
r"^\* checking extension type \.\.\. Package",
r"^\* this is package",
r"^\* checking CRAN incoming feasibility \.\.\. NOTE",
r"^Maintainer:",
r"^New maintainer:",
r"^\s*Tom Kraljevic",
r"^\Days since last update: .*",
r"^Old maintainer\(s\):",
r"^\s*Anqi Fu",
r"^NOTE: There was 1 note.",
r"^The Title field starts with the package name.",
r"^The Date field is over a month old.",
r"^\n",
r"^New submission",
r"^Package was archived on CRAN",
r"^CRAN repository db overrides:",
r"^ X-CRAN-Comment: Archived on 2014-09-23 as did not comply with CRAN",
r"^ policies on use of multiple threads.",
r"^\* checking installed package size ... NOTE",
r"^ installed size is .*Mb",
r"^ sub-directories of 1Mb or more:",
r"^ java .*Mb",
r"^NOTE: There were 2 notes.",
r"^Package has FOSS license, installs .class/.jar but has no 'java' directory.",
r"^\* DONE",
r"^Checking URLs requires 'libcurl' support in the R build",
r"^Status: 2 NOTEs",
]
s = f.readline()
while (len(s) > 0):
self.lineno = self.lineno + 1
allowed = False
for regex in allowed_regex_list:
match_groups = re.search(regex, s)
if (match_groups is not None):
# This line is allowed.
allowed = True
break
if (not allowed):
self.parse_error("Illegal output found", s, f)
s = f.readline()
def main(argv):
if (not os.path.exists("h2o.Rcheck")):
print("ERROR: You must run this script inside the generated R package source directory.")
sys.exit(1)
c = Check("h2o.Rcheck/00check.log")
c.process()
# No failures.
sys.exit(0)
if __name__ == "__main__":
main(sys.argv)
|
richardfergie/googleads-python-lib | refs/heads/master | examples/dfa/v1_20/create_flash_inpage_creative.py | 4 | #!/usr/bin/python
#
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Creates a "Flash InPage" creative in a given advertiser or campaign.
If no campaign is specified then the creative is created in the advertiser
provided. To get assets file names, run create_html_asset.py and
create_image_asset.py. To get a size ID, run get_size.py. To get a creative
type ID, run get_creative_type.py.
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
"""
# Import appropriate modules from the client library.
from googleads import dfa
CAMPAIGN_ID = 'INSERT_CAMPAIGN_ID_HERE'
ADVERTISER_ID = 'INSERT_ADVERTISER_ID_HERE'
CREATIVE_NAME = 'INSERT_CREATIVE_NAME_HERE'
SWF_ASSET_FILE_NAME = 'INSERT_SWF_ASSET_FILE_NAME_HERE'
IMG_ASSET_FILE_NAME = 'INSERT_IMG_ASSET_FILE_NAME_HERE'
SIZE_ID = 'INSERT_SIZE_ID_HERE'
def main(client, campaign_id, advertiser_id, creative_name, swf_asset_file_name,
img_asset_file_name, size_id):
# Initialize appropriate service.
creative_service = client.GetService(
'creative', 'v1.20', 'https://advertisersapitest.doubleclick.net')
# Construct and save flash inpage creative structure.
flash_inpage_creative = {
'name': creative_name,
'id': '0',
'advertiserId': advertiser_id,
'typeId': '24',
'sizeId': size_id,
'codeLocked': 'true',
'active': 'true',
'parentFlashAsset': {
'assetFilename': swf_asset_file_name
},
'wmode': 'opaque',
'backupImageAsset': {
'assetFilename': img_asset_file_name
},
'backupImageTargetWindow': {
'option': '_blank'
}
}
result = creative_service.saveCreative(
flash_inpage_creative, campaign_id)
# Display results.
print 'Flash inpage creative with ID \'%s\' was created.' % result['id']
if __name__ == '__main__':
# Initialize client object.
dfa_client = dfa.DfaClient.LoadFromStorage()
main(dfa_client, CAMPAIGN_ID, ADVERTISER_ID, CREATIVE_NAME,
SWF_ASSET_FILE_NAME, IMG_ASSET_FILE_NAME, SIZE_ID)
|
KrisLee/libgdx | refs/heads/master | extensions/gdx-freetype/jni/freetype-2.5.5/src/tools/docmaker/content.py | 146 | #
# content.py
#
# Parse comment blocks to build content blocks (library file).
#
# Copyright 2002, 2004, 2006-2009, 2012-2014 by
# David Turner.
#
# This file is part of the FreeType project, and may only be used,
# modified, and distributed under the terms of the FreeType project
# license, LICENSE.TXT. By continuing to use, modify, or distribute
# this file you indicate that you have read the license and
# understand and accept it fully.
#
# This file contains routines to parse documentation comment blocks,
# building more structured objects out of them.
#
from sources import *
from utils import *
import string, re
#
# Regular expressions to detect code sequences. `Code sequences' are simply
# code fragments embedded in '{' and '}', as demonstrated in the following
# example.
#
# {
# x = y + z;
# if ( zookoo == 2 )
# {
# foobar();
# }
# }
#
# Note that the indentation of the first opening brace and the last closing
# brace must be exactly the same. The code sequence itself should have a
# larger indentation than the surrounding braces.
#
re_code_start = re.compile( r"(\s*){\s*$" )
re_code_end = re.compile( r"(\s*)}\s*$" )
#
# A regular expression to isolate identifiers from other text.
#
re_identifier = re.compile( r'((?:\w|-)*)' )
#
# We collect macro names ending in `_H' (group 1), as defined in
# `config/ftheader.h'. While outputting the object data, we use this info
# together with the object's file location (group 2) to emit the appropriate
# header file macro and its associated file name before the object itself.
#
# Example:
#
# #define FT_FREETYPE_H <freetype.h>
#
re_header_macro = re.compile( r'^#define\s{1,}(\w{1,}_H)\s{1,}<(.*)>' )
################################################################
##
## DOC CODE CLASS
##
## The `DocCode' class is used to store source code lines.
##
## `self.lines' contains a set of source code lines that will be dumped as
## HTML in a <PRE> tag.
##
## The object is filled line by line by the parser; it strips the leading
## `margin' space from each input line before storing it in `self.lines'.
##
class DocCode:
def __init__( self, margin, lines ):
self.lines = []
self.words = None
# remove margin spaces
for l in lines:
if string.strip( l[:margin] ) == "":
l = l[margin:]
self.lines.append( l )
def dump( self, prefix = "", width = 60 ):
lines = self.dump_lines( 0, width )
for l in lines:
print prefix + l
def dump_lines( self, margin = 0, width = 60 ):
result = []
for l in self.lines:
result.append( " " * margin + l )
return result
################################################################
##
## DOC PARA CLASS
##
## `Normal' text paragraphs are stored in the `DocPara' class.
##
## `self.words' contains the list of words that make up the paragraph.
##
class DocPara:
def __init__( self, lines ):
self.lines = None
self.words = []
for l in lines:
l = string.strip( l )
self.words.extend( string.split( l ) )
def dump( self, prefix = "", width = 60 ):
lines = self.dump_lines( 0, width )
for l in lines:
print prefix + l
def dump_lines( self, margin = 0, width = 60 ):
cur = "" # current line
col = 0 # current width
result = []
for word in self.words:
ln = len( word )
if col > 0:
ln = ln + 1
if col + ln > width:
result.append( " " * margin + cur )
cur = word
col = len( word )
else:
if col > 0:
cur = cur + " "
cur = cur + word
col = col + ln
if col > 0:
result.append( " " * margin + cur )
return result
################################################################
##
## DOC FIELD CLASS
##
## The `DocField' class stores a list containing either `DocPara' or
## `DocCode' objects. Each DocField object also has an optional `name'
## that is used when the object corresponds to a field or value definition.
##
class DocField:
def __init__( self, name, lines ):
self.name = name # can be `None' for normal paragraphs/sources
self.items = [] # list of items
mode_none = 0 # start parsing mode
mode_code = 1 # parsing code sequences
mode_para = 3 # parsing normal paragraph
margin = -1 # current code sequence indentation
cur_lines = []
# analyze the markup lines to check whether they contain paragraphs,
# code sequences, or fields definitions
#
start = 0
mode = mode_none
for l in lines:
# are we parsing a code sequence?
if mode == mode_code:
m = re_code_end.match( l )
if m and len( m.group( 1 ) ) <= margin:
# that's it, we finished the code sequence
code = DocCode( 0, cur_lines )
self.items.append( code )
margin = -1
cur_lines = []
mode = mode_none
else:
# otherwise continue the code sequence
cur_lines.append( l[margin:] )
else:
# start of code sequence?
m = re_code_start.match( l )
if m:
# save current lines
if cur_lines:
para = DocPara( cur_lines )
self.items.append( para )
cur_lines = []
# switch to code extraction mode
margin = len( m.group( 1 ) )
mode = mode_code
else:
if not string.split( l ) and cur_lines:
# if the line is empty, we end the current paragraph,
# if any
para = DocPara( cur_lines )
self.items.append( para )
cur_lines = []
else:
# otherwise, simply add the line to the current
# paragraph
cur_lines.append( l )
if mode == mode_code:
# unexpected end of code sequence
code = DocCode( margin, cur_lines )
self.items.append( code )
elif cur_lines:
para = DocPara( cur_lines )
self.items.append( para )
def dump( self, prefix = "" ):
        if self.name:
            print prefix + self.name + " ::"
prefix = prefix + "----"
first = 1
for p in self.items:
if not first:
print ""
p.dump( prefix )
first = 0
def dump_lines( self, margin = 0, width = 60 ):
result = []
nl = None
for p in self.items:
if nl:
result.append( "" )
result.extend( p.dump_lines( margin, width ) )
nl = 1
return result
#
# A regular expression to detect field definitions.
#
# Examples:
#
# foo ::
# foo.bar ::
#
re_field = re.compile( r"""
\s*
(
\w*
|
\w (\w | \.)* \w
)
\s* ::
""", re.VERBOSE )
################################################################
##
## DOC MARKUP CLASS
##
class DocMarkup:
def __init__( self, tag, lines ):
self.tag = string.lower( tag )
self.fields = []
cur_lines = []
field = None
mode = 0
for l in lines:
m = re_field.match( l )
if m:
# We detected the start of a new field definition.
# first, save the current one
if cur_lines:
f = DocField( field, cur_lines )
self.fields.append( f )
cur_lines = []
field = None
field = m.group( 1 ) # record field name
ln = len( m.group( 0 ) )
l = " " * ln + l[ln:]
cur_lines = [l]
else:
cur_lines.append( l )
if field or cur_lines:
f = DocField( field, cur_lines )
self.fields.append( f )
def get_name( self ):
try:
return self.fields[0].items[0].words[0]
except:
return None
def dump( self, margin ):
print " " * margin + "<" + self.tag + ">"
for f in self.fields:
f.dump( " " )
print " " * margin + "</" + self.tag + ">"
################################################################
##
## DOC CHAPTER CLASS
##
class DocChapter:
def __init__( self, block ):
self.block = block
self.sections = []
if block:
self.name = block.name
self.title = block.get_markup_words( "title" )
self.order = block.get_markup_words( "sections" )
else:
self.name = "Other"
self.title = string.split( "Miscellaneous" )
self.order = []
################################################################
##
## DOC SECTION CLASS
##
class DocSection:
def __init__( self, name = "Other" ):
self.name = name
self.blocks = {}
self.block_names = [] # ordered block names in section
self.defs = []
self.abstract = ""
self.description = ""
self.order = []
self.title = "ERROR"
self.chapter = None
def add_def( self, block ):
self.defs.append( block )
def add_block( self, block ):
self.block_names.append( block.name )
self.blocks[block.name] = block
def process( self ):
# look up one block that contains a valid section description
for block in self.defs:
title = block.get_markup_text( "title" )
if title:
self.title = title
self.abstract = block.get_markup_words( "abstract" )
self.description = block.get_markup_items( "description" )
self.order = block.get_markup_words_all( "order" )
return
def reorder( self ):
self.block_names = sort_order_list( self.block_names, self.order )
################################################################
##
## CONTENT PROCESSOR CLASS
##
class ContentProcessor:
def __init__( self ):
"""Initialize a block content processor."""
self.reset()
self.sections = {} # dictionary of documentation sections
self.section = None # current documentation section
self.chapters = [] # list of chapters
self.headers = {} # dictionary of header macros
def set_section( self, section_name ):
"""Set current section during parsing."""
if not section_name in self.sections:
section = DocSection( section_name )
self.sections[section_name] = section
self.section = section
else:
self.section = self.sections[section_name]
def add_chapter( self, block ):
chapter = DocChapter( block )
self.chapters.append( chapter )
def reset( self ):
"""Reset the content processor for a new block."""
self.markups = []
self.markup = None
self.markup_lines = []
def add_markup( self ):
"""Add a new markup section."""
if self.markup and self.markup_lines:
# get rid of last line of markup if it's empty
marks = self.markup_lines
if len( marks ) > 0 and not string.strip( marks[-1] ):
self.markup_lines = marks[:-1]
m = DocMarkup( self.markup, self.markup_lines )
self.markups.append( m )
self.markup = None
self.markup_lines = []
def process_content( self, content ):
"""Process a block content and return a list of DocMarkup objects
corresponding to it."""
markup = None
markup_lines = []
first = 1
for line in content:
found = None
for t in re_markup_tags:
m = t.match( line )
if m:
found = string.lower( m.group( 1 ) )
prefix = len( m.group( 0 ) )
line = " " * prefix + line[prefix:] # remove markup from line
break
# is it the start of a new markup section ?
if found:
first = 0
self.add_markup() # add current markup content
self.markup = found
if len( string.strip( line ) ) > 0:
self.markup_lines.append( line )
elif first == 0:
self.markup_lines.append( line )
self.add_markup()
return self.markups
def parse_sources( self, source_processor ):
blocks = source_processor.blocks
count = len( blocks )
for n in range( count ):
source = blocks[n]
if source.content:
# this is a documentation comment, we need to catch
# all following normal blocks in the "follow" list
#
follow = []
m = n + 1
while m < count and not blocks[m].content:
follow.append( blocks[m] )
m = m + 1
doc_block = DocBlock( source, follow, self )
def finish( self ):
# process all sections to extract their abstract, description
# and ordered list of items
#
for sec in self.sections.values():
sec.process()
# process chapters to check that all sections are correctly
# listed there
for chap in self.chapters:
for sec in chap.order:
if sec in self.sections:
section = self.sections[sec]
section.chapter = chap
section.reorder()
chap.sections.append( section )
else:
sys.stderr.write( "WARNING: chapter '" + \
chap.name + "' in " + chap.block.location() + \
" lists unknown section '" + sec + "'\n" )
# check that all sections are in a chapter
#
others = []
for sec in self.sections.values():
if not sec.chapter:
sec.reorder()
others.append( sec )
# create a new special chapter for all remaining sections
# when necessary
#
if others:
chap = DocChapter( None )
chap.sections = others
self.chapters.append( chap )
################################################################
##
## DOC BLOCK CLASS
##
class DocBlock:
def __init__( self, source, follow, processor ):
processor.reset()
self.source = source
self.code = []
self.type = "ERRTYPE"
self.name = "ERRNAME"
self.section = processor.section
self.markups = processor.process_content( source.content )
# compute block type from first markup tag
try:
self.type = self.markups[0].tag
except:
pass
# compute block name from first markup paragraph
try:
markup = self.markups[0]
para = markup.fields[0].items[0]
name = para.words[0]
m = re_identifier.match( name )
if m:
name = m.group( 1 )
self.name = name
except:
pass
if self.type == "section":
# detect new section starts
processor.set_section( self.name )
processor.section.add_def( self )
elif self.type == "chapter":
# detect new chapter
processor.add_chapter( self )
else:
processor.section.add_block( self )
# now, compute the source lines relevant to this documentation
# block. We keep normal comments in for obvious reasons (??)
source = []
for b in follow:
if b.format:
break
for l in b.lines:
# collect header macro definitions
m = re_header_macro.match( l )
if m:
processor.headers[m.group( 2 )] = m.group( 1 );
# we use "/* */" as a separator
if re_source_sep.match( l ):
break
source.append( l )
# now strip the leading and trailing empty lines from the sources
start = 0
end = len( source ) - 1
while start < end and not string.strip( source[start] ):
start = start + 1
while start < end and not string.strip( source[end] ):
end = end - 1
if start == end and not string.strip( source[start] ):
self.code = []
else:
self.code = source[start:end + 1]
def location( self ):
return self.source.location()
def get_markup( self, tag_name ):
"""Return the DocMarkup corresponding to a given tag in a block."""
for m in self.markups:
if m.tag == string.lower( tag_name ):
return m
return None
def get_markup_words( self, tag_name ):
try:
m = self.get_markup( tag_name )
return m.fields[0].items[0].words
except:
return []
def get_markup_words_all( self, tag_name ):
try:
m = self.get_markup( tag_name )
words = []
for item in m.fields[0].items:
# We honour empty lines in an `<Order>' section element by
# adding the sentinel `/empty/'. The formatter should then
# convert it to an appropriate representation in the
# `section_enter' function.
words += item.words
words.append( "/empty/" )
return words
except:
return []
def get_markup_text( self, tag_name ):
result = self.get_markup_words( tag_name )
return string.join( result )
def get_markup_items( self, tag_name ):
try:
m = self.get_markup( tag_name )
return m.fields[0].items
except:
return None
# eof
|
florian-dacosta/OCB | refs/heads/8.0 | addons/account/wizard/account_financial_report.py | 345 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
class accounting_report(osv.osv_memory):
_name = "accounting.report"
_inherit = "account.common.report"
_description = "Accounting Report"
_columns = {
'enable_filter': fields.boolean('Enable Comparison'),
'account_report_id': fields.many2one('account.financial.report', 'Account Reports', required=True),
'label_filter': fields.char('Column Label', help="This label will be displayed on report to show the balance computed for the given comparison filter."),
'fiscalyear_id_cmp': fields.many2one('account.fiscalyear', 'Fiscal Year', help='Keep empty for all open fiscal year'),
'filter_cmp': fields.selection([('filter_no', 'No Filters'), ('filter_date', 'Date'), ('filter_period', 'Periods')], "Filter by", required=True),
'period_from_cmp': fields.many2one('account.period', 'Start Period'),
'period_to_cmp': fields.many2one('account.period', 'End Period'),
'date_from_cmp': fields.date("Start Date"),
'date_to_cmp': fields.date("End Date"),
'debit_credit': fields.boolean('Display Debit/Credit Columns', help="This option allows you to get more details about the way your balances are computed. Because it is space consuming, we do not allow to use it while doing a comparison."),
}
def _get_account_report(self, cr, uid, context=None):
        # TODO: deprecate this, it doesn't work in web
menu_obj = self.pool.get('ir.ui.menu')
report_obj = self.pool.get('account.financial.report')
report_ids = []
if context.get('active_id'):
menu = menu_obj.browse(cr, uid, context.get('active_id')).name
report_ids = report_obj.search(cr, uid, [('name','ilike',menu)])
return report_ids and report_ids[0] or False
_defaults = {
'filter_cmp': 'filter_no',
'target_move': 'posted',
'account_report_id': _get_account_report,
}
def _build_comparison_context(self, cr, uid, ids, data, context=None):
if context is None:
context = {}
result = {}
result['fiscalyear'] = 'fiscalyear_id_cmp' in data['form'] and data['form']['fiscalyear_id_cmp'] or False
result['journal_ids'] = 'journal_ids' in data['form'] and data['form']['journal_ids'] or False
result['chart_account_id'] = 'chart_account_id' in data['form'] and data['form']['chart_account_id'] or False
result['state'] = 'target_move' in data['form'] and data['form']['target_move'] or ''
if data['form']['filter_cmp'] == 'filter_date':
result['date_from'] = data['form']['date_from_cmp']
result['date_to'] = data['form']['date_to_cmp']
elif data['form']['filter_cmp'] == 'filter_period':
if not data['form']['period_from_cmp'] or not data['form']['period_to_cmp']:
raise osv.except_osv(_('Error!'),_('Select a starting and an ending period'))
result['period_from'] = data['form']['period_from_cmp']
result['period_to'] = data['form']['period_to_cmp']
return result
def check_report(self, cr, uid, ids, context=None):
if context is None:
context = {}
res = super(accounting_report, self).check_report(cr, uid, ids, context=context)
data = {}
data['form'] = self.read(cr, uid, ids, ['account_report_id', 'date_from_cmp', 'date_to_cmp', 'fiscalyear_id_cmp', 'journal_ids', 'period_from_cmp', 'period_to_cmp', 'filter_cmp', 'chart_account_id', 'target_move'], context=context)[0]
for field in ['fiscalyear_id_cmp', 'chart_account_id', 'period_from_cmp', 'period_to_cmp', 'account_report_id']:
if isinstance(data['form'][field], tuple):
data['form'][field] = data['form'][field][0]
comparison_context = self._build_comparison_context(cr, uid, ids, data, context=context)
res['data']['form']['comparison_context'] = comparison_context
return res
def _print_report(self, cr, uid, ids, data, context=None):
data['form'].update(self.read(cr, uid, ids, ['date_from_cmp', 'debit_credit', 'date_to_cmp', 'fiscalyear_id_cmp', 'period_from_cmp', 'period_to_cmp', 'filter_cmp', 'account_report_id', 'enable_filter', 'label_filter','target_move'], context=context)[0])
return self.pool['report'].get_action(cr, uid, [], 'account.report_financial', data=data, context=context)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
kalaidin/luigi | refs/heads/master | dummy_test_module/not_imported.py | 66 | import luigi
class UnimportedTask(luigi.Task):
def complete(self):
return False
|
kennjason/ursula | refs/heads/master | library/rabbitmq_policy.py | 16 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, John Dewey <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
DOCUMENTATION = '''
---
module: rabbitmq_policy
short_description: Manage the state of policies in RabbitMQ.
description:
    - Manage the state of a policy in RabbitMQ.
version_added: "1.5"
author: John Dewey
options:
name:
description:
- The name of the policy to manage.
required: true
default: null
vhost:
description:
- The name of the vhost to apply to.
required: false
default: /
pattern:
description:
- A regex of queues to apply the policy to.
required: true
default: null
tags:
description:
- A dict or string describing the policy.
required: true
default: null
priority:
description:
- The priority of the policy.
required: false
default: 0
node:
description:
- Erlang node name of the rabbit we wish to configure.
required: false
default: rabbit
state:
description:
- The state of the policy.
default: present
choices: [present, absent]
'''
EXAMPLES = '''
- name: ensure the default vhost contains the HA policy via a dict
rabbitmq_policy: name=HA pattern='.*'
args:
tags:
"ha-mode": all
- name: ensure the default vhost contains the HA policy
rabbitmq_policy: name=HA pattern='.*' tags="ha-mode=all"
'''
class RabbitMqPolicy(object):
def __init__(self, module, name):
self._module = module
self._name = name
self._vhost = module.params['vhost']
self._pattern = module.params['pattern']
self._tags = module.params['tags']
self._priority = module.params['priority']
self._node = module.params['node']
self._rabbitmqctl = module.get_bin_path('rabbitmqctl', True)
def _exec(self, args, run_in_check_mode=False):
if not self._module.check_mode or (self._module.check_mode and run_in_check_mode):
cmd = [self._rabbitmqctl, '-q', '-n', self._node]
args.insert(1, '-p')
args.insert(2, self._vhost)
rc, out, err = self._module.run_command(cmd + args, check_rc=True)
return out.splitlines()
return list()
def list(self):
policies = self._exec(['list_policies'], True)
for policy in policies:
policy_name = policy.split('\t')[1]
if policy_name == self._name:
return True
return False
def set(self):
import json
args = ['set_policy']
args.append(self._name)
args.append(self._pattern)
args.append(json.dumps(self._tags))
args.append('--priority')
args.append(self._priority)
return self._exec(args)
def clear(self):
return self._exec(['clear_policy', self._name])
def main():
arg_spec = dict(
name = dict(required=True),
vhost = dict(default='/'),
pattern = dict(required=True),
tags = dict(type='dict', required=True),
priority = dict(default='0'),
node = dict(default='rabbit'),
state = dict(default='present', choices=['present', 'absent']),
)
module = AnsibleModule(
argument_spec = arg_spec,
supports_check_mode = True
)
name = module.params['name']
state = module.params['state']
rabbitmq_policy = RabbitMqPolicy(module, name)
changed = False
if rabbitmq_policy.list():
if state == 'absent':
rabbitmq_policy.clear()
changed = True
elif state == 'present':
rabbitmq_policy.set()
changed = True
module.exit_json(changed=changed, name=name, state=state)
# this is magic, see lib/ansible/module_common.py
#<<INCLUDE_ANSIBLE_MODULE_COMMON>>
main()
|
alex/readthedocs.org | refs/heads/master | readthedocs/projects/urls/public.py | 1 | from django.conf.urls.defaults import *
urlpatterns = patterns('projects.views.public',
url(r'^$',
'project_index',
name='projects_list'
),
url(r'^tags/$',
'tag_index',
name='projects_tag_list',
),
url(r'^search/$',
'search',
name='project_search',
),
url(r'^search/autocomplete/$',
'search_autocomplete',
name='search_autocomplete',
),
url(r'^tags/(?P<tag>[-\w]+)/$',
'project_index',
name='projects_tag_detail',
),
url(r'^(?P<project_slug>[-\w]+)/$',
'project_detail',
name='projects_detail'
),
url(r'^(?P<username>\w+)/(?P<project_slug>[-\w]+)/$',
'legacy_project_detail',
name='legacy_projects_detail'
),
url(r'^(?P<username>\w+)/$',
'project_index',
name='projects_user_list'
),
)
urlpatterns += patterns('',
url(r'^(?P<username>\w+)/(?P<project_slug>[-\w]+)/docs/(?P<filename>.*)$',
'core.views.legacy_serve_docs',
),
)
|
fengshao0907/fbthrift | refs/heads/master | thrift/test/copy_files.py | 16 | #!/usr/local/bin/python
import os
import shutil
import string
import sys
if __name__ == "__main__":
dir = "."
for arg in sys.argv:
pair = string.split(arg, '=')
if len(pair) == 2 and pair[0] == "--install_dir":
dir = pair[1]
if not os.path.exists(dir):
os.makedirs(dir)
shutil.copy("EnumTest.thrift", dir + "/EnumTestStrict.thrift")
|
cobrab11/blacksmith-2 | refs/heads/master | expansions/exp_control/insc.py | 3 | # coding: utf-8
if DefLANG in ("RU", "UA"):
AnsBase_temp = tuple([line.decode("utf-8") for line in (
"\n[Название][состояние][файл кода][языковой файл]", # 0
"в наличии", # 1
"отсутствует", # 2
"загружен", # 3
"\nКоманды: %s", # 4
"\nФункции: %s", # 5
"не загружен", # 6
"Судя по всему нет такого плагина.", # 7
"\n[№][Название][файл кода][файл языка]", # 8
"\n\n## Незагруженные (%d) →\n\n%s", # 9
"%s - успешно загружен!", # 10
"Не могу загрузить - %s!%s", # 11
"Какой-то гад удалил файл с кодом!", # 12
"\n\n** Загрузил последнюю рабочую версию плагина.", # 13
"Этой функции нет в списке зарегистрированных.\n## Список зареганных: %s", # 14
"В %s нет зарегистрированных функций.", # 15
"Команда «%s» включена.", # 16
"Команда «%s» отключена.", # 17
"Нет отключенных команд." # 18
)])
else:
AnsBase_temp = (
"\n[Name][state][code-file][lang-file]", # 0
"exists", # 1
"not exists", # 2
"loaded", # 3
"\nCommands: %s", # 4
"\nHandlers: %s", # 5
"not loaded", # 6
"Also, this expansion isn't exist.", # 7
"\n[#][Name][code-file][lang-file]", # 8
"\n\n## Not loaded (%d) ->\n\n%s", # 9
"%s - successfully loaded!", # 10
"Can't load - %s!%s", # 11
"Somebody deleted code-file!", # 12
"\n\n** The last valid version of the expansion was loaded.", # 13
"This function isn't registered.\n## Functions-list: %s", # 14
"There is no registered functions in %s", # 15
"Command '%s' is on.", # 16
"Command '%s' is off.", # 17
"No disabled commands." # 18
) |
Gitlab11/odoo | refs/heads/8.0 | addons/website_event/__init__.py | 1577 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013-Today OpenERP SA (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import controllers
import models
|
CUCWD/edx-platform | refs/heads/master | common/djangoapps/xblock_django/migrations/0002_auto_20160204_0809.py | 60 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('xblock_django', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='xblockdisableconfig',
name='disabled_create_blocks',
field=models.TextField(default=b'', help_text='Space-separated list of XBlock types whose creation to disable in Studio.', blank=True),
),
]
|
Medigate/cutiuta-server | refs/heads/master | cutiuta-server/env/lib/python3.4/site-packages/_markerlib/__init__.py | 1008 | try:
import ast
from _markerlib.markers import default_environment, compile, interpret
except ImportError:
if 'ast' in globals():
raise
def default_environment():
return {}
def compile(marker):
def marker_fn(environment=None, override=None):
# 'empty markers are True' heuristic won't install extra deps.
return not marker.strip()
marker_fn.__doc__ = marker
return marker_fn
def interpret(marker, environment=None, override=None):
return compile(marker)()
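# Editor's sketch of the fallback behaviour above: when the `ast` module is
# unavailable, markers are not actually evaluated; only the 'empty markers
# are True' heuristic applies:
#
#   >>> interpret('python_version >= "2.6"')
#   False
#   >>> interpret('')
#   True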
|
adelton/django | refs/heads/master | tests/postgres_tests/test_aggregates.py | 307 | from django.contrib.postgres.aggregates import (
ArrayAgg, BitAnd, BitOr, BoolAnd, BoolOr, Corr, CovarPop, RegrAvgX,
RegrAvgY, RegrCount, RegrIntercept, RegrR2, RegrSlope, RegrSXX, RegrSXY,
RegrSYY, StatAggregate, StringAgg,
)
from django.db.models.expressions import F, Value
from django.test.utils import Approximate
from . import PostgreSQLTestCase
from .models import AggregateTestModel, StatTestModel
class TestGeneralAggregate(PostgreSQLTestCase):
@classmethod
def setUpTestData(cls):
AggregateTestModel.objects.create(boolean_field=True, char_field='Foo1', integer_field=0)
AggregateTestModel.objects.create(boolean_field=False, char_field='Foo2', integer_field=1)
AggregateTestModel.objects.create(boolean_field=False, char_field='Foo3', integer_field=2)
AggregateTestModel.objects.create(boolean_field=True, char_field='Foo4', integer_field=0)
def test_array_agg_charfield(self):
values = AggregateTestModel.objects.aggregate(arrayagg=ArrayAgg('char_field'))
self.assertEqual(values, {'arrayagg': ['Foo1', 'Foo2', 'Foo3', 'Foo4']})
def test_array_agg_integerfield(self):
values = AggregateTestModel.objects.aggregate(arrayagg=ArrayAgg('integer_field'))
self.assertEqual(values, {'arrayagg': [0, 1, 2, 0]})
def test_array_agg_booleanfield(self):
values = AggregateTestModel.objects.aggregate(arrayagg=ArrayAgg('boolean_field'))
self.assertEqual(values, {'arrayagg': [True, False, False, True]})
def test_array_agg_empty_result(self):
AggregateTestModel.objects.all().delete()
values = AggregateTestModel.objects.aggregate(arrayagg=ArrayAgg('char_field'))
self.assertEqual(values, {'arrayagg': []})
values = AggregateTestModel.objects.aggregate(arrayagg=ArrayAgg('integer_field'))
self.assertEqual(values, {'arrayagg': []})
values = AggregateTestModel.objects.aggregate(arrayagg=ArrayAgg('boolean_field'))
self.assertEqual(values, {'arrayagg': []})
def test_bit_and_general(self):
values = AggregateTestModel.objects.filter(
integer_field__in=[0, 1]).aggregate(bitand=BitAnd('integer_field'))
self.assertEqual(values, {'bitand': 0})
def test_bit_and_on_only_true_values(self):
values = AggregateTestModel.objects.filter(
integer_field=1).aggregate(bitand=BitAnd('integer_field'))
self.assertEqual(values, {'bitand': 1})
def test_bit_and_on_only_false_values(self):
values = AggregateTestModel.objects.filter(
integer_field=0).aggregate(bitand=BitAnd('integer_field'))
self.assertEqual(values, {'bitand': 0})
def test_bit_and_empty_result(self):
AggregateTestModel.objects.all().delete()
values = AggregateTestModel.objects.aggregate(bitand=BitAnd('integer_field'))
self.assertEqual(values, {'bitand': None})
def test_bit_or_general(self):
values = AggregateTestModel.objects.filter(
integer_field__in=[0, 1]).aggregate(bitor=BitOr('integer_field'))
self.assertEqual(values, {'bitor': 1})
def test_bit_or_on_only_true_values(self):
values = AggregateTestModel.objects.filter(
integer_field=1).aggregate(bitor=BitOr('integer_field'))
self.assertEqual(values, {'bitor': 1})
def test_bit_or_on_only_false_values(self):
values = AggregateTestModel.objects.filter(
integer_field=0).aggregate(bitor=BitOr('integer_field'))
self.assertEqual(values, {'bitor': 0})
def test_bit_or_empty_result(self):
AggregateTestModel.objects.all().delete()
values = AggregateTestModel.objects.aggregate(bitor=BitOr('integer_field'))
self.assertEqual(values, {'bitor': None})
def test_bool_and_general(self):
values = AggregateTestModel.objects.aggregate(booland=BoolAnd('boolean_field'))
self.assertEqual(values, {'booland': False})
def test_bool_and_empty_result(self):
AggregateTestModel.objects.all().delete()
values = AggregateTestModel.objects.aggregate(booland=BoolAnd('boolean_field'))
self.assertEqual(values, {'booland': None})
def test_bool_or_general(self):
values = AggregateTestModel.objects.aggregate(boolor=BoolOr('boolean_field'))
self.assertEqual(values, {'boolor': True})
def test_bool_or_empty_result(self):
AggregateTestModel.objects.all().delete()
values = AggregateTestModel.objects.aggregate(boolor=BoolOr('boolean_field'))
self.assertEqual(values, {'boolor': None})
def test_string_agg_requires_delimiter(self):
with self.assertRaises(TypeError):
AggregateTestModel.objects.aggregate(stringagg=StringAgg('char_field'))
def test_string_agg_charfield(self):
values = AggregateTestModel.objects.aggregate(stringagg=StringAgg('char_field', delimiter=';'))
self.assertEqual(values, {'stringagg': 'Foo1;Foo2;Foo3;Foo4'})
def test_string_agg_empty_result(self):
AggregateTestModel.objects.all().delete()
values = AggregateTestModel.objects.aggregate(stringagg=StringAgg('char_field', delimiter=';'))
self.assertEqual(values, {'stringagg': ''})
class TestStatisticsAggregate(PostgreSQLTestCase):
@classmethod
def setUpTestData(cls):
StatTestModel.objects.create(
int1=1,
int2=3,
related_field=AggregateTestModel.objects.create(integer_field=0),
)
StatTestModel.objects.create(
int1=2,
int2=2,
related_field=AggregateTestModel.objects.create(integer_field=1),
)
StatTestModel.objects.create(
int1=3,
int2=1,
related_field=AggregateTestModel.objects.create(integer_field=2),
)
# Tests for base class (StatAggregate)
def test_missing_arguments_raises_exception(self):
with self.assertRaisesMessage(ValueError, 'Both y and x must be provided.'):
StatAggregate(x=None, y=None)
def test_correct_source_expressions(self):
func = StatAggregate(x='test', y=13)
self.assertIsInstance(func.source_expressions[0], Value)
self.assertIsInstance(func.source_expressions[1], F)
def test_alias_is_required(self):
class SomeFunc(StatAggregate):
function = 'TEST'
with self.assertRaisesMessage(TypeError, 'Complex aggregates require an alias'):
StatTestModel.objects.aggregate(SomeFunc(y='int2', x='int1'))
# Test aggregates
def test_corr_general(self):
values = StatTestModel.objects.aggregate(corr=Corr(y='int2', x='int1'))
self.assertEqual(values, {'corr': -1.0})
def test_corr_empty_result(self):
StatTestModel.objects.all().delete()
values = StatTestModel.objects.aggregate(corr=Corr(y='int2', x='int1'))
self.assertEqual(values, {'corr': None})
def test_covar_pop_general(self):
values = StatTestModel.objects.aggregate(covarpop=CovarPop(y='int2', x='int1'))
self.assertEqual(values, {'covarpop': Approximate(-0.66, places=1)})
def test_covar_pop_empty_result(self):
StatTestModel.objects.all().delete()
values = StatTestModel.objects.aggregate(covarpop=CovarPop(y='int2', x='int1'))
self.assertEqual(values, {'covarpop': None})
def test_covar_pop_sample(self):
values = StatTestModel.objects.aggregate(covarpop=CovarPop(y='int2', x='int1', sample=True))
self.assertEqual(values, {'covarpop': -1.0})
def test_covar_pop_sample_empty_result(self):
StatTestModel.objects.all().delete()
values = StatTestModel.objects.aggregate(covarpop=CovarPop(y='int2', x='int1', sample=True))
self.assertEqual(values, {'covarpop': None})
def test_regr_avgx_general(self):
values = StatTestModel.objects.aggregate(regravgx=RegrAvgX(y='int2', x='int1'))
self.assertEqual(values, {'regravgx': 2.0})
def test_regr_avgx_empty_result(self):
StatTestModel.objects.all().delete()
values = StatTestModel.objects.aggregate(regravgx=RegrAvgX(y='int2', x='int1'))
self.assertEqual(values, {'regravgx': None})
def test_regr_avgy_general(self):
values = StatTestModel.objects.aggregate(regravgy=RegrAvgY(y='int2', x='int1'))
self.assertEqual(values, {'regravgy': 2.0})
def test_regr_avgy_empty_result(self):
StatTestModel.objects.all().delete()
values = StatTestModel.objects.aggregate(regravgy=RegrAvgY(y='int2', x='int1'))
self.assertEqual(values, {'regravgy': None})
def test_regr_count_general(self):
values = StatTestModel.objects.aggregate(regrcount=RegrCount(y='int2', x='int1'))
self.assertEqual(values, {'regrcount': 3})
def test_regr_count_empty_result(self):
StatTestModel.objects.all().delete()
values = StatTestModel.objects.aggregate(regrcount=RegrCount(y='int2', x='int1'))
self.assertEqual(values, {'regrcount': 0})
def test_regr_intercept_general(self):
values = StatTestModel.objects.aggregate(regrintercept=RegrIntercept(y='int2', x='int1'))
self.assertEqual(values, {'regrintercept': 4})
def test_regr_intercept_empty_result(self):
StatTestModel.objects.all().delete()
values = StatTestModel.objects.aggregate(regrintercept=RegrIntercept(y='int2', x='int1'))
self.assertEqual(values, {'regrintercept': None})
def test_regr_r2_general(self):
values = StatTestModel.objects.aggregate(regrr2=RegrR2(y='int2', x='int1'))
self.assertEqual(values, {'regrr2': 1})
def test_regr_r2_empty_result(self):
StatTestModel.objects.all().delete()
values = StatTestModel.objects.aggregate(regrr2=RegrR2(y='int2', x='int1'))
self.assertEqual(values, {'regrr2': None})
def test_regr_slope_general(self):
values = StatTestModel.objects.aggregate(regrslope=RegrSlope(y='int2', x='int1'))
self.assertEqual(values, {'regrslope': -1})
def test_regr_slope_empty_result(self):
StatTestModel.objects.all().delete()
values = StatTestModel.objects.aggregate(regrslope=RegrSlope(y='int2', x='int1'))
self.assertEqual(values, {'regrslope': None})
def test_regr_sxx_general(self):
values = StatTestModel.objects.aggregate(regrsxx=RegrSXX(y='int2', x='int1'))
self.assertEqual(values, {'regrsxx': 2.0})
def test_regr_sxx_empty_result(self):
StatTestModel.objects.all().delete()
values = StatTestModel.objects.aggregate(regrsxx=RegrSXX(y='int2', x='int1'))
self.assertEqual(values, {'regrsxx': None})
def test_regr_sxy_general(self):
values = StatTestModel.objects.aggregate(regrsxy=RegrSXY(y='int2', x='int1'))
self.assertEqual(values, {'regrsxy': -2.0})
def test_regr_sxy_empty_result(self):
StatTestModel.objects.all().delete()
values = StatTestModel.objects.aggregate(regrsxy=RegrSXY(y='int2', x='int1'))
self.assertEqual(values, {'regrsxy': None})
def test_regr_syy_general(self):
values = StatTestModel.objects.aggregate(regrsyy=RegrSYY(y='int2', x='int1'))
self.assertEqual(values, {'regrsyy': 2.0})
def test_regr_syy_empty_result(self):
StatTestModel.objects.all().delete()
values = StatTestModel.objects.aggregate(regrsyy=RegrSYY(y='int2', x='int1'))
self.assertEqual(values, {'regrsyy': None})
def test_regr_avgx_with_related_obj_and_number_as_argument(self):
"""
This is more complex test to check if JOIN on field and
number as argument works as expected.
"""
values = StatTestModel.objects.aggregate(complex_regravgx=RegrAvgX(y=5, x='related_field__integer_field'))
self.assertEqual(values, {'complex_regravgx': 1.0})
|
gghatano/scipy_2015_sklearn_tutorial | refs/heads/master | notebooks/figures/plot_digits_datasets.py | 19 | # Taken from example in scikit-learn examples
# Authors: Fabian Pedregosa <[email protected]>
# Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Gael Varoquaux
# License: BSD 3 clause (C) INRIA 2011
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import offsetbox
from sklearn import (manifold, datasets, decomposition, ensemble, lda,
random_projection)
def digits_plot():
digits = datasets.load_digits(n_class=6)
n_digits = 500
X = digits.data[:n_digits]
y = digits.target[:n_digits]
n_samples, n_features = X.shape
n_neighbors = 30
def plot_embedding(X, title=None):
x_min, x_max = np.min(X, 0), np.max(X, 0)
X = (X - x_min) / (x_max - x_min)
plt.figure()
ax = plt.subplot(111)
for i in range(X.shape[0]):
plt.text(X[i, 0], X[i, 1], str(digits.target[i]),
color=plt.cm.Set1(y[i] / 10.),
fontdict={'weight': 'bold', 'size': 9})
if hasattr(offsetbox, 'AnnotationBbox'):
# only print thumbnails with matplotlib > 1.0
shown_images = np.array([[1., 1.]]) # just something big
for i in range(X.shape[0]):
dist = np.sum((X[i] - shown_images) ** 2, 1)
if np.min(dist) < 1e5:
# don't show points that are too close
# set a high threshold to basically turn this off
continue
shown_images = np.r_[shown_images, [X[i]]]
imagebox = offsetbox.AnnotationBbox(
offsetbox.OffsetImage(digits.images[i], cmap=plt.cm.gray_r),
X[i])
ax.add_artist(imagebox)
plt.xticks([]), plt.yticks([])
if title is not None:
plt.title(title)
n_img_per_row = 10
img = np.zeros((10 * n_img_per_row, 10 * n_img_per_row))
for i in range(n_img_per_row):
ix = 10 * i + 1
for j in range(n_img_per_row):
iy = 10 * j + 1
img[ix:ix + 8, iy:iy + 8] = X[i * n_img_per_row + j].reshape((8, 8))
plt.imshow(img, cmap=plt.cm.binary)
plt.xticks([])
plt.yticks([])
plt.title('A selection from the 64-dimensional digits dataset')
print("Computing PCA projection")
pca = decomposition.PCA(n_components=2).fit(X)
X_pca = pca.transform(X)
plot_embedding(X_pca, "Principal Components projection of the digits")
plt.figure()
plt.matshow(pca.components_[0, :].reshape(8, 8), cmap="gray")
plt.axis('off')
plt.figure()
plt.matshow(pca.components_[1, :].reshape(8, 8), cmap="gray")
plt.axis('off')
plt.show()
|
glneo/gnuradio | refs/heads/master | gr-wxgui/python/wxgui/plotter/common.py | 47 | #
# Copyright 2009 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
import threading
import time
import math
import wx
##################################################
# Number formatting
##################################################
def get_exp(num):
"""
Get the exponent of the number in base 10.
Args:
num: the floating point number
Returns:
the exponent as an integer
"""
if num == 0: return 0
return int(math.floor(math.log10(abs(num))))
def get_si_components(num):
"""
Get the SI units for the number.
Extract the coeff and exponent of the number.
The exponent will be a multiple of 3.
Args:
num: the floating point number
Returns:
the tuple coeff, exp, prefix
"""
num = float(num)
exp = get_exp(num)
exp -= exp%3
exp = min(max(exp, -24), 24) #bounds on SI table below
prefix = {
24: 'Y', 21: 'Z',
18: 'E', 15: 'P',
12: 'T', 9: 'G',
6: 'M', 3: 'k',
0: '',
-3: 'm', -6: 'u',
-9: 'n', -12: 'p',
-15: 'f', -18: 'a',
-21: 'z', -24: 'y',
}[exp]
coeff = num/10**exp
return coeff, exp, prefix
def sci_format(num):
"""
Format a floating point number into scientific notation.
Args:
num: the number to format
Returns:
a label string
"""
coeff, exp, prefix = get_si_components(num)
if -3 <= exp < 3: return '%g'%num
return '%.3ge%d'%(coeff, exp)
def eng_format(num, units=''):
"""
Format a floating point number into engineering notation.
Args:
num: the number to format
units: the units to append
Returns:
a label string
"""
coeff, exp, prefix = get_si_components(num)
if -3 <= exp < 3: return '%g'%num
return '%g%s%s%s'%(coeff, units and ' ' or '', prefix, units)
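# Editor's note, illustrative results of the two formatters above:
#
#   >>> sci_format(0.00042)
#   '420e-6'
#   >>> eng_format(0.00042, 'V')
#   '420 uV'
#   >>> eng_format(1.5)
#   '1.5'
#
# (values shown are approximate; small floating-point error may surface for
# some inputs)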
##################################################
# Interface with thread safe lock/unlock
##################################################
class mutex(object):
_lock = threading.Lock()
def lock(self): self._lock.acquire()
def unlock(self): self._lock.release()
##################################################
# Periodic update thread for point label
##################################################
class point_label_thread(threading.Thread, mutex):
def __init__(self, plotter):
self._plotter = plotter
self._coor_queue = list()
#bind plotter mouse events
self._plotter.Bind(wx.EVT_MOTION, lambda evt: self.enqueue(evt.GetPosition()))
self._plotter.Bind(wx.EVT_LEAVE_WINDOW, lambda evt: self.enqueue(None))
self._plotter.Bind(wx.EVT_RIGHT_DOWN, lambda evt: plotter.enable_point_label(not plotter.enable_point_label()))
self._plotter.Bind(wx.EVT_LEFT_DOWN, lambda evt: plotter.call_freq_callback(evt.GetPosition()))
#start the thread
threading.Thread.__init__(self)
self.start()
def enqueue(self, coor):
self.lock()
self._coor_queue.append(coor)
self.unlock()
def run(self):
last_ts = time.time()
last_coor = coor = None
try:
while True:
time.sleep(1.0/30.0)
self.lock()
#get most recent coor change
if self._coor_queue:
coor = self._coor_queue[-1]
self._coor_queue = list()
self.unlock()
#update if coor change, or enough time expired
if last_coor != coor or (time.time() - last_ts) > (1.0/2.0):
self._plotter.set_point_label_coordinate(coor)
last_coor = coor
last_ts = time.time()
except wx.PyDeadObjectError: pass
|
skmezanul/seahub | refs/heads/master | seahub/utils/repo.py | 1 | # -*- coding: utf-8 -*-
from seaserv import seafile_api
from seahub.utils import EMPTY_SHA1
def list_dir_by_path(cmmt, path):
if cmmt.root_id == EMPTY_SHA1:
return []
else:
return seafile_api.list_dir_by_commit_and_path(cmmt.repo_id, cmmt.id, path)
|
zonca/pelican-plugins | refs/heads/master | pdf/__init__.py | 77 | from .pdf import *
|
cotsog/sample-code | refs/heads/master | sample-code/examples/python/selendroid_simple.py | 36 | import os
from time import sleep
import unittest
from appium import webdriver
# Returns abs path relative to this file and not cwd
PATH = lambda p: os.path.abspath(
os.path.join(os.path.dirname(__file__), p)
)
# think times can be useful e.g. when testing with an emulator
THINK_TIME = 5.
class SimpleSalendroidTests(unittest.TestCase):
def setUp(self):
desired_caps = {}
desired_caps['platformName'] = 'Android'
desired_caps['platformVersion'] = '4.1'
desired_caps['deviceName'] = 'Android Emulator'
desired_caps['automationName'] = "selendroid"
desired_caps['app'] = PATH(
'../../../sample-code/apps/ApiDemos/bin/ApiDemos-debug.apk'
)
self.driver = webdriver.Remote('http://localhost:4723/wd/hub', desired_caps)
def tearDown(self):
# end the session
self.driver.quit()
def test_selendroid(self):
el = self.driver.find_element_by_name("Animation")
# assert el.text == "Animation"
self.assertEqual('Animation', el.text)
el = self.driver.find_element_by_class_name("android.widget.TextView")
# assert el.text == "Accessibility"
self.assertEqual('Accessibility', el.text)
el = self.driver.find_element_by_name("App")
el.click()
sleep(THINK_TIME)
els = self.driver.find_elements_by_class_name("android.widget.TextView")
# Selendroid gets all the elements, not just the visible ones
self.assertLessEqual(30, len(els))
self.driver.find_element_by_name('Action Bar')
self.driver.back()
sleep(THINK_TIME)
el = self.driver.find_element_by_name("Animation")
self.assertEqual('Animation', el.text)
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(SimpleSalendroidTests)
unittest.TextTestRunner(verbosity=2).run(suite)
|
asiersarasua/QGIS | refs/heads/master | python/plugins/processing/algs/grass7/ext/v_sample.py | 8 | # -*- coding: utf-8 -*-
"""
***************************************************************************
v_sample.py
-----------
Date : February 2016
Copyright : (C) 2016 by Médéric Ribreux
Email : medspx at medspx dot fr
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Médéric Ribreux'
__date__ = 'February 2016'
__copyright__ = '(C) 2016, Médéric Ribreux'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
def processInputs(alg, parameters, context, feedback):
if 'input' in alg.exportedLayers:
return
# We need to import the vector with v.in.ogr
# and we can use r.external for the raster
alg.loadVectorLayerFromParameter('input', parameters, context, feedback, False)
alg.loadRasterLayerFromParameter('raster', parameters, context, True)
alg.postInputs()
|
katevontaine/myblog2 | refs/heads/master | node_modules/gulp-sass/node_modules/node-sass/node_modules/node-gyp/gyp/tools/graphviz.py | 2679 | #!/usr/bin/env python
# Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Using the JSON dumped by the dump-dependency-json generator,
generate input suitable for graphviz to render a dependency graph of
targets."""
import collections
import json
import sys
def ParseTarget(target):
target, _, suffix = target.partition('#')
filename, _, target = target.partition(':')
return filename, target, suffix
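# Editor's note (illustrative): target strings in the dump are expected to
# look like "path/to/foo.gyp:target_name#toolset", so for example
#
#   >>> ParseTarget('path/to/foo.gyp:target_name#host')
#   ('path/to/foo.gyp', 'target_name', 'host')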
def LoadEdges(filename, targets):
"""Load the edges map from the dump file, and filter it to only
  show targets in |targets| and their dependents."""
file = open('dump.json')
edges = json.load(file)
file.close()
# Copy out only the edges we're interested in from the full edge list.
target_edges = {}
to_visit = targets[:]
while to_visit:
src = to_visit.pop()
if src in target_edges:
continue
target_edges[src] = edges[src]
to_visit.extend(edges[src])
return target_edges
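# Editor's sketch of the filtering above: with
#   edges   = {'a.gyp:x#t': ['a.gyp:y#t'], 'a.gyp:y#t': [], 'b.gyp:z#t': []}
#   targets = ['a.gyp:x#t']
# the returned map keeps only the two 'a.gyp' entries, i.e. the requested
# targets plus everything reachable from them through the edge list.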
def WriteGraph(edges):
"""Print a graphviz graph to stdout.
|edges| is a map of target to a list of other targets it depends on."""
# Bucket targets by file.
files = collections.defaultdict(list)
for src, dst in edges.items():
build_file, target_name, toolset = ParseTarget(src)
files[build_file].append(src)
print 'digraph D {'
print ' fontsize=8' # Used by subgraphs.
print ' node [fontsize=8]'
# Output nodes by file. We must first write out each node within
# its file grouping before writing out any edges that may refer
# to those nodes.
for filename, targets in files.items():
if len(targets) == 1:
# If there's only one node for this file, simplify
# the display by making it a box without an internal node.
target = targets[0]
build_file, target_name, toolset = ParseTarget(target)
print ' "%s" [shape=box, label="%s\\n%s"]' % (target, filename,
target_name)
else:
# Group multiple nodes together in a subgraph.
print ' subgraph "cluster_%s" {' % filename
print ' label = "%s"' % filename
for target in targets:
build_file, target_name, toolset = ParseTarget(target)
print ' "%s" [label="%s"]' % (target, target_name)
print ' }'
# Now that we've placed all the nodes within subgraphs, output all
# the edges between nodes.
for src, dsts in edges.items():
for dst in dsts:
print ' "%s" -> "%s"' % (src, dst)
print '}'
def main():
if len(sys.argv) < 2:
print >>sys.stderr, __doc__
print >>sys.stderr
print >>sys.stderr, 'usage: %s target1 target2...' % (sys.argv[0])
return 1
edges = LoadEdges('dump.json', sys.argv[1:])
WriteGraph(edges)
return 0
if __name__ == '__main__':
sys.exit(main())
|
aanwin/luno | refs/heads/master | frontend-auth/models/database.py | 1 | from sqlalchemy import create_engine
from sqlalchemy.orm import scoped_session, sessionmaker
from sqlalchemy.ext.declarative import declarative_base
engine = create_engine('postgresql://luno:[email protected]/luno', convert_unicode=True)
db_session = scoped_session(sessionmaker(autocommit=False,
autoflush=False,
bind=engine))
Base = declarative_base()
Base.query = db_session.query_property()
def init_db():
# import all modules here that might define models so that
# they will be registered properly on the metadata. Otherwise
# you will have to import them first before calling init_db()
import appmodels
Base.metadata.create_all(bind=engine)
|
mm1ke/portage | refs/heads/master | pym/portage/cache/flat_hash.py | 4 | # Copyright 2005-2016 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
# Author(s): Brian Harring ([email protected])
from __future__ import unicode_literals
from portage.cache import fs_template
from portage.cache import cache_errors
import errno
import io
import stat
import sys
import tempfile
import os as _os
from portage import os
from portage import _encodings
from portage import _unicode_encode
from portage.exception import InvalidData
from portage.versions import _pkg_str
if sys.hexversion >= 0x3000000:
# pylint: disable=W0622
long = int
class database(fs_template.FsBased):
autocommits = True
def __init__(self, *args, **config):
super(database,self).__init__(*args, **config)
self.location = os.path.join(self.location,
self.label.lstrip(os.path.sep).rstrip(os.path.sep))
write_keys = set(self._known_keys)
write_keys.add("_eclasses_")
write_keys.add("_%s_" % (self.validation_chf,))
self._write_keys = sorted(write_keys)
if not self.readonly and not os.path.exists(self.location):
self._ensure_dirs()
def _getitem(self, cpv):
# Don't use os.path.join, for better performance.
fp = self.location + _os.sep + cpv
try:
with io.open(_unicode_encode(fp,
encoding=_encodings['fs'], errors='strict'),
mode='r', encoding=_encodings['repo.content'],
errors='replace') as myf:
lines = myf.read().split("\n")
if not lines[-1]:
lines.pop()
d = self._parse_data(lines, cpv)
if '_mtime_' not in d:
# Backward compatibility with old cache
# that uses mtime mangling.
d['_mtime_'] = _os.fstat(myf.fileno())[stat.ST_MTIME]
return d
except (IOError, OSError) as e:
if e.errno != errno.ENOENT:
raise cache_errors.CacheCorruption(cpv, e)
raise KeyError(cpv, e)
def _parse_data(self, data, cpv):
try:
return dict( x.split("=", 1) for x in data )
except ValueError as e:
# If a line is missing an "=", the split length is 1 instead of 2.
raise cache_errors.CacheCorruption(cpv, e)
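	# Editor's note (illustrative): each flat-hash cache file is a series of
	# "key=value" lines, so for example
	#   _parse_data(['EAPI=5', 'SLOT=0'], cpv)
	# returns {'EAPI': '5', 'SLOT': '0'}; a line without '=' triggers the
	# ValueError branch above and is reported as CacheCorruption.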
def _setitem(self, cpv, values):
try:
fd, fp = tempfile.mkstemp(dir=self.location)
except EnvironmentError as e:
raise cache_errors.CacheCorruption(cpv, e)
with io.open(fd, mode='w',
encoding=_encodings['repo.content'],
errors='backslashreplace') as myf:
for k in self._write_keys:
v = values.get(k)
if not v:
continue
# NOTE: This format string requires unicode_literals, so that
# k and v are coerced to unicode, in order to prevent TypeError
# when writing raw bytes to TextIOWrapper with Python 2.
myf.write("%s=%s\n" % (k, v))
self._ensure_access(fp)
#update written. now we move it.
new_fp = os.path.join(self.location,cpv)
try:
os.rename(fp, new_fp)
except EnvironmentError as e:
success = False
try:
if errno.ENOENT == e.errno:
try:
self._ensure_dirs(cpv)
os.rename(fp, new_fp)
success = True
except EnvironmentError as e:
raise cache_errors.CacheCorruption(cpv, e)
else:
raise cache_errors.CacheCorruption(cpv, e)
finally:
if not success:
os.remove(fp)
def _delitem(self, cpv):
# import pdb;pdb.set_trace()
try:
os.remove(os.path.join(self.location,cpv))
except OSError as e:
if errno.ENOENT == e.errno:
raise KeyError(cpv)
else:
raise cache_errors.CacheCorruption(cpv, e)
def __contains__(self, cpv):
return os.path.exists(os.path.join(self.location, cpv))
def __iter__(self):
"""generator for walking the dir struct"""
dirs = [(0, self.location)]
len_base = len(self.location)
while dirs:
depth, dir_path = dirs.pop()
try:
dir_list = os.listdir(dir_path)
except OSError as e:
if e.errno != errno.ENOENT:
raise
del e
continue
for l in dir_list:
p = os.path.join(dir_path, l)
try:
st = os.lstat(p)
except OSError:
# Cache entry disappeared.
continue
if stat.S_ISDIR(st.st_mode):
# Only recurse 1 deep, in order to avoid iteration over
# entries from another nested cache instance. This can
# happen if the user nests an overlay inside
# /usr/portage/local as in bug #302764.
if depth < 1:
dirs.append((depth+1, p))
continue
try:
yield _pkg_str(p[len_base+1:])
except InvalidData:
continue
class md5_database(database):
validation_chf = 'md5'
store_eclass_paths = False
class mtime_md5_database(database):
validation_chf = 'md5'
chf_types = ('md5', 'mtime')
|
kejbaly2/invoke | refs/heads/master | invoke/platform.py | 5 | """
Platform-specific code lives here.
This is its own module to abstract away what would otherwise be distracting
logic-flow interruptions.
"""
import sys
WINDOWS = (sys.platform == 'win32')
"""
Whether or not the current platform appears to be Windows in nature.
Note that Cygwin's Python is actually close enough to "real" UNIXes that it
doesn't need (or want!) to use PyWin32 -- so we only test for literal Win32
setups (vanilla Python, ActiveState etc) here.
"""
def pty_size():
"""
Determine current local pseudoterminal dimensions.
:returns:
A ``(num_cols, num_rows)`` two-tuple describing PTY size. Defaults to
``(80, 24)`` if unable to get a sensible result dynamically.
"""
cols, rows = _pty_size() if not WINDOWS else _win_pty_size()
# TODO: make defaults configurable?
return ((cols or 80), (rows or 24))
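# Editor's note (illustrative): in a 120-column by 40-row terminal,
# pty_size() returns (120, 40); when stdout is not a real TTY (e.g. when
# output is piped) the platform-specific helpers below return (None, None)
# and the call falls back to the (80, 24) defaults.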
def _pty_size():
"""
Suitable for most POSIX platforms.
"""
import fcntl
import struct
import termios
# Sentinel values to be replaced w/ defaults by caller
size = (None, None)
# Can only get useful values from real TTYs
if sys.stdout.isatty():
# We want two short unsigned integers (rows, cols)
fmt = 'HH'
# Create an empty (zeroed) buffer for ioctl to map onto. Yay for C!
buf = struct.pack(fmt, 0, 0)
# Call TIOCGWINSZ to get window size of stdout, returns our filled
# buffer
try:
result = fcntl.ioctl(sys.stdout.fileno(), termios.TIOCGWINSZ, buf)
# Unpack buffer back into Python data types
# NOTE: this unpack gives us rows x cols, but we return the
# inverse.
rows, cols = struct.unpack(fmt, result)
return (cols, rows)
# Deal with e.g. sys.stdout being monkeypatched, such as in testing.
# Or termios not having a TIOCGWINSZ.
except AttributeError:
pass
return size
def _win_pty_size():
from ctypes import Structure, c_ushort, windll, POINTER, byref
from ctypes.wintypes import HANDLE, _COORD, _SMALL_RECT
class CONSOLE_SCREEN_BUFFER_INFO(Structure):
_fields_ = [
('dwSize', _COORD),
('dwCursorPosition', _COORD),
('wAttributes', c_ushort),
('srWindow', _SMALL_RECT),
('dwMaximumWindowSize', _COORD)
]
GetStdHandle = windll.kernel32.GetStdHandle
GetConsoleScreenBufferInfo = windll.kernel32.GetConsoleScreenBufferInfo
GetStdHandle.restype = HANDLE
GetConsoleScreenBufferInfo.argtypes = [
HANDLE, POINTER(CONSOLE_SCREEN_BUFFER_INFO)
]
hstd = GetStdHandle(-11) # STD_OUTPUT_HANDLE = -11
csbi = CONSOLE_SCREEN_BUFFER_INFO()
ret = GetConsoleScreenBufferInfo(hstd, byref(csbi))
if ret:
sizex = csbi.srWindow.Right - csbi.srWindow.Left + 1
sizey = csbi.srWindow.Bottom - csbi.srWindow.Top + 1
return sizex, sizey
else:
return (None, None)
|
Gadal/sympy | refs/heads/master | sympy/matrices/expressions/tests/test_trace.py | 83 | from sympy.core import Lambda, S, symbols
from sympy.concrete import Sum
from sympy.functions import adjoint, conjugate, transpose
from sympy.matrices import eye, Matrix, ShapeError, ImmutableMatrix
from sympy.matrices.expressions import (
Adjoint, Identity, FunctionMatrix, MatrixExpr, MatrixSymbol, Trace,
ZeroMatrix, trace, MatPow, MatAdd, MatMul
)
from sympy.utilities.pytest import raises, XFAIL
n = symbols('n', integer=True)
A = MatrixSymbol('A', n, n)
B = MatrixSymbol('B', n, n)
C = MatrixSymbol('C', 3, 4)
def test_Trace():
assert isinstance(Trace(A), Trace)
assert not isinstance(Trace(A), MatrixExpr)
raises(ShapeError, lambda: Trace(C))
assert trace(eye(3)) == 3
assert trace(Matrix(3, 3, [1, 2, 3, 4, 5, 6, 7, 8, 9])) == 15
assert adjoint(Trace(A)) == trace(Adjoint(A))
assert conjugate(Trace(A)) == trace(Adjoint(A))
assert transpose(Trace(A)) == Trace(A)
A / Trace(A) # Make sure this is possible
# Some easy simplifications
assert trace(Identity(5)) == 5
assert trace(ZeroMatrix(5, 5)) == 0
assert trace(2*A*B) == 2*Trace(A*B)
assert trace(A.T) == trace(A)
i, j = symbols('i j')
F = FunctionMatrix(3, 3, Lambda((i, j), i + j))
assert trace(F) == (0 + 0) + (1 + 1) + (2 + 2)
raises(TypeError, lambda: Trace(S.One))
assert Trace(A).arg is A
assert str(trace(A)) == str(Trace(A).doit())
def test_Trace_A_plus_B():
assert trace(A + B) == Trace(A) + Trace(B)
assert Trace(A + B).arg == MatAdd(A, B)
assert Trace(A + B).doit() == Trace(A) + Trace(B)
def test_Trace_MatAdd_doit():
# See issue #9028
X = ImmutableMatrix([[1, 2, 3]]*3)
Y = MatrixSymbol('Y', 3, 3)
q = MatAdd(X, 2*X, Y, -3*Y)
assert Trace(q).arg == q
assert Trace(q).doit() == 18 - 2*Trace(Y)
def test_Trace_MatPow_doit():
X = Matrix([[1, 2], [3, 4]])
assert Trace(X).doit() == 5
q = MatPow(X, 2)
assert Trace(q).arg == q
assert Trace(q).doit() == 29
def test_Trace_MutableMatrix_plus():
# See issue #9043
X = Matrix([[1, 2], [3, 4]])
assert Trace(X) + Trace(X) == 2*Trace(X)
def test_Trace_doit_deep_False():
X = Matrix([[1, 2], [3, 4]])
q = MatPow(X, 2)
assert Trace(q).doit(deep=False).arg == q
q = MatAdd(X, 2*X)
assert Trace(q).doit(deep=False).arg == q
q = MatMul(X, 2*X)
assert Trace(q).doit(deep=False).arg == q
def test_trace_constant_factor():
# Issue 9052: gave 2*Trace(MatMul(A)) instead of 2*Trace(A)
assert trace(2*A) == 2*Trace(A)
X = ImmutableMatrix([[1, 2], [3, 4]])
assert trace(MatMul(2, X)) == 10
@XFAIL
def test_rewrite():
assert isinstance(trace(A).rewrite(Sum), Sum)
|
davidl1/hortonworks-extension | refs/heads/master | src/contrib/hod/hodlib/HodRing/hodRing.py | 118 | #Licensed to the Apache Software Foundation (ASF) under one
#or more contributor license agreements. See the NOTICE file
#distributed with this work for additional information
#regarding copyright ownership. The ASF licenses this file
#to you under the Apache License, Version 2.0 (the
#"License"); you may not use this file except in compliance
#with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
#!/usr/bin/env python
"""hodring launches hadoop commands on work node and
cleans up all the work dirs afterward
"""
# -*- python -*-
import os, sys, time, shutil, getpass, xml.dom.minidom, xml.dom.pulldom
import socket, sets, urllib, csv, signal, pprint, random, re, httplib
from xml.dom import getDOMImplementation
from pprint import pformat
from optparse import OptionParser
from urlparse import urlparse
from hodlib.Common.util import local_fqdn, parseEquals, getMapredSystemDirectory, isProcessRunning
from hodlib.Common.tcp import tcpSocket, tcpError
binfile = sys.path[0]
libdir = os.path.dirname(binfile)
sys.path.append(libdir)
import hodlib.Common.logger
from hodlib.GridServices.service import *
from hodlib.Common.util import *
from hodlib.Common.socketServers import threadedHTTPServer
from hodlib.Common.hodsvc import hodBaseService
from hodlib.Common.threads import simpleCommand
from hodlib.Common.xmlrpc import hodXRClient
mswindows = (sys.platform == "win32")
originalcwd = os.getcwd()
reHdfsURI = re.compile("hdfs://(.*?:\d+)(.*)")
class CommandDesc:
"""A class that represents the commands that
are run by hodring"""
def __init__(self, dict, log):
self.log = log
self.log.debug("In command desc")
self.log.debug("Done in command desc")
dict.setdefault('argv', [])
dict.setdefault('version', None)
dict.setdefault('envs', {})
dict.setdefault('workdirs', [])
dict.setdefault('attrs', {})
dict.setdefault('final-attrs', {})
dict.setdefault('fg', False)
dict.setdefault('ignorefailures', False)
dict.setdefault('stdin', None)
self.log.debug("Printing dict")
self._checkRequired(dict)
self.dict = dict
def _checkRequired(self, dict):
if 'name' not in dict:
raise ValueError, "Command description lacks 'name'"
if 'program' not in dict:
raise ValueError, "Command description lacks 'program'"
if 'pkgdirs' not in dict:
raise ValueError, "Command description lacks 'pkgdirs'"
def getName(self):
return self.dict['name']
def getProgram(self):
return self.dict['program']
def getArgv(self):
return self.dict['argv']
def getVersion(self):
return self.dict['version']
def getEnvs(self):
return self.dict['envs']
def getPkgDirs(self):
return self.dict['pkgdirs']
def getWorkDirs(self):
return self.dict['workdirs']
def getAttrs(self):
return self.dict['attrs']
def getfinalAttrs(self):
return self.dict['final-attrs']
def isForeground(self):
return self.dict['fg']
def isIgnoreFailures(self):
return self.dict['ignorefailures']
def getStdin(self):
return self.dict['stdin']
def parseDesc(str):
dict = CommandDesc._parseMap(str)
dict['argv'] = CommandDesc._parseList(dict['argv'])
dict['envs'] = CommandDesc._parseMap(dict['envs'])
dict['pkgdirs'] = CommandDesc._parseList(dict['pkgdirs'], ':')
dict['workdirs'] = CommandDesc._parseList(dict['workdirs'], ':')
dict['attrs'] = CommandDesc._parseMap(dict['attrs'])
dict['final-attrs'] = CommandDesc._parseMap(dict['final-attrs'])
return CommandDesc(dict)
parseDesc = staticmethod(parseDesc)
def _parseList(str, delim = ','):
list = []
for row in csv.reader([str], delimiter=delim, escapechar='\\',
quoting=csv.QUOTE_NONE, doublequote=False):
list.extend(row)
return list
_parseList = staticmethod(_parseList)
def _parseMap(str):
"""Parses key value pairs"""
dict = {}
for row in csv.reader([str], escapechar='\\', quoting=csv.QUOTE_NONE, doublequote=False):
for f in row:
[k, v] = f.split('=', 1)
dict[k] = v
return dict
_parseMap = staticmethod(_parseMap)
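# Editor's note, illustrative examples for the two parsing helpers above:
#   CommandDesc._parseMap('mapred.job.tracker=host:9001,foo=bar')
#     -> {'mapred.job.tracker': 'host:9001', 'foo': 'bar'}
#   CommandDesc._parseList('/tmp/pkg1:/tmp/pkg2', ':')
#     -> ['/tmp/pkg1', '/tmp/pkg2']
# A backslash escapes the delimiter, so 'a\,b' parses as the single
# element 'a,b'.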
class MRSystemDirectoryManager:
"""Class that is responsible for managing the MapReduce system directory"""
def __init__(self, jtPid, mrSysDir, fsName, hadoopPath, log, retries=120):
self.__jtPid = jtPid
self.__mrSysDir = mrSysDir
self.__fsName = fsName
self.__hadoopPath = hadoopPath
self.__log = log
self.__retries = retries
def toCleanupArgs(self):
return " --jt-pid %s --mr-sys-dir %s --fs-name %s --hadoop-path %s " \
% (self.__jtPid, self.__mrSysDir, self.__fsName, self.__hadoopPath)
def removeMRSystemDirectory(self):
jtActive = isProcessRunning(self.__jtPid)
count = 0 # try for a max of a minute for the process to end
while jtActive and (count<self.__retries):
time.sleep(0.5)
jtActive = isProcessRunning(self.__jtPid)
count += 1
if count == self.__retries:
self.__log.warn('Job Tracker did not exit even after a minute. Not going to try and cleanup the system directory')
return
self.__log.debug('jt is now inactive')
cmd = "%s dfs -fs hdfs://%s -rmr %s" % (self.__hadoopPath, self.__fsName, \
self.__mrSysDir)
self.__log.debug('Command to run to remove system directory: %s' % (cmd))
try:
hadoopCommand = simpleCommand('mr-sys-dir-cleaner', cmd)
hadoopCommand.start()
hadoopCommand.wait()
hadoopCommand.join()
ret = hadoopCommand.exit_code()
if ret != 0:
self.__log.warn("Error in removing MapReduce system directory '%s' from '%s' using path '%s'" \
% (self.__mrSysDir, self.__fsName, self.__hadoopPath))
self.__log.warn(pprint.pformat(hadoopCommand.output()))
else:
self.__log.info("Removed MapReduce system directory successfully.")
except:
self.__log.error('Exception while cleaning up MapReduce system directory. May not be cleaned up. %s', \
get_exception_error_string())
self.__log.debug(get_exception_string())
def createMRSystemDirectoryManager(dict, log):
keys = [ 'jt-pid', 'mr-sys-dir', 'fs-name', 'hadoop-path' ]
for key in keys:
if (not dict.has_key(key)) or (dict[key] is None):
return None
mrSysDirManager = MRSystemDirectoryManager(int(dict['jt-pid']), dict['mr-sys-dir'], \
dict['fs-name'], dict['hadoop-path'], log)
return mrSysDirManager
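# Editor's note (illustrative, hypothetical values): the dict passed to
# createMRSystemDirectoryManager must carry all four keys, e.g.
#   {'jt-pid': '4321', 'mr-sys-dir': '/mapredsystem/hod.user',
#    'fs-name': 'namenode.example.com:9000', 'hadoop-path': '/usr/bin/hadoop'}
# If any key is missing or None, the function returns None and no cleanup of
# the MapReduce system directory is attempted.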
class HadoopCommand:
"""Runs a single hadoop command"""
def __init__(self, id, desc, tempdir, tardir, hadoopportrange, log, javahome,
mrSysDir, restart=False):
self.desc = desc
self.log = log
self.javahome = javahome
self.__mrSysDir = mrSysDir
self.program = desc.getProgram()
self.name = desc.getName()
self.workdirs = desc.getWorkDirs()
self.hadoopdir = tempdir
self.confdir = os.path.join(self.hadoopdir, '%d-%s' % (id, self.name),
"confdir")
self.logdir = os.path.join(self.hadoopdir, '%d-%s' % (id, self.name),
"logdir")
self.out = os.path.join(self.logdir, '%s.out' % self.name)
self.err = os.path.join(self.logdir, '%s.err' % self.name)
self.child = None
self.restart = restart
self.filledInKeyVals = []
self.__hadoopPortRange = hadoopportrange
self._createWorkDirs()
self._createHadoopSiteXml()
self._createHadoopLogDir()
self.__hadoopThread = None
self.stdErrContents = "" # store list of contents for returning to user
def _createWorkDirs(self):
for dir in self.workdirs:
if os.path.exists(dir):
if not os.access(dir, os.F_OK | os.R_OK | os.W_OK | os.X_OK):
raise ValueError, "Workdir %s does not allow rwx permission." % (dir)
continue
try:
os.makedirs(dir)
except:
pass
def getFilledInKeyValues(self):
return self.filledInKeyVals
def createXML(self, doc, attr, topElement, final):
for k,v in attr.iteritems():
self.log.debug('_createHadoopSiteXml: ' + str(k) + " " + str(v))
lowport, highport = self.__hadoopPortRange
if ( v == "fillinport" ):
v = "%d" % (ServiceUtil.getUniqRandomPort(low=lowport, high=highport, log=self.log))
keyvalpair = ''
if isinstance(v, (tuple, list)):
for item in v:
keyvalpair = "%s%s=%s," % (keyvalpair, k, item)
keyvalpair = keyvalpair[:-1]
else:
keyvalpair = k + '=' + v
self.filledInKeyVals.append(keyvalpair)
if(k == "mapred.job.tracker"): # total hack for time's sake
keyvalpair = k + "=" + v
self.filledInKeyVals.append(keyvalpair)
if ( v == "fillinhostport"):
port = "%d" % (ServiceUtil.getUniqRandomPort(low=lowport, high=highport, log=self.log))
self.log.debug('Setting hostname to: %s' % local_fqdn())
v = local_fqdn() + ':' + port
keyvalpair = ''
if isinstance(v, (tuple, list)):
for item in v:
keyvalpair = "%s%s=%s," % (keyvalpair, k, item)
keyvalpair = keyvalpair[:-1]
else:
keyvalpair = k + '=' + v
self.filledInKeyVals.append(keyvalpair)
if ( v == "fillindir"):
v = self.__mrSysDir
pass
prop = None
if isinstance(v, (tuple, list)):
for item in v:
prop = self._createXmlElement(doc, k, item, "No description", final)
topElement.appendChild(prop)
else:
if k == 'fs.default.name':
prop = self._createXmlElement(doc, k, "hdfs://" + v, "No description", final)
else:
prop = self._createXmlElement(doc, k, v, "No description", final)
topElement.appendChild(prop)
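  # Summary comment (added for clarity): createXML() expands three placeholder
  # values from the command descriptor - 'fillinport' becomes a free port from
  # the configured range, 'fillinhostport' becomes "<local fqdn>:<free port>",
  # and 'fillindir' becomes the MapReduce system directory. Every resolved
  # key=value pair is also appended to self.filledInKeyVals, which is later
  # reported to the ringmaster as master parameters.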
def _createHadoopSiteXml(self):
if self.restart:
if not os.path.exists(self.confdir):
os.makedirs(self.confdir)
else:
assert os.path.exists(self.confdir) == False
os.makedirs(self.confdir)
implementation = getDOMImplementation()
doc = implementation.createDocument('', 'configuration', None)
comment = doc.createComment("This is an auto generated hadoop-site.xml, do not modify")
topElement = doc.documentElement
topElement.appendChild(comment)
finalAttr = self.desc.getfinalAttrs()
self.createXML(doc, finalAttr, topElement, True)
attr = {}
attr1 = self.desc.getAttrs()
for k,v in attr1.iteritems():
if not finalAttr.has_key(k):
attr[k] = v
self.createXML(doc, attr, topElement, False)
siteName = os.path.join(self.confdir, "hadoop-site.xml")
sitefile = file(siteName, 'w')
print >> sitefile, topElement.toxml()
sitefile.close()
self.log.debug('created %s' % (siteName))
def _createHadoopLogDir(self):
if self.restart:
if not os.path.exists(self.logdir):
os.makedirs(self.logdir)
else:
assert os.path.exists(self.logdir) == False
os.makedirs(self.logdir)
def _createXmlElement(self, doc, name, value, description, final):
prop = doc.createElement("property")
nameP = doc.createElement("name")
string = doc.createTextNode(name)
nameP.appendChild(string)
valueP = doc.createElement("value")
string = doc.createTextNode(value)
valueP.appendChild(string)
desc = doc.createElement("description")
string = doc.createTextNode(description)
desc.appendChild(string)
prop.appendChild(nameP)
prop.appendChild(valueP)
prop.appendChild(desc)
if (final):
felement = doc.createElement("final")
string = doc.createTextNode("true")
felement.appendChild(string)
prop.appendChild(felement)
pass
return prop
def getMRSystemDirectoryManager(self):
return MRSystemDirectoryManager(self.__hadoopThread.getPid(), self.__mrSysDir, \
self.desc.getfinalAttrs()['fs.default.name'], \
self.path, self.log)
def run(self, dir):
status = True
args = []
desc = self.desc
self.log.debug(pprint.pformat(desc.dict))
self.log.debug("Got package dir of %s" % dir)
self.path = os.path.join(dir, self.program)
self.log.debug("path: %s" % self.path)
args.append(self.path)
args.extend(desc.getArgv())
envs = desc.getEnvs()
fenvs = os.environ
for k, v in envs.iteritems():
fenvs[k] = v
if envs.has_key('HADOOP_OPTS'):
fenvs['HADOOP_OPTS'] = envs['HADOOP_OPTS']
self.log.debug("HADOOP_OPTS : %s" % fenvs['HADOOP_OPTS'])
fenvs['JAVA_HOME'] = self.javahome
fenvs['HADOOP_CONF_DIR'] = self.confdir
fenvs['HADOOP_LOG_DIR'] = self.logdir
self.log.info(pprint.pformat(fenvs))
hadoopCommand = ''
for item in args:
hadoopCommand = "%s%s " % (hadoopCommand, item)
# Redirecting output and error to self.out and self.err
hadoopCommand = hadoopCommand + ' 1>%s 2>%s ' % (self.out, self.err)
self.log.debug('running command: %s' % (hadoopCommand))
self.log.debug('hadoop env: %s' % fenvs)
self.log.debug('Command stdout will be redirected to %s ' % self.out + \
'and command stderr to %s' % self.err)
self.__hadoopThread = simpleCommand('hadoop', hadoopCommand, env=fenvs)
self.__hadoopThread.start()
while self.__hadoopThread.stdin == None:
time.sleep(.2)
self.log.debug("hadoopThread still == None ...")
input = desc.getStdin()
self.log.debug("hadoop input: %s" % input)
if input:
if self.__hadoopThread.is_running():
print >>self.__hadoopThread.stdin, input
else:
self.log.error("hadoop command failed to start")
self.__hadoopThread.stdin.close()
    self.log.debug("isForeground: %s" % desc.isForeground())
if desc.isForeground():
self.log.debug("Waiting on hadoop to finish...")
self.__hadoopThread.wait()
self.log.debug("Joining hadoop thread...")
self.__hadoopThread.join()
if self.__hadoopThread.exit_code() != 0:
status = False
else:
status = self.getCommandStatus()
self.log.debug("hadoop run status: %s" % status)
if status == False:
self.handleFailedCommand()
if (status == True) or (not desc.isIgnoreFailures()):
return status
else:
self.log.error("Ignoring Failure")
return True
def kill(self):
    if self.__hadoopThread:
      self.__hadoopThread.kill()
      self.__hadoopThread.join()
def addCleanup(self, list):
list.extend(self.workdirs)
list.append(self.confdir)
def getCommandStatus(self):
status = True
ec = self.__hadoopThread.exit_code()
if (ec != 0) and (ec != None):
status = False
return status
def handleFailedCommand(self):
self.log.error('hadoop error: %s' % (
self.__hadoopThread.exit_status_string()))
# read the contents of redirected stderr to print information back to user
if os.path.exists(self.err):
f = None
try:
f = open(self.err)
lines = f.readlines()
# format
for line in lines:
self.stdErrContents = "%s%s" % (self.stdErrContents, line)
finally:
if f is not None:
f.close()
self.log.error('See %s.out and/or %s.err for details. They are ' % \
(self.name, self.name) + \
'located at subdirectories under either ' + \
'hodring.work-dirs or hodring.log-destination-uri.')
class HodRing(hodBaseService):
"""The main class for hodring that
polls the commands it runs"""
def __init__(self, config):
hodBaseService.__init__(self, 'hodring', config['hodring'])
self.log = self.logs['main']
self._http = None
self.__pkg = None
self.__pkgDir = None
self.__tempDir = None
self.__running = {}
self.__hadoopLogDirs = []
self.__init_temp_dir()
def __init_temp_dir(self):
self.__tempDir = os.path.join(self._cfg['temp-dir'],
"%s.%s.hodring" % (self._cfg['userid'],
self._cfg['service-id']))
if not os.path.exists(self.__tempDir):
os.makedirs(self.__tempDir)
os.chdir(self.__tempDir)
def __fetch(self, url, spath):
retry = 3
success = False
while (retry != 0 and success != True):
try:
input = urllib.urlopen(url)
bufsz = 81920
buf = input.read(bufsz)
out = open(spath, 'w')
while len(buf) > 0:
out.write(buf)
buf = input.read(bufsz)
input.close()
out.close()
success = True
except:
self.log.debug("Failed to copy file")
retry = retry - 1
if (retry == 0 and success != True):
raise IOError, "Failed to copy the files"
def __get_name(self, addr):
parsedUrl = urlparse(addr)
path = parsedUrl[2]
split = path.split('/', 1)
return split[1]
def __get_dir(self, name):
"""Return the root directory inside the tarball
specified by name. Assumes that the tarball begins
with a root directory."""
import tarfile
myTarFile = tarfile.open(name)
hadoopPackage = myTarFile.getnames()[0]
self.log.debug("tarball name : %s hadoop package name : %s" %(name,hadoopPackage))
return hadoopPackage
def getRunningValues(self):
return self.__running.values()
def getTempDir(self):
return self.__tempDir
def getHadoopLogDirs(self):
return self.__hadoopLogDirs
def __download_package(self, ringClient):
self.log.debug("Found download address: %s" %
self._cfg['download-addr'])
try:
addr = 'none'
downloadTime = self._cfg['tarball-retry-initial-time'] # download time depends on tarball size and network bandwidth
increment = 0
addr = ringClient.getTarList(self.hostname)
while(addr == 'none'):
rand = self._cfg['tarball-retry-initial-time'] + increment + \
random.uniform(0,self._cfg['tarball-retry-interval'])
increment = increment + 1
        self.log.debug("Got no tarball. Retrying in %s seconds." % rand)
time.sleep(rand)
addr = ringClient.getTarList(self.hostname)
self.log.debug("got this address %s" % addr)
tarName = self.__get_name(addr)
self.log.debug("tar package name: %s" % tarName)
fetchPath = os.path.join(os.getcwd(), tarName)
self.log.debug("fetch path: %s" % fetchPath)
self.__fetch(addr, fetchPath)
self.log.debug("done fetching")
tarUrl = "http://%s:%d/%s" % (self._http.server_address[0],
self._http.server_address[1],
tarName)
try:
ringClient.registerTarSource(self.hostname, tarUrl,addr)
#ringClient.tarDone(addr)
except KeyError, e:
self.log.error("registerTarSource and tarDone failed: ", e)
raise KeyError(e)
check = untar(fetchPath, os.getcwd())
if (check == False):
raise IOError, "Untarring failed."
self.__pkg = self.__get_dir(tarName)
self.__pkgDir = os.path.join(os.getcwd(), self.__pkg)
except Exception, e:
      self.log.error("Failed to download tar package: %s" %
get_exception_error_string())
raise Exception(e)
def __run_hadoop_commands(self, restart=True):
id = 0
for desc in self._cfg['commanddesc']:
self.log.debug(pprint.pformat(desc.dict))
mrSysDir = getMapredSystemDirectory(self._cfg['mapred-system-dir-root'],
self._cfg['userid'], self._cfg['service-id'])
self.log.debug('mrsysdir is %s' % mrSysDir)
cmd = HadoopCommand(id, desc, self.__tempDir, self.__pkgDir, self._cfg['hadoop-port-range'], self.log,
self._cfg['java-home'], mrSysDir, restart)
self.__hadoopLogDirs.append(cmd.logdir)
self.log.debug("hadoop log directory: %s" % self.__hadoopLogDirs)
try:
# if the tarball isn't there, we use the pkgs dir given.
if self.__pkgDir == None:
pkgdir = desc.getPkgDirs()
else:
pkgdir = self.__pkgDir
        self.log.debug('This is the package dir %s ' % (pkgdir))
if not cmd.run(pkgdir):
addnInfo = ""
          if cmd.stdErrContents != "":
addnInfo = " Information from stderr of the command:\n%s" % (cmd.stdErrContents)
raise Exception("Could not launch the %s using %s/bin/hadoop.%s" % (desc.getName(), pkgdir, addnInfo))
except Exception, e:
self.log.debug("Exception running hadoop command: %s\n%s" % (get_exception_error_string(), get_exception_string()))
self.__running[id] = cmd
raise Exception(e)
id += 1
if desc.isForeground():
continue
self.__running[id-1] = cmd
      # The command is now running. If this HodRing launched the jobtracker,
      # check that it is ready to accept jobs before returning.
self.__check_jobtracker(desc, id-1, pkgdir)
def __check_jobtracker(self, desc, id, pkgdir):
# Check jobtracker status. Return properly if it is ready to accept jobs.
# Currently Checks for Jetty to come up, the last thing that can be checked
# before JT completes initialisation. To be perfectly reliable, we need
# hadoop support
name = desc.getName()
if name == 'jobtracker':
# Yes I am the Jobtracker
self.log.debug("Waiting for jobtracker to initialise")
version = desc.getVersion()
self.log.debug("jobtracker version : %s" % version)
hadoopCmd = self.getRunningValues()[id]
attrs = hadoopCmd.getFilledInKeyValues()
attrs = parseEquals(attrs)
jobTrackerAddr = attrs['mapred.job.tracker']
self.log.debug("jobtracker rpc server : %s" % jobTrackerAddr)
if version < 16:
jettyAddr = jobTrackerAddr.split(':')[0] + ':' + \
attrs['mapred.job.tracker.info.port']
else:
jettyAddr = attrs['mapred.job.tracker.http.address']
self.log.debug("Jobtracker jetty : %s" % jettyAddr)
# Check for Jetty to come up
# For this do a http head, and then look at the status
defaultTimeout = socket.getdefaulttimeout()
      # socket timeout isn't exposed at httplib level. Setting explicitly.
socket.setdefaulttimeout(1)
sleepTime = 0.5
jettyStatus = False
jettyStatusmsg = ""
while sleepTime <= 32:
# There is a possibility that the command might fail after a while.
# This code will check if the command failed so that a better
# error message can be returned to the user.
if not hadoopCmd.getCommandStatus():
self.log.critical('Hadoop command found to have failed when ' \
'checking for jobtracker status')
hadoopCmd.handleFailedCommand()
addnInfo = ""
          if hadoopCmd.stdErrContents != "":
addnInfo = " Information from stderr of the command:\n%s" \
% (hadoopCmd.stdErrContents)
raise Exception("Could not launch the %s using %s/bin/hadoop.%s" \
% (desc.getName(), pkgdir, addnInfo))
try:
jettyConn = httplib.HTTPConnection(jettyAddr)
jettyConn.request("HEAD", "/jobtracker.jsp")
# httplib inherently retries the following till socket timeout
resp = jettyConn.getresponse()
if resp.status != 200:
# Some problem?
jettyStatus = False
            jettyStatusmsg = "Jetty gave a non-200 response to an HTTP HEAD" +\
" request. HTTP Status (Code, Msg): (%s, %s)" % \
( resp.status, resp.reason )
break
else:
self.log.info("Jetty returned a 200 status (%s)" % resp.reason)
self.log.info("JobTracker successfully initialised")
return
except socket.error:
self.log.debug("Jetty gave a socket error. Sleeping for %s" \
% sleepTime)
time.sleep(sleepTime)
sleepTime = sleepTime * 2
except Exception, e:
jettyStatus = False
          jettyStatusmsg = ("A process (possibly other than jetty) running on" + \
                            " the port assigned to jetty is returning an invalid HTTP response")
break
socket.setdefaulttimeout(defaultTimeout)
if not jettyStatus:
self.log.critical("Jobtracker failed to initialise.")
if jettyStatusmsg:
self.log.critical( "Reason: %s" % jettyStatusmsg )
else: self.log.critical( "Reason: Jetty failed to give response")
raise Exception("JobTracker failed to initialise")
def stop(self):
self.log.debug("Entered hodring stop.")
if self._http:
self.log.debug("stopping http server...")
self._http.stop()
self.log.debug("call hodsvcrgy stop...")
hodBaseService.stop(self)
def _xr_method_clusterStart(self, initialize=True):
return self.clusterStart(initialize)
def _xr_method_clusterStop(self):
return self.clusterStop()
def start(self):
"""Run and maintain hodring commands"""
try:
if self._cfg.has_key('download-addr'):
self._http = threadedHTTPServer('', self._cfg['http-port-range'])
self.log.info("Starting http server...")
self._http.serve_forever()
self.log.debug("http://%s:%d" % (self._http.server_address[0],
self._http.server_address[1]))
hodBaseService.start(self)
ringXRAddress = None
if self._cfg.has_key('ringmaster-xrs-addr'):
ringXRAddress = "http://%s:%s/" % (self._cfg['ringmaster-xrs-addr'][0],
self._cfg['ringmaster-xrs-addr'][1])
self.log.debug("Ringmaster at %s" % ringXRAddress)
self.log.debug("Creating service registry XML-RPC client.")
serviceClient = hodXRClient(to_http_url(
self._cfg['svcrgy-addr']))
if ringXRAddress == None:
self.log.info("Did not get ringmaster XML-RPC address. Fetching information from service registry.")
ringList = serviceClient.getServiceInfo(self._cfg['userid'],
self._cfg['service-id'], 'ringmaster', 'hod')
self.log.debug(pprint.pformat(ringList))
if len(ringList):
if isinstance(ringList, list):
ringXRAddress = ringList[0]['xrs']
count = 0
while (ringXRAddress == None and count < 3000):
ringList = serviceClient.getServiceInfo(self._cfg['userid'],
self._cfg['service-id'], 'ringmaster', 'hod')
if len(ringList):
if isinstance(ringList, list):
ringXRAddress = ringList[0]['xrs']
count = count + 1
time.sleep(.2)
if ringXRAddress == None:
raise Exception("Could not get ringmaster XML-RPC server address.")
self.log.debug("Creating ringmaster XML-RPC client.")
ringClient = hodXRClient(ringXRAddress)
id = self.hostname + "_" + str(os.getpid())
if 'download-addr' in self._cfg:
self.__download_package(ringClient)
else:
self.log.debug("Did not find a download address.")
cmdlist = []
firstTime = True
increment = 0
hadoopStartupTime = 2
cmdlist = ringClient.getCommand(id)
while (cmdlist == []):
if firstTime:
sleepTime = increment + self._cfg['cmd-retry-initial-time'] + hadoopStartupTime\
+ random.uniform(0,self._cfg['cmd-retry-interval'])
firstTime = False
else:
          sleepTime = increment + self._cfg['cmd-retry-initial-time'] + \
                      random.uniform(0,self._cfg['cmd-retry-interval'])
self.log.debug("Did not get command list. Waiting for %s seconds." % (sleepTime))
time.sleep(sleepTime)
increment = increment + 1
cmdlist = ringClient.getCommand(id)
self.log.debug(pformat(cmdlist))
cmdDescs = []
for cmds in cmdlist:
cmdDescs.append(CommandDesc(cmds['dict'], self.log))
self._cfg['commanddesc'] = cmdDescs
self.log.info("Running hadoop commands...")
self.__run_hadoop_commands(False)
masterParams = []
for k, cmd in self.__running.iteritems():
masterParams.extend(cmd.filledInKeyVals)
self.log.debug("printing getparams")
self.log.debug(pformat(id))
self.log.debug(pformat(masterParams))
# when this is on a required host, the ringMaster already has our masterParams
if(len(masterParams) > 0):
ringClient.addMasterParams(id, masterParams)
except Exception, e:
raise Exception(e)
def clusterStart(self, initialize=True):
"""Start a stopped mapreduce/dfs cluster"""
if initialize:
self.log.debug('clusterStart Method Invoked - Initialize')
else:
self.log.debug('clusterStart Method Invoked - No Initialize')
try:
self.log.debug("Creating service registry XML-RPC client.")
serviceClient = hodXRClient(to_http_url(self._cfg['svcrgy-addr']),
None, None, 0, 0, 0)
self.log.info("Fetching ringmaster information from service registry.")
count = 0
ringXRAddress = None
while (ringXRAddress == None and count < 3000):
ringList = serviceClient.getServiceInfo(self._cfg['userid'],
self._cfg['service-id'], 'ringmaster', 'hod')
if len(ringList):
if isinstance(ringList, list):
ringXRAddress = ringList[0]['xrs']
count = count + 1
if ringXRAddress == None:
raise Exception("Could not get ringmaster XML-RPC server address.")
self.log.debug("Creating ringmaster XML-RPC client.")
ringClient = hodXRClient(ringXRAddress, None, None, 0, 0, 0)
id = self.hostname + "_" + str(os.getpid())
cmdlist = []
if initialize:
if 'download-addr' in self._cfg:
self.__download_package(ringClient)
else:
self.log.debug("Did not find a download address.")
while (cmdlist == []):
cmdlist = ringClient.getCommand(id)
else:
while (cmdlist == []):
cmdlist = ringClient.getAdminCommand(id)
self.log.debug(pformat(cmdlist))
cmdDescs = []
for cmds in cmdlist:
cmdDescs.append(CommandDesc(cmds['dict'], self.log))
self._cfg['commanddesc'] = cmdDescs
if initialize:
self.log.info("Running hadoop commands again... - Initialize")
self.__run_hadoop_commands()
masterParams = []
for k, cmd in self.__running.iteritems():
self.log.debug(cmd)
masterParams.extend(cmd.filledInKeyVals)
self.log.debug("printing getparams")
self.log.debug(pformat(id))
self.log.debug(pformat(masterParams))
# when this is on a required host, the ringMaster already has our masterParams
if(len(masterParams) > 0):
ringClient.addMasterParams(id, masterParams)
else:
self.log.info("Running hadoop commands again... - No Initialize")
self.__run_hadoop_commands()
except:
self.log.error(get_exception_string())
return True
def clusterStop(self):
"""Stop a running mapreduce/dfs cluster without stopping the hodring"""
self.log.debug('clusterStop Method Invoked')
try:
for cmd in self.__running.values():
cmd.kill()
self.__running = {}
except:
self.log.error(get_exception_string())
return True
|
IBM/differential-privacy-library | refs/heads/main | tests/mechanisms/test_LaplaceFolded.py | 1 | import numpy as np
from unittest import TestCase
from diffprivlib.mechanisms import LaplaceFolded
from diffprivlib.utils import global_seed
class TestLaplaceFolded(TestCase):
def setup_method(self, method):
        if method.__name__.endswith("prob"):
global_seed(314159)
self.mech = LaplaceFolded
def teardown_method(self, method):
del self.mech
def test_class(self):
from diffprivlib.mechanisms import DPMechanism
self.assertTrue(issubclass(LaplaceFolded, DPMechanism))
def test_zero_sensitivity(self):
mech = self.mech(epsilon=1, delta=0, sensitivity=0, lower=0, upper=2)
for i in range(1000):
self.assertAlmostEqual(mech.randomise(1), 1)
def test_inf_epsilon(self):
mech = self.mech(epsilon=float("inf"), delta=0, sensitivity=1, lower=0, upper=1)
for i in range(1000):
self.assertEqual(mech.randomise(0.5), 0.5)
def test_complex_epsilon(self):
with self.assertRaises(TypeError):
self.mech(epsilon=1 + 2j, delta=0, sensitivity=1, lower=0, upper=1)
def test_string_epsilon(self):
with self.assertRaises(TypeError):
self.mech(epsilon="Two", delta=0, sensitivity=1, lower=0, upper=1)
def test_wrong_bounds(self):
with self.assertRaises(ValueError):
self.mech(epsilon=1, delta=0, sensitivity=1, lower=3, upper=1)
with self.assertRaises(TypeError):
self.mech(epsilon=1, delta=0, sensitivity=1, lower="0", upper="2")
def test_non_numeric(self):
mech = self.mech(epsilon=1, delta=0, sensitivity=1, lower=0, upper=1)
with self.assertRaises(TypeError):
mech.randomise("Hello")
def test_zero_median_prob(self):
mech = self.mech(epsilon=1, delta=0, sensitivity=1, lower=0, upper=1)
vals = []
for i in range(10000):
vals.append(mech.randomise(0.5))
median = float(np.median(vals))
self.assertAlmostEqual(np.abs(median), 0.5, delta=0.1)
def test_neighbors_prob(self):
epsilon = 1
runs = 10000
mech = self.mech(epsilon=1, delta=0, sensitivity=1, lower=0, upper=1)
count = [0, 0]
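        # Added explanatory note: the loop below empirically estimates
        # Pr[M(0) <= 0.5] and Pr[M(1) <= 0.5] for the neighbouring inputs 0
        # and 1; pure epsilon-DP bounds the ratio of these probabilities by
        # exp(epsilon), which the assertions check up to a 0.1 sampling margin.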
for i in range(runs):
val0 = mech.randomise(0)
if val0 <= 0.5:
count[0] += 1
val1 = mech.randomise(1)
if val1 <= 0.5:
count[1] += 1
self.assertGreater(count[0], count[1])
self.assertLessEqual(count[0] / runs, np.exp(epsilon) * count[1] / runs + 0.1)
def test_within_bounds(self):
mech = self.mech(epsilon=1, delta=0, sensitivity=1, lower=0, upper=1)
vals = []
for i in range(1000):
vals.append(mech.randomise(0.5))
vals = np.array(vals)
self.assertTrue(np.all(vals >= 0))
self.assertTrue(np.all(vals <= 1))
def test_repr(self):
repr_ = repr(self.mech(epsilon=1, delta=0, sensitivity=1, lower=0, upper=1))
self.assertIn(".LaplaceFolded(", repr_)
def test_bias(self):
mech = self.mech(epsilon=1, delta=0, sensitivity=1, lower=0, upper=1)
self.assertGreater(mech.bias(0), 0.0)
self.assertLess(mech.bias(1), 0.0)
def test_variance(self):
mech = self.mech(epsilon=1, delta=0, sensitivity=1, lower=0, upper=1)
self.assertRaises(NotImplementedError, mech.variance, 0)
|
mjwtom/swift | refs/heads/master | swift/common/utils.py | 2 | # Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Miscellaneous utility functions for use with Swift."""
from __future__ import print_function
import errno
import fcntl
import grp
import hmac
import operator
import os
import pwd
import re
import sys
import threading as stdlib_threading
import time
import uuid
import functools
import weakref
import email.parser
from hashlib import md5, sha1
from random import random, shuffle
from contextlib import contextmanager, closing
import ctypes
import ctypes.util
from optparse import OptionParser
from tempfile import mkstemp, NamedTemporaryFile
try:
import simplejson as json
except ImportError:
import json
import glob
import itertools
import stat
import datetime
import eventlet
import eventlet.semaphore
from eventlet import GreenPool, sleep, Timeout, tpool, greenthread, \
greenio, event
from eventlet.green import socket, threading
import eventlet.queue
import netifaces
import codecs
utf8_decoder = codecs.getdecoder('utf-8')
utf8_encoder = codecs.getencoder('utf-8')
import six
from six.moves import cPickle as pickle
from six.moves.configparser import (ConfigParser, NoSectionError,
NoOptionError, RawConfigParser)
from six.moves.queue import Queue, Empty
from six.moves import range
from six.moves.urllib.parse import ParseResult
from six.moves.urllib.parse import quote as _quote
from six.moves.urllib.parse import urlparse as stdlib_urlparse
from swift import gettext_ as _
import swift.common.exceptions
from swift.common.http import is_success, is_redirection, HTTP_NOT_FOUND, \
HTTP_PRECONDITION_FAILED, HTTP_REQUESTED_RANGE_NOT_SATISFIABLE
# logging doesn't import patched as cleanly as one would like
from logging.handlers import SysLogHandler
import logging
logging.thread = eventlet.green.thread
logging.threading = eventlet.green.threading
logging._lock = logging.threading.RLock()
# setup notice level logging
NOTICE = 25
logging.addLevelName(NOTICE, 'NOTICE')
SysLogHandler.priority_map['NOTICE'] = 'notice'
# These are lazily pulled from libc elsewhere
_sys_fallocate = None
_posix_fadvise = None
_libc_socket = None
_libc_bind = None
_libc_accept = None
# If set to non-zero, fallocate routines will fail based on free space
# available being at or below this amount, in bytes.
FALLOCATE_RESERVE = 0
# Used by hash_path to offer a bit more security when generating hashes for
# paths. It simply appends this value to all paths; guessing the hash a path
# will end up with would also require knowing this suffix.
HASH_PATH_SUFFIX = ''
HASH_PATH_PREFIX = ''
SWIFT_CONF_FILE = '/etc/swift/swift.conf'
# These constants are Linux-specific, and Python doesn't seem to know
# about them. We ask anyway just in case that ever gets fixed.
#
# The values were copied from the Linux 3.0 kernel headers.
AF_ALG = getattr(socket, 'AF_ALG', 38)
F_SETPIPE_SZ = getattr(fcntl, 'F_SETPIPE_SZ', 1031)
class InvalidHashPathConfigError(ValueError):
def __str__(self):
return "[swift-hash]: both swift_hash_path_suffix and " \
"swift_hash_path_prefix are missing from %s" % SWIFT_CONF_FILE
def validate_hash_conf():
global HASH_PATH_SUFFIX
global HASH_PATH_PREFIX
if not HASH_PATH_SUFFIX and not HASH_PATH_PREFIX:
hash_conf = ConfigParser()
if hash_conf.read(SWIFT_CONF_FILE):
try:
HASH_PATH_SUFFIX = hash_conf.get('swift-hash',
'swift_hash_path_suffix')
except (NoSectionError, NoOptionError):
pass
try:
HASH_PATH_PREFIX = hash_conf.get('swift-hash',
'swift_hash_path_prefix')
except (NoSectionError, NoOptionError):
pass
if not HASH_PATH_SUFFIX and not HASH_PATH_PREFIX:
raise InvalidHashPathConfigError()
try:
validate_hash_conf()
except InvalidHashPathConfigError:
# could get monkey patched or lazy loaded
pass
def get_hmac(request_method, path, expires, key):
"""
Returns the hexdigest string of the HMAC-SHA1 (RFC 2104) for
the request.
:param request_method: Request method to allow.
:param path: The path to the resource to allow access to.
:param expires: Unix timestamp as an int for when the URL
expires.
:param key: HMAC shared secret.
:returns: hexdigest str of the HMAC-SHA1 for the request.
"""
return hmac.new(
key, '%s\n%s\n%s' % (request_method, expires, path), sha1).hexdigest()
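# Illustrative usage (not part of the original source): a call such as
# get_hmac('GET', '/v1/AUTH_test/c/o', 1402464677, 'secret') returns a
# 40-character hex digest, which signature-based middleware (e.g.
# TempURL-style signing) typically compares against a client-supplied value.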
# Used by get_swift_info and register_swift_info to store information about
# the swift cluster.
_swift_info = {}
_swift_admin_info = {}
def get_swift_info(admin=False, disallowed_sections=None):
"""
Returns information about the swift cluster that has been previously
registered with the register_swift_info call.
:param admin: boolean value, if True will additionally return an 'admin'
section with information previously registered as admin
info.
:param disallowed_sections: list of section names to be withheld from the
information returned.
:returns: dictionary of information about the swift cluster.
"""
disallowed_sections = disallowed_sections or []
info = dict(_swift_info)
for section in disallowed_sections:
key_to_pop = None
sub_section_dict = info
for sub_section in section.split('.'):
if key_to_pop:
sub_section_dict = sub_section_dict.get(key_to_pop, {})
if not isinstance(sub_section_dict, dict):
sub_section_dict = {}
break
key_to_pop = sub_section
sub_section_dict.pop(key_to_pop, None)
if admin:
info['admin'] = dict(_swift_admin_info)
info['admin']['disallowed_sections'] = list(disallowed_sections)
return info
def register_swift_info(name='swift', admin=False, **kwargs):
"""
Registers information about the swift cluster to be retrieved with calls
to get_swift_info.
    NOTE: Do not use "." in the name parameter or in any keys in kwargs. "."
    is used in disallowed_sections to remove unwanted keys from /info.
:param name: string, the section name to place the information under.
:param admin: boolean, if True, information will be registered to an
admin section which can optionally be withheld when
requesting the information.
:param kwargs: key value arguments representing the information to be
added.
:raises ValueError: if name or any of the keys in kwargs has "." in it
"""
if name == 'admin' or name == 'disallowed_sections':
        raise ValueError('\'{0}\' is a reserved name.'.format(name))
if admin:
dict_to_use = _swift_admin_info
else:
dict_to_use = _swift_info
if name not in dict_to_use:
if "." in name:
raise ValueError('Cannot use "." in a swift_info key: %s' % name)
dict_to_use[name] = {}
for key, val in kwargs.items():
if "." in key:
raise ValueError('Cannot use "." in a swift_info key: %s' % key)
dict_to_use[name][key] = val
def backward(f, blocksize=4096):
"""
A generator returning lines from a file starting with the last line,
then the second last line, etc. i.e., it reads lines backwards.
Stops when the first line (if any) is read.
This is useful when searching for recent activity in very
large files.
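    For example (added illustration, assuming a binary-mode file object): for
    a file containing b"a\nb\nc", the generator yields b'c', then b'b', then
    b'a'.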
:param f: file object to read
    :param blocksize: number of characters to go backwards at each block
"""
f.seek(0, os.SEEK_END)
if f.tell() == 0:
return
last_row = b''
while f.tell() != 0:
try:
f.seek(-blocksize, os.SEEK_CUR)
except IOError:
blocksize = f.tell()
f.seek(-blocksize, os.SEEK_CUR)
block = f.read(blocksize)
f.seek(-blocksize, os.SEEK_CUR)
rows = block.split(b'\n')
rows[-1] = rows[-1] + last_row
while rows:
last_row = rows.pop(-1)
if rows and last_row:
yield last_row
yield last_row
# Used when reading config values
TRUE_VALUES = set(('true', '1', 'yes', 'on', 't', 'y'))
def config_true_value(value):
"""
Returns True if the value is either True or a string in TRUE_VALUES.
Returns False otherwise.
"""
return value is True or \
(isinstance(value, six.string_types) and value.lower() in TRUE_VALUES)
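# For example, config_true_value('Yes') and config_true_value(True) return
# True, while config_true_value('0') and config_true_value(None) return False.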
def config_auto_int_value(value, default):
"""
Returns default if value is None or 'auto'.
Returns value as an int or raises ValueError otherwise.
"""
if value is None or \
(isinstance(value, six.string_types) and value.lower() == 'auto'):
return default
try:
value = int(value)
except (TypeError, ValueError):
raise ValueError('Config option must be an integer or the '
'string "auto", not "%s".' % value)
return value
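# For example, config_auto_int_value('auto', 3) and config_auto_int_value(None, 3)
# return 3, config_auto_int_value('7', 3) returns 7, and non-integer strings
# such as '7.5' raise ValueError.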
def append_underscore(prefix):
if prefix and prefix[-1] != '_':
prefix += '_'
return prefix
def config_read_reseller_options(conf, defaults):
"""
Read reseller_prefix option and associated options from configuration
Reads the reseller_prefix option, then reads options that may be
associated with a specific reseller prefix. Reads options such that an
option without a prefix applies to all reseller prefixes unless an option
has an explicit prefix.
:param conf: the configuration
:param defaults: a dict of default values. The key is the option
name. The value is either an array of strings or a string
:return: tuple of an array of reseller prefixes and a dict of option values
"""
reseller_prefix_opt = conf.get('reseller_prefix', 'AUTH').split(',')
reseller_prefixes = []
for prefix in [pre.strip() for pre in reseller_prefix_opt if pre.strip()]:
if prefix == "''":
prefix = ''
prefix = append_underscore(prefix)
if prefix not in reseller_prefixes:
reseller_prefixes.append(prefix)
if len(reseller_prefixes) == 0:
reseller_prefixes.append('')
# Get prefix-using config options
associated_options = {}
for prefix in reseller_prefixes:
associated_options[prefix] = dict(defaults)
associated_options[prefix].update(
config_read_prefixed_options(conf, '', defaults))
prefix_name = prefix if prefix != '' else "''"
associated_options[prefix].update(
config_read_prefixed_options(conf, prefix_name, defaults))
return reseller_prefixes, associated_options
def config_read_prefixed_options(conf, prefix_name, defaults):
"""
Read prefixed options from configuration
:param conf: the configuration
:param prefix_name: the prefix (including, if needed, an underscore)
:param defaults: a dict of default values. The dict supplies the
option name and type (string or comma separated string)
:return: a dict containing the options
"""
params = {}
for option_name in defaults.keys():
value = conf.get('%s%s' % (prefix_name, option_name))
if value:
if isinstance(defaults.get(option_name), list):
params[option_name] = []
for role in value.lower().split(','):
params[option_name].append(role.strip())
else:
params[option_name] = value.strip()
return params
def noop_libc_function(*args):
return 0
def validate_configuration():
try:
validate_hash_conf()
except InvalidHashPathConfigError as e:
sys.exit("Error: %s" % e)
def load_libc_function(func_name, log_error=True,
fail_if_missing=False):
"""
Attempt to find the function in libc, otherwise return a no-op func.
:param func_name: name of the function to pull from libc.
:param log_error: log an error when a function can't be found
:param fail_if_missing: raise an exception when a function can't be found.
Default behavior is to return a no-op function.
"""
try:
libc = ctypes.CDLL(ctypes.util.find_library('c'), use_errno=True)
return getattr(libc, func_name)
except AttributeError:
if fail_if_missing:
raise
if log_error:
logging.warn(_("Unable to locate %s in libc. Leaving as a "
"no-op."), func_name)
return noop_libc_function
def generate_trans_id(trans_id_suffix):
return 'tx%s-%010x%s' % (
uuid.uuid4().hex[:21], time.time(), quote(trans_id_suffix))
def get_policy_index(req_headers, res_headers):
"""
Returns the appropriate index of the storage policy for the request from
a proxy server
    :param req_headers: dict of the request headers.
    :param res_headers: dict of the response headers.
:returns: string index of storage policy, or None
"""
header = 'X-Backend-Storage-Policy-Index'
policy_index = res_headers.get(header, req_headers.get(header))
return str(policy_index) if policy_index is not None else None
def get_log_line(req, res, trans_time, additional_info):
"""
Make a line for logging that matches the documented log line format
for backend servers.
:param req: the request.
:param res: the response.
:param trans_time: the time the request took to complete, a float.
:param additional_info: a string to log at the end of the line
    :returns: a properly formatted line for logging.
"""
policy_index = get_policy_index(req.headers, res.headers)
return '%s - - [%s] "%s %s" %s %s "%s" "%s" "%s" %.4f "%s" %d %s' % (
req.remote_addr,
time.strftime('%d/%b/%Y:%H:%M:%S +0000', time.gmtime()),
req.method, req.path, res.status.split()[0],
res.content_length or '-', req.referer or '-',
req.headers.get('x-trans-id', '-'),
req.user_agent or '-', trans_time, additional_info or '-',
os.getpid(), policy_index or '-')
def get_trans_id_time(trans_id):
if len(trans_id) >= 34 and trans_id[:2] == 'tx' and trans_id[23] == '-':
try:
return int(trans_id[24:34], 16)
except ValueError:
pass
return None
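# For example, a transaction id formed as 'tx' + 21 hex characters + '-' +
# '0055d24824' + an optional suffix yields int('0055d24824', 16) from
# get_trans_id_time(); ids that do not match this shape yield None.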
class FileLikeIter(object):
def __init__(self, iterable):
"""
Wraps an iterable to behave as a file-like object.
"""
self.iterator = iter(iterable)
self.buf = None
self.closed = False
def __iter__(self):
return self
def next(self):
"""
next(x) -> the next value, or raise StopIteration
"""
if self.closed:
raise ValueError('I/O operation on closed file')
if self.buf:
rv = self.buf
self.buf = None
return rv
else:
return next(self.iterator)
def read(self, size=-1):
"""
read([size]) -> read at most size bytes, returned as a string.
If the size argument is negative or omitted, read until EOF is reached.
Notice that when in non-blocking mode, less data than what was
requested may be returned, even if no size parameter was given.
"""
if self.closed:
raise ValueError('I/O operation on closed file')
if size < 0:
return ''.join(self)
elif not size:
chunk = ''
elif self.buf:
chunk = self.buf
self.buf = None
else:
try:
chunk = next(self.iterator)
except StopIteration:
return ''
if len(chunk) > size:
self.buf = chunk[size:]
chunk = chunk[:size]
return chunk
def readline(self, size=-1):
"""
readline([size]) -> next line from the file, as a string.
Retain newline. A non-negative size argument limits the maximum
number of bytes to return (an incomplete line may be returned then).
Return an empty string at EOF.
"""
if self.closed:
raise ValueError('I/O operation on closed file')
data = ''
while '\n' not in data and (size < 0 or len(data) < size):
if size < 0:
chunk = self.read(1024)
else:
chunk = self.read(size - len(data))
if not chunk:
break
data += chunk
if '\n' in data:
data, sep, rest = data.partition('\n')
data += sep
if self.buf:
self.buf = rest + self.buf
else:
self.buf = rest
return data
def readlines(self, sizehint=-1):
"""
readlines([size]) -> list of strings, each a line from the file.
Call readline() repeatedly and return a list of the lines so read.
The optional size argument, if given, is an approximate bound on the
total number of bytes in the lines returned.
"""
if self.closed:
raise ValueError('I/O operation on closed file')
lines = []
while True:
line = self.readline(sizehint)
if not line:
break
lines.append(line)
if sizehint >= 0:
sizehint -= len(line)
if sizehint <= 0:
break
return lines
def close(self):
"""
close() -> None or (perhaps) an integer. Close the file.
Sets data attribute .closed to True. A closed file cannot be used for
further I/O operations. close() may be called more than once without
error. Some kinds of file objects (for example, opened by popen())
may return an exit status upon closing.
"""
self.iterator = None
self.closed = True
class FallocateWrapper(object):
def __init__(self, noop=False):
if noop:
self.func_name = 'posix_fallocate'
self.fallocate = noop_libc_function
return
# fallocate is preferred because we need the on-disk size to match
# the allocated size. Older versions of sqlite require that the
# two sizes match. However, fallocate is Linux only.
for func in ('fallocate', 'posix_fallocate'):
self.func_name = func
self.fallocate = load_libc_function(func, log_error=False)
if self.fallocate is not noop_libc_function:
break
if self.fallocate is noop_libc_function:
logging.warn(_("Unable to locate fallocate, posix_fallocate in "
"libc. Leaving as a no-op."))
def __call__(self, fd, mode, offset, length):
"""The length parameter must be a ctypes.c_uint64."""
if FALLOCATE_RESERVE > 0:
st = os.fstatvfs(fd)
free = st.f_frsize * st.f_bavail - length.value
if free <= FALLOCATE_RESERVE:
raise OSError('FALLOCATE_RESERVE fail %s <= %s' % (
free, FALLOCATE_RESERVE))
args = {
'fallocate': (fd, mode, offset, length),
'posix_fallocate': (fd, offset, length)
}
return self.fallocate(*args[self.func_name])
def disable_fallocate():
global _sys_fallocate
_sys_fallocate = FallocateWrapper(noop=True)
def fallocate(fd, size):
"""
Pre-allocate disk space for a file.
:param fd: file descriptor
:param size: size to allocate (in bytes)
"""
global _sys_fallocate
if _sys_fallocate is None:
_sys_fallocate = FallocateWrapper()
if size < 0:
size = 0
# 1 means "FALLOC_FL_KEEP_SIZE", which means it pre-allocates invisibly
ret = _sys_fallocate(fd, 1, 0, ctypes.c_uint64(size))
err = ctypes.get_errno()
if ret and err not in (0, errno.ENOSYS, errno.EOPNOTSUPP,
errno.EINVAL):
raise OSError(err, 'Unable to fallocate(%s)' % size)
def fsync(fd):
"""
Sync modified file data and metadata to disk.
:param fd: file descriptor
"""
if hasattr(fcntl, 'F_FULLSYNC'):
try:
fcntl.fcntl(fd, fcntl.F_FULLSYNC)
except IOError as e:
raise OSError(e.errno, 'Unable to F_FULLSYNC(%s)' % fd)
else:
os.fsync(fd)
def fdatasync(fd):
"""
Sync modified file data to disk.
:param fd: file descriptor
"""
try:
os.fdatasync(fd)
except AttributeError:
fsync(fd)
def fsync_dir(dirpath):
"""
Sync directory entries to disk.
:param dirpath: Path to the directory to be synced.
"""
dirfd = None
try:
dirfd = os.open(dirpath, os.O_DIRECTORY | os.O_RDONLY)
fsync(dirfd)
except OSError as err:
if err.errno == errno.ENOTDIR:
# Raise error if someone calls fsync_dir on a non-directory
raise
logging.warn(_("Unable to perform fsync() on directory %s: %s"),
dirpath, os.strerror(err.errno))
finally:
if dirfd:
os.close(dirfd)
def drop_buffer_cache(fd, offset, length):
"""
Drop 'buffer' cache for the given range of the given file.
:param fd: file descriptor
:param offset: start offset
:param length: length
"""
global _posix_fadvise
if _posix_fadvise is None:
_posix_fadvise = load_libc_function('posix_fadvise64')
# 4 means "POSIX_FADV_DONTNEED"
ret = _posix_fadvise(fd, ctypes.c_uint64(offset),
ctypes.c_uint64(length), 4)
if ret != 0:
logging.warn("posix_fadvise64(%(fd)s, %(offset)s, %(length)s, 4) "
"-> %(ret)s", {'fd': fd, 'offset': offset,
'length': length, 'ret': ret})
NORMAL_FORMAT = "%016.05f"
INTERNAL_FORMAT = NORMAL_FORMAT + '_%016x'
MAX_OFFSET = (16 ** 16) - 1
PRECISION = 1e-5
# Setting this to True will cause the internal format to always display
# extended digits - even when the value is equivalent to the normalized form.
# This isn't ideal during an upgrade when some servers might not understand
# the new time format - but flipping it to True works great for testing.
FORCE_INTERNAL = False # or True
class Timestamp(object):
"""
Internal Representation of Swift Time.
The normalized form of the X-Timestamp header looks like a float
with a fixed width to ensure stable string sorting - normalized
timestamps look like "1402464677.04188"
To support overwrites of existing data without modifying the original
    timestamp but still maintain consistency, a second internal offset vector
    is appended to the normalized timestamp form which compares and sorts
greater than the fixed width float format but less than a newer timestamp.
The internalized format of timestamps looks like
"1402464677.04188_0000000000000000" - the portion after the underscore is
the offset and is a formatted hexadecimal integer.
The internalized form is not exposed to clients in responses from
Swift. Normal client operations will not create a timestamp with an
offset.
The Timestamp class in common.utils supports internalized and
normalized formatting of timestamps and also comparison of timestamp
values. When the offset value of a Timestamp is 0 - it's considered
insignificant and need not be represented in the string format; to
support backwards compatibility during a Swift upgrade the
internalized and normalized form of a Timestamp with an
insignificant offset are identical. When a timestamp includes an
offset it will always be represented in the internalized form, but
is still excluded from the normalized form. Timestamps with an
equivalent timestamp portion (the float part) will compare and order
by their offset. Timestamps with a greater timestamp portion will
always compare and order greater than a Timestamp with a lesser
    timestamp regardless of its offset. String comparison and ordering
is guaranteed for the internalized string format, and is backwards
compatible for normalized timestamps which do not include an offset.
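    For example (added illustration): with a zero offset, both the normalized
    and internalized forms of 1402464677.04188 are '1402464677.04188', while
    an offset of 3 internalizes as '1402464677.04188_0000000000000003' and
    still normalizes to '1402464677.04188'.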
"""
def __init__(self, timestamp, offset=0, delta=0):
"""
Create a new Timestamp.
:param timestamp: time in seconds since the Epoch, may be any of:
* a float or integer
* normalized/internalized string
* another instance of this class (offset is preserved)
:param offset: the second internal offset vector, an int
:param delta: deca-microsecond difference from the base timestamp
param, an int
"""
if isinstance(timestamp, six.string_types):
parts = timestamp.split('_', 1)
self.timestamp = float(parts.pop(0))
if parts:
self.offset = int(parts[0], 16)
else:
self.offset = 0
else:
self.timestamp = float(timestamp)
self.offset = getattr(timestamp, 'offset', 0)
# increment offset
if offset >= 0:
self.offset += offset
else:
raise ValueError('offset must be non-negative')
if self.offset > MAX_OFFSET:
raise ValueError('offset must be smaller than %d' % MAX_OFFSET)
self.raw = int(round(self.timestamp / PRECISION))
# add delta
if delta:
self.raw = self.raw + delta
if self.raw <= 0:
raise ValueError(
'delta must be greater than %d' % (-1 * self.raw))
self.timestamp = float(self.raw * PRECISION)
def __repr__(self):
return INTERNAL_FORMAT % (self.timestamp, self.offset)
def __str__(self):
raise TypeError('You must specify which string format is required')
def __float__(self):
return self.timestamp
def __int__(self):
return int(self.timestamp)
def __nonzero__(self):
return bool(self.timestamp or self.offset)
def __bool__(self):
return self.__nonzero__()
@property
def normal(self):
return NORMAL_FORMAT % self.timestamp
@property
def internal(self):
if self.offset or FORCE_INTERNAL:
return INTERNAL_FORMAT % (self.timestamp, self.offset)
else:
return self.normal
@property
def isoformat(self):
isoformat = datetime.datetime.utcfromtimestamp(
float(self.normal)).isoformat()
# python isoformat() doesn't include msecs when zero
if len(isoformat) < len("1970-01-01T00:00:00.000000"):
isoformat += ".000000"
return isoformat
def __eq__(self, other):
if not isinstance(other, Timestamp):
other = Timestamp(other)
return self.internal == other.internal
def __ne__(self, other):
if not isinstance(other, Timestamp):
other = Timestamp(other)
return self.internal != other.internal
def __cmp__(self, other):
if not isinstance(other, Timestamp):
other = Timestamp(other)
return cmp(self.internal, other.internal)
def normalize_timestamp(timestamp):
"""
Format a timestamp (string or numeric) into a standardized
xxxxxxxxxx.xxxxx (10.5) format.
Note that timestamps using values greater than or equal to November 20th,
2286 at 17:46 UTC will use 11 digits to represent the number of
seconds.
:param timestamp: unix timestamp
:returns: normalized timestamp as a string
"""
return Timestamp(timestamp).normal
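# For example, normalize_timestamp(1253327593.48174) == '1253327593.48174'
# and normalize_timestamp('1253327593') == '1253327593.00000'.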
EPOCH = datetime.datetime(1970, 1, 1)
def last_modified_date_to_timestamp(last_modified_date_str):
"""
Convert a last modified date (like you'd get from a container listing,
e.g. 2014-02-28T23:22:36.698390) to a float.
"""
start = datetime.datetime.strptime(last_modified_date_str,
'%Y-%m-%dT%H:%M:%S.%f')
delta = start - EPOCH
# TODO(sam): after we no longer support py2.6, this expression can
# simplify to Timestamp(delta.total_seconds()).
#
# This calculation is based on Python 2.7's Modules/datetimemodule.c,
# function delta_to_microseconds(), but written in Python.
return Timestamp(delta.days * 86400 +
delta.seconds +
delta.microseconds / 1000000.0)
def normalize_delete_at_timestamp(timestamp):
"""
Format a timestamp (string or numeric) into a standardized
xxxxxxxxxx (10) format.
Note that timestamps less than 0000000000 are raised to
0000000000 and values greater than November 20th, 2286 at
17:46:39 UTC will be capped at that date and time, resulting in
no return value exceeding 9999999999.
This cap is because the expirer is already working through a
sorted list of strings that were all a length of 10. Adding
another digit would mess up the sort and cause the expirer to
break from processing early. By 2286, this problem will need to
be fixed, probably by creating an additional .expiring_objects
account to work from with 11 (or more) digit container names.
:param timestamp: unix timestamp
:returns: normalized timestamp as a string
"""
return '%010d' % min(max(0, float(timestamp)), 9999999999)
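# For example, normalize_delete_at_timestamp(1253327593.48174) returns
# '1253327593', and negative inputs are raised to '0000000000'.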
def mkdirs(path):
"""
Ensures the path is a directory or makes it if not. Errors if the path
exists but is a file or on permissions failure.
:param path: path to create
"""
if not os.path.isdir(path):
try:
os.makedirs(path)
except OSError as err:
if err.errno != errno.EEXIST or not os.path.isdir(path):
raise
def makedirs_count(path, count=0):
"""
Same as os.makedirs() except that this method returns the number of
new directories that had to be created.
Also, this does not raise an error if target directory already exists.
This behaviour is similar to Python 3.x's os.makedirs() called with
exist_ok=True. Also similar to swift.common.utils.mkdirs()
https://hg.python.org/cpython/file/v3.4.2/Lib/os.py#l212
"""
head, tail = os.path.split(path)
if not tail:
head, tail = os.path.split(head)
if head and tail and not os.path.exists(head):
count = makedirs_count(head, count)
if tail == os.path.curdir:
return
try:
os.mkdir(path)
except OSError as e:
# EEXIST may also be raised if path exists as a file
# Do not let that pass.
if e.errno != errno.EEXIST or not os.path.isdir(path):
raise
else:
count += 1
return count
def renamer(old, new, fsync=True):
"""
Attempt to fix / hide race conditions like empty object directories
being removed by backend processes during uploads, by retrying.
The containing directory of 'new' and of all newly created directories are
fsync'd by default. This _will_ come at a performance penalty. In cases
where these additional fsyncs are not necessary, it is expected that the
caller of renamer() turn it off explicitly.
:param old: old path to be renamed
:param new: new path to be renamed to
:param fsync: fsync on containing directory of new and also all
the newly created directories.
"""
dirpath = os.path.dirname(new)
try:
count = makedirs_count(dirpath)
os.rename(old, new)
except OSError:
count = makedirs_count(dirpath)
os.rename(old, new)
if fsync:
# If count=0, no new directories were created. But we still need to
# fsync leaf dir after os.rename().
# If count>0, starting from leaf dir, fsync parent dirs of all
# directories created by makedirs_count()
for i in range(0, count + 1):
fsync_dir(dirpath)
dirpath = os.path.dirname(dirpath)
def split_path(path, minsegs=1, maxsegs=None, rest_with_last=False):
"""
Validate and split the given HTTP request path.
**Examples**::
['a'] = split_path('/a')
['a', None] = split_path('/a', 1, 2)
['a', 'c'] = split_path('/a/c', 1, 2)
['a', 'c', 'o/r'] = split_path('/a/c/o/r', 1, 3, True)
:param path: HTTP Request path to be split
:param minsegs: Minimum number of segments to be extracted
:param maxsegs: Maximum number of segments to be extracted
:param rest_with_last: If True, trailing data will be returned as part
of last segment. If False, and there is
trailing data, raises ValueError.
:returns: list of segments with a length of maxsegs (non-existent
segments will return as None)
:raises: ValueError if given an invalid path
"""
if not maxsegs:
maxsegs = minsegs
if minsegs > maxsegs:
raise ValueError('minsegs > maxsegs: %d > %d' % (minsegs, maxsegs))
if rest_with_last:
segs = path.split('/', maxsegs)
minsegs += 1
maxsegs += 1
count = len(segs)
if (segs[0] or count < minsegs or count > maxsegs or
'' in segs[1:minsegs]):
raise ValueError('Invalid path: %s' % quote(path))
else:
minsegs += 1
maxsegs += 1
segs = path.split('/', maxsegs)
count = len(segs)
if (segs[0] or count < minsegs or count > maxsegs + 1 or
'' in segs[1:minsegs] or
(count == maxsegs + 1 and segs[maxsegs])):
raise ValueError('Invalid path: %s' % quote(path))
segs = segs[1:maxsegs]
segs.extend([None] * (maxsegs - 1 - len(segs)))
return segs
def validate_device_partition(device, partition):
"""
Validate that a device and a partition are valid and won't lead to
directory traversal when used.
:param device: device to validate
:param partition: partition to validate
:raises: ValueError if given an invalid device or partition
"""
if not device or '/' in device or device in ['.', '..']:
raise ValueError('Invalid device: %s' % quote(device or ''))
if not partition or '/' in partition or partition in ['.', '..']:
raise ValueError('Invalid partition: %s' % quote(partition or ''))
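# For example, validate_device_partition('sda1', '312') passes silently,
# while an empty value, '.', '..', or anything containing '/' raises
# ValueError.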
class RateLimitedIterator(object):
"""
Wrap an iterator to only yield elements at a rate of N per second.
:param iterable: iterable to wrap
:param elements_per_second: the rate at which to yield elements
:param limit_after: rate limiting kicks in only after yielding
this many elements; default is 0 (rate limit
immediately)
"""
def __init__(self, iterable, elements_per_second, limit_after=0):
self.iterator = iter(iterable)
self.elements_per_second = elements_per_second
self.limit_after = limit_after
self.running_time = 0
def __iter__(self):
return self
def next(self):
if self.limit_after > 0:
self.limit_after -= 1
else:
self.running_time = ratelimit_sleep(self.running_time,
self.elements_per_second)
return next(self.iterator)
class GreenthreadSafeIterator(object):
"""
Wrap an iterator to ensure that only one greenthread is inside its next()
method at a time.
This is useful if an iterator's next() method may perform network IO, as
that may trigger a greenthread context switch (aka trampoline), which can
give another greenthread a chance to call next(). At that point, you get
an error like "ValueError: generator already executing". By wrapping calls
to next() with a mutex, we avoid that error.
"""
def __init__(self, unsafe_iterable):
self.unsafe_iter = iter(unsafe_iterable)
self.semaphore = eventlet.semaphore.Semaphore(value=1)
def __iter__(self):
return self
def next(self):
with self.semaphore:
return next(self.unsafe_iter)
class NullLogger(object):
"""A no-op logger for eventlet wsgi."""
def write(self, *args):
# "Logs" the args to nowhere
pass
class LoggerFileObject(object):
def __init__(self, logger, log_type='STDOUT'):
self.logger = logger
self.log_type = log_type
def write(self, value):
value = value.strip()
if value:
if 'Connection reset by peer' in value:
self.logger.error(
_('%s: Connection reset by peer'), self.log_type)
else:
self.logger.error(_('%s: %s'), self.log_type, value)
def writelines(self, values):
self.logger.error(_('%s: %s'), self.log_type, '#012'.join(values))
def close(self):
pass
def flush(self):
pass
def __iter__(self):
return self
def next(self):
raise IOError(errno.EBADF, 'Bad file descriptor')
def read(self, size=-1):
raise IOError(errno.EBADF, 'Bad file descriptor')
def readline(self, size=-1):
raise IOError(errno.EBADF, 'Bad file descriptor')
def tell(self):
return 0
def xreadlines(self):
return self
class StatsdClient(object):
def __init__(self, host, port, base_prefix='', tail_prefix='',
default_sample_rate=1, sample_rate_factor=1, logger=None):
self._host = host
self._port = port
self._base_prefix = base_prefix
self.set_prefix(tail_prefix)
self._default_sample_rate = default_sample_rate
self._sample_rate_factor = sample_rate_factor
self._target = (self._host, self._port)
self.random = random
self.logger = logger
def set_prefix(self, new_prefix):
if new_prefix and self._base_prefix:
self._prefix = '.'.join([self._base_prefix, new_prefix, ''])
elif new_prefix:
self._prefix = new_prefix + '.'
elif self._base_prefix:
self._prefix = self._base_prefix + '.'
else:
self._prefix = ''
def _send(self, m_name, m_value, m_type, sample_rate):
if sample_rate is None:
sample_rate = self._default_sample_rate
sample_rate = sample_rate * self._sample_rate_factor
parts = ['%s%s:%s' % (self._prefix, m_name, m_value), m_type]
if sample_rate < 1:
if self.random() < sample_rate:
parts.append('@%s' % (sample_rate,))
else:
return
# Ideally, we'd cache a sending socket in self, but that
# results in a socket getting shared by multiple green threads.
with closing(self._open_socket()) as sock:
try:
return sock.sendto('|'.join(parts), self._target)
except IOError as err:
if self.logger:
self.logger.warn(
'Error sending UDP message to %r: %s',
self._target, err)
def _open_socket(self):
return socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
def update_stats(self, m_name, m_value, sample_rate=None):
return self._send(m_name, m_value, 'c', sample_rate)
def increment(self, metric, sample_rate=None):
return self.update_stats(metric, 1, sample_rate)
def decrement(self, metric, sample_rate=None):
return self.update_stats(metric, -1, sample_rate)
def timing(self, metric, timing_ms, sample_rate=None):
return self._send(metric, timing_ms, 'ms', sample_rate)
def timing_since(self, metric, orig_time, sample_rate=None):
return self.timing(metric, (time.time() - orig_time) * 1000,
sample_rate)
def transfer_rate(self, metric, elapsed_time, byte_xfer, sample_rate=None):
if byte_xfer:
return self.timing(metric,
elapsed_time * 1000 / byte_xfer * 1000,
sample_rate)
def server_handled_successfully(status_int):
"""
True for successful responses *or* error codes that are not Swift's fault,
False otherwise. For example, 500 is definitely the server's fault, but
412 is an error code (4xx are all errors) that is due to a header the
client sent.
If one is tracking error rates to monitor server health, one would be
advised to use a function like this one, lest a client cause a flurry of
404s or 416s and make a spurious spike in your errors graph.
"""
return (is_success(status_int) or
is_redirection(status_int) or
status_int == HTTP_NOT_FOUND or
status_int == HTTP_PRECONDITION_FAILED or
status_int == HTTP_REQUESTED_RANGE_NOT_SATISFIABLE)
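# For example, server_handled_successfully(404) and
# server_handled_successfully(412) are True (client-attributable errors),
# while server_handled_successfully(500) is False.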
def timing_stats(**dec_kwargs):
"""
Returns a decorator that logs timing events or errors for public methods in
swift's wsgi server controllers, based on response code.
"""
def decorating_func(func):
method = func.func_name
@functools.wraps(func)
def _timing_stats(ctrl, *args, **kwargs):
start_time = time.time()
resp = func(ctrl, *args, **kwargs)
if server_handled_successfully(resp.status_int):
ctrl.logger.timing_since(method + '.timing',
start_time, **dec_kwargs)
else:
ctrl.logger.timing_since(method + '.errors.timing',
start_time, **dec_kwargs)
return resp
return _timing_stats
return decorating_func
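# Illustrative timing_stats usage; ExampleController is a placeholder, but the
# pattern matches how the wsgi server controllers apply it: the decorated
# method must return a response exposing status_int, and ctrl.logger must
# provide timing_since():
#
#     class ExampleController(object):
#         @public
#         @timing_stats()
#         def GET(self, req):
#             ...
#
# Successful (or client-fault) responses feed "GET.timing"; server errors feed
# "GET.errors.timing".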
class LoggingHandlerWeakRef(weakref.ref):
"""
Like a weak reference, but passes through a couple methods that logging
handlers need.
"""
def close(self):
referent = self()
try:
if referent:
referent.close()
except KeyError:
# This is to catch an issue with old py2.6 versions
pass
def flush(self):
referent = self()
if referent:
referent.flush()
# double inheritance to support property with setter
class LogAdapter(logging.LoggerAdapter, object):
"""
A Logger-like object which performs some reformatting on calls to
:meth:`exception`. Can be used to store a threadlocal transaction id and
client ip.
"""
_cls_thread_local = threading.local()
def __init__(self, logger, server):
logging.LoggerAdapter.__init__(self, logger, {})
self.server = server
setattr(self, 'warn', self.warning)
@property
def txn_id(self):
if hasattr(self._cls_thread_local, 'txn_id'):
return self._cls_thread_local.txn_id
@txn_id.setter
def txn_id(self, value):
self._cls_thread_local.txn_id = value
@property
def client_ip(self):
if hasattr(self._cls_thread_local, 'client_ip'):
return self._cls_thread_local.client_ip
@client_ip.setter
def client_ip(self, value):
self._cls_thread_local.client_ip = value
@property
def thread_locals(self):
return (self.txn_id, self.client_ip)
@thread_locals.setter
def thread_locals(self, value):
self.txn_id, self.client_ip = value
def getEffectiveLevel(self):
return self.logger.getEffectiveLevel()
def process(self, msg, kwargs):
"""
Add extra info to message
"""
kwargs['extra'] = {'server': self.server, 'txn_id': self.txn_id,
'client_ip': self.client_ip}
return msg, kwargs
def notice(self, msg, *args, **kwargs):
"""
Convenience function for syslog priority LOG_NOTICE. The python
logging level is set to 25, just above info. SysLogHandler is
monkey patched to map this log level to the LOG_NOTICE syslog
priority.
"""
self.log(NOTICE, msg, *args, **kwargs)
def _exception(self, msg, *args, **kwargs):
logging.LoggerAdapter.exception(self, msg, *args, **kwargs)
def exception(self, msg, *args, **kwargs):
_junk, exc, _junk = sys.exc_info()
call = self.error
emsg = ''
if isinstance(exc, OSError):
if exc.errno in (errno.EIO, errno.ENOSPC):
emsg = str(exc)
else:
call = self._exception
elif isinstance(exc, socket.error):
if exc.errno == errno.ECONNREFUSED:
emsg = _('Connection refused')
elif exc.errno == errno.EHOSTUNREACH:
emsg = _('Host unreachable')
elif exc.errno == errno.ETIMEDOUT:
emsg = _('Connection timeout')
else:
call = self._exception
elif isinstance(exc, eventlet.Timeout):
emsg = exc.__class__.__name__
if hasattr(exc, 'seconds'):
emsg += ' (%ss)' % exc.seconds
if isinstance(exc, swift.common.exceptions.MessageTimeout):
if exc.msg:
emsg += ' %s' % exc.msg
else:
call = self._exception
call('%s: %s' % (msg, emsg), *args, **kwargs)
def set_statsd_prefix(self, prefix):
"""
The StatsD client prefix defaults to the "name" of the logger. This
method may override that default with a specific value. Currently used
in the proxy-server to differentiate the Account, Container, and Object
controllers.
"""
if self.logger.statsd_client:
self.logger.statsd_client.set_prefix(prefix)
def statsd_delegate(statsd_func_name):
"""
Factory to create methods which delegate to methods on
self.logger.statsd_client (an instance of StatsdClient). The
created methods conditionally delegate to a method whose name is given
in 'statsd_func_name'. The created delegate methods are a no-op when
StatsD logging is not configured.
:param statsd_func_name: the name of a method on StatsdClient.
"""
func = getattr(StatsdClient, statsd_func_name)
@functools.wraps(func)
def wrapped(self, *a, **kw):
if getattr(self.logger, 'statsd_client'):
return func(self.logger.statsd_client, *a, **kw)
return wrapped
update_stats = statsd_delegate('update_stats')
increment = statsd_delegate('increment')
decrement = statsd_delegate('decrement')
timing = statsd_delegate('timing')
timing_since = statsd_delegate('timing_since')
transfer_rate = statsd_delegate('transfer_rate')
class SwiftLogFormatter(logging.Formatter):
"""
Custom logging.Formatter will append txn_id to a log message if the
record has one and the message does not. Optionally it can shorten
overly long log lines.
"""
def __init__(self, fmt=None, datefmt=None, max_line_length=0):
logging.Formatter.__init__(self, fmt=fmt, datefmt=datefmt)
self.max_line_length = max_line_length
def format(self, record):
if not hasattr(record, 'server'):
# Catch log messages that were not initiated by swift
# (for example, the keystone auth middleware)
record.server = record.name
# Included from Python's logging.Formatter and then altered slightly to
# replace \n with #012
record.message = record.getMessage()
if self._fmt.find('%(asctime)') >= 0:
record.asctime = self.formatTime(record, self.datefmt)
msg = (self._fmt % record.__dict__).replace('\n', '#012')
if record.exc_info:
# Cache the traceback text to avoid converting it multiple times
# (it's constant anyway)
if not record.exc_text:
record.exc_text = self.formatException(
record.exc_info).replace('\n', '#012')
if record.exc_text:
if msg[-3:] != '#012':
msg = msg + '#012'
msg = msg + record.exc_text
if (hasattr(record, 'txn_id') and record.txn_id and
record.levelno != logging.INFO and
record.txn_id not in msg):
msg = "%s (txn: %s)" % (msg, record.txn_id)
if (hasattr(record, 'client_ip') and record.client_ip and
record.levelno != logging.INFO and
record.client_ip not in msg):
msg = "%s (client_ip: %s)" % (msg, record.client_ip)
if self.max_line_length > 0 and len(msg) > self.max_line_length:
if self.max_line_length < 7:
msg = msg[:self.max_line_length]
else:
approxhalf = (self.max_line_length - 5) // 2
msg = msg[:approxhalf] + " ... " + msg[-approxhalf:]
return msg
def get_logger(conf, name=None, log_to_console=False, log_route=None,
fmt="%(server)s: %(message)s"):
"""
Get the current system logger using config settings.
**Log config and defaults**::
log_facility = LOG_LOCAL0
log_level = INFO
log_name = swift
log_max_line_length = 0
log_udp_host = (disabled)
log_udp_port = logging.handlers.SYSLOG_UDP_PORT
log_address = /dev/log
log_statsd_host = (disabled)
log_statsd_port = 8125
log_statsd_default_sample_rate = 1.0
log_statsd_sample_rate_factor = 1.0
log_statsd_metric_prefix = (empty-string)
:param conf: Configuration dict to read settings from
:param name: Name of the logger
:param log_to_console: Add handler which writes to console on stderr
:param log_route: Route for the logging, not emitted to the log, just used
to separate logging configurations
:param fmt: Override log format
"""
if not conf:
conf = {}
if name is None:
name = conf.get('log_name', 'swift')
if not log_route:
log_route = name
logger = logging.getLogger(log_route)
logger.propagate = False
# all new handlers will get the same formatter
formatter = SwiftLogFormatter(
fmt=fmt, max_line_length=int(conf.get('log_max_line_length', 0)))
# get_logger will only ever add one SysLog Handler to a logger
if not hasattr(get_logger, 'handler4logger'):
get_logger.handler4logger = {}
if logger in get_logger.handler4logger:
logger.removeHandler(get_logger.handler4logger[logger])
# facility for this logger will be set by last call wins
facility = getattr(SysLogHandler, conf.get('log_facility', 'LOG_LOCAL0'),
SysLogHandler.LOG_LOCAL0)
udp_host = conf.get('log_udp_host')
if udp_host:
udp_port = int(conf.get('log_udp_port',
logging.handlers.SYSLOG_UDP_PORT))
handler = SysLogHandler(address=(udp_host, udp_port),
facility=facility)
else:
log_address = conf.get('log_address', '/dev/log')
try:
handler = SysLogHandler(address=log_address, facility=facility)
except socket.error as e:
# Either /dev/log isn't a UNIX socket or it does not exist at all
if e.errno not in [errno.ENOTSOCK, errno.ENOENT]:
raise e
handler = SysLogHandler(facility=facility)
handler.setFormatter(formatter)
logger.addHandler(handler)
get_logger.handler4logger[logger] = handler
# setup console logging
if log_to_console or hasattr(get_logger, 'console_handler4logger'):
# remove pre-existing console handler for this logger
if not hasattr(get_logger, 'console_handler4logger'):
get_logger.console_handler4logger = {}
if logger in get_logger.console_handler4logger:
logger.removeHandler(get_logger.console_handler4logger[logger])
console_handler = logging.StreamHandler(sys.__stderr__)
console_handler.setFormatter(formatter)
logger.addHandler(console_handler)
get_logger.console_handler4logger[logger] = console_handler
# set the level for the logger
logger.setLevel(
getattr(logging, conf.get('log_level', 'INFO').upper(), logging.INFO))
# Setup logger with a StatsD client if so configured
statsd_host = conf.get('log_statsd_host')
if statsd_host:
statsd_port = int(conf.get('log_statsd_port', 8125))
base_prefix = conf.get('log_statsd_metric_prefix', '')
default_sample_rate = float(conf.get(
'log_statsd_default_sample_rate', 1))
sample_rate_factor = float(conf.get(
'log_statsd_sample_rate_factor', 1))
statsd_client = StatsdClient(statsd_host, statsd_port, base_prefix,
name, default_sample_rate,
sample_rate_factor, logger=logger)
logger.statsd_client = statsd_client
else:
logger.statsd_client = None
adapted_logger = LogAdapter(logger, name)
other_handlers = conf.get('log_custom_handlers', None)
if other_handlers:
log_custom_handlers = [s.strip() for s in other_handlers.split(',')
if s.strip()]
for hook in log_custom_handlers:
try:
mod, fnc = hook.rsplit('.', 1)
logger_hook = getattr(__import__(mod, fromlist=[fnc]), fnc)
logger_hook(conf, name, log_to_console, log_route, fmt,
logger, adapted_logger)
except (AttributeError, ImportError):
print('Error calling custom handler [%s]' % hook,
file=sys.stderr)
except ValueError:
print('Invalid custom handler format [%s]' % hook,
file=sys.stderr)
# Python 2.6 has the undesirable property of keeping references to all log
# handlers around forever in logging._handlers and logging._handlerList.
# Combine that with handlers that keep file descriptors, and you get an fd
# leak.
#
# And no, we can't share handlers; a SyslogHandler has a socket, and if
# two greenthreads end up logging at the same time, you could get message
# overlap that garbles the logs and makes eventlet complain.
#
# Python 2.7 uses weakrefs to avoid the leak, so let's do that too.
if sys.version_info[0] == 2 and sys.version_info[1] <= 6:
try:
logging._acquireLock() # some thread-safety thing
for handler in adapted_logger.logger.handlers:
if handler in logging._handlers:
wr = LoggingHandlerWeakRef(handler)
del logging._handlers[handler]
logging._handlers[wr] = 1
for i, handler_ref in enumerate(logging._handlerList):
if handler_ref is handler:
logging._handlerList[i] = LoggingHandlerWeakRef(
handler)
finally:
logging._releaseLock()
return adapted_logger
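# Illustrative get_logger() call; the conf values are placeholders and any of
# the log_* settings documented above may be supplied:
#
#     logger = get_logger({'log_name': 'example-daemon', 'log_level': 'DEBUG'},
#                         log_to_console=True)
#     logger.info('hello')
#     logger.increment('some.counter')   # no-op unless log_statsd_host is set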
def get_hub():
"""
Checks whether poll is available and falls back
on select if it isn't.
Note about epoll:
Review: https://review.openstack.org/#/c/18806/
There was a problem where once out of every 30 quadrillion
connections, a coroutine wouldn't wake up when the client
closed its end. Epoll was not reporting the event or it was
getting swallowed somewhere. Then when that file descriptor
was re-used, eventlet would freak right out because it still
thought it was waiting for activity from it in some other coro.
"""
try:
import select
if hasattr(select, "poll"):
return "poll"
return "selects"
except ImportError:
return None
def drop_privileges(user, call_setsid=True):
"""
Sets the userid/groupid of the current process, becomes session leader, etc.
:param user: User name to change privileges to
"""
if os.geteuid() == 0:
groups = [g.gr_gid for g in grp.getgrall() if user in g.gr_mem]
os.setgroups(groups)
user = pwd.getpwnam(user)
os.setgid(user[3])
os.setuid(user[2])
os.environ['HOME'] = user[5]
if call_setsid:
try:
os.setsid()
except OSError:
pass
os.chdir('/') # in case you need to rmdir on where you started the daemon
os.umask(0o22) # ensure files are created with the correct privileges
def capture_stdio(logger, **kwargs):
"""
Log unhandled exceptions, close stdio, capture stdout and stderr.
:param logger: Logger object to use
"""
# log uncaught exceptions
sys.excepthook = lambda * exc_info: \
logger.critical(_('UNCAUGHT EXCEPTION'), exc_info=exc_info)
# collect stdio file desc not in use for logging
stdio_files = [sys.stdin, sys.stdout, sys.stderr]
console_fds = [h.stream.fileno() for _junk, h in getattr(
get_logger, 'console_handler4logger', {}).items()]
stdio_files = [f for f in stdio_files if f.fileno() not in console_fds]
with open(os.devnull, 'r+b') as nullfile:
# close stdio (excludes fds open for logging)
for f in stdio_files:
# some platforms throw an error when attempting an stdin flush
try:
f.flush()
except IOError:
pass
try:
os.dup2(nullfile.fileno(), f.fileno())
except OSError:
pass
# redirect stdio
if kwargs.pop('capture_stdout', True):
sys.stdout = LoggerFileObject(logger)
if kwargs.pop('capture_stderr', True):
sys.stderr = LoggerFileObject(logger, 'STDERR')
def parse_options(parser=None, once=False, test_args=None):
"""
Parse standard swift server/daemon options with optparse.OptionParser.
:param parser: OptionParser to use. If not sent one will be created.
:param once: Boolean indicating the "once" option is available
:param test_args: Override sys.argv; used in testing
:returns: Tuple of (config, options); config is an absolute path to the
config file, options is the parser options as a dictionary.
:raises SystemExit: First arg (CONFIG) is required, file must exist
"""
if not parser:
parser = OptionParser(usage="%prog CONFIG [options]")
parser.add_option("-v", "--verbose", default=False, action="store_true",
help="log to console")
if once:
parser.add_option("-o", "--once", default=False, action="store_true",
help="only run one pass of daemon")
# if test_args is None, optparse will use sys.argv[:1]
options, args = parser.parse_args(args=test_args)
if not args:
parser.print_usage()
print(_("Error: missing config path argument"))
sys.exit(1)
config = os.path.abspath(args.pop(0))
if not os.path.exists(config):
parser.print_usage()
print(_("Error: unable to locate %s") % config)
sys.exit(1)
extra_args = []
# if any named options appear in remaining args, set the option to True
for arg in args:
if arg in options.__dict__:
setattr(options, arg, True)
else:
extra_args.append(arg)
options = vars(options)
if extra_args:
options['extra_args'] = extra_args
return config, options
def expand_ipv6(address):
"""
Expand ipv6 address.
:param address: a string containing a valid ipv6 address
:returns: a string containing the fully expanded ipv6 address
"""
packed_ip = socket.inet_pton(socket.AF_INET6, address)
return socket.inet_ntop(socket.AF_INET6, packed_ip)
def whataremyips(bind_ip=None):
"""
Get "our" IP addresses ("us" being the set of services configured by
one `*.conf` file). If our REST listens on a specific address, return it.
Otherwise, if it listens on '0.0.0.0' or '::', return all addresses,
including the loopback.
:param str bind_ip: Optional bind_ip from a config file; may be IP address
or hostname.
:returns: list of Strings of ip addresses
"""
if bind_ip:
# See if bind_ip is '0.0.0.0'/'::'
try:
_, _, _, _, sockaddr = socket.getaddrinfo(
bind_ip, None, 0, socket.SOCK_STREAM, 0,
socket.AI_NUMERICHOST)[0]
if sockaddr[0] not in ('0.0.0.0', '::'):
return [bind_ip]
except socket.gaierror:
pass
addresses = []
for interface in netifaces.interfaces():
try:
iface_data = netifaces.ifaddresses(interface)
for family in iface_data:
if family not in (netifaces.AF_INET, netifaces.AF_INET6):
continue
for address in iface_data[family]:
addr = address['addr']
# If we have an ipv6 address remove the
# %ether_interface at the end
if family == netifaces.AF_INET6:
addr = expand_ipv6(addr.split('%')[0])
addresses.append(addr)
except ValueError:
pass
return addresses
def storage_directory(datadir, partition, name_hash):
"""
Get the storage directory
:param datadir: Base data directory
:param partition: Partition
:param name_hash: Account, container or object name hash
:returns: Storage directory
"""
return os.path.join(datadir, str(partition), name_hash[-3:], name_hash)
def hash_path(account, container=None, object=None, raw_digest=False):
"""
Get the canonical hash for an account/container/object
:param account: Account
:param container: Container
:param object: Object
:param raw_digest: If True, return the raw version rather than a hex digest
:returns: hash string
"""
if object and not container:
raise ValueError('container is required if object is provided')
paths = [account]
if container:
paths.append(container)
if object:
paths.append(object)
if raw_digest:
return md5(HASH_PATH_PREFIX + '/' + '/'.join(paths)
+ HASH_PATH_SUFFIX).digest()
else:
return md5(HASH_PATH_PREFIX + '/' + '/'.join(paths)
+ HASH_PATH_SUFFIX).hexdigest()
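# Illustrative hash_path() calls; the account/container/object names are
# placeholders and the digest value depends on HASH_PATH_PREFIX/SUFFIX:
#
#     hash_path('AUTH_test')                  # account hash (hex string)
#     hash_path('AUTH_test', 'cont', 'obj')   # object hash (hex string)
#     hash_path('AUTH_test', object='obj')    # raises ValueError (no container)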
@contextmanager
def lock_path(directory, timeout=10, timeout_class=None):
"""
Context manager that acquires a lock on a directory. This will block until
the lock can be acquired, or the timeout time has expired (whichever occurs
first).
To lock exclusively, the file or directory has to be opened in write mode.
Python doesn't allow directories to be opened in write mode, so we work
around that by locking a hidden file inside the directory.
:param directory: directory to be locked
:param timeout: timeout (in seconds)
:param timeout_class: The class of the exception to raise if the
lock cannot be granted within the timeout. Will be
constructed as timeout_class(timeout, lockpath). Default:
LockTimeout
"""
if timeout_class is None:
timeout_class = swift.common.exceptions.LockTimeout
mkdirs(directory)
lockpath = '%s/.lock' % directory
fd = os.open(lockpath, os.O_WRONLY | os.O_CREAT)
sleep_time = 0.01
slower_sleep_time = max(timeout * 0.01, sleep_time)
slowdown_at = timeout * 0.01
time_slept = 0
try:
with timeout_class(timeout, lockpath):
while True:
try:
fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
break
except IOError as err:
if err.errno != errno.EAGAIN:
raise
if time_slept > slowdown_at:
sleep_time = slower_sleep_time
sleep(sleep_time)
time_slept += sleep_time
yield True
finally:
os.close(fd)
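# Illustrative lock_path() usage; the directory path is a placeholder:
#
#     with lock_path('/srv/node/sda1/tmp', timeout=5):
#         pass  # exclusive access to the directory while inside the block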
@contextmanager
def lock_file(filename, timeout=10, append=False, unlink=True):
"""
Context manager that acquires a lock on a file. This will block until
the lock can be acquired, or the timeout time has expired (whichever occurs
first).
:param filename: file to be locked
:param timeout: timeout (in seconds)
:param append: True if file should be opened in append mode
:param unlink: True if the file should be unlinked at the end
"""
flags = os.O_CREAT | os.O_RDWR
if append:
flags |= os.O_APPEND
mode = 'a+'
else:
mode = 'r+'
while True:
fd = os.open(filename, flags)
file_obj = os.fdopen(fd, mode)
try:
with swift.common.exceptions.LockTimeout(timeout, filename):
while True:
try:
fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
break
except IOError as err:
if err.errno != errno.EAGAIN:
raise
sleep(0.01)
try:
if os.stat(filename).st_ino != os.fstat(fd).st_ino:
continue
except OSError as err:
if err.errno == errno.ENOENT:
continue
raise
yield file_obj
if unlink:
os.unlink(filename)
break
finally:
file_obj.close()
def lock_parent_directory(filename, timeout=10):
"""
Context manager that acquires a lock on the parent directory of the given
file path. This will block until the lock can be acquired, or the timeout
time has expired (whichever occurs first).
:param filename: file path of the parent directory to be locked
:param timeout: timeout (in seconds)
"""
return lock_path(os.path.dirname(filename), timeout=timeout)
def get_time_units(time_amount):
"""
Get a normalized length of time in the largest unit of time (hours,
minutes, or seconds).
:param time_amount: length of time in seconds
:returns: A tuple of (length of time, unit of time) where unit of time is
one of ('h', 'm', 's')
"""
time_unit = 's'
if time_amount > 60:
time_amount /= 60
time_unit = 'm'
if time_amount > 60:
time_amount /= 60
time_unit = 'h'
return time_amount, time_unit
def compute_eta(start_time, current_value, final_value):
"""
Compute an ETA. Now if only we could also have a progress bar...
:param start_time: Unix timestamp when the operation began
:param current_value: Current value
:param final_value: Final value
:returns: ETA as a tuple of (length of time, unit of time) where unit of
time is one of ('h', 'm', 's')
"""
elapsed = time.time() - start_time
completion = (float(current_value) / final_value) or 0.00001
return get_time_units(1.0 / completion * elapsed - elapsed)
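# Illustrative compute_eta() call with made-up numbers: 25 of 100 items done
# after roughly 10 seconds of work yields an ETA of about 30 seconds:
#
#     compute_eta(time.time() - 10, 25, 100)   # -> approximately (30.0, 's')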
def unlink_older_than(path, mtime):
"""
Remove any file in a given path that was last modified before mtime.
:param path: path to remove file from
:param mtime: timestamp of oldest file to keep
"""
for fname in listdir(path):
fpath = os.path.join(path, fname)
try:
if os.path.getmtime(fpath) < mtime:
os.unlink(fpath)
except OSError:
pass
def item_from_env(env, item_name, allow_none=False):
"""
Get a value from the wsgi environment
:param env: wsgi environment dict
:param item_name: name of item to get
:returns: the value from the environment
"""
item = env.get(item_name, None)
if item is None and not allow_none:
logging.error("ERROR: %s could not be found in env!", item_name)
return item
def cache_from_env(env, allow_none=False):
"""
Get memcache connection pool from the environment (which had been
previously set by the memcache middleware).
:param env: wsgi environment dict
:returns: swift.common.memcached.MemcacheRing from environment
"""
return item_from_env(env, 'swift.cache', allow_none)
def read_conf_dir(parser, conf_dir):
conf_files = []
for f in os.listdir(conf_dir):
if f.endswith('.conf') and not f.startswith('.'):
conf_files.append(os.path.join(conf_dir, f))
return parser.read(sorted(conf_files))
def readconf(conf_path, section_name=None, log_name=None, defaults=None,
raw=False):
"""
Read config file(s) and return config items as a dict
:param conf_path: path to config file/directory, or a file-like object
(hasattr readline)
:param section_name: config section to read (will return all sections if
not defined)
:param log_name: name to be used with logging (will use section_name if
not defined)
:param defaults: dict of default values to pre-populate the config with
:returns: dict of config items
"""
if defaults is None:
defaults = {}
if raw:
c = RawConfigParser(defaults)
else:
c = ConfigParser(defaults)
if hasattr(conf_path, 'readline'):
c.readfp(conf_path)
else:
if os.path.isdir(conf_path):
# read all configs in directory
success = read_conf_dir(c, conf_path)
else:
success = c.read(conf_path)
if not success:
print(_("Unable to read config from %s") % conf_path)
sys.exit(1)
if section_name:
if c.has_section(section_name):
conf = dict(c.items(section_name))
else:
print(_("Unable to find %s config section in %s") %
(section_name, conf_path))
sys.exit(1)
if "log_name" not in conf:
if log_name is not None:
conf['log_name'] = log_name
else:
conf['log_name'] = section_name
else:
conf = {}
for s in c.sections():
conf.update({s: dict(c.items(s))})
if 'log_name' not in conf:
conf['log_name'] = log_name
conf['__file__'] = conf_path
return conf
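# Illustrative readconf() call; the path and section name are placeholders and
# must exist, otherwise the function prints an error and exits:
#
#     conf = readconf('/etc/swift/object-server.conf', 'object-auditor')
#     conf['log_name']   # falls back to the section name when not set in the file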
def write_pickle(obj, dest, tmp=None, pickle_protocol=0):
"""
Ensure that a pickle file gets written to disk. The file
is first written to a tmp location, synced to disk, and then
moved to its final location.
:param obj: python object to be pickled
:param dest: path of final destination file
:param tmp: path to tmp to use, defaults to None
:param pickle_protocol: protocol to pickle the obj with, defaults to 0
"""
if tmp is None:
tmp = os.path.dirname(dest)
fd, tmppath = mkstemp(dir=tmp, suffix='.tmp')
with os.fdopen(fd, 'wb') as fo:
pickle.dump(obj, fo, pickle_protocol)
fo.flush()
os.fsync(fd)
renamer(tmppath, dest)
def search_tree(root, glob_match, ext='', exts=None, dir_ext=None):
"""Look in root, for any files/dirs matching glob, recursively traversing
any found directories looking for files ending with ext
:param root: start of search path
:param glob_match: glob to match in root, matching dirs are traversed with
os.walk
:param ext: only files that end in ext will be returned
:param exts: a list of file extensions; only files that end in one of these
extensions will be returned; if set, this list takes the place of the
single 'ext' param
:param dir_ext: if present directories that end with dir_ext will not be
traversed and instead will be returned as a matched path
:returns: list of full paths to matching files, sorted
"""
exts = exts or [ext]
found_files = []
for path in glob.glob(os.path.join(root, glob_match)):
if os.path.isdir(path):
for root, dirs, files in os.walk(path):
if dir_ext and root.endswith(dir_ext):
found_files.append(root)
# the root is a config dir, descend no further
break
for file_ in files:
if any(exts) and not any(file_.endswith(e) for e in exts):
continue
found_files.append(os.path.join(root, file_))
found_dir = False
for dir_ in dirs:
if dir_ext and dir_.endswith(dir_ext):
found_dir = True
found_files.append(os.path.join(root, dir_))
if found_dir:
# do not descend further into matching directories
break
else:
if ext and not path.endswith(ext):
continue
found_files.append(path)
return sorted(found_files)
def write_file(path, contents):
"""Write contents to file at path
:param path: any path, subdirs will be created as needed
:param contents: data to write to file, will be converted to string
"""
dirname, name = os.path.split(path)
if not os.path.exists(dirname):
try:
os.makedirs(dirname)
except OSError as err:
if err.errno == errno.EACCES:
sys.exit('Unable to create %s. Running as '
'non-root?' % dirname)
with open(path, 'w') as f:
f.write('%s' % contents)
def remove_file(path):
"""Quiet wrapper for os.unlink, OSErrors are suppressed
:param path: first and only argument passed to os.unlink
"""
try:
os.unlink(path)
except OSError:
pass
def audit_location_generator(devices, datadir, suffix='',
mount_check=True, logger=None):
'''
Given a devices path and a data directory, yield (path, device,
partition) for all files in that directory
:param devices: parent directory of the devices to be audited
:param datadir: a directory located under self.devices. This should be
one of the DATADIR constants defined in the account,
container, and object servers.
:param suffix: path name suffix required for all names returned
:param mount_check: Flag to check if a mount check should be performed
on devices
:param logger: a logger object
'''
device_dir = listdir(devices)
# randomize devices in case of process restart before sweep completed
shuffle(device_dir)
for device in device_dir:
if mount_check and not ismount(os.path.join(devices, device)):
if logger:
logger.warning(
_('Skipping %s as it is not mounted'), device)
continue
datadir_path = os.path.join(devices, device, datadir)
try:
partitions = listdir(datadir_path)
except OSError as e:
if logger:
logger.warning('Skipping %s because %s', datadir_path, e)
continue
for partition in partitions:
part_path = os.path.join(datadir_path, partition)
try:
suffixes = listdir(part_path)
except OSError as e:
if e.errno != errno.ENOTDIR:
raise
continue
for asuffix in suffixes:
suff_path = os.path.join(part_path, asuffix)
try:
hashes = listdir(suff_path)
except OSError as e:
if e.errno != errno.ENOTDIR:
raise
continue
for hsh in hashes:
hash_path = os.path.join(suff_path, hsh)
try:
files = sorted(listdir(hash_path), reverse=True)
except OSError as e:
if e.errno != errno.ENOTDIR:
raise
continue
for fname in files:
if suffix and not fname.endswith(suffix):
continue
path = os.path.join(hash_path, fname)
yield path, device, partition
def ratelimit_sleep(running_time, max_rate, incr_by=1, rate_buffer=5):
'''
Will eventlet.sleep() for the appropriate time so that the max_rate
is never exceeded. If max_rate is 0, will not ratelimit. The
maximum recommended rate should not exceed (1000 * incr_by) a second
as eventlet.sleep() does involve some overhead. Returns running_time
that should be used for subsequent calls.
:param running_time: the running time in milliseconds of the next
allowable request. Best to start at zero.
:param max_rate: The maximum rate per second allowed for the process.
:param incr_by: How much to increment the counter. Useful if you want
to ratelimit 1024 bytes/sec and have differing sizes
of requests. Must be > 0 to engage rate-limiting
behavior.
:param rate_buffer: Number of seconds the rate counter can drop and be
allowed to catch up (at a faster than listed rate).
A larger number will result in larger spikes in rate
but better average accuracy. Must be > 0 to engage
rate-limiting behavior.
'''
if max_rate <= 0 or incr_by <= 0:
return running_time
# 1,000 milliseconds = 1 second
clock_accuracy = 1000.0
# Convert seconds to milliseconds
now = time.time() * clock_accuracy
# Calculate time per request in milliseconds
time_per_request = clock_accuracy * (float(incr_by) / max_rate)
# Convert rate_buffer to milliseconds and compare
if now - running_time > rate_buffer * clock_accuracy:
running_time = now
elif running_time - now > time_per_request:
# Convert diff back to a floating point number of seconds and sleep
eventlet.sleep((running_time - now) / clock_accuracy)
# Return the absolute time for the next interval in milliseconds; note
# that time could have passed well beyond that point, but the next call
# will catch that and skip the sleep.
return running_time + time_per_request
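# Illustrative ratelimit_sleep() loop; work_items and handle() are placeholders:
#
#     running_time = 0
#     for item in work_items:
#         running_time = ratelimit_sleep(running_time, 100)
#         handle(item)   # the loop now runs at most ~100 iterations per second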
class ContextPool(GreenPool):
"GreenPool subclassed to kill its coros when it gets gc'ed"
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
for coro in list(self.coroutines_running):
coro.kill()
class GreenAsyncPileWaitallTimeout(Timeout):
pass
class GreenAsyncPile(object):
"""
Runs jobs in a pool of green threads, and the results can be retrieved by
using this object as an iterator.
This is very similar in principle to eventlet.GreenPile, except it returns
results as they become available rather than in the order they were
launched.
Correlating results with jobs (if necessary) is left to the caller.
"""
def __init__(self, size_or_pool):
"""
:param size_or_pool: thread pool size or a pool to use
"""
if isinstance(size_or_pool, GreenPool):
self._pool = size_or_pool
size = self._pool.size
else:
self._pool = GreenPool(size_or_pool)
size = size_or_pool
self._responses = eventlet.queue.LightQueue(size)
self._inflight = 0
self._pending = 0
def _run_func(self, func, args, kwargs):
try:
self._responses.put(func(*args, **kwargs))
finally:
self._inflight -= 1
def spawn(self, func, *args, **kwargs):
"""
Spawn a job in a green thread on the pile.
"""
self._pending += 1
self._inflight += 1
self._pool.spawn(self._run_func, func, args, kwargs)
def waitall(self, timeout):
"""
Wait timeout seconds for any results to come in.
:param timeout: seconds to wait for results
:returns: list of results accrued in that time
"""
results = []
try:
with GreenAsyncPileWaitallTimeout(timeout):
while True:
results.append(next(self))
except (GreenAsyncPileWaitallTimeout, StopIteration):
pass
return results
def __iter__(self):
return self
def next(self):
try:
rv = self._responses.get_nowait()
except Empty:
if self._inflight == 0:
raise StopIteration()
rv = self._responses.get()
self._pending -= 1
return rv
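# Illustrative GreenAsyncPile usage; fetch() and urls are placeholders:
#
#     pile = GreenAsyncPile(10)
#     for url in urls:
#         pile.spawn(fetch, url)
#     results = list(pile)   # results arrive as they complete, not in spawn order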
class ModifiedParseResult(ParseResult):
"Parse results class for urlparse."
@property
def hostname(self):
netloc = self.netloc.split('@', 1)[-1]
if netloc.startswith('['):
return netloc[1:].split(']')[0]
elif ':' in netloc:
return netloc.rsplit(':')[0]
return netloc
@property
def port(self):
netloc = self.netloc.split('@', 1)[-1]
if netloc.startswith('['):
netloc = netloc.rsplit(']')[1]
if ':' in netloc:
return int(netloc.rsplit(':')[1])
return None
def urlparse(url):
"""
urlparse augmentation.
This is necessary because urlparse can't handle RFC 2732 URLs.
:param url: URL to parse.
"""
return ModifiedParseResult(*stdlib_urlparse(url))
def validate_sync_to(value, allowed_sync_hosts, realms_conf):
"""
Validates an X-Container-Sync-To header value, returning the
validated endpoint, realm, and realm_key, or an error string.
:param value: The X-Container-Sync-To header value to validate.
:param allowed_sync_hosts: A list of allowed hosts in endpoints,
if realms_conf does not apply.
:param realms_conf: An instance of
swift.common.container_sync_realms.ContainerSyncRealms to
validate against.
:returns: A tuple of (error_string, validated_endpoint, realm,
realm_key). The error_string will be None if the rest of the
values have been validated. The validated_endpoint will be
the validated endpoint to sync to. The realm and realm_key
will be set if validation was done through realms_conf.
"""
orig_value = value
value = value.rstrip('/')
if not value:
return (None, None, None, None)
if value.startswith('//'):
if not realms_conf:
return (None, None, None, None)
data = value[2:].split('/')
if len(data) != 4:
return (
_('Invalid X-Container-Sync-To format %r') % orig_value,
None, None, None)
realm, cluster, account, container = data
realm_key = realms_conf.key(realm)
if not realm_key:
return (_('No realm key for %r') % realm, None, None, None)
endpoint = realms_conf.endpoint(realm, cluster)
if not endpoint:
return (
_('No cluster endpoint for %r %r') % (realm, cluster),
None, None, None)
return (
None,
'%s/%s/%s' % (endpoint.rstrip('/'), account, container),
realm.upper(), realm_key)
p = urlparse(value)
if p.scheme not in ('http', 'https'):
return (
_('Invalid scheme %r in X-Container-Sync-To, must be "//", '
'"http", or "https".') % p.scheme,
None, None, None)
if not p.path:
return (_('Path required in X-Container-Sync-To'), None, None, None)
if p.params or p.query or p.fragment:
return (
_('Params, queries, and fragments not allowed in '
'X-Container-Sync-To'),
None, None, None)
if p.hostname not in allowed_sync_hosts:
return (
_('Invalid host %r in X-Container-Sync-To') % p.hostname,
None, None, None)
return (None, value, None, None)
def affinity_key_function(affinity_str):
"""Turns an affinity config value into a function suitable for passing to
sort(). After doing so, the array will be sorted with respect to the given
ordering.
For example, if affinity_str is "r1=1, r2z7=2, r2z8=2", then the array
will be sorted with all nodes from region 1 (r1=1) first, then all the
nodes from region 2 zones 7 and 8 (r2z7=2 and r2z8=2), then everything
else.
Note that the order of the pieces of affinity_str is irrelevant; the
priority values are what comes after the equals sign.
If affinity_str is empty or all whitespace, then the resulting function
will not alter the ordering of the nodes.
:param affinity_str: affinity config value, e.g. "r1z2=3"
or "r1=1, r2z1=2, r2z2=2"
:returns: single-argument function
:raises: ValueError if argument invalid
"""
affinity_str = affinity_str.strip()
if not affinity_str:
return lambda x: 0
priority_matchers = []
pieces = [s.strip() for s in affinity_str.split(',')]
for piece in pieces:
# matches r<number>=<number> or r<number>z<number>=<number>
match = re.match("r(\d+)(?:z(\d+))?=(\d+)$", piece)
if match:
region, zone, priority = match.groups()
region = int(region)
priority = int(priority)
zone = int(zone) if zone else None
matcher = {'region': region, 'priority': priority}
if zone is not None:
matcher['zone'] = zone
priority_matchers.append(matcher)
else:
raise ValueError("Invalid affinity value: %r" % affinity_str)
priority_matchers.sort(key=operator.itemgetter('priority'))
def keyfn(ring_node):
for matcher in priority_matchers:
if (matcher['region'] == ring_node['region']
and ('zone' not in matcher
or matcher['zone'] == ring_node['zone'])):
return matcher['priority']
return 4294967296 # 2^32, i.e. "a big number"
return keyfn
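# Illustrative affinity_key_function() usage; nodes is a placeholder list of
# ring node dicts carrying 'region' and 'zone' keys:
#
#     keyfn = affinity_key_function('r1=1, r2z2=2')
#     nodes.sort(key=keyfn)   # r1 nodes first, then r2z2 nodes, then the rest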
def affinity_locality_predicate(write_affinity_str):
"""
Turns a write-affinity config value into a predicate function for nodes.
The returned value will be a 1-arg function that takes a node dictionary
and returns a true value if it is "local" and a false value otherwise. The
definition of "local" comes from the affinity_str argument passed in here.
For example, if affinity_str is "r1, r2z2", then only nodes where region=1
or where (region=2 and zone=2) are considered local.
If affinity_str is empty or all whitespace, then the resulting function
will consider everything local.
:param write_affinity_str: affinity config value, e.g. "r1z2"
or "r1, r2z1, r2z2"
:returns: single-argument function, or None if affinity_str is empty
:raises: ValueError if argument invalid
"""
affinity_str = write_affinity_str.strip()
if not affinity_str:
return None
matchers = []
pieces = [s.strip() for s in affinity_str.split(',')]
for piece in pieces:
# matches r<number> or r<number>z<number>
match = re.match("r(\d+)(?:z(\d+))?$", piece)
if match:
region, zone = match.groups()
region = int(region)
zone = int(zone) if zone else None
matcher = {'region': region}
if zone is not None:
matcher['zone'] = zone
matchers.append(matcher)
else:
raise ValueError("Invalid write-affinity value: %r" % affinity_str)
def is_local(ring_node):
for matcher in matchers:
if (matcher['region'] == ring_node['region']
and ('zone' not in matcher
or matcher['zone'] == ring_node['zone'])):
return True
return False
return is_local
def get_remote_client(req):
# remote host for zeus
client = req.headers.get('x-cluster-client-ip')
if not client and 'x-forwarded-for' in req.headers:
# remote host for other lbs
client = req.headers['x-forwarded-for'].split(',')[0].strip()
if not client:
client = req.remote_addr
return client
def human_readable(value):
"""
Returns the number in a human readable format; for example 1048576 = "1Mi".
"""
value = float(value)
index = -1
suffixes = 'KMGTPEZY'
while value >= 1024 and index + 1 < len(suffixes):
index += 1
value = round(value / 1024)
if index == -1:
return '%d' % value
return '%d%si' % (round(value), suffixes[index])
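# A few concrete human_readable() results:
#
#     human_readable(123)       # -> '123'
#     human_readable(1024)      # -> '1Ki'
#     human_readable(1048576)   # -> '1Mi'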
def put_recon_cache_entry(cache_entry, key, item):
"""
Function that will check if item is a dict, and if so put it under
cache_entry[key]. We use nested recon cache entries when the object
auditor runs in parallel or else in 'once' mode with a specified
subset of devices.
"""
if isinstance(item, dict):
if key not in cache_entry or key in cache_entry and not \
isinstance(cache_entry[key], dict):
cache_entry[key] = {}
elif key in cache_entry and item == {}:
cache_entry.pop(key, None)
return
for k, v in item.items():
if v == {}:
cache_entry[key].pop(k, None)
else:
cache_entry[key][k] = v
else:
cache_entry[key] = item
def dump_recon_cache(cache_dict, cache_file, logger, lock_timeout=2):
"""Update recon cache values
:param cache_dict: Dictionary of cache key/value pairs to write out
:param cache_file: cache file to update
:param logger: the logger to use to log an encountered error
:param lock_timeout: timeout (in seconds)
"""
try:
with lock_file(cache_file, lock_timeout, unlink=False) as cf:
cache_entry = {}
try:
existing_entry = cf.readline()
if existing_entry:
cache_entry = json.loads(existing_entry)
except ValueError:
# file doesn't have a valid entry, we'll recreate it
pass
for cache_key, cache_value in cache_dict.items():
put_recon_cache_entry(cache_entry, cache_key, cache_value)
try:
with NamedTemporaryFile(dir=os.path.dirname(cache_file),
delete=False) as tf:
tf.write(json.dumps(cache_entry) + '\n')
renamer(tf.name, cache_file, fsync=False)
finally:
try:
os.unlink(tf.name)
except OSError as err:
if err.errno != errno.ENOENT:
raise
except (Exception, Timeout):
logger.exception(_('Exception dumping recon cache'))
def listdir(path):
try:
return os.listdir(path)
except OSError as err:
if err.errno != errno.ENOENT:
raise
return []
def streq_const_time(s1, s2):
"""Constant-time string comparison.
:param s1: the first string
:param s2: the second string
:return: True if the strings are equal.
This function takes two strings and compares them. It is intended to be
used when doing a comparison for authentication purposes to help guard
against timing attacks.
"""
if len(s1) != len(s2):
return False
result = 0
for (a, b) in zip(s1, s2):
result |= ord(a) ^ ord(b)
return result == 0
def pairs(item_list):
"""
Returns an iterator of all pairs of elements from item_list.
:param item_list: items (no duplicates allowed)
"""
for i, item1 in enumerate(item_list):
for item2 in item_list[(i + 1):]:
yield (item1, item2)
def replication(func):
"""
Decorator to declare which methods are accessible for different
types of servers:
* If option replication_server is None then this decorator
doesn't matter.
* If option replication_server is True then ONLY methods decorated
with this decorator will be started.
* If option replication_server is False then methods decorated with
this decorator will NOT be started.
:param func: function to mark accessible for replication
"""
func.replication = True
return func
def public(func):
"""
Decorator to declare which methods are publicly accessible as HTTP
requests
:param func: function to make public
"""
func.publicly_accessible = True
@functools.wraps(func)
def wrapped(*a, **kw):
return func(*a, **kw)
return wrapped
def quorum_size(n):
"""
quorum size as it applies to services that use 'replication' for data
integrity (Account/Container services). Object quorum_size is defined
on a storage policy basis.
Number of successful backend requests needed for the proxy to consider
the client request successful.
"""
return (n // 2) + 1
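# For example, quorum_size(3) == 2 and quorum_size(4) == 3.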
def rsync_ip(ip):
"""
Transform ip string to an rsync-compatible form
Will return ipv4 addresses unchanged, but will nest ipv6 addresses
inside square brackets.
:param ip: an ip string (ipv4 or ipv6)
:returns: a string ip address
"""
try:
socket.inet_pton(socket.AF_INET6, ip)
except socket.error: # it's IPv4
return ip
else:
return '[%s]' % ip
def rsync_module_interpolation(template, device):
"""
Interpolate devices variables inside a rsync module template
:param template: rsync module template as a string
:param device: a device from a ring
:returns: a string with all variables replaced by device attributes
"""
replacements = {
'ip': rsync_ip(device.get('ip', '')),
'port': device.get('port', ''),
'replication_ip': rsync_ip(device.get('replication_ip', '')),
'replication_port': device.get('replication_port', ''),
'region': device.get('region', ''),
'zone': device.get('zone', ''),
'device': device.get('device', ''),
'meta': device.get('meta', ''),
}
try:
module = template.format(**replacements)
except KeyError as e:
raise ValueError('Cannot interpolate rsync_module, invalid variable: '
'%s' % e)
return module
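# Illustrative rsync_module_interpolation() call; the device dict is a trimmed
# placeholder for a real ring device entry:
#
#     dev = {'ip': '192.168.1.1', 'port': 6000, 'device': 'sda1'}
#     rsync_module_interpolation('{ip}::object_{device}', dev)
#     # -> '192.168.1.1::object_sda1'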
def get_valid_utf8_str(str_or_unicode):
"""
Get the valid utf-8 portion of a value, given a str, a unicode, or even an
invalid utf-8 str.
:param str_or_unicode: a string or a unicode which may contain invalid utf-8
"""
if isinstance(str_or_unicode, six.text_type):
(str_or_unicode, _len) = utf8_encoder(str_or_unicode, 'replace')
(valid_utf8_str, _len) = utf8_decoder(str_or_unicode, 'replace')
return valid_utf8_str.encode('utf-8')
def list_from_csv(comma_separated_str):
"""
Splits the str given and returns a properly stripped list of the comma
separated values.
"""
if comma_separated_str:
return [v.strip() for v in comma_separated_str.split(',') if v.strip()]
return []
def csv_append(csv_string, item):
"""
Appends an item to a comma-separated string.
If the comma-separated string is empty/None, just returns item.
"""
if csv_string:
return ",".join((csv_string, item))
else:
return item
class CloseableChain(object):
"""
Like itertools.chain, but with a close method that will attempt to invoke
its sub-iterators' close methods, if any.
"""
def __init__(self, *iterables):
self.iterables = iterables
def __iter__(self):
return iter(itertools.chain(*(self.iterables)))
def close(self):
for it in self.iterables:
close_method = getattr(it, 'close', None)
if close_method:
close_method()
def reiterate(iterable):
"""
Consume the first item from an iterator, then re-chain it to the rest of
the iterator. This is useful when you want to make sure the prologue to
downstream generators has been executed before continuing.
:param iterable: an iterable object
"""
if isinstance(iterable, (list, tuple)):
return iterable
else:
iterator = iter(iterable)
try:
chunk = ''
while not chunk:
chunk = next(iterator)
return CloseableChain([chunk], iterator)
except StopIteration:
return []
class InputProxy(object):
"""
File-like object that counts bytes read.
To be swapped in for wsgi.input for accounting purposes.
"""
def __init__(self, wsgi_input):
"""
:param wsgi_input: file-like object to wrap the functionality of
"""
self.wsgi_input = wsgi_input
self.bytes_received = 0
self.client_disconnect = False
def read(self, *args, **kwargs):
"""
Pass read request to the underlying file-like object and
add bytes read to total.
"""
try:
chunk = self.wsgi_input.read(*args, **kwargs)
except Exception:
self.client_disconnect = True
raise
self.bytes_received += len(chunk)
return chunk
def readline(self, *args, **kwargs):
"""
Pass readline request to the underlying file-like object and
add bytes read to total.
"""
try:
line = self.wsgi_input.readline(*args, **kwargs)
except Exception:
self.client_disconnect = True
raise
self.bytes_received += len(line)
return line
class LRUCache(object):
"""
Decorator for size/time bound memoization that evicts the least
recently used members.
"""
PREV, NEXT, KEY, CACHED_AT, VALUE = 0, 1, 2, 3, 4 # link fields
def __init__(self, maxsize=1000, maxtime=3600):
self.maxsize = maxsize
self.maxtime = maxtime
self.reset()
def reset(self):
self.mapping = {}
self.head = [None, None, None, None, None] # oldest
self.tail = [self.head, None, None, None, None] # newest
self.head[self.NEXT] = self.tail
def set_cache(self, value, *key):
while len(self.mapping) >= self.maxsize:
old_next, old_key = self.head[self.NEXT][self.NEXT:self.NEXT + 2]
self.head[self.NEXT], old_next[self.PREV] = old_next, self.head
del self.mapping[old_key]
last = self.tail[self.PREV]
link = [last, self.tail, key, time.time(), value]
self.mapping[key] = last[self.NEXT] = self.tail[self.PREV] = link
return value
def get_cached(self, link, *key):
link_prev, link_next, key, cached_at, value = link
if cached_at + self.maxtime < time.time():
raise KeyError('%r has timed out' % (key,))
link_prev[self.NEXT] = link_next
link_next[self.PREV] = link_prev
last = self.tail[self.PREV]
last[self.NEXT] = self.tail[self.PREV] = link
link[self.PREV] = last
link[self.NEXT] = self.tail
return value
def __call__(self, f):
class LRUCacheWrapped(object):
@functools.wraps(f)
def __call__(im_self, *key):
link = self.mapping.get(key, self.head)
if link is not self.head:
try:
return self.get_cached(link, *key)
except KeyError:
pass
value = f(*key)
self.set_cache(value, *key)
return value
def size(im_self):
"""
Return the size of the cache
"""
return len(self.mapping)
def reset(im_self):
return self.reset()
def get_maxsize(im_self):
return self.maxsize
def set_maxsize(im_self, i):
self.maxsize = i
def get_maxtime(im_self):
return self.maxtime
def set_maxtime(im_self, i):
self.maxtime = i
maxsize = property(get_maxsize, set_maxsize)
maxtime = property(get_maxtime, set_maxtime)
def __repr__(im_self):
return '<%s %r>' % (im_self.__class__.__name__, f)
return LRUCacheWrapped()
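# Illustrative LRUCache usage; expensive_lookup() is a placeholder:
#
#     @LRUCache(maxsize=100, maxtime=60)
#     def expensive_lookup(key):
#         ...
#
# Repeated calls with the same positional arguments within 60 seconds return
# the cached value; least recently used entries are evicted beyond 100 items.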
def tpool_reraise(func, *args, **kwargs):
"""
Hack to work around Eventlet's tpool not catching and reraising Timeouts.
"""
def inner():
try:
return func(*args, **kwargs)
except BaseException as err:
return err
resp = tpool.execute(inner)
if isinstance(resp, BaseException):
raise resp
return resp
class ThreadPool(object):
"""
Perform blocking operations in background threads.
Call its methods from within greenlets to green-wait for results without
blocking the eventlet reactor (hopefully).
"""
BYTE = 'a'.encode('utf-8')
def __init__(self, nthreads=2):
self.nthreads = nthreads
self._run_queue = Queue()
self._result_queue = Queue()
self._threads = []
self._alive = True
if nthreads <= 0:
return
# We spawn a greenthread whose job it is to pull results from the
# worker threads via a real Queue and send them to eventlet Events so
# that the calling greenthreads can be awoken.
#
# Since each OS thread has its own collection of greenthreads, it
# doesn't work to have the worker thread send stuff to the event, as
# it then notifies its own thread-local eventlet hub to wake up, which
# doesn't do anything to help out the actual calling greenthread over
# in the main thread.
#
# Thus, each worker sticks its results into a result queue and then
# writes a byte to a pipe, signaling the result-consuming greenlet (in
# the main thread) to wake up and consume results.
#
# This is all stuff that eventlet.tpool does, but that code can't have
# multiple instances instantiated. Since the object server uses one
# pool per disk, we have to reimplement this stuff.
_raw_rpipe, self.wpipe = os.pipe()
self.rpipe = greenio.GreenPipe(_raw_rpipe, 'rb', bufsize=0)
for _junk in range(nthreads):
thr = stdlib_threading.Thread(
target=self._worker,
args=(self._run_queue, self._result_queue))
thr.daemon = True
thr.start()
self._threads.append(thr)
# This is the result-consuming greenthread that runs in the main OS
# thread, as described above.
self._consumer_coro = greenthread.spawn_n(self._consume_results,
self._result_queue)
def _worker(self, work_queue, result_queue):
"""
Pulls an item from the queue and runs it, then puts the result into
the result queue. Repeats forever.
:param work_queue: queue from which to pull work
:param result_queue: queue into which to place results
"""
while True:
item = work_queue.get()
if item is None:
break
ev, func, args, kwargs = item
try:
result = func(*args, **kwargs)
result_queue.put((ev, True, result))
except BaseException:
result_queue.put((ev, False, sys.exc_info()))
finally:
work_queue.task_done()
os.write(self.wpipe, self.BYTE)
def _consume_results(self, queue):
"""
Runs as a greenthread in the same OS thread as callers of
run_in_thread().
Takes results from the worker OS threads and sends them to the waiting
greenthreads.
"""
while True:
try:
self.rpipe.read(1)
except ValueError:
# can happen at process shutdown when pipe is closed
break
while True:
try:
ev, success, result = queue.get(block=False)
except Empty:
break
try:
if success:
ev.send(result)
else:
ev.send_exception(*result)
finally:
queue.task_done()
def run_in_thread(self, func, *args, **kwargs):
"""
Runs ``func(*args, **kwargs)`` in a thread. Blocks the current greenlet
until results are available.
Exceptions thrown will be reraised in the calling thread.
If the threadpool was initialized with nthreads=0, it invokes
``func(*args, **kwargs)`` directly, followed by eventlet.sleep() to
ensure the eventlet hub has a chance to execute. It is more likely the
hub will be invoked when queuing operations to an external thread.
:returns: result of calling func
:raises: whatever func raises
"""
if not self._alive:
raise swift.common.exceptions.ThreadPoolDead()
if self.nthreads <= 0:
result = func(*args, **kwargs)
sleep()
return result
ev = event.Event()
self._run_queue.put((ev, func, args, kwargs), block=False)
# blocks this greenlet (and only *this* greenlet) until the real
# thread calls ev.send().
result = ev.wait()
return result
def _run_in_eventlet_tpool(self, func, *args, **kwargs):
"""
Really run something in an external thread, even if we haven't got any
threads of our own.
"""
def inner():
try:
return (True, func(*args, **kwargs))
except (Timeout, BaseException) as err:
return (False, err)
success, result = tpool.execute(inner)
if success:
return result
else:
raise result
def force_run_in_thread(self, func, *args, **kwargs):
"""
Runs ``func(*args, **kwargs)`` in a thread. Blocks the current greenlet
until results are available.
Exceptions thrown will be reraised in the calling thread.
If the threadpool was initialized with nthreads=0, uses eventlet.tpool
to run the function. This is in contrast to run_in_thread(), which
will (in that case) simply execute func in the calling thread.
:returns: result of calling func
:raises: whatever func raises
"""
if not self._alive:
raise swift.common.exceptions.ThreadPoolDead()
if self.nthreads <= 0:
return self._run_in_eventlet_tpool(func, *args, **kwargs)
else:
return self.run_in_thread(func, *args, **kwargs)
def terminate(self):
"""
Releases the threadpool's resources (OS threads, greenthreads, pipes,
etc.) and renders it unusable.
Don't call run_in_thread() or force_run_in_thread() after calling
terminate().
"""
self._alive = False
if self.nthreads <= 0:
return
for _junk in range(self.nthreads):
self._run_queue.put(None)
for thr in self._threads:
thr.join()
self._threads = []
self.nthreads = 0
greenthread.kill(self._consumer_coro)
self.rpipe.close()
os.close(self.wpipe)
def ismount(path):
"""
Test whether a path is a mount point. This will catch any
exceptions and translate them into a False return value.
Use ismount_raw to have the exceptions raised instead.
"""
try:
return ismount_raw(path)
except OSError:
return False
def ismount_raw(path):
"""
Test whether a path is a mount point. Whereas ismount will catch
any exceptions and just return False, this raw version will not
catch exceptions.
This is code hijacked from C Python 2.6.8, adapted to remove the extra
lstat() system call.
"""
try:
s1 = os.lstat(path)
except os.error as err:
if err.errno == errno.ENOENT:
# It doesn't exist -- so not a mount point :-)
return False
raise
if stat.S_ISLNK(s1.st_mode):
# A symlink can never be a mount point
return False
s2 = os.lstat(os.path.join(path, '..'))
dev1 = s1.st_dev
dev2 = s2.st_dev
if dev1 != dev2:
# path/.. on a different device as path
return True
ino1 = s1.st_ino
ino2 = s2.st_ino
if ino1 == ino2:
# path/.. is the same i-node as path
return True
return False
def close_if_possible(maybe_closable):
close_method = getattr(maybe_closable, 'close', None)
if callable(close_method):
return close_method()
@contextmanager
def closing_if_possible(maybe_closable):
"""
Like contextlib.closing(), but doesn't crash if the object lacks a close()
method.
PEP 333 (WSGI) says: "If the iterable returned by the application has a
close() method, the server or gateway must call that method upon
completion of the current request[.]" This function makes that easier.
"""
try:
yield maybe_closable
finally:
close_if_possible(maybe_closable)
_rfc_token = r'[^()<>@,;:\"/\[\]?={}\x00-\x20\x7f]+'
_rfc_extension_pattern = re.compile(
r'(?:\s*;\s*(' + _rfc_token + r")\s*(?:=\s*(" + _rfc_token +
r'|"(?:[^"\\]|\\.)*"))?)')
_content_range_pattern = re.compile(r'^bytes (\d+)-(\d+)/(\d+)$')
def parse_content_range(content_range):
"""
Parse a content-range header into (first_byte, last_byte, total_size).
See RFC 7233 section 4.2 for details on the header format, but it's
basically "Content-Range: bytes ${start}-${end}/${total}".
:param content_range: Content-Range header value to parse,
e.g. "bytes 100-1249/49004"
:returns: 3-tuple (start, end, total)
:raises: ValueError if malformed
"""
found = re.search(_content_range_pattern, content_range)
if not found:
raise ValueError("malformed Content-Range %r" % (content_range,))
return tuple(int(x) for x in found.groups())
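# For example, parse_content_range('bytes 100-1249/49004') returns
# (100, 1249, 49004); anything not matching "bytes <start>-<end>/<total>"
# raises ValueError.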
def parse_content_type(content_type):
"""
Parse a content-type and its parameters into values.
RFC 2616 sec 14.17 and 3.7 are pertinent.
**Examples**::
'text/plain; charset=UTF-8' -> ('text/plain', [('charset', 'UTF-8')])
'text/plain; charset=UTF-8; level=1' ->
('text/plain', [('charset', 'UTF-8'), ('level', '1')])
:param content_type: content_type to parse
:returns: a tuple containing (content type, list of k, v parameter tuples)
"""
parm_list = []
if ';' in content_type:
content_type, parms = content_type.split(';', 1)
parms = ';' + parms
for m in _rfc_extension_pattern.findall(parms):
key = m[0].strip()
value = m[1].strip()
parm_list.append((key, value))
return content_type, parm_list
def override_bytes_from_content_type(listing_dict, logger=None):
"""
Takes a dict from a container listing and overrides the content_type,
bytes fields if swift_bytes is set.
"""
content_type, params = parse_content_type(listing_dict['content_type'])
for key, value in params:
if key == 'swift_bytes':
try:
listing_dict['bytes'] = int(value)
except ValueError:
if logger:
logger.exception("Invalid swift_bytes")
else:
content_type += ';%s=%s' % (key, value)
listing_dict['content_type'] = content_type
def clean_content_type(value):
if ';' in value:
left, right = value.rsplit(';', 1)
if right.lstrip().startswith('swift_bytes='):
return left
return value
def quote(value, safe='/'):
"""
Patched version of urllib.quote that encodes utf-8 strings before quoting
"""
return _quote(get_valid_utf8_str(value), safe)
def get_expirer_container(x_delete_at, expirer_divisor, acc, cont, obj):
"""
Returns an expiring object container name for the given X-Delete-At and
a/c/o.
"""
shard_int = int(hash_path(acc, cont, obj), 16) % 100
return normalize_delete_at_timestamp(
int(x_delete_at) / expirer_divisor * expirer_divisor - shard_int)
class _MultipartMimeFileLikeObject(object):
def __init__(self, wsgi_input, boundary, input_buffer, read_chunk_size):
self.no_more_data_for_this_file = False
self.no_more_files = False
self.wsgi_input = wsgi_input
self.boundary = boundary
self.input_buffer = input_buffer
self.read_chunk_size = read_chunk_size
def read(self, length=None):
if not length:
length = self.read_chunk_size
if self.no_more_data_for_this_file:
return ''
# read enough data to know whether we're going to run
# into a boundary in next [length] bytes
if len(self.input_buffer) < length + len(self.boundary) + 2:
to_read = length + len(self.boundary) + 2
while to_read > 0:
try:
chunk = self.wsgi_input.read(to_read)
except (IOError, ValueError) as e:
raise swift.common.exceptions.ChunkReadError(str(e))
to_read -= len(chunk)
self.input_buffer += chunk
if not chunk:
self.no_more_files = True
break
boundary_pos = self.input_buffer.find(self.boundary)
# boundary does not exist in the next (length) bytes
if boundary_pos == -1 or boundary_pos > length:
ret = self.input_buffer[:length]
self.input_buffer = self.input_buffer[length:]
# if it does, just return data up to the boundary
else:
ret, self.input_buffer = self.input_buffer.split(self.boundary, 1)
self.no_more_files = self.input_buffer.startswith('--')
self.no_more_data_for_this_file = True
self.input_buffer = self.input_buffer[2:]
return ret
def readline(self):
if self.no_more_data_for_this_file:
return ''
boundary_pos = newline_pos = -1
while newline_pos < 0 and boundary_pos < 0:
try:
chunk = self.wsgi_input.read(self.read_chunk_size)
except (IOError, ValueError) as e:
raise swift.common.exceptions.ChunkReadError(str(e))
self.input_buffer += chunk
newline_pos = self.input_buffer.find('\r\n')
boundary_pos = self.input_buffer.find(self.boundary)
if not chunk:
self.no_more_files = True
break
# found a newline
if newline_pos >= 0 and \
(boundary_pos < 0 or newline_pos < boundary_pos):
# Use self.read to ensure any logic there happens...
ret = ''
to_read = newline_pos + 2
while to_read > 0:
chunk = self.read(to_read)
# Should never happen since we're reading from input_buffer,
# but just for completeness...
if not chunk:
break
to_read -= len(chunk)
ret += chunk
return ret
else: # no newlines, just return up to next boundary
return self.read(len(self.input_buffer))
def iter_multipart_mime_documents(wsgi_input, boundary, read_chunk_size=4096):
"""
Given a multi-part-mime-encoded input file object and boundary,
yield file-like objects for each part. Note that this does not
split each part into headers and body; the caller is responsible
for doing that if necessary.
:param wsgi_input: The file-like object to read from.
:param boundary: The mime boundary to separate new file-like
objects on.
:returns: A generator of file-like objects for each part.
:raises: MimeInvalid if the document is malformed
"""
boundary = '--' + boundary
blen = len(boundary) + 2 # \r\n
try:
got = wsgi_input.readline(blen)
while got == '\r\n':
got = wsgi_input.readline(blen)
except (IOError, ValueError) as e:
raise swift.common.exceptions.ChunkReadError(str(e))
if got.strip() != boundary:
raise swift.common.exceptions.MimeInvalid(
'invalid starting boundary: wanted %r, got %r', (boundary, got))
boundary = '\r\n' + boundary
input_buffer = ''
done = False
while not done:
it = _MultipartMimeFileLikeObject(wsgi_input, boundary, input_buffer,
read_chunk_size)
yield it
done = it.no_more_files
input_buffer = it.input_buffer
def parse_mime_headers(doc_file):
"""
Takes a file-like object containing a MIME document and returns a
HeaderKeyDict containing the headers. The body of the message is not
consumed: the position in doc_file is left at the beginning of the body.
This function was inspired by the Python standard library's
http.client.parse_headers.
:param doc_file: binary file-like object containing a MIME document
:returns: a swift.common.swob.HeaderKeyDict containing the headers
"""
from swift.common.swob import HeaderKeyDict # avoid circular import
headers = []
while True:
line = doc_file.readline()
headers.append(line)
if line in (b'\r\n', b'\n', b''):
break
header_string = b''.join(headers)
return HeaderKeyDict(email.parser.Parser().parsestr(header_string))
def mime_to_document_iters(input_file, boundary, read_chunk_size=4096):
"""
Takes a file-like object containing a multipart MIME document and
returns an iterator of (headers, body-file) tuples.
:param input_file: file-like object with the MIME doc in it
:param boundary: MIME boundary, sans dashes
(e.g. "divider", not "--divider")
:param read_chunk_size: size of strings read via input_file.read()
"""
doc_files = iter_multipart_mime_documents(input_file, boundary,
read_chunk_size)
for i, doc_file in enumerate(doc_files):
# this consumes the headers and leaves just the body in doc_file
headers = parse_mime_headers(doc_file)
yield (headers, doc_file)
def maybe_multipart_byteranges_to_document_iters(app_iter, content_type):
"""
Takes an iterator that may or may not contain a multipart MIME document
as well as content type and returns an iterator of body iterators.
:param app_iter: iterator that may contain a multipart MIME document
:param content_type: content type of the app_iter, used to determine
                         whether it contains a multipart document and, if
so, what the boundary is between documents
"""
content_type, params_list = parse_content_type(content_type)
if content_type != 'multipart/byteranges':
yield app_iter
return
body_file = FileLikeIter(app_iter)
boundary = dict(params_list)['boundary']
for _headers, body in mime_to_document_iters(body_file, boundary):
yield (chunk for chunk in iter(lambda: body.read(65536), ''))
def document_iters_to_multipart_byteranges(ranges_iter, boundary):
"""
Takes an iterator of range iters and yields a multipart/byteranges MIME
document suitable for sending as the body of a multi-range 206 response.
See document_iters_to_http_response_body for parameter descriptions.
"""
divider = "--" + boundary + "\r\n"
terminator = "--" + boundary + "--"
for range_spec in ranges_iter:
start_byte = range_spec["start_byte"]
end_byte = range_spec["end_byte"]
entity_length = range_spec.get("entity_length", "*")
content_type = range_spec["content_type"]
part_iter = range_spec["part_iter"]
part_header = ''.join((
divider,
"Content-Type: ", str(content_type), "\r\n",
"Content-Range: ", "bytes %d-%d/%s\r\n" % (
start_byte, end_byte, entity_length),
"\r\n"
))
yield part_header
for chunk in part_iter:
yield chunk
yield "\r\n"
yield terminator
def document_iters_to_http_response_body(ranges_iter, boundary, multipart,
logger):
"""
Takes an iterator of range iters and turns it into an appropriate
HTTP response body, whether that's multipart/byteranges or not.
This is almost, but not quite, the inverse of
http_response_to_document_iters(). This function only yields chunks of
the body, not any headers.
:param ranges_iter: an iterator of dictionaries, one per range.
Each dictionary must contain at least the following key:
"part_iter": iterator yielding the bytes in the range
Additionally, if multipart is True, then the following other keys
are required:
"start_byte": index of the first byte in the range
"end_byte": index of the last byte in the range
"content_type": value for the range's Content-Type header
Finally, there is one optional key that is used in the
multipart/byteranges case:
"entity_length": length of the requested entity (not necessarily
equal to the response length). If omitted, "*" will be used.
Each part_iter will be exhausted prior to calling next(ranges_iter).
:param boundary: MIME boundary to use, sans dashes (e.g. "boundary", not
"--boundary").
:param multipart: True if the response should be multipart/byteranges,
False otherwise. This should be True if and only if you have 2 or
more ranges.
:param logger: a logger
"""
if multipart:
return document_iters_to_multipart_byteranges(ranges_iter, boundary)
else:
try:
response_body_iter = next(ranges_iter)['part_iter']
except StopIteration:
return ''
# We need to make sure ranges_iter does not get garbage-collected
# before response_body_iter is exhausted. The reason is that
# ranges_iter has a finally block that calls close_swift_conn, and
# so if that finally block fires before we read response_body_iter,
# there's nothing there.
def string_along(useful_iter, useless_iter_iter, logger):
for x in useful_iter:
yield x
try:
next(useless_iter_iter)
except StopIteration:
pass
else:
logger.warn("More than one part in a single-part response?")
return string_along(response_body_iter, ranges_iter, logger)
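# Shape of a single element of ranges_iter as described in the docstring above
# (illustrative values only; with multipart=False just 'part_iter' is needed):
#
#   {'start_byte': 0,
#    'end_byte': 1023,
#    'entity_length': 4096,      # optional; "*" is substituted when missing
#    'content_type': 'text/plain',
#    'part_iter': iter(['first kilobyte of the object'])}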
def multipart_byteranges_to_document_iters(input_file, boundary,
read_chunk_size=4096):
"""
Takes a file-like object containing a multipart/byteranges MIME document
(see RFC 7233, Appendix A) and returns an iterator of (first-byte,
last-byte, length, document-headers, body-file) 5-tuples.
:param input_file: file-like object with the MIME doc in it
:param boundary: MIME boundary, sans dashes
(e.g. "divider", not "--divider")
:param read_chunk_size: size of strings read via input_file.read()
"""
for headers, body in mime_to_document_iters(input_file, boundary,
read_chunk_size):
first_byte, last_byte, length = parse_content_range(
headers.get('content-range'))
yield (first_byte, last_byte, length, headers.items(), body)
def http_response_to_document_iters(response, read_chunk_size=4096):
"""
Takes a successful object-GET HTTP response and turns it into an
iterator of (first-byte, last-byte, length, headers, body-file)
5-tuples.
The response must either be a 200 or a 206; if you feed in a 204 or
something similar, this probably won't work.
:param response: HTTP response, like from bufferedhttp.http_connect(),
not a swob.Response.
"""
if response.status == 200:
# Single "range" that's the whole object
content_length = int(response.getheader('Content-Length'))
return iter([(0, content_length - 1, content_length,
response.getheaders(), response)])
content_type, params_list = parse_content_type(
response.getheader('Content-Type'))
if content_type != 'multipart/byteranges':
# Single range; no MIME framing, just the bytes. The start and end
# byte indices are in the Content-Range header.
start, end, length = parse_content_range(
response.getheader('Content-Range'))
return iter([(start, end, length, response.getheaders(), response)])
else:
# Multiple ranges; the response body is a multipart/byteranges MIME
# document, and we have to parse it using the MIME boundary
# extracted from the Content-Type header.
params = dict(params_list)
return multipart_byteranges_to_document_iters(
response, params['boundary'], read_chunk_size)
#: Regular expression to match form attributes.
ATTRIBUTES_RE = re.compile(r'(\w+)=(".*?"|[^";]+)(; ?|$)')
def parse_content_disposition(header):
"""
Given the value of a header like:
Content-Disposition: form-data; name="somefile"; filename="test.html"
Return data like
("form-data", {"name": "somefile", "filename": "test.html"})
:param header: Value of a header (the part after the ': ').
:returns: (value name, dict) of the attribute data parsed (see above).
"""
attributes = {}
attrs = ''
if ';' in header:
header, attrs = [x.strip() for x in header.split(';', 1)]
m = True
while m:
m = ATTRIBUTES_RE.match(attrs)
if m:
attrs = attrs[len(m.group(0)):]
attributes[m.group(1)] = m.group(2).strip('"')
return header, attributes
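# Illustrative usage of parse_content_disposition, repeating the docstring
# example doctest-style:
#
#   >>> parse_content_disposition(
#   ...     'form-data; name="somefile"; filename="test.html"')
#   ('form-data', {'name': 'somefile', 'filename': 'test.html'})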
class sockaddr_alg(ctypes.Structure):
_fields_ = [("salg_family", ctypes.c_ushort),
("salg_type", ctypes.c_ubyte * 14),
("salg_feat", ctypes.c_uint),
("salg_mask", ctypes.c_uint),
("salg_name", ctypes.c_ubyte * 64)]
_bound_md5_sockfd = None
def get_md5_socket():
"""
Get an MD5 socket file descriptor. One can MD5 data with it by writing it
to the socket with os.write, then os.read the 16 bytes of the checksum out
later.
NOTE: It is the caller's responsibility to ensure that os.close() is
called on the returned file descriptor. This is a bare file descriptor,
not a Python object. It doesn't close itself.
"""
# Linux's AF_ALG sockets work like this:
#
# First, initialize a socket with socket() and bind(). This tells the
# socket what algorithm to use, as well as setting up any necessary bits
# like crypto keys. Of course, MD5 doesn't need any keys, so it's just the
# algorithm name.
#
# Second, to hash some data, get a second socket by calling accept() on
# the first socket. Write data to the socket, then when finished, read the
# checksum from the socket and close it. This lets you checksum multiple
# things without repeating all the setup code each time.
#
# Since we only need to bind() one socket, we do that here and save it for
# future re-use. That way, we only use one file descriptor to get an MD5
# socket instead of two, and we also get to save some syscalls.
global _bound_md5_sockfd
global _libc_socket
global _libc_bind
global _libc_accept
if _libc_accept is None:
_libc_accept = load_libc_function('accept', fail_if_missing=True)
if _libc_socket is None:
_libc_socket = load_libc_function('socket', fail_if_missing=True)
if _libc_bind is None:
_libc_bind = load_libc_function('bind', fail_if_missing=True)
# Do this at first call rather than at import time so that we don't use a
# file descriptor on systems that aren't using any MD5 sockets.
if _bound_md5_sockfd is None:
sockaddr_setup = sockaddr_alg(
AF_ALG,
(ord('h'), ord('a'), ord('s'), ord('h'), 0),
0, 0,
(ord('m'), ord('d'), ord('5'), 0))
hash_sockfd = _libc_socket(ctypes.c_int(AF_ALG),
ctypes.c_int(socket.SOCK_SEQPACKET),
ctypes.c_int(0))
if hash_sockfd < 0:
raise IOError(ctypes.get_errno(),
"Failed to initialize MD5 socket")
bind_result = _libc_bind(ctypes.c_int(hash_sockfd),
ctypes.pointer(sockaddr_setup),
ctypes.c_int(ctypes.sizeof(sockaddr_alg)))
if bind_result < 0:
os.close(hash_sockfd)
raise IOError(ctypes.get_errno(), "Failed to bind MD5 socket")
_bound_md5_sockfd = hash_sockfd
md5_sockfd = _libc_accept(ctypes.c_int(_bound_md5_sockfd), None, 0)
if md5_sockfd < 0:
raise IOError(ctypes.get_errno(), "Failed to accept MD5 socket")
return md5_sockfd
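# Rough usage sketch for get_md5_socket() (Linux-only; per the docstring the
# caller owns the bare file descriptor and must close it):
#
#   fd = get_md5_socket()
#   try:
#       os.write(fd, data)          # feed the bytes to be hashed
#       digest = os.read(fd, 16)    # raw 16-byte MD5 digest
#   finally:
#       os.close(fd)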
|
towerjoo/DjangoNotes | refs/heads/master | Django-1.5.1/django/core/mail/backends/smtp.py | 130 | """SMTP email backend class."""
import smtplib
import ssl
import threading
from django.conf import settings
from django.core.mail.backends.base import BaseEmailBackend
from django.core.mail.utils import DNS_NAME
from django.core.mail.message import sanitize_address
from django.utils.encoding import force_bytes
class EmailBackend(BaseEmailBackend):
"""
A wrapper that manages the SMTP network connection.
"""
def __init__(self, host=None, port=None, username=None, password=None,
use_tls=None, fail_silently=False, **kwargs):
super(EmailBackend, self).__init__(fail_silently=fail_silently)
self.host = host or settings.EMAIL_HOST
self.port = port or settings.EMAIL_PORT
if username is None:
self.username = settings.EMAIL_HOST_USER
else:
self.username = username
if password is None:
self.password = settings.EMAIL_HOST_PASSWORD
else:
self.password = password
if use_tls is None:
self.use_tls = settings.EMAIL_USE_TLS
else:
self.use_tls = use_tls
self.connection = None
self._lock = threading.RLock()
def open(self):
"""
Ensures we have a connection to the email server. Returns whether or
not a new connection was required (True or False).
"""
if self.connection:
# Nothing to do if the connection is already open.
return False
try:
# If local_hostname is not specified, socket.getfqdn() gets used.
# For performance, we use the cached FQDN for local_hostname.
self.connection = smtplib.SMTP(self.host, self.port,
local_hostname=DNS_NAME.get_fqdn())
if self.use_tls:
self.connection.ehlo()
self.connection.starttls()
self.connection.ehlo()
if self.username and self.password:
self.connection.login(self.username, self.password)
return True
except:
if not self.fail_silently:
raise
def close(self):
"""Closes the connection to the email server."""
if self.connection is None:
return
try:
try:
self.connection.quit()
except (ssl.SSLError, smtplib.SMTPServerDisconnected):
# This happens when calling quit() on a TLS connection
# sometimes, or when the connection was already disconnected
# by the server.
self.connection.close()
except:
if self.fail_silently:
return
raise
finally:
self.connection = None
def send_messages(self, email_messages):
"""
Sends one or more EmailMessage objects and returns the number of email
messages sent.
"""
if not email_messages:
return
with self._lock:
new_conn_created = self.open()
if not self.connection:
# We failed silently on open().
# Trying to send would be pointless.
return
num_sent = 0
for message in email_messages:
sent = self._send(message)
if sent:
num_sent += 1
if new_conn_created:
self.close()
return num_sent
def _send(self, email_message):
"""A helper method that does the actual sending."""
if not email_message.recipients():
return False
from_email = sanitize_address(email_message.from_email, email_message.encoding)
recipients = [sanitize_address(addr, email_message.encoding)
for addr in email_message.recipients()]
message = email_message.message()
charset = message.get_charset().get_output_charset() if message.get_charset() else 'utf-8'
try:
self.connection.sendmail(from_email, recipients,
force_bytes(message.as_string(), charset))
except:
if not self.fail_silently:
raise
return False
return True
|
anilveeramalli/cloudify-azure-plugin | refs/heads/master | blueprints/clustered-dns/dns/dns_add_reverse_record.py | 1 | import subprocess, os, sys
from reverseZone_naming import reverseZone_name
from netaddr import *
curlybrace1="{"
curlybrace2="}"
zone_files_path="/etc/bind/zones"
def add_reverse_record():
ip_address_to_be_added= sys.argv[1]
host_name_to_be_added= sys.argv[2]
record_valid=1
ip_valid=0
os.chdir("/root")
network_address_file= open("user_network_address", 'r')
network_address=network_address_file.read()
network_address_file.close()
ip = IPNetwork(network_address)
prefix_length=int(ip.prefixlen)
ip_address=IPAddress(ip_address_to_be_added)
if ip_address_to_be_added in IPNetwork(network_address):
ip_valid=1
reverse_zone_file_name,reverse_zone_name=reverseZone_name()
os.chdir(zone_files_path)
readFiles = open(reverse_zone_file_name, 'r')
forward_zone_file_content = readFiles.read()
readFiles.close()
if host_name_to_be_added in forward_zone_file_content:
record_valid=0
print "\nHostname is already entered in the database.\nSorry! Cannot enter duplicate records.\nPlease enter a different record\n"
if record_valid==1:
octate = str(ip_address).split(".")
if prefix_length<16:
reverse_record=octate[3]+"."+octate[2]+"."+octate[1]
elif prefix_length>=16 and prefix_length<24:
reverse_record=octate[3]+"."+octate[2]
else:
reverse_record=octate[3]
if reverse_record in forward_zone_file_content:
record_valid=0
print "\nIP address is already entered in the database.\nSorry! Cannot enter duplicate records.\nPlease enter a different record\n"
else:
print ("\nSorry, cannot accept this record! \nPlease enter an IP address within your entered zone! \nYour entered zone is: %s\n" % (IPNetwork(network_address)))
if ip_valid==1:
if record_valid==1:
reverse_zone_file_name,reverse_zone_name=reverseZone_name()
os.chdir(zone_files_path)
reverse_zone_file_path=reverse_zone_file_name
reverse_zone_content = open(reverse_zone_file_path, 'a')
reverse_zone_content.write("\n%s \t IN \t PTR \t %s" % (reverse_record,host_name_to_be_added))
reverse_zone_content.close()
print "\nThe reverse record that you entered has been added!\n"
subprocess.call("service bind9 reload",shell=True)
def main():
add_reverse_record()
main()
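# Expected invocation (sketch): the script reads the IP address and hostname of
# the new PTR record from the command line and assumes /root/user_network_address
# and the zone files under /etc/bind/zones already exist, e.g.
#
#   python dns_add_reverse_record.py 10.1.2.3 host1
#
# (the IP address and hostname above are placeholders).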
|
eggimasv/SNIP | refs/heads/master | Python_Files/SNIP_costs.py | 1 | # ======================================================================================
# Copyright 2014 Swiss Federal Institute of Aquatic Science and Technology
#
# This file is part of SNIP (Sustainable Network Infrastructure Planning)
# SNIP is used for determining the optimal degree of centralization for waste
# water infrastructures. You can find detailed information about SNIP in Eggimann et al. (2014).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
# The Dijkstra and A* algorithms are adapted from Hetland (2010).
# The algorithm is developed for Python 2.7 and ArcGIS 10.2
#
# Literature
# ----------
# Eggimann Sven, Truffer Bernhard, Maurer Max (2015): To connect or not to connect?
# Modelling the optimal degree of centralisation for wastewater infrastructures.
# Water Research, XY, .....
#
# Hetland M.L. (2010): Python Algorithms. Mastering Basic Algorithms in the Python Language. apress.
#
# Contact: [email protected]
# Version 1.0
# Date: 1.1.2015
# Author: Eggimann Sven
# ======================================================================================
import math
def calculatePipeCosts(pipeDiameter, distance, averageTrenchDepth, lifeSewers, interestRate, operationCostsPerYear, fc_SewerCost):
"""
    This function calculates the costs of a pipe from the pipe diameter, the pipe length,
    the average trench depth and the financial parameters.
Input Arguments:
pipeDiameter - Pipe Diameter [cm]
distance - Pipe Distance [m]
averageTrenchDepth - Average trench depth [m]
lifeSewers - Life span of sewers
interestRate - Real interest rate
operationCostsPerYear - Operation costs per year per meter of pipe [Currency]
fc_SewerCost - Percentage of cost variation
Output Arguments:
totannuities - Annuities of pipe costs, including maintenance
"""
r = float(interestRate + 1.0) # calculate r of annuities formula
# Construction costs
if pipeDiameter >= 1.2: # If diameter is larger than 1.2m, select parameters of 1200
pipeDiameter = 1.2
# CAPEX
a = 152.51 * pipeDiameter + 173.08 # Linearly derived function for a & b
b = 760.31 * pipeDiameter - 78.208 # Linearly derived function for a & b
costFactor = 1 + fc_SewerCost # Calculate how costs vary
costPerMeter = a * averageTrenchDepth + b * costFactor # Calculate cost per meter pipe
totCost = float(costPerMeter * distance) # Total costs of whole pipe length
# OPEX
averageYearlyOperationCosts = operationCostsPerYear * distance
totannuities = ((interestRate * r**lifeSewers) / (r**lifeSewers - 1)) * totCost + averageYearlyOperationCosts # Calculate annuities and add operation costs
return totannuities
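# The value returned above is the standard capital-recovery (annuity) formula:
# for interest rate i, sewer life span n and total construction cost C,
#
#   totannuities = C * (i * (1 + i)**n) / ((1 + i)**n - 1) + operationCostsPerYear * distance
#
# i.e. the yearly capital cost of the pipe plus its yearly operation costs.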
def getPumpCostsDependingOnFlow(Q, heightDifference, pricekWh, nrOfOperatingYears, interestRate):
"""
    This function calculates the costs of an individual pump.
    # Source: The sewage pumping handbook, p. 84 ff
    # The pumping investment costs are divided by the number of running years.
    Input Arguments:
        Q - Flow [l / s]
        heightDifference - Pumping height difference [m]
        pricekWh - Electricity price per kWh
        nrOfOperatingYears - Number of operating years
        interestRate - Real interest rate
    Output Arguments:
        operationCostsPerYear - Pumping energy costs per year
        operationCostsOverWholePeriod - Pumping energy costs over the whole operating period
"""
#gravity = 9.81 # [m / s^2]
#runninghoursPerYear = 365*24 # [h/year]
#efficiency = 0.5 # efficiency of pump plus motor
# Operation costs
#motorPowerInput = (gravity * Q * heightDifference )/(efficiency*1000) # [kW] slower
#EnergyUsed = motorPowerInput * runninghoursPerYear # [kWh] slower
motorPowerInput = (9.81 * Q * heightDifference )/(500) # [kW] faster
EnergyUsed = motorPowerInput * 8760 # [kWh] faster
operationCostsPerYear = EnergyUsed * pricekWh
operationCostsOverWholePeriod = operationCostsPerYear * nrOfOperatingYears # Costs over whole life span
# Error message
if heightDifference < 0 or operationCostsPerYear < 0:
raise Exception("ERROR: Pumping costs cannot be calculated correctly. " + str(heightDifference) + "" + str(Q)) # Does not make sense if pumped down
return operationCostsPerYear, operationCostsOverWholePeriod # [running CHF per year + investment costs]
def getPipeDiameter(Q, slope, stricklerC):
"""
This function calculates the pipe diameter according to Manning-Strickler.
Input Arguments:
Q - Flow in pipe
slope - Slope
stricklerC - Strickler coefficient
Output Arguments:
pipeDiameter - Needed pipe diameter
"""
Q = Q/86400.0 # Convert the flow [m3/day] to [m3/s], 24.0*60.0*60.0 = 86400.0
Qmax = 0.8 # Maximum filling condition
normDiameterList = (.25, .3, .4, .5, .6, .7, .8, .9, 1, 1.2, 1.5, 2, 2.5, 3, 4, 6, 8) # [m] Norm pipe diameters
#Iterate list with norm diameters until the calculate flow is bigger
for i in normDiameterList:
if slope == 0: # Error if slope is zero
            # If the slope is 0, the WWTP is pumped and a diameter of 0.25 m is assumed.
pipeDiameter = normDiameterList[0]
return pipeDiameter
#raise Exception("ERROR: Pipe diamater cannot get calculated because slope is zero.")
else:
# Calculate flow in pipe with diameter i [m3/s]
#Qfull = stricklerC * (i/4.0)**(2.0/3.0) *math.sqrt(abs(slope)) * (math.pi/4.0) * i**2.0 # slower
Qfull = stricklerC * (i/4.0)**(0.6666666666666666) *math.sqrt(abs(slope)) * (0.7853981633974483) * i**2.0 # faster
# If pipe can bear more than flow as input, select this diameter # 80 % condition
if Qfull * Qmax >= Q:
pipeDiameter = i
return pipeDiameter
        if i == 8: # No norm pipe diameter is large enough
pipeDiameter = 10
return pipeDiameter
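# The capacity check above is the Manning-Strickler formula for a circular pipe
# flowing full, written out explicitly (0.666... is 2/3 and 0.78539... is pi/4):
#
#   Qfull = stricklerC * (D / 4.0)**(2.0 / 3.0) * sqrt(|slope|) * (pi / 4.0) * D**2
#
# A norm diameter D is accepted as soon as 0.8 * Qfull >= Q, i.e. the pipe is at
# most 80 % full at the design flow.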
def costWWTP(flow, EWQuantity, lifeWwtps, interestRate, fc_wwtpOpex, fc_wwtpCapex):
"""
This function calculates the costs of a wwtp.
Input Arguments:
flow - Amount of waste water to be treated [in m3]
EWQuantity - Factor to calculate population equivalent from amount of waste water.
lifeWwtps - Life span of treatment plant
        interestRate - Real interest rate [%]
fc_wwtpOpex - WWTP operation cost parameter
fc_wwtpCapex - WWTP replacement cost parameter
Output Arguments:
totalAnnualCosts - Total annuities
"""
r = float(interestRate + 1.0) # r of annuities formula
EW = float(flow) / float(EWQuantity) # [PE] Calculate flow in population equivalent (Convert liter in EW)
sensFactor_Operation = 1 + fc_wwtpOpex
sensFactor_Replacement = 1 + fc_wwtpCapex
# Capex - Annual Operation costs
replacementCostsPerEW = 13318 * EW**-0.209 * sensFactor_Replacement # Source: VSA
replacementCosts = replacementCostsPerEW * EW
annuitiesReplacementCosts = replacementCosts * ((interestRate * r**lifeWwtps)) / (r**lifeWwtps -1) # Calculate annuities
# Opex - Annual Operation Costs
annaulOperationCostsPerEW = 340.82 * EW**-0.171 * sensFactor_Operation # Source VSA
totannaulOperationCosts = annaulOperationCostsPerEW * EW
totalAnnualCosts = annuitiesReplacementCosts + totannaulOperationCosts # Operation Costs & replacement costs
return totalAnnualCosts
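# Cost curves used above (sketch; both are the VSA curves referenced in the
# comments, scaled by the sensitivity factors fc_wwtpCapex / fc_wwtpOpex):
#
#   PE               = flow / EWQuantity
#   replacement cost = 13318  * PE**-0.209 * PE     (per-PE cost times PE)
#   annual operation = 340.82 * PE**-0.171 * PE
#
# The replacement cost is then annualised with the same capital-recovery factor
# as in calculatePipeCosts() and added to the annual operation costs.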
def calculateConnectionCosts(pipeCostI, totPumpCostI, pipeCostII, totPumpCostII, pipeCostIII, totPumpCostIII, WWTPcostsI, interestRate, lifeWwtps, lifeSewers):
"""
This function calculates the total replacement costs of the system in case a node is connected to the existing system.
This is needed in order to compare the costs of a central connection with the reasonable costs.
Input Arguments:
        pipeCostI, totPumpCostI -- Pipe- & pumping costs of option I (central connection)
        pipeCostII, totPumpCostII -- Pipe- & pumping costs of option II (no connection)
        pipeCostIII, totPumpCostIII -- Pipe- & pumping costs of option III (central connection)
        WWTPcostsI -- WWTP costs of option I
        interestRate -- Real interest rate [%]
        lifeWwtps -- Life span of treatment plant
        lifeSewers -- Life span of sewers
Output Arguments:
costConnection -- Costs of lowest central connection
"""
r = float(interestRate + 1.0) # calculate r of annuities formula
var1 = r**lifeSewers
var2 = var1 -1
# Convert annuities into replacement costs
totSewerCostI = pipeCostI * var2 / (interestRate * var1)
totSewerCostII = pipeCostII * var2 / (interestRate * var1)
totSewerCostIII = pipeCostIII * var2 / (interestRate * var1)
# Calculate total replacement value only of network (including pumps) of the different options
centralConnectionI = totSewerCostI + totPumpCostI # total replacement value of pumps and sewer with central connection
connectionII = totSewerCostII + totPumpCostII # total replacement value of pumps and sewer without connection
centralConnectionIII = totSewerCostIII + totPumpCostIII # total replacement value of pumps and sewer with central connection
    # Calculate connection costs of sewers and pumps not including WWTP. The already existing network needs to be subtracted (option II).
costConnectionI = centralConnectionI - connectionII # Cost central - cost decentral
costConnectionIII = centralConnectionIII - connectionII # Cost central - cost decentral
# Select lowest connection costs
if costConnectionI < costConnectionIII:
costConnection = costConnectionI
else:
costConnection = costConnectionIII
return costConnection
def calculatetotalAnnuities(listWTPs, EW_Q, lifeWwtps, interestRate, pumps, pumpingYears, pricekWh, sewers, flowPoints, edgeList, nodes, stricklerC, lifeSewers, operationCosts, fc_SewerCost, fc_wwtpOpex, fc_wwtpCapex):
'''
This function calculates the total system costs of a system
Input:
listWTPs - List with wwtps
EW_Q - Waste water per person
lifeWwtps - Lifepsan of wwtp
interestRate - Interest rate
pumps - List with Pumps
pumpingYears - Pump lifespan
pricekWh - Price per kWh
sewers - Sewers
flowPoints - Nodes with flow
edgeList - List with edges
nodes - Nodes
stricklerC - Strickler Coefficient
lifeSewers - Lifespan of Sewers
operationCosts - Operation costs
fc_SewerCost - Cost factor sewers
fc_wwtpOpex - Cost factor opex WWTP
Output:
        completePumpCosts, completeWWTPCosts, completePublicPipeCosts - Pump, WWTP and public sewer costs
'''
# calculate WWTPs costs
completeWWTPCosts = 0
for i in listWTPs:
WWTPcostsA1 = costWWTP(i[1], EW_Q, lifeWwtps, interestRate, fc_wwtpOpex, fc_wwtpCapex)
completeWWTPCosts += WWTPcostsA1
# Calculate pump costs
completePumpCosts = 0
for pmp in pumps:
flow, heightDifference = pmp[1], pmp[2]
summingPumpCosts, _ = getPumpCostsDependingOnFlow(flow, heightDifference, pricekWh, pumpingYears, interestRate) # pump is found on path
completePumpCosts += summingPumpCosts
# Calculate sewer costs
completePublicPipeCosts = 0
for pipe in sewers:
if sewers[pipe][0] != ():
oldNode = pipe
nextNode = sewers[pipe][0]
# Get flow
for a in nodes:
if a[0] == oldNode:
Q = a[4] + a[8]
break
# Get distance, slope
for edge in edgeList:
if edge[0][0] == oldNode and edge[1][0] == nextNode: # Stored inverse, thus slope needs to get inverted
distance, slope = edge[2], edge[3] * -1 # distance, # slope needs to be inverted
break
if edge[1][0] == oldNode and edge[0][0] == nextNode:
distance, slope = edge[2], edge[3] # distance, # slope stays the same
break
# Get Trench Depth
for punkt in nodes:
if punkt[0] == oldNode:
trenchDepthFrom = punkt[3] - punkt[10]
break
for punkt in nodes:
if punkt[0] == nextNode:
trenchDepthTo = punkt[3] - punkt[10]
break
averageTrenchDepth = (abs(trenchDepthFrom) + abs(trenchDepthTo)) / 2
pipeDiameter = getPipeDiameter(Q, slope, stricklerC)
costsPerYear = calculatePipeCosts(pipeDiameter, distance, averageTrenchDepth, lifeSewers, interestRate, operationCosts, fc_SewerCost)
completePublicPipeCosts += costsPerYear
return completePumpCosts, completeWWTPCosts, completePublicPipeCosts
def costsPrivateSewers(buildings, buildPoints, pipeDiameterPrivateSewer, averageTrenchDepthPrivateSewer, lifeSewers, interestRate, operationCosts, fc_SewerCost):
'''
    This function calculates the costs of the private sewers. A private sewer covers the shortest distance to the street network;
    if the street is too far, the whole distance to the building is used.
Input:
buildings - Buildings
buildPoints - Coordinates of Buildings
pipeDiameterPrivateSewer - Pipe Diameter
averageTrenchDepthPrivateSewer - Average Trench depth
lifeSewers - Lifespan of sewers
interestRate - Interest rate
operationCosts - Opex
fc_SewerCost - cost factor sewers
Output:
totCostPrivateSewer - Total replacement value of private sewers
'''
costsP_Sewer = 0
for node in buildings:
pt_to1_X, pt_to1_Y, gebListe = node[0], node[1], node[2]
for house in gebListe:
for geb in buildPoints:
if geb[0] == house:
_, pt_from1_X, pt_from1_Y, _ = geb[0], geb[1], geb[2], geb[4]
break
p0, p1 = (pt_from1_X, pt_from1_Y), (pt_to1_X, pt_to1_Y)
distance = math.hypot(p0[0] - p1[0], p0[1] - p1[1])
privateSewercostsPerYear = calculatePipeCosts(pipeDiameterPrivateSewer, distance, averageTrenchDepthPrivateSewer, lifeSewers, interestRate, operationCosts, fc_SewerCost)
costsP_Sewer += privateSewercostsPerYear
totCostPrivateSewer = costsP_Sewer * lifeSewers
return totCostPrivateSewer
def getCostsOfCrossedWWTPs(allNodesToAddToPN, pathBetweenWWTPs, WWTPS_noCon, sewers_NoCon, nodes_noCon, EW_Q, wwtpLifespan, interestRate, fc_wwtpOperation, fc_wwtpReplacement):
'''
This function estimates the costs of all crossed wwtps on the path between two wwtps.
Input:
allNodesToAddToPN - All nodes on the path
pathBetweenWWTPs - Path between WWTP
WWTPS_noCon - WWTP
sewers_NoCon - Sewers
nodes_noCon - Nodes
EW_Q, wwtpLifespan, interestRate, fc_wwtpOperation, fc_wwtpReplacement - Cost relevant parameters
Output:
sumCostcrossedWWTP - Costs
'''
# Iterate path and get the sum of all flow which flows to WWTPs in the path
allWWTPsInPath = [] # List to store all crossed wwtp with the flow [[ID, flow]]
sumCostcrossedWWTP = 0 # Total costs
# Get all WWTPs in Path
for i in allNodesToAddToPN:
for wwtp in WWTPS_noCon:
if i[0] == wwtp[0]:
if i[0] in pathBetweenWWTPs:
allWWTPsInPath.append([i[0], 0])
break
# Iterate path
if len(allWWTPsInPath) > 0:
for i in pathBetweenWWTPs:
wwtpfound = 0
# get WWTP to which this node flows
iterate = i
try:
while wwtpfound == 0:
nextN = sewers_NoCon[iterate]
if nextN[0] == ():
wwtpfound = 1
toWWTP = iterate
break
iterate = nextN[0]
except:
continue # This node was not in network
for wwtp in allWWTPsInPath:
if wwtp[0] == toWWTP:
for n in nodes_noCon:
if n[0] == i:
fl = n[8]
break
wwtp[1] += fl
break
for i in allWWTPsInPath:
flowWWTP = i[1]
costCrossed = costWWTP(flowWWTP, EW_Q, wwtpLifespan, interestRate, fc_wwtpOperation, fc_wwtpReplacement)
sumCostcrossedWWTP += costCrossed
return sumCostcrossedWWTP
|
SimVascular/VTK | refs/heads/master | ThirdParty/Twisted/twisted/web/_auth/wrapper.py | 47 | # -*- test-case-name: twisted.web.test.test_httpauth -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
A guard implementation which supports HTTP header-based authentication
schemes.
If no I{Authorization} header is supplied, an anonymous login will be
attempted by using a L{Anonymous} credentials object. If such a header is
supplied and does not contain allowed credentials, or if anonymous login is
denied, a 401 will be sent in the response along with I{WWW-Authenticate}
headers for each of the allowed authentication schemes.
"""
from zope.interface import implements
from twisted.python import log
from twisted.python.components import proxyForInterface
from twisted.web.resource import IResource, ErrorPage
from twisted.web import util
from twisted.cred import error
from twisted.cred.credentials import Anonymous
class UnauthorizedResource(object):
"""
Simple IResource to escape Resource dispatch
"""
implements(IResource)
isLeaf = True
def __init__(self, factories):
self._credentialFactories = factories
def render(self, request):
"""
Send www-authenticate headers to the client
"""
def generateWWWAuthenticate(scheme, challenge):
l = []
for k,v in challenge.iteritems():
l.append("%s=%s" % (k, quoteString(v)))
return "%s %s" % (scheme, ", ".join(l))
def quoteString(s):
return '"%s"' % (s.replace('\\', '\\\\').replace('"', '\\"'),)
request.setResponseCode(401)
for fact in self._credentialFactories:
challenge = fact.getChallenge(request)
request.responseHeaders.addRawHeader(
'www-authenticate',
generateWWWAuthenticate(fact.scheme, challenge))
if request.method == 'HEAD':
return ''
return 'Unauthorized'
def getChildWithDefault(self, path, request):
"""
Disable resource dispatch
"""
return self
class HTTPAuthSessionWrapper(object):
"""
Wrap a portal, enforcing supported header-based authentication schemes.
@ivar _portal: The L{Portal} which will be used to retrieve L{IResource}
avatars.
@ivar _credentialFactories: A list of L{ICredentialFactory} providers which
will be used to decode I{Authorization} headers into L{ICredentials}
providers.
"""
implements(IResource)
isLeaf = False
def __init__(self, portal, credentialFactories):
"""
Initialize a session wrapper
@type portal: C{Portal}
@param portal: The portal that will authenticate the remote client
@type credentialFactories: C{Iterable}
        @param credentialFactories: The credential factories that will be
            used to decode I{Authorization} headers submitted by the remote
            client
"""
self._portal = portal
self._credentialFactories = credentialFactories
def _authorizedResource(self, request):
"""
Get the L{IResource} which the given request is authorized to receive.
If the proper authorization headers are present, the resource will be
requested from the portal. If not, an anonymous login attempt will be
made.
"""
authheader = request.getHeader('authorization')
if not authheader:
return util.DeferredResource(self._login(Anonymous()))
factory, respString = self._selectParseHeader(authheader)
if factory is None:
return UnauthorizedResource(self._credentialFactories)
try:
credentials = factory.decode(respString, request)
except error.LoginFailed:
return UnauthorizedResource(self._credentialFactories)
except:
log.err(None, "Unexpected failure from credentials factory")
return ErrorPage(500, None, None)
else:
return util.DeferredResource(self._login(credentials))
def render(self, request):
"""
Find the L{IResource} avatar suitable for the given request, if
possible, and render it. Otherwise, perhaps render an error page
requiring authorization or describing an internal server failure.
"""
return self._authorizedResource(request).render(request)
def getChildWithDefault(self, path, request):
"""
Inspect the Authorization HTTP header, and return a deferred which,
when fired after successful authentication, will return an authorized
C{Avatar}. On authentication failure, an C{UnauthorizedResource} will
be returned, essentially halting further dispatch on the wrapped
resource and all children
"""
# Don't consume any segments of the request - this class should be
# transparent!
request.postpath.insert(0, request.prepath.pop())
return self._authorizedResource(request)
def _login(self, credentials):
"""
Get the L{IResource} avatar for the given credentials.
@return: A L{Deferred} which will be called back with an L{IResource}
avatar or which will errback if authentication fails.
"""
d = self._portal.login(credentials, None, IResource)
d.addCallbacks(self._loginSucceeded, self._loginFailed)
return d
def _loginSucceeded(self, (interface, avatar, logout)):
"""
Handle login success by wrapping the resulting L{IResource} avatar
so that the C{logout} callback will be invoked when rendering is
complete.
"""
class ResourceWrapper(proxyForInterface(IResource, 'resource')):
"""
Wrap an L{IResource} so that whenever it or a child of it
completes rendering, the cred logout hook will be invoked.
An assumption is made here that exactly one L{IResource} from
among C{avatar} and all of its children will be rendered. If
more than one is rendered, C{logout} will be invoked multiple
times and probably earlier than desired.
"""
def getChildWithDefault(self, name, request):
"""
Pass through the lookup to the wrapped resource, wrapping
the result in L{ResourceWrapper} to ensure C{logout} is
called when rendering of the child is complete.
"""
return ResourceWrapper(self.resource.getChildWithDefault(name, request))
def render(self, request):
"""
Hook into response generation so that when rendering has
finished completely (with or without error), C{logout} is
called.
"""
request.notifyFinish().addBoth(lambda ign: logout())
return super(ResourceWrapper, self).render(request)
return ResourceWrapper(avatar)
def _loginFailed(self, result):
"""
Handle login failure by presenting either another challenge (for
expected authentication/authorization-related failures) or a server
error page (for anything else).
"""
if result.check(error.Unauthorized, error.LoginFailed):
return UnauthorizedResource(self._credentialFactories)
else:
log.err(
result,
"HTTPAuthSessionWrapper.getChildWithDefault encountered "
"unexpected error")
return ErrorPage(500, None, None)
def _selectParseHeader(self, header):
"""
Choose an C{ICredentialFactory} from C{_credentialFactories}
        suitable to use to decode the given I{Authorization} header.
@return: A two-tuple of a factory and the remaining portion of the
header value to be decoded or a two-tuple of C{None} if no
factory can decode the header value.
"""
elements = header.split(' ')
scheme = elements[0].lower()
for fact in self._credentialFactories:
if fact.scheme == scheme:
return (fact, ' '.join(elements[1:]))
return (None, None)
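# Typical wiring (sketch; assumes a cred Portal whose realm returns IResource
# avatars, and uses BasicCredentialFactory from twisted.web.guard as an example
# credential factory):
#
#   from twisted.web.guard import BasicCredentialFactory
#   wrapper = HTTPAuthSessionWrapper(portal, [BasicCredentialFactory('my realm')])
#
# The wrapper then replaces the protected resource in the resource tree.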
|
coursemdetw/2015cdb | refs/heads/master | static/Brython3.1.0-20150301-090019/Lib/browser/websocket.py | 618 | from browser import window
import javascript
WebSocket = javascript.JSConstructor(window.WebSocket) |
TridevGuha/django | refs/heads/master | django/core/management/commands/check.py | 316 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.apps import apps
from django.core import checks
from django.core.checks.registry import registry
from django.core.management.base import BaseCommand, CommandError
class Command(BaseCommand):
help = "Checks the entire Django project for potential problems."
requires_system_checks = False
def add_arguments(self, parser):
parser.add_argument('args', metavar='app_label', nargs='*')
parser.add_argument('--tag', '-t', action='append', dest='tags',
help='Run only checks labeled with given tag.')
parser.add_argument('--list-tags', action='store_true', dest='list_tags',
help='List available tags.')
parser.add_argument('--deploy', action='store_true', dest='deploy',
help='Check deployment settings.')
def handle(self, *app_labels, **options):
include_deployment_checks = options['deploy']
if options.get('list_tags'):
self.stdout.write('\n'.join(sorted(registry.tags_available(include_deployment_checks))))
return
if app_labels:
app_configs = [apps.get_app_config(app_label) for app_label in app_labels]
else:
app_configs = None
tags = options.get('tags')
if tags:
try:
invalid_tag = next(
tag for tag in tags if not checks.tag_exists(tag, include_deployment_checks)
)
except StopIteration:
# no invalid tags
pass
else:
raise CommandError('There is no system check with the "%s" tag.' % invalid_tag)
self.check(
app_configs=app_configs,
tags=tags,
display_num_errors=True,
include_deployment_checks=include_deployment_checks,
)
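# Example invocations (sketch), matching the arguments registered above:
#
#   python manage.py check                  # check the whole project
#   python manage.py check auth admin       # restrict the check to app labels
#   python manage.py check --list-tags      # list the available check tags
#   python manage.py check --tag models     # run only checks with a given tag
#   python manage.py check --deploy         # include deployment settings checks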
|
razvanphp/arangodb | refs/heads/devel | 3rdParty/V8-3.31.74.1/tools/testrunner/__init__.py | 651 | # Copyright 2012 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
ayesandarmoe/microblog_flask_tutorial | refs/heads/master | flask/lib/python2.7/site-packages/werkzeug/contrib/sessions.py | 256 | # -*- coding: utf-8 -*-
r"""
werkzeug.contrib.sessions
~~~~~~~~~~~~~~~~~~~~~~~~~
This module contains some helper classes that help one to add session
support to a python WSGI application. For full client-side session
storage see :mod:`~werkzeug.contrib.securecookie` which implements a
secure, client-side session storage.
Application Integration
=======================
::
from werkzeug.contrib.sessions import SessionMiddleware, \
FilesystemSessionStore
app = SessionMiddleware(app, FilesystemSessionStore())
The current session will then appear in the WSGI environment as
`werkzeug.session`. However it's recommended to not use the middleware
    but the stores directly in the application. For very simple scripts,
    though, a middleware for sessions could be sufficient.
This module does not implement methods or ways to check if a session is
    expired. That should be done by a cronjob and is storage specific. For
    example, to prune unused filesystem sessions one could check the modified
    time of the files. If sessions are stored in the database, the new()
method should add an expiration timestamp for the session.
For better flexibility it's recommended to not use the middleware but the
store and session object directly in the application dispatching::
session_store = FilesystemSessionStore()
def application(environ, start_response):
request = Request(environ)
sid = request.cookies.get('cookie_name')
if sid is None:
request.session = session_store.new()
else:
request.session = session_store.get(sid)
response = get_the_response_object(request)
if request.session.should_save:
session_store.save(request.session)
response.set_cookie('cookie_name', request.session.sid)
return response(environ, start_response)
:copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import re
import os
import tempfile
from os import path
from time import time
from random import random
from hashlib import sha1
from pickle import dump, load, HIGHEST_PROTOCOL
from werkzeug.datastructures import CallbackDict
from werkzeug.utils import dump_cookie, parse_cookie
from werkzeug.wsgi import ClosingIterator
from werkzeug.posixemulation import rename
from werkzeug._compat import PY2, text_type
from werkzeug.filesystem import get_filesystem_encoding
_sha1_re = re.compile(r'^[a-f0-9]{40}$')
def _urandom():
if hasattr(os, 'urandom'):
return os.urandom(30)
return text_type(random()).encode('ascii')
def generate_key(salt=None):
if salt is None:
salt = repr(salt).encode('ascii')
return sha1(b''.join([
salt,
str(time()).encode('ascii'),
_urandom()
])).hexdigest()
class ModificationTrackingDict(CallbackDict):
__slots__ = ('modified',)
def __init__(self, *args, **kwargs):
def on_update(self):
self.modified = True
self.modified = False
CallbackDict.__init__(self, on_update=on_update)
dict.update(self, *args, **kwargs)
def copy(self):
"""Create a flat copy of the dict."""
missing = object()
result = object.__new__(self.__class__)
for name in self.__slots__:
val = getattr(self, name, missing)
if val is not missing:
setattr(result, name, val)
return result
def __copy__(self):
return self.copy()
class Session(ModificationTrackingDict):
"""Subclass of a dict that keeps track of direct object changes. Changes
in mutable structures are not tracked, for those you have to set
`modified` to `True` by hand.
"""
__slots__ = ModificationTrackingDict.__slots__ + ('sid', 'new')
def __init__(self, data, sid, new=False):
ModificationTrackingDict.__init__(self, data)
self.sid = sid
self.new = new
def __repr__(self):
return '<%s %s%s>' % (
self.__class__.__name__,
dict.__repr__(self),
self.should_save and '*' or ''
)
@property
def should_save(self):
"""True if the session should be saved.
.. versionchanged:: 0.6
By default the session is now only saved if the session is
modified, not if it is new like it was before.
"""
return self.modified
class SessionStore(object):
"""Baseclass for all session stores. The Werkzeug contrib module does not
implement any useful stores besides the filesystem store, application
developers are encouraged to create their own stores.
:param session_class: The session class to use. Defaults to
:class:`Session`.
"""
def __init__(self, session_class=None):
if session_class is None:
session_class = Session
self.session_class = session_class
def is_valid_key(self, key):
"""Check if a key has the correct format."""
return _sha1_re.match(key) is not None
def generate_key(self, salt=None):
"""Simple function that generates a new session key."""
return generate_key(salt)
def new(self):
"""Generate a new session."""
return self.session_class({}, self.generate_key(), True)
def save(self, session):
"""Save a session."""
def save_if_modified(self, session):
"""Save if a session class wants an update."""
if session.should_save:
self.save(session)
def delete(self, session):
"""Delete a session."""
def get(self, sid):
"""Get a session for this sid or a new session object. This method
has to check if the session key is valid and create a new session if
that wasn't the case.
"""
return self.session_class({}, sid, True)
#: used for temporary files by the filesystem session store
_fs_transaction_suffix = '.__wz_sess'
class FilesystemSessionStore(SessionStore):
"""Simple example session store that saves sessions on the filesystem.
This store works best on POSIX systems and Windows Vista / Windows
Server 2008 and newer.
.. versionchanged:: 0.6
`renew_missing` was added. Previously this was considered `True`,
now the default changed to `False` and it can be explicitly
deactivated.
:param path: the path to the folder used for storing the sessions.
If not provided the default temporary directory is used.
:param filename_template: a string template used to give the session
a filename. ``%s`` is replaced with the
session id.
:param session_class: The session class to use. Defaults to
:class:`Session`.
:param renew_missing: set to `True` if you want the store to
give the user a new sid if the session was
not yet saved.
"""
def __init__(self, path=None, filename_template='werkzeug_%s.sess',
session_class=None, renew_missing=False, mode=0o644):
SessionStore.__init__(self, session_class)
if path is None:
path = tempfile.gettempdir()
self.path = path
if isinstance(filename_template, text_type) and PY2:
filename_template = filename_template.encode(
get_filesystem_encoding())
assert not filename_template.endswith(_fs_transaction_suffix), \
'filename templates may not end with %s' % _fs_transaction_suffix
self.filename_template = filename_template
self.renew_missing = renew_missing
self.mode = mode
def get_session_filename(self, sid):
# out of the box, this should be a strict ASCII subset but
# you might reconfigure the session object to have a more
# arbitrary string.
if isinstance(sid, text_type) and PY2:
sid = sid.encode(get_filesystem_encoding())
return path.join(self.path, self.filename_template % sid)
def save(self, session):
fn = self.get_session_filename(session.sid)
fd, tmp = tempfile.mkstemp(suffix=_fs_transaction_suffix,
dir=self.path)
f = os.fdopen(fd, 'wb')
try:
dump(dict(session), f, HIGHEST_PROTOCOL)
finally:
f.close()
try:
rename(tmp, fn)
os.chmod(fn, self.mode)
except (IOError, OSError):
pass
def delete(self, session):
fn = self.get_session_filename(session.sid)
try:
os.unlink(fn)
except OSError:
pass
def get(self, sid):
if not self.is_valid_key(sid):
return self.new()
try:
f = open(self.get_session_filename(sid), 'rb')
except IOError:
if self.renew_missing:
return self.new()
data = {}
else:
try:
try:
data = load(f)
except Exception:
data = {}
finally:
f.close()
return self.session_class(data, sid, False)
def list(self):
"""Lists all sessions in the store.
.. versionadded:: 0.6
"""
before, after = self.filename_template.split('%s', 1)
filename_re = re.compile(r'%s(.{5,})%s$' % (re.escape(before),
re.escape(after)))
result = []
for filename in os.listdir(self.path):
#: this is a session that is still being saved.
if filename.endswith(_fs_transaction_suffix):
continue
match = filename_re.match(filename)
if match is not None:
result.append(match.group(1))
return result
class SessionMiddleware(object):
"""A simple middleware that puts the session object of a store provided
into the WSGI environ. It automatically sets cookies and restores
sessions.
However a middleware is not the preferred solution because it won't be as
fast as sessions managed by the application itself and will put a key into
the WSGI environment only relevant for the application which is against
the concept of WSGI.
The cookie parameters are the same as for the :func:`~dump_cookie`
function just prefixed with ``cookie_``. Additionally `max_age` is
called `cookie_age` and not `cookie_max_age` because of backwards
compatibility.
"""
def __init__(self, app, store, cookie_name='session_id',
cookie_age=None, cookie_expires=None, cookie_path='/',
cookie_domain=None, cookie_secure=None,
cookie_httponly=False, environ_key='werkzeug.session'):
self.app = app
self.store = store
self.cookie_name = cookie_name
self.cookie_age = cookie_age
self.cookie_expires = cookie_expires
self.cookie_path = cookie_path
self.cookie_domain = cookie_domain
self.cookie_secure = cookie_secure
self.cookie_httponly = cookie_httponly
self.environ_key = environ_key
def __call__(self, environ, start_response):
cookie = parse_cookie(environ.get('HTTP_COOKIE', ''))
sid = cookie.get(self.cookie_name, None)
if sid is None:
session = self.store.new()
else:
session = self.store.get(sid)
environ[self.environ_key] = session
def injecting_start_response(status, headers, exc_info=None):
if session.should_save:
self.store.save(session)
headers.append(('Set-Cookie', dump_cookie(self.cookie_name,
session.sid, self.cookie_age,
self.cookie_expires, self.cookie_path,
self.cookie_domain, self.cookie_secure,
self.cookie_httponly)))
return start_response(status, headers, exc_info)
return ClosingIterator(self.app(environ, injecting_start_response),
lambda: self.store.save_if_modified(session))
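# Minimal wiring sketch, repeating the pattern from the module docstring
# (assumes `application` is an existing WSGI callable):
#
#   session_store = FilesystemSessionStore('/tmp/sessions')
#   application = SessionMiddleware(application, session_store,
#                                   cookie_name='session_id')
#
# For finer control, use the store directly in the application dispatching as
# shown at the top of this module.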
|
CraigHarris/gpdb | refs/heads/master | gpMgmt/bin/gppylib/operations/test/regress/test_package/test_regress_simple_negative.py | 54 | #!/usr/bin/env python
import os
import tarfile
from gppylib.commands.base import ExecutionError
from gppylib.operations.test.regress.test_package import GppkgTestCase, unittest, GppkgSpec, RPMSpec, ARCH, OS, GPDB_VERSION
class SimpleNegativeTestCase(GppkgTestCase):
# @unittest.expectedFailure
def test00_wrong_os(self):
os = "abcde"
A_spec = self.A_spec
alpha_spec = GppkgSpec("alpha", "1.0", GPDB_VERSION, os)
gppkg_file = self.build(alpha_spec, A_spec)
with self.assertRaisesRegexp(ExecutionError , "%s OS required. %s OS found" % (os, OS)):
self.install(gppkg_file)
def test01_wrong_arch(self):
arch = "abcde"
A_spec = self.A_spec
alpha_spec = GppkgSpec("alpha", "1.0", GPDB_VERSION, OS, arch)
gppkg_file = self.build(alpha_spec, A_spec)
with self.assertRaisesRegexp(ExecutionError, "%s Arch required. %s Arch found" % (arch, ARCH)):
self.install(gppkg_file)
def test02_wrong_gpdbversion(self):
gpdb_version = "4.6"
A_spec = self.A_spec
alpha_spec = GppkgSpec("alpha", "1.0", gpdb_version)
gppkg_file = self.build(alpha_spec, A_spec)
with self.assertRaisesRegexp(ExecutionError, "requires Greenplum Database version %s" % gpdb_version):
self.install(gppkg_file)
def test03_install_twice(self):
gppkg_file = self.build(self.alpha_spec, self.A_spec)
self.install(self.alpha_spec.get_filename())
with self.assertRaisesRegexp(ExecutionError, "%s is already installed" % gppkg_file):
self.install(gppkg_file)
@unittest.expectedFailure
def test04_update_gppkg_lower(self):
"""
This test tries to update a gppkg which has a lower version,
but the main comprising rpm is of higher version than the one
already installed on the system.
"""
#Use gppkg from previous test
self.install(self.alpha_spec.get_filename())
#Use gppkg which has a lower version, but the rpm is > the one installed
update_rpm_spec = RPMSpec("A", "1", "2")
update_gppkg_spec = GppkgSpec("alpha", "0.1")
update_gppkg_file = self.build(update_gppkg_spec, update_rpm_spec)
with self.assertRaisesRegexp(ExecutionError, "Newer version of %s already installed" % update_gppkg_spec.get_package_name()):
self.update(update_gppkg_file)
#Check that the original package is still installed and not updated
assert self.check_install(self.alpha_spec.get_filename())
def test05_update_rpm_lower(self):
"""
        This test tries to update a gppkg that has a higher version,
        but whose main constituent rpm has a lower version than the
        one already installed on the system.
"""
#Use gppkg from previous test
self.install(self.alpha_spec.get_filename())
#Use gppkg with a lower RPM version but gppkg version is > the one installed
update_rpm_spec = RPMSpec("A", "1", "0")
update_gppkg_spec = GppkgSpec("alpha", "1.1")
update_gppkg_file = self.build(update_gppkg_spec, update_rpm_spec)
with self.assertRaisesRegexp(ExecutionError, self.A_spec.get_filename()):
self.update(update_gppkg_file)
#Check that the original package is still installed and not updated
assert self.check_install(self.alpha_spec.get_filename())
def test06_uninstall_twice(self):
#Uses the gppkg from previous test
self.install(self.alpha_spec.get_filename())
#Uninstall gppkg
self.remove(self.alpha_spec.get_filename())
with self.assertRaisesRegexp(ExecutionError, "%s has not been installed" % self.alpha_spec.get_package_name()):
self.remove(self.alpha_spec.get_filename())
def test07_invalid_gppkg_name(self):
invalid_gppkg_name = "abcde-abc"
with self.assertRaisesRegexp(ExecutionError, "Cannot find package %s" % invalid_gppkg_name):
self.install(invalid_gppkg_name)
@unittest.expectedFailure
def test08_wrong_os_update(self):
os = "windows"
self.install(self.alpha_spec.get_filename())
invalid_os_gppkg = GppkgSpec("alpha", "1.1", GPDB_VERSION, os)
gppkg_file = self.build(invalid_os_gppkg, self.A_spec)
with self.assertRaisesRegexp(ExecutionError, "%s os required. %s os found" % (os, OS)):
self.update(gppkg_file)
def test09_wrong_arch_update(self):
arch = "abcde"
self.install(self.alpha_spec.get_filename())
invalid_os_gppkg = GppkgSpec("alpha", "1.1", GPDB_VERSION, OS, arch)
gppkg_file = self.build(invalid_os_gppkg, self.A_spec)
with self.assertRaisesRegexp(ExecutionError, "%s Arch required. %s Arch found" % (arch, ARCH)):
self.update(gppkg_file)
if __name__ == "__main__":
unittest.main()
|
hifly/OpenUpgrade | refs/heads/8.0 | addons/lunch/__openerp__.py | 267 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2012 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Lunch Orders',
'author': 'OpenERP SA',
'version': '0.2',
'depends': ['base', 'report'],
'category' : 'Tools',
'summary': 'Lunch Order, Meal, Food',
'description': """
The base module to manage lunch.
================================
Many companies order sandwiches, pizzas and other meals from their usual suppliers to offer their employees more convenience.
However, managing lunches within the company requires proper administration, especially when the number of employees or suppliers is large.
The “Lunch Order” module has been developed to make this management easier and to offer employees more tools and better usability.
In addition to full meal and supplier management, this module can display warnings and provides quick order selection based on each employee’s preferences.
If you want to save your employees' time and spare them from always having to carry coins in their pockets, this module is essential.
""",
'data': [
'security/lunch_security.xml',
'lunch_view.xml',
'wizard/lunch_order_view.xml',
'wizard/lunch_validation_view.xml',
'wizard/lunch_cancel_view.xml',
'lunch_report.xml',
'report/report_lunch_order_view.xml',
'security/ir.model.access.csv',
'views/report_lunchorder.xml',
'views/lunch.xml',
],
'demo': ['lunch_demo.xml',],
'installable': True,
'website': 'https://www.odoo.com/page/employees',
'application' : True,
'certificate' : '001292377792581874189',
}
|
gvpeek/django_football | refs/heads/master | django_football/leagues/tests.py | 24123 | from django.test import TestCase
# Create your tests here.
|
mapbox/pixelmatch-cpp | refs/heads/master | deps/gyp/pylib/gyp/MSVSToolFile.py | 2736 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Visual Studio project reader/writer."""
import gyp.common
import gyp.easy_xml as easy_xml
class Writer(object):
"""Visual Studio XML tool file writer."""
def __init__(self, tool_file_path, name):
"""Initializes the tool file.
Args:
tool_file_path: Path to the tool file.
name: Name of the tool file.
"""
self.tool_file_path = tool_file_path
self.name = name
self.rules_section = ['Rules']
def AddCustomBuildRule(self, name, cmd, description,
additional_dependencies,
outputs, extensions):
"""Adds a rule to the tool file.
Args:
name: Name of the rule.
description: Description of the rule.
cmd: Command line of the rule.
additional_dependencies: other files which may trigger the rule.
outputs: outputs of the rule.
extensions: extensions handled by the rule.
"""
rule = ['CustomBuildRule',
{'Name': name,
'ExecutionDescription': description,
'CommandLine': cmd,
'Outputs': ';'.join(outputs),
'FileExtensions': ';'.join(extensions),
'AdditionalDependencies':
';'.join(additional_dependencies)
}]
self.rules_section.append(rule)
def WriteIfChanged(self):
"""Writes the tool file."""
content = ['VisualStudioToolFile',
{'Version': '8.00',
'Name': self.name
},
self.rules_section
]
easy_xml.WriteXmlIfChanged(content, self.tool_file_path,
encoding="Windows-1252")
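# Illustrative usage sketch (the file name, rule name and command below are
# hypothetical, not taken from any real gyp target):
#   writer = Writer('copy_files.rules', 'copy_files')
#   writer.AddCustomBuildRule(name='copy_rule',
#                             cmd='copy $(InputPath) $(OutDir)',
#                             description='Copying $(InputPath)',
#                             additional_dependencies=[],
#                             outputs=['$(OutDir)\\$(InputFileName)'],
#                             extensions=['txt'])
#   writer.WriteIfChanged()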
|
myerssr/volatility | refs/heads/master | volatility/plugins/mac/route.py | 44 | # Volatility
# Copyright (C) 2007-2013 Volatility Foundation
#
# This file is part of Volatility.
#
# Volatility is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Volatility is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Volatility. If not, see <http://www.gnu.org/licenses/>.
#
"""
@author: Andrew Case
@license: GNU General Public License 2.0
@contact: [email protected]
@organization:
"""
import datetime
import volatility.obj as obj
import volatility.plugins.mac.common as common
class mac_route(common.AbstractMacCommand):
""" Prints the routing table """
def _get_table(self, tbl):
rnh = tbl #obj.Object("radix_node", offset=tbl.v(), vm=self.addr_space)
rn = rnh.rnh_treetop
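        # Internal radix nodes have rn_bit >= 0; walk down the left branches
        # until the first (leftmost) leaf is reached.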
while rn.is_valid() and rn.rn_bit >= 0:
rn = rn.rn_u.rn_node.rn_L
rnhash = {}
while rn.is_valid():
base = rn
if rn in rnhash:
break
rnhash[rn] = 1
while rn.is_valid() and rn.rn_parent.rn_u.rn_node.rn_R == rn and rn.rn_flags & 2 == 0:
rn = rn.rn_parent
rn = rn.rn_parent.rn_u.rn_node.rn_R
while rn.is_valid() and rn.rn_bit >= 0:
rn = rn.rn_u.rn_node.rn_L
nextptr = rn
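            # Walk this leaf's duplicate-key chain, yielding a route entry for
            # every node that is not flagged as a ROOT marker (rn_flags & 2).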
while base.v() != 0:
rn = base
base = rn.rn_u.rn_leaf.rn_Dupedkey
if rn.rn_flags & 2 == 0:
rt = obj.Object("rtentry", offset = rn, vm = self.addr_space)
yield rt
rn = nextptr
if rn.rn_flags & 2 != 0:
break
def calculate(self):
common.set_plugin_members(self)
tables_addr = self.addr_space.profile.get_symbol("_rt_tables")
## FIXME: if we only use ents[2] why do we need to instantiate 32?
ents = obj.Object('Array', offset = tables_addr, vm = self.addr_space, targetType = 'Pointer', count = 32)
ipv4table = obj.Object("radix_node_head", offset = ents[2], vm = self.addr_space)
rts = self._get_table(ipv4table)
for rt in rts:
yield rt
def render_text(self, outfd, data):
self.table_header(outfd, [("Source IP", "24"),
("Dest. IP", "24"),
("Name", "^10"),
("Sent", "^18"),
("Recv", "^18"),
("Time", "^30"),
("Exp.", "^10"),
("Delta", "")])
for rt in data:
self.table_row(outfd,
rt.source_ip,
rt.dest_ip,
rt.name,
rt.sent, rt.rx,
rt.get_time(),
rt.rt_expire,
rt.delta)
|
plowman/python-mcparseface | refs/heads/master | models/syntaxnet/tensorflow/google/protobuf/python/google/protobuf/internal/text_format_test.py | 15 | #! /usr/bin/env python
#
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# https://developers.google.com/protocol-buffers/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Test for google.protobuf.text_format."""
__author__ = '[email protected] (Kenton Varda)'
import re
import six
import string
try:
import unittest2 as unittest
except ImportError:
import unittest
from google.protobuf.internal import _parameterized
from google.protobuf import map_unittest_pb2
from google.protobuf import unittest_mset_pb2
from google.protobuf import unittest_pb2
from google.protobuf import unittest_proto3_arena_pb2
from google.protobuf.internal import api_implementation
from google.protobuf.internal import test_util
from google.protobuf.internal import message_set_extensions_pb2
from google.protobuf import text_format
# Low-level nuts-n-bolts tests.
class SimpleTextFormatTests(unittest.TestCase):
# The members of _QUOTES are formatted into a regexp template that
# expects single characters. Therefore it's an error (in addition to being
# non-sensical in the first place) to try to specify a "quote mark" that is
# more than one character.
def TestQuoteMarksAreSingleChars(self):
for quote in text_format._QUOTES:
self.assertEqual(1, len(quote))
# Base class with some common functionality.
class TextFormatBase(unittest.TestCase):
def ReadGolden(self, golden_filename):
with test_util.GoldenFile(golden_filename) as f:
return (f.readlines() if str is bytes else # PY3
[golden_line.decode('utf-8') for golden_line in f])
def CompareToGoldenFile(self, text, golden_filename):
golden_lines = self.ReadGolden(golden_filename)
self.assertMultiLineEqual(text, ''.join(golden_lines))
def CompareToGoldenText(self, text, golden_text):
self.assertEqual(text, golden_text)
def RemoveRedundantZeros(self, text):
# Some platforms print 1e+5 as 1e+005. This is fine, but we need to remove
# these zeros in order to match the golden file.
text = text.replace('e+0','e+').replace('e+0','e+') \
.replace('e-0','e-').replace('e-0','e-')
# Floating point fields are printed with .0 suffix even if they are
    # actually integer numbers.
    text = re.compile(r'\.0$', re.MULTILINE).sub('', text)
return text
@_parameterized.Parameters(
(unittest_pb2),
(unittest_proto3_arena_pb2))
class TextFormatTest(TextFormatBase):
def testPrintExotic(self, message_module):
message = message_module.TestAllTypes()
message.repeated_int64.append(-9223372036854775808)
message.repeated_uint64.append(18446744073709551615)
message.repeated_double.append(123.456)
message.repeated_double.append(1.23e22)
message.repeated_double.append(1.23e-18)
message.repeated_string.append('\000\001\a\b\f\n\r\t\v\\\'"')
message.repeated_string.append(u'\u00fc\ua71f')
self.CompareToGoldenText(
self.RemoveRedundantZeros(text_format.MessageToString(message)),
'repeated_int64: -9223372036854775808\n'
'repeated_uint64: 18446744073709551615\n'
'repeated_double: 123.456\n'
'repeated_double: 1.23e+22\n'
'repeated_double: 1.23e-18\n'
'repeated_string:'
' "\\000\\001\\007\\010\\014\\n\\r\\t\\013\\\\\\\'\\""\n'
'repeated_string: "\\303\\274\\352\\234\\237"\n')
def testPrintExoticUnicodeSubclass(self, message_module):
class UnicodeSub(six.text_type):
pass
message = message_module.TestAllTypes()
message.repeated_string.append(UnicodeSub(u'\u00fc\ua71f'))
self.CompareToGoldenText(
text_format.MessageToString(message),
'repeated_string: "\\303\\274\\352\\234\\237"\n')
def testPrintNestedMessageAsOneLine(self, message_module):
message = message_module.TestAllTypes()
msg = message.repeated_nested_message.add()
msg.bb = 42
self.CompareToGoldenText(
text_format.MessageToString(message, as_one_line=True),
'repeated_nested_message { bb: 42 }')
def testPrintRepeatedFieldsAsOneLine(self, message_module):
message = message_module.TestAllTypes()
message.repeated_int32.append(1)
message.repeated_int32.append(1)
message.repeated_int32.append(3)
message.repeated_string.append('Google')
message.repeated_string.append('Zurich')
self.CompareToGoldenText(
text_format.MessageToString(message, as_one_line=True),
'repeated_int32: 1 repeated_int32: 1 repeated_int32: 3 '
'repeated_string: "Google" repeated_string: "Zurich"')
def testPrintNestedNewLineInStringAsOneLine(self, message_module):
message = message_module.TestAllTypes()
message.optional_string = 'a\nnew\nline'
self.CompareToGoldenText(
text_format.MessageToString(message, as_one_line=True),
'optional_string: "a\\nnew\\nline"')
def testPrintExoticAsOneLine(self, message_module):
message = message_module.TestAllTypes()
message.repeated_int64.append(-9223372036854775808)
message.repeated_uint64.append(18446744073709551615)
message.repeated_double.append(123.456)
message.repeated_double.append(1.23e22)
message.repeated_double.append(1.23e-18)
message.repeated_string.append('\000\001\a\b\f\n\r\t\v\\\'"')
message.repeated_string.append(u'\u00fc\ua71f')
self.CompareToGoldenText(
self.RemoveRedundantZeros(
text_format.MessageToString(message, as_one_line=True)),
'repeated_int64: -9223372036854775808'
' repeated_uint64: 18446744073709551615'
' repeated_double: 123.456'
' repeated_double: 1.23e+22'
' repeated_double: 1.23e-18'
' repeated_string: '
'"\\000\\001\\007\\010\\014\\n\\r\\t\\013\\\\\\\'\\""'
' repeated_string: "\\303\\274\\352\\234\\237"')
def testRoundTripExoticAsOneLine(self, message_module):
message = message_module.TestAllTypes()
message.repeated_int64.append(-9223372036854775808)
message.repeated_uint64.append(18446744073709551615)
message.repeated_double.append(123.456)
message.repeated_double.append(1.23e22)
message.repeated_double.append(1.23e-18)
message.repeated_string.append('\000\001\a\b\f\n\r\t\v\\\'"')
message.repeated_string.append(u'\u00fc\ua71f')
# Test as_utf8 = False.
wire_text = text_format.MessageToString(
message, as_one_line=True, as_utf8=False)
parsed_message = message_module.TestAllTypes()
r = text_format.Parse(wire_text, parsed_message)
self.assertIs(r, parsed_message)
self.assertEqual(message, parsed_message)
# Test as_utf8 = True.
wire_text = text_format.MessageToString(
message, as_one_line=True, as_utf8=True)
parsed_message = message_module.TestAllTypes()
r = text_format.Parse(wire_text, parsed_message)
self.assertIs(r, parsed_message)
self.assertEqual(message, parsed_message,
'\n%s != %s' % (message, parsed_message))
def testPrintRawUtf8String(self, message_module):
message = message_module.TestAllTypes()
message.repeated_string.append(u'\u00fc\ua71f')
text = text_format.MessageToString(message, as_utf8=True)
self.CompareToGoldenText(text, 'repeated_string: "\303\274\352\234\237"\n')
parsed_message = message_module.TestAllTypes()
text_format.Parse(text, parsed_message)
self.assertEqual(message, parsed_message,
'\n%s != %s' % (message, parsed_message))
def testPrintFloatFormat(self, message_module):
# Check that float_format argument is passed to sub-message formatting.
message = message_module.NestedTestAllTypes()
# We use 1.25 as it is a round number in binary. The proto 32-bit float
# will not gain additional imprecise digits as a 64-bit Python float and
# show up in its str. 32-bit 1.2 is noisy when extended to 64-bit:
# >>> struct.unpack('f', struct.pack('f', 1.2))[0]
# 1.2000000476837158
# >>> struct.unpack('f', struct.pack('f', 1.25))[0]
# 1.25
message.payload.optional_float = 1.25
# Check rounding at 15 significant digits
message.payload.optional_double = -.000003456789012345678
# Check no decimal point.
message.payload.repeated_float.append(-5642)
# Check no trailing zeros.
message.payload.repeated_double.append(.000078900)
formatted_fields = ['optional_float: 1.25',
'optional_double: -3.45678901234568e-6',
'repeated_float: -5642',
'repeated_double: 7.89e-5']
text_message = text_format.MessageToString(message, float_format='.15g')
self.CompareToGoldenText(
self.RemoveRedundantZeros(text_message),
'payload {{\n {0}\n {1}\n {2}\n {3}\n}}\n'.format(*formatted_fields))
# as_one_line=True is a separate code branch where float_format is passed.
text_message = text_format.MessageToString(message, as_one_line=True,
float_format='.15g')
self.CompareToGoldenText(
self.RemoveRedundantZeros(text_message),
'payload {{ {0} {1} {2} {3} }}'.format(*formatted_fields))
def testMessageToString(self, message_module):
message = message_module.ForeignMessage()
message.c = 123
self.assertEqual('c: 123\n', str(message))
def testParseAllFields(self, message_module):
message = message_module.TestAllTypes()
test_util.SetAllFields(message)
ascii_text = text_format.MessageToString(message)
parsed_message = message_module.TestAllTypes()
text_format.Parse(ascii_text, parsed_message)
self.assertEqual(message, parsed_message)
if message_module is unittest_pb2:
test_util.ExpectAllFieldsSet(self, message)
def testParseExotic(self, message_module):
message = message_module.TestAllTypes()
text = ('repeated_int64: -9223372036854775808\n'
'repeated_uint64: 18446744073709551615\n'
'repeated_double: 123.456\n'
'repeated_double: 1.23e+22\n'
'repeated_double: 1.23e-18\n'
'repeated_string: \n'
'"\\000\\001\\007\\010\\014\\n\\r\\t\\013\\\\\\\'\\""\n'
'repeated_string: "foo" \'corge\' "grault"\n'
'repeated_string: "\\303\\274\\352\\234\\237"\n'
'repeated_string: "\\xc3\\xbc"\n'
'repeated_string: "\xc3\xbc"\n')
text_format.Parse(text, message)
self.assertEqual(-9223372036854775808, message.repeated_int64[0])
self.assertEqual(18446744073709551615, message.repeated_uint64[0])
self.assertEqual(123.456, message.repeated_double[0])
self.assertEqual(1.23e22, message.repeated_double[1])
self.assertEqual(1.23e-18, message.repeated_double[2])
self.assertEqual(
'\000\001\a\b\f\n\r\t\v\\\'"', message.repeated_string[0])
self.assertEqual('foocorgegrault', message.repeated_string[1])
self.assertEqual(u'\u00fc\ua71f', message.repeated_string[2])
self.assertEqual(u'\u00fc', message.repeated_string[3])
def testParseTrailingCommas(self, message_module):
message = message_module.TestAllTypes()
text = ('repeated_int64: 100;\n'
'repeated_int64: 200;\n'
'repeated_int64: 300,\n'
'repeated_string: "one",\n'
'repeated_string: "two";\n')
text_format.Parse(text, message)
self.assertEqual(100, message.repeated_int64[0])
self.assertEqual(200, message.repeated_int64[1])
self.assertEqual(300, message.repeated_int64[2])
self.assertEqual(u'one', message.repeated_string[0])
self.assertEqual(u'two', message.repeated_string[1])
def testParseRepeatedScalarShortFormat(self, message_module):
message = message_module.TestAllTypes()
text = ('repeated_int64: [100, 200];\n'
'repeated_int64: 300,\n'
'repeated_string: ["one", "two"];\n')
text_format.Parse(text, message)
self.assertEqual(100, message.repeated_int64[0])
self.assertEqual(200, message.repeated_int64[1])
self.assertEqual(300, message.repeated_int64[2])
self.assertEqual(u'one', message.repeated_string[0])
self.assertEqual(u'two', message.repeated_string[1])
def testParseEmptyText(self, message_module):
message = message_module.TestAllTypes()
text = ''
text_format.Parse(text, message)
self.assertEqual(message_module.TestAllTypes(), message)
def testParseInvalidUtf8(self, message_module):
message = message_module.TestAllTypes()
text = 'repeated_string: "\\xc3\\xc3"'
self.assertRaises(text_format.ParseError, text_format.Parse, text, message)
def testParseSingleWord(self, message_module):
message = message_module.TestAllTypes()
text = 'foo'
six.assertRaisesRegex(self,
text_format.ParseError,
(r'1:1 : Message type "\w+.TestAllTypes" has no field named '
r'"foo".'),
text_format.Parse, text, message)
def testParseUnknownField(self, message_module):
message = message_module.TestAllTypes()
text = 'unknown_field: 8\n'
six.assertRaisesRegex(self,
text_format.ParseError,
(r'1:1 : Message type "\w+.TestAllTypes" has no field named '
r'"unknown_field".'),
text_format.Parse, text, message)
def testParseBadEnumValue(self, message_module):
message = message_module.TestAllTypes()
text = 'optional_nested_enum: BARR'
six.assertRaisesRegex(self,
text_format.ParseError,
(r'1:23 : Enum type "\w+.TestAllTypes.NestedEnum" '
r'has no value named BARR.'),
text_format.Parse, text, message)
message = message_module.TestAllTypes()
text = 'optional_nested_enum: 100'
six.assertRaisesRegex(self,
text_format.ParseError,
(r'1:23 : Enum type "\w+.TestAllTypes.NestedEnum" '
r'has no value with number 100.'),
text_format.Parse, text, message)
def testParseBadIntValue(self, message_module):
message = message_module.TestAllTypes()
text = 'optional_int32: bork'
six.assertRaisesRegex(self,
text_format.ParseError,
('1:17 : Couldn\'t parse integer: bork'),
text_format.Parse, text, message)
def testParseStringFieldUnescape(self, message_module):
message = message_module.TestAllTypes()
text = r'''repeated_string: "\xf\x62"
repeated_string: "\\xf\\x62"
repeated_string: "\\\xf\\\x62"
repeated_string: "\\\\xf\\\\x62"
repeated_string: "\\\\\xf\\\\\x62"
repeated_string: "\x5cx20"'''
text_format.Parse(text, message)
SLASH = '\\'
self.assertEqual('\x0fb', message.repeated_string[0])
self.assertEqual(SLASH + 'xf' + SLASH + 'x62', message.repeated_string[1])
self.assertEqual(SLASH + '\x0f' + SLASH + 'b', message.repeated_string[2])
self.assertEqual(SLASH + SLASH + 'xf' + SLASH + SLASH + 'x62',
message.repeated_string[3])
self.assertEqual(SLASH + SLASH + '\x0f' + SLASH + SLASH + 'b',
message.repeated_string[4])
self.assertEqual(SLASH + 'x20', message.repeated_string[5])
def testMergeDuplicateScalars(self, message_module):
message = message_module.TestAllTypes()
text = ('optional_int32: 42 '
'optional_int32: 67')
r = text_format.Merge(text, message)
self.assertIs(r, message)
self.assertEqual(67, message.optional_int32)
def testMergeDuplicateNestedMessageScalars(self, message_module):
message = message_module.TestAllTypes()
text = ('optional_nested_message { bb: 1 } '
'optional_nested_message { bb: 2 }')
r = text_format.Merge(text, message)
self.assertTrue(r is message)
self.assertEqual(2, message.optional_nested_message.bb)
def testParseOneof(self, message_module):
m = message_module.TestAllTypes()
m.oneof_uint32 = 11
m2 = message_module.TestAllTypes()
text_format.Parse(text_format.MessageToString(m), m2)
self.assertEqual('oneof_uint32', m2.WhichOneof('oneof_field'))
# These are tests that aren't fundamentally specific to proto2, but are at
# the moment because of differences between the proto2 and proto3 test schemas.
# Ideally the schemas would be made more similar so these tests could pass.
class OnlyWorksWithProto2RightNowTests(TextFormatBase):
def testPrintAllFieldsPointy(self):
message = unittest_pb2.TestAllTypes()
test_util.SetAllFields(message)
self.CompareToGoldenFile(
self.RemoveRedundantZeros(
text_format.MessageToString(message, pointy_brackets=True)),
'text_format_unittest_data_pointy_oneof.txt')
def testParseGolden(self):
golden_text = '\n'.join(self.ReadGolden('text_format_unittest_data.txt'))
parsed_message = unittest_pb2.TestAllTypes()
r = text_format.Parse(golden_text, parsed_message)
self.assertIs(r, parsed_message)
message = unittest_pb2.TestAllTypes()
test_util.SetAllFields(message)
self.assertEqual(message, parsed_message)
def testPrintAllFields(self):
message = unittest_pb2.TestAllTypes()
test_util.SetAllFields(message)
self.CompareToGoldenFile(
self.RemoveRedundantZeros(text_format.MessageToString(message)),
'text_format_unittest_data_oneof_implemented.txt')
def testPrintAllFieldsPointy(self):
message = unittest_pb2.TestAllTypes()
test_util.SetAllFields(message)
self.CompareToGoldenFile(
self.RemoveRedundantZeros(
text_format.MessageToString(message, pointy_brackets=True)),
'text_format_unittest_data_pointy_oneof.txt')
def testPrintInIndexOrder(self):
message = unittest_pb2.TestFieldOrderings()
message.my_string = '115'
message.my_int = 101
message.my_float = 111
message.optional_nested_message.oo = 0
message.optional_nested_message.bb = 1
self.CompareToGoldenText(
self.RemoveRedundantZeros(text_format.MessageToString(
message, use_index_order=True)),
'my_string: \"115\"\nmy_int: 101\nmy_float: 111\n'
'optional_nested_message {\n oo: 0\n bb: 1\n}\n')
self.CompareToGoldenText(
self.RemoveRedundantZeros(text_format.MessageToString(
message)),
'my_int: 101\nmy_string: \"115\"\nmy_float: 111\n'
'optional_nested_message {\n bb: 1\n oo: 0\n}\n')
def testMergeLinesGolden(self):
opened = self.ReadGolden('text_format_unittest_data.txt')
parsed_message = unittest_pb2.TestAllTypes()
r = text_format.MergeLines(opened, parsed_message)
self.assertIs(r, parsed_message)
message = unittest_pb2.TestAllTypes()
test_util.SetAllFields(message)
self.assertEqual(message, parsed_message)
def testParseLinesGolden(self):
opened = self.ReadGolden('text_format_unittest_data.txt')
parsed_message = unittest_pb2.TestAllTypes()
r = text_format.ParseLines(opened, parsed_message)
self.assertIs(r, parsed_message)
message = unittest_pb2.TestAllTypes()
test_util.SetAllFields(message)
self.assertEqual(message, parsed_message)
def testPrintMap(self):
message = map_unittest_pb2.TestMap()
message.map_int32_int32[-123] = -456
message.map_int64_int64[-2**33] = -2**34
message.map_uint32_uint32[123] = 456
message.map_uint64_uint64[2**33] = 2**34
message.map_string_string["abc"] = "123"
message.map_int32_foreign_message[111].c = 5
# Maps are serialized to text format using their underlying repeated
# representation.
self.CompareToGoldenText(
text_format.MessageToString(message),
'map_int32_int32 {\n'
' key: -123\n'
' value: -456\n'
'}\n'
'map_int64_int64 {\n'
' key: -8589934592\n'
' value: -17179869184\n'
'}\n'
'map_uint32_uint32 {\n'
' key: 123\n'
' value: 456\n'
'}\n'
'map_uint64_uint64 {\n'
' key: 8589934592\n'
' value: 17179869184\n'
'}\n'
'map_string_string {\n'
' key: "abc"\n'
' value: "123"\n'
'}\n'
'map_int32_foreign_message {\n'
' key: 111\n'
' value {\n'
' c: 5\n'
' }\n'
'}\n')
def testMapOrderEnforcement(self):
message = map_unittest_pb2.TestMap()
for letter in string.ascii_uppercase[13:26]:
message.map_string_string[letter] = 'dummy'
for letter in reversed(string.ascii_uppercase[0:13]):
message.map_string_string[letter] = 'dummy'
golden = ''.join((
'map_string_string {\n key: "%c"\n value: "dummy"\n}\n' % (letter,)
for letter in string.ascii_uppercase))
self.CompareToGoldenText(text_format.MessageToString(message), golden)
def testMapOrderSemantics(self):
golden_lines = self.ReadGolden('map_test_data.txt')
# The C++ implementation emits defaulted-value fields, while the Python
# implementation does not. Adjusting for this is awkward, but it is
# valuable to test against a common golden file.
line_blacklist = (' key: 0\n',
' value: 0\n',
' key: false\n',
' value: false\n')
golden_lines = [line for line in golden_lines if line not in line_blacklist]
message = map_unittest_pb2.TestMap()
text_format.ParseLines(golden_lines, message)
candidate = text_format.MessageToString(message)
# The Python implementation emits "1.0" for the double value that the C++
# implementation emits as "1".
candidate = candidate.replace('1.0', '1', 2)
self.assertMultiLineEqual(candidate, ''.join(golden_lines))
# Tests of proto2-only features (MessageSet, extensions, etc.).
class Proto2Tests(TextFormatBase):
def testPrintMessageSet(self):
message = unittest_mset_pb2.TestMessageSetContainer()
ext1 = unittest_mset_pb2.TestMessageSetExtension1.message_set_extension
ext2 = unittest_mset_pb2.TestMessageSetExtension2.message_set_extension
message.message_set.Extensions[ext1].i = 23
message.message_set.Extensions[ext2].str = 'foo'
self.CompareToGoldenText(
text_format.MessageToString(message),
'message_set {\n'
' [protobuf_unittest.TestMessageSetExtension1] {\n'
' i: 23\n'
' }\n'
' [protobuf_unittest.TestMessageSetExtension2] {\n'
' str: \"foo\"\n'
' }\n'
'}\n')
message = message_set_extensions_pb2.TestMessageSet()
ext = message_set_extensions_pb2.message_set_extension3
message.Extensions[ext].text = 'bar'
self.CompareToGoldenText(
text_format.MessageToString(message),
'[google.protobuf.internal.TestMessageSetExtension3] {\n'
' text: \"bar\"\n'
'}\n')
def testPrintMessageSetAsOneLine(self):
message = unittest_mset_pb2.TestMessageSetContainer()
ext1 = unittest_mset_pb2.TestMessageSetExtension1.message_set_extension
ext2 = unittest_mset_pb2.TestMessageSetExtension2.message_set_extension
message.message_set.Extensions[ext1].i = 23
message.message_set.Extensions[ext2].str = 'foo'
self.CompareToGoldenText(
text_format.MessageToString(message, as_one_line=True),
'message_set {'
' [protobuf_unittest.TestMessageSetExtension1] {'
' i: 23'
' }'
' [protobuf_unittest.TestMessageSetExtension2] {'
' str: \"foo\"'
' }'
' }')
def testParseMessageSet(self):
message = unittest_pb2.TestAllTypes()
text = ('repeated_uint64: 1\n'
'repeated_uint64: 2\n')
text_format.Parse(text, message)
self.assertEqual(1, message.repeated_uint64[0])
self.assertEqual(2, message.repeated_uint64[1])
message = unittest_mset_pb2.TestMessageSetContainer()
text = ('message_set {\n'
' [protobuf_unittest.TestMessageSetExtension1] {\n'
' i: 23\n'
' }\n'
' [protobuf_unittest.TestMessageSetExtension2] {\n'
' str: \"foo\"\n'
' }\n'
'}\n')
text_format.Parse(text, message)
ext1 = unittest_mset_pb2.TestMessageSetExtension1.message_set_extension
ext2 = unittest_mset_pb2.TestMessageSetExtension2.message_set_extension
self.assertEqual(23, message.message_set.Extensions[ext1].i)
self.assertEqual('foo', message.message_set.Extensions[ext2].str)
def testPrintAllExtensions(self):
message = unittest_pb2.TestAllExtensions()
test_util.SetAllExtensions(message)
self.CompareToGoldenFile(
self.RemoveRedundantZeros(text_format.MessageToString(message)),
'text_format_unittest_extensions_data.txt')
def testPrintAllExtensionsPointy(self):
message = unittest_pb2.TestAllExtensions()
test_util.SetAllExtensions(message)
self.CompareToGoldenFile(
self.RemoveRedundantZeros(text_format.MessageToString(
message, pointy_brackets=True)),
'text_format_unittest_extensions_data_pointy.txt')
def testParseGoldenExtensions(self):
golden_text = '\n'.join(self.ReadGolden(
'text_format_unittest_extensions_data.txt'))
parsed_message = unittest_pb2.TestAllExtensions()
text_format.Parse(golden_text, parsed_message)
message = unittest_pb2.TestAllExtensions()
test_util.SetAllExtensions(message)
self.assertEqual(message, parsed_message)
def testParseAllExtensions(self):
message = unittest_pb2.TestAllExtensions()
test_util.SetAllExtensions(message)
ascii_text = text_format.MessageToString(message)
parsed_message = unittest_pb2.TestAllExtensions()
text_format.Parse(ascii_text, parsed_message)
self.assertEqual(message, parsed_message)
def testParseAllowedUnknownExtension(self):
# Skip over unknown extension correctly.
message = unittest_mset_pb2.TestMessageSetContainer()
text = ('message_set {\n'
' [unknown_extension] {\n'
' i: 23\n'
' [nested_unknown_ext]: {\n'
' i: 23\n'
' test: "test_string"\n'
' floaty_float: -0.315\n'
' num: -inf\n'
' multiline_str: "abc"\n'
' "def"\n'
' "xyz."\n'
' [nested_unknown_ext]: <\n'
' i: 23\n'
' i: 24\n'
' pointfloat: .3\n'
' test: "test_string"\n'
' floaty_float: -0.315\n'
' num: -inf\n'
' long_string: "test" "test2" \n'
' >\n'
' }\n'
' }\n'
' [unknown_extension]: 5\n'
'}\n')
text_format.Parse(text, message, allow_unknown_extension=True)
golden = 'message_set {\n}\n'
self.CompareToGoldenText(text_format.MessageToString(message), golden)
# Catch parse errors in unknown extension.
message = unittest_mset_pb2.TestMessageSetContainer()
malformed = ('message_set {\n'
' [unknown_extension] {\n'
' i:\n' # Missing value.
' }\n'
'}\n')
six.assertRaisesRegex(self,
text_format.ParseError,
'Invalid field value: }',
text_format.Parse, malformed, message,
allow_unknown_extension=True)
message = unittest_mset_pb2.TestMessageSetContainer()
malformed = ('message_set {\n'
' [unknown_extension] {\n'
' str: "malformed string\n' # Missing closing quote.
' }\n'
'}\n')
six.assertRaisesRegex(self,
text_format.ParseError,
'Invalid field value: "',
text_format.Parse, malformed, message,
allow_unknown_extension=True)
message = unittest_mset_pb2.TestMessageSetContainer()
malformed = ('message_set {\n'
' [unknown_extension] {\n'
' str: "malformed\n multiline\n string\n'
' }\n'
'}\n')
six.assertRaisesRegex(self,
text_format.ParseError,
'Invalid field value: "',
text_format.Parse, malformed, message,
allow_unknown_extension=True)
message = unittest_mset_pb2.TestMessageSetContainer()
malformed = ('message_set {\n'
' [malformed_extension] <\n'
' i: -5\n'
' \n' # Missing '>' here.
'}\n')
six.assertRaisesRegex(self,
text_format.ParseError,
'5:1 : Expected ">".',
text_format.Parse, malformed, message,
allow_unknown_extension=True)
# Don't allow unknown fields with allow_unknown_extension=True.
message = unittest_mset_pb2.TestMessageSetContainer()
malformed = ('message_set {\n'
' unknown_field: true\n'
' \n' # Missing '>' here.
'}\n')
six.assertRaisesRegex(self,
text_format.ParseError,
('2:3 : Message type '
'"proto2_wireformat_unittest.TestMessageSet" has no'
' field named "unknown_field".'),
text_format.Parse, malformed, message,
allow_unknown_extension=True)
    # Parse known extension correctly.
message = unittest_mset_pb2.TestMessageSetContainer()
text = ('message_set {\n'
' [protobuf_unittest.TestMessageSetExtension1] {\n'
' i: 23\n'
' }\n'
' [protobuf_unittest.TestMessageSetExtension2] {\n'
' str: \"foo\"\n'
' }\n'
'}\n')
text_format.Parse(text, message, allow_unknown_extension=True)
ext1 = unittest_mset_pb2.TestMessageSetExtension1.message_set_extension
ext2 = unittest_mset_pb2.TestMessageSetExtension2.message_set_extension
self.assertEqual(23, message.message_set.Extensions[ext1].i)
self.assertEqual('foo', message.message_set.Extensions[ext2].str)
def testParseBadExtension(self):
message = unittest_pb2.TestAllExtensions()
text = '[unknown_extension]: 8\n'
six.assertRaisesRegex(self,
text_format.ParseError,
'1:2 : Extension "unknown_extension" not registered.',
text_format.Parse, text, message)
message = unittest_pb2.TestAllTypes()
six.assertRaisesRegex(self,
text_format.ParseError,
('1:2 : Message type "protobuf_unittest.TestAllTypes" does not have '
'extensions.'),
text_format.Parse, text, message)
def testMergeDuplicateExtensionScalars(self):
message = unittest_pb2.TestAllExtensions()
text = ('[protobuf_unittest.optional_int32_extension]: 42 '
'[protobuf_unittest.optional_int32_extension]: 67')
text_format.Merge(text, message)
self.assertEqual(
67,
message.Extensions[unittest_pb2.optional_int32_extension])
def testParseDuplicateExtensionScalars(self):
message = unittest_pb2.TestAllExtensions()
text = ('[protobuf_unittest.optional_int32_extension]: 42 '
'[protobuf_unittest.optional_int32_extension]: 67')
six.assertRaisesRegex(self,
text_format.ParseError,
('1:96 : Message type "protobuf_unittest.TestAllExtensions" '
'should not have multiple '
'"protobuf_unittest.optional_int32_extension" extensions.'),
text_format.Parse, text, message)
def testParseDuplicateNestedMessageScalars(self):
message = unittest_pb2.TestAllTypes()
text = ('optional_nested_message { bb: 1 } '
'optional_nested_message { bb: 2 }')
six.assertRaisesRegex(self,
text_format.ParseError,
('1:65 : Message type "protobuf_unittest.TestAllTypes.NestedMessage" '
'should not have multiple "bb" fields.'),
text_format.Parse, text, message)
def testParseDuplicateScalars(self):
message = unittest_pb2.TestAllTypes()
text = ('optional_int32: 42 '
'optional_int32: 67')
six.assertRaisesRegex(self,
text_format.ParseError,
('1:36 : Message type "protobuf_unittest.TestAllTypes" should not '
'have multiple "optional_int32" fields.'),
text_format.Parse, text, message)
def testParseGroupNotClosed(self):
message = unittest_pb2.TestAllTypes()
text = 'RepeatedGroup: <'
six.assertRaisesRegex(self,
text_format.ParseError, '1:16 : Expected ">".',
text_format.Parse, text, message)
text = 'RepeatedGroup: {'
six.assertRaisesRegex(self,
text_format.ParseError, '1:16 : Expected "}".',
text_format.Parse, text, message)
def testParseEmptyGroup(self):
message = unittest_pb2.TestAllTypes()
text = 'OptionalGroup: {}'
text_format.Parse(text, message)
self.assertTrue(message.HasField('optionalgroup'))
message.Clear()
message = unittest_pb2.TestAllTypes()
text = 'OptionalGroup: <>'
text_format.Parse(text, message)
self.assertTrue(message.HasField('optionalgroup'))
# Maps aren't really proto2-only, but our test schema only has maps for
# proto2.
def testParseMap(self):
text = ('map_int32_int32 {\n'
' key: -123\n'
' value: -456\n'
'}\n'
'map_int64_int64 {\n'
' key: -8589934592\n'
' value: -17179869184\n'
'}\n'
'map_uint32_uint32 {\n'
' key: 123\n'
' value: 456\n'
'}\n'
'map_uint64_uint64 {\n'
' key: 8589934592\n'
' value: 17179869184\n'
'}\n'
'map_string_string {\n'
' key: "abc"\n'
' value: "123"\n'
'}\n'
'map_int32_foreign_message {\n'
' key: 111\n'
' value {\n'
' c: 5\n'
' }\n'
'}\n')
message = map_unittest_pb2.TestMap()
text_format.Parse(text, message)
self.assertEqual(-456, message.map_int32_int32[-123])
self.assertEqual(-2**34, message.map_int64_int64[-2**33])
self.assertEqual(456, message.map_uint32_uint32[123])
self.assertEqual(2**34, message.map_uint64_uint64[2**33])
self.assertEqual("123", message.map_string_string["abc"])
self.assertEqual(5, message.map_int32_foreign_message[111].c)
class TokenizerTest(unittest.TestCase):
def testSimpleTokenCases(self):
text = ('identifier1:"string1"\n \n\n'
'identifier2 : \n \n123 \n identifier3 :\'string\'\n'
'identifiER_4 : 1.1e+2 ID5:-0.23 ID6:\'aaaa\\\'bbbb\'\n'
'ID7 : "aa\\"bb"\n\n\n\n ID8: {A:inf B:-inf C:true D:false}\n'
'ID9: 22 ID10: -111111111111111111 ID11: -22\n'
'ID12: 2222222222222222222 ID13: 1.23456f ID14: 1.2e+2f '
'false_bool: 0 true_BOOL:t \n true_bool1: 1 false_BOOL1:f ')
tokenizer = text_format._Tokenizer(text.splitlines())
methods = [(tokenizer.ConsumeIdentifier, 'identifier1'),
':',
(tokenizer.ConsumeString, 'string1'),
(tokenizer.ConsumeIdentifier, 'identifier2'),
':',
(tokenizer.ConsumeInt32, 123),
(tokenizer.ConsumeIdentifier, 'identifier3'),
':',
(tokenizer.ConsumeString, 'string'),
(tokenizer.ConsumeIdentifier, 'identifiER_4'),
':',
(tokenizer.ConsumeFloat, 1.1e+2),
(tokenizer.ConsumeIdentifier, 'ID5'),
':',
(tokenizer.ConsumeFloat, -0.23),
(tokenizer.ConsumeIdentifier, 'ID6'),
':',
(tokenizer.ConsumeString, 'aaaa\'bbbb'),
(tokenizer.ConsumeIdentifier, 'ID7'),
':',
(tokenizer.ConsumeString, 'aa\"bb'),
(tokenizer.ConsumeIdentifier, 'ID8'),
':',
'{',
(tokenizer.ConsumeIdentifier, 'A'),
':',
(tokenizer.ConsumeFloat, float('inf')),
(tokenizer.ConsumeIdentifier, 'B'),
':',
(tokenizer.ConsumeFloat, -float('inf')),
(tokenizer.ConsumeIdentifier, 'C'),
':',
(tokenizer.ConsumeBool, True),
(tokenizer.ConsumeIdentifier, 'D'),
':',
(tokenizer.ConsumeBool, False),
'}',
(tokenizer.ConsumeIdentifier, 'ID9'),
':',
(tokenizer.ConsumeUint32, 22),
(tokenizer.ConsumeIdentifier, 'ID10'),
':',
(tokenizer.ConsumeInt64, -111111111111111111),
(tokenizer.ConsumeIdentifier, 'ID11'),
':',
(tokenizer.ConsumeInt32, -22),
(tokenizer.ConsumeIdentifier, 'ID12'),
':',
(tokenizer.ConsumeUint64, 2222222222222222222),
(tokenizer.ConsumeIdentifier, 'ID13'),
':',
(tokenizer.ConsumeFloat, 1.23456),
(tokenizer.ConsumeIdentifier, 'ID14'),
':',
(tokenizer.ConsumeFloat, 1.2e+2),
(tokenizer.ConsumeIdentifier, 'false_bool'),
':',
(tokenizer.ConsumeBool, False),
(tokenizer.ConsumeIdentifier, 'true_BOOL'),
':',
(tokenizer.ConsumeBool, True),
(tokenizer.ConsumeIdentifier, 'true_bool1'),
':',
(tokenizer.ConsumeBool, True),
(tokenizer.ConsumeIdentifier, 'false_BOOL1'),
':',
(tokenizer.ConsumeBool, False)]
i = 0
while not tokenizer.AtEnd():
m = methods[i]
if type(m) == str:
token = tokenizer.token
self.assertEqual(token, m)
tokenizer.NextToken()
else:
self.assertEqual(m[1], m[0]())
i += 1
def testConsumeIntegers(self):
# This test only tests the failures in the integer parsing methods as well
# as the '0' special cases.
int64_max = (1 << 63) - 1
uint32_max = (1 << 32) - 1
text = '-1 %d %d' % (uint32_max + 1, int64_max + 1)
tokenizer = text_format._Tokenizer(text.splitlines())
self.assertRaises(text_format.ParseError, tokenizer.ConsumeUint32)
self.assertRaises(text_format.ParseError, tokenizer.ConsumeUint64)
self.assertEqual(-1, tokenizer.ConsumeInt32())
self.assertRaises(text_format.ParseError, tokenizer.ConsumeUint32)
self.assertRaises(text_format.ParseError, tokenizer.ConsumeInt32)
self.assertEqual(uint32_max + 1, tokenizer.ConsumeInt64())
self.assertRaises(text_format.ParseError, tokenizer.ConsumeInt64)
self.assertEqual(int64_max + 1, tokenizer.ConsumeUint64())
self.assertTrue(tokenizer.AtEnd())
text = '-0 -0 0 0'
tokenizer = text_format._Tokenizer(text.splitlines())
self.assertEqual(0, tokenizer.ConsumeUint32())
self.assertEqual(0, tokenizer.ConsumeUint64())
self.assertEqual(0, tokenizer.ConsumeUint32())
self.assertEqual(0, tokenizer.ConsumeUint64())
self.assertTrue(tokenizer.AtEnd())
def testConsumeByteString(self):
text = '"string1\''
tokenizer = text_format._Tokenizer(text.splitlines())
self.assertRaises(text_format.ParseError, tokenizer.ConsumeByteString)
text = 'string1"'
tokenizer = text_format._Tokenizer(text.splitlines())
self.assertRaises(text_format.ParseError, tokenizer.ConsumeByteString)
text = '\n"\\xt"'
tokenizer = text_format._Tokenizer(text.splitlines())
self.assertRaises(text_format.ParseError, tokenizer.ConsumeByteString)
text = '\n"\\"'
tokenizer = text_format._Tokenizer(text.splitlines())
self.assertRaises(text_format.ParseError, tokenizer.ConsumeByteString)
text = '\n"\\x"'
tokenizer = text_format._Tokenizer(text.splitlines())
self.assertRaises(text_format.ParseError, tokenizer.ConsumeByteString)
def testConsumeBool(self):
text = 'not-a-bool'
tokenizer = text_format._Tokenizer(text.splitlines())
self.assertRaises(text_format.ParseError, tokenizer.ConsumeBool)
if __name__ == '__main__':
unittest.main()
|
sestrella/ansible | refs/heads/devel | lib/ansible/modules/cloud/vmware/vmware_drs_rule_info.py | 21 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Abhijeet Kasurde <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = r'''
---
module: vmware_drs_rule_info
short_description: Gathers info about DRS rule on the given cluster
description:
- 'This module can be used to gather information about DRS VM-VM and VM-HOST rules from the given cluster.'
version_added: '2.9'
author:
- Abhijeet Kasurde (@Akasurde)
notes:
- Tested on vSphere 6.5
requirements:
- python >= 2.6
- PyVmomi
options:
cluster_name:
description:
- Name of the cluster.
- DRS information for the given cluster will be returned.
- This is required parameter if C(datacenter) parameter is not provided.
type: str
datacenter:
description:
- Name of the datacenter.
- DRS information for all the clusters from the given datacenter will be returned.
- This is required parameter if C(cluster_name) parameter is not provided.
type: str
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = r'''
- name: Gather DRS info about given Cluster
vmware_drs_rule_info:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
cluster_name: '{{ cluster_name }}'
delegate_to: localhost
register: cluster_drs_info
- name: Gather DRS info about all Clusters in given datacenter
vmware_drs_rule_info:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
datacenter: '{{ datacenter_name }}'
delegate_to: localhost
register: datacenter_drs_info
'''
RETURN = r'''
drs_rule_info:
description: metadata about DRS rule from given cluster / datacenter
returned: always
type: dict
sample: {
"DC0_C0": [
{
"rule_affinity": false,
"rule_enabled": true,
"rule_key": 1,
"rule_mandatory": true,
"rule_name": "drs_rule_0001",
"rule_type": "vm_vm_rule",
"rule_uuid": "52be5061-665a-68dc-3d25-85cd2d37e114",
"rule_vms": [
"VM_65",
"VM_146"
]
},
],
"DC1_C1": [
{
"rule_affine_host_group_name": "host_group_1",
"rule_affine_hosts": [
"10.76.33.204"
],
"rule_anti_affine_host_group_name": null,
"rule_anti_affine_hosts": [],
"rule_enabled": true,
"rule_key": 1,
"rule_mandatory": false,
"rule_name": "vm_host_rule_0001",
"rule_type": "vm_host_rule",
"rule_uuid": "52687108-4d3a-76f2-d29c-b708c40dbe40",
"rule_vm_group_name": "test_vm_group_1",
"rule_vms": [
"VM_8916",
"VM_4010"
]
}
],
}
'''
try:
from pyVmomi import vim
except ImportError:
pass
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.vmware import vmware_argument_spec, PyVmomi, find_datacenter_by_name, get_all_objs
class VmwareDrsInfoManager(PyVmomi):
def __init__(self, module):
super(VmwareDrsInfoManager, self).__init__(module)
datacenter_name = self.params.get('datacenter', None)
if datacenter_name:
datacenter_obj = find_datacenter_by_name(self.content, datacenter_name=datacenter_name)
self.cluster_obj_list = []
if datacenter_obj:
folder = datacenter_obj.hostFolder
self.cluster_obj_list = get_all_objs(self.content, [vim.ClusterComputeResource], folder)
else:
self.module.fail_json(changed=False, msg="Datacenter '%s' not found" % datacenter_name)
cluster_name = self.params.get('cluster_name', None)
if cluster_name:
cluster_obj = self.find_cluster_by_name(cluster_name=cluster_name)
if cluster_obj is None:
self.module.fail_json(changed=False, msg="Cluster '%s' not found" % cluster_name)
else:
self.cluster_obj_list = [cluster_obj]
def get_all_from_group(self, group_name=None, cluster_obj=None, hostgroup=False):
"""
Return all VM / Host names using given group name
Args:
            group_name: Name of the VM / Host group to look up
cluster_obj: Cluster managed object
hostgroup: True if we want only host name from group
Returns: List of VM / Host names belonging to given group object
"""
obj_name_list = []
if not all([group_name, cluster_obj]):
return obj_name_list
for group in cluster_obj.configurationEx.group:
if group.name == group_name:
if not hostgroup and isinstance(group, vim.cluster.VmGroup):
obj_name_list = [vm.name for vm in group.vm]
break
elif hostgroup and isinstance(group, vim.cluster.HostGroup):
obj_name_list = [host.name for host in group.host]
break
return obj_name_list
@staticmethod
def normalize_vm_vm_rule_spec(rule_obj=None):
"""
Return human readable rule spec
Args:
rule_obj: Rule managed object
Returns: Dictionary with DRS VM VM Rule info
"""
if rule_obj is None:
return {}
return dict(rule_key=rule_obj.key,
rule_enabled=rule_obj.enabled,
rule_name=rule_obj.name,
rule_mandatory=rule_obj.mandatory,
rule_uuid=rule_obj.ruleUuid,
rule_vms=[vm.name for vm in rule_obj.vm],
rule_type="vm_vm_rule",
rule_affinity=True if isinstance(rule_obj, vim.cluster.AffinityRuleSpec) else False,
)
def normalize_vm_host_rule_spec(self, rule_obj=None, cluster_obj=None):
"""
Return human readable rule spec
Args:
rule_obj: Rule managed object
cluster_obj: Cluster managed object
Returns: Dictionary with DRS VM HOST Rule info
"""
if not all([rule_obj, cluster_obj]):
return {}
return dict(rule_key=rule_obj.key,
rule_enabled=rule_obj.enabled,
rule_name=rule_obj.name,
rule_mandatory=rule_obj.mandatory,
rule_uuid=rule_obj.ruleUuid,
rule_vm_group_name=rule_obj.vmGroupName,
rule_affine_host_group_name=rule_obj.affineHostGroupName,
rule_anti_affine_host_group_name=rule_obj.antiAffineHostGroupName,
rule_vms=self.get_all_from_group(group_name=rule_obj.vmGroupName,
cluster_obj=cluster_obj),
rule_affine_hosts=self.get_all_from_group(group_name=rule_obj.affineHostGroupName,
cluster_obj=cluster_obj,
hostgroup=True),
rule_anti_affine_hosts=self.get_all_from_group(group_name=rule_obj.antiAffineHostGroupName,
cluster_obj=cluster_obj,
hostgroup=True),
rule_type="vm_host_rule",
)
def gather_drs_rule_info(self):
"""
Gather DRS rule information about given cluster
Returns: Dictionary of clusters with DRS information
"""
cluster_rule_info = dict()
for cluster_obj in self.cluster_obj_list:
cluster_rule_info[cluster_obj.name] = []
for drs_rule in cluster_obj.configuration.rule:
if isinstance(drs_rule, vim.cluster.VmHostRuleInfo):
cluster_rule_info[cluster_obj.name].append(self.normalize_vm_host_rule_spec(
rule_obj=drs_rule,
cluster_obj=cluster_obj))
else:
cluster_rule_info[cluster_obj.name].append(self.normalize_vm_vm_rule_spec(rule_obj=drs_rule))
return cluster_rule_info
def main():
argument_spec = vmware_argument_spec()
argument_spec.update(
datacenter=dict(type='str', required=False),
cluster_name=dict(type='str', required=False),
)
module = AnsibleModule(
argument_spec=argument_spec,
required_one_of=[
['cluster_name', 'datacenter'],
],
supports_check_mode=True,
)
vmware_drs_info = VmwareDrsInfoManager(module)
module.exit_json(changed=False, drs_rule_info=vmware_drs_info.gather_drs_rule_info())
if __name__ == "__main__":
main()
|
brijeshkesariya/odoo | refs/heads/8.0 | addons/sale_layout/models/sale_layout.py | 180 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2014-Today OpenERP SA (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv, fields
from itertools import groupby
def grouplines(self, ordered_lines, sortkey):
"""Return lines from a specified invoice or sale order grouped by category"""
grouped_lines = []
for key, valuesiter in groupby(ordered_lines, sortkey):
group = {}
group['category'] = key
group['lines'] = list(v for v in valuesiter)
if 'subtotal' in key and key.subtotal is True:
group['subtotal'] = sum(line.price_subtotal for line in group['lines'])
grouped_lines.append(group)
return grouped_lines
class SaleLayoutCategory(osv.Model):
_name = 'sale_layout.category'
_order = 'sequence, id'
_columns = {
'name': fields.char('Name', required=True),
'sequence': fields.integer('Sequence', required=True),
'subtotal': fields.boolean('Add subtotal'),
'separator': fields.boolean('Add separator'),
'pagebreak': fields.boolean('Add pagebreak')
}
_defaults = {
'subtotal': True,
'separator': True,
'pagebreak': False,
'sequence': 10
}
class AccountInvoice(osv.Model):
_inherit = 'account.invoice'
def sale_layout_lines(self, cr, uid, ids, invoice_id=None, context=None):
"""
Returns invoice lines from a specified invoice ordered by
sale_layout_category sequence. Used in sale_layout module.
:Parameters:
-'invoice_id' (int): specify the concerned invoice.
"""
ordered_lines = self.browse(cr, uid, invoice_id, context=context).invoice_line
# We chose to group first by category model and, if not present, by invoice name
sortkey = lambda x: x.sale_layout_cat_id if x.sale_layout_cat_id else ''
return grouplines(self, ordered_lines, sortkey)
import openerp
class AccountInvoiceLine(osv.Model):
_inherit = 'account.invoice.line'
_order = 'invoice_id, categ_sequence, sequence, id'
sale_layout_cat_id = openerp.fields.Many2one('sale_layout.category', string='Section')
categ_sequence = openerp.fields.Integer(related='sale_layout_cat_id.sequence',
string='Layout Sequence', store=True)
_defaults = {
'categ_sequence': 0
}
class SaleOrder(osv.Model):
_inherit = 'sale.order'
def sale_layout_lines(self, cr, uid, ids, order_id=None, context=None):
"""
Returns order lines from a specified sale ordered by
sale_layout_category sequence. Used in sale_layout module.
:Parameters:
-'order_id' (int): specify the concerned sale order.
"""
ordered_lines = self.browse(cr, uid, order_id, context=context).order_line
sortkey = lambda x: x.sale_layout_cat_id if x.sale_layout_cat_id else ''
return grouplines(self, ordered_lines, sortkey)
class SaleOrderLine(osv.Model):
_inherit = 'sale.order.line'
_columns = {
'sale_layout_cat_id': fields.many2one('sale_layout.category',
string='Section'),
'categ_sequence': fields.related('sale_layout_cat_id',
'sequence', type='integer',
string='Layout Sequence', store=True)
# Store is intentionally set in order to keep the "historic" order.
}
_defaults = {
'categ_sequence': 0
}
_order = 'order_id, categ_sequence, sale_layout_cat_id, sequence, id'
def _prepare_order_line_invoice_line(self, cr, uid, line, account_id=False, context=None):
"""Save the layout when converting to an invoice line."""
invoice_vals = super(SaleOrderLine, self)._prepare_order_line_invoice_line(cr, uid, line, account_id=account_id, context=context)
if line.sale_layout_cat_id:
invoice_vals['sale_layout_cat_id'] = line.sale_layout_cat_id.id
if line.categ_sequence:
invoice_vals['categ_sequence'] = line.categ_sequence
return invoice_vals
|
flyapen/UgFlu | refs/heads/master | flumotion/component/producers/soundcard/wizard_gtk.py | 1 | # -*- Mode: Python -*-
# vi:si:et:sw=4:sts=4:ts=4
#
# Flumotion - a streaming media server
# Copyright (C) 2004,2005,2006,2007,2008 Fluendo, S.L. (www.fluendo.com).
# All rights reserved.
# This file may be distributed and/or modified under the terms of
# the GNU General Public License version 2 as published by
# the Free Software Foundation.
# This file is distributed without any warranty; without even the implied
# warranty of merchantability or fitness for a particular purpose.
# See "LICENSE.GPL" in the source distribution for more information.
# Licensees having purchased or holding a valid Flumotion Advanced
# Streaming Server license may use this file in accordance with the
# Flumotion Advanced Streaming Server Commercial License Agreement.
# See "LICENSE.Flumotion" in the source distribution for more information.
# Headers in this file shall remain intact.
import gettext
import os
from zope.interface import implements
from flumotion.admin.assistant.interfaces import IProducerPlugin
from flumotion.admin.assistant.models import AudioProducer
from flumotion.common.errors import RemoteRunFailure
from flumotion.common.i18n import N_, gettexter
from flumotion.common.messages import Info
from flumotion.admin.gtk.basesteps import AudioProducerStep
__version__ = "$Rev: 7893 $"
_ = gettext.gettext
T_ = gettexter()
CHANNELS = {1: _('Mono'),
2: _('Stereo')}
SAMPLE_RATES = [48000,
44100,
32000,
22050,
16000,
11025,
8000]
# TODO: Add other sources (pulse, jack, ...)
SOURCE_ELEMENTS = [(_('Alsa'), 'alsasrc'),
(_('OSS'), 'osssrc')]
class SoundcardProducer(AudioProducer):
componentType = 'soundcard-producer'
def __init__(self):
super(SoundcardProducer, self).__init__()
class SoundcardStep(AudioProducerStep):
name = 'Soundcard'
title = _('Sound Card')
icon = 'soundcard.png'
gladeFile = os.path.join(os.path.dirname(os.path.abspath(__file__)),
'wizard.glade')
componentType = 'osssrc'
docSection = 'help-configuration-assistant-producer-audio-soundcard'
docAnchor = ''
def __init__(self, wizard, model):
AudioProducerStep.__init__(self, wizard, model)
# WizardStep
def setup(self):
self.input_track.data_type = str
self.channels.data_type = int
self.samplerate.data_type = int
self.depth.data_type = int
self.device.data_type = str
self.source_element.data_type = str
self.source_element.prefill(SOURCE_ELEMENTS)
self.add_proxy(self.model.properties,
['input_track',
'channels',
'samplerate',
'depth',
'device',
'source_element'])
        # Connect the callback after the combo has been filled so the changed
        # signal is not emitted before the page has been set up.
self.source_element.connect('changed', self.on_source_element__changed)
def workerChanged(self, worker):
self.model.worker = worker
self._blockCombos()
self._updateDevices()
def getNext(self):
return None
# Private
def _blockCombos(self, block=True):
self.input_track.set_sensitive(not block)
self.channels.set_sensitive(not block)
self.depth.set_sensitive(not block)
self.samplerate.set_sensitive(not block)
def _updateDevices(self):
self.wizard.waitForTask('soundcard checks')
self.wizard.clear_msg('soundcard-device')
msg = Info(T_(
N_("Looking for the sound devices present on the system."
"This can take a while...")), mid='soundcard-check')
self.wizard.add_msg(msg)
def checkFailed(failure):
failure.trap(RemoteRunFailure)
self.wizard.taskFinished(blockNext=True)
self._blockCombos()
def gotSoundDevices(devices):
self.wizard.clear_msg('soundcard-check')
self.wizard.taskFinished(False)
self.device.set_sensitive(True)
self.device.prefill(devices)
sourceElement = self.source_element.get_selected()
d = self.runInWorker(
'flumotion.worker.checks.audio', 'getAudioDevices',
sourceElement, mid='soundcard-device')
d.addCallback(gotSoundDevices)
d.addErrback(checkFailed)
return d
def _updateInputtrack(self):
device = self.device.get_selected()
sourceElement = self.source_element.get_selected()
if not device:
return
self.wizard.waitForTask('soundcard checks')
msg = Info(T_(
N_("Probing the sound card. This can take a while...")),
mid='soundcard-check')
self.wizard.add_msg(msg)
def checkFailed(failure):
failure.trap(RemoteRunFailure)
self._blockCombos()
self.wizard.taskFinished(True)
def soundcardCheckComplete((deviceName, tracks, caps)):
self.wizard.clear_msg('soundcard-check')
self.wizard.taskFinished(False)
self._caps = caps
self.input_track.prefill(tracks)
self.input_track.set_sensitive(bool(tracks))
d = self.runInWorker(
'flumotion.worker.checks.audio', 'checkMixerTracks',
sourceElement, device, mid='soundcard-check')
d.addCallback(soundcardCheckComplete)
d.addErrback(checkFailed)
return d
def _updateDepth(self):
bitdepths = {}
for capStruct in self._caps:
data = capStruct.copy()
bitdepths[data.pop('depth')] = data
self._capStructs = bitdepths
bitdepths = sorted(bitdepths)
self.depth.prefill(
[(_('%d-bit') % bitdepth, bitdepth) for bitdepth in bitdepths])
self.depth.set_sensitive(True)
def _updateChannels(self):
capStruct = self._capStructs.get(self.depth.get_selected())
if capStruct is None:
return
channels = []
if type(capStruct['channels']) == int:
nchannels = capStruct['channels']
channels.append((CHANNELS[nchannels], nchannels))
else:
for nchannels in capStruct['channels']:
channels.append((CHANNELS[nchannels], nchannels))
self.channels.prefill(channels)
self.channels.set_sensitive(True)
def _updateSamplerate(self):
capStruct = self._capStructs.get(self.depth.get_selected())
if capStruct is None:
return
max, min = capStruct['rate']
self.samplerate.prefill(
[(str(rate), rate) for rate in SAMPLE_RATES if min <= rate <= max])
self.samplerate.set_sensitive(True)
# Callbacks
def on_source_element__changed(self, combo):
self._updateDevices()
def on_device__changed(self, combo):
self._updateInputtrack()
def on_input_track__changed(self, combo):
self._updateDepth()
self._updateChannels()
def on_depth__changed(self, combo):
self._updateSamplerate()
class SoundcardWizardPlugin(object):
implements(IProducerPlugin)
def __init__(self, wizard):
self.wizard = wizard
self.model = SoundcardProducer()
def getProductionStep(self, type):
return SoundcardStep(self.wizard, self.model)
|
ic-labs/icekit-events | refs/heads/develop | icekit_events/page_types/eventlistingfordate/models.py | 2 | from icekit_events.models import AbstractEventListingForDatePage
class EventListingPage(AbstractEventListingForDatePage):
class Meta:
verbose_name = "Event listing for date"
|
primercuervo/cognitive_radio_ml | refs/heads/master | python/cnn_k_1_2.py | 1 | #!/usr/bin/env python
# pylint: disable=C0103
"""
Training various Deep Learning Algorithms using Keras with Tensorboard backend
This module takes paths to the location of images to be used for Image
classification. As of now it is used for cognitive radio spectrum awareness
based on the DySpan2017 Challenge spectrum setup, but it can be used for any
multiclass image recognition.
Example:
To run this script just type:
$ python2 cnn_k_1_2.py
The name stands for:
- CNN - Convolutional Neural Network
- k_1_2 - Keras-v1.2 which had to be used because of hardware
restrictions
**is written for python2**
TODO:
Urgent:
* Revise regularization
TODO: so far this regularization is applied to all convolution layers.
            It would be better to determine the optimal way, or the optimal
            layers, in which to apply this regularization.
Optional:
* API update - Keras and tensorflow
* py3k
"""
from argparse import ArgumentParser
import math
import os.path
import sys
from keras.callbacks import (LearningRateScheduler, TensorBoard, EarlyStopping,
CSVLogger, ModelCheckpoint)
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D
from keras.layers import Activation, Flatten, Dense
from keras.optimizers import SGD
from keras.preprocessing.image import ImageDataGenerator
from keras.regularizers import l2, activity_l2
##################################################
# Parser setup
##################################################
parser = ArgumentParser()
parser.add_argument("-n", # Alias
"--name",
help="Set the basename for the generated files",
default=None)
args = parser.parse_args()
##################################################
# Constants and Variables
##################################################
BASEPATH = '../trained_models/keras'
if args.name is None:
print("[ERROR]: Please provide a basename for the generated files.")
sys.exit()
basename = args.name
# Set the filenames based on the basename
names = {'model': basename + "_model.h5",
'weights': basename + "_weights.h5",
'checkpoint': basename + '_ckpt.h5',
'csv': basename + '.csv',
}
# dimensions of our images.
img_width, img_height = 64, 64
# This is my relative location, please change accordingly
train_data_dir = '../../data/pic_set/train'
test_data_dir = '../../data/pic_set/test'
nb_train_samples = 76300
nb_test_samples = 8400
# Currently each iteration takes 7 seconds on this machine, so I set 3000
# epochs only, based on time constraints. Based on my results, more iterations
# will *not* give me better results anyway.
EPOCHS = 3000 #
BATCH_SIZE = 50
MOMENTUM = 0.9
# Apply L2 regularization
L2 = 0.0005
##################################################
# Procedural Start
##################################################
# Check if files already exist to avoid accidental
# Overwrite
for folder, name in names.items():
my_file = os.path.join(BASEPATH, folder, name)
if os.path.isfile(my_file):
print("[ERROR]: " + my_file + " already exists.")
print("Please choose another basename")
sys.exit()
##################################################
# Model definition
##################################################
# Define CNN based on
# http://ieeexplore.ieee.org/document/8017499/
model = Sequential()
# The last dimension is 1 because of gray-scale
model.add(Conv2D(48, 2, 2,
input_shape=(img_width, img_height, 1)
)
)
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(128, 2, 2))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(192, 2, 2))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(192, 2, 2))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(128, 2, 2))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(1024,
W_regularizer=l2(L2),
activity_regularizer=activity_l2(L2)
)
)
model.add(Dense(1024,
W_regularizer=l2(L2),
activity_regularizer=activity_l2(L2)
)
)
model.add(Dense(10))
model.add(Activation('softmax'))
# Print the model summary to terminal
model.summary()
# Compile the model
# Need to be categorical because this is not a binary classification problem
# Compile stochastic gradient descent model with step decreasing learning rate
sgd = SGD(lr=0.001, momentum=MOMENTUM, decay=0.0, nesterov=False)
model.compile(loss='categorical_crossentropy',
optimizer=sgd,
metrics=['accuracy'])
# Add a learning rate schedule
# References
# ---------
# https://goo.gl/4vQhdj
# https://goo.gl/VrrciJ
def step_decay(epoch):
"""
Define a step decay, which takes the form
lr = lr0 * drop^floor(epoch/epocs_drop)
"""
initial_lrate = 0.001
gamma = 0.1
epochs_drop = EPOCHS / 4
lrate = initial_lrate * math.pow(gamma, math.floor((1+epoch) / epochs_drop))
return lrate
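# Worked example of the schedule above, using the constants defined in this
# script (EPOCHS = 3000, so epochs_drop = 750 and gamma = 0.1):
#   epochs    0 -  748 -> lr = 0.001
#   epochs  749 - 1498 -> lr = 0.0001
#   epochs 1499 - 2248 -> lr = 0.00001
#   epochs 2249 - 2998 -> lr = 0.000001
# i.e. the learning rate is divided by 10 roughly every quarter of the run.
# Note that lschedule is currently not included in callbacks_list below, so
# this decay only takes effect if that callback is re-enabled.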
# learning schedule callback
lschedule = LearningRateScheduler(step_decay)
# Set the callback for Tensorboard (visualization purposes)
tbCallBack = TensorBoard(log_dir=os.path.join(BASEPATH,
'tensorboard'),
histogram_freq=0,
write_graph=True, write_images=False)
#Setting EarlyStopping for rmsprop
early_stop = EarlyStopping(monitor='val_acc',
min_delta=0,
patience=60,
verbose=2,
mode='auto')
# CSV logger callback
csv = CSVLogger(os.path.join(BASEPATH,
'csv',
names['csv']),
separator=',', append=False)
# Checkpointer callback: Saves the model weights after each epoch if
# the validation loss decreased
checkpointer = ModelCheckpoint(filepath=os.path.join(BASEPATH,
'checkpoint',
names['checkpoint']),
verbose=1,
save_best_only=True)
# Set the callback list
# callbacks_list = [lschedule, tbCallBack]
callbacks_list = [tbCallBack, csv, checkpointer]
# Create Image generators
# Not using rescaling or any other data augmentation technique
train_datagen = ImageDataGenerator()
test_datagen = ImageDataGenerator()
train_generator = train_datagen.flow_from_directory(
train_data_dir,
target_size=(img_width, img_height),
batch_size=BATCH_SIZE,
color_mode="grayscale",
shuffle=True,
class_mode='categorical')
test_generator = test_datagen.flow_from_directory(
test_data_dir,
target_size=(img_width, img_height),
batch_size=BATCH_SIZE,
color_mode="grayscale",
shuffle=True,
class_mode='categorical')
# Fit the model
model.fit_generator(
generator=train_generator,
samples_per_epoch=1550,
nb_epoch=EPOCHS,
callbacks=callbacks_list,
validation_data=test_generator,
nb_val_samples=nb_test_samples // BATCH_SIZE)
# Save the models
model.save_weights(os.path.join(BASEPATH, 'weights', names['weights']))
model.save(os.path.join(BASEPATH, 'model', names['model']))
|
GiraldTec/2015-igraphic | refs/heads/master | mrdoob-three.js-f73593b/utils/exporters/blender/addons/io_three/exporter/io.py | 201 | import os
import shutil
from .. import constants, logger
from . import _json
def copy_registered_textures(dest, registration):
"""Copy the registered textures to the destination (root) path
:param dest: destination directory
:param registration: registered textures
:type dest: str
:type registration: dict
"""
logger.debug("io.copy_registered_textures(%s, %s)", dest, registration)
os.makedirs(dest, exist_ok=True)
for value in registration.values():
copy(value['file_path'], dest)
def copy(src, dst):
"""Copy a file to a destination
:param src: source file
:param dst: destination file/path
"""
logger.debug("io.copy(%s, %s)" % (src, dst))
if os.path.isdir(dst):
file_name = os.path.basename(src)
dst = os.path.join(dst, file_name)
if src != dst:
shutil.copy(src, dst)
def dump(filepath, data, options=None):
"""Dump the output to disk (JSON, msgpack, etc)
:param filepath: output file path
:param data: serializable data to write to disk
:param options: (Default value = None)
:type options: dict
"""
options = options or {}
logger.debug("io.dump(%s, data, options=%s)", filepath, options)
compress = options.get(constants.COMPRESSION, constants.NONE)
if compress == constants.MSGPACK:
try:
import msgpack
except ImportError:
logger.error("msgpack module not found")
raise
logger.info("Dumping to msgpack")
func = lambda x, y: msgpack.dump(x, y)
mode = 'wb'
else:
round_off = options.get(constants.ENABLE_PRECISION)
if round_off:
_json.ROUND = options[constants.PRECISION]
else:
_json.ROUND = None
indent = options.get(constants.INDENT, True)
indent = 4 if indent else None
logger.info("Dumping to JSON")
func = lambda x, y: _json.json.dump(x, y, indent=indent)
mode = 'w'
logger.info("Writing to %s", filepath)
with open(filepath, mode=mode) as stream:
func(data, stream)
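# Hedged usage sketch (the path and option values below are illustrative):
#
#   options = {constants.INDENT: True,
#              constants.ENABLE_PRECISION: True,
#              constants.PRECISION: 6}
#   dump('/tmp/scene.json', {'metadata': {}}, options=options)
#
# Setting options[constants.COMPRESSION] = constants.MSGPACK would write a
# binary msgpack stream instead, provided the msgpack module is importable.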
def load(filepath, options):
"""Load the contents of the file path with the correct parser
:param filepath: input file path
:param options:
:type options: dict
"""
logger.debug("io.load(%s, %s)", filepath, options)
compress = options.get(constants.COMPRESSION, constants.NONE)
if compress == constants.MSGPACK:
try:
import msgpack
except ImportError:
logger.error("msgpack module not found")
raise
module = msgpack
mode = 'rb'
else:
logger.info("Loading JSON")
module = _json.json
mode = 'r'
with open(filepath, mode=mode) as stream:
data = module.load(stream)
return data
|
idlesign/django-sitetree | refs/heads/master | sitetree/tests/testapp/admin.py | 1 | from sitetree.admin import TreeItemAdmin, TreeAdmin, override_tree_admin, override_item_admin
class CustomTreeAdmin(TreeAdmin):
pass
class CustomTreeItemAdmin(TreeItemAdmin):
pass
override_tree_admin(CustomTreeAdmin)
override_item_admin(CustomTreeItemAdmin)
|
jettify/aioodbc | refs/heads/master | setup.py | 2 | import os
import re
import sys
from setuptools import setup, find_packages
install_requires = ['pyodbc']
PY_VER = sys.version_info
if not PY_VER >= (3, 5):
raise RuntimeError("aioodbc doesn't support Python earlier than 3.5")
def read(f):
return open(os.path.join(os.path.dirname(__file__), f)).read().strip()
extras_require = {}
def read_version():
regexp = re.compile(r"^__version__\W*=\W*'([\d.abrc]+)'")
init_py = os.path.join(os.path.dirname(__file__),
'aioodbc', '__init__.py')
with open(init_py) as f:
for line in f:
match = regexp.match(line)
if match is not None:
return match.group(1)
else:
raise RuntimeError('Cannot find version in aioodbc/__init__.py')
classifiers = [
'License :: OSI Approved :: Apache Software License',
'Intended Audience :: Developers',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Operating System :: POSIX',
'Environment :: Web Environment',
'Development Status :: 3 - Alpha',
'Topic :: Database',
'Topic :: Database :: Front-Ends',
'Framework :: AsyncIO',
]
setup(name='aioodbc',
version=read_version(),
description=('ODBC driver for asyncio.'),
long_description='\n\n'.join((read('README.rst'), read('CHANGES.txt'))),
classifiers=classifiers,
platforms=['POSIX'],
author="Nikolay Novik",
author_email="[email protected]",
url='https://github.com/aio-libs/aioodbc',
download_url='https://pypi.python.org/pypi/aioodbc',
license='Apache 2',
packages=find_packages(),
python_requires='>=3.5',
install_requires=install_requires,
extras_require=extras_require,
include_package_data=True)
|
babble/babble | refs/heads/master | include/jython/Lib/pstats.py | 94 | """Class for printing reports on profiled python code."""
# Class for printing reports on profiled python code. rev 1.0 4/1/94
#
# Based on prior profile module by Sjoerd Mullender...
# which was hacked somewhat by: Guido van Rossum
#
# see profile.doc and profile.py for more info.
# Copyright 1994, by InfoSeek Corporation, all rights reserved.
# Written by James Roskind
#
# Permission to use, copy, modify, and distribute this Python software
# and its associated documentation for any purpose (subject to the
# restriction in the following sentence) without fee is hereby granted,
# provided that the above copyright notice appears in all copies, and
# that both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of InfoSeek not be used in
# advertising or publicity pertaining to distribution of the software
# without specific, written prior permission. This permission is
# explicitly restricted to the copying and modification of the software
# to remain in Python, compiled Python, or other languages (such as C)
# wherein the modified or derived code is exclusively imported into a
# Python module.
#
# INFOSEEK CORPORATION DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS
# SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
# FITNESS. IN NO EVENT SHALL INFOSEEK CORPORATION BE LIABLE FOR ANY
# SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER
# RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF
# CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import sys
import os
import time
import marshal
import re
__all__ = ["Stats"]
class Stats:
"""This class is used for creating reports from data generated by the
Profile class. It is a "friend" of that class, and imports data either
by direct access to members of Profile class, or by reading in a dictionary
that was emitted (via marshal) from the Profile class.
The big change from the previous Profiler (in terms of raw functionality)
is that an "add()" method has been provided to combine Stats from
several distinct profile runs. Both the constructor and the add()
method now take arbitrarily many file names as arguments.
All the print methods now take an argument that indicates how many lines
to print. If the arg is a floating point number between 0 and 1.0, then
it is taken as a decimal percentage of the available lines to be printed
(e.g., .1 means print 10% of all available lines). If it is an integer,
it is taken to mean the number of lines of data that you wish to have
printed.
The sort_stats() method now processes some additional options (i.e., in
addition to the old -1, 0, 1, or 2). It takes an arbitrary number of
quoted strings to select the sort order. For example sort_stats('time',
'name') sorts on the major key of 'internal function time', and on the
minor key of 'the name of the function'. Look at the two tables in
sort_stats() and get_sort_arg_defs(self) for more examples.
All methods return self, so you can string together commands like:
Stats('foo', 'goo').strip_dirs().sort_stats('calls').\
print_stats(5).print_callers(5)
"""
def __init__(self, *args, **kwds):
        # I can't figure out how to explicitly specify a stream keyword arg
# with *args:
# def __init__(self, *args, stream=sys.stdout): ...
        # so I use **kwds and squawk if something unexpected is passed in.
self.stream = sys.stdout
if "stream" in kwds:
self.stream = kwds["stream"]
del kwds["stream"]
if kwds:
keys = kwds.keys()
keys.sort()
extras = ", ".join(["%s=%s" % (k, kwds[k]) for k in keys])
raise ValueError, "unrecognized keyword args: %s" % extras
if not len(args):
arg = None
else:
arg = args[0]
args = args[1:]
self.init(arg)
self.add(*args)
def init(self, arg):
self.all_callees = None # calc only if needed
self.files = []
self.fcn_list = None
self.total_tt = 0
self.total_calls = 0
self.prim_calls = 0
self.max_name_len = 0
self.top_level = {}
self.stats = {}
self.sort_arg_dict = {}
self.load_stats(arg)
trouble = 1
try:
self.get_top_level_stats()
trouble = 0
finally:
if trouble:
print >> self.stream, "Invalid timing data",
if self.files: print >> self.stream, self.files[-1],
print >> self.stream
def load_stats(self, arg):
if not arg: self.stats = {}
elif isinstance(arg, basestring):
f = open(arg, 'rb')
self.stats = marshal.load(f)
f.close()
try:
file_stats = os.stat(arg)
arg = time.ctime(file_stats.st_mtime) + " " + arg
except: # in case this is not unix
pass
self.files = [ arg ]
elif hasattr(arg, 'create_stats'):
arg.create_stats()
self.stats = arg.stats
arg.stats = {}
if not self.stats:
raise TypeError, "Cannot create or construct a %r object from '%r''" % (
self.__class__, arg)
return
def get_top_level_stats(self):
for func, (cc, nc, tt, ct, callers) in self.stats.items():
self.total_calls += nc
self.prim_calls += cc
self.total_tt += tt
if callers.has_key(("jprofile", 0, "profiler")):
self.top_level[func] = None
if len(func_std_string(func)) > self.max_name_len:
self.max_name_len = len(func_std_string(func))
def add(self, *arg_list):
if not arg_list: return self
if len(arg_list) > 1: self.add(*arg_list[1:])
other = arg_list[0]
if type(self) != type(other) or self.__class__ != other.__class__:
other = Stats(other)
self.files += other.files
self.total_calls += other.total_calls
self.prim_calls += other.prim_calls
self.total_tt += other.total_tt
for func in other.top_level:
self.top_level[func] = None
if self.max_name_len < other.max_name_len:
self.max_name_len = other.max_name_len
self.fcn_list = None
for func, stat in other.stats.iteritems():
if func in self.stats:
old_func_stat = self.stats[func]
else:
old_func_stat = (0, 0, 0, 0, {},)
self.stats[func] = add_func_stats(old_func_stat, stat)
return self
def dump_stats(self, filename):
"""Write the profile data to a file we know how to load back."""
f = file(filename, 'wb')
try:
marshal.dump(self.stats, f)
finally:
f.close()
# list the tuple indices and directions for sorting,
# along with some printable description
sort_arg_dict_default = {
"calls" : (((1,-1), ), "call count"),
"cumulative": (((3,-1), ), "cumulative time"),
"file" : (((4, 1), ), "file name"),
"line" : (((5, 1), ), "line number"),
"module" : (((4, 1), ), "file name"),
"name" : (((6, 1), ), "function name"),
"nfl" : (((6, 1),(4, 1),(5, 1),), "name/file/line"),
"pcalls" : (((0,-1), ), "call count"),
"stdname" : (((7, 1), ), "standard name"),
"time" : (((2,-1), ), "internal time"),
}
def get_sort_arg_defs(self):
"""Expand all abbreviations that are unique."""
if not self.sort_arg_dict:
self.sort_arg_dict = dict = {}
bad_list = {}
for word, tup in self.sort_arg_dict_default.iteritems():
fragment = word
while fragment:
if not fragment:
break
if fragment in dict:
bad_list[fragment] = 0
break
dict[fragment] = tup
fragment = fragment[:-1]
for word in bad_list:
del dict[word]
return self.sort_arg_dict
def sort_stats(self, *field):
if not field:
self.fcn_list = 0
return self
if len(field) == 1 and type(field[0]) == type(1):
# Be compatible with old profiler
field = [ {-1: "stdname",
0:"calls",
1:"time",
2: "cumulative" } [ field[0] ] ]
sort_arg_defs = self.get_sort_arg_defs()
sort_tuple = ()
self.sort_type = ""
connector = ""
for word in field:
sort_tuple = sort_tuple + sort_arg_defs[word][0]
self.sort_type += connector + sort_arg_defs[word][1]
connector = ", "
stats_list = []
for func, (cc, nc, tt, ct, callers) in self.stats.iteritems():
stats_list.append((cc, nc, tt, ct) + func +
(func_std_string(func), func))
stats_list.sort(TupleComp(sort_tuple).compare)
self.fcn_list = fcn_list = []
for tuple in stats_list:
fcn_list.append(tuple[-1])
return self
def reverse_order(self):
if self.fcn_list:
self.fcn_list.reverse()
return self
def strip_dirs(self):
oldstats = self.stats
self.stats = newstats = {}
max_name_len = 0
for func, (cc, nc, tt, ct, callers) in oldstats.iteritems():
newfunc = func_strip_path(func)
if len(func_std_string(newfunc)) > max_name_len:
max_name_len = len(func_std_string(newfunc))
newcallers = {}
for func2, caller in callers.iteritems():
newcallers[func_strip_path(func2)] = caller
if newfunc in newstats:
newstats[newfunc] = add_func_stats(
newstats[newfunc],
(cc, nc, tt, ct, newcallers))
else:
newstats[newfunc] = (cc, nc, tt, ct, newcallers)
old_top = self.top_level
self.top_level = new_top = {}
for func in old_top:
new_top[func_strip_path(func)] = None
self.max_name_len = max_name_len
self.fcn_list = None
self.all_callees = None
return self
def calc_callees(self):
if self.all_callees: return
self.all_callees = all_callees = {}
for func, (cc, nc, tt, ct, callers) in self.stats.iteritems():
if not func in all_callees:
all_callees[func] = {}
for func2, caller in callers.iteritems():
if not func2 in all_callees:
all_callees[func2] = {}
all_callees[func2][func] = caller
return
#******************************************************************
# The following functions support actual printing of reports
#******************************************************************
# Optional "amount" is either a line count, or a percentage of lines.
def eval_print_amount(self, sel, list, msg):
new_list = list
if type(sel) == type(""):
new_list = []
for func in list:
if re.search(sel, func_std_string(func)):
new_list.append(func)
else:
count = len(list)
if type(sel) == type(1.0) and 0.0 <= sel < 1.0:
count = int(count * sel + .5)
new_list = list[:count]
elif type(sel) == type(1) and 0 <= sel < count:
count = sel
new_list = list[:count]
if len(list) != len(new_list):
msg = msg + " List reduced from %r to %r due to restriction <%r>\n" % (
len(list), len(new_list), sel)
return new_list, msg
def get_print_list(self, sel_list):
width = self.max_name_len
if self.fcn_list:
list = self.fcn_list[:]
msg = " Ordered by: " + self.sort_type + '\n'
else:
list = self.stats.keys()
msg = " Random listing order was used\n"
for selection in sel_list:
list, msg = self.eval_print_amount(selection, list, msg)
count = len(list)
if not list:
return 0, list
print >> self.stream, msg
if count < len(self.stats):
width = 0
for func in list:
if len(func_std_string(func)) > width:
width = len(func_std_string(func))
return width+2, list
def print_stats(self, *amount):
for filename in self.files:
print >> self.stream, filename
if self.files: print >> self.stream
indent = ' ' * 8
for func in self.top_level:
print >> self.stream, indent, func_get_function_name(func)
print >> self.stream, indent, self.total_calls, "function calls",
if self.total_calls != self.prim_calls:
print >> self.stream, "(%d primitive calls)" % self.prim_calls,
print >> self.stream, "in %.3f CPU seconds" % self.total_tt
print >> self.stream
width, list = self.get_print_list(amount)
if list:
self.print_title()
for func in list:
self.print_line(func)
print >> self.stream
print >> self.stream
return self
def print_callees(self, *amount):
width, list = self.get_print_list(amount)
if list:
self.calc_callees()
self.print_call_heading(width, "called...")
for func in list:
if func in self.all_callees:
self.print_call_line(width, func, self.all_callees[func])
else:
self.print_call_line(width, func, {})
print >> self.stream
print >> self.stream
return self
def print_callers(self, *amount):
width, list = self.get_print_list(amount)
if list:
self.print_call_heading(width, "was called by...")
for func in list:
cc, nc, tt, ct, callers = self.stats[func]
self.print_call_line(width, func, callers, "<-")
print >> self.stream
print >> self.stream
return self
def print_call_heading(self, name_size, column_title):
print >> self.stream, "Function ".ljust(name_size) + column_title
# print sub-header only if we have new-style callers
subheader = False
for cc, nc, tt, ct, callers in self.stats.itervalues():
if callers:
value = callers.itervalues().next()
subheader = isinstance(value, tuple)
break
if subheader:
print >> self.stream, " "*name_size + " ncalls tottime cumtime"
def print_call_line(self, name_size, source, call_dict, arrow="->"):
print >> self.stream, func_std_string(source).ljust(name_size) + arrow,
if not call_dict:
print >> self.stream
return
clist = call_dict.keys()
clist.sort()
indent = ""
for func in clist:
name = func_std_string(func)
value = call_dict[func]
if isinstance(value, tuple):
nc, cc, tt, ct = value
if nc != cc:
substats = '%d/%d' % (nc, cc)
else:
substats = '%d' % (nc,)
substats = '%s %s %s %s' % (substats.rjust(7+2*len(indent)),
f8(tt), f8(ct), name)
left_width = name_size + 1
else:
substats = '%s(%r) %s' % (name, value, f8(self.stats[func][3]))
left_width = name_size + 3
print >> self.stream, indent*left_width + substats
indent = " "
def print_title(self):
print >> self.stream, ' ncalls tottime percall cumtime percall',
print >> self.stream, 'filename:lineno(function)'
def print_line(self, func): # hack : should print percentages
cc, nc, tt, ct, callers = self.stats[func]
c = str(nc)
if nc != cc:
c = c + '/' + str(cc)
print >> self.stream, c.rjust(9),
print >> self.stream, f8(tt),
if nc == 0:
print >> self.stream, ' '*8,
else:
print >> self.stream, f8(tt/nc),
print >> self.stream, f8(ct),
if cc == 0:
print >> self.stream, ' '*8,
else:
print >> self.stream, f8(ct/cc),
print >> self.stream, func_std_string(func)
class TupleComp:
"""This class provides a generic function for comparing any two tuples.
Each instance records a list of tuple-indices (from most significant
    to least significant), and sort direction (ascending or descending) for
each tuple-index. The compare functions can then be used as the function
argument to the system sort() function when a list of tuples need to be
sorted in the instances order."""
def __init__(self, comp_select_list):
self.comp_select_list = comp_select_list
def compare (self, left, right):
for index, direction in self.comp_select_list:
l = left[index]
r = right[index]
if l < r:
return -direction
if l > r:
return direction
return 0
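# Small illustration (not part of the original module): sorting primarily on
# tuple index 2 descending, then on index 6 ascending, mirrors the tuples that
# sort_stats() assembles from sort_arg_dict_default above.
#
#   comp = TupleComp([(2, -1), (6, 1)])
#   stats_rows.sort(comp.compare)   # stats_rows: hypothetical list of tuples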
#**************************************************************************
# func_name is a triple (file:string, line:int, name:string)
def func_strip_path(func_name):
filename, line, name = func_name
return os.path.basename(filename), line, name
def func_get_function_name(func):
return func[2]
def func_std_string(func_name): # match what old profile produced
if func_name[:2] == ('~', 0):
# special case for built-in functions
name = func_name[2]
if name.startswith('<') and name.endswith('>'):
return '{%s}' % name[1:-1]
else:
return name
else:
return "%s:%d(%s)" % func_name
#**************************************************************************
# The following functions combine statistics for pairs of functions.
# The bulk of the processing involves correctly handling "call" lists,
# such as callers and callees.
#**************************************************************************
def add_func_stats(target, source):
"""Add together all the stats for two profile entries."""
cc, nc, tt, ct, callers = source
t_cc, t_nc, t_tt, t_ct, t_callers = target
return (cc+t_cc, nc+t_nc, tt+t_tt, ct+t_ct,
add_callers(t_callers, callers))
def add_callers(target, source):
"""Combine two caller lists in a single list."""
new_callers = {}
for func, caller in target.iteritems():
new_callers[func] = caller
for func, caller in source.iteritems():
if func in new_callers:
new_callers[func] = caller + new_callers[func]
else:
new_callers[func] = caller
return new_callers
def count_calls(callers):
"""Sum the caller statistics to get total number of calls received."""
nc = 0
for calls in callers.itervalues():
nc += calls
return nc
#**************************************************************************
# The following functions support printing of reports
#**************************************************************************
def f8(x):
return "%8.3f" % x
#**************************************************************************
# Statistics browser added by ESR, April 2001
#**************************************************************************
if __name__ == '__main__':
import cmd
try:
import readline
except ImportError:
pass
class ProfileBrowser(cmd.Cmd):
def __init__(self, profile=None):
cmd.Cmd.__init__(self)
self.prompt = "% "
if profile is not None:
self.stats = Stats(profile)
self.stream = self.stats.stream
else:
self.stats = None
self.stream = sys.stdout
def generic(self, fn, line):
args = line.split()
processed = []
for term in args:
try:
processed.append(int(term))
continue
except ValueError:
pass
try:
frac = float(term)
if frac > 1 or frac < 0:
print >> self.stream, "Fraction argument must be in [0, 1]"
continue
processed.append(frac)
continue
except ValueError:
pass
processed.append(term)
if self.stats:
getattr(self.stats, fn)(*processed)
else:
print >> self.stream, "No statistics object is loaded."
return 0
def generic_help(self):
print >> self.stream, "Arguments may be:"
print >> self.stream, "* An integer maximum number of entries to print."
print >> self.stream, "* A decimal fractional number between 0 and 1, controlling"
print >> self.stream, " what fraction of selected entries to print."
print >> self.stream, "* A regular expression; only entries with function names"
print >> self.stream, " that match it are printed."
def do_add(self, line):
self.stats.add(line)
return 0
def help_add(self):
print >> self.stream, "Add profile info from given file to current statistics object."
def do_callees(self, line):
return self.generic('print_callees', line)
def help_callees(self):
print >> self.stream, "Print callees statistics from the current stat object."
self.generic_help()
def do_callers(self, line):
return self.generic('print_callers', line)
def help_callers(self):
print >> self.stream, "Print callers statistics from the current stat object."
self.generic_help()
def do_EOF(self, line):
print >> self.stream, ""
return 1
def help_EOF(self):
print >> self.stream, "Leave the profile brower."
def do_quit(self, line):
return 1
def help_quit(self):
print >> self.stream, "Leave the profile brower."
def do_read(self, line):
if line:
try:
self.stats = Stats(line)
except IOError, args:
print >> self.stream, args[1]
return
self.prompt = line + "% "
elif len(self.prompt) > 2:
line = self.prompt[-2:]
else:
print >> self.stream, "No statistics object is current -- cannot reload."
return 0
def help_read(self):
print >> self.stream, "Read in profile data from a specified file."
def do_reverse(self, line):
self.stats.reverse_order()
return 0
def help_reverse(self):
print >> self.stream, "Reverse the sort order of the profiling report."
def do_sort(self, line):
abbrevs = self.stats.get_sort_arg_defs()
if line and not filter(lambda x,a=abbrevs: x not in a,line.split()):
self.stats.sort_stats(*line.split())
else:
print >> self.stream, "Valid sort keys (unique prefixes are accepted):"
for (key, value) in Stats.sort_arg_dict_default.iteritems():
print >> self.stream, "%s -- %s" % (key, value[1])
return 0
def help_sort(self):
print >> self.stream, "Sort profile data according to specified keys."
print >> self.stream, "(Typing `sort' without arguments lists valid keys.)"
def complete_sort(self, text, *args):
return [a for a in Stats.sort_arg_dict_default if a.startswith(text)]
def do_stats(self, line):
return self.generic('print_stats', line)
def help_stats(self):
print >> self.stream, "Print statistics from the current stat object."
self.generic_help()
def do_strip(self, line):
self.stats.strip_dirs()
return 0
def help_strip(self):
print >> self.stream, "Strip leading path information from filenames in the report."
def postcmd(self, stop, line):
if stop:
return stop
return None
import sys
if len(sys.argv) > 1:
initprofile = sys.argv[1]
else:
initprofile = None
try:
browser = ProfileBrowser(initprofile)
print >> browser.stream, "Welcome to the profile statistics browser."
browser.cmdloop()
print >> browser.stream, "Goodbye."
except KeyboardInterrupt:
pass
# That's all, folks.
|
zhenv5/tweepy | refs/heads/master | tweepy/cursor.py | 66 | # Tweepy
# Copyright 2009-2010 Joshua Roesslein
# See LICENSE for details.
from __future__ import print_function
from tweepy.error import TweepError
from tweepy.parsers import ModelParser, RawParser
class Cursor(object):
"""Pagination helper class"""
def __init__(self, method, *args, **kargs):
if hasattr(method, 'pagination_mode'):
if method.pagination_mode == 'cursor':
self.iterator = CursorIterator(method, args, kargs)
elif method.pagination_mode == 'id':
self.iterator = IdIterator(method, args, kargs)
elif method.pagination_mode == 'page':
self.iterator = PageIterator(method, args, kargs)
else:
raise TweepError('Invalid pagination mode.')
else:
raise TweepError('This method does not perform pagination')
def pages(self, limit=0):
"""Return iterator for pages"""
if limit > 0:
self.iterator.limit = limit
return self.iterator
def items(self, limit=0):
"""Return iterator for items in each page"""
i = ItemIterator(self.iterator)
i.limit = limit
return i
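# Hedged usage sketch (the api object, screen_name and handle_page below are
# placeholders, not part of this module):
#
#   for page in Cursor(api.user_timeline, screen_name='twitter').pages(3):
#       handle_page(page)
#   for status in Cursor(api.user_timeline).items(10):
#       print(status.text)
#
# The wrapped API method must expose a pagination_mode attribute
# ('cursor', 'id' or 'page'); otherwise the constructor raises TweepError.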
class BaseIterator(object):
def __init__(self, method, args, kargs):
self.method = method
self.args = args
self.kargs = kargs
self.limit = 0
def __next__(self):
return self.next()
def next(self):
raise NotImplementedError
def prev(self):
raise NotImplementedError
def __iter__(self):
return self
class CursorIterator(BaseIterator):
def __init__(self, method, args, kargs):
BaseIterator.__init__(self, method, args, kargs)
start_cursor = kargs.pop('cursor', None)
self.next_cursor = start_cursor or -1
self.prev_cursor = start_cursor or 0
self.num_tweets = 0
def next(self):
if self.next_cursor == 0 or (self.limit and self.num_tweets == self.limit):
raise StopIteration
data, cursors = self.method(cursor=self.next_cursor,
*self.args,
**self.kargs)
self.prev_cursor, self.next_cursor = cursors
if len(data) == 0:
raise StopIteration
self.num_tweets += 1
return data
def prev(self):
if self.prev_cursor == 0:
raise TweepError('Can not page back more, at first page')
data, self.next_cursor, self.prev_cursor = self.method(cursor=self.prev_cursor,
*self.args,
**self.kargs)
self.num_tweets -= 1
return data
class IdIterator(BaseIterator):
def __init__(self, method, args, kargs):
BaseIterator.__init__(self, method, args, kargs)
self.max_id = kargs.pop('max_id', None)
self.num_tweets = 0
self.results = []
self.model_results = []
self.index = 0
def next(self):
"""Fetch a set of items with IDs less than current set."""
if self.limit and self.limit == self.num_tweets:
raise StopIteration
if self.index >= len(self.results) - 1:
data = self.method(max_id=self.max_id, parser=RawParser(), *self.args, **self.kargs)
if hasattr(self.method, '__self__'):
old_parser = self.method.__self__.parser
# Hack for models which expect ModelParser to be set
self.method.__self__.parser = ModelParser()
# This is a special invocation that returns the underlying
# APIMethod class
model = ModelParser().parse(self.method(create=True), data)
if hasattr(self.method, '__self__'):
self.method.__self__.parser = old_parser
result = self.method.__self__.parser.parse(self.method(create=True), data)
else:
result = model
if len(self.results) != 0:
self.index += 1
self.results.append(result)
self.model_results.append(model)
else:
self.index += 1
result = self.results[self.index]
model = self.model_results[self.index]
if len(result) == 0:
raise StopIteration
        # TODO: Make this not dependent on the parser making max_id and
# since_id available
self.max_id = model.max_id
self.num_tweets += 1
return result
def prev(self):
"""Fetch a set of items with IDs greater than current set."""
if self.limit and self.limit == self.num_tweets:
raise StopIteration
self.index -= 1
if self.index < 0:
# There's no way to fetch a set of tweets directly 'above' the
# current set
raise StopIteration
data = self.results[self.index]
self.max_id = self.model_results[self.index].max_id
self.num_tweets += 1
return data
class PageIterator(BaseIterator):
def __init__(self, method, args, kargs):
BaseIterator.__init__(self, method, args, kargs)
self.current_page = 0
def next(self):
if self.limit > 0:
if self.current_page > self.limit:
raise StopIteration
items = self.method(page=self.current_page, *self.args, **self.kargs)
if len(items) == 0:
raise StopIteration
self.current_page += 1
return items
def prev(self):
if self.current_page == 1:
raise TweepError('Can not page back more, at first page')
self.current_page -= 1
return self.method(page=self.current_page, *self.args, **self.kargs)
class ItemIterator(BaseIterator):
def __init__(self, page_iterator):
self.page_iterator = page_iterator
self.limit = 0
self.current_page = None
self.page_index = -1
self.num_tweets = 0
def next(self):
if self.limit > 0:
if self.num_tweets == self.limit:
raise StopIteration
if self.current_page is None or self.page_index == len(self.current_page) - 1:
# Reached end of current page, get the next page...
self.current_page = self.page_iterator.next()
self.page_index = -1
self.page_index += 1
self.num_tweets += 1
return self.current_page[self.page_index]
def prev(self):
if self.current_page is None:
raise TweepError('Can not go back more, at first page')
if self.page_index == 0:
# At the beginning of the current page, move to next...
self.current_page = self.page_iterator.prev()
self.page_index = len(self.current_page)
if self.page_index == 0:
raise TweepError('No more items')
self.page_index -= 1
self.num_tweets -= 1
return self.current_page[self.page_index]
|
2ndy/RaspIM | refs/heads/master | usr/lib/python2.7/dummy_thread.py | 326 | """Drop-in replacement for the thread module.
Meant to be used as a brain-dead substitute so that threaded code does
not need to be rewritten for when the thread module is not present.
Suggested usage is::
try:
import thread
except ImportError:
import dummy_thread as thread
"""
# Exports only things specified by thread documentation;
# skipping obsolete synonyms allocate(), start_new(), exit_thread().
__all__ = ['error', 'start_new_thread', 'exit', 'get_ident', 'allocate_lock',
'interrupt_main', 'LockType']
import traceback as _traceback
class error(Exception):
"""Dummy implementation of thread.error."""
def __init__(self, *args):
self.args = args
def start_new_thread(function, args, kwargs={}):
"""Dummy implementation of thread.start_new_thread().
Compatibility is maintained by making sure that ``args`` is a
tuple and ``kwargs`` is a dictionary. If an exception is raised
and it is SystemExit (which can be done by thread.exit()) it is
caught and nothing is done; all other exceptions are printed out
by using traceback.print_exc().
    If the executed function calls interrupt_main, a KeyboardInterrupt will be
    raised when the function returns.
"""
if type(args) != type(tuple()):
raise TypeError("2nd arg must be a tuple")
if type(kwargs) != type(dict()):
raise TypeError("3rd arg must be a dict")
global _main
_main = False
try:
function(*args, **kwargs)
except SystemExit:
pass
except:
_traceback.print_exc()
_main = True
global _interrupt
if _interrupt:
_interrupt = False
raise KeyboardInterrupt
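# Sketch of the intended drop-in behaviour (worker() is illustrative only):
#
#   def worker(msg, repeat=1):
#       print msg * repeat
#   start_new_thread(worker, ('hi ',), {'repeat': 3})
#
# Unlike the real thread module, the call only returns after worker() has
# finished, because everything runs in the single main "thread".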
def exit():
"""Dummy implementation of thread.exit()."""
raise SystemExit
def get_ident():
"""Dummy implementation of thread.get_ident().
Since this module should only be used when threadmodule is not
available, it is safe to assume that the current process is the
only thread. Thus a constant can be safely returned.
"""
return -1
def allocate_lock():
"""Dummy implementation of thread.allocate_lock()."""
return LockType()
def stack_size(size=None):
"""Dummy implementation of thread.stack_size()."""
if size is not None:
raise error("setting thread stack size not supported")
return 0
class LockType(object):
"""Class implementing dummy implementation of thread.LockType.
Compatibility is maintained by maintaining self.locked_status
which is a boolean that stores the state of the lock. Pickling of
the lock, though, should not be done since if the thread module is
then used with an unpickled ``lock()`` from here problems could
occur from this class not having atomic methods.
"""
def __init__(self):
self.locked_status = False
def acquire(self, waitflag=None):
"""Dummy implementation of acquire().
For blocking calls, self.locked_status is automatically set to
True and returned appropriately based on value of
``waitflag``. If it is non-blocking, then the value is
actually checked and not set if it is already acquired. This
is all done so that threading.Condition's assert statements
aren't triggered and throw a little fit.
"""
if waitflag is None or waitflag:
self.locked_status = True
return True
else:
if not self.locked_status:
self.locked_status = True
return True
else:
return False
__enter__ = acquire
def __exit__(self, typ, val, tb):
self.release()
def release(self):
"""Release the dummy lock."""
# XXX Perhaps shouldn't actually bother to test? Could lead
# to problems for complex, threaded code.
if not self.locked_status:
raise error
self.locked_status = False
return True
def locked(self):
return self.locked_status
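# Illustrative use of the dummy lock (mirrors the real thread.LockType API):
#
#   lock = allocate_lock()
#   with lock:                  # __enter__/__exit__ wrap acquire()/release()
#       pass                    # trivially "thread-safe": only one thread
#   assert not lock.locked()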
# Used to signal that interrupt_main was called in a "thread"
_interrupt = False
# True when not executing in a "thread"
_main = True
def interrupt_main():
"""Set _interrupt flag to True to have start_new_thread raise
KeyboardInterrupt upon exiting."""
if _main:
raise KeyboardInterrupt
else:
global _interrupt
_interrupt = True
|
Workday/OpenFrame | refs/heads/master | tools/telemetry/telemetry/value/__init__.py | 23 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
The Value hierarchy provides a way of representing the values measurements
produce such that they can be merged across runs, grouped by page, and output
to different targets.
The core Value concept provides the basic functionality:
- association with a page, may be none
- naming and units
- importance tracking [whether a value will show up on a waterfall or output
file by default]
- other metadata, such as a description of what was measured
- default conversion to scalar and string
- merging properties
A page may actually run a few times during a single telemetry session.
Downstream consumers of test results typically want to group these runs
together, then compute summary statistics across runs. Value provides the
Merge* family of methods for this kind of aggregation.
"""
import os
from telemetry.core import discover
from telemetry.core import util
# When combining a pair of Values togehter, it is sometimes ambiguous whether
# the values should be concatenated, or one should be picked as representative.
# The possible merging policies are listed here.
CONCATENATE = 'concatenate'
PICK_FIRST = 'pick-first'
# When converting a Value to its buildbot equivalent, the context in which the
# value is being interpreted actually affects the conversion. This is insane,
# but there you have it. There are three contexts in which Values are converted
# for use by buildbot, represented by these output-intent values.
PER_PAGE_RESULT_OUTPUT_CONTEXT = 'per-page-result-output-context'
COMPUTED_PER_PAGE_SUMMARY_OUTPUT_CONTEXT = 'merged-pages-result-output-context'
SUMMARY_RESULT_OUTPUT_CONTEXT = 'summary-result-output-context'
class Value(object):
"""An abstract value produced by a telemetry page test.
"""
def __init__(self, page, name, units, important, description,
tir_label):
"""A generic Value object.
Args:
page: A Page object, may be given as None to indicate that the value
represents results for multiple pages.
name: A value name string, may contain a dot. Values from the same test
with the same prefix before the dot may be considered to belong to
the same chart.
units: A units string.
important: Whether the value is "important". Causes the value to appear
by default in downstream UIs.
description: A string explaining in human-understandable terms what this
value represents.
tir_label: The string label of the TimelineInteractionRecord with
which this value is associated.
"""
# TODO(eakuefner): Check story here after migration (crbug.com/442036)
if not isinstance(name, basestring):
raise ValueError('name field of Value must be string.')
if not isinstance(units, basestring):
raise ValueError('units field of Value must be string.')
if not isinstance(important, bool):
raise ValueError('important field of Value must be bool.')
if not ((description is None) or isinstance(description, basestring)):
raise ValueError('description field of Value must absent or string.')
if not ((tir_label is None) or
isinstance(tir_label, basestring)):
raise ValueError('tir_label field of Value must absent or '
'string.')
self.page = page
self.name = name
self.units = units
self.important = important
self.description = description
self.tir_label = tir_label
def __eq__(self, other):
return hash(self) == hash(other)
def __hash__(self):
return hash(str(self))
def IsMergableWith(self, that):
return (self.units == that.units and
type(self) == type(that) and
self.important == that.important)
@classmethod
def MergeLikeValuesFromSamePage(cls, values):
"""Combines the provided list of values into a single compound value.
When a page runs multiple times, it may produce multiple values. This
function is given the same-named values across the multiple runs, and has
the responsibility of producing a single result.
It must return a single Value. If merging does not make sense, the
implementation must pick a representative value from one of the runs.
For instance, it may be given
[ScalarValue(page, 'a', 1), ScalarValue(page, 'a', 2)]
and it might produce
ListOfScalarValues(page, 'a', [1, 2])
"""
raise NotImplementedError()
@classmethod
def MergeLikeValuesFromDifferentPages(cls, values):
"""Combines the provided values into a single compound value.
When a full pageset runs, a single value_name will usually end up getting
collected for multiple pages. For instance, we may end up with
[ScalarValue(page1, 'a', 1),
ScalarValue(page2, 'a', 2)]
This function takes in the values of the same name, but across multiple
pages, and produces a single summary result value. In this instance, it
could produce a ScalarValue(None, 'a', 1.5) to indicate averaging, or even
ListOfScalarValues(None, 'a', [1, 2]) if concatenated output was desired.
Some results are so specific to a page that they make no sense when
aggregated across pages. If merging values of this type across pages is
non-sensical, this method may return None.
"""
raise NotImplementedError()
def _IsImportantGivenOutputIntent(self, output_context):
if output_context == PER_PAGE_RESULT_OUTPUT_CONTEXT:
return False
elif output_context == COMPUTED_PER_PAGE_SUMMARY_OUTPUT_CONTEXT:
return self.important
elif output_context == SUMMARY_RESULT_OUTPUT_CONTEXT:
return self.important
def GetBuildbotDataType(self, output_context):
"""Returns the buildbot's equivalent data_type.
This should be one of the values accepted by perf_tests_results_helper.py.
"""
raise NotImplementedError()
def GetBuildbotValue(self):
"""Returns the buildbot's equivalent value."""
raise NotImplementedError()
def GetChartAndTraceNameForPerPageResult(self):
chart_name, _ = _ConvertValueNameToChartAndTraceName(self.name)
trace_name = self.page.display_name
return chart_name, trace_name
@property
def name_suffix(self):
"""Returns the string after a . in the name, or the full name otherwise."""
if '.' in self.name:
return self.name.split('.', 1)[1]
else:
return self.name
def GetChartAndTraceNameForComputedSummaryResult(
self, trace_tag):
chart_name, trace_name = (
_ConvertValueNameToChartAndTraceName(self.name))
if trace_tag:
return chart_name, trace_name + trace_tag
else:
return chart_name, trace_name
def GetRepresentativeNumber(self):
"""Gets a single scalar value that best-represents this value.
Returns None if not possible.
"""
raise NotImplementedError()
def GetRepresentativeString(self):
"""Gets a string value that best-represents this value.
Returns None if not possible.
"""
raise NotImplementedError()
@staticmethod
def GetJSONTypeName():
"""Gets the typename for serialization to JSON using AsDict."""
raise NotImplementedError()
def AsDict(self):
"""Pre-serializes a value to a dict for output as JSON."""
return self._AsDictImpl()
def _AsDictImpl(self):
d = {
'name': self.name,
'type': self.GetJSONTypeName(),
'units': self.units,
'important': self.important
}
if self.description:
d['description'] = self.description
if self.tir_label:
d['tir_label'] = self.tir_label
if self.page:
d['page_id'] = self.page.id
return d
def AsDictWithoutBaseClassEntries(self):
full_dict = self.AsDict()
base_dict_keys = set(self._AsDictImpl().keys())
# Extracts only entries added by the subclass.
return dict([(k, v) for (k, v) in full_dict.iteritems()
if k not in base_dict_keys])
@staticmethod
def FromDict(value_dict, page_dict):
"""Produces a value from a value dict and a page dict.
Value dicts are produced by serialization to JSON, and must be accompanied
by a dict mapping page IDs to pages, also produced by serialization, in
order to be completely deserialized. If deserializing multiple values, use
ListOfValuesFromListOfDicts instead.
value_dict: a dictionary produced by AsDict() on a value subclass.
page_dict: a dictionary mapping IDs to page objects.
"""
return Value.ListOfValuesFromListOfDicts([value_dict], page_dict)[0]
@staticmethod
def ListOfValuesFromListOfDicts(value_dicts, page_dict):
"""Takes a list of value dicts to values.
Given a list of value dicts produced by AsDict, this method
deserializes the dicts given a dict mapping page IDs to pages.
This method performs memoization for deserializing a list of values
efficiently, where FromDict is meant to handle one-offs.
values: a list of value dicts produced by AsDict() on a value subclass.
page_dict: a dictionary mapping IDs to page objects.
"""
value_dir = os.path.dirname(__file__)
value_classes = discover.DiscoverClasses(
value_dir, util.GetTelemetryDir(),
Value, index_by_class_name=True)
value_json_types = dict((value_classes[x].GetJSONTypeName(), x) for x in
value_classes)
values = []
for value_dict in value_dicts:
value_class = value_classes[value_json_types[value_dict['type']]]
assert 'FromDict' in value_class.__dict__, \
'Subclass doesn\'t override FromDict'
values.append(value_class.FromDict(value_dict, page_dict))
return values
@staticmethod
def GetConstructorKwArgs(value_dict, page_dict):
"""Produces constructor arguments from a value dict and a page dict.
Takes a dict parsed from JSON and an index of pages and recovers the
keyword arguments to be passed to the constructor for deserializing the
dict.
value_dict: a dictionary produced by AsDict() on a value subclass.
page_dict: a dictionary mapping IDs to page objects.
"""
d = {
'name': value_dict['name'],
'units': value_dict['units']
}
description = value_dict.get('description', None)
if description:
d['description'] = description
else:
d['description'] = None
page_id = value_dict.get('page_id', None)
if page_id:
d['page'] = page_dict[int(page_id)]
else:
d['page'] = None
d['important'] = False
tir_label = value_dict.get('tir_label', None)
if tir_label:
d['tir_label'] = tir_label
else:
d['tir_label'] = None
return d
def ValueNameFromTraceAndChartName(trace_name, chart_name=None):
"""Mangles a trace name plus optional chart name into a standard string.
A value might just be a bareword name, e.g. numPixels. In that case, its
chart may be None.
But, a value might also be intended for display with other values, in which
case the chart name indicates that grouping. So, you might have
screen.numPixels, screen.resolution, where chartName='screen'.
"""
assert trace_name != 'url', 'The name url cannot be used'
if chart_name:
return '%s.%s' % (chart_name, trace_name)
else:
assert '.' not in trace_name, ('Trace names cannot contain "." with an '
'empty chart_name since this is used to delimit chart_name.trace_name.')
return trace_name
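# For example (names chosen purely for illustration):
#   ValueNameFromTraceAndChartName('numPixels', 'screen') -> 'screen.numPixels'
#   ValueNameFromTraceAndChartName('resolution')          -> 'resolution'
# _ConvertValueNameToChartAndTraceName below performs the inverse split.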
def _ConvertValueNameToChartAndTraceName(value_name):
"""Converts a value_name into the equivalent chart-trace name pair.
Buildbot represents values by the measurement name and an optional trace name,
whereas telemetry represents values with a chart_name.trace_name convention,
where chart_name is optional. This convention is also used by chart_json.
This converts from the telemetry convention to the buildbot convention,
returning a 2-tuple (measurement_name, trace_name).
"""
if '.' in value_name:
return value_name.split('.', 1)
else:
return value_name, value_name
|
themrmax/scikit-learn | refs/heads/master | sklearn/cluster/mean_shift_.py | 5 | """Mean shift clustering algorithm.
Mean shift clustering aims to discover *blobs* in a smooth density of
samples. It is a centroid based algorithm, which works by updating candidates
for centroids to be the mean of the points within a given region. These
candidates are then filtered in a post-processing stage to eliminate
near-duplicates to form the final set of centroids.
Seeding is performed using a binning technique for scalability.
"""
# Authors: Conrad Lee <[email protected]>
# Alexandre Gramfort <[email protected]>
# Gael Varoquaux <[email protected]>
# Martino Sorbaro <[email protected]>
import numpy as np
import warnings
from collections import defaultdict
from ..externals import six
from ..utils.validation import check_is_fitted
from ..utils import check_random_state, gen_batches, check_array
from ..base import BaseEstimator, ClusterMixin
from ..neighbors import NearestNeighbors
from ..metrics.pairwise import pairwise_distances_argmin
from ..externals.joblib import Parallel
from ..externals.joblib import delayed
def estimate_bandwidth(X, quantile=0.3, n_samples=None, random_state=0,
n_jobs=1):
"""Estimate the bandwidth to use with the mean-shift algorithm.
    Note that this function takes time at least quadratic in n_samples. For
    large datasets, it is wise to subsample by setting n_samples to a small
    value.
Parameters
----------
X : array-like, shape=[n_samples, n_features]
Input points.
quantile : float, default 0.3
        Should be in the interval [0, 1]; 0.5 means that the median of all
        pairwise distances is used.
n_samples : int, optional
The number of samples to use. If not given, all samples are used.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
n_jobs : int, optional (default = 1)
The number of parallel jobs to run for neighbors search.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Returns
-------
bandwidth : float
The bandwidth parameter.
"""
X = check_array(X)
random_state = check_random_state(random_state)
if n_samples is not None:
idx = random_state.permutation(X.shape[0])[:n_samples]
X = X[idx]
nbrs = NearestNeighbors(n_neighbors=int(X.shape[0] * quantile),
n_jobs=n_jobs)
nbrs.fit(X)
bandwidth = 0.
for batch in gen_batches(len(X), 500):
d, _ = nbrs.kneighbors(X[batch, :], return_distance=True)
bandwidth += np.max(d, axis=1).sum()
return bandwidth / X.shape[0]
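# Hedged usage sketch (added for this write-up; not part of the original
# module): estimate_bandwidth on a small synthetic two-blob dataset. The exact
# value depends on the random data, so only basic sanity checks are made.
def _example_estimate_bandwidth():
    rng = np.random.RandomState(0)
    X = np.concatenate([rng.randn(50, 2), rng.randn(50, 2) + 5.0])
    bw = estimate_bandwidth(X, quantile=0.3, n_samples=60, random_state=0)
    assert np.isfinite(bw) and bw > 0
    return bw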
# separate function for each seed's iterative loop
def _mean_shift_single_seed(my_mean, X, nbrs, max_iter):
# For each seed, climb gradient until convergence or max_iter
bandwidth = nbrs.get_params()['radius']
stop_thresh = 1e-3 * bandwidth # when mean has converged
completed_iterations = 0
while True:
# Find mean of points within bandwidth
i_nbrs = nbrs.radius_neighbors([my_mean], bandwidth,
return_distance=False)[0]
points_within = X[i_nbrs]
if len(points_within) == 0:
break # Depending on seeding strategy this condition may occur
my_old_mean = my_mean # save the old mean
my_mean = np.mean(points_within, axis=0)
# If converged or at max_iter, adds the cluster
if (np.linalg.norm(my_mean - my_old_mean) < stop_thresh or
completed_iterations == max_iter):
return tuple(my_mean), len(points_within)
completed_iterations += 1
def mean_shift(X, bandwidth=None, seeds=None, bin_seeding=False,
min_bin_freq=1, cluster_all=True, max_iter=300,
n_jobs=1):
"""Perform mean shift clustering of data using a flat kernel.
Read more in the :ref:`User Guide <mean_shift>`.
Parameters
----------
X : array-like, shape=[n_samples, n_features]
Input data.
bandwidth : float, optional
Kernel bandwidth.
If bandwidth is not given, it is determined using a heuristic based on
the median of all pairwise distances. This will take quadratic time in
the number of samples. The sklearn.cluster.estimate_bandwidth function
can be used to do this more efficiently.
seeds : array-like, shape=[n_seeds, n_features] or None
Point used as initial kernel locations. If None and bin_seeding=False,
each data point is used as a seed. If None and bin_seeding=True,
see bin_seeding.
bin_seeding : boolean, default=False
If true, initial kernel locations are not locations of all
points, but rather the location of the discretized version of
points, where points are binned onto a grid whose coarseness
corresponds to the bandwidth. Setting this option to True will speed
up the algorithm because fewer seeds will be initialized.
Ignored if seeds argument is not None.
min_bin_freq : int, default=1
To speed up the algorithm, accept only those bins with at least
min_bin_freq points as seeds.
cluster_all : boolean, default True
If true, then all points are clustered, even those orphans that are
not within any kernel. Orphans are assigned to the nearest kernel.
If false, then orphans are given cluster label -1.
max_iter : int, default 300
Maximum number of iterations, per seed point before the clustering
operation terminates (for that seed point), if has not converged yet.
n_jobs : int
The number of jobs to use for the computation. This works by computing
each of the n_init runs in parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
.. versionadded:: 0.17
Parallel Execution using *n_jobs*.
Returns
-------
cluster_centers : array, shape=[n_clusters, n_features]
Coordinates of cluster centers.
labels : array, shape=[n_samples]
Cluster labels for each point.
Notes
-----
See examples/cluster/plot_mean_shift.py for an example.
"""
if bandwidth is None:
bandwidth = estimate_bandwidth(X, n_jobs=n_jobs)
elif bandwidth <= 0:
        raise ValueError("bandwidth needs to be greater than zero or None,"
                         " got %f" % bandwidth)
if seeds is None:
if bin_seeding:
seeds = get_bin_seeds(X, bandwidth, min_bin_freq)
else:
seeds = X
n_samples, n_features = X.shape
center_intensity_dict = {}
nbrs = NearestNeighbors(radius=bandwidth, n_jobs=n_jobs).fit(X)
# execute iterations on all seeds in parallel
all_res = Parallel(n_jobs=n_jobs)(
delayed(_mean_shift_single_seed)
(seed, X, nbrs, max_iter) for seed in seeds)
# copy results in a dictionary
for i in range(len(seeds)):
if all_res[i] is not None:
center_intensity_dict[all_res[i][0]] = all_res[i][1]
if not center_intensity_dict:
# nothing near seeds
        raise ValueError("No point was within bandwidth=%f of any seed."
                         " Try a different seeding strategy"
                         " or increase the bandwidth." % bandwidth)
# POST PROCESSING: remove near duplicate points
# If the distance between two kernels is less than the bandwidth,
# then we have to remove one because it is a duplicate. Remove the
# one with fewer points.
sorted_by_intensity = sorted(center_intensity_dict.items(),
key=lambda tup: tup[1], reverse=True)
sorted_centers = np.array([tup[0] for tup in sorted_by_intensity])
unique = np.ones(len(sorted_centers), dtype=np.bool)
nbrs = NearestNeighbors(radius=bandwidth,
n_jobs=n_jobs).fit(sorted_centers)
for i, center in enumerate(sorted_centers):
if unique[i]:
neighbor_idxs = nbrs.radius_neighbors([center],
return_distance=False)[0]
unique[neighbor_idxs] = 0
unique[i] = 1 # leave the current point as unique
cluster_centers = sorted_centers[unique]
# ASSIGN LABELS: a point belongs to the cluster that it is closest to
nbrs = NearestNeighbors(n_neighbors=1, n_jobs=n_jobs).fit(cluster_centers)
labels = np.zeros(n_samples, dtype=np.int)
distances, idxs = nbrs.kneighbors(X)
if cluster_all:
labels = idxs.flatten()
else:
labels.fill(-1)
bool_selector = distances.flatten() <= bandwidth
labels[bool_selector] = idxs.flatten()[bool_selector]
return cluster_centers, labels
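# Hedged usage note (added for this write-up; not part of the original
# module): the functional API above returns the centers and labels directly,
#
#   centers, labels = mean_shift(X, bandwidth=2.0, bin_seeding=True)
#
# where X is an (n_samples, n_features) array and the bandwidth value is just
# an example; the MeanShift estimator below is the class-based equivalent.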
def get_bin_seeds(X, bin_size, min_bin_freq=1):
"""Finds seeds for mean_shift.
Finds seeds by first binning data onto a grid whose lines are
spaced bin_size apart, and then choosing those bins with at least
min_bin_freq points.
Parameters
----------
X : array-like, shape=[n_samples, n_features]
Input points, the same points that will be used in mean_shift.
bin_size : float
Controls the coarseness of the binning. Smaller values lead
to more seeding (which is computationally more expensive). If you're
not sure how to set this, set it to the value of the bandwidth used
in clustering.mean_shift.
min_bin_freq : integer, optional
Only bins with at least min_bin_freq will be selected as seeds.
Raising this value decreases the number of seeds found, which
makes mean_shift computationally cheaper.
Returns
-------
bin_seeds : array-like, shape=[n_samples, n_features]
Points used as initial kernel positions in clustering.mean_shift.
"""
# Bin points
bin_sizes = defaultdict(int)
for point in X:
binned_point = np.round(point / bin_size)
bin_sizes[tuple(binned_point)] += 1
# Select only those bins as seeds which have enough members
bin_seeds = np.array([point for point, freq in six.iteritems(bin_sizes) if
freq >= min_bin_freq], dtype=np.float32)
if len(bin_seeds) == len(X):
warnings.warn("Binning data failed with provided bin_size=%f,"
" using data points as seeds." % bin_size)
return X
bin_seeds = bin_seeds * bin_size
return bin_seeds
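# Hedged illustration (added for this write-up; not part of the original
# module): with bin_size=1.0, three points near (1, 1) collapse onto a single
# grid seed, while the distant point contributes its own seed.
def _example_get_bin_seeds():
    X = np.array([[0.9, 1.1], [1.1, 0.9], [1.0, 1.0], [5.0, 5.0]])
    seeds = get_bin_seeds(X, bin_size=1.0, min_bin_freq=1)
    assert len(seeds) == 2  # one seed at [1, 1], one at [5, 5]
    return seeds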
class MeanShift(BaseEstimator, ClusterMixin):
"""Mean shift clustering using a flat kernel.
Mean shift clustering aims to discover "blobs" in a smooth density of
samples. It is a centroid-based algorithm, which works by updating
candidates for centroids to be the mean of the points within a given
region. These candidates are then filtered in a post-processing stage to
eliminate near-duplicates to form the final set of centroids.
Seeding is performed using a binning technique for scalability.
Read more in the :ref:`User Guide <mean_shift>`.
Parameters
----------
bandwidth : float, optional
Bandwidth used in the RBF kernel.
If not given, the bandwidth is estimated using
sklearn.cluster.estimate_bandwidth; see the documentation for that
function for hints on scalability (see also the Notes, below).
seeds : array, shape=[n_samples, n_features], optional
Seeds used to initialize kernels. If not set,
the seeds are calculated by clustering.get_bin_seeds
with bandwidth as the grid size and default values for
other parameters.
bin_seeding : boolean, optional
If true, initial kernel locations are not locations of all
points, but rather the location of the discretized version of
points, where points are binned onto a grid whose coarseness
corresponds to the bandwidth. Setting this option to True will speed
up the algorithm because fewer seeds will be initialized.
default value: False
Ignored if seeds argument is not None.
min_bin_freq : int, optional
To speed up the algorithm, accept only those bins with at least
min_bin_freq points as seeds. If not defined, set to 1.
cluster_all : boolean, default True
If true, then all points are clustered, even those orphans that are
not within any kernel. Orphans are assigned to the nearest kernel.
If false, then orphans are given cluster label -1.
n_jobs : int
The number of jobs to use for the computation. This works by computing
each of the n_init runs in parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
Attributes
----------
cluster_centers_ : array, [n_clusters, n_features]
Coordinates of cluster centers.
labels_ :
Labels of each point.
Notes
-----
Scalability:
Because this implementation uses a flat kernel and
a Ball Tree to look up members of each kernel, the complexity will tend
towards O(T*n*log(n)) in lower dimensions, with n the number of samples
and T the number of points. In higher dimensions the complexity will
tend towards O(T*n^2).
Scalability can be boosted by using fewer seeds, for example by using
a higher value of min_bin_freq in the get_bin_seeds function.
Note that the estimate_bandwidth function is much less scalable than the
mean shift algorithm and will be the bottleneck if it is used.
References
----------
Dorin Comaniciu and Peter Meer, "Mean Shift: A robust approach toward
feature space analysis". IEEE Transactions on Pattern Analysis and
Machine Intelligence. 2002. pp. 603-619.
"""
def __init__(self, bandwidth=None, seeds=None, bin_seeding=False,
min_bin_freq=1, cluster_all=True, n_jobs=1):
self.bandwidth = bandwidth
self.seeds = seeds
self.bin_seeding = bin_seeding
self.cluster_all = cluster_all
self.min_bin_freq = min_bin_freq
self.n_jobs = n_jobs
def fit(self, X, y=None):
"""Perform clustering.
Parameters
-----------
X : array-like, shape=[n_samples, n_features]
Samples to cluster.
"""
X = check_array(X)
self.cluster_centers_, self.labels_ = \
mean_shift(X, bandwidth=self.bandwidth, seeds=self.seeds,
min_bin_freq=self.min_bin_freq,
bin_seeding=self.bin_seeding,
cluster_all=self.cluster_all, n_jobs=self.n_jobs)
return self
def predict(self, X):
"""Predict the closest cluster each sample in X belongs to.
Parameters
----------
X : {array-like, sparse matrix}, shape=[n_samples, n_features]
New data to predict.
Returns
-------
labels : array, shape [n_samples,]
Index of the cluster each sample belongs to.
"""
check_is_fitted(self, "cluster_centers_")
return pairwise_distances_argmin(X, self.cluster_centers_)
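# Hedged end-to-end sketch (added for this write-up; not part of the original
# module): fit MeanShift on two well-separated Gaussian blobs and label new
# points. The bandwidth and data are illustrative choices, not recommended
# defaults.
def _example_mean_shift_usage():
    rng = np.random.RandomState(42)
    X = np.concatenate([rng.randn(100, 2), rng.randn(100, 2) + 10.0])
    ms = MeanShift(bandwidth=2.0, bin_seeding=True)
    ms.fit(X)
    # Two clear blobs typically yield two cluster centers after the
    # near-duplicate removal step.
    labels = ms.predict(np.array([[0.0, 0.0], [10.0, 10.0]]))
    # The two query points sit in different blobs, so their nearest learned
    # centers (and hence labels) differ.
    assert labels[0] != labels[1]
    return ms.cluster_centers_, labels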
|
Ms2ger/servo | refs/heads/master | tests/wpt/web-platform-tests/tools/wptserve/tests/functional/test_cookies.py | 109 | import unittest
import wptserve
from .base import TestUsingServer
class TestResponseSetCookie(TestUsingServer):
def test_name_value(self):
@wptserve.handlers.handler
def handler(request, response):
response.set_cookie("name", "value")
return "Test"
route = ("GET", "/test/name_value", handler)
self.server.router.register(*route)
resp = self.request(route[1])
self.assertEqual(resp.info()["Set-Cookie"], "name=value; Path=/")
def test_unset(self):
@wptserve.handlers.handler
def handler(request, response):
response.set_cookie("name", "value")
response.unset_cookie("name")
return "Test"
route = ("GET", "/test/unset", handler)
self.server.router.register(*route)
resp = self.request(route[1])
self.assertTrue("Set-Cookie" not in resp.info())
def test_delete(self):
@wptserve.handlers.handler
def handler(request, response):
response.delete_cookie("name")
return "Test"
route = ("GET", "/test/delete", handler)
self.server.router.register(*route)
resp = self.request(route[1])
parts = dict(item.split("=") for
item in resp.info()["Set-Cookie"].split("; ") if item)
self.assertEqual(parts["name"], "")
self.assertEqual(parts["Path"], "/")
        # Should also check that expires is in the past
class TestRequestCookies(TestUsingServer):
def test_set_cookie(self):
@wptserve.handlers.handler
def handler(request, response):
return request.cookies["name"].value
route = ("GET", "/test/set_cookie", handler)
self.server.router.register(*route)
resp = self.request(route[1], headers={"Cookie": "name=value"})
self.assertEqual(resp.read(), b"value")
if __name__ == '__main__':
unittest.main()
|
saurabhbajaj207/CarpeDiem | refs/heads/master | venv/Lib/site-packages/pip/commands/wheel.py | 341 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
import logging
import os
import warnings
from pip.basecommand import RequirementCommand
from pip.exceptions import CommandError, PreviousBuildDirError
from pip.req import RequirementSet
from pip.utils import import_or_raise
from pip.utils.build import BuildDirectory
from pip.utils.deprecation import RemovedInPip10Warning
from pip.wheel import WheelCache, WheelBuilder
from pip import cmdoptions
logger = logging.getLogger(__name__)
class WheelCommand(RequirementCommand):
"""
Build Wheel archives for your requirements and dependencies.
Wheel is a built-package format, and offers the advantage of not
recompiling your software during every install. For more details, see the
wheel docs: https://wheel.readthedocs.io/en/latest/
Requirements: setuptools>=0.8, and wheel.
'pip wheel' uses the bdist_wheel setuptools extension from the wheel
package to build individual wheels.
"""
name = 'wheel'
usage = """
%prog [options] <requirement specifier> ...
%prog [options] -r <requirements file> ...
%prog [options] [-e] <vcs project url> ...
%prog [options] [-e] <local project path> ...
%prog [options] <archive url/path> ..."""
summary = 'Build wheels from your requirements.'
def __init__(self, *args, **kw):
super(WheelCommand, self).__init__(*args, **kw)
cmd_opts = self.cmd_opts
cmd_opts.add_option(
'-w', '--wheel-dir',
dest='wheel_dir',
metavar='dir',
default=os.curdir,
help=("Build wheels into <dir>, where the default is the "
"current working directory."),
)
cmd_opts.add_option(cmdoptions.use_wheel())
cmd_opts.add_option(cmdoptions.no_use_wheel())
cmd_opts.add_option(cmdoptions.no_binary())
cmd_opts.add_option(cmdoptions.only_binary())
cmd_opts.add_option(
'--build-option',
dest='build_options',
metavar='options',
action='append',
help="Extra arguments to be supplied to 'setup.py bdist_wheel'.")
cmd_opts.add_option(cmdoptions.constraints())
cmd_opts.add_option(cmdoptions.editable())
cmd_opts.add_option(cmdoptions.requirements())
cmd_opts.add_option(cmdoptions.src())
cmd_opts.add_option(cmdoptions.ignore_requires_python())
cmd_opts.add_option(cmdoptions.no_deps())
cmd_opts.add_option(cmdoptions.build_dir())
cmd_opts.add_option(
'--global-option',
dest='global_options',
action='append',
metavar='options',
help="Extra global options to be supplied to the setup.py "
"call before the 'bdist_wheel' command.")
cmd_opts.add_option(
'--pre',
action='store_true',
default=False,
help=("Include pre-release and development versions. By default, "
"pip only finds stable versions."),
)
cmd_opts.add_option(cmdoptions.no_clean())
cmd_opts.add_option(cmdoptions.require_hashes())
index_opts = cmdoptions.make_option_group(
cmdoptions.index_group,
self.parser,
)
self.parser.insert_option_group(0, index_opts)
self.parser.insert_option_group(0, cmd_opts)
def check_required_packages(self):
import_or_raise(
'wheel.bdist_wheel',
CommandError,
"'pip wheel' requires the 'wheel' package. To fix this, run: "
"pip install wheel"
)
pkg_resources = import_or_raise(
'pkg_resources',
CommandError,
"'pip wheel' requires setuptools >= 0.8 for dist-info support."
" To fix this, run: pip install --upgrade setuptools"
)
if not hasattr(pkg_resources, 'DistInfoDistribution'):
raise CommandError(
"'pip wheel' requires setuptools >= 0.8 for dist-info "
"support. To fix this, run: pip install --upgrade "
"setuptools"
)
def run(self, options, args):
self.check_required_packages()
cmdoptions.resolve_wheel_no_use_binary(options)
cmdoptions.check_install_build_global(options)
if options.allow_external:
warnings.warn(
"--allow-external has been deprecated and will be removed in "
"the future. Due to changes in the repository protocol, it no "
"longer has any effect.",
RemovedInPip10Warning,
)
if options.allow_all_external:
warnings.warn(
"--allow-all-external has been deprecated and will be removed "
"in the future. Due to changes in the repository protocol, it "
"no longer has any effect.",
RemovedInPip10Warning,
)
if options.allow_unverified:
warnings.warn(
"--allow-unverified has been deprecated and will be removed "
"in the future. Due to changes in the repository protocol, it "
"no longer has any effect.",
RemovedInPip10Warning,
)
index_urls = [options.index_url] + options.extra_index_urls
if options.no_index:
logger.debug('Ignoring indexes: %s', ','.join(index_urls))
index_urls = []
if options.build_dir:
options.build_dir = os.path.abspath(options.build_dir)
options.src_dir = os.path.abspath(options.src_dir)
with self._build_session(options) as session:
finder = self._build_package_finder(options, session)
build_delete = (not (options.no_clean or options.build_dir))
wheel_cache = WheelCache(options.cache_dir, options.format_control)
with BuildDirectory(options.build_dir,
delete=build_delete) as build_dir:
requirement_set = RequirementSet(
build_dir=build_dir,
src_dir=options.src_dir,
download_dir=None,
ignore_dependencies=options.ignore_dependencies,
ignore_installed=True,
ignore_requires_python=options.ignore_requires_python,
isolated=options.isolated_mode,
session=session,
wheel_cache=wheel_cache,
wheel_download_dir=options.wheel_dir,
require_hashes=options.require_hashes
)
self.populate_requirement_set(
requirement_set, args, options, finder, session, self.name,
wheel_cache
)
if not requirement_set.has_requirements:
return
try:
# build wheels
wb = WheelBuilder(
requirement_set,
finder,
build_options=options.build_options or [],
global_options=options.global_options or [],
)
if not wb.build():
raise CommandError(
"Failed to build one or more wheels"
)
except PreviousBuildDirError:
options.no_clean = True
raise
finally:
if not options.no_clean:
requirement_set.cleanup_files()
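# Hedged usage notes (added for this write-up; not part of the original
# module). Typical invocations that exercise the options registered above,
# assuming pip plus the 'wheel' package are installed; paths are placeholders:
#
#   pip wheel -w ./wheelhouse -r requirements.txt
#   pip wheel -w ./wheelhouse --no-deps .
#
# The first builds wheels for every requirement (and its dependencies) into
# ./wheelhouse; the second builds only the project in the current directory.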
|
concentricsky/django-allauth | refs/heads/master | allauth/socialaccount/providers/linkedin/provider.py | 68 | from allauth.socialaccount import providers
from allauth.socialaccount.providers.base import ProviderAccount
from allauth.socialaccount.providers.oauth.provider import OAuthProvider
from allauth.socialaccount import app_settings
class LinkedInAccount(ProviderAccount):
def get_profile_url(self):
return self.account.extra_data.get('public-profile-url')
def get_avatar_url(self):
# try to return the higher res picture-urls::(original) first
try:
if self.account.extra_data.get('picture-urls', {}).get('picture-url'):
return self.account.extra_data.get('picture-urls', {}).get('picture-url')
        except Exception:
pass # if we can't get higher res for any reason, we'll just return the low res
return self.account.extra_data.get('picture-url')
def to_str(self):
dflt = super(LinkedInAccount, self).to_str()
name = self.account.extra_data.get('name', dflt)
first_name = self.account.extra_data.get('first-name', None)
last_name = self.account.extra_data.get('last-name', None)
if first_name and last_name:
name = first_name+' '+last_name
return name
class LinkedInProvider(OAuthProvider):
id = 'linkedin'
name = 'LinkedIn'
package = 'allauth.socialaccount.providers.linkedin'
account_class = LinkedInAccount
def get_default_scope(self):
scope = []
if app_settings.QUERY_EMAIL:
scope.append('r_emailaddress')
return scope
def get_profile_fields(self):
default_fields = ['id',
'first-name',
'last-name',
'email-address',
'picture-url',
'picture-urls::(original)', # picture-urls::(original) is higher res
'public-profile-url']
fields = self.get_settings().get('PROFILE_FIELDS',
default_fields)
return fields
def extract_uid(self, data):
return data['id']
def extract_common_fields(self, data):
return dict(email=data.get('email-address'),
first_name=data.get('first-name'),
last_name=data.get('last-name'))
providers.registry.register(LinkedInProvider)
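# Hedged configuration sketch (added for this write-up; not part of the
# original module). get_profile_fields() reads PROFILE_FIELDS from the
# per-provider settings, so a Django project could narrow the requested
# fields roughly like this (field names shown are a subset of the defaults
# above):
#
# SOCIALACCOUNT_PROVIDERS = {
#     'linkedin': {
#         'PROFILE_FIELDS': [
#             'id',
#             'first-name',
#             'last-name',
#             'email-address',
#             'public-profile-url',
#         ],
#     },
# }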
|
wimnat/ansible | refs/heads/devel | test/lib/ansible_test/_data/sanity/pylint/plugins/deprecated.py | 5 | # (c) 2018, Matt Martz <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import datetime
import re
from distutils.version import LooseVersion
import astroid
from pylint.interfaces import IAstroidChecker
from pylint.checkers import BaseChecker
from pylint.checkers.utils import check_messages
from ansible.module_utils.six import string_types
from ansible.release import __version__ as ansible_version_raw
from ansible.utils.version import SemanticVersion
MSGS = {
'E9501': ("Deprecated version (%r) found in call to Display.deprecated "
"or AnsibleModule.deprecate",
"ansible-deprecated-version",
"Used when a call to Display.deprecated specifies a version "
"less than or equal to the current version of Ansible",
{'minversion': (2, 6)}),
'E9502': ("Display.deprecated call without a version or date",
"ansible-deprecated-no-version",
"Used when a call to Display.deprecated does not specify a "
"version or date",
{'minversion': (2, 6)}),
'E9503': ("Invalid deprecated version (%r) found in call to "
"Display.deprecated or AnsibleModule.deprecate",
"ansible-invalid-deprecated-version",
"Used when a call to Display.deprecated specifies an invalid "
"Ansible version number",
{'minversion': (2, 6)}),
'E9504': ("Deprecated version (%r) found in call to Display.deprecated "
"or AnsibleModule.deprecate",
"collection-deprecated-version",
"Used when a call to Display.deprecated specifies a collection "
"version less than or equal to the current version of this "
"collection",
{'minversion': (2, 6)}),
'E9505': ("Invalid deprecated version (%r) found in call to "
"Display.deprecated or AnsibleModule.deprecate",
"collection-invalid-deprecated-version",
"Used when a call to Display.deprecated specifies an invalid "
"collection version number",
{'minversion': (2, 6)}),
'E9506': ("No collection name found in call to Display.deprecated or "
"AnsibleModule.deprecate",
"ansible-deprecated-no-collection-name",
"The current collection name in format `namespace.name` must "
"be provided as collection_name when calling Display.deprecated "
"or AnsibleModule.deprecate (`ansible.builtin` for ansible-core)",
{'minversion': (2, 6)}),
'E9507': ("Wrong collection name (%r) found in call to "
"Display.deprecated or AnsibleModule.deprecate",
"wrong-collection-deprecated",
"The name of the current collection must be passed to the "
"Display.deprecated resp. AnsibleModule.deprecate calls "
"(`ansible.builtin` for ansible-core)",
{'minversion': (2, 6)}),
'E9508': ("Expired date (%r) found in call to Display.deprecated "
"or AnsibleModule.deprecate",
"ansible-deprecated-date",
"Used when a call to Display.deprecated specifies a date "
"before today",
{'minversion': (2, 6)}),
'E9509': ("Invalid deprecated date (%r) found in call to "
"Display.deprecated or AnsibleModule.deprecate",
"ansible-invalid-deprecated-date",
"Used when a call to Display.deprecated specifies an invalid "
"date. It must be a string in format `YYYY-MM-DD` (ISO 8601)",
{'minversion': (2, 6)}),
'E9510': ("Both version and date found in call to "
"Display.deprecated or AnsibleModule.deprecate",
"ansible-deprecated-both-version-and-date",
"Only one of version and date must be specified",
{'minversion': (2, 6)}),
'E9511': ("Removal version (%r) must be a major release, not a minor or "
"patch release (see the specification at https://semver.org/)",
"removal-version-must-be-major",
"Used when a call to Display.deprecated or "
"AnsibleModule.deprecate for a collection specifies a version "
"which is not of the form x.0.0",
{'minversion': (2, 6)}),
}
ANSIBLE_VERSION = LooseVersion('.'.join(ansible_version_raw.split('.')[:3]))
def _get_expr_name(node):
    """Function to get either ``attrname`` or ``name`` from ``node.func.expr``
Created specifically for the case of ``display.deprecated`` or ``self._display.deprecated``
"""
try:
return node.func.expr.attrname
except AttributeError:
# If this fails too, we'll let it raise, the caller should catch it
return node.func.expr.name
def parse_isodate(value):
msg = 'Expected ISO 8601 date string (YYYY-MM-DD)'
if not isinstance(value, string_types):
raise ValueError(msg)
    # From Python 3.7 on, there is datetime.date.fromisoformat(). For older versions,
# we have to do things manually.
if not re.match('^[0-9]{4}-[0-9]{2}-[0-9]{2}$', value):
raise ValueError(msg)
try:
return datetime.datetime.strptime(value, '%Y-%m-%d').date()
except ValueError:
raise ValueError(msg)
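# Hedged illustration (added for this write-up; not part of the original
# checker): parse_isodate accepts only strict YYYY-MM-DD strings and raises
# ValueError for anything else.
def _example_parse_isodate():
    assert parse_isodate('2023-01-31') == datetime.date(2023, 1, 31)
    for bad in ('2023-1-31', '20230131', 2023, '2023-01-31T00:00:00'):
        try:
            parse_isodate(bad)
        except ValueError:
            pass
        else:
            raise AssertionError('expected ValueError for %r' % (bad,))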
class AnsibleDeprecatedChecker(BaseChecker):
"""Checks for Display.deprecated calls to ensure that the ``version``
has not passed or met the time for removal
"""
__implements__ = (IAstroidChecker,)
name = 'deprecated'
msgs = MSGS
options = (
('collection-name', {
'default': None,
'type': 'string',
'metavar': '<name>',
'help': 'The collection\'s name used to check collection names in deprecations.',
}),
('collection-version', {
'default': None,
'type': 'string',
'metavar': '<version>',
'help': 'The collection\'s version number used to check deprecations.',
}),
)
def __init__(self, *args, **kwargs):
self.collection_version = None
self.collection_name = None
super(AnsibleDeprecatedChecker, self).__init__(*args, **kwargs)
def set_option(self, optname, value, action=None, optdict=None):
super(AnsibleDeprecatedChecker, self).set_option(optname, value, action, optdict)
if optname == 'collection-version' and value is not None:
self.collection_version = SemanticVersion(self.config.collection_version)
if optname == 'collection-name' and value is not None:
self.collection_name = self.config.collection_name
def _check_date(self, node, date):
if not isinstance(date, str):
            self.add_message('ansible-invalid-deprecated-date', node=node, args=(date,))
return
try:
date_parsed = parse_isodate(date)
except ValueError:
self.add_message('ansible-invalid-deprecated-date', node=node, args=(date,))
return
if date_parsed < datetime.date.today():
self.add_message('ansible-deprecated-date', node=node, args=(date,))
def _check_version(self, node, version, collection_name):
if not isinstance(version, (str, float)):
            self.add_message('ansible-invalid-deprecated-version', node=node, args=(version,))
return
version_no = str(version)
if collection_name == 'ansible.builtin':
# Ansible-base
try:
if not version_no:
raise ValueError('Version string should not be empty')
loose_version = LooseVersion(str(version_no))
if ANSIBLE_VERSION >= loose_version:
self.add_message('ansible-deprecated-version', node=node, args=(version,))
except ValueError:
self.add_message('ansible-invalid-deprecated-version', node=node, args=(version,))
elif collection_name:
# Collections
try:
if not version_no:
raise ValueError('Version string should not be empty')
semantic_version = SemanticVersion(version_no)
if collection_name == self.collection_name and self.collection_version is not None:
if self.collection_version >= semantic_version:
self.add_message('collection-deprecated-version', node=node, args=(version,))
if semantic_version.major != 0 and (semantic_version.minor != 0 or semantic_version.patch != 0):
self.add_message('removal-version-must-be-major', node=node, args=(version,))
except ValueError:
self.add_message('collection-invalid-deprecated-version', node=node, args=(version,))
@check_messages(*(MSGS.keys()))
def visit_call(self, node):
version = None
date = None
collection_name = None
try:
if (node.func.attrname == 'deprecated' and 'display' in _get_expr_name(node) or
node.func.attrname == 'deprecate' and _get_expr_name(node)):
if node.keywords:
for keyword in node.keywords:
if len(node.keywords) == 1 and keyword.arg is None:
# This is likely a **kwargs splat
return
if keyword.arg == 'version':
if isinstance(keyword.value.value, astroid.Name):
# This is likely a variable
return
version = keyword.value.value
if keyword.arg == 'date':
if isinstance(keyword.value.value, astroid.Name):
# This is likely a variable
return
date = keyword.value.value
if keyword.arg == 'collection_name':
if isinstance(keyword.value.value, astroid.Name):
# This is likely a variable
return
collection_name = keyword.value.value
if not version and not date:
try:
version = node.args[1].value
except IndexError:
self.add_message('ansible-deprecated-no-version', node=node)
return
if version and date:
self.add_message('ansible-deprecated-both-version-and-date', node=node)
if collection_name:
this_collection = collection_name == (self.collection_name or 'ansible.builtin')
if not this_collection:
self.add_message('wrong-collection-deprecated', node=node, args=(collection_name,))
else:
self.add_message('ansible-deprecated-no-collection-name', node=node)
if date:
self._check_date(node, date)
elif version:
self._check_version(node, version, collection_name)
except AttributeError:
# Not the type of node we are interested in
pass
def register(linter):
"""required method to auto register this checker """
linter.register_checker(AnsibleDeprecatedChecker(linter))
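# Hedged usage notes (added for this write-up; not part of the original
# plugin). Once this module is importable by pylint, the checker options
# declared above become command-line flags, e.g.
#
#   pylint --load-plugins=deprecated \
#          --collection-name=community.general \
#          --collection-version=4.0.0 plugins/modules/foo.py
#
# The collection name/version and module path are placeholders, not values
# taken from the original test suite.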
|
girving/tensorflow | refs/heads/master | tensorflow/contrib/summary/summary_ops_graph_test.py | 15 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tempfile
import time
import six
from tensorflow.contrib.summary import summary_test_util
from tensorflow.core.framework import graph_pb2
from tensorflow.core.framework import node_def_pb2
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import summary_ops_v2 as summary_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.training import training_util
get_all = summary_test_util.get_all
class GraphFileTest(test_util.TensorFlowTestCase):
def testSummaryOps(self):
logdir = self.get_temp_dir()
writer = summary_ops.create_file_writer(logdir, max_queue=0)
with writer.as_default(), summary_ops.always_record_summaries():
summary_ops.generic('tensor', 1, step=1)
summary_ops.scalar('scalar', 2.0, step=1)
summary_ops.histogram('histogram', [1.0], step=1)
summary_ops.image('image', [[[[1.0]]]], step=1)
summary_ops.audio('audio', [[1.0]], 1.0, 1, step=1)
with self.cached_session() as sess:
sess.run(summary_ops.summary_writer_initializer_op())
sess.run(summary_ops.all_summary_ops())
# The working condition of the ops is tested in the C++ test so we just
# test here that we're calling them correctly.
self.assertTrue(gfile.Exists(logdir))
def testSummaryName(self):
logdir = self.get_temp_dir()
writer = summary_ops.create_file_writer(logdir, max_queue=0)
with writer.as_default(), summary_ops.always_record_summaries():
summary_ops.scalar('scalar', 2.0, step=1)
with self.cached_session() as sess:
sess.run(summary_ops.summary_writer_initializer_op())
sess.run(summary_ops.all_summary_ops())
events = summary_test_util.events_from_logdir(logdir)
self.assertEqual(2, len(events))
self.assertEqual('scalar', events[1].summary.value[0].tag)
def testSummaryNameScope(self):
logdir = self.get_temp_dir()
writer = summary_ops.create_file_writer(logdir, max_queue=0)
with writer.as_default(), summary_ops.always_record_summaries():
with ops.name_scope('scope'):
summary_ops.scalar('scalar', 2.0, step=1)
with self.cached_session() as sess:
sess.run(summary_ops.summary_writer_initializer_op())
sess.run(summary_ops.all_summary_ops())
events = summary_test_util.events_from_logdir(logdir)
self.assertEqual(2, len(events))
self.assertEqual('scope/scalar', events[1].summary.value[0].tag)
def testSummaryGlobalStep(self):
training_util.get_or_create_global_step()
logdir = self.get_temp_dir()
writer = summary_ops.create_file_writer(logdir, max_queue=0)
with writer.as_default(), summary_ops.always_record_summaries():
summary_ops.scalar('scalar', 2.0)
with self.cached_session() as sess:
sess.run(variables.global_variables_initializer())
sess.run(summary_ops.summary_writer_initializer_op())
step, _ = sess.run(
[training_util.get_global_step(), summary_ops.all_summary_ops()])
events = summary_test_util.events_from_logdir(logdir)
self.assertEqual(2, len(events))
self.assertEqual(step, events[1].step)
def testMaxQueue(self):
logdir = self.get_temp_dir()
writer = summary_ops.create_file_writer(
logdir, max_queue=1, flush_millis=999999)
with writer.as_default(), summary_ops.always_record_summaries():
summary_ops.scalar('scalar', 2.0, step=1)
with self.cached_session() as sess:
sess.run(summary_ops.summary_writer_initializer_op())
get_total = lambda: len(summary_test_util.events_from_logdir(logdir))
# Note: First tf.Event is always file_version.
self.assertEqual(1, get_total())
sess.run(summary_ops.all_summary_ops())
self.assertEqual(1, get_total())
# Should flush after second summary since max_queue = 1
sess.run(summary_ops.all_summary_ops())
self.assertEqual(3, get_total())
def testFlushFunction(self):
logdir = self.get_temp_dir()
writer = summary_ops.create_file_writer(
logdir, max_queue=999999, flush_millis=999999)
with writer.as_default(), summary_ops.always_record_summaries():
summary_ops.scalar('scalar', 2.0, step=1)
flush_op = summary_ops.flush()
with self.cached_session() as sess:
sess.run(summary_ops.summary_writer_initializer_op())
get_total = lambda: len(summary_test_util.events_from_logdir(logdir))
# Note: First tf.Event is always file_version.
self.assertEqual(1, get_total())
sess.run(summary_ops.all_summary_ops())
self.assertEqual(1, get_total())
sess.run(flush_op)
self.assertEqual(2, get_total())
# Test "writer" parameter
sess.run(summary_ops.all_summary_ops())
sess.run(summary_ops.flush(writer=writer))
self.assertEqual(3, get_total())
sess.run(summary_ops.all_summary_ops())
sess.run(summary_ops.flush(writer=writer._resource)) # pylint:disable=protected-access
self.assertEqual(4, get_total())
def testSharedName(self):
logdir = self.get_temp_dir()
with summary_ops.always_record_summaries():
# Create with default shared name (should match logdir)
writer1 = summary_ops.create_file_writer(logdir)
with writer1.as_default():
summary_ops.scalar('one', 1.0, step=1)
# Create with explicit logdir shared name (should be same resource/file)
shared_name = 'logdir:' + logdir
writer2 = summary_ops.create_file_writer(logdir, name=shared_name)
with writer2.as_default():
summary_ops.scalar('two', 2.0, step=2)
# Create with different shared name (should be separate resource/file)
writer3 = summary_ops.create_file_writer(logdir, name='other')
with writer3.as_default():
summary_ops.scalar('three', 3.0, step=3)
with self.cached_session() as sess:
# Run init ops across writers sequentially to avoid race condition.
# TODO(nickfelt): fix race condition in resource manager lookup or create
sess.run(writer1.init())
sess.run(writer2.init())
time.sleep(1.1) # Ensure filename has a different timestamp
sess.run(writer3.init())
sess.run(summary_ops.all_summary_ops())
sess.run([writer1.flush(), writer2.flush(), writer3.flush()])
event_files = iter(sorted(gfile.Glob(os.path.join(logdir, '*tfevents*'))))
# First file has tags "one" and "two"
events = summary_test_util.events_from_file(next(event_files))
self.assertEqual('brain.Event:2', events[0].file_version)
tags = [e.summary.value[0].tag for e in events[1:]]
self.assertItemsEqual(['one', 'two'], tags)
# Second file has tag "three"
events = summary_test_util.events_from_file(next(event_files))
self.assertEqual('brain.Event:2', events[0].file_version)
tags = [e.summary.value[0].tag for e in events[1:]]
self.assertItemsEqual(['three'], tags)
# No more files
self.assertRaises(StopIteration, lambda: next(event_files))
def testWriterInitAndClose(self):
logdir = self.get_temp_dir()
with summary_ops.always_record_summaries():
writer = summary_ops.create_file_writer(
logdir, max_queue=100, flush_millis=1000000)
with writer.as_default():
summary_ops.scalar('one', 1.0, step=1)
with self.cached_session() as sess:
sess.run(summary_ops.summary_writer_initializer_op())
get_total = lambda: len(summary_test_util.events_from_logdir(logdir))
self.assertEqual(1, get_total()) # file_version Event
# Running init() again while writer is open has no effect
sess.run(writer.init())
self.assertEqual(1, get_total())
sess.run(summary_ops.all_summary_ops())
self.assertEqual(1, get_total())
# Running close() should do an implicit flush
sess.run(writer.close())
self.assertEqual(2, get_total())
# Running init() on a closed writer should start a new file
time.sleep(1.1) # Ensure filename has a different timestamp
sess.run(writer.init())
sess.run(summary_ops.all_summary_ops())
sess.run(writer.close())
files = sorted(gfile.Glob(os.path.join(logdir, '*tfevents*')))
self.assertEqual(2, len(files))
self.assertEqual(2, len(summary_test_util.events_from_file(files[1])))
def testWriterFlush(self):
logdir = self.get_temp_dir()
with summary_ops.always_record_summaries():
writer = summary_ops.create_file_writer(
logdir, max_queue=100, flush_millis=1000000)
with writer.as_default():
summary_ops.scalar('one', 1.0, step=1)
with self.cached_session() as sess:
sess.run(summary_ops.summary_writer_initializer_op())
get_total = lambda: len(summary_test_util.events_from_logdir(logdir))
self.assertEqual(1, get_total()) # file_version Event
sess.run(summary_ops.all_summary_ops())
self.assertEqual(1, get_total())
sess.run(writer.flush())
self.assertEqual(2, get_total())
class GraphDbTest(summary_test_util.SummaryDbTest):
def testGraphPassedToGraph_isForbiddenForThineOwnSafety(self):
with self.assertRaises(TypeError):
summary_ops.graph(ops.Graph())
with self.assertRaises(TypeError):
summary_ops.graph('')
def testGraphSummary(self):
training_util.get_or_create_global_step()
name = 'hi'
graph = graph_pb2.GraphDef(node=(node_def_pb2.NodeDef(name=name),))
with self.cached_session():
with self.create_db_writer().as_default():
summary_ops.initialize(graph=graph)
six.assertCountEqual(self, [name],
get_all(self.db, 'SELECT node_name FROM Nodes'))
def testScalarSummary(self):
"""Test record_summaries_every_n_global_steps and all_summaries()."""
with ops.Graph().as_default(), self.cached_session() as sess:
global_step = training_util.get_or_create_global_step()
global_step.initializer.run()
with ops.device('/cpu:0'):
step_increment = state_ops.assign_add(global_step, 1)
sess.run(step_increment) # Increment global step from 0 to 1
logdir = tempfile.mkdtemp()
with summary_ops.create_file_writer(logdir, max_queue=0,
name='t2').as_default():
with summary_ops.record_summaries_every_n_global_steps(2):
summary_ops.initialize()
summary_op = summary_ops.scalar('my_scalar', 2.0)
# Neither of these should produce a summary because
# global_step is 1 and "1 % 2 != 0"
sess.run(summary_ops.all_summary_ops())
sess.run(summary_op)
events = summary_test_util.events_from_logdir(logdir)
self.assertEqual(len(events), 1)
# Increment global step from 1 to 2 and check that the summary
# is now written
sess.run(step_increment)
sess.run(summary_ops.all_summary_ops())
events = summary_test_util.events_from_logdir(logdir)
self.assertEqual(len(events), 2)
self.assertEqual(events[1].summary.value[0].tag, 'my_scalar')
def testScalarSummaryNameScope(self):
"""Test record_summaries_every_n_global_steps and all_summaries()."""
with ops.Graph().as_default(), self.cached_session() as sess:
global_step = training_util.get_or_create_global_step()
global_step.initializer.run()
with ops.device('/cpu:0'):
step_increment = state_ops.assign_add(global_step, 1)
sess.run(step_increment) # Increment global step from 0 to 1
logdir = tempfile.mkdtemp()
with summary_ops.create_file_writer(logdir, max_queue=0,
name='t2').as_default():
with summary_ops.record_summaries_every_n_global_steps(2):
summary_ops.initialize()
with ops.name_scope('scope'):
summary_op = summary_ops.scalar('my_scalar', 2.0)
# Neither of these should produce a summary because
# global_step is 1 and "1 % 2 != 0"
sess.run(summary_ops.all_summary_ops())
sess.run(summary_op)
events = summary_test_util.events_from_logdir(logdir)
self.assertEqual(len(events), 1)
# Increment global step from 1 to 2 and check that the summary
# is now written
sess.run(step_increment)
sess.run(summary_ops.all_summary_ops())
events = summary_test_util.events_from_logdir(logdir)
self.assertEqual(len(events), 2)
self.assertEqual(events[1].summary.value[0].tag, 'scope/my_scalar')
def testSummaryGraphModeCond(self):
with ops.Graph().as_default(), self.cached_session():
training_util.get_or_create_global_step()
logdir = tempfile.mkdtemp()
with summary_ops.create_file_writer(
logdir, max_queue=0,
name='t2').as_default(), summary_ops.always_record_summaries():
summary_ops.initialize()
training_util.get_or_create_global_step().initializer.run()
def f():
summary_ops.scalar('scalar', 2.0)
return constant_op.constant(True)
pred = array_ops.placeholder(dtypes.bool)
x = control_flow_ops.cond(pred, f,
lambda: constant_op.constant(False))
x.eval(feed_dict={pred: True})
events = summary_test_util.events_from_logdir(logdir)
self.assertEqual(len(events), 2)
self.assertEqual(events[1].summary.value[0].tag, 'cond/scalar')
def testSummaryGraphModeWhile(self):
with ops.Graph().as_default(), self.cached_session():
training_util.get_or_create_global_step()
logdir = tempfile.mkdtemp()
with summary_ops.create_file_writer(
logdir, max_queue=0,
name='t2').as_default(), summary_ops.always_record_summaries():
summary_ops.initialize()
training_util.get_or_create_global_step().initializer.run()
def body(unused_pred):
summary_ops.scalar('scalar', 2.0)
return constant_op.constant(False)
def cond(pred):
return pred
pred = array_ops.placeholder(dtypes.bool)
x = control_flow_ops.while_loop(cond, body, [pred])
x.eval(feed_dict={pred: True})
events = summary_test_util.events_from_logdir(logdir)
self.assertEqual(len(events), 2)
self.assertEqual(events[1].summary.value[0].tag, 'while/scalar')
if __name__ == '__main__':
test.main()
|