repo_name (stringlengths 5–100) | ref (stringlengths 12–67) | path (stringlengths 4–244) | copies (stringlengths 1–8) | content (stringlengths 0–1.05M, ⌀ = null allowed) |
---|---|---|---|---|
nsharp3/OptimalAlphaShapes
|
refs/heads/master
|
FluidFuncs.py
|
1
|
# Nicholas Sharp - [email protected]
# Various 2D and 3D fluid functions and derivatives for dynamical systems problems
import numpy as np
# A linear flow in the positive X direction
class LinearXFlow2D:
def __init__(self, vel):
self.name = 'LinearXFlow2D'
self.info = '%s: vel = %.4e'%(self.name,vel)
self.vel = vel
# Velocity in the x-direction
def Ux(self,x,y,t):
return self.vel
# Velocity in the y-direction
def Uy(self,x,y,t):
return 0
# dU/dx
def dUxdx(self,x,y,t):
return 0
# dU/dy
def dUxdy(self,x,y,t):
return 0
# dV/dx
def dUydx(self,x,y,t):
return 0
# dV/dy
def dUydy(self,x,y,t):
return 0
# A 3D linear flow in the positive X direction
class LinearXFlow3D:
def __init__(self, vel):
self.name = 'LinearXFlow3D'
self.info = '%s: vel = %.4e'%(self.name,vel)
self.vel = vel
# Velocity in the x-direction
def Ux(self,x,y,z,t):
return self.vel
# Velocity in the y-direction
def Uy(self,x,y,z,t):
return 0
# Velocity in the z-direction
def Uz(self,x,y,z,t):
return 0
def dUxdx(self,x,y,z,t):
return 0
def dUxdy(self,x,y,z,t):
return 0
def dUxdz(self,x,y,z,t):
return 0
def dUydx(self,x,y,z,t):
return 0
def dUydy(self,x,y,z,t):
return 0
def dUydz(self,x,y,z,t):
return 0
def dUzdx(self,x,y,z,t):
return 0
def dUzdy(self,x,y,z,t):
return 0
def dUzdz(self,x,y,z,t):
return 0
class DoubleVortex:
def __init__(self, vel, b, omega):
self.name = 'DoubleVortex'
self.info = '%s: vel = %.4e b = %.4e omega = %.4e'%(self.name,vel,b,omega)
self.vel = vel
self.b = b
self.omega = omega
def Ux(self,x,y,t):
arg = np.pi * (x + self.b * np.cos(self.omega * t))
return self.vel*np.sin(arg)*np.cos(np.pi*y)
def Uy(self,x,y,t):
arg = np.pi * (x + self.b * np.cos(self.omega * t))
return -self.vel*np.cos(arg)*np.sin(np.pi*y)
def dUxdx(self,x,y,t):
arg = np.pi * (x + self.b * np.cos(self.omega * t))
return self.vel*np.pi*np.cos(arg)*np.cos(np.pi*y)
def dUydy(self,x,y,t):
arg = np.pi * (x + self.b * np.cos(self.omega * t))
return -self.vel*np.pi*np.cos(arg)*np.cos(np.pi*y)
def dUxdy(self,x,y,t):
arg = np.pi * (x + self.b * np.cos(self.omega * t))
return -self.vel*np.pi*np.sin(arg)*np.sin(np.pi*y)
def dUydx(self,x,y,t):
arg = np.pi * (x + self.b * np.cos(self.omega * t))
return self.vel*np.pi*np.sin(arg)*np.sin(np.pi*y)
class Gyre3D:
def __init__(self, vel, a, b, c, omega):
self.name = 'Gyre3D'
self.info = '%s: vel = %.4e a = %.4e b = %.4e c = %.4e omega = %.4e'%(self.name,vel,a,b,c,omega)
self.vel = vel
self.a = a
self.b = b
self.c = c
self.omega = omega
# Velocity in the x-direction
def Ux(self,x,y,z,t):
valX = self.a*x + self.omega*t
valY = self.b*y + self.omega*t
valZ = self.c*z + self.omega*t
return -self.vel * np.sin(valX) * np.cos(valY) * np.sin(valZ)
# Velocity in the y-direction
def Uy(self,x,y,z,t):
valX = self.a*x + self.omega*t
valY = self.b*y + self.omega*t
valZ = self.c*z + self.omega*t
return -self.vel * np.cos(valX) * np.sin(valY) * np.sin(valZ)
# Velocity in the z-direction
def Uz(self,x,y,z,t):
valX = self.a*x + self.omega*t
valY = self.b*y + self.omega*t
valZ = self.c*z + self.omega*t
return self.vel * np.cos(valX) * np.sin(valY) * np.cos(valZ)
def dUxdx(self,x,y,z,t):
valX = self.a*x + self.omega*t
valY = self.b*y + self.omega*t
valZ = self.c*z + self.omega*t
return - self.a * self.vel * np.cos(valX) * np.cos(valY) * np.sin(valZ)
def dUxdy(self,x,y,z,t):
valX = self.a*x + self.omega*t
valY = self.b*y + self.omega*t
valZ = self.c*z + self.omega*t
return self.b * self.vel * np.sin(valX) * np.sin(valY) * np.sin(valZ)
def dUxdz(self,x,y,z,t):
valX = self.a*x + self.omega*t
valY = self.b*y + self.omega*t
valZ = self.c*z + self.omega*t
return - self.c * self.vel * np.sin(valX) * np.cos(valY) * np.cos(valZ)
def dUydx(self,x,y,z,t):
valX = self.a*x + self.omega*t
valY = self.b*y + self.omega*t
valZ = self.c*z + self.omega*t
return self.a * self.vel * np.sin(valX) * np.sin(valY) * np.sin(valZ)
def dUydy(self,x,y,z,t):
valX = self.a*x + self.omega*t
valY = self.b*y + self.omega*t
valZ = self.c*z + self.omega*t
return -self.b * self.vel * np.cos(valX) * np.cos(valY) * np.sin(valZ)
def dUydz(self,x,y,z,t):
valX = self.a*x + self.omega*t
valY = self.b*y + self.omega*t
valZ = self.c*z + self.omega*t
return - self.c * self.vel * np.cos(valX) * np.sin(valY) * np.cos(valZ)
def dUzdx(self,x,y,z,t):
valX = self.a*x + self.omega*t
valY = self.b*y + self.omega*t
valZ = self.c*z + self.omega*t
return - self.a * self.vel * np.sin(valX) * np.sin(valY) * np.cos(valZ)
def dUzdy(self,x,y,z,t):
valX = self.a*x + self.omega*t
valY = self.b*y + self.omega*t
valZ = self.c*z + self.omega*t
return self.b * self.vel * np.cos(valX) * np.cos(valY) * np.cos(valZ)
def dUzdz(self,x,y,z,t):
valX = self.a*x + self.omega*t
valY = self.b*y + self.omega*t
valZ = self.c*z + self.omega*t
return - self.c * self.vel * np.cos(valX) * np.sin(valY) * np.sin(valZ)
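# A minimal usage sketch (not part of the original file): evaluate the
# DoubleVortex field at a single point and confirm the field is divergence-free.
# The parameter values below are illustrative only.
if __name__ == '__main__':
    flow = DoubleVortex(vel=1.0, b=0.25, omega=2.0 * np.pi / 10.0)
    x, y, t = 0.3, 0.7, 1.5
    u, v = flow.Ux(x, y, t), flow.Uy(x, y, t)
    div = flow.dUxdx(x, y, t) + flow.dUydy(x, y, t)  # should be ~0 (incompressible)
    print(flow.info)
    print('u = %.4f, v = %.4f, divergence = %.2e' % (u, v, div))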
|
seblefevre/testerman
|
refs/heads/master
|
qtesterman/epydoc/checker.py
|
2
|
#
# objdoc: epydoc documentation completeness checker
# Edward Loper
#
# Created [01/30/01 05:18 PM]
# $Id: checker.py,v 1.1 2008/06/01 17:57:45 slefevr Exp $
#
"""
Documentation completeness checker. This module defines a single
class, C{DocChecker}, which can be used to check that the specified
classes of objects are documented.
"""
__docformat__ = 'epytext en'
##################################################
## Imports
##################################################
import re, sys, os.path, string
from xml.dom.minidom import Text as _Text
from epydoc.apidoc import *
# The following methods may be undocumented:
_NO_DOCS = ['__hash__', '__repr__', '__str__', '__cmp__']
# The following methods never need descriptions, authors, or
# versions:
_NO_BASIC = ['__hash__', '__repr__', '__str__', '__cmp__']
# The following methods never need return value descriptions.
_NO_RETURN = ['__init__', '__hash__', '__repr__', '__str__', '__cmp__']
# The following methods don't need parameters documented:
_NO_PARAM = ['__cmp__']
class DocChecker:
"""
Documentation completeness checker. C{DocChecker} can be used to
check that specified classes of objects are documented. To check
the documentation for a group of objects, you should create a
C{DocChecker} from a L{DocIndex<apidoc.DocIndex>} that documents
those objects; and then use the L{check} method to run specified
checks on the objects' documentation.
What checks are run, and what objects they are run on, are
specified by the constants defined by C{DocChecker}. These
constants are divided into three groups.
- Type specifiers indicate what type of objects should be
checked: L{MODULE}; L{CLASS}; L{FUNC}; L{VAR}; L{IVAR};
L{CVAR}; L{PARAM}; and L{RETURN}.
- Public/private specifiers indicate whether public or private
objects should be checked: L{PRIVATE}.
- Check specifiers indicate what checks should be run on the
objects: L{TYPE}; L{DESCR}; L{AUTHOR};
and L{VERSION}.
The L{check} method is used to perform a check on the
documentation. Its parameter is formed by or-ing together at
least one value from each specifier group:
>>> checker.check(DocChecker.MODULE | DocChecker.DESCR)
To specify multiple values from a single group, simply or their
values together:
>>> checker.check(DocChecker.MODULE | DocChecker.CLASS |
... DocChecker.FUNC )
@group Types: MODULE, CLASS, FUNC, VAR, IVAR, CVAR, PARAM,
RETURN, ALL_T
@type MODULE: C{int}
@cvar MODULE: Type specifier that indicates that the documentation
of modules should be checked.
@type CLASS: C{int}
@cvar CLASS: Type specifier that indicates that the documentation
of classes should be checked.
@type FUNC: C{int}
@cvar FUNC: Type specifier that indicates that the documentation
of functions should be checked.
@type VAR: C{int}
@cvar VAR: Type specifier that indicates that the documentation
of module variables should be checked.
@type IVAR: C{int}
@cvar IVAR: Type specifier that indicates that the documentation
of instance variables should be checked.
@type CVAR: C{int}
@cvar CVAR: Type specifier that indicates that the documentation
of class variables should be checked.
@type PARAM: C{int}
@cvar PARAM: Type specifier that indicates that the documentation
of function and method parameters should be checked.
@type RETURN: C{int}
@cvar RETURN: Type specifier that indicates that the documentation
of return values should be checked.
@type ALL_T: C{int}
@cvar ALL_T: Type specifier that indicates that the documentation
of all objects should be checked.
@group Checks: TYPE, AUTHOR, VERSION, DESCR, ALL_C
@type TYPE: C{int}
@cvar TYPE: Check specifier that indicates that every variable and
parameter should have a C{@type} field.
@type AUTHOR: C{int}
@cvar AUTHOR: Check specifier that indicates that every object
should have an C{author} field.
@type VERSION: C{int}
@cvar VERSION: Check specifier that indicates that every object
should have a C{version} field.
@type DESCR: C{int}
@cvar DESCR: Check specifier that indicates that every object
should have a description.
@type ALL_C: C{int}
@cvar ALL_C: Check specifier that indicates that all checks
should be run.
@group Publicity: PRIVATE
@type PRIVATE: C{int}
@cvar PRIVATE: Specifier that indicates that private objects should
be checked.
"""
# Types
MODULE = 1
CLASS = 2
FUNC = 4
VAR = 8
#IVAR = 16
#CVAR = 32
PARAM = 64
RETURN = 128
PROPERTY = 256
ALL_T = 1+2+4+8+16+32+64+128+256
# Checks
TYPE = 256
AUTHOR = 1024
VERSION = 2048
DESCR = 4096
ALL_C = 256+512+1024+2048+4096
# Private/public
PRIVATE = 16384
ALL = ALL_T + ALL_C + PRIVATE
def __init__(self, docindex):
"""
Create a new C{DocChecker} that can be used to run checks on
the documentation of the objects documented by C{docindex}
@param docindex: A documentation map containing the
documentation for the objects to be checked.
@type docindex: L{DocIndex<apidoc.DocIndex>}
"""
self._docindex = docindex
# Initialize instance variables
self._checks = 0
self._last_warn = None
self._out = sys.stdout
self._num_warnings = 0
def check(self, *check_sets):
"""
Run the specified checks on the documentation of the objects
contained by this C{DocChecker}'s C{DocIndex}. Any errors found
are printed to standard out.
@param check_sets: The checks that should be run on the
documentation. This value is constructed by or-ing
together the specifiers that indicate which objects should
be checked, and which checks should be run. See the
L{module description<checker>} for more information.
If no checks are specified, then a default set of checks
will be run.
@type check_sets: C{int}
@return: True if no problems were found.
@rtype: C{boolean}
"""
if not check_sets:
check_sets = (DocChecker.MODULE | DocChecker.CLASS |
DocChecker.FUNC | DocChecker.VAR |
DocChecker.DESCR,)
self._warnings = {}
log.start_progress('Checking docs')
for j, checks in enumerate(check_sets):
self._check(checks)
log.end_progress()
for (warning, docs) in self._warnings.items():
docs = sorted(docs)
docnames = '\n'.join([' - %s' % self._name(d) for d in docs])
log.warning('%s:\n%s' % (warning, docnames))
def _check(self, checks):
self._checks = checks
# Get the list of objects to check.
valdocs = sorted(self._docindex.reachable_valdocs(
imports=False, packages=False, bases=False, submodules=False,
subclasses=False, private = (checks & DocChecker.PRIVATE)))
docs = set()
for d in valdocs:
if not isinstance(d, GenericValueDoc): docs.add(d)
for doc in valdocs:
if isinstance(doc, NamespaceDoc):
for d in doc.variables.values():
if isinstance(d.value, GenericValueDoc): docs.add(d)
for i, doc in enumerate(sorted(docs)):
if isinstance(doc, ModuleDoc):
self._check_module(doc)
elif isinstance(doc, ClassDoc):
self._check_class(doc)
elif isinstance(doc, RoutineDoc):
self._check_func(doc)
elif isinstance(doc, PropertyDoc):
self._check_property(doc)
elif isinstance(doc, VariableDoc):
self._check_var(doc)
else:
log.error("Don't know how to check %r" % doc)
def _name(self, doc):
name = str(doc.canonical_name)
if isinstance(doc, RoutineDoc): name += '()'
return name
def _check_basic(self, doc):
"""
Check the description, author, version, and see-also fields of
C{doc}. This is used as a helper function by L{_check_module},
L{_check_class}, and L{_check_func}.
@param doc: The documentation that should be checked.
@type doc: L{APIDoc}
@rtype: C{None}
"""
if ((self._checks & DocChecker.DESCR) and
(doc.descr in (None, UNKNOWN))):
if doc.docstring in (None, UNKNOWN):
self.warning('Undocumented', doc)
else:
self.warning('No description', doc)
if self._checks & DocChecker.AUTHOR:
for tag, arg, descr in doc.metadata:
if 'author' == tag: break
else:
self.warning('No authors', doc)
if self._checks & DocChecker.VERSION:
for tag, arg, descr in doc.metadata:
if 'version' == tag: break
else:
self.warning('No version', doc)
def _check_module(self, doc):
"""
Run checks on the module whose APIDoc is C{doc}.
@param doc: The APIDoc of the module to check.
@type doc: L{APIDoc}
@rtype: C{None}
"""
if self._checks & DocChecker.MODULE:
self._check_basic(doc)
def _check_class(self, doc):
"""
Run checks on the class whose APIDoc is C{doc}.
@param doc: The APIDoc of the class to check.
@type doc: L{APIDoc}
@rtype: C{None}
"""
if self._checks & DocChecker.CLASS:
self._check_basic(doc)
def _check_property(self, doc):
if self._checks & DocChecker.PROPERTY:
self._check_basic(doc)
def _check_var(self, doc):
"""
Run checks on the variable whose APIDoc is C{doc}.
@param doc: The documentation for the variable to check.
@type doc: L{APIDoc}
@rtype: C{None}
"""
if self._checks & DocChecker.VAR:
if (self._checks & (DocChecker.DESCR|DocChecker.TYPE) and
doc.descr in (None, UNKNOWN) and
doc.type_descr in (None, UNKNOWN) and
doc.docstring in (None, UNKNOWN)):
self.warning('Undocumented', doc)
else:
if (self._checks & DocChecker.DESCR and
doc.descr in (None, UNKNOWN)):
self.warning('No description', doc)
if (self._checks & DocChecker.TYPE and
doc.type_descr in (None, UNKNOWN)):
self.warning('No type information', doc)
def _check_func(self, doc):
"""
Run checks on the function whose APIDoc is C{doc}.
@param doc: The APIDoc of the function to check.
@type doc: L{APIDoc}
@rtype: C{None}
"""
name = doc.canonical_name
if (self._checks & DocChecker.FUNC and
doc.docstring in (None, UNKNOWN) and
doc.canonical_name[-1] not in _NO_DOCS):
self.warning('Undocumented', doc)
return
if (self._checks & DocChecker.FUNC and
doc.canonical_name[-1] not in _NO_BASIC):
self._check_basic(doc)
if (self._checks & DocChecker.RETURN and
doc.canonical_name[-1] not in _NO_RETURN):
if (doc.return_type in (None, UNKNOWN) and
doc.return_descr in (None, UNKNOWN)):
self.warning('No return descr', doc)
if (self._checks & DocChecker.PARAM and
doc.canonical_name[-1] not in _NO_PARAM):
if doc.arg_descrs in (None, UNKNOWN):
self.warning('No argument info', doc)
else:
args_with_descr = []
for arg, descr in doc.arg_descrs:
if isinstance(arg, basestring):
args_with_descr.append(arg)
else:
args_with_descr += arg
for posarg in doc.posargs:
if (self._checks & DocChecker.DESCR and
posarg not in args_with_descr):
self.warning('Argument(s) not described', doc)
if (self._checks & DocChecker.TYPE and
posarg not in doc.arg_types):
self.warning('Argument type(s) not described', doc)
def warning(self, msg, doc):
self._warnings.setdefault(msg,set()).add(doc)
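# A minimal usage sketch (not part of the original file), following the class
# docstring: build a DocIndex, wrap it in a DocChecker, and or together type
# and check specifiers.  'mypackage' is a placeholder package name.
if __name__ == '__main__':
    from epydoc.docbuilder import build_doc_index
    docindex = build_doc_index(['mypackage'])
    checker = DocChecker(docindex)
    checker.check(DocChecker.MODULE | DocChecker.CLASS |
                  DocChecker.FUNC | DocChecker.DESCR)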
|
tareqalayan/ansible
|
refs/heads/devel
|
lib/ansible/modules/cloud/amazon/aws_config_aggregator.py
|
31
|
#!/usr/bin/python
# Copyright: (c) 2018, Aaron Smith <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: aws_config_aggregator
short_description: Manage AWS Config aggregations across multiple accounts
description:
- Module manages AWS Config resources
version_added: "2.6"
requirements: [ 'botocore', 'boto3' ]
author:
- "Aaron Smith (@slapula)"
options:
name:
description:
- The name of the AWS Config resource.
required: true
state:
description:
- Whether the Config aggregator should be present or absent.
default: present
choices: ['present', 'absent']
account_sources:
description:
- Provides a list of source accounts and regions to be aggregated.
suboptions:
account_ids:
description:
- A list of 12-digit account IDs of accounts being aggregated.
aws_regions:
description:
- A list of source regions being aggregated.
all_aws_regions:
description:
- If true, aggregate existing AWS Config regions and future regions.
organization_source:
description:
- The region authorized to collect aggregated data.
suboptions:
role_arn:
description:
- ARN of the IAM role used to retrieve AWS Organization details associated with the aggregator account.
aws_regions:
description:
- The source regions being aggregated.
all_aws_regions:
description:
- If true, aggregate existing AWS Config regions and future regions.
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = r'''
- name: Create cross-account aggregator
aws_config_aggregator:
name: test_config_rule
state: present
account_sources:
account_ids:
- 1234567890
- 0123456789
- 9012345678
all_aws_regions: yes
'''
RETURN = r'''#'''
try:
import botocore
except ImportError:
pass # handled by AnsibleAWSModule
from ansible.module_utils.aws.core import AnsibleAWSModule, is_boto3_error_code
from ansible.module_utils.ec2 import AWSRetry, camel_dict_to_snake_dict
def resource_exists(client, module, params):
try:
aggregator = client.describe_configuration_aggregators(
ConfigurationAggregatorNames=[params['ConfigurationAggregatorName']]
)
return aggregator['ConfigurationAggregators'][0]
except is_boto3_error_code('NoSuchConfigurationAggregatorException'):
return
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
module.fail_json_aws(e)
def create_resource(client, module, params, result):
try:
client.put_configuration_aggregator(
ConfigurationAggregatorName=params['ConfigurationAggregatorName'],
AccountAggregationSources=params['AccountAggregationSources'],
OrganizationAggregationSource=params['OrganizationAggregationSource']
)
result['changed'] = True
result['aggregator'] = camel_dict_to_snake_dict(resource_exists(client, module, params))
return result
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Couldn't create AWS Config configuration aggregator")
def update_resource(client, module, params, result):
current_params = client.describe_configuration_aggregators(
ConfigurationAggregatorNames=[params['ConfigurationAggregatorName']]
)['ConfigurationAggregators'][0]
del current_params['ConfigurationAggregatorArn']
del current_params['CreationTime']
del current_params['LastUpdatedTime']
if params != current_params:
try:
client.put_configuration_aggregator(
ConfigurationAggregatorName=params['ConfigurationAggregatorName'],
AccountAggregationSources=params['AccountAggregationSources'],
OrganizationAggregationSource=params['OrganizationAggregationSource']
)
result['changed'] = True
result['aggregator'] = camel_dict_to_snake_dict(resource_exists(client, module, params))
return result
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Couldn't create AWS Config configuration aggregator")
def delete_resource(client, module, params, result):
try:
client.delete_configuration_aggregator(
ConfigurationAggregatorName=params['ConfigurationAggregatorName']
)
result['changed'] = True
return result
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Couldn't delete AWS Config configuration aggregator")
def main():
module = AnsibleAWSModule(
argument_spec={
'name': dict(type='str', required=True),
'state': dict(type='str', choices=['present', 'absent'], default='present'),
'account_sources': dict(type='list', required=True),
'organization_source': dict(type='dict', required=True)
},
supports_check_mode=False,
)
result = {
'changed': False
}
name = module.params.get('name')
state = module.params.get('state')
params = {}
if name:
params['ConfigurationAggregatorName'] = name
if module.params.get('account_sources'):
params['AccountAggregationSources'] = []
for i in module.params.get('account_sources'):
tmp_dict = {}
if i.get('account_ids'):
tmp_dict['AccountIds'] = i.get('account_ids')
if i.get('aws_regions'):
tmp_dict['AwsRegions'] = i.get('aws_regions')
if i.get('all_aws_regions') is not None:
tmp_dict['AllAwsRegions'] = i.get('all_aws_regions')
params['AccountAggregationSources'].append(tmp_dict)
if module.params.get('organization_source'):
params['OrganizationAggregationSource'] = {}
if module.params.get('organization_source').get('role_arn'):
params['OrganizationAggregationSource'].update({
'RoleArn': module.params.get('organization_source').get('role_arn')
})
if module.params.get('organization_source').get('aws_regions'):
params['OrganizationAggregationSource'].update({
'AwsRegions': module.params.get('organization_source').get('aws_regions')
})
if module.params.get('organization_source').get('all_aws_regions') is not None:
params['OrganizationAggregationSource'].update({
'AllAwsRegions': module.params.get('organization_source').get('all_aws_regions')
})
client = module.client('config', retry_decorator=AWSRetry.jittered_backoff())
resource_status = resource_exists(client, module, params)
if state == 'present':
if not resource_status:
create_resource(client, module, params, result)
else:
update_resource(client, module, params, result)
if state == 'absent':
if resource_status:
delete_resource(client, module, params, result)
module.exit_json(changed=result['changed'])
if __name__ == '__main__':
main()
|
chvrga/outdoor-explorer
|
refs/heads/master
|
java/play-1.4.4/samples-and-tests/i-am-a-developer/mechanize/_pullparser.py
|
15
|
"""A simple "pull API" for HTML parsing, after Perl's HTML::TokeParser.
Examples
This program extracts all links from a document. It will print one
line for each link, containing the URL and the textual description
between the <A>...</A> tags:
import pullparser, sys
f = file(sys.argv[1])
p = pullparser.PullParser(f)
for token in p.tags("a"):
if token.type == "endtag": continue
url = dict(token.attrs).get("href", "-")
text = p.get_compressed_text(endat=("endtag", "a"))
print "%s\t%s" % (url, text)
This program extracts the <TITLE> from the document:
import pullparser, sys
f = file(sys.argv[1])
p = pullparser.PullParser(f)
if p.get_tag("title"):
title = p.get_compressed_text()
print "Title: %s" % title
Copyright 2003-2006 John J. Lee <[email protected]>
Copyright 1998-2001 Gisle Aas (original libwww-perl code)
This code is free software; you can redistribute it and/or modify it
under the terms of the BSD or ZPL 2.1 licenses.
"""
import re, htmlentitydefs
import sgmllib, HTMLParser
from xml.sax import saxutils
from _html import unescape, unescape_charref
class NoMoreTokensError(Exception): pass
class Token:
"""Represents an HTML tag, declaration, processing instruction etc.
Behaves as both a tuple-like object (ie. iterable) and has attributes
.type, .data and .attrs.
>>> t = Token("starttag", "a", [("href", "http://www.python.org/")])
>>> t == ("starttag", "a", [("href", "http://www.python.org/")])
True
>>> (t.type, t.data) == ("starttag", "a")
True
>>> t.attrs == [("href", "http://www.python.org/")]
True
Public attributes
type: one of "starttag", "endtag", "startendtag", "charref", "entityref",
"data", "comment", "decl", "pi", after the corresponding methods of
HTMLParser.HTMLParser
data: For a tag, the tag name; otherwise, the relevant data carried by the
tag, as a string
attrs: list of (name, value) pairs representing HTML attributes
(or None if token does not represent an opening tag)
"""
def __init__(self, type, data, attrs=None):
self.type = type
self.data = data
self.attrs = attrs
def __iter__(self):
return iter((self.type, self.data, self.attrs))
def __eq__(self, other):
type, data, attrs = other
if (self.type == type and
self.data == data and
self.attrs == attrs):
return True
else:
return False
def __ne__(self, other): return not self.__eq__(other)
def __repr__(self):
args = ", ".join(map(repr, [self.type, self.data, self.attrs]))
return self.__class__.__name__+"(%s)" % args
def __str__(self):
"""
>>> print Token("starttag", "br")
<br>
>>> print Token("starttag", "a",
... [("href", "http://www.python.org/"), ("alt", '"foo"')])
<a href="http://www.python.org/" alt='"foo"'>
>>> print Token("startendtag", "br")
<br />
>>> print Token("startendtag", "br", [("spam", "eggs")])
<br spam="eggs" />
>>> print Token("endtag", "p")
</p>
>>> print Token("charref", "38")
&
>>> print Token("entityref", "amp")
&
>>> print Token("data", "foo\\nbar")
foo
bar
>>> print Token("comment", "Life is a bowl\\nof cherries.")
<!--Life is a bowl
of cherries.-->
>>> print Token("decl", "decl")
<!decl>
>>> print Token("pi", "pi")
<?pi>
"""
if self.attrs is not None:
attrs = "".join([" %s=%s" % (k, saxutils.quoteattr(v)) for
k, v in self.attrs])
else:
attrs = ""
if self.type == "starttag":
return "<%s%s>" % (self.data, attrs)
elif self.type == "startendtag":
return "<%s%s />" % (self.data, attrs)
elif self.type == "endtag":
return "</%s>" % self.data
elif self.type == "charref":
return "&#%s;" % self.data
elif self.type == "entityref":
return "&%s;" % self.data
elif self.type == "data":
return self.data
elif self.type == "comment":
return "<!--%s-->" % self.data
elif self.type == "decl":
return "<!%s>" % self.data
elif self.type == "pi":
return "<?%s>" % self.data
assert False
def iter_until_exception(fn, exception, *args, **kwds):
while 1:
try:
yield fn(*args, **kwds)
except exception:
raise StopIteration
class _AbstractParser:
chunk = 1024
compress_re = re.compile(r"\s+")
def __init__(self, fh, textify={"img": "alt", "applet": "alt"},
encoding="ascii", entitydefs=None):
"""
fh: file-like object (only a .read() method is required) from which to
read HTML to be parsed
textify: mapping used by .get_text() and .get_compressed_text() methods
to represent opening tags as text
encoding: encoding used to encode numeric character references by
.get_text() and .get_compressed_text() ("ascii" by default)
entitydefs: mapping like {"amp": "&", ...} containing HTML entity
definitions (a sensible default is used). This is used to unescape
entities in .get_text() (and .get_compressed_text()) and attribute
values. If the encoding can not represent the character, the entity
reference is left unescaped. Note that entity references (both
numeric - e.g. &#123; or &#xabc; - and non-numeric - e.g. &amp;) are
unescaped in attribute values and the return value of .get_text(), but
not in data outside of tags. Instead, entity references outside of
tags are represented as tokens. This is a bit odd, it's true :-/
If the element name of an opening tag matches a key in the textify
mapping then that tag is converted to text. The corresponding value is
used to specify which tag attribute to obtain the text from. textify
maps from element names to either:
- an HTML attribute name, in which case the HTML attribute value is
used as its text value along with the element name in square
brackets (eg."alt text goes here[IMG]", or, if the alt attribute
were missing, just "[IMG]")
- a callable object (eg. a function) which takes a Token and returns
the string to be used as its text value
If textify has no key for an element name, nothing is substituted for
the opening tag.
Public attributes:
encoding and textify: see above
"""
self._fh = fh
self._tokenstack = [] # FIFO
self.textify = textify
self.encoding = encoding
if entitydefs is None:
entitydefs = htmlentitydefs.name2codepoint
self._entitydefs = entitydefs
def __iter__(self): return self
def tags(self, *names):
return iter_until_exception(self.get_tag, NoMoreTokensError, *names)
def tokens(self, *tokentypes):
return iter_until_exception(self.get_token, NoMoreTokensError,
*tokentypes)
def next(self):
try:
return self.get_token()
except NoMoreTokensError:
raise StopIteration()
def get_token(self, *tokentypes):
"""Pop the next Token object from the stack of parsed tokens.
If arguments are given, they are taken to be token types in which the
caller is interested: tokens representing other elements will be
skipped. Element names must be given in lower case.
Raises NoMoreTokensError.
"""
while 1:
while self._tokenstack:
token = self._tokenstack.pop(0)
if tokentypes:
if token.type in tokentypes:
return token
else:
return token
data = self._fh.read(self.chunk)
if not data:
raise NoMoreTokensError()
self.feed(data)
def unget_token(self, token):
"""Push a Token back onto the stack."""
self._tokenstack.insert(0, token)
def get_tag(self, *names):
"""Return the next Token that represents an opening or closing tag.
If arguments are given, they are taken to be element names in which the
caller is interested: tags representing other elements will be skipped.
Element names must be given in lower case.
Raises NoMoreTokensError.
"""
while 1:
tok = self.get_token()
if tok.type not in ["starttag", "endtag", "startendtag"]:
continue
if names:
if tok.data in names:
return tok
else:
return tok
def get_text(self, endat=None):
"""Get some text.
endat: stop reading text at this tag (the tag is included in the
returned text); endtag is a tuple (type, name) where type is
"starttag", "endtag" or "startendtag", and name is the element name of
the tag (element names must be given in lower case)
If endat is not given, .get_text() will stop at the next opening or
closing tag, or when there are no more tokens (no exception is raised).
Note that .get_text() includes the text representation (if any) of the
opening tag, but pushes the opening tag back onto the stack. As a
result, if you want to call .get_text() again, you need to call
.get_tag() first (unless you want an empty string returned when you
next call .get_text()).
Entity references are translated using the value of the entitydefs
constructor argument (a mapping from names to characters like that
provided by the standard module htmlentitydefs). Named entity
references that are not in this mapping are left unchanged.
The textify attribute is used to translate opening tags into text: see
the class docstring.
"""
text = []
tok = None
while 1:
try:
tok = self.get_token()
except NoMoreTokensError:
# unget last token (not the one we just failed to get)
if tok: self.unget_token(tok)
break
if tok.type == "data":
text.append(tok.data)
elif tok.type == "entityref":
t = unescape("&%s;"%tok.data, self._entitydefs, self.encoding)
text.append(t)
elif tok.type == "charref":
t = unescape_charref(tok.data, self.encoding)
text.append(t)
elif tok.type in ["starttag", "endtag", "startendtag"]:
tag_name = tok.data
if tok.type in ["starttag", "startendtag"]:
alt = self.textify.get(tag_name)
if alt is not None:
if callable(alt):
text.append(alt(tok))
elif tok.attrs is not None:
for k, v in tok.attrs:
if k == alt:
text.append(v)
text.append("[%s]" % tag_name.upper())
if endat is None or endat == (tok.type, tag_name):
self.unget_token(tok)
break
return "".join(text)
def get_compressed_text(self, *args, **kwds):
"""
As .get_text(), but collapses each group of contiguous whitespace to a
single space character, and removes all initial and trailing
whitespace.
"""
text = self.get_text(*args, **kwds)
text = text.strip()
return self.compress_re.sub(" ", text)
def handle_startendtag(self, tag, attrs):
self._tokenstack.append(Token("startendtag", tag, attrs))
def handle_starttag(self, tag, attrs):
self._tokenstack.append(Token("starttag", tag, attrs))
def handle_endtag(self, tag):
self._tokenstack.append(Token("endtag", tag))
def handle_charref(self, name):
self._tokenstack.append(Token("charref", name))
def handle_entityref(self, name):
self._tokenstack.append(Token("entityref", name))
def handle_data(self, data):
self._tokenstack.append(Token("data", data))
def handle_comment(self, data):
self._tokenstack.append(Token("comment", data))
def handle_decl(self, decl):
self._tokenstack.append(Token("decl", decl))
def unknown_decl(self, data):
# XXX should this call self.error instead?
#self.error("unknown declaration: " + `data`)
self._tokenstack.append(Token("decl", data))
def handle_pi(self, data):
self._tokenstack.append(Token("pi", data))
def unescape_attr(self, name):
return unescape(name, self._entitydefs, self.encoding)
def unescape_attrs(self, attrs):
escaped_attrs = []
for key, val in attrs:
escaped_attrs.append((key, self.unescape_attr(val)))
return escaped_attrs
class PullParser(_AbstractParser, HTMLParser.HTMLParser):
def __init__(self, *args, **kwds):
HTMLParser.HTMLParser.__init__(self)
_AbstractParser.__init__(self, *args, **kwds)
def unescape(self, name):
# Use the entitydefs passed into constructor, not
# HTMLParser.HTMLParser's entitydefs.
return self.unescape_attr(name)
class TolerantPullParser(_AbstractParser, sgmllib.SGMLParser):
def __init__(self, *args, **kwds):
sgmllib.SGMLParser.__init__(self)
_AbstractParser.__init__(self, *args, **kwds)
def unknown_starttag(self, tag, attrs):
attrs = self.unescape_attrs(attrs)
self._tokenstack.append(Token("starttag", tag, attrs))
def unknown_endtag(self, tag):
self._tokenstack.append(Token("endtag", tag))
def _test():
import doctest, _pullparser
return doctest.testmod(_pullparser)
if __name__ == "__main__":
_test()
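# A minimal sketch (not part of the original file) of the textify behaviour
# described in _AbstractParser.__init__: an <img> start tag contributes its
# alt text plus "[IMG]" to the text returned by get_compressed_text().
# Python 2, like the rest of this module:
#
#     from StringIO import StringIO
#     p = TolerantPullParser(StringIO('<p>A <img src="x.png" alt="chart"> of results</p>'))
#     print p.get_compressed_text(endat=("endtag", "p"))
#     # -> A chart[IMG] of results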
|
lijieamd/mavlink
|
refs/heads/master
|
pymavlink/tools/mavsigloss.py
|
47
|
#!/usr/bin/env python
'''
show times when signal is lost
'''
import sys, time, os
from argparse import ArgumentParser
parser = ArgumentParser(description=__doc__)
parser.add_argument("--no-timestamps", dest="notimestamps", action='store_true', help="Log doesn't have timestamps")
parser.add_argument("--planner", action='store_true', help="use planner file format")
parser.add_argument("--robust", action='store_true', help="Enable robust parsing (skip over bad data)")
parser.add_argument("--deltat", type=float, default=1.0, help="loss threshold in seconds")
parser.add_argument("--condition", default=None, help="select packets by condition")
parser.add_argument("--types", default=None, help="types of messages (comma separated)")
parser.add_argument("logs", metavar="LOG", nargs="+")
args = parser.parse_args()
from pymavlink import mavutil
def sigloss(logfile):
'''work out signal loss times for a log file'''
print("Processing log %s" % filename)
mlog = mavutil.mavlink_connection(filename,
planner_format=args.planner,
notimestamps=args.notimestamps,
robust_parsing=args.robust)
last_t = 0
types = args.types
if types is not None:
types = types.split(',')
while True:
m = mlog.recv_match(condition=args.condition)
if m is None:
return
if types is not None and m.get_type() not in types:
continue
if args.notimestamps:
if not 'usec' in m._fieldnames:
continue
t = m.usec / 1.0e6
else:
t = m._timestamp
if last_t != 0:
if t - last_t > args.deltat:
print("Sig lost for %.1fs at %s" % (t-last_t, time.asctime(time.localtime(t))))
last_t = t
total = 0.0
for filename in args.logs:
sigloss(filename)
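# Example invocation (the log file name is illustrative; any MAVLink log works).
# Prints one line for every gap between messages longer than --deltat seconds:
#   python mavsigloss.py --deltat 2.0 --types HEARTBEAT flight.tlog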
|
JorisDeRieck/Flexget
|
refs/heads/develop
|
flexget/components/sites/sites/wordpress.py
|
4
|
from __future__ import unicode_literals, division, absolute_import
from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
from future.moves.urllib.parse import urlencode
import logging
import re
from flexget import plugin
from flexget.event import event
from flexget.plugin import PluginError
from requests import Request, RequestException
from requests.utils import dict_from_cookiejar, cookiejar_from_dict
log = logging.getLogger('wordpress_auth')
def construct_request(url, username='', password='', redirect='/wp-admin/'):
headers = {
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/537.36 (KHTML, like Gecko) '
'Chrome/50.0.2661.102 Safari/537.36',
'Content-Type': 'application/x-www-form-urlencoded',
'DNT': '1',
}
data = {
'log': username,
'pwd': password,
'wp-submit': 'Log In',
'testcookie': '1',
'redirect_to': redirect,
}
return Request(
method='POST', url=url, headers=headers, data=urlencode(data).encode('UTF-8')
).prepare()
def collect_cookies(response):
cookies = dict_from_cookiejar(response.cookies)
for h_resp in response.history:
cookies.update(dict_from_cookiejar(h_resp.cookies))
return cookiejar_from_dict(cookies)
def get_valid_cookies(cookies):
def is_wp_cookie(key):
return re.match(r'(wordpress|wp)(?!_*test)[A-Za-z0-9_]*', key, re.IGNORECASE)
valid_cookies = {key: value for key, value in cookies.items() if is_wp_cookie(key)}
return cookiejar_from_dict(valid_cookies)
class PluginWordPress(object):
"""
Supports accessing feeds and media that require wordpress account credentials
Usage:
wordpress_auth:
url: 'your wordpress blog login page (ex http://example.org/wp-login.php)'
username: 'your username'
password: 'your password'
"""
schema = {
'type': 'object',
'properties': {
'url': {'type': 'string', 'oneOf': [{'format': 'url'}]},
'username': {'type': 'string', 'default': ''},
'password': {'type': 'string', 'default': ''},
},
'required': ['url'],
'additionalProperties': False,
}
@plugin.priority(135)
def on_task_start(self, task, config):
url = config['url']
username = config['username']
password = config['password']
try:
response = task.requests.send(
construct_request(url, username=username, password=password)
)
if not response.ok:
raise RequestException(str(response))
cookies = collect_cookies(response)
if len(get_valid_cookies(cookies)) < 1:
raise RequestException(
'No recognized WordPress cookies found. Perhaps username/password is invalid?'
)
task.requests.add_cookiejar(cookies)
except RequestException as err:
log.error('%s', err)
raise PluginError('WordPress Authentication at %s failed' % (url,))
@event('plugin.register')
def register_plugin():
plugin.register(PluginWordPress, 'wordpress_auth', api_ver=2)
|
craigderington/studentloan5
|
refs/heads/master
|
studentloan5/Lib/site-packages/django/contrib/messages/storage/fallback.py
|
704
|
from django.contrib.messages.storage.base import BaseStorage
from django.contrib.messages.storage.cookie import CookieStorage
from django.contrib.messages.storage.session import SessionStorage
class FallbackStorage(BaseStorage):
"""
Tries to store all messages in the first backend, storing any unstored
messages in each subsequent backend.
"""
storage_classes = (CookieStorage, SessionStorage)
def __init__(self, *args, **kwargs):
super(FallbackStorage, self).__init__(*args, **kwargs)
self.storages = [storage_class(*args, **kwargs)
for storage_class in self.storage_classes]
self._used_storages = set()
def _get(self, *args, **kwargs):
"""
Gets a single list of messages from all storage backends.
"""
all_messages = []
for storage in self.storages:
messages, all_retrieved = storage._get()
# If the backend hasn't been used, no more retrieval is necessary.
if messages is None:
break
if messages:
self._used_storages.add(storage)
all_messages.extend(messages)
# If this storage class contained all the messages, no further
# retrieval is necessary
if all_retrieved:
break
return all_messages, all_retrieved
def _store(self, messages, response, *args, **kwargs):
"""
Stores the messages, returning any unstored messages after trying all
backends.
For each storage backend, any messages not stored are passed on to the
next backend.
"""
for storage in self.storages:
if messages:
messages = storage._store(messages, response,
remove_oldest=False)
# Even if there are no more messages, continue iterating to ensure
# storages which contained messages are flushed.
elif storage in self._used_storages:
storage._store([], response)
self._used_storages.remove(storage)
return messages
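# Usage sketch (not part of this module): Django selects this backend through
# the MESSAGE_STORAGE setting, e.g. in settings.py:
#
#     MESSAGE_STORAGE = 'django.contrib.messages.storage.fallback.FallbackStorage'
#
# Messages then go to CookieStorage first, and anything that does not fit in
# the cookie overflows into SessionStorage, as implemented by _store() above.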
|
hifly/OpenUpgrade
|
refs/heads/8.0
|
addons/pad/__init__.py
|
433
|
# -*- coding: utf-8 -*-
import pad
import res_company
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
acogdev/ansible
|
refs/heads/devel
|
lib/ansible/executor/process/__init__.py
|
7690
|
# (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
|
jkugler/ansible
|
refs/heads/devel
|
test/units/playbook/__init__.py
|
7690
|
# (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
|
sagarduwal/programming
|
refs/heads/master
|
permutation_combination/permutation/permutation.py
|
3
|
def npr(n, r):
if r > n or n < 0 or r < 0:
return -1
ans = 1
for i in range(n, n - r, -1):
ans *= i
return ans
def main():
permutation = npr(15, 5)
if permutation > 0:
print(permutation)
else:
print('Invalid Input')
if __name__ == '__main__':
main()
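# Worked example: npr(15, 5) computes 15 * 14 * 13 * 12 * 11 = 360360,
# which is the value main() prints.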
|
jlaura/pysal
|
refs/heads/master
|
pysal/network/network.py
|
5
|
from collections import defaultdict, OrderedDict
import math
import os
import cPickle
import copy
import numpy as np
import pysal as ps
from pysal.weights.util import get_ids
from analysis import NetworkG, NetworkK, NetworkF
import util
__all__ = ["Network", "PointPattern", "NetworkG", "NetworkK", "NetworkF"]
class Network:
"""
Spatially constrained network representation and analytical functionality.
Parameters
-----------
in_shp: str
The input shapefile. This must be in .shp format.
node_sig: int
Round the x and y coordinates of all nodes to node_sig significant
digits (combined significant digits on the left and right
of the decimal place)
-- Default is 11
-- Set to None for no rounding
unique_segs: bool
If True (default), keep only unique segments (i.e., prune out any
duplicated segments).
If False keep all segments.
Attributes
----------
in_shp: str
The input shapefile. This must be in .shp format.
adjacencylist: list
List of lists storing node adjacency.
nodes: dict
Keys are tuples of node coords and values are the node ID.
edge_lengths: dict
Keys are tuples of sorted node IDs representing an edge and values are
the length.
pointpatterns: dict
Keys are a string name of the pattern and values are point pattern
class instances.
node_coords: dict
Keys are the node ID and values are the (x,y) coordinates inverse
to nodes.
edges: list
List of edges, where each edge is a sorted tuple of node IDs.
node_list: list
List of node IDs.
alldistances: dict
Keys are the node IDs.
Values are tuples with two elements:
1. A list of the shortest path distances
2. A dict with the key being the id of the destination node and
the value being a list of the shortest path.
Examples
--------
Instantiate an instance of a network.
>>> ntw = ps.Network(ps.examples.get_path('streets.shp'))
Snap point observations to the network with attribute information.
>>> ntw.snapobservations(ps.examples.get_path('crimes.shp'), 'crimes', attribute=True)
And without attribute information.
>>> ntw.snapobservations(ps.examples.get_path('schools.shp'), 'schools', attribute=False)
"""
def __init__(self, in_shp=None, node_sig=11, unique_segs=True):
if in_shp:
self.in_shp = in_shp
self.node_sig = node_sig
self.unique_segs = unique_segs
self.adjacencylist = defaultdict(list)
self.nodes = {}
self.edge_lengths = {}
self.edges = []
self.pointpatterns = {}
self._extractnetwork()
self.node_coords = dict((value, key) for key, value in self.nodes.iteritems())
# This is a spatial representation of the network.
self.edges = sorted(self.edges)
# Extract the graph.
self.extractgraph()
self.node_list = sorted(self.nodes.values())
def _round_sig(self, v):
"""
Used internally to round the vertex to a set number of significant digits. If sig
is set to 4, then the following are some possible results for a coordinate:
0.0xxxx, 0.xxxx, x.xxx, xx.xx, xxx.x, xxxx.0, xxxx0.0
"""
sig = self.node_sig
if sig is None:
return v
out_v = [val if val == 0 \
else round(val, -int(math.floor(math.log10(math.fabs(val)))) +\
(sig-1)) \
for val in v]
return tuple(out_v)
def _extractnetwork(self):
"""
Used internally, to extract a network from a polyline shapefile.
"""
nodecount = 0
shps = ps.open(self.in_shp)
for shp in shps:
vertices = shp.vertices
for i, v in enumerate(vertices[:-1]):
v = self._round_sig(v)
try:
vid = self.nodes[v]
except:
self.nodes[v] = vid = nodecount
nodecount += 1
v2 = self._round_sig(vertices[i+1])
try:
nvid = self.nodes[v2]
except:
self.nodes[v2] = nvid = nodecount
nodecount += 1
self.adjacencylist[vid].append(nvid)
self.adjacencylist[nvid].append(vid)
# Sort the edges so that mono-directional keys can be stored.
edgenodes = sorted([vid, nvid])
edge = tuple(edgenodes)
self.edges.append(edge)
length = util.compute_length(v, vertices[i+1])
self.edge_lengths[edge] = length
if self.unique_segs == True:
# Remove duplicate edges and duplicate adjacent nodes.
self.edges = list(set(self.edges))
for k, v in self.adjacencylist.iteritems():
self.adjacencylist[k] = list(set(v))
def extractgraph(self):
"""
Using the existing network representation, create a graph based representation by
removing all nodes with a neighbor incidence of two. That is, we assume these
nodes are bridges between nodes with higher incidence.
"""
self.graphedges = []
self.edge_to_graph = {}
self.graph_lengths = {}
# Find all nodes with cardinality 2.
segment_nodes = []
for k, v in self.adjacencylist.iteritems():
#len(v) == 1 #cul-de-sac
#len(v) == 2 #bridge segment
#len(v) > 2 #intersection
if len(v) == 2:
segment_nodes.append(k)
# Start with a copy of the spatial representation and iteratively remove edges
# deemed to be segments.
self.graphedges = copy.deepcopy(self.edges)
self.graph_lengths = copy.deepcopy(self.edge_lengths)
# Mapping all the edges contained within a single graph represented edge.
self.graph_to_edges = {}
bridges = []
for s in segment_nodes:
bridge = [s]
neighbors = self._yieldneighbor(s, segment_nodes, bridge)
while neighbors:
cnode = neighbors.pop()
segment_nodes.remove(cnode)
bridge.append(cnode)
newneighbors = self._yieldneighbor(cnode, segment_nodes, bridge)
neighbors += newneighbors
bridges.append(bridge)
for bridge in bridges:
if len(bridge) == 1:
n = self.adjacencylist[bridge[0]]
newedge = tuple(sorted([n[0], n[1]]))
# Identify the edges to be removed.
e1 = tuple(sorted([bridge[0], n[0]]))
e2 = tuple(sorted([bridge[0], n[1]]))
# Remove them from the graph.
self.graphedges.remove(e1)
self.graphedges.remove(e2)
# Remove from the edge lengths.
length_e1 = self.edge_lengths[e1]
length_e2 = self.edge_lengths[e2]
self.graph_lengths.pop(e1, None)
self.graph_lengths.pop(e2, None)
self.graph_lengths[newedge] = length_e1 + length_e2
# Update the pointers.
self.graph_to_edges[e1] = newedge
self.graph_to_edges[e2] = newedge
else:
cumulative_length = 0
startend = {}
redundant = set([])
for b in bridge:
for n in self.adjacencylist[b]:
if n not in bridge:
startend[b] = n
else:
redundant.add(tuple(sorted([b,n])))
newedge = tuple(sorted(startend.values()))
for k, v in startend.iteritems():
redundant.add(tuple(sorted([k,v])))
for r in redundant:
self.graphedges.remove(r)
cumulative_length += self.edge_lengths[r]
self.graph_lengths.pop(r, None)
self.graph_to_edges[r] = newedge
self.graph_lengths[newedge] = cumulative_length
self.graphedges.append(newedge)
self.graphedges = sorted(self.graphedges)
def _yieldneighbor(self, node, segment_nodes, bridge):
"""
Used internally, this method traverses a bridge segment to find the source and
destination nodes.
"""
n = []
for i in self.adjacencylist[node]:
if i in segment_nodes and i not in bridge:
n.append(i)
return n
def contiguityweights(self, graph=True, weightings=None):
"""
Create a contiguity based W object
Parameters
----------
graph: bool
{True, False} controls whether the W is generated using the spatial
representation or the graph representation.
weightings: dict
Dict of lists of weightings for each edge.
Returns
-------
W: object
A PySAL W Object representing the binary adjacency of the network.
Examples
--------
>>> ntw = ps.Network(ps.examples.get_path('streets.shp'))
>>> w = ntw.contiguityweights(graph=False)
>>> ntw.snapobservations(ps.examples.get_path('crimes.shp'), 'crimes', attribute=True)
>>> counts = ntw.count_per_edge(ntw.pointpatterns['crimes'].obs_to_edge, graph=False)
Using the W object, access to ESDA functionality is provided. First,
a vector of attributes is created for all edges with observations.
>>> w = ntw.contiguityweights(graph=False)
>>> edges = w.neighbors.keys()
>>> y = np.zeros(len(edges))
>>> for i, e in enumerate(edges):
... if e in counts.keys():
... y[i] = counts[e]
Next, a standard call to Moran is made and the result placed into `res`.
>>> res = ps.esda.moran.Moran(y, w, permutations=99)
"""
neighbors = {}
neighbors = OrderedDict()
if graph:
edges = self.graphedges
else:
edges = self.edges
if weightings:
weights = {}
else:
weights = None
for key in edges:
neighbors[key] = []
if weightings:
weights[key] = []
for neigh in edges:
if key == neigh:
continue
if key[0] == neigh[0] or key[0] == neigh[1] or key[1] == neigh[0]\
or key[1] == neigh[1]:
neighbors[key].append(neigh)
if weightings:
weights[key].append(weightings[neigh])
# TODO: Add a break condition - everything is sorted, so we know when we
# have stepped beyond a possible neighbor.
#if key[1] > neigh[1]: #NOT THIS
#break
return ps.weights.W(neighbors, weights=weights)
def distancebandweights(self, threshold):
"""
Create distance based weights
Parameters
----------
threshold: float
Distance threshold value.
"""
if not hasattr(self, 'alldistances'):
self.node_distance_matrix()
neighbor_query = np.where(self.distancematrix < threshold)
neighbors = defaultdict(list)
for i, n in enumerate(neighbor_query[0]):
neigh = neighbor_query[1][i]
if n != neigh:
neighbors[n].append(neighbor_query[1][i])
return ps.weights.W(neighbors)
def snapobservations(self, shapefile, name, idvariable=None, attribute=None):
"""
Snap a point pattern shapefile to this network object. The point pattern is
stored in the network.pointpattern['key'] attribute of the network object.
Parameters
----------
shapefile: str
The path to the shapefile.
name: str
Name to be assigned to the point dataset.
idvariable: str
Column name to be used as ID variable.
attribute: bool
Defines whether attributes should be extracted.
True for attribute extraction.
False for no attribute extraction.
Returns
-------
"""
self.pointpatterns[name] = PointPattern(shapefile, idvariable=idvariable, attribute=attribute)
self._snap_to_edge(self.pointpatterns[name])
def compute_distance_to_nodes(self, x, y, edge):
"""
Given an observation on a network edge, return the distance to the two nodes that
bound that end.
Parameters
----------
x: float
x-coordinate of the snapped point.
y: float
y-coordinate of the snapped point.
edge: tuple
(node0, node1) representation of the network edge.
Returns
-------
d1: float
The distance to node0.
- always the node with the lesser id
d2: float
The distance to node1.
- always the node with the greater id
"""
d1 = util.compute_length((x,y), self.node_coords[edge[0]])
d2 = util.compute_length((x,y), self.node_coords[edge[1]])
return d1, d2
def _snap_to_edge(self, pointpattern):
"""
Used internally to snap point observations to network edges.
Parameters
-----------
pointpattern: object
PySAL Point Pattern Object
Returns
-------
obs_to_edge: dict
Dict with edges as keys and lists of points as values.
edge_to_obs: dict
Dict with point ids as keys and edge tuples as values.
dist_to_node: dict
Dict with point ids as keys and values as dicts with keys for
node ids and values as distances from point to node.
"""
obs_to_edge = {}
dist_to_node = {}
pointpattern.snapped_coordinates = {}
segments = []
s2e = {}
for edge in self.edges:
head = self.node_coords[edge[0]]
tail = self.node_coords[edge[1]]
segments.append(ps.cg.Chain([head,tail]))
s2e[(head,tail)] = edge
points = {}
p2id = {}
for pointIdx, point in pointpattern.points.iteritems():
points[pointIdx] = point['coordinates']
snapped = util.snapPointsOnSegments(points, segments)
for pointIdx, snapInfo in snapped.iteritems():
x,y = snapInfo[1].tolist()
edge = s2e[tuple(snapInfo[0])]
if edge not in obs_to_edge:
obs_to_edge[edge] = {}
obs_to_edge[edge][pointIdx] = (x,y)
pointpattern.snapped_coordinates[pointIdx] = (x,y)
d1,d2 = self.compute_distance_to_nodes(x, y, edge)
dist_to_node[pointIdx] = {edge[0]:d1, edge[1]:d2}
obs_to_node = defaultdict(list)
for k, v in obs_to_edge.iteritems():
keys = v.keys()
obs_to_node[k[0]] = keys
obs_to_node[k[1]] = keys
pointpattern.obs_to_edge = obs_to_edge
pointpattern.dist_to_node = dist_to_node
pointpattern.obs_to_node = obs_to_node
def count_per_edge(self, obs_on_network, graph=True):
"""
Compute the counts per edge.
Parameters
----------
obs_on_network: dict
Dict of observations on the network.
{(edge):{pt_id:(coords)}} or {edge:[(coord),(coord),(coord)]}
Returns
-------
counts: dict
{(edge):count}
Example
-------
Note that this passes the obs_to_edge attribute of a point pattern snapped to the
network.
>>> ntw = ps.Network(ps.examples.get_path('streets.shp'))
>>> ntw.snapobservations(ps.examples.get_path('crimes.shp'), 'crimes', attribute=True)
>>> counts = ntw.count_per_edge(ntw.pointpatterns['crimes'].obs_to_edge,graph=False)
>>> s = sum([v for v in counts.itervalues()])
>>> s
287
"""
counts = {}
if graph:
for key, observations in obs_on_network.iteritems():
cnt = len(observations)
if key in self.graph_to_edges.keys():
key = self.graph_to_edges[key]
try:
counts[key] += cnt
except:
counts[key] = cnt
else:
for key in obs_on_network.iterkeys():
counts[key] = len(obs_on_network[key])
return counts
def _newpoint_coords(self, edge, distance):
"""
Used internally to compute new point coordinates during snapping.
"""
x1 = self.node_coords[edge[0]][0]
y1 = self.node_coords[edge[0]][1]
x2 = self.node_coords[edge[1]][0]
y2 = self.node_coords[edge[1]][1]
if x1 == x2: # Vertical line case
x0 = x1
if y1 < y2:
y0 = y1 + distance
elif y1 > y2:
y0 = y2 + distance
else: # Zero length edge
y0 = y1
return x0, y0
m = (y2 - y1) / (x2 - x1)
if x1 > x2:
x0 = x1 - distance / math.sqrt(1 + m**2)
elif x1 < x2:
x0 = x1 + distance / math.sqrt(1 + m**2)
y0 = m * (x0 - x1) + y1
return x0, y0
def simulate_observations(self, count, distribution='uniform'):
"""
Generate a simulated point pattern on the network.
Parameters
----------
count: int
The number of points to create or mean of the distribution if not
'uniform'.
distribution: str
{'uniform', 'poisson'} distribution of random points.
Returns
-------
random_pts: dict
Keys are the edge tuple.
Value are a list of new point coordinates.
Example
-------
>>> ntw = ps.Network(ps.examples.get_path('streets.shp'))
>>> ntw.snapobservations(ps.examples.get_path('crimes.shp'), 'crimes', attribute=True)
>>> npts = ntw.pointpatterns['crimes'].npoints
>>> sim = ntw.simulate_observations(npts)
>>> isinstance(sim, ps.network.network.SimulatedPointPattern)
True
"""
simpts = SimulatedPointPattern()
# Cumulative Network Length.
edges = []
lengths = np.zeros(len(self.edge_lengths))
for i, key in enumerate(self.edge_lengths.iterkeys()):
edges.append(key)
lengths[i] = self.edge_lengths[key]
stops = np.cumsum(lengths)
totallength = stops[-1]
if distribution == 'uniform':
nrandompts = np.random.uniform(0, totallength, size=(count,))
elif distribution == 'poisson':
nrandompts = np.random.uniform(0, totallength, size=(np.random.poisson(count),))
for i, r in enumerate(nrandompts):
idx = np.where(r < stops)[0][0]
assignment_edge = edges[idx]
distance_from_start = stops[idx] - r
# Populate the coordinates dict.
x0, y0 = self._newpoint_coords(assignment_edge, distance_from_start)
simpts.snapped_coordinates[i] = (x0, y0)
simpts.obs_to_node[assignment_edge[0]].append(i)
simpts.obs_to_node[assignment_edge[1]].append(i)
# Populate the distance to node.
simpts.dist_to_node[i] = {assignment_edge[0] : distance_from_start,
assignment_edge[1] : self.edge_lengths[edges[idx]] - distance_from_start}
simpts.points = simpts.snapped_coordinates
simpts.npoints = len(simpts.points)
return simpts
def enum_links_node(self, v0):
"""
Returns the edges (links) incident to a node.
Parameters
-----------
v0: int
Node id
Returns
-------
links: list
List of tuple edges adjacent to the node.
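Example
-------
A small illustrative sketch (node ids depend on the input shapefile, so the
exact list returned is not shown):
>>> ntw = ps.Network(ps.examples.get_path('streets.shp'))
>>> links = ntw.enum_links_node(0)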
"""
links = []
neighbornodes = self.adjacencylist[v0]
for n in neighbornodes:
links.append(tuple(sorted([n, v0])))
return links
def node_distance_matrix(self):
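"""
Compute the shortest path distance between every pair of nodes in the
network using Dijkstra's algorithm. Results are cached per node in
self.alldistances and as a full matrix in self.distancematrix.
"""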
self.alldistances = {}
nnodes = len(self.node_list)
self.distancematrix = np.empty((nnodes, nnodes))
for node in self.node_list:
distance, pred = util.dijkstra(self, self.edge_lengths, node, n=float('inf'))
pred = np.array(pred)
#tree = util.generatetree(pred) <---- something to look at in the future
tree = None
self.alldistances[node] = (distance, tree)
self.distancematrix[node] = distance
def allneighbordistances(self, sourcepattern, destpattern=None, fill_diagonal=None):
"""
Compute either all distances between i and j in a single point pattern or all
distances between each i from a source pattern and all j from a destination pattern.
Parameters
----------
sourcepattern: object
A point pattern object that has been snapped to the network.
destpattern: object
(Optional) A second snapped point pattern object. Defaults to the source pattern.
fill_diagonal: float, int
(Optional) Fill the diagonal of the cost matrix.
Defaults to None, which populates the diagonal with numpy.nan.
Do not declare a destpattern when supplying a custom fill_diagonal.
Returns
-------
nearest: array
An array of shape (n_source, n_dest) storing the network distances between all source and destination points.
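Example
-------
A minimal usage sketch following the snapping example used elsewhere in this
class (the distances depend on the data, so no output is shown):
>>> ntw = ps.Network(ps.examples.get_path('streets.shp'))
>>> ntw.snapobservations(ps.examples.get_path('crimes.shp'), 'crimes', attribute=True)
>>> costs = ntw.allneighbordistances(ntw.pointpatterns['crimes'])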
"""
if not hasattr(self,'alldistances'):
self.node_distance_matrix()
# Source setup
src_indices = sourcepattern.points.keys()
nsource_pts = len(src_indices)
src_dist_to_node = sourcepattern.dist_to_node
src_nodes = {}
for s in src_indices:
e1, e2 = src_dist_to_node[s].keys()
src_nodes[s] = (e1, e2)
# Destination setup
symmetric = False
if destpattern is None:
symmetric = True
destpattern = sourcepattern
dest_indices = destpattern.points.keys()
ndest_pts = len(dest_indices)
dest_dist_to_node = destpattern.dist_to_node
dest_searchpts = copy.deepcopy(dest_indices)
dest_nodes = {}
for s in dest_indices:
e1, e2 = dest_dist_to_node[s].keys()
dest_nodes[s] = (e1, e2)
# Output setup
nearest = np.empty((nsource_pts, ndest_pts))
nearest[:] = np.inf
for p1 in src_indices:
# Get the source nodes and dist to source nodes.
source1, source2 = src_nodes[p1]
set1 = set(src_nodes[p1])
# Distance from node1 to p, distance from node2 to p.
sdist1, sdist2 = src_dist_to_node[p1].values()
if symmetric:
# Only compute the upper triangle if symmetric.
dest_searchpts.remove(p1)
for p2 in dest_searchpts:
dest1, dest2 = dest_nodes[p2]
set2 = set(dest_nodes[p2])
if set1 == set2: # same edge
x1,y1 = sourcepattern.snapped_coordinates[p1]
x2,y2 = destpattern.snapped_coordinates[p2]
xd = x1-x2
yd = y1-y2
nearest[p1,p2] = np.sqrt(xd*xd + yd*yd)
else:
ddist1, ddist2 = dest_dist_to_node[p2].values()
d11 = self.alldistances[source1][0][dest1]
d21 = self.alldistances[source2][0][dest1]
d12 = self.alldistances[source1][0][dest2]
d22 = self.alldistances[source2][0][dest2]
# Find the shortest distance from the path passing through each of the
# two origin nodes to the first destination node.
sd_1 = d11 + sdist1
sd_21 = d21 + sdist2
if sd_1 > sd_21:
sd_1 = sd_21
# Now add the point to node one distance on the destination edge.
len_1 = sd_1 + ddist1
# Repeat the prior but now for the paths entering at the second node
# of the second edge.
sd_2 = d12 + sdist1
sd_22 = d22 + sdist2
b = 0
if sd_2 > sd_22:
sd_2 = sd_22
b = 1
len_2 = sd_2 + ddist2
# Now find the shortest distance path between point 1 on edge 1 and
# point 2 on edge 2, and assign.
sp_12 = len_1
if len_1 > len_2:
sp_12 = len_2
nearest[p1, p2] = sp_12
if symmetric:
# Mirror the upper and lower triangle when symmetric.
nearest[p2,p1] = nearest[p1,p2]
# Populate the main diagonal when symmetric.
if symmetric:
if fill_diagonal is None:
np.fill_diagonal(nearest, np.nan)
else:
np.fill_diagonal(nearest, fill_diagonal)
return nearest
def nearestneighbordistances(self, sourcepattern, destpattern=None):
"""
Compute the interpattern nearest neighbor distances or the intrapattern
nearest neighbor distances between a source pattern and a destination pattern.
Parameters
----------
sourcepattern: str
The key of a point pattern snapped to the network.
destpattern: str
(Optional) The key of a point pattern snapped to the network.
Returns
-------
nearest: ndarray (n,2)
With column[:,0] containing the id of the nearest neighbor and
column [:,1] containing the distance.
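Example
-------
A minimal usage sketch (the distances depend on the data, so no output is shown):
>>> ntw = ps.Network(ps.examples.get_path('streets.shp'))
>>> ntw.snapobservations(ps.examples.get_path('crimes.shp'), 'crimes', attribute=True)
>>> nn = ntw.nearestneighbordistances('crimes')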
"""
if not sourcepattern in self.pointpatterns.keys():
raise KeyError("Available point patterns are {}".format(self.pointpatterns.keys()))
if not hasattr(self,'alldistances'):
self.node_distance_matrix()
pt_indices = self.pointpatterns[sourcepattern].points.keys()
dist_to_node = self.pointpatterns[sourcepattern].dist_to_node
nearest = np.zeros((len(pt_indices), 2), dtype=np.float32)
nearest[:,1] = np.inf
if destpattern is None:
destpattern = sourcepattern
searchpts = copy.deepcopy(pt_indices)
searchnodes = {}
for s in searchpts:
e1, e2 = dist_to_node[s].keys()
searchnodes[s] = (e1, e2)
for p1 in pt_indices:
# Get the source nodes and dist to source nodes.
source1, source2 = searchnodes[p1]
sdist1, sdist2 = dist_to_node[p1].values()
searchpts.remove(p1)
for p2 in searchpts:
dest1, dest2 = searchnodes[p2]
ddist1, ddist2 = dist_to_node[p2].values()
source1_to_dest1 = sdist1 + self.alldistances[source1][0][dest1] + ddist1
source1_to_dest2 = sdist1 + self.alldistances[source1][0][dest2] + ddist2
source2_to_dest1 = sdist2 + self.alldistances[source2][0][dest1] + ddist1
source2_to_dest2 = sdist2 + self.alldistances[source2][0][dest2] + ddist2
if source1_to_dest1 < nearest[p1, 1]:
nearest[p1, 0] = p2
nearest[p1, 1] = source1_to_dest1
if source1_to_dest1 < nearest[p2, 1]:
nearest[p2, 0] = p1
nearest[p2, 1] = source1_to_dest1
if source1_to_dest2 < nearest[p1, 1]:
nearest[p1, 0] = p2
nearest[p1, 1] = source1_to_dest2
if source1_to_dest2 < nearest[p2, 1]:
nearest[p2, 0] = p1
nearest[p2, 1] = source1_to_dest2
if source2_to_dest1 < nearest[p1, 1]:
nearest[p1, 0] = p2
nearest[p1, 1] = source2_to_dest1
if source2_to_dest1 < nearest[p2, 1]:
nearest[p2, 0] = p1
nearest[p2, 1] = source2_to_dest1
if source2_to_dest2 < nearest[p1, 1]:
nearest[p1, 0] = p2
nearest[p1, 1] = source2_to_dest2
if source2_to_dest2 < nearest[p2, 1]:
nearest[p2, 0] = p1
nearest[p2, 1] = source2_to_dest2
return nearest
def NetworkF(self, pointpattern, nsteps=10, permutations=99,
threshold=0.2, distribution='uniform',
lowerbound=None, upperbound=None):
"""
Computes a network constrained F-Function
Parameters
----------
pointpattern: object
A PySAL point pattern object.
nsteps: int
The number of steps at which the count of the nearest neighbors
is computed.
permutations: int
The number of permutations to perform (default 99).
threshold: float
The level at which significance is computed.
-- 0.5 would be 97.5% and 2.5%
distribution: str
The distribution from which random points are sampled:
-- uniform or poisson
lowerbound: float
The lower bound at which the F-function is computed. (Default 0)
upperbound: float
The upper bound at which the F-function is computed.
Defaults to the maximum observed nearest neighbor distance.
Returns
-------
NetworkF: object
A network F class instance.
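Example
-------
An illustrative sketch; the statistic is simulation based, so results vary
between runs and no output is shown. NetworkG and NetworkK below are called
the same way:
>>> ntw = ps.Network(ps.examples.get_path('streets.shp'))
>>> ntw.snapobservations(ps.examples.get_path('crimes.shp'), 'crimes', attribute=True)
>>> fres = ntw.NetworkF(ntw.pointpatterns['crimes'], permutations=99)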
"""
return NetworkF(self, pointpattern, nsteps=nsteps,
permutations=permutations,threshold=threshold,
distribution=distribution,lowerbound=lowerbound,
upperbound=upperbound)
def NetworkG(self, pointpattern, nsteps=10, permutations=99,
threshold=0.5, distribution='uniform',
lowerbound=None, upperbound=None):
"""
Computes a network constrained G-Function
Parameters
----------
pointpattern: object
A PySAL point pattern object.
nsteps: int
The number of steps at which the count of the nearest neighbors
is computed.
permutations: int
The number of permutations to perform (default 99).
threshold: float
The level at which significance is computed.
-- 0.5 would be 97.5% and 2.5%
distribution: str
The distribution from which random points are sampled:
-- uniform or poisson
lowerbound: float
The lower bound at which the G-function is computed. (Default 0)
upperbound: float
The upper bound at which the G-function is computed.
Defaults to the maximum observed nearest neighbor distance.
Returns
-------
NetworkG: object
A network G class instance.
"""
return NetworkG(self, pointpattern, nsteps=nsteps,
permutations=permutations,threshold=threshold,
distribution=distribution,lowerbound=lowerbound,
upperbound=upperbound)
def NetworkK(self, pointpattern, nsteps=10, permutations=99,
threshold=0.5, distribution='uniform',
lowerbound=None, upperbound=None):
"""
Computes a network constrained K-Function
Parameters
----------
pointpattern: object
A PySAL point pattern object.
nsteps: int
The number of steps at which the count of the nearest neighbors
is computed.
permutations: int
The number of permutations to perform (default 99).
threshold: float
The level at which significance is computed.
-- 0.5 would be 97.5% and 2.5%
distribution: str
The distribution from which random points are sampled:
-- uniform or poisson
lowerbound: float
The lower bound at which the K-function is computed. (Default 0)
upperbound: float
The upper bound at which the K-function is computed.
Defaults to the maximum observed nearest neighbor distance.
Returns
-------
NetworkK: object
A network K class instance.
"""
return NetworkK(self, pointpattern, nsteps=nsteps,
permutations=permutations,threshold=threshold,
distribution=distribution,lowerbound=lowerbound,
upperbound=upperbound)
def segment_edges(self, distance):
"""
Segment all of the edges in the network at a fixed distance interval.
Parameters
-----------
distance: float
The distance at which edges are split.
Returns
-------
sn: object
PySAL Network Object.
Example
-------
>>> ntw = ps.Network(ps.examples.get_path('streets.shp'))
>>> n200 = ntw.segment_edges(200.0)
>>> len(n200.edges)
688
"""
sn = Network()
sn.adjacencylist = copy.deepcopy(self.adjacencylist)
sn.edge_lengths = copy.deepcopy(self.edge_lengths)
sn.edges = set(copy.deepcopy(self.edges))
sn.node_coords = copy.deepcopy(self.node_coords)
sn.node_list = copy.deepcopy(self.node_list)
sn.nodes = copy.deepcopy(self.nodes)
sn.pointpatterns = copy.deepcopy(self.pointpatterns)
sn.in_shp = self.in_shp
current_node_id = max(self.nodes.values())
newedges = set()
removeedges = set()
for e in sn.edges:
length = sn.edge_lengths[e]
interval = distance
totallength = 0
currentstart = startnode = e[0]
endnode = e[1]
# If the edge will be segmented remove the current edge from the adjacency list.
if interval < length:
sn.adjacencylist[e[0]].remove(e[1])
sn.adjacencylist[e[1]].remove(e[0])
sn.edge_lengths.pop(e, None)
removeedges.add(e)
else:
continue
while totallength < length:
currentstop = current_node_id
if totallength + interval > length:
currentstop = endnode
interval = length - totallength
totallength = length
else:
current_node_id += 1
currentstop = current_node_id
totallength += interval
# Compute the new node coordinate.
newx, newy = self._newpoint_coords(e, totallength)
# Update node_list.
if currentstop not in sn.node_list:
sn.node_list.append(currentstop)
# Update nodes and node_coords.
sn.node_coords[currentstop] = newx, newy
sn.nodes[(newx, newy)] = currentstop
# Update the adjacency list.
sn.adjacencylist[currentstart].append(currentstop)
sn.adjacencylist[currentstop].append(currentstart)
# Add the new edge to the edge dict.
# Iterating over this so we need to add after iterating.
newedges.add(tuple(sorted([currentstart, currentstop])))
# Modify edge_lengths.
sn.edge_lengths[tuple(sorted([currentstart, currentstop]))] = interval
# Increment the start to the stop.
currentstart = currentstop
sn.edges.update(newedges)
sn.edges.difference_update(removeedges)
sn.edges = list(sn.edges)
# Update the point pattern snapping.
for instance in sn.pointpatterns.itervalues():
sn._snap_to_edge(instance)
return sn
def savenetwork(self, filename):
"""
Save a network to disk as a binary file
Parameters
----------
filename: str
The filename where the network should be saved. This should be a full
path; otherwise the file is saved in the directory from which this method is called.
Example
--------
>>> ntw = ps.Network(ps.examples.get_path('streets.shp'))
>>> ntw.savenetwork('mynetwork.pkl')
"""
with open(filename, 'wb') as networkout:
cPickle.dump(self, networkout, protocol=2)
@staticmethod
def loadnetwork(filename):
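"""
Load and return a network previously saved to disk with savenetwork.
"""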
with open(filename, 'rb') as networkin:
self = cPickle.load(networkin)
return self
class PointPattern():
"""
A stub point pattern class used to store a point pattern. This class is monkey patched
with network specific attributes when the points are snapped to a network.
In the future this class may be replaced with a generic point
pattern class.
Parameters
----------
shapefile: str
The input shapefile.
idvariable: str
Field in the shapefile to use as an id variable.
attribute: bool
{False, True}
A flag to indicate whether all attributes are tagged to this class.
Attributes
----------
points: dict
Keys are the point ids.
Values are the coordinates.
npoints: int
The number of points.
"""
def __init__(self, shapefile, idvariable=None, attribute=False):
self.points = {}
self.npoints = 0
if idvariable:
ids = get_ids(shapefile, idvariable)
else:
ids = None
pts = ps.open(shapefile)
# Get attributes if requested
if attribute:
dbname = os.path.splitext(shapefile)[0] + '.dbf'
db = ps.open(dbname)
else:
db = None
for i, pt in enumerate(pts):
if ids and db:
self.points[ids[i]] = {'coordinates':pt, 'properties':db[i]}
elif ids and not db:
self.points[ids[i]] = {'coordinates':pt, 'properties':None}
elif not ids and db:
self.points[i] = {'coordinates':pt, 'properties':db[i]}
else:
self.points[i] = {'coordinates':pt, 'properties':None}
pts.close()
if db:
db.close()
self.npoints = len(self.points.keys())
class SimulatedPointPattern():
"""
Struct style class to mirror the Point Pattern Class.
If the PointPattern class has methods, it might make sense to
make this a child of that class.
This class is not intended to be used by the external user.
"""
def __init__(self):
self.npoints = 0
self.obs_to_edge = {}
self.obs_to_node = defaultdict(list)
self.dist_to_node = {}
self.snapped_coordinates = {}
class SortedEdges(OrderedDict):
def next_key(self, key):
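# Relies on the Python 2 OrderedDict internals: __map[key] holds the
# [prev, next, key] link, so the key that follows `key` in insertion
# order is next[2].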
next = self._OrderedDict__map[key][1]
if next is self._OrderedDict__root:
raise ValueError("{!r} is the last key.".format(key))
return next[2]
def first_key(self):
for key in self: return key
raise ValueError("No sorted edges remain.")
|
mitsuhiko/jinja2
|
refs/heads/master
|
tests/test_utils.py
|
3
|
import pickle
import random
from collections import deque
from copy import copy as shallow_copy
import pytest
from markupsafe import Markup
from jinja2.utils import consume
from jinja2.utils import generate_lorem_ipsum
from jinja2.utils import LRUCache
from jinja2.utils import missing
from jinja2.utils import object_type_repr
from jinja2.utils import select_autoescape
from jinja2.utils import urlize
class TestLRUCache:
def test_simple(self):
d = LRUCache(3)
d["a"] = 1
d["b"] = 2
d["c"] = 3
d["a"]
d["d"] = 4
assert len(d) == 3
assert "a" in d and "c" in d and "d" in d and "b" not in d
def test_itervalues(self):
cache = LRUCache(3)
cache["b"] = 1
cache["a"] = 2
values = [v for v in cache.values()]
assert len(values) == 2
assert 1 in values
assert 2 in values
def test_itervalues_empty(self):
cache = LRUCache(2)
values = [v for v in cache.values()]
assert len(values) == 0
def test_pickleable(self):
cache = LRUCache(2)
cache["foo"] = 42
cache["bar"] = 23
cache["foo"]
for protocol in range(3):
copy = pickle.loads(pickle.dumps(cache, protocol))
assert copy.capacity == cache.capacity
assert copy._mapping == cache._mapping
assert copy._queue == cache._queue
@pytest.mark.parametrize("copy_func", [LRUCache.copy, shallow_copy])
def test_copy(self, copy_func):
cache = LRUCache(2)
cache["a"] = 1
cache["b"] = 2
copy = copy_func(cache)
assert copy._queue == cache._queue
copy["c"] = 3
assert copy._queue != cache._queue
assert "a" not in copy and "b" in copy and "c" in copy
def test_clear(self):
d = LRUCache(3)
d["a"] = 1
d["b"] = 2
d["c"] = 3
d.clear()
assert d.__getstate__() == {"capacity": 3, "_mapping": {}, "_queue": deque([])}
def test_repr(self):
d = LRUCache(3)
d["a"] = 1
d["b"] = 2
d["c"] = 3
# Sort the strings - mapping is unordered
assert sorted(repr(d)) == sorted("<LRUCache {'a': 1, 'b': 2, 'c': 3}>")
def test_items(self):
"""Test various items, keys, values and iterators of LRUCache."""
d = LRUCache(3)
d["a"] = 1
d["b"] = 2
d["c"] = 3
assert d.items() == [("c", 3), ("b", 2), ("a", 1)]
assert d.keys() == ["c", "b", "a"]
assert d.values() == [3, 2, 1]
assert list(reversed(d)) == ["a", "b", "c"]
# Change the cache a little
d["b"]
d["a"] = 4
assert d.items() == [("a", 4), ("b", 2), ("c", 3)]
assert d.keys() == ["a", "b", "c"]
assert d.values() == [4, 2, 3]
assert list(reversed(d)) == ["c", "b", "a"]
def test_setdefault(self):
d = LRUCache(3)
assert len(d) == 0
assert d.setdefault("a") is None
assert d.setdefault("a", 1) is None
assert len(d) == 1
assert d.setdefault("b", 2) == 2
assert len(d) == 2
class TestHelpers:
def test_object_type_repr(self):
class X:
pass
assert object_type_repr(42) == "int object"
assert object_type_repr([]) == "list object"
assert object_type_repr(X()) == "test_utils.X object"
assert object_type_repr(None) == "None"
assert object_type_repr(Ellipsis) == "Ellipsis"
def test_autoescape_select(self):
func = select_autoescape(
enabled_extensions=("html", ".htm"),
disabled_extensions=("txt",),
default_for_string="STRING",
default="NONE",
)
assert func(None) == "STRING"
assert func("unknown.foo") == "NONE"
assert func("foo.html")
assert func("foo.htm")
assert not func("foo.txt")
assert func("FOO.HTML")
assert not func("FOO.TXT")
class TestEscapeUrlizeTarget:
def test_escape_urlize_target(self):
url = "http://example.org"
target = "<script>"
assert urlize(url, target=target) == (
'<a href="http://example.org"'
' target="<script>">'
"http://example.org</a>"
)
class TestLoremIpsum:
def test_lorem_ipsum_markup(self):
"""Test that output of lorem_ipsum is Markup by default."""
assert isinstance(generate_lorem_ipsum(), Markup)
def test_lorem_ipsum_html(self):
"""Test that output of lorem_ipsum is a string_type when not html."""
assert isinstance(generate_lorem_ipsum(html=False), str)
def test_lorem_ipsum_n(self):
"""Test that the n (number of lines) works as expected."""
assert generate_lorem_ipsum(n=0, html=False) == ""
for n in range(1, 50):
assert generate_lorem_ipsum(n=n, html=False).count("\n") == (n - 1) * 2
def test_lorem_ipsum_min(self):
"""Test that at least min words are in the output of each line"""
for _ in range(5):
m = random.randrange(20, 99)
for _ in range(10):
assert generate_lorem_ipsum(n=1, min=m, html=False).count(" ") >= m - 1
def test_lorem_ipsum_max(self):
"""Test that at least max words are in the output of each line"""
for _ in range(5):
m = random.randrange(21, 100)
for _ in range(10):
assert generate_lorem_ipsum(n=1, max=m, html=False).count(" ") < m - 1
def test_missing():
"""Test the repr of missing."""
assert repr(missing) == "missing"
def test_consume():
"""Test that consume consumes an iterator."""
x = iter([1, 2, 3, 4, 5])
consume(x)
with pytest.raises(StopIteration):
next(x)
|
Chive/cookiecutter-aldryn-addon
|
refs/heads/master
|
{{cookiecutter.repo_name}}/setup.py
|
2
|
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
from {{cookiecutter.package_name}} import __version__
REQUIREMENTS = []
CLASSIFIERS = [
'Development Status :: 2 - Pre-Alpha',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development',
'Topic :: Software Development :: Libraries :: Application Frameworks',
]
setup(
name='{{cookiecutter.app_name}}',
version=__version__,
description='{{cookiecutter.description}}',
author='{{cookiecutter.author_name}}',
author_email='{{cookiecutter.author_email}}',
url='{{cookiecutter.repo_url}}',
packages=find_packages(),
license='LICENSE.txt',
platforms=['OS Independent'],
install_requires=REQUIREMENTS,
classifiers=CLASSIFIERS,
include_package_data=True,
zip_safe=False
)
|
ombt/analytics
|
refs/heads/master
|
books/programming_in_python_3/book_examples/py31eg/make_html_skeleton.py
|
2
|
#!/usr/bin/env python3
# Copyright (c) 2008-11 Qtrac Ltd. All rights reserved.
# This program or module is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version. It is provided for educational
# purposes and is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
import datetime
import xml.sax.saxutils
COPYRIGHT_TEMPLATE = "Copyright (c) {0} {1}. All rights reserved."
STYLESHEET_TEMPLATE = ('<link rel="stylesheet" type="text/css" '
'media="all" href="{0}" />\n')
HTML_TEMPLATE = """<?xml version="1.0"?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" \
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<html xmlns="http://www.w3.org/1999/xhtml" lang="en" xml:lang="en">
<head>
<title>{title}</title>
<!-- {copyright} -->
<meta name="Description" content="{description}" />
<meta name="Keywords" content="{keywords}" />
<meta http-equiv="content-type" content="text/html; charset=utf-8" />
{stylesheet}\
</head>
<body>
</body>
</html>
"""
class CancelledError(Exception): pass
def main():
information = dict(name=None, year=datetime.date.today().year,
filename=None, title=None, description=None,
keywords=None, stylesheet=None)
while True:
try:
print("\nMake HTML Skeleton\n")
populate_information(information)
make_html_skeleton(**information)
except CancelledError:
print("Cancelled")
if (get_string("\nCreate another (y/n)?", default="y").lower()
not in {"y", "yes"}):
break
def populate_information(information):
name = get_string("Enter your name (for copyright)", "name",
information["name"])
if not name:
raise CancelledError()
year = get_integer("Enter copyright year", "year",
information["year"], 2000,
datetime.date.today().year + 1, True)
if year == 0:
raise CancelledError()
filename = get_string("Enter filename", "filename")
if not filename:
raise CancelledError()
if not filename.endswith((".htm", ".html")):
filename += ".html"
title = get_string("Enter title", "title")
if not title:
raise CancelledError()
description = get_string("Enter description (optional)",
"description")
keywords = []
while True:
keyword = get_string("Enter a keyword (optional)", "keyword")
if keyword:
keywords.append(keyword)
else:
break
stylesheet = get_string("Enter the stylesheet filename "
"(optional)", "stylesheet")
if stylesheet and not stylesheet.endswith(".css"):
stylesheet += ".css"
information.update(name=name, year=year, filename=filename,
title=title, description=description,
keywords=keywords, stylesheet=stylesheet)
def make_html_skeleton(year, name, title, description, keywords,
stylesheet, filename):
copyright = COPYRIGHT_TEMPLATE.format(year,
xml.sax.saxutils.escape(name))
title = xml.sax.saxutils.escape(title)
description = xml.sax.saxutils.escape(description)
keywords = ",".join([xml.sax.saxutils.escape(k)
for k in keywords]) if keywords else ""
stylesheet = (STYLESHEET_TEMPLATE.format(stylesheet)
if stylesheet else "")
html = HTML_TEMPLATE.format(**locals())
fh = None
try:
fh = open(filename, "w", encoding="utf8")
fh.write(html)
except EnvironmentError as err:
print("ERROR", err)
else:
print("Saved skeleton", filename)
finally:
if fh is not None:
fh.close()
def get_string(message, name="string", default=None,
minimum_length=0, maximum_length=80):
message += ": " if default is None else " [{0}]: ".format(default)
while True:
try:
line = input(message)
if not line:
if default is not None:
return default
if minimum_length == 0:
return ""
else:
raise ValueError("{0} may not be empty".format(
name))
if not (minimum_length <= len(line) <= maximum_length):
raise ValueError("{name} must have at least "
"{minimum_length} and at most "
"{maximum_length} characters".format(
**locals()))
return line
except ValueError as err:
print("ERROR", err)
def get_integer(message, name="integer", default=None, minimum=0,
maximum=100, allow_zero=True):
class RangeError(Exception): pass
message += ": " if default is None else " [{0}]: ".format(default)
while True:
try:
line = input(message)
if not line and default is not None:
return default
i = int(line)
if i == 0:
if allow_zero:
return i
else:
raise RangeError("{0} may not be 0".format(name))
if not (minimum <= i <= maximum):
raise RangeError("{name} must be between {minimum} "
"and {maximum} inclusive{0}".format(
" (or 0)" if allow_zero else "", **locals()))
return i
except RangeError as err:
print("ERROR", err)
except ValueError as err:
print("ERROR {0} must be an integer".format(name))
main()
|
bl4ckic3/ARMSCGen
|
refs/heads/master
|
shellcodes/thumb/dupsh.py
|
3
|
import dup
import sh
def generate(sock=4, cmd='/bin/sh'):
"""Duplicates sock to stdin, stdout and stderr and spawns a shell
Args:
sock(int/str/reg): sock descriptor
cmd(str): executes a cmd (default: /bin/sh)
"""
sc = dup.generate(sock)
sc += sh.generate(cmd)
return sc
|
danithaca/mxnet
|
refs/heads/master
|
example/warpctc/toy_ctc.py
|
15
|
# pylint: disable=C0111,too-many-arguments,too-many-instance-attributes,too-many-locals,redefined-outer-name,fixme
# pylint: disable=superfluous-parens, no-member, invalid-name
from __future__ import print_function
import sys
sys.path.insert(0, "../../python")
import numpy as np
import mxnet as mx
import random
from lstm import lstm_unroll
class SimpleBatch(object):
def __init__(self, data_names, data, label_names, label):
self.data = data
self.label = label
self.data_names = data_names
self.label_names = label_names
self.pad = 0
self.index = None # TODO: what is index?
@property
def provide_data(self):
return [(n, x.shape) for n, x in zip(self.data_names, self.data)]
@property
def provide_label(self):
return [(n, x.shape) for n, x in zip(self.label_names, self.label)]
def gen_feature(n):
ret = np.zeros(10)
ret[n] = 1
return ret
def gen_rand():
num = random.randint(0, 9999)
buf = str(num)
while len(buf) < 4:
buf = "0" + buf
ret = []
for i in range(80):
c = int(buf[i // 20])
ret.append(gen_feature(c))
return buf, ret
def get_label(buf):
ret = np.zeros(4)
for i in range(4):
ret[i] = 1 + int(buf[i])
return ret
class DataIter(mx.io.DataIter):
def __init__(self, count, batch_size, num_label, init_states):
super(DataIter, self).__init__()
self.batch_size = batch_size
self.count = count
self.num_label = num_label
self.init_states = init_states
self.init_state_arrays = [mx.nd.zeros(x[1]) for x in init_states]
self.provide_data = [('data', (batch_size, 80, 10))] + init_states
self.provide_label = [('label', (self.batch_size, 4))]
def __iter__(self):
init_state_names = [x[0] for x in self.init_states]
for k in range(self.count):
data = []
label = []
for i in range(self.batch_size):
num, img = gen_rand()
data.append(img)
label.append(get_label(num))
data_all = [mx.nd.array(data)] + self.init_state_arrays
label_all = [mx.nd.array(label)]
data_names = ['data'] + init_state_names
label_names = ['label']
data_batch = SimpleBatch(data_names, data_all, label_names, label_all)
yield data_batch
def reset(self):
pass
BATCH_SIZE = 32
SEQ_LENGTH = 80
def ctc_label(p):
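# Standard CTC collapse of a framewise prediction: drop blanks (label 0)
# and merge consecutive repeats, keeping only label transitions.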
ret = []
p1 = [0] + p
for i in range(len(p)):
c1 = p1[i]
c2 = p1[i+1]
if c2 == 0 or c2 == c1:
continue
ret.append(c2)
return ret
def Accuracy(label, pred):
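# Greedy decode: take the per-frame argmax for each sample, collapse it with
# ctc_label, and report the fraction of sequences that exactly match the label.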
global BATCH_SIZE
global SEQ_LENGTH
hit = 0.
total = 0.
for i in range(BATCH_SIZE):
l = label[i]
p = []
for k in range(SEQ_LENGTH):
p.append(np.argmax(pred[k * BATCH_SIZE + i]))
p = ctc_label(p)
if len(p) == len(l):
match = True
for k in range(len(p)):
if p[k] != int(l[k]):
match = False
break
if match:
hit += 1.0
total += 1.0
return hit / total
if __name__ == '__main__':
num_hidden = 100
num_lstm_layer = 1
num_epoch = 10
learning_rate = 0.001
momentum = 0.9
num_label = 4
contexts = [mx.context.gpu(0)]
def sym_gen(seq_len):
return lstm_unroll(num_lstm_layer, seq_len,
num_hidden=num_hidden,
num_label = num_label)
init_c = [('l%d_init_c'%l, (BATCH_SIZE, num_hidden)) for l in range(num_lstm_layer)]
init_h = [('l%d_init_h'%l, (BATCH_SIZE, num_hidden)) for l in range(num_lstm_layer)]
init_states = init_c + init_h
data_train = DataIter(100000, BATCH_SIZE, num_label, init_states)
data_val = DataIter(1000, BATCH_SIZE, num_label, init_states)
symbol = sym_gen(SEQ_LENGTH)
model = mx.model.FeedForward(ctx=contexts,
symbol=symbol,
num_epoch=num_epoch,
learning_rate=learning_rate,
momentum=momentum,
wd=0.00001,
initializer=mx.init.Xavier(factor_type="in", magnitude=2.34))
import logging
head = '%(asctime)-15s %(message)s'
logging.basicConfig(level=logging.DEBUG, format=head)
print('begin fit')
model.fit(X=data_train, eval_data=data_val,
eval_metric = mx.metric.np(Accuracy),
batch_end_callback=mx.callback.Speedometer(BATCH_SIZE, 50),)
model.save("ocr")
|
nikgr95/scrapy
|
refs/heads/master
|
tests/test_http_cookies.py
|
94
|
from six.moves.urllib.parse import urlparse
from unittest import TestCase
from scrapy.http import Request, Response
from scrapy.http.cookies import WrappedRequest, WrappedResponse
class WrappedRequestTest(TestCase):
def setUp(self):
self.request = Request("http://www.example.com/page.html",
headers={"Content-Type": "text/html"})
self.wrapped = WrappedRequest(self.request)
def test_get_full_url(self):
self.assertEqual(self.wrapped.get_full_url(), self.request.url)
def test_get_host(self):
self.assertEqual(self.wrapped.get_host(), urlparse(self.request.url).netloc)
def test_get_type(self):
self.assertEqual(self.wrapped.get_type(), urlparse(self.request.url).scheme)
def test_is_unverifiable(self):
self.assertFalse(self.wrapped.is_unverifiable())
self.assertFalse(self.wrapped.unverifiable)
def test_is_unverifiable2(self):
self.request.meta['is_unverifiable'] = True
self.assertTrue(self.wrapped.is_unverifiable())
self.assertTrue(self.wrapped.unverifiable)
def test_get_origin_req_host(self):
self.assertEqual(self.wrapped.get_origin_req_host(), 'www.example.com')
def test_has_header(self):
self.assertTrue(self.wrapped.has_header('content-type'))
self.assertFalse(self.wrapped.has_header('xxxxx'))
def test_get_header(self):
self.assertEqual(self.wrapped.get_header('content-type'), 'text/html')
self.assertEqual(self.wrapped.get_header('xxxxx', 'def'), 'def')
def test_header_items(self):
self.assertEqual(self.wrapped.header_items(),
[('Content-Type', ['text/html'])])
def test_add_unredirected_header(self):
self.wrapped.add_unredirected_header('hello', 'world')
self.assertEqual(self.request.headers['hello'], b'world')
class WrappedResponseTest(TestCase):
def setUp(self):
self.response = Response("http://www.example.com/page.html",
headers={"Content-TYpe": "text/html"})
self.wrapped = WrappedResponse(self.response)
def test_info(self):
self.assert_(self.wrapped.info() is self.wrapped)
def test_getheaders(self):
self.assertEqual(self.wrapped.getheaders('content-type'), ['text/html'])
def test_get_all(self):
# get_all result must be native string
self.assertEqual(self.wrapped.get_all('content-type'), ['text/html'])
|
georgid/sms-tools
|
refs/heads/georgid-withMelodia
|
lectures/9-Sound-description/plots-code/spectralFlux-onsetFunction.py
|
25
|
import numpy as np
import matplotlib.pyplot as plt
import essentia.standard as ess
M = 1024
N = 1024
H = 512
fs = 44100
spectrum = ess.Spectrum(size=N)
window = ess.Windowing(size=M, type='hann')
flux = ess.Flux()
onsetDetection = ess.OnsetDetection(method='hfc')
x = ess.MonoLoader(filename = '../../../sounds/speech-male.wav', sampleRate = fs)()
fluxes = []
onsetDetections = []
for frame in ess.FrameGenerator(x, frameSize=M, hopSize=H, startFromZero=True):
mX = spectrum(window(frame))
flux_val = flux(mX)
fluxes.append(flux_val)
onsetDetection_val = onsetDetection(mX, mX)
onsetDetections.append(onsetDetection_val)
onsetDetections = np.array(onsetDetections)
fluxes = np.array(fluxes)
plt.figure(1, figsize=(9.5, 7))
plt.subplot(2,1,1)
plt.plot(np.arange(x.size)/float(fs), x)
plt.axis([0, x.size/float(fs), min(x), max(x)])
plt.ylabel('amplitude')
plt.title('x (speech-male.wav)')
plt.subplot(2,1,2)
frmTime = H*np.arange(fluxes.size)/float(fs)
plt.plot(frmTime, fluxes/max(fluxes), 'g', lw=1.5, label ='normalized spectral flux')
plt.plot(frmTime, onsetDetections/max(onsetDetections), 'c', lw=1.5, label = 'normalized onset detection')
plt.axis([0, x.size/float(fs), 0, 1])
plt.legend()
plt.tight_layout()
plt.savefig('spectralFlux-onsetFunction.png')
plt.show()
|
Universal-Model-Converter/UMC3.0a
|
refs/heads/master
|
data/Python/x86/Lib/site-packages/numpy/distutils/fcompiler/sun.py
|
94
|
from numpy.distutils.ccompiler import simple_version_match
from numpy.distutils.fcompiler import FCompiler
compilers = ['SunFCompiler']
class SunFCompiler(FCompiler):
compiler_type = 'sun'
description = 'Sun or Forte Fortran 95 Compiler'
# ex:
# f90: Sun WorkShop 6 update 2 Fortran 95 6.2 Patch 111690-10 2003/08/28
version_match = simple_version_match(
start=r'f9[05]: (Sun|Forte|WorkShop).*Fortran 95')
executables = {
'version_cmd' : ["<F90>", "-V"],
'compiler_f77' : ["f90"],
'compiler_fix' : ["f90", "-fixed"],
'compiler_f90' : ["f90"],
'linker_so' : ["<F90>","-Bdynamic","-G"],
'archiver' : ["ar", "-cr"],
'ranlib' : ["ranlib"]
}
module_dir_switch = '-moddir='
module_include_switch = '-M'
pic_flags = ['-xcode=pic32']
def get_flags_f77(self):
ret = ["-ftrap=%none"]
if (self.get_version() or '') >= '7':
ret.append("-f77")
else:
ret.append("-fixed")
return ret
def get_opt(self):
return ['-fast','-dalign']
def get_arch(self):
return ['-xtarget=generic']
def get_libraries(self):
opt = []
opt.extend(['fsu','sunmath','mvec'])
return opt
if __name__ == '__main__':
from distutils import log
log.set_verbosity(2)
from numpy.distutils.fcompiler import new_fcompiler
compiler = new_fcompiler(compiler='sun')
compiler.customize()
print(compiler.get_version())
|
funkring/fdoo
|
refs/heads/8.0-fdoo
|
pygal/graph/stackedline.py
|
4
|
# -*- coding: utf-8 -*-
# This file is part of pygal
#
# A python svg graph plotting library
# Copyright © 2012-2014 Kozea
#
# This library is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# This library is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with pygal. If not, see <http://www.gnu.org/licenses/>.
"""
Stacked Line chart
"""
from __future__ import division
from pygal.graph.line import Line
from pygal.adapters import none_to_zero
class StackedLine(Line):
"""Stacked Line graph"""
_adapters = [none_to_zero]
def __init__(self, *args, **kwargs):
self._previous_line = None
super(StackedLine, self).__init__(*args, **kwargs)
def _fill(self, values):
if not self._previous_line:
self._previous_line = values
return super(StackedLine, self)._fill(values)
new_values = values + list(reversed(self._previous_line))
self._previous_line = values
return new_values
def _points(self, x_pos):
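# Stack the series within each group by accumulating their values, so each
# series is positioned on top of the running total of the series before it.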
for series_group in (self.series, self.secondary_series):
accumulation = [0] * self._len
for serie in series_group:
accumulation = list(map(sum, zip(accumulation, serie.values)))
serie.points = [
(x_pos[i], v)
for i, v in enumerate(accumulation)]
if serie.points and self.interpolate:
serie.interpolated = self._interpolate(x_pos, accumulation)
else:
serie.interpolated = []
|
watspidererik/testenv
|
refs/heads/master
|
flask/lib/python2.7/site-packages/pip/_vendor/cachecontrol/heuristics.py
|
22
|
import calendar
from email.utils import formatdate, parsedate
from datetime import datetime, timedelta
class BaseHeuristic(object):
def warning(self):
"""
Return a valid 1xx warning header value describing the cache adjustments.
"""
return '110 - "Response is Stale"'
def update_headers(self, response):
"""Update the response headers with any new headers.
NOTE: This SHOULD always include some Warning header to
signify that the response was cached by the client, not by way
of the provided headers.
"""
return {}
def apply(self, response):
response.headers.update(self.update_headers(response))
response.headers.update({'warning': self.warning()})
return response
class OneDayCache(BaseHeuristic):
"""
Cache the response by providing an expires 1 day in the
future.
"""
def update_headers(self, response):
headers = {}
if 'expires' not in response.headers:
date = parsedate(response.headers['date'])
expires = datetime(*date[:6]) + timedelta(days=1)
headers['expires'] = formatdate(calendar.timegm(expires.timetuple()))
return headers
|
JJGO/ProjectEuler
|
refs/heads/master
|
p006.py
|
1
|
#!/usr/bin/env python
"""
Project Euler Problem 6
=======================
The sum of the squares of the first ten natural numbers is,
1^2 + 2^2 + ... + 10^2 = 385
The square of the sum of the first ten natural numbers is,
(1 + 2 + ... + 10)^2 = 55^2 = 3025
Hence the difference between the sum of the squares of the first ten
natural numbers and the square of the sum is 3025 − 385 = 2640.
Find the difference between the sum of the squares of the first one
hundred natural numbers and the square of the sum.
Reasoning
---------
(x_1+x_2+...+x_n)**2 = x_1**2 + ... + x_n**2 + 2*x_1*x_2 + ... + 2*x_{n-1}*x_n
"""
__solution__ = "867380888952c39a131fe1d832246ecc"
import numpy as np
import itertools
def sum_square_difference(n):
return 2*np.sum([a*b for a,b in itertools.combinations(range(1,n+1),2)])
if __name__ == '__main__':
assert(sum_square_difference(10)==2640)
print(sum_square_difference(100))
|
ormnv/os_final_project
|
refs/heads/master
|
django/contrib/localflavor/sk/sk_regions.py
|
543
|
"""
Slovak regions according to http://sk.wikipedia.org/wiki/Administrat%C3%ADvne_%C4%8Dlenenie_Slovenska
"""
from django.utils.translation import ugettext_lazy as _
REGION_CHOICES = (
('BB', _('Banska Bystrica region')),
('BA', _('Bratislava region')),
('KE', _('Kosice region')),
('NR', _('Nitra region')),
('PO', _('Presov region')),
('TN', _('Trencin region')),
('TT', _('Trnava region')),
('ZA', _('Zilina region')),
)
|
Kilhog/odoo
|
refs/heads/8.0
|
addons/website_event_sale/models/sale_order.py
|
197
|
# -*- coding: utf-8 -*-
from openerp import SUPERUSER_ID
from openerp.osv import osv, fields
from openerp.tools.translate import _
import openerp.addons.decimal_precision as dp
# defined for access rules
class sale_order(osv.Model):
_inherit = "sale.order"
def _cart_find_product_line(self, cr, uid, ids, product_id=None, line_id=None, context=None, **kwargs):
line_ids = super(sale_order, self)._cart_find_product_line(cr, uid, ids, product_id, line_id, context=context)
if line_id:
return line_ids
for so in self.browse(cr, uid, ids, context=context):
domain = [('id', 'in', line_ids)]
if context.get("event_ticket_id"):
domain += [('event_ticket_id', '=', context.get("event_ticket_id"))]
return self.pool.get('sale.order.line').search(cr, SUPERUSER_ID, domain, context=context)
def _website_product_id_change(self, cr, uid, ids, order_id, product_id, qty=0, line_id=None, context=None):
values = super(sale_order,self)._website_product_id_change(cr, uid, ids, order_id, product_id, qty=qty, line_id=line_id, context=context)
event_ticket_id = None
if context.get("event_ticket_id"):
event_ticket_id = context.get("event_ticket_id")
elif line_id:
line = self.pool.get('sale.order.line').browse(cr, SUPERUSER_ID, line_id, context=context)
if line.event_ticket_id:
event_ticket_id = line.event_ticket_id.id
else:
product = self.pool.get('product.product').browse(cr, uid, product_id, context=context)
if product.event_ticket_ids:
event_ticket_id = product.event_ticket_ids[0].id
if event_ticket_id:
ticket = self.pool.get('event.event.ticket').browse(cr, uid, event_ticket_id, context=context)
if product_id != ticket.product_id.id:
raise osv.except_osv(_('Error!'),_("The ticket doesn't match with this product."))
values['product_id'] = ticket.product_id.id
values['event_id'] = ticket.event_id.id
values['event_ticket_id'] = ticket.id
values['price_unit'] = ticket.price
values['name'] = "%s\n%s" % (ticket.event_id.display_name, ticket.name)
return values
|
rgreinho/molecule
|
refs/heads/master
|
test/unit/command/test_destroy.py
|
1
|
# Copyright (c) 2015-2016 Cisco Systems, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import subprocess
import pytest
from molecule.command import destroy
def test_execute_deletes_instances(
patched_driver_destroy, patched_print_info, patched_remove_templates,
patched_remove_inventory, molecule_instance):
d = destroy.Destroy({}, {}, molecule_instance)
result = d.execute()
msg = 'Destroying instances...'
patched_print_info.assert_called_once_with(msg)
patched_driver_destroy.assert_called_once()
assert not molecule_instance.state.created
assert not molecule_instance.state.converged
assert (None, None) == result
patched_remove_templates.assert_called_once()
patched_remove_inventory.assert_called_once()
def test_execute_raises_on_exit(patched_driver_destroy, patched_print_info,
patched_print_error, patched_remove_templates,
patched_remove_inventory, molecule_instance):
patched_driver_destroy.side_effect = subprocess.CalledProcessError(1, None,
None)
d = destroy.Destroy({}, {}, molecule_instance)
with pytest.raises(SystemExit):
d.execute()
msg = "Command 'None' returned non-zero exit status 1"
patched_print_error.assert_called_with(msg)
assert not patched_remove_templates.called
assert not patched_remove_inventory.called
def test_execute_does_not_raise_on_exit(patched_driver_destroy,
patched_print_info, molecule_instance):
patched_driver_destroy.side_effect = subprocess.CalledProcessError(1, None,
None)
d = destroy.Destroy({}, {}, molecule_instance)
result = d.execute(exit=False)
assert (1, '') == result
|
ganeshrn/ansible
|
refs/heads/devel
|
test/integration/targets/module_utils_urls/library/test_peercert.py
|
29
|
#!/usr/bin/python
# Copyright: (c) 2020, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = r'''
---
module: test_peercert
short_description: Test getting the peer certificate of an HTTP response
description: Test getting the peer certificate of an HTTP response.
options:
url:
description: The endpoint to get the peer cert for
required: true
type: str
author:
- Ansible Project
'''
EXAMPLES = r'''
#
'''
RETURN = r'''
#
'''
import base64
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.common.text.converters import to_text
from ansible.module_utils.urls import getpeercert, Request
def get_x509_shorthand(name, value):
prefix = {
'countryName': 'C',
'stateOrProvinceName': 'ST',
'localityName': 'L',
'organizationName': 'O',
'commonName': 'CN',
'organizationalUnitName': 'OU',
}[name]
return '%s=%s' % (prefix, value)
def main():
module_args = dict(
url=dict(type='str', required=True),
)
module = AnsibleModule(
argument_spec=module_args,
supports_check_mode=True,
)
result = {
'changed': False,
'cert': None,
'raw_cert': None,
}
req = Request().get(module.params['url'])
try:
cert = getpeercert(req)
b_cert = getpeercert(req, binary_form=True)
finally:
req.close()
if cert:
processed_cert = {
'issuer': '',
'not_after': cert.get('notAfter', None),
'not_before': cert.get('notBefore', None),
'serial_number': cert.get('serialNumber', None),
'subject': '',
'version': cert.get('version', None),
}
for field in ['issuer', 'subject']:
field_values = []
for x509_part in cert.get(field, []):
field_values.append(get_x509_shorthand(x509_part[0][0], x509_part[0][1]))
processed_cert[field] = ",".join(field_values)
result['cert'] = processed_cert
if b_cert:
result['raw_cert'] = to_text(base64.b64encode(b_cert))
module.exit_json(**result)
if __name__ == '__main__':
main()
|
daspecster/google-cloud-python
|
refs/heads/master
|
vision/google/cloud/vision/client.py
|
1
|
# Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Client for interacting with the Google Cloud Vision API."""
import os
from google.cloud.client import ClientWithProject
from google.cloud.environment_vars import DISABLE_GRPC
from google.cloud.vision._gax import _GAPICVisionAPI
from google.cloud.vision._http import _HTTPVisionAPI
from google.cloud.vision.batch import Batch
from google.cloud.vision.image import Image
_USE_GAX = not os.getenv(DISABLE_GRPC, False)
class Client(ClientWithProject):
"""Client to bundle configuration needed for API requests.
:type project: str
:param project: the project which the client acts on behalf of.
If not passed, falls back to the default inferred
from the environment.
:type credentials: :class:`~google.auth.credentials.Credentials`
:param credentials: (Optional) The OAuth2 Credentials to use for this
client. If not passed (and if no ``http`` object is
passed), falls back to the default inferred from the
environment.
:type http: :class:`~httplib2.Http`
:param http: (Optional) HTTP object to make requests. Can be any object
that defines ``request()`` with the same interface as
:meth:`~httplib2.Http.request`. If not passed, an
``http`` object is created that is bound to the
``credentials`` for the current object.
:type use_gax: bool
:param use_gax: (Optional) Explicitly specifies whether
to use the gRPC transport (via GAX) or HTTP. If unset,
falls back to the ``GOOGLE_CLOUD_DISABLE_GRPC`` environment
variable
"""
SCOPE = ('https://www.googleapis.com/auth/cloud-platform',)
"""The scopes required for authenticating as a Cloud Vision consumer."""
_vision_api_internal = None
def __init__(self, project=None, credentials=None, http=None,
use_gax=None):
super(Client, self).__init__(
project=project, credentials=credentials, http=http)
if use_gax is None:
self._use_gax = _USE_GAX
else:
self._use_gax = use_gax
def batch(self):
"""Batch multiple images into a single API request.
:rtype: :class:`google.cloud.vision.batch.Batch`
:returns: Instance of ``Batch``.
"""
return Batch(self)
def image(self, content=None, filename=None, source_uri=None):
"""Get instance of Image using current client.
:type content: bytes
:param content: Byte stream of an image.
:type filename: str
:param filename: Filename to image.
:type source_uri: str
:param source_uri: URL or Google Cloud Storage URI of image.
:rtype: :class:`~google.cloud.vision.image.Image`
:returns: Image instance with the current client attached.
"""
return Image(client=self, content=content, filename=filename,
source_uri=source_uri)
@property
def _vision_api(self):
"""Proxy method that handles which transport call Vision Annotate.
:rtype: :class:`~google.cloud.vision._http._HTTPVisionAPI`
or :class:`~google.cloud.vision._gax._GAPICVisionAPI`
:returns: Instance of ``_HTTPVisionAPI`` or ``_GAPICVisionAPI`` used to
make requests.
"""
if self._vision_api_internal is None:
if self._use_gax:
self._vision_api_internal = _GAPICVisionAPI(self)
else:
self._vision_api_internal = _HTTPVisionAPI(self)
return self._vision_api_internal
|
mrry/tensorflow
|
refs/heads/windows
|
tensorflow/contrib/layers/python/layers/optimizers_test.py
|
6
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for optimizers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
def _setup_model():
x = tf.placeholder(tf.float32, [])
var = tf.get_variable("test", [], initializer=tf.constant_initializer(10))
loss = tf.abs(var * x)
global_step = tf.get_variable("global_step",
[],
trainable=False,
initializer=tf.constant_initializer(0))
return x, var, loss, global_step
class OptimizersTest(tf.test.TestCase):
def testSGDOptimizer(self):
optimizers = [
"SGD", tf.train.GradientDescentOptimizer,
tf.train.GradientDescentOptimizer(learning_rate=0.1),
lambda lr: tf.train.GradientDescentOptimizer(learning_rate=lr)]
for optimizer in optimizers:
with tf.Graph().as_default() as g:
with self.test_session(graph=g) as session:
x, var, loss, global_step = _setup_model()
train = tf.contrib.layers.optimize_loss(loss,
global_step,
learning_rate=0.1,
optimizer=optimizer)
tf.initialize_all_variables().run()
session.run(train, feed_dict={x: 5})
var_value, global_step_value = session.run([var, global_step])
self.assertEqual(var_value, 9.5)
self.assertEqual(global_step_value, 1)
def testNoLrCallable(self):
def optimizer_fn():
return tf.train.GradientDescentOptimizer(learning_rate=0.1)
with tf.Graph().as_default() as g:
with self.test_session(graph=g) as session:
x, var, loss, global_step = _setup_model()
train = tf.contrib.layers.optimize_loss(loss,
global_step,
learning_rate=None,
optimizer=optimizer_fn)
tf.initialize_all_variables().run()
session.run(train, feed_dict={x: 5})
var_value, global_step_value = session.run([var, global_step])
self.assertEqual(var_value, 9.5)
self.assertEqual(global_step_value, 1)
def testWrongOptimizer(self):
optimizers = ["blah", tf.Variable, object(), lambda x: None]
for optimizer in optimizers:
with tf.Graph().as_default() as g:
with self.test_session(graph=g):
_, _, loss, global_step = _setup_model()
with self.assertRaises(ValueError):
tf.contrib.layers.optimize_loss(loss,
global_step,
learning_rate=0.1,
optimizer=optimizer)
def testGradientNoise(self):
tf.set_random_seed(42)
with self.test_session() as session:
x, var, loss, global_step = _setup_model()
train = tf.contrib.layers.optimize_loss(loss,
global_step,
learning_rate=0.1,
optimizer="SGD",
gradient_noise_scale=10.0)
tf.initialize_all_variables().run()
session.run(train, feed_dict={x: 5})
var_value, global_step_value = session.run([var, global_step])
# Due to randomness the following number may change if graph is different.
self.assertAlmostEqual(var_value, 8.5591021, 4)
self.assertEqual(global_step_value, 1)
def testGradientNoiseWithClipping(self):
tf.set_random_seed(42)
with self.test_session() as session:
x, var, loss, global_step = _setup_model()
train = tf.contrib.layers.optimize_loss(loss,
global_step,
learning_rate=0.1,
optimizer="SGD",
gradient_noise_scale=10.0,
clip_gradients=10.0)
tf.initialize_all_variables().run()
session.run(train, feed_dict={x: 5})
var_value, global_step_value = session.run([var, global_step])
self.assertAlmostEqual(var_value, 9.0, 4)
self.assertEqual(global_step_value, 1)
def testGradientClip(self):
with self.test_session() as session:
x, var, loss, global_step = _setup_model()
train = tf.contrib.layers.optimize_loss(loss,
global_step,
learning_rate=0.1,
optimizer="SGD",
clip_gradients=0.1)
tf.initialize_all_variables().run()
session.run(train, feed_dict={x: 5})
var_value, global_step_value = session.run([var, global_step])
self.assertAlmostEqual(var_value, 9.98999, 4)
self.assertEqual(global_step_value, 1)
def testGradientMultiply(self):
with self.test_session() as session:
x, var, loss, global_step = _setup_model()
train = tf.contrib.layers.optimize_loss(loss,
global_step,
learning_rate=0.1,
optimizer="SGD",
gradient_multipliers={var: 7.})
tf.initialize_all_variables().run()
session.run(train, feed_dict={x: 5})
var_value, global_step_value = session.run([var, global_step])
# var(0) = 10, x = 5, var(0)/dx = 5,
# var(1) = var(0) - learning_rate * gradient_multiplier * var(0)/dx
self.assertAlmostEqual(var_value, 6.5, 4)
self.assertEqual(global_step_value, 1)
def testIgnoreVariablesWithNoGradients(self):
_, _, loss, global_step = _setup_model()
unused_variable = tf.get_variable("ignore_me", [])
tf.contrib.layers.optimize_loss(
loss, global_step, learning_rate=0.1, optimizer="SGD",
gradient_noise_scale=10.0,
gradient_multipliers={unused_variable: 1.},
clip_gradients=10.0)
def testUpdateOp(self):
optimizers = ["SGD", tf.train.GradientDescentOptimizer,
tf.train.GradientDescentOptimizer(learning_rate=0.1)]
for optimizer in optimizers:
with tf.Graph().as_default() as g:
with self.test_session(graph=g) as session:
x, var, loss, global_step = _setup_model()
update_var = tf.get_variable(
"update", [], initializer=tf.constant_initializer(10))
update_op = tf.assign(update_var, 20)
train = tf.contrib.layers.optimize_loss(loss,
global_step,
learning_rate=0.1,
optimizer=optimizer,
update_ops=[update_op])
tf.initialize_all_variables().run()
session.run(train, feed_dict={x: 5})
var_value, update_var_value, global_step_value = session.run(
[var, update_var, global_step])
self.assertEqual(var_value, 9.5)
self.assertEqual(update_var_value, 20)
self.assertEqual(global_step_value, 1)
def testUpdateOpFromCollection(self):
optimizers = ["SGD", tf.train.GradientDescentOptimizer,
tf.train.GradientDescentOptimizer(learning_rate=0.1)]
for optimizer in optimizers:
with tf.Graph().as_default() as g:
with self.test_session(graph=g) as session:
x, var, loss, global_step = _setup_model()
update_var = tf.get_variable(
"update", [], initializer=tf.constant_initializer(10))
update_op = tf.assign(update_var, 20)
tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, update_op)
train = tf.contrib.layers.optimize_loss(
loss, global_step, learning_rate=0.1, optimizer=optimizer)
tf.initialize_all_variables().run()
session.run(train, feed_dict={x: 5})
var_value, update_var_value, global_step_value = session.run(
[var, update_var, global_step])
self.assertEqual(var_value, 9.5)
self.assertEqual(update_var_value, 20)
self.assertEqual(global_step_value, 1)
if __name__ == "__main__":
tf.test.main()
|
Ashatz/bcindex
|
refs/heads/master
|
__init__.py
|
1
|
#-------------------------------------------------------------------------------
# Name: module1
# Purpose:
#
# Author: Andrew Shatz
#
# Created: 22/10/2014
# Copyright: (c) Andrew Shatz 2014
# Licence: <your licence>
#-------------------------------------------------------------------------------
import os, sys, shutil
import csv
from osgeo import gdal
from osgeo.gdalconst import *
from matplotlib import pyplot as plt
import numpy as np
from bciconst import *
from bciobjects import *
from bcidatabase import *
from bcivalid import *
#Declare input variables
db_name = "test3.db"
project_name = "ALB_BCITest"
sdm_path = r"C:\Users\Andrew Shatz\Documents\Scripts\Python\bcindex\Data\Model_Input\GeoTiff\ALB_MAHAL"
valid_path = r"C:\Users\Andrew Shatz\Documents\Scripts\Python\bcindex\Data\Model_Input\GeoTiff\ALB_VALIDATION"
mask_file = r"C:\Users\Andrew Shatz\Documents\Scripts\Python\bcindex\Data\Model_Input\GeoTiff\SA_2_Mask.tif"
species = "Asian_Longhorned_Beetle"
db_name = "test3.db"
#Declare file lists
sdm_files = os.listdir(sdm_path)
valid_files = os.listdir(valid_path)
#Declare database absolute path
db_path = os.path.join(DB_BPATH, db_name)
project = BCIProject(db_name, project_name, species, sdm_path, valid_path, True, mask_file)
bcidb = bcisqlitedb(db_path)
bcidb.pLoadBCIProject(project)
#Create RasterDataset object
gdal.AllRegister()
def LoadRasterDatasets(sdm_path, sdm_files):
print(" * * * Loading SDM datasets to " + db_name + "... * * * ")
os.chdir(sdm_path)
for dataset in sdm_files:
i = sdm_files.index(dataset) + 1
rst = RasterDataset("raster", dataset, i)
rstTile = TileSet(rst, 100, 100)
#Test connection
print(" * * * Establishing database connection... * * * ")
bcidb = bcisqlitedb(DB_ABSPATH)
#Test raster dataset loading
rst_sql = rst.toSqliteList()
bcidb.pLoadRasterDataset(rst_sql)
#Test tile set loading
tile_sql = rstTile.toSqliteList()
bcidb.pLoadTileSet(tile_sql)
#Test raster tile loading
data_sql = formatRasterTileSqliteData(rst, rstTile)
#print datasql #Test
bcidb.pLoadRasterTile(rst, rstTile, data_sql)
"""
###This section is meant to test the raster loading procedure for the validation datasets
os.chdir(valid_path)
valid_file = valid_files[0]
valid_ds = LocDataset("Asian Longhorned Beetle", valid_file, 1)
"""
|
sinesiobittencourt/explainshell
|
refs/heads/master
|
tests/test-integration.py
|
6
|
import unittest, subprocess, pymongo, os
from explainshell import manager, matcher
class test_integration(unittest.TestCase):
def test(self):
mngr = manager.manager('localhost', 'explainshell_tests', [os.path.join(os.path.dirname(__file__), 'echo.1.gz')], drop=True)
mngr.run()
cmd = 'echo -en foobar --version'
m = matcher.matcher(cmd, mngr.store)
group = m.match()[1]
matchprog, matches = group.manpage.name, group.results
self.assertEquals(matchprog, 'echo')
#self.assertEquals(matches[0].text, 'display a line of text')
self.assertEquals(matches[0].match, 'echo')
self.assertEquals(matches[1].text, '<b>-e</b> enable interpretation of backslash escapes')
self.assertEquals(matches[1].match, '-e')
self.assertEquals(matches[2].text, '<b>-n</b> do not output the trailing newline')
self.assertEquals(matches[2].match, 'n')
self.assertEquals(matches[3].text, None)
self.assertEquals(matches[3].match, 'foobar')
self.assertEquals(matches[4].text, '<b>--version</b>\n output version information and exit')
self.assertEquals(matches[4].match, '--version')
|
paran0ids0ul/infernal-twin
|
refs/heads/master
|
build/pillow/PIL/ImageSequence.py
|
45
|
#
# The Python Imaging Library.
# $Id$
#
# sequence support classes
#
# history:
# 1997-02-20 fl Created
#
# Copyright (c) 1997 by Secret Labs AB.
# Copyright (c) 1997 by Fredrik Lundh.
#
# See the README file for information on usage and redistribution.
#
##
class Iterator(object):
"""
This class implements an iterator object that can be used to loop
over an image sequence.
You can use the ``[]`` operator to access elements by index. This operator
will raise an :py:exc:`IndexError` if you try to access a nonexistent
frame.
:param im: An image object.
"""
def __init__(self, im):
if not hasattr(im, "seek"):
raise AttributeError("im must have seek method")
self.im = im
def __getitem__(self, ix):
try:
if ix:
self.im.seek(ix)
return self.im
except EOFError:
raise IndexError # end of sequence
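# Editor's addition: a hedged usage sketch, not part of the upstream module.
# Assuming a multi-frame file such as an animated GIF is available under the
# hypothetical name "animation.gif", the Iterator yields the same Image
# object seeked to each successive frame:
if __name__ == "__main__":
    from PIL import Image
    im = Image.open("animation.gif")
    for index, frame in enumerate(Iterator(im)):
        # each iteration advances the underlying image to the next frame
        print(index, frame.size)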
|
meteorfox/PerfKitBenchmarker
|
refs/heads/master
|
perfkitbenchmarker/linux_packages/sysbench.py
|
8
|
# Copyright 2014 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing sysbench installation and cleanup functions."""
def _Install(vm):
"""Installs the sysbench package on the VM."""
vm.InstallPackages('sysbench')
def YumInstall(vm):
"""Installs the sysbench package on the VM."""
vm.InstallEpelRepo()
_Install(vm)
def AptInstall(vm):
"""Installs the sysbench package on the VM."""
_Install(vm)
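# Editor's addition (hedged usage note): benchmark code does not normally call
# YumInstall/AptInstall directly; it typically requests the package through the
# VM object, e.g. vm.Install('sysbench'), which dispatches to the installer
# matching the VM's package manager.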
|
kenshay/ImageScript
|
refs/heads/master
|
ProgramData/SystemFiles/Python/Lib/site-packages/chardet/mbcharsetprober.py
|
2923
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
# Proofpoint, Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import sys
from . import constants
from .charsetprober import CharSetProber
class MultiByteCharSetProber(CharSetProber):
def __init__(self):
CharSetProber.__init__(self)
self._mDistributionAnalyzer = None
self._mCodingSM = None
self._mLastChar = [0, 0]
def reset(self):
CharSetProber.reset(self)
if self._mCodingSM:
self._mCodingSM.reset()
if self._mDistributionAnalyzer:
self._mDistributionAnalyzer.reset()
self._mLastChar = [0, 0]
def get_charset_name(self):
pass
def feed(self, aBuf):
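        # The coding state machine validates the byte stream for this charset:
        # an illegal sequence rules the charset out (eNotMe), a sequence unique
        # to the charset confirms it (eItsMe -> eFoundIt), and each completed
        # character (eStart) is fed to the distribution analyzer, whose
        # frequency statistics back get_confidence() below.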
aLen = len(aBuf)
for i in range(0, aLen):
codingState = self._mCodingSM.next_state(aBuf[i])
if codingState == constants.eError:
if constants._debug:
sys.stderr.write(self.get_charset_name()
+ ' prober hit error at byte ' + str(i)
+ '\n')
self._mState = constants.eNotMe
break
elif codingState == constants.eItsMe:
self._mState = constants.eFoundIt
break
elif codingState == constants.eStart:
charLen = self._mCodingSM.get_current_charlen()
if i == 0:
self._mLastChar[1] = aBuf[0]
self._mDistributionAnalyzer.feed(self._mLastChar, charLen)
else:
self._mDistributionAnalyzer.feed(aBuf[i - 1:i + 1],
charLen)
self._mLastChar[0] = aBuf[aLen - 1]
if self.get_state() == constants.eDetecting:
if (self._mDistributionAnalyzer.got_enough_data() and
(self.get_confidence() > constants.SHORTCUT_THRESHOLD)):
self._mState = constants.eFoundIt
return self.get_state()
def get_confidence(self):
return self._mDistributionAnalyzer.get_confidence()
|
DylanMcCall/Empathy---Hide-contact-groups
|
refs/heads/master
|
tools/make-version-script.py
|
14
|
#!/usr/bin/python
"""Construct a GNU ld or Debian dpkg version-script from a set of
RFC822-style symbol lists.
Usage:
make-version-script.py [--symbols SYMBOLS] [--unreleased-version VER]
[--dpkg "LIBRARY.so.0 LIBRARY0 #MINVER#"]
[--dpkg-build-depends-package LIBRARY-dev]
[FILES...]
Each FILE starts with RFC822-style headers "Version:" (the name of the
symbol version, e.g. FOO_1.2.3) and "Extends:" (either the previous
version, or "-" if this is the first version). Next there is a blank
line, then a list of C symbols one per line.
Comments (lines starting with whitespace + "#") are allowed and ignored.
If --symbols is given, SYMBOLS lists the symbols actually exported by
the library (one per line). If --unreleased-version is given, any symbols
in SYMBOLS but not in FILES are assigned to that version; otherwise, any
such symbols cause an error.
If --dpkg is given, produce a Debian dpkg-gensymbols file instead of a
GNU ld version-script. The argument to --dpkg is the first line of the
resulting symbols file, and --dpkg-build-depends-package can optionally
be used to set the Build-Depends-Package field.
This script originates in telepathy-glib <http://telepathy.freedesktop.org/> -
please send us any changes that are needed.
"""
# Copyright (C) 2008 Collabora Ltd. <http://www.collabora.co.uk/>
# Copyright (C) 2008 Nokia Corporation
#
# Copying and distribution of this file, with or without modification,
# are permitted in any medium without royalty provided the copyright
# notice and this notice are preserved.
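# Editor's addition: a hedged, minimal example of a FILE in the format the
# module docstring above describes (library and symbol names are hypothetical):
#
#   Version: LIBFOO_0.1.0
#   Extends: -
#   Release: 0.1.0
#
#   foo_init
#   foo_free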
import sys
from getopt import gnu_getopt
from sets import Set as set
def e(format, *args):
sys.stderr.write((format + '\n') % args)
def main(abifiles, symbols=None, unreleased_version=None,
dpkg=False, dpkg_first_line=None, dpkg_build_depends_package=None):
gnuld = not dpkg
symbol_set = None
if symbols is not None:
symbol_set = open(symbols, 'r').readlines()
symbol_set = map(str.strip, symbol_set)
symbol_set = set(symbol_set)
versioned_symbols = set()
dpkg_symbols = []
dpkg_versions = []
if dpkg:
assert dpkg_first_line is not None
print dpkg_first_line
if dpkg_build_depends_package is not None:
print "* Build-Depends-Package: %s" % dpkg_build_depends_package
for filename in abifiles:
lines = open(filename, 'r').readlines()
version = None
extends = None
release = None
for i, line in enumerate(lines):
line = line.strip()
if line.startswith('#'):
continue
elif not line:
                # the transition between headers and symbols
cut = i + 1
break
elif line.lower().startswith('version:'):
line = line[8:].strip()
version = line
continue
elif line.lower().startswith('extends:'):
line = line[8:].strip()
extends = line
continue
elif line.lower().startswith('release:'):
release = line[8:].strip()
continue
else:
e('Could not understand line in %s header: %s', filename, line)
raise SystemExit(1)
else:
e('No symbols in %s', filename)
raise SystemExit(1)
if version is None:
            e('No Version: header in %s', filename)
raise SystemExit(1)
if extends is None:
e('No Extends: header in %s', filename)
raise SystemExit(1)
if release is None and dpkg:
e('No Release: header in %s', filename)
raise SystemExit(1)
if dpkg:
dpkg_versions.append('%s@%s %s' % (version, version, release))
lines = lines[cut:]
if gnuld:
print "%s {" % version
print " global:"
for symbol in lines:
symbol = symbol.strip()
if symbol.startswith('#'):
continue
if gnuld:
print " %s;" % symbol
elif dpkg:
dpkg_symbols.append('%s@%s %s' % (symbol, version, release))
versioned_symbols.add(symbol)
if gnuld:
if extends == '-':
print " local:"
print " *;"
print "};"
else:
print "} %s;" % extends
print
if dpkg:
dpkg_symbols.sort()
dpkg_versions.sort()
for x in dpkg_versions:
print " %s" % x
for x in dpkg_symbols:
print " %s" % x
if symbol_set is not None:
missing = versioned_symbols - symbol_set
if missing:
e('These symbols have disappeared:')
for symbol in missing:
e(' %s', symbol)
raise SystemExit(1)
unreleased = symbol_set - versioned_symbols
if unreleased:
if unreleased_version is None:
e('Unversioned symbols are not allowed in releases:')
for symbol in unreleased:
e(' %s', symbol)
raise SystemExit(1)
if gnuld:
print "%s {" % unreleased_version
print " global:"
for symbol in unreleased:
print " %s;" % symbol
print "} %s;" % version
if __name__ == '__main__':
options, argv = gnu_getopt (sys.argv[1:], '',
['symbols=', 'unreleased-version=',
'dpkg=', 'dpkg-build-depends-package='])
opts = {'dpkg': False}
for option, value in options:
if option == '--dpkg':
opts['dpkg'] = True
opts['dpkg_first_line'] = value
else:
opts[option.lstrip('-').replace('-', '_')] = value
main(argv, **opts)
|
edx/edx-enterprise
|
refs/heads/master
|
test_utils/fake_catalog_api.py
|
1
|
# -*- coding: utf-8 -*-
"""
Fake responses for course catalog api.
"""
import copy
from collections import OrderedDict
import mock
from six.moves import reduce as six_reduce
from test_utils import FAKE_UUIDS
FAKE_URL = 'https://fake.url'
FAKE_COURSE_RUN = {
'key': 'course-v1:edX+DemoX+Demo_Course',
'uuid': '785b11f5-fad5-4ce1-9233-e1a3ed31aadb',
'title': 'edX Demonstration Course',
'image': {
'description': None,
'height': None,
'src': 'http://edx.devstack.lms:18000/asset-v1:edX+DemoX+Demo_Course+type@asset+block@images_course_image.jpg',
'width': None
},
'short_description': 'This course demonstrates many features of the edX platform.',
'marketing_url': 'course/demo-course?utm_=test_enterprise&utm_medium=enterprise',
'seats': [
{
'type': 'audit',
'price': '0.00',
'currency': 'USD',
'upgrade_deadline': None,
'credit_provider': None,
'credit_hours': None,
'sku': '68EFFFF'
},
{
'type': 'verified',
'price': '149.00',
'currency': 'USD',
'upgrade_deadline': '2018-08-03T16:44:26.595896Z',
'credit_provider': None,
'credit_hours': None,
'sku': '8CF08E5'
}
],
'start': '2013-02-05T05:00:00Z',
'end': '3000-12-31T18:00:00Z',
'enrollment_start': None,
'enrollment_end': None,
'enrollment_url': FAKE_URL,
'pacing_type': 'instructor_paced',
'type': 'verified',
'status': 'published',
'course': 'edX+DemoX',
'full_description': 'Lorem ipsum dolor sit amet, consectetur adipiscing elit.',
'announcement': None,
'video': None,
'content_language': None,
'transcript_languages': [],
'instructors': [],
'staff': [
{
'uuid': '51df1077-1b8d-4f86-8305-8adbc82b72e9',
'given_name': 'Anant',
'family_name': 'Agarwal',
'bio': "Lorem ipsum dolor sit amet, consectetur adipiscing elit.",
'profile_image_url': 'https://www.edx.org/sites/default/files/executive/photo/anant-agarwal.jpg',
'slug': 'anant-agarwal',
'position': {
'title': 'CEO',
'organization_name': 'edX'
},
'profile_image': {},
'works': [],
'urls': {
'twitter': None,
'facebook': None,
'blog': None
},
'email': None
}
],
'min_effort': 5,
'max_effort': 6,
'weeks_to_complete': 10,
'modified': '2017-08-18T00:32:33.754662Z',
'level_type': 'Type 1',
'availability': 'Current',
'mobile_available': False,
'hidden': False,
'reporting_type': 'mooc',
'eligible_for_financial_aid': True,
'content_type': 'courserun',
'has_enrollable_seats': True
}
FAKE_COURSE_RUN2 = copy.deepcopy(FAKE_COURSE_RUN)
FAKE_COURSE_RUN2['key'] = 'course-v1:edX+DemoX+Demo_Course2'
FAKE_COURSE = {
'key': 'edX+DemoX',
'uuid': 'a9e8bb52-0c8d-4579-8496-1a8becb0a79c',
'title': 'edX Demonstration Course',
'course_runs': [FAKE_COURSE_RUN],
'owners': [
{
'uuid': '2bd367cf-c58e-400c-ac99-fb175405f7fa',
'key': 'edX',
'name': 'edX',
'certificate_logo_image_url': None,
'description': '',
'homepage_url': None,
'tags': [],
'logo_image_url': 'https://foo.com/bar.png',
'marketing_url': None
}
],
'image': None,
'short_description': 'This course demonstrates many features of the edX platform.',
'full_description': 'Lorem ipsum dolor sit amet, consectetur adipiscing elit.',
'level_type': None,
'subjects': [],
'prerequisites': [],
'expected_learning_items': [
'XBlocks',
'Peer Assessment'
],
'video': None,
'sponsors': [],
'modified': '2017-08-18T00:23:21.111991Z',
'marketing_url': None,
'content_type': 'course',
'enrollment_url': FAKE_URL,
'programs': []
}
FAKE_PROGRAM_RESPONSE1 = {
"uuid": FAKE_UUIDS[2],
"title": "Program1",
"subtitle": "",
"type": "All types",
"status": "active",
"marketing_slug": "program1",
"marketing_url": "all types/program1",
"banner_image": {}, # skipped
"courses": [
{
"key": "Organization+DNDv2",
"uuid": "7fe4f68a-abd0-433f-ac9e-1559c12b96e7",
"title": "Drag and Drop Demos",
"course_runs": [{"key": "course-v1:Organization+DNDv2+Run1"}],
},
{
"key": "Organization+VD1",
"uuid": "6d681f1d-856d-4955-8786-a9f3fed6a48f",
"title": "VectorDraw",
"course_runs": [{"key": "course-v1:Organization+VD1+Run1"}],
},
{
"key": "Organization+ENT-1",
"uuid": "7f27580c-f475-413b-851a-529ac90f0bb8",
"title": "Enterprise Tests",
"course_runs": [{"key": "course-v1:Organization+ENT-1+Run1"}],
}
],
"authoring_organizations": [
{
"uuid": "12de950c-6fae-49f7-aaa9-778c2fbdae56",
"key": "edX",
"name": "",
"certificate_logo_image_url": None,
"description": None,
"homepage_url": None,
"tags": [],
"logo_image_url": None,
"marketing_url": None
}
],
"card_image_url": "http://wowslider.com/sliders/demo-10/data/images/dock.jpg",
"is_program_eligible_for_one_click_purchase": False,
"overview": "This is a test Program.",
"min_hours_effort_per_week": 5,
"max_hours_effort_per_week": 10,
"video": {
"src": "http://www.youtube.com/watch?v=3_yD_cEKoCk",
"description": None,
"image": None
},
"expected_learning_items": [],
"faq": [],
"credit_backing_organizations": [
{
"uuid": "12de950c-6fae-49f7-aaa9-778c2fbdae56",
"key": "edX",
"name": "",
"certificate_logo_image_url": None,
"description": None,
"homepage_url": None,
"tags": [],
"logo_image_url": None,
"marketing_url": None
}
],
"corporate_endorsements": [],
"job_outlook_items": [],
"individual_endorsements": [],
"languages": [
"en-us"
],
"transcript_languages": [
"en-us"
],
"subjects": [],
"price_ranges": [],
"staff": [],
"credit_redemption_overview": "This is a test Program.",
"applicable_seat_types": [
"audit"
],
}
FAKE_PROGRAM_RESPONSE2 = {
"uuid": "40732b09-2345-6789-10bc-9e03f9304cdc",
"title": "Program2",
"subtitle": "",
"type": "Prof only",
"status": "active",
"marketing_slug": "program2",
"marketing_url": "prof only/program2",
"banner_image": {}, # skipped
"courses": [
{
"key": "Organization+VD1",
"uuid": "6d681f1d-856d-4955-8786-a9f3fed6a48f",
"title": "VectorDraw",
"course_runs": [
{
"key": "course-v1:Organization+VD1+VD1",
"uuid": "5b949fc1-aa05-42b0-8c9f-8e6114848ae9",
"title": "VectorDraw",
"image": {}, # skipped
"short_description": None,
"marketing_url": None,
"start": "2030-01-01T00:00:00Z",
}
],
},
{
"key": "Organization+ENT-1",
"uuid": "7f27580c-f475-413b-851a-529ac90f0bb8",
"title": "Enterprise Tests",
"course_runs": [
{
"key": "course-v1:Organization+ENT-1+T1",
"uuid": "a2128a84-6e20-4fce-958e-80d9461ef835",
"title": "Enterprise Tests",
"image": {}, # skipped
"short_description": "",
"marketing_url": None
}
],
}
],
}
FAKE_PROGRAM_RESPONSE3 = {
"uuid": "52ad909b-c57d-4ff1-bab3-999813a2479b",
"title": "Program Title 1",
"subtitle": "Program Subtitle 1",
"type": "Professional Certificate",
"status": "active",
"marketing_slug": "marketingslug1",
"marketing_url": "verified-certificate/marketingslug1",
"courses": [
{
"key": 'course-v1:edX+DemoX+Demo_Course',
"uuid": "a312ec52-74ef-434b-b848-f110eb90b672",
"title": "edX Demonstration Course",
"course_runs": [
{
"key": 'course-v1:edX+DemoX+Demo_Course',
"uuid": "a276c25f-c640-4943-98dd-6c9ad8c71bb9",
"title": "edX Demonstration Course",
"short_description": "",
"marketing_url": "course/edxdemo?utm_medium=affiliate_partner&utm_source=staff",
"seats": [],
"start": "2016-01-01T00:00:00Z",
"end": "2018-01-01T00:00:00Z",
"enrollment_start": None,
"enrollment_end": None,
"pacing_type": "self_paced",
"type": None,
"status": "published",
},
],
},
{
"key": 'course-v1:edX+DemoX+Demo_Course2',
"uuid": "b312ec52-74ef-434b-b848-f110eb90b672",
"title": "edX Demonstration Course 2",
"course_runs": [
{
"key": 'course-v1:edX+DemoX+Demo_Course2',
"uuid": "b276c25f-c640-4943-98dd-6c9ad8c71bb9",
"title": "edX Demonstration Course 2",
"short_description": "",
"marketing_url": "course/edxdemo?utm_medium=affiliate_partner&utm_source=staff",
"seats": [],
"start": "2016-01-01T00:00:00Z",
"end": "2018-01-01T00:00:00Z",
"enrollment_start": None,
"enrollment_end": None,
"pacing_type": "self_paced",
"type": None,
"status": "published",
},
],
},
],
"authoring_organizations": [
{
"uuid": "12de950c-6fae-49f7-aaa9-778c2fbdae56",
"key": "edX",
"name": "Authoring Organization",
"certificate_logo_image_url": 'awesome/certificate/logo/url.jpg',
"description": 'Such author, much authoring',
"homepage_url": 'homepage.com/url',
"logo_image_url": 'images/logo_image_url.jpg',
"marketing_url": 'marketing/url',
},
],
"expected_learning_items": [
"Blocks",
"XBlocks",
"Peer Assessment"
],
'corporate_endorsements': [
{
"corporation_name": "Bob's Company",
"statement": "",
"image": {
"src": "http://evonexus.org/wp-content/uploads/2016/01/IBM-logo-1024x576.jpg",
"description": None,
"height": None,
"width": None,
},
"individual_endorsements": [
{
"endorser": {
"uuid": "789aa881-e44b-4675-9377-fa103c12bbfc",
"given_name": "Bob",
"family_name": "the Builder",
"bio": "Working hard on a daily basis!",
"profile_image_url": None,
"slug": "bob-the-builder",
"position": {
"title": "Engineer",
"organization_name": "Bob's Company",
"organization_id": 1
},
"profile_image": {},
"works": [],
"urls": {
"facebook": None,
"twitter": None,
"blog": None,
},
"email": None
},
"quote": "Life is hard for us engineers. Period."
}
]
}
],
"is_program_eligible_for_one_click_purchase": True,
"overview": "This is a test Program.",
"weeks_to_complete_min": 4,
"weeks_to_complete_max": 6,
"min_hours_effort_per_week": 5,
"max_hours_effort_per_week": 10,
"applicable_seat_types": [
"verified",
"professional",
"credit",
],
}
FAKE_PROGRAM_TYPE = {
"name": "Professional Certificate",
"logo_image": {
"small": {
"height": 64,
"width": 64,
"url": "http://localhost:18381/media/media/program_types/logo_images/professional-certificate.small.png"
},
"medium": {
"height": 128,
"width": 128,
"url": "http://localhost:18381/media/media/program_types/logo_images/professional-certificate.medium.png"
},
"large": {
"height": 256,
"width": 256,
"url": "http://localhost:18381/media/media/program_types/logo_images/professional-certificate.large.png"
},
"x-small": {
"height": 32,
"width": 32,
"url": "http://localhost:18381/media/media/program_types/logo_images/professional-certificate.x-small.png"
}
},
"applicable_seat_types": [
"verified",
"professional",
"credit"
],
"slug": "professional-certificate"
}
FAKE_COURSE_RUNS_RESPONSE = [
{
"key": "course-v1:edX+DemoX+Demo_Course",
"uuid": "9f9093b0-58e9-480c-a619-5af5000507bb",
"title": "edX Demonstration Course",
"course": "edX+DemoX",
"start": "2013-02-05T05:00:00Z",
"end": None,
"seats": [
{
"type": "professional",
"price": "1000.00",
"currency": "EUR",
"upgrade_deadline": "2018-01-13T11:19:02Z",
"credit_provider": "",
"credit_hours": None
},
{
"type": "audit",
"price": "0.00",
"currency": "USD",
"upgrade_deadline": None,
"credit_provider": "",
"credit_hours": None
}
],
"programs": []
},
{
"key": "course-v1:Organization+DNDv2+T1",
"uuid": "076cb917-06d6-4713-8a6c-c1712ce2e421",
"title": "Drag and Drop Demos",
"course": "Organization+DNDv2",
"start": "2015-01-01T00:00:00Z",
"end": None,
"video": None,
"seats": [],
"programs": [
{
"uuid": "40782a06-1c37-4779-86aa-0a081f014d4d",
"title": "Program1",
"type": "Prof only",
"marketing_slug": "program1",
"marketing_url": "prof only/program1"
}
]
},
{
"key": "course-v1:Organization+ENT-1+T1",
"uuid": "a2128a84-6e20-4fce-958e-80d9461ef835",
"title": "Enterprise Tests",
"course": "course-v1:Organization+ENT-1+T1",
"start": None,
"end": None,
"seats": [
{
"type": "professional",
"price": "0.00",
"currency": "AZN",
"upgrade_deadline": None,
"credit_provider": "",
"credit_hours": None
},
{
"type": "audit",
"price": "0.00",
"currency": "AED",
"upgrade_deadline": None,
"credit_provider": "",
"credit_hours": None
}
],
"programs": [
{
"uuid": "40782a06-1c37-4779-86aa-0a081f014d4d",
"title": "Program1",
"type": "Prof only",
"marketing_slug": "program1",
"marketing_url": "prof only/program1"
}
]
},
{
"key": "course-v1:Organization+VD1+VD1",
"uuid": "5b949fc1-aa05-42b0-8c9f-8e6114848ae9",
"title": "VectorDraw",
"course": "Organization+VD1",
"start": "2030-01-01T00:00:00Z",
"end": None,
"seats": [
{
"type": "professional",
"price": "12.00",
"currency": "BOB",
"upgrade_deadline": None,
"credit_provider": "",
"credit_hours": None
}
],
"programs": [
{
"uuid": "40782a06-1c37-4779-86aa-0a081f014d4d",
"title": "Program1",
"type": "Prof only",
"marketing_slug": "program1",
"marketing_url": "prof only/program1"
}
]
}
]
FAKE_PROGRAM_RESPONSES = {
FAKE_PROGRAM_RESPONSE1["uuid"]: FAKE_PROGRAM_RESPONSE1,
FAKE_PROGRAM_RESPONSE2["uuid"]: FAKE_PROGRAM_RESPONSE2,
}
FAKE_CATALOG_COURSES_RESPONSE = {
1: [
{
"key": "edX+DemoX",
"uuid": "cf8f5cce-1370-46aa-8162-31fdff55dc7e",
"title": "Fancy Course",
"course_runs": [],
"owners": [
{
"uuid": "366e7739-fb3a-42d0-8351-8c3dbab3e339",
"key": "edX",
"name": "",
"certificate_logo_image_url": None,
"description": None,
"homepage_url": None,
"tags": [],
"logo_image_url": None,
"marketing_url": None
}
],
"image": None,
"short_description": None,
"full_description": None,
"level_type": None,
"subjects": [],
"prerequisites": [],
"expected_learning_items": [],
"video": None,
"sponsors": [],
"modified": "2017-01-16T14:07:47.327605Z",
"marketing_url": "http://lms.example.com/course/edxdemox?utm_source=admin&utm_medium=affiliate_partner"
},
{
"key": "foobar+fb1",
"uuid": "c08c1e43-307c-444b-acc7-aea4a7b9f8f6",
"title": "FooBar Ventures",
"course_runs": [],
"owners": [
{
"uuid": "8d920bc3-a1b2-44db-9380-1d3ca728c275",
"key": "foobar",
"name": "",
"certificate_logo_image_url": None,
"description": None,
"homepage_url": None,
"tags": [],
"logo_image_url": None,
"marketing_url": None
}
],
"image": {
"src": "",
"height": None,
"width": None,
"description": None
},
"short_description": "",
"full_description": "This is a really cool course.",
"level_type": None,
"subjects": [],
"prerequisites": [],
"expected_learning_items": [],
"video": None,
"sponsors": [],
"modified": "2017-03-07T18:37:45.238722Z",
"marketing_url": "http://lms.example.com/course/foobarfb1?utm_source=admin&utm_medium=affiliate_partner"
},
{
"key": "test+course3",
"uuid": "c08c1e43-307c-444b-acc7-aea4a7b9f8f7",
"title": "Test Course for unexpected data",
"course_runs": [],
"owners": [
{
"uuid": "8d920bc3-a1b2-44db-9380-1d3ca728c275",
"key": "foobar",
"name": "",
"certificate_logo_image_url": None,
"description": None,
"homepage_url": None,
"tags": [],
"logo_image_url": None,
"marketing_url": None
}
],
"image": None,
"short_description": "",
"full_description": "This is a really cool course.",
"level_type": None,
"subjects": [],
"prerequisites": [],
"expected_learning_items": [],
"video": None,
"sponsors": [],
"modified": "2017-03-07T18:37:45.238722Z",
"marketing_url": "http://lms.example.com/course/testcourse3?utm_source=admin&utm_medium=affiliate_partner"
},
]
}
FAKE_CATALOG_COURSE_DETAILS_RESPONSES = {
'edX+DemoX': {
"key": "edX+DemoX",
"uuid": "cf8f5cce-1370-46aa-8162-31fdff55dc7e",
"title": "Fancy Course",
"course_runs": [
{
"key": "course-v1:edX+DemoX+Demo_Course",
"uuid": "0a25b789-86d0-43bd-972b-3858a985462e",
"title": "edX Demonstration Course",
"image": {
"src": (
"http://192.168.1.187:8000/asset-v1:edX+DemoX+Demo_"
"Course+type@asset+block@images_course_image.jpg"
),
"height": None,
"width": None,
"description": None
},
"short_description": None,
"marketing_url": None,
"start": "2013-02-05T05:00:00Z",
"end": None,
"enrollment_start": None,
"enrollment_end": None,
"pacing_type": "instructor_paced",
"type": "audit",
"course": "edX+DemoX",
"full_description": None,
"announcement": None,
"video": None,
"seats": [
{
"type": "audit",
"price": "0.00",
"currency": "USD",
"upgrade_deadline": None,
"credit_provider": "",
"credit_hours": None,
"sku": ""
}
],
"content_language": 'en-us',
"transcript_languages": [],
"instructors": [],
"staff": [],
"min_effort": None,
"max_effort": None,
"modified": "2017-03-07T18:37:43.992494Z",
"level_type": None,
"availability": "Upcoming",
"mobile_available": False,
"hidden": False,
"reporting_type": "mooc"
},
],
"owners": [
{
"uuid": "366e7739-fb3a-42d0-8351-8c3dbab3e339",
"key": "edX",
"name": "",
"certificate_logo_image_url": None,
"description": None,
"homepage_url": None,
"tags": [],
"logo_image_url": None,
"marketing_url": None
}
],
"image": None,
"short_description": None,
"full_description": None,
"level_type": None,
"subjects": [],
"prerequisites": [],
"expected_learning_items": [],
"video": None,
"sponsors": [],
"modified": "2017-01-16T14:07:47.327605Z",
"marketing_url": "http://lms.example.com/course/edxdemox?utm_source=admin&utm_medium=affiliate_partner",
"programs": [
{
"uuid": "643b89e6-04bc-4367-b292-9d3991d86b8e",
"title": "My Cool Program",
"type": "SuperAwesome",
"marketing_slug": "coolstuff",
"marketing_url": "http://lms.example.com/coolstuff"
}
]
},
'foobar+fb1': {
"key": "foobar+fb1",
"uuid": "c08c1e43-307c-444b-acc7-aea4a7b9f8f6",
"title": "FooBar Ventures",
"course_runs": [
{
"key": "course-v1:foobar+fb1+fbv1",
"uuid": "3550853f-e65a-492e-8781-d0eaa16dd538",
"title": "Other Course Name",
"image": {
"src": (
"http://192.168.1.187:8000/asset-v1:foobar+fb1+fbv1"
"+type@asset+block@images_course_image.jpg"
),
"height": None,
"width": None,
"description": None
},
"short_description": "",
"marketing_url": None,
"start": "2015-01-01T00:00:00Z",
"end": None,
"enrollment_start": None,
"enrollment_end": None,
"pacing_type": "instructor_paced",
"type": None,
"course": "foobar+fb1",
"full_description": "This is a really cool course. Like, we promise.",
"announcement": None,
"video": None,
"seats": [],
"content_language": None,
"transcript_languages": [],
"instructors": [],
"staff": [],
"min_effort": None,
"max_effort": None,
"modified": "2017-03-07T18:37:45.082681Z",
"level_type": None,
"availability": "Upcoming",
"mobile_available": False,
"hidden": False,
"reporting_type": "mooc"
}
],
"owners": [
{
"uuid": "8d920bc3-a1b2-44db-9380-1d3ca728c275",
"key": "foobar",
"name": "",
"certificate_logo_image_url": None,
"description": None,
"homepage_url": None,
"tags": [],
"logo_image_url": None,
"marketing_url": None
}
],
"image": {
"src": "",
"height": None,
"width": None,
"description": None
},
"short_description": "",
"full_description": "This is a really cool course.",
"level_type": None,
"subjects": [],
"prerequisites": [],
"expected_learning_items": [],
"video": None,
"sponsors": [],
"modified": "2017-03-07T18:37:45.238722Z",
"marketing_url": "http://lms.example.com/course/foobarfb1?utm_source=admin&utm_medium=affiliate_partner",
"programs": []
},
'test+course3': {
"key": "test+course3",
"uuid": "c08c1e43-307c-444b-acc7-aea4a7b9f8f6",
"title": "Test Course with unexpected data",
"course_runs": [
{
"key": "course-v1:test+course3+fbv1",
"uuid": "3550853f-e65a-492e-8781-d0eaa16dd538",
"title": "Other Course Name",
"image": None,
"short_description": "",
"marketing_url": None,
"start": "2015-01-01T00:00:00Z",
"end": None,
"enrollment_start": None,
"enrollment_end": None,
"pacing_type": "instructor_paced",
"type": None,
"course": "foobar+fb1",
"full_description": "This is a really cool course. Like, we promise.",
"announcement": None,
"video": None,
"seats": [],
"content_language": None,
"transcript_languages": [],
"instructors": [],
"staff": [],
"min_effort": None,
"max_effort": None,
"modified": "2017-03-07T18:37:45.082681Z",
"level_type": None,
"availability": "Upcoming",
"mobile_available": False,
"hidden": False,
"reporting_type": "mooc"
}
],
"owners": [
{
"uuid": "8d920bc3-a1b2-44db-9380-1d3ca728c275",
"key": "foobar",
"name": "",
"certificate_logo_image_url": None,
"description": None,
"homepage_url": None,
"tags": [],
"logo_image_url": None,
"marketing_url": None
}
],
"image": None,
"short_description": "",
"full_description": "This is a really cool course.",
"level_type": None,
"subjects": [],
"prerequisites": [],
"expected_learning_items": [],
"video": None,
"sponsors": [],
"modified": "2017-03-07T18:37:45.238722Z",
"marketing_url": "http://lms.example.com/course/test+course3?utm_source=admin&utm_medium=affiliate_partner",
"programs": []
}
}
FAKE_CATALOG_COURSE_PAGINATED_RESPONSE = {
'count': 3,
'next': 'http://testserver/api/v1/catalogs/1/courses?page=3',
'previous': 'http://testserver/api/v1/catalogs/1/courses?page=1',
'results': [
{
'owners': [
{
'description': None,
'tags': [],
'name': '',
'homepage_url': None,
'key': 'edX',
'certificate_logo_image_url': None,
'marketing_url': None,
'logo_image_url': None,
'uuid': FAKE_UUIDS[1]
}
],
'uuid': FAKE_UUIDS[2],
'title': 'edX Demonstration Course',
'prerequisites': [],
'image': None,
'expected_learning_items': [],
'sponsors': [],
'modified': '2017-03-03T07:34:19.322916Z',
'full_description': None,
'subjects': [],
'video': None,
'key': 'edX+DemoX',
'short_description': None,
'marketing_url': None,
'level_type': None,
'course_runs': []
}
]
}
FAKE_SEARCH_ALL_COURSE_RESULT = {
"title": "edX Demonstration Course",
"min_effort": None,
"marketing_url": "course/course-v1:edX+DemoX+Demo_Course/about",
"image_url": "https://business.sandbox.edx.org/asset-v1:edX+DemoX+Demo_Course+type"
"@asset+block@images_course_image.jpg",
"pacing_type": "instructor_paced",
"short_description": None,
"subject_uuids": [],
"transcript_languages": [],
"course_runs": [],
"full_description": None,
"seat_types": [
"audit",
"verified"
],
"mobile_available": False,
"end": None,
"partner": "edx",
"max_effort": None,
"start": "2013-02-05T05:00:00",
"weeks_to_complete": None,
"published": True,
"content_type": "courserun",
"has_enrollable_seats": True,
"authoring_organization_uuids": [
"12de950c-6fae-49f7-aaa9-778c2fbdae56"
],
"enrollment_start": None,
"staff_uuids": [],
"language": None,
"number": "DemoX",
"type": "verified",
"key": "course-v1:edX+DemoX+Demo_Course",
"org": "edX",
"level_type": None,
"program_types": [],
"aggregation_key": "courserun:edX+DemoX",
"logo_image_urls": [
None
],
"enrollment_end": None,
"availability": "Upcoming"
}
FAKE_SEARCH_ALL_SHORT_COURSE_RESULT = {
"title": "edX Demonstration Course",
"full_description": "Lorem ipsum dolor sit amet, consectetur adipiscing elit.",
"key": "edX+DemoX",
"short_description": None,
"aggregation_key": "course:edX+DemoX",
"content_type": "course",
"course_runs": [],
}
FAKE_SEARCH_ALL_SHORT_COURSE_RESULT_LIST = [
{
"title": "edX Demonstration Course 2",
"full_description": None,
"key": "edX+DemoX+2",
"short_description": None,
"aggregation_key": "course:edX+DemoX",
"content_type": "course",
"course_runs": [],
},
{
"title": "edX Demonstration Course 3",
"full_description": None,
"key": "edX+DemoX+3",
"short_description": None,
"aggregation_key": "course:edX+DemoX",
"content_type": "course",
"course_runs": [],
},
]
FAKE_SEARCH_ALL_PROGRAM_RESULT_1 = {
"title": "Program Title 1",
"marketing_url": "professional-certificate/marketingslug1",
"content_type": "program",
"card_image_url": "http://wowslider.com/sliders/demo-10/data/images/dock.jpg",
"min_hours_effort_per_week": 5,
"authoring_organization_uuids": [
"12de950c-6fae-49f7-aaa9-778c2fbdae56"
],
"hidden": False,
"authoring_organizations": [
{
"marketing_url": None,
"homepage_url": None,
"tags": [],
"certificate_logo_image_url": None,
"name": "",
"key": "edX",
"description": None,
"uuid": "12de950c-6fae-49f7-aaa9-778c2fbdae56",
"logo_image_url": None
}
],
"staff_uuids": [],
"published": True,
"uuid": FAKE_UUIDS[3],
"max_hours_effort_per_week": 10,
"subject_uuids": [],
"weeks_to_complete_min": None,
"type": "Professional Certificate",
"language": [
"English"
],
"partner": "edx",
"subtitle": "Program Subtitle 1",
"status": "active",
"weeks_to_complete_max": None,
"aggregation_key": "program:" + FAKE_UUIDS[3],
'enrollment_url': FAKE_URL,
"is_program_eligible_for_one_click_purchase": True
}
FAKE_SEARCH_ALL_PROGRAM_RESULT_2 = {
"title": "Program Title 2",
"marketing_url": "professional-certificate/marketingslug2",
"content_type": "program",
"card_image_url": "http://wowslider.com/sliders/demo-10/data/images/dock.jpg",
"min_hours_effort_per_week": 5,
"authoring_organization_uuids": [
"12de950c-6fae-49f7-aaa9-778c2fbdae56"
],
"hidden": False,
"authoring_organizations": [
{
"marketing_url": None,
"homepage_url": None,
"tags": [],
"certificate_logo_image_url": None,
"name": "",
"key": "edX",
"description": None,
"uuid": "12de950c-6fae-49f7-aaa9-778c2fbdae56",
"logo_image_url": None
}
],
"staff_uuids": [],
"published": True,
"uuid": FAKE_UUIDS[2],
"max_hours_effort_per_week": 10,
"subject_uuids": [],
"weeks_to_complete_min": None,
"type": "Professional Certificate",
"language": [
"English"
],
"partner": "edx",
"subtitle": "Program Subtitle 1",
"status": "active",
"weeks_to_complete_max": None,
"aggregation_key": "program:" + FAKE_UUIDS[3],
"is_program_eligible_for_one_click_purchase": True
}
FAKE_SEARCH_ALL_RESULTS = {
"count": 3,
"next": None,
"previous": None,
"results": [
FAKE_SEARCH_ALL_COURSE_RESULT,
FAKE_SEARCH_ALL_SHORT_COURSE_RESULT,
FAKE_SEARCH_ALL_PROGRAM_RESULT_1,
]
}
FAKE_SEARCH_ALL_RESULTS_2 = {
"count": 2,
"next": None,
"previous": None,
"results": [
FAKE_SEARCH_ALL_COURSE_RESULT,
FAKE_SEARCH_ALL_PROGRAM_RESULT_1,
]
}
FAKE_SEARCH_ALL_COURSE_RESULT_1 = copy.deepcopy(FAKE_SEARCH_ALL_COURSE_RESULT)
FAKE_SEARCH_ALL_COURSE_RESULT_1['marketing_url'] = None
FAKE_SEARCH_ALL_COURSE_RESULT_1['key'] = "course-v1:test+test+DemoX+Demo_Course"
FAKE_SEARCH_ALL_RESULTS_3 = {
"count": 3,
"next": None,
"previous": None,
"results": [
FAKE_SEARCH_ALL_COURSE_RESULT_1,
FAKE_SEARCH_ALL_SHORT_COURSE_RESULT,
FAKE_SEARCH_ALL_PROGRAM_RESULT_1,
]
}
FAKE_SEARCH_ALL_RESULTS_WITH_PAGINATION = {
"count": 2,
"next": "https://fake.server/api/v1/?page=1",
"previous": "https://fake.server/api/v1/?page=2",
"results": [
FAKE_SEARCH_ALL_COURSE_RESULT,
FAKE_SEARCH_ALL_PROGRAM_RESULT_1,
]
}
FAKE_SEARCH_ALL_COURSE_RESULT_2 = copy.deepcopy(FAKE_SEARCH_ALL_COURSE_RESULT)
FAKE_SEARCH_ALL_COURSE_RESULT_2['has_enrollable_seats'] = False
FAKE_SEARCH_ALL_COURSE_RESULT_2["key"] = "course-v1:test+DemoX+Demo_Course"
FAKE_SEARCH_ALL_PROGRAM_RESULT_3 = copy.deepcopy(FAKE_SEARCH_ALL_PROGRAM_RESULT_2)
FAKE_SEARCH_ALL_PROGRAM_RESULT_3['is_program_eligible_for_one_click_purchase'] = False
FAKE_SEARCH_ALL_PROGRAM_RESULT_3['uuid'] = FAKE_UUIDS[1]
FAKE_SEARCH_ALL_RESULTS_WITH_PAGINATION_1 = {
"count": 5,
"next": "https://fake.server/api/v1/?page=1",
"previous": "https://fake.server/api/v1/?page=4",
"results": [
FAKE_SEARCH_ALL_COURSE_RESULT_1,
FAKE_SEARCH_ALL_COURSE_RESULT_2,
FAKE_SEARCH_ALL_SHORT_COURSE_RESULT,
FAKE_SEARCH_ALL_PROGRAM_RESULT_1,
FAKE_SEARCH_ALL_PROGRAM_RESULT_3,
]
}
FAKE_SEARCH_ALL_COURSE_RESULT_3 = {
"content_type": "course",
"full_description": "This is full description of course",
"aggregation_key": "course:edX+DemoX",
"key": "edX+DemoX",
"short_description": "This is short description of course",
"title": "edX Demonstration Course",
"card_image_url": "http://local:18000/asset-v1:edX+DemoX+Demo_Course+type@asset+block@images_course_image.jpg",
"subjects": [],
"organizations": [
"edX: "
],
"uuid": "4424529e-23aa-489b-b25a-800f52e05b66",
"languages": [],
"course_runs": [
{
"enrollment_end": None,
"enrollment_mode": "verified",
"key": "course-v1:edX+DemoX+Demo_Course",
"enrollment_start": None,
"pacing_type": "instructor_paced",
"end": None,
"start": "2013-02-05T05:00:00Z",
"go_live_date": None,
"availability": "Current"
}
],
}
def get_catalog_courses(catalog_id):
"""
Fake implementation returning catalog courses by ID.
Arguments:
catalog_id (int): Catalog ID
Returns:
list: Details of the courses included in the catalog
"""
return FAKE_CATALOG_COURSES_RESPONSE.get(catalog_id, [])
def get_course_details(course_key):
"""
Fake implementation returning course details by key.
Arguments:
course_key (str): The course key of the course; not the unique-per-run key.
Returns:
dict: Details of the course.
"""
return FAKE_CATALOG_COURSE_DETAILS_RESPONSES.get(course_key, {}).copy()
def get_program_by_uuid(program_uuid):
"""
Fake implementation returning program by UUID.
Arguments:
program_uuid(string): Program UUID in string form
Returns:
dict: Program data provided by Course Catalog API
"""
return FAKE_PROGRAM_RESPONSES.get(program_uuid).copy()
def get_program_by_title(program_title):
"""
Fake implementation returning program by title.
Arguments:
program_title(str): Program title as seen by students and in Course Catalog Admin
Returns:
dict: Program data provided by Course Catalog API
"""
try:
return next(response for response in FAKE_PROGRAM_RESPONSES.values() if response["title"] == program_title)
except StopIteration:
return None
def get_common_course_modes(course_runs):
"""
Fake implementation returning common course modes.
Arguments:
course_run_ids(Iterable[str]): Target Course run IDs.
Returns:
set: course modes found in all given course runs
"""
course_run_modes = [
set(seat.get("type") for seat in course_run.get("seats"))
for course_run in FAKE_COURSE_RUNS_RESPONSE
if course_run.get("key") in course_runs
]
return six_reduce(lambda left, right: left & right, course_run_modes)
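# Editor's addition: a hedged worked example based on the fake data above.
# The edX demo run and the ENT-1 run both expose "professional" and "audit"
# seats, so their common modes are {"audit", "professional"}:
#
#   get_common_course_modes(["course-v1:edX+DemoX+Demo_Course",
#                            "course-v1:Organization+ENT-1+T1"])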
def setup_course_catalog_api_client_mock(
client_mock,
course_overrides=None,
course_run_overrides=None,
program_overrides=None,
program_type_overrides=None,
):
"""
Set up the Course Catalog API client mock.
Args:
----
client_mock (Mock): The mock course catalog api client.
course_overrides (dict): Dictionary containing overrides of the fake course metadata values.
course_run_overrides (dict): Dictionary containing overrides of the fake course run metadata values.
program_overrides (dict): Dictionary containing overrides of the fake program metadata values.
program_type_overrides (dict): Dictionary containing overrides of the fake program type metadata values.
"""
client = client_mock.return_value
fake_course = FAKE_COURSE.copy()
fake_course_run = FAKE_COURSE_RUN.copy()
fake_program = FAKE_PROGRAM_RESPONSE3.copy()
fake_program_type = FAKE_PROGRAM_TYPE.copy()
fake_search_all_course_result = FAKE_SEARCH_ALL_COURSE_RESULT.copy()
# Apply overrides to default fake course catalog metadata.
if course_overrides:
fake_course.update(course_overrides)
if course_run_overrides:
fake_course_run.update(course_run_overrides)
if program_overrides:
fake_program.update(program_overrides)
if program_type_overrides:
fake_program_type.update(program_type_overrides)
# Mock course catalog api functions.
client.get_course_details.return_value = fake_course
client.get_course_run.return_value = fake_course_run
client.get_course_id.return_value = fake_course['key']
client.get_course_and_course_run.return_value = (fake_course, fake_course_run)
client.get_program_course_keys.return_value = [course['key'] for course in fake_program['courses']]
client.get_program_by_uuid.return_value = fake_program
client.get_program_type_by_slug.return_value = fake_program_type
client.get_catalog_results.return_value = {'results': [fake_search_all_course_result]}
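# Editor's addition: a hedged usage sketch. A test would typically patch the
# catalog API client class and hand the resulting mock to the helper above;
# the patch target shown here is an assumption built from
# CourseDiscoveryApiTestMixin.CATALOG_API_PATCH_PREFIX further down:
#
#   client_mock = mock.patch(
#       "enterprise.api_client.discovery.CourseCatalogApiClient").start()
#   setup_course_catalog_api_client_mock(
#       client_mock, course_overrides={"title": "Overridden Title"})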
def create_course_run_dict(start="2014-10-14T13:11:03Z", end="3000-10-13T13:11:01Z",
enrollment_start="2014-10-13T13:11:03Z",
enrollment_end="2999-10-13T13:11:04Z",
upgrade_deadline="3000-10-13T13:11:04Z",
availability="Starting Soon",
status="published",
weeks_to_complete=1):
"""
Return enrollable and upgradeable course run dict.
"""
return {
"start": start,
"end": end,
"status": status,
"enrollment_start": enrollment_start,
"enrollment_end": enrollment_end,
"seats": [{"type": "verified", "upgrade_deadline": upgrade_deadline}],
"availability": availability,
"weeks_to_complete": weeks_to_complete
}
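# Editor's addition: a hedged usage sketch. Overriding the defaults yields runs
# in other states, e.g. a run whose enrollment window and upgrade deadline are
# already in the past (dates are illustrative):
#
#   expired_run = create_course_run_dict(
#       end="2014-10-14T13:11:01Z",
#       enrollment_end="2014-10-13T13:11:04Z",
#       upgrade_deadline="2014-10-14T13:11:04Z",
#   )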
def get_fake_content_metadata():
"""
Returns a fake response from EnterpriseCatalogApiClient.get_content_metadata.
"""
content_metadata = OrderedDict()
content_metadata[FAKE_COURSE_RUN['key']] = FAKE_COURSE_RUN
content_metadata[FAKE_COURSE['key']] = FAKE_COURSE
content_metadata[FAKE_SEARCH_ALL_PROGRAM_RESULT_1['uuid']] = FAKE_SEARCH_ALL_PROGRAM_RESULT_1
return list(content_metadata.values())
class CourseDiscoveryApiTestMixin:
"""
Mixin for course discovery API test classes.
"""
CATALOG_API_PATCH_PREFIX = "enterprise.api_client.discovery"
def _make_catalog_api_location(self, catalog_api_member):
"""
Return path for `catalog_api_member` to mock.
"""
return "{}.{}".format(self.CATALOG_API_PATCH_PREFIX, catalog_api_member)
def _make_patch(self, patch_location, new=None):
"""
Patch `patch_location`, register the patch to stop at test cleanup and return mock object.
"""
patch_mock = new if new is not None else mock.Mock()
patcher = mock.patch(patch_location, patch_mock)
patcher.start()
self.addCleanup(patcher.stop)
return patch_mock
@staticmethod
def _get_important_parameters(get_data_mock):
"""
Return important (i.e. varying) parameters to get_edx_api_data.
"""
args, kwargs = get_data_mock.call_args
# This test is to make sure that all calls to get_edx_api_data are made using kwargs
# and there is no positional argument. This is required as changes in get_edx_api_data's
# signature are breaking edx-enterprise and using kwargs would reduce that.
assert args == ()
return kwargs.get('resource', None), kwargs.get('resource_id', None)
|
renhaoqi/gem5-stable
|
refs/heads/master
|
src/cpu/testers/directedtest/RubyDirectedTester.py
|
69
|
# Copyright (c) 2010 Advanced Micro Devices, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Brad Beckmann
from m5.SimObject import SimObject
from MemObject import MemObject
from m5.params import *
from m5.proxy import *
class DirectedGenerator(SimObject):
type = 'DirectedGenerator'
abstract = True
cxx_header = "cpu/testers/directedtest/DirectedGenerator.hh"
num_cpus = Param.Int("num of cpus")
system = Param.System(Parent.any, "System we belong to")
class SeriesRequestGenerator(DirectedGenerator):
type = 'SeriesRequestGenerator'
cxx_header = "cpu/testers/directedtest/SeriesRequestGenerator.hh"
addr_increment_size = Param.Int(64, "address increment size")
num_series = Param.UInt32(1,
"number of different address streams to generate")
percent_writes = Param.Percent(50, "percent of access that are writes")
class InvalidateGenerator(DirectedGenerator):
type = 'InvalidateGenerator'
cxx_header = "cpu/testers/directedtest/InvalidateGenerator.hh"
addr_increment_size = Param.Int(64, "address increment size")
class RubyDirectedTester(MemObject):
type = 'RubyDirectedTester'
cxx_header = "cpu/testers/directedtest/RubyDirectedTester.hh"
cpuPort = VectorMasterPort("the cpu ports")
requests_to_complete = Param.Int("checks to complete")
generator = Param.DirectedGenerator("the request generator")
|
weblyzard/ewrt
|
refs/heads/develop
|
src/eWRT/lib/Result.py
|
1
|
#!/usr/bin/env python
# Result is an item of ResultSet
from builtins import object
class Result(object):
# constructor
# @parameter id, name
def __init__(self, id, name):
self.id = id
self.name = name
# get the ID of Result
# @return Id
def getId(self):
return self.id
# get the Name of the Result
# @return Name
def getName(self):
return self.name
def getAttributes(self):
""" todo: return attributes of Result """
|
brian-l/django-1.4.10
|
refs/heads/master
|
tests/regressiontests/custom_columns_regress/models.py
|
34
|
"""
Regression for #9736.
Checks some pathological column naming to make sure it doesn't break
table creation or queries.
"""
from django.db import models
class Article(models.Model):
Article_ID = models.AutoField(primary_key=True, db_column='Article ID')
headline = models.CharField(max_length=100)
authors = models.ManyToManyField('Author', db_table='my m2m table')
primary_author = models.ForeignKey('Author', db_column='Author ID', related_name='primary_set')
def __unicode__(self):
return self.headline
class Meta:
ordering = ('headline',)
class Author(models.Model):
Author_ID = models.AutoField(primary_key=True, db_column='Author ID')
first_name = models.CharField(max_length=30, db_column='first name')
last_name = models.CharField(max_length=30, db_column='last name')
def __unicode__(self):
return u'%s %s' % (self.first_name, self.last_name)
class Meta:
db_table = 'my author table'
ordering = ('last_name','first_name')
|
kustodian/ansible
|
refs/heads/devel
|
lib/ansible/plugins/doc_fragments/acme.py
|
12
|
# -*- coding: utf-8 -*-
# Copyright: (c) 2016 Michael Gruener <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
class ModuleDocFragment(object):
# Standard files documentation fragment
DOCUMENTATION = r'''
notes:
- "If a new enough version of the C(cryptography) library
is available (see Requirements for details), it will be used
instead of the C(openssl) binary. This can be explicitly disabled
or enabled with the C(select_crypto_backend) option. Note that using
the C(openssl) binary will be slower and less secure, as private key
contents always have to be stored on disk (see
C(account_key_content))."
- "Although the defaults are chosen so that the module can be used with
the L(Let's Encrypt,https://letsencrypt.org/) CA, the module can in
principle be used with any CA providing an ACME endpoint, such as
L(Buypass Go SSL,https://www.buypass.com/ssl/products/acme)."
requirements:
- python >= 2.6
- either openssl or L(cryptography,https://cryptography.io/) >= 1.5
options:
account_key_src:
description:
- "Path to a file containing the ACME account RSA or Elliptic Curve
key."
- "RSA keys can be created with C(openssl genrsa ...). Elliptic curve keys can
be created with C(openssl ecparam -genkey ...). Any other tool creating
private keys in PEM format can be used as well."
- "Mutually exclusive with C(account_key_content)."
- "Required if C(account_key_content) is not used."
type: path
aliases: [ account_key ]
account_key_content:
description:
- "Content of the ACME account RSA or Elliptic Curve key."
- "Mutually exclusive with C(account_key_src)."
- "Required if C(account_key_src) is not used."
- "*Warning:* the content will be written into a temporary file, which will
be deleted by Ansible when the module completes. Since this is an
important private key — it can be used to change the account key,
or to revoke your certificates without knowing their private keys
—, this might not be acceptable."
- "In case C(cryptography) is used, the content is not written into a
temporary file. It can still happen that it is written to disk by
Ansible in the process of moving the module with its argument to
the node where it is executed."
type: str
version_added: "2.5"
account_uri:
description:
- "If specified, assumes that the account URI is as given. If the
account key does not match this account, or an account with this
URI does not exist, the module fails."
type: str
version_added: "2.7"
acme_version:
description:
- "The ACME version of the endpoint."
- "Must be 1 for the classic Let's Encrypt and Buypass ACME endpoints,
or 2 for standardized ACME v2 endpoints."
- "The default value is 1. Note that in Ansible 2.14, this option *will
be required* and will no longer have a default."
- "Please also note that we will deprecate ACME v1 support eventually."
type: int
choices: [ 1, 2 ]
version_added: "2.5"
acme_directory:
description:
- "The ACME directory to use. This is the entry point URL to access
CA server API."
- "For safety reasons the default is set to the Let's Encrypt staging
server (for the ACME v1 protocol). This will create technically correct,
but untrusted certificates."
- "The default value is U(https://acme-staging.api.letsencrypt.org/directory).
Note that in Ansible 2.14, this option *will be required* and will no longer
have a default."
- "For Let's Encrypt, all staging endpoints can be found here:
U(https://letsencrypt.org/docs/staging-environment/). For Buypass, all
endpoints can be found here:
U(https://community.buypass.com/t/63d4ay/buypass-go-ssl-endpoints)"
- "For Let's Encrypt, the production directory URL for ACME v1 is
U(https://acme-v01.api.letsencrypt.org/directory), and the production
directory URL for ACME v2 is U(https://acme-v02.api.letsencrypt.org/directory)."
- "For Buypass, the production directory URL for ACME v2 and v1 is
U(https://api.buypass.com/acme/directory)."
- "*Warning:* So far, the module has only been tested against Let's Encrypt
(staging and production), Buypass (staging and production), and
L(Pebble testing server,https://github.com/letsencrypt/Pebble)."
type: str
validate_certs:
description:
- Whether calls to the ACME directory will validate TLS certificates.
- "*Warning:* Should *only ever* be set to C(no) for testing purposes,
for example when testing against a local Pebble server."
type: bool
default: yes
version_added: "2.5"
select_crypto_backend:
description:
- Determines which crypto backend to use.
- The default choice is C(auto), which tries to use C(cryptography) if available, and falls back to
C(openssl).
- If set to C(openssl), will try to use the C(openssl) binary.
- If set to C(cryptography), will try to use the
L(cryptography,https://cryptography.io/) library.
type: str
default: auto
choices: [ auto, cryptography, openssl ]
version_added: "2.7"
'''
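# Editor's addition: a hedged, minimal sketch of how the shared options above
# are typically used by an acme_* module. The module name, file paths and the
# parameter set are illustrative assumptions, not an exhaustive or verified
# task definition:
#
#   - name: Obtain a certificate from the Let's Encrypt staging ACME v2 endpoint
#     acme_certificate:
#       account_key_src: /etc/pki/cert/private/account.key
#       csr: /etc/pki/cert/csr/example.com.csr
#       dest: /etc/httpd/ssl/example.com.crt
#       acme_directory: https://acme-staging-v02.api.letsencrypt.org/directory
#       acme_version: 2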
|
fkmhrk/kiilib_python
|
refs/heads/master
|
kiilib/demo/uploadFile.py
|
1
|
#!/usr/bin/python
import sys
import os
# Python Tutorial 6.1.2. "The directory containing the script being run is placed at the beginning of the search path, ahead of the standard library path."
sys.path.append(sys.path[0] + "/../..")
import kiilib
from config import *
def main():
context = kiilib.KiiContext(APP_ID, APP_KEY, BASE_URL)
api = kiilib.AppAPI(context)
user = api.login('fkmtest', 'password1234')
print 'access token = %s' % (context.access_token)
# create object
bucket = kiilib.KiiBucket(user, 'images')
obj = api.objectAPI.create(bucket, {})
print 'object id = %s' % (obj.id)
# upload body
filePath = sys.path[0] + '/image.jpg'
api.objectAPI.updateBody(obj, 'image/jpeg',
open(filePath, 'rb'), os.path.getsize(filePath))
print 'file uploaded'
# download body
with open('downloaded.jpg', 'wb') as target:
api.objectAPI.downloadBody(obj, target)
print 'file downloaded'
if __name__ == '__main__':
main()
|
Ishiihara/kafka
|
refs/heads/trunk
|
tests/kafkatest/tests/core/transactions_test.py
|
6
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from kafkatest.services.zookeeper import ZookeeperService
from kafkatest.services.kafka import KafkaService
from kafkatest.services.console_consumer import ConsoleConsumer
from kafkatest.services.verifiable_producer import VerifiableProducer
from kafkatest.services.transactional_message_copier import TransactionalMessageCopier
from kafkatest.utils import is_int
from ducktape.tests.test import Test
from ducktape.mark import matrix
from ducktape.mark.resource import cluster
from ducktape.utils.util import wait_until
class TransactionsTest(Test):
"""Tests transactions by transactionally copying data from a source topic to
a destination topic and killing the copy process as well as the broker
randomly through the process. In the end we verify that the final output
topic contains exactly one committed copy of each message in the input
topic
"""
def __init__(self, test_context):
""":type test_context: ducktape.tests.test.TestContext"""
super(TransactionsTest, self).__init__(test_context=test_context)
self.input_topic = "input-topic"
self.output_topic = "output-topic"
self.num_brokers = 3
# Test parameters
self.num_input_partitions = 2
self.num_output_partitions = 3
self.num_seed_messages = 100000
self.transaction_size = 750
self.consumer_group = "transactions-test-consumer-group"
self.zk = ZookeeperService(test_context, num_nodes=1)
self.kafka = KafkaService(test_context,
num_nodes=self.num_brokers,
zk=self.zk)
def setUp(self):
self.zk.start()
def seed_messages(self, topic, num_seed_messages):
seed_timeout_sec = 10000
seed_producer = VerifiableProducer(context=self.test_context,
num_nodes=1,
kafka=self.kafka,
topic=topic,
message_validator=is_int,
max_messages=num_seed_messages,
enable_idempotence=True)
seed_producer.start()
wait_until(lambda: seed_producer.num_acked >= num_seed_messages,
timeout_sec=seed_timeout_sec,
err_msg="Producer failed to produce messages %d in %ds." %\
(self.num_seed_messages, seed_timeout_sec))
return seed_producer.acked
def get_messages_from_topic(self, topic, num_messages):
consumer = self.start_consumer(topic, group_id="verifying_consumer")
return self.drain_consumer(consumer, num_messages)
def bounce_brokers(self, clean_shutdown):
for node in self.kafka.nodes:
if clean_shutdown:
self.kafka.restart_node(node, clean_shutdown = True)
else:
self.kafka.stop_node(node, clean_shutdown = False)
wait_until(lambda: len(self.kafka.pids(node)) == 0 and not self.kafka.is_registered(node),
timeout_sec=self.kafka.zk_session_timeout + 5,
err_msg="Failed to see timely deregistration of \
hard-killed broker %s" % str(node.account))
self.kafka.start_node(node)
def create_and_start_message_copier(self, input_topic, input_partition, output_topic, transactional_id):
message_copier = TransactionalMessageCopier(
context=self.test_context,
num_nodes=1,
kafka=self.kafka,
transactional_id=transactional_id,
consumer_group=self.consumer_group,
input_topic=input_topic,
input_partition=input_partition,
output_topic=output_topic,
max_messages=-1,
transaction_size=self.transaction_size
)
message_copier.start()
wait_until(lambda: message_copier.alive(message_copier.nodes[0]),
timeout_sec=10,
err_msg="Message copier failed to start after 10 s")
return message_copier
def bounce_copiers(self, copiers, clean_shutdown):
for _ in range(3):
for copier in copiers:
wait_until(lambda: copier.progress_percent() >= 20.0,
timeout_sec=30,
err_msg="%s : Message copier didn't make enough progress in 30s. Current progress: %s" \
% (copier.transactional_id, str(copier.progress_percent())))
self.logger.info("%s - progress: %s" % (copier.transactional_id,
str(copier.progress_percent())))
copier.restart(clean_shutdown)
def create_and_start_copiers(self, input_topic, output_topic, num_copiers):
copiers = []
for i in range(0, num_copiers):
copiers.append(self.create_and_start_message_copier(
input_topic=input_topic,
output_topic=output_topic,
input_partition=i,
transactional_id="copier-" + str(i)
))
return copiers
def start_consumer(self, topic_to_read, group_id):
consumer = ConsoleConsumer(context=self.test_context,
num_nodes=1,
kafka=self.kafka,
topic=topic_to_read,
group_id=group_id,
new_consumer=True,
message_validator=is_int,
from_beginning=True,
isolation_level="read_committed")
consumer.start()
# ensure that the consumer is up.
wait_until(lambda: len(consumer.messages_consumed[1]) > 0,
timeout_sec=60,
err_msg="Consumer failed to consume any messages for %ds" %\
60)
return consumer
def drain_consumer(self, consumer, num_messages):
# wait until we read at least the expected number of messages.
# This is a safe check because both failure modes will be caught:
# 1. If we have 'num_seed_messages' but there are duplicates, then
# this is checked for later.
#
# 2. If we never reach 'num_seed_messages', then this will cause the
# test to fail.
wait_until(lambda: len(consumer.messages_consumed[1]) >= num_messages,
timeout_sec=90,
err_msg="Consumer consumed only %d out of %d messages in %ds" %\
(len(consumer.messages_consumed[1]), num_messages, 90))
consumer.stop()
return consumer.messages_consumed[1]
def copy_messages_transactionally(self, failure_mode, bounce_target,
input_topic, output_topic,
num_copiers, num_messages_to_copy):
"""Copies messages transactionally from the seeded input topic to the
output topic, either bouncing brokers or clients in a hard and soft
way as it goes.
This method also consumes messages in read_committed mode from the
output topic while the bounces and copy is going on.
It returns the concurrently consumed messages.
"""
copiers = self.create_and_start_copiers(input_topic=input_topic,
output_topic=output_topic,
num_copiers=num_copiers)
concurrent_consumer = self.start_consumer(output_topic,
group_id="concurrent_consumer")
clean_shutdown = False
if failure_mode == "clean_bounce":
clean_shutdown = True
if bounce_target == "brokers":
self.bounce_brokers(clean_shutdown)
elif bounce_target == "clients":
self.bounce_copiers(copiers, clean_shutdown)
for copier in copiers:
wait_until(lambda: copier.is_done,
timeout_sec=120,
err_msg="%s - Failed to copy all messages in %ds." %\
(copier.transactional_id, 120))
self.logger.info("finished copying messages")
return self.drain_consumer(concurrent_consumer, num_messages_to_copy)
def setup_topics(self):
self.kafka.topics = {
self.input_topic: {
"partitions": self.num_input_partitions,
"replication-factor": 3,
"configs": {
"min.insync.replicas": 2
}
},
self.output_topic: {
"partitions": self.num_output_partitions,
"replication-factor": 3,
"configs": {
"min.insync.replicas": 2
}
}
}
@cluster(num_nodes=9)
@matrix(failure_mode=["hard_bounce", "clean_bounce"],
bounce_target=["brokers", "clients"],
check_order=[True, False])
def test_transactions(self, failure_mode, bounce_target, check_order):
security_protocol = 'PLAINTEXT'
self.kafka.security_protocol = security_protocol
self.kafka.interbroker_security_protocol = security_protocol
self.kafka.logs["kafka_data_1"]["collect_default"] = True
self.kafka.logs["kafka_data_2"]["collect_default"] = True
self.kafka.logs["kafka_operational_logs_debug"]["collect_default"] = True
if check_order:
# To check ordering, we simply create input and output topics
# with a single partition.
# We reduce the number of seed messages to copy to account for the fewer output
# partitions, and thus lower parallelism. This helps keep the test
# time shorter.
self.num_seed_messages = self.num_seed_messages // 3
self.num_input_partitions = 1
self.num_output_partitions = 1
self.setup_topics()
self.kafka.start()
input_messages = self.seed_messages(self.input_topic, self.num_seed_messages)
concurrently_consumed_messages = self.copy_messages_transactionally(
failure_mode, bounce_target, input_topic=self.input_topic,
output_topic=self.output_topic, num_copiers=self.num_input_partitions,
num_messages_to_copy=self.num_seed_messages)
output_messages = self.get_messages_from_topic(self.output_topic, self.num_seed_messages)
concurrently_consumed_message_set = set(concurrently_consumed_messages)
output_message_set = set(output_messages)
input_message_set = set(input_messages)
num_dups = abs(len(output_messages) - len(output_message_set))
num_dups_in_concurrent_consumer = abs(len(concurrently_consumed_messages)
- len(concurrently_consumed_message_set))
assert num_dups == 0, "Detected %d duplicates in the output stream" % num_dups
assert input_message_set == output_message_set, "Input and output message sets are not equal. Num input messages %d. Num output messages %d" %\
(len(input_message_set), len(output_message_set))
assert num_dups_in_concurrent_consumer == 0, "Detected %d dups in concurrently consumed messages" % num_dups_in_concurrent_consumer
assert input_message_set == concurrently_consumed_message_set, \
"Input and concurrently consumed output message sets are not equal. Num input messages: %d. Num concurrently_consumed_messages: %d" %\
(len(input_message_set), len(concurrently_consumed_message_set))
if check_order:
assert input_messages == sorted(input_messages), "The seed messages themselves were not in order"
assert output_messages == input_messages, "Output messages are not in order"
assert concurrently_consumed_messages == output_messages, "Concurrently consumed messages are not in order"
|
shamangeorge/beets
|
refs/heads/master
|
test/test_query.py
|
4
|
# -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Various tests for querying the library database.
"""
from __future__ import division, absolute_import, print_function
from functools import partial
from mock import patch
import os
import sys
import unittest
from test import _common
from test import helper
import beets.library
from beets import dbcore
from beets.dbcore import types
from beets.dbcore.query import (NoneQuery, ParsingError,
InvalidQueryArgumentValueError)
from beets.library import Library, Item
from beets import util
import platform
import six
class TestHelper(helper.TestHelper):
def assertInResult(self, item, results): # noqa
result_ids = [i.id for i in results]
self.assertIn(item.id, result_ids)
def assertNotInResult(self, item, results): # noqa
result_ids = [i.id for i in results]
self.assertNotIn(item.id, result_ids)
class AnyFieldQueryTest(_common.LibTestCase):
def test_no_restriction(self):
q = dbcore.query.AnyFieldQuery(
'title', beets.library.Item._fields.keys(),
dbcore.query.SubstringQuery
)
self.assertEqual(self.lib.items(q).get().title, 'the title')
def test_restriction_completeness(self):
q = dbcore.query.AnyFieldQuery('title', [u'title'],
dbcore.query.SubstringQuery)
self.assertEqual(self.lib.items(q).get().title, u'the title')
def test_restriction_soundness(self):
q = dbcore.query.AnyFieldQuery('title', [u'artist'],
dbcore.query.SubstringQuery)
self.assertEqual(self.lib.items(q).get(), None)
def test_eq(self):
q1 = dbcore.query.AnyFieldQuery('foo', [u'bar'],
dbcore.query.SubstringQuery)
q2 = dbcore.query.AnyFieldQuery('foo', [u'bar'],
dbcore.query.SubstringQuery)
self.assertEqual(q1, q2)
q2.query_class = None
self.assertNotEqual(q1, q2)
class AssertsMixin(object):
def assert_items_matched(self, results, titles):
self.assertEqual(set([i.title for i in results]), set(titles))
def assert_albums_matched(self, results, albums):
self.assertEqual(set([a.album for a in results]), set(albums))
# A test case class providing a library with some dummy data and some
# assertions involving that data.
class DummyDataTestCase(_common.TestCase, AssertsMixin):
def setUp(self):
super(DummyDataTestCase, self).setUp()
self.lib = beets.library.Library(':memory:')
items = [_common.item() for _ in range(3)]
items[0].title = u'foo bar'
items[0].artist = u'one'
items[0].album = u'baz'
items[0].year = 2001
items[0].comp = True
items[1].title = u'baz qux'
items[1].artist = u'two'
items[1].album = u'baz'
items[1].year = 2002
items[1].comp = True
items[2].title = u'beets 4 eva'
items[2].artist = u'three'
items[2].album = u'foo'
items[2].year = 2003
items[2].comp = False
for item in items:
self.lib.add(item)
self.lib.add_album(items[:2])
def assert_items_matched_all(self, results):
self.assert_items_matched(results, [
u'foo bar',
u'baz qux',
u'beets 4 eva',
])
class GetTest(DummyDataTestCase):
def test_get_empty(self):
q = u''
results = self.lib.items(q)
self.assert_items_matched_all(results)
def test_get_none(self):
q = None
results = self.lib.items(q)
self.assert_items_matched_all(results)
def test_get_one_keyed_term(self):
q = u'title:qux'
results = self.lib.items(q)
self.assert_items_matched(results, [u'baz qux'])
def test_get_one_keyed_regexp(self):
q = u'artist::t.+r'
results = self.lib.items(q)
self.assert_items_matched(results, [u'beets 4 eva'])
def test_get_one_unkeyed_term(self):
q = u'three'
results = self.lib.items(q)
self.assert_items_matched(results, [u'beets 4 eva'])
def test_get_one_unkeyed_regexp(self):
q = u':x$'
results = self.lib.items(q)
self.assert_items_matched(results, [u'baz qux'])
def test_get_no_matches(self):
q = u'popebear'
results = self.lib.items(q)
self.assert_items_matched(results, [])
def test_invalid_key(self):
q = u'pope:bear'
results = self.lib.items(q)
# Matches nothing since the flexattr is not present on the
# objects.
self.assert_items_matched(results, [])
def test_term_case_insensitive(self):
q = u'oNE'
results = self.lib.items(q)
self.assert_items_matched(results, [u'foo bar'])
def test_regexp_case_sensitive(self):
q = u':oNE'
results = self.lib.items(q)
self.assert_items_matched(results, [])
q = u':one'
results = self.lib.items(q)
self.assert_items_matched(results, [u'foo bar'])
def test_term_case_insensitive_with_key(self):
q = u'artist:thrEE'
results = self.lib.items(q)
self.assert_items_matched(results, [u'beets 4 eva'])
def test_key_case_insensitive(self):
q = u'ArTiST:three'
results = self.lib.items(q)
self.assert_items_matched(results, [u'beets 4 eva'])
def test_unkeyed_term_matches_multiple_columns(self):
q = u'baz'
results = self.lib.items(q)
self.assert_items_matched(results, [
u'foo bar',
u'baz qux',
])
def test_unkeyed_regexp_matches_multiple_columns(self):
q = u':z$'
results = self.lib.items(q)
self.assert_items_matched(results, [
u'foo bar',
u'baz qux',
])
def test_keyed_term_matches_only_one_column(self):
q = u'title:baz'
results = self.lib.items(q)
self.assert_items_matched(results, [u'baz qux'])
def test_keyed_regexp_matches_only_one_column(self):
q = u'title::baz'
results = self.lib.items(q)
self.assert_items_matched(results, [
u'baz qux',
])
def test_multiple_terms_narrow_search(self):
q = u'qux baz'
results = self.lib.items(q)
self.assert_items_matched(results, [
u'baz qux',
])
def test_multiple_regexps_narrow_search(self):
q = u':baz :qux'
results = self.lib.items(q)
self.assert_items_matched(results, [u'baz qux'])
def test_mixed_terms_regexps_narrow_search(self):
q = u':baz qux'
results = self.lib.items(q)
self.assert_items_matched(results, [u'baz qux'])
def test_single_year(self):
q = u'year:2001'
results = self.lib.items(q)
self.assert_items_matched(results, [u'foo bar'])
def test_year_range(self):
q = u'year:2000..2002'
results = self.lib.items(q)
self.assert_items_matched(results, [
u'foo bar',
u'baz qux',
])
def test_singleton_true(self):
q = u'singleton:true'
results = self.lib.items(q)
self.assert_items_matched(results, [u'beets 4 eva'])
def test_singleton_false(self):
q = u'singleton:false'
results = self.lib.items(q)
self.assert_items_matched(results, [u'foo bar', u'baz qux'])
def test_compilation_true(self):
q = u'comp:true'
results = self.lib.items(q)
self.assert_items_matched(results, [u'foo bar', u'baz qux'])
def test_compilation_false(self):
q = u'comp:false'
results = self.lib.items(q)
self.assert_items_matched(results, [u'beets 4 eva'])
def test_unknown_field_name_no_results(self):
q = u'xyzzy:nonsense'
results = self.lib.items(q)
titles = [i.title for i in results]
self.assertEqual(titles, [])
def test_unknown_field_name_no_results_in_album_query(self):
q = u'xyzzy:nonsense'
results = self.lib.albums(q)
names = [a.album for a in results]
self.assertEqual(names, [])
def test_item_field_name_matches_nothing_in_album_query(self):
q = u'format:nonsense'
results = self.lib.albums(q)
names = [a.album for a in results]
self.assertEqual(names, [])
def test_unicode_query(self):
item = self.lib.items().get()
item.title = u'caf\xe9'
item.store()
q = u'title:caf\xe9'
results = self.lib.items(q)
self.assert_items_matched(results, [u'caf\xe9'])
def test_numeric_search_positive(self):
q = dbcore.query.NumericQuery('year', u'2001')
results = self.lib.items(q)
self.assertTrue(results)
def test_numeric_search_negative(self):
q = dbcore.query.NumericQuery('year', u'1999')
results = self.lib.items(q)
self.assertFalse(results)
def test_invalid_query(self):
with self.assertRaises(InvalidQueryArgumentValueError) as raised:
dbcore.query.NumericQuery('year', u'199a')
self.assertIn(u'not an int', six.text_type(raised.exception))
with self.assertRaises(InvalidQueryArgumentValueError) as raised:
dbcore.query.RegexpQuery('year', u'199(')
exception_text = six.text_type(raised.exception)
self.assertIn(u'not a regular expression', exception_text)
if sys.version_info >= (3, 5):
self.assertIn(u'unterminated subpattern', exception_text)
else:
self.assertIn(u'unbalanced parenthesis', exception_text)
self.assertIsInstance(raised.exception, ParsingError)
class MatchTest(_common.TestCase):
def setUp(self):
super(MatchTest, self).setUp()
self.item = _common.item()
def test_regex_match_positive(self):
q = dbcore.query.RegexpQuery('album', u'^the album$')
self.assertTrue(q.match(self.item))
def test_regex_match_negative(self):
q = dbcore.query.RegexpQuery('album', u'^album$')
self.assertFalse(q.match(self.item))
def test_regex_match_non_string_value(self):
q = dbcore.query.RegexpQuery('disc', u'^6$')
self.assertTrue(q.match(self.item))
def test_substring_match_positive(self):
q = dbcore.query.SubstringQuery('album', u'album')
self.assertTrue(q.match(self.item))
def test_substring_match_negative(self):
q = dbcore.query.SubstringQuery('album', u'ablum')
self.assertFalse(q.match(self.item))
def test_substring_match_non_string_value(self):
q = dbcore.query.SubstringQuery('disc', u'6')
self.assertTrue(q.match(self.item))
def test_year_match_positive(self):
q = dbcore.query.NumericQuery('year', u'1')
self.assertTrue(q.match(self.item))
def test_year_match_negative(self):
q = dbcore.query.NumericQuery('year', u'10')
self.assertFalse(q.match(self.item))
def test_bitrate_range_positive(self):
q = dbcore.query.NumericQuery('bitrate', u'100000..200000')
self.assertTrue(q.match(self.item))
def test_bitrate_range_negative(self):
q = dbcore.query.NumericQuery('bitrate', u'200000..300000')
self.assertFalse(q.match(self.item))
def test_open_range(self):
dbcore.query.NumericQuery('bitrate', u'100000..')
def test_eq(self):
q1 = dbcore.query.MatchQuery('foo', u'bar')
q2 = dbcore.query.MatchQuery('foo', u'bar')
q3 = dbcore.query.MatchQuery('foo', u'baz')
q4 = dbcore.query.StringFieldQuery('foo', u'bar')
self.assertEqual(q1, q2)
self.assertNotEqual(q1, q3)
self.assertNotEqual(q1, q4)
self.assertNotEqual(q3, q4)
class PathQueryTest(_common.LibTestCase, TestHelper, AssertsMixin):
def setUp(self):
super(PathQueryTest, self).setUp()
# This is the item we'll try to match.
self.i.path = util.normpath('/a/b/c.mp3')
self.i.title = u'path item'
self.i.album = u'path album'
self.i.store()
self.lib.add_album([self.i])
# A second item for testing exclusion.
i2 = _common.item()
i2.path = util.normpath('/x/y/z.mp3')
i2.title = 'another item'
i2.album = 'another album'
self.lib.add(i2)
self.lib.add_album([i2])
# Unadorned path queries with path separators in them are considered
# path queries only when the path in question actually exists. So we
# mock the existence check to return true.
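# For illustration (not an extra assertion): with this mock in place a
# slashed query such as u'/a/b' is treated as a path query, while a bare
# term like u'c.mp3' is not; see the tests further below.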
self.patcher_exists = patch('beets.library.os.path.exists')
self.patcher_exists.start().return_value = True
# We have to create the samefile function, as it does not exist on
# Windows under Python 2.7.
self.patcher_samefile = patch('beets.library.os.path.samefile',
create=True)
self.patcher_samefile.start().return_value = True
def tearDown(self):
super(PathQueryTest, self).tearDown()
self.patcher_samefile.stop()
self.patcher_exists.stop()
def test_path_exact_match(self):
q = u'path:/a/b/c.mp3'
results = self.lib.items(q)
self.assert_items_matched(results, [u'path item'])
results = self.lib.albums(q)
self.assert_albums_matched(results, [])
def test_parent_directory_no_slash(self):
q = u'path:/a'
results = self.lib.items(q)
self.assert_items_matched(results, [u'path item'])
results = self.lib.albums(q)
self.assert_albums_matched(results, [u'path album'])
def test_parent_directory_with_slash(self):
q = u'path:/a/'
results = self.lib.items(q)
self.assert_items_matched(results, [u'path item'])
results = self.lib.albums(q)
self.assert_albums_matched(results, [u'path album'])
def test_no_match(self):
q = u'path:/xyzzy/'
results = self.lib.items(q)
self.assert_items_matched(results, [])
results = self.lib.albums(q)
self.assert_albums_matched(results, [])
def test_fragment_no_match(self):
q = u'path:/b/'
results = self.lib.items(q)
self.assert_items_matched(results, [])
results = self.lib.albums(q)
self.assert_albums_matched(results, [])
def test_nonnorm_path(self):
q = u'path:/x/../a/b'
results = self.lib.items(q)
self.assert_items_matched(results, [u'path item'])
results = self.lib.albums(q)
self.assert_albums_matched(results, [u'path album'])
def test_slashed_query_matches_path(self):
q = u'/a/b'
results = self.lib.items(q)
self.assert_items_matched(results, [u'path item'])
results = self.lib.albums(q)
self.assert_albums_matched(results, [u'path album'])
@unittest.skip('unfixed (#1865)')
def test_path_query_in_or_query(self):
q = '/a/b , /a/b'
results = self.lib.items(q)
self.assert_items_matched(results, ['path item'])
def test_non_slashed_does_not_match_path(self):
q = u'c.mp3'
results = self.lib.items(q)
self.assert_items_matched(results, [])
results = self.lib.albums(q)
self.assert_albums_matched(results, [])
def test_slashes_in_explicit_field_does_not_match_path(self):
q = u'title:/a/b'
results = self.lib.items(q)
self.assert_items_matched(results, [])
def test_path_item_regex(self):
q = u'path::c\\.mp3$'
results = self.lib.items(q)
self.assert_items_matched(results, [u'path item'])
def test_path_album_regex(self):
q = u'path::b'
results = self.lib.albums(q)
self.assert_albums_matched(results, [u'path album'])
def test_escape_underscore(self):
self.add_album(path=b'/a/_/title.mp3', title=u'with underscore',
album=u'album with underscore')
q = u'path:/a/_'
results = self.lib.items(q)
self.assert_items_matched(results, [u'with underscore'])
results = self.lib.albums(q)
self.assert_albums_matched(results, [u'album with underscore'])
def test_escape_percent(self):
self.add_album(path=b'/a/%/title.mp3', title=u'with percent',
album=u'album with percent')
q = u'path:/a/%'
results = self.lib.items(q)
self.assert_items_matched(results, [u'with percent'])
results = self.lib.albums(q)
self.assert_albums_matched(results, [u'album with percent'])
def test_escape_backslash(self):
self.add_album(path=br'/a/\x/title.mp3', title=u'with backslash',
album=u'album with backslash')
q = u'path:/a/\\\\x'
results = self.lib.items(q)
self.assert_items_matched(results, [u'with backslash'])
results = self.lib.albums(q)
self.assert_albums_matched(results, [u'album with backslash'])
def test_case_sensitivity(self):
self.add_album(path=b'/A/B/C2.mp3', title=u'caps path')
makeq = partial(beets.library.PathQuery, u'path', '/A/B')
results = self.lib.items(makeq(case_sensitive=True))
self.assert_items_matched(results, [u'caps path'])
results = self.lib.items(makeq(case_sensitive=False))
self.assert_items_matched(results, [u'path item', u'caps path'])
# Check for correct case sensitivity selection (this check
# only works on non-Windows OSes).
with _common.system_mock('Darwin'):
# exists = True and samefile = True => Case insensitive
q = makeq()
self.assertEqual(q.case_sensitive, False)
# exists = True and samefile = False => Case sensitive
self.patcher_samefile.stop()
self.patcher_samefile.start().return_value = False
try:
q = makeq()
self.assertEqual(q.case_sensitive, True)
finally:
self.patcher_samefile.stop()
self.patcher_samefile.start().return_value = True
# Test platform-aware default sensitivity when the library path
# does not exist. For the duration of this check, we change the
# `os.path.exists` mock to return False.
self.patcher_exists.stop()
self.patcher_exists.start().return_value = False
try:
with _common.system_mock('Darwin'):
q = makeq()
self.assertEqual(q.case_sensitive, True)
with _common.system_mock('Windows'):
q = makeq()
self.assertEqual(q.case_sensitive, False)
finally:
# Restore the `os.path.exists` mock to its original state.
self.patcher_exists.stop()
self.patcher_exists.start().return_value = True
@patch('beets.library.os')
def test_path_sep_detection(self, mock_os):
mock_os.sep = '/'
mock_os.altsep = None
mock_os.path.exists = lambda p: True
is_path = beets.library.PathQuery.is_path_query
self.assertTrue(is_path('/foo/bar'))
self.assertTrue(is_path('foo/bar'))
self.assertTrue(is_path('foo/'))
self.assertFalse(is_path('foo'))
self.assertTrue(is_path('foo/:bar'))
self.assertFalse(is_path('foo:bar/'))
self.assertFalse(is_path('foo:/bar'))
def test_detect_absolute_path(self):
if platform.system() == 'Windows':
# Because the absolute path begins with something like C:, we
# can't disambiguate it from an ordinary query.
self.skipTest('Windows absolute paths do not work as queries')
# Don't patch `os.path.exists`; we'll actually create a file when
# it exists.
self.patcher_exists.stop()
is_path = beets.library.PathQuery.is_path_query
try:
path = self.touch(os.path.join(b'foo', b'bar'))
path = path.decode('utf-8')
# The file itself.
self.assertTrue(is_path(path))
# The parent directory.
parent = os.path.dirname(path)
self.assertTrue(is_path(parent))
# Some non-existent path.
self.assertFalse(is_path(path + u'baz'))
finally:
# Restart the `os.path.exists` patch.
self.patcher_exists.start()
def test_detect_relative_path(self):
self.patcher_exists.stop()
is_path = beets.library.PathQuery.is_path_query
try:
self.touch(os.path.join(b'foo', b'bar'))
# Temporarily change directory so relative paths work.
cur_dir = os.getcwd()
try:
os.chdir(self.temp_dir)
self.assertTrue(is_path(u'foo/'))
self.assertTrue(is_path(u'foo/bar'))
self.assertTrue(is_path(u'foo/bar:tagada'))
self.assertFalse(is_path(u'bar'))
finally:
os.chdir(cur_dir)
finally:
self.patcher_exists.start()
class IntQueryTest(unittest.TestCase, TestHelper):
def setUp(self):
self.lib = Library(':memory:')
def tearDown(self):
Item._types = {}
def test_exact_value_match(self):
item = self.add_item(bpm=120)
matched = self.lib.items(u'bpm:120').get()
self.assertEqual(item.id, matched.id)
def test_range_match(self):
item = self.add_item(bpm=120)
self.add_item(bpm=130)
matched = self.lib.items(u'bpm:110..125')
self.assertEqual(1, len(matched))
self.assertEqual(item.id, matched.get().id)
def test_flex_range_match(self):
Item._types = {'myint': types.Integer()}
item = self.add_item(myint=2)
matched = self.lib.items(u'myint:2').get()
self.assertEqual(item.id, matched.id)
def test_flex_dont_match_missing(self):
Item._types = {'myint': types.Integer()}
self.add_item()
matched = self.lib.items(u'myint:2').get()
self.assertIsNone(matched)
def test_no_substring_match(self):
self.add_item(bpm=120)
matched = self.lib.items(u'bpm:12').get()
self.assertIsNone(matched)
class BoolQueryTest(unittest.TestCase, TestHelper):
def setUp(self):
self.lib = Library(':memory:')
Item._types = {'flexbool': types.Boolean()}
def tearDown(self):
Item._types = {}
def test_parse_true(self):
item_true = self.add_item(comp=True)
item_false = self.add_item(comp=False)
matched = self.lib.items(u'comp:true')
self.assertInResult(item_true, matched)
self.assertNotInResult(item_false, matched)
def test_flex_parse_true(self):
item_true = self.add_item(flexbool=True)
item_false = self.add_item(flexbool=False)
matched = self.lib.items(u'flexbool:true')
self.assertInResult(item_true, matched)
self.assertNotInResult(item_false, matched)
def test_flex_parse_false(self):
item_true = self.add_item(flexbool=True)
item_false = self.add_item(flexbool=False)
matched = self.lib.items(u'flexbool:false')
self.assertInResult(item_false, matched)
self.assertNotInResult(item_true, matched)
def test_flex_parse_1(self):
item_true = self.add_item(flexbool=True)
item_false = self.add_item(flexbool=False)
matched = self.lib.items(u'flexbool:1')
self.assertInResult(item_true, matched)
self.assertNotInResult(item_false, matched)
def test_flex_parse_0(self):
item_true = self.add_item(flexbool=True)
item_false = self.add_item(flexbool=False)
matched = self.lib.items(u'flexbool:0')
self.assertInResult(item_false, matched)
self.assertNotInResult(item_true, matched)
def test_flex_parse_any_string(self):
# TODO this should be the other way around
item_true = self.add_item(flexbool=True)
item_false = self.add_item(flexbool=False)
matched = self.lib.items(u'flexbool:something')
self.assertInResult(item_false, matched)
self.assertNotInResult(item_true, matched)
class DefaultSearchFieldsTest(DummyDataTestCase):
def test_albums_matches_album(self):
albums = list(self.lib.albums(u'baz'))
self.assertEqual(len(albums), 1)
def test_albums_matches_albumartist(self):
albums = list(self.lib.albums([u'album artist']))
self.assertEqual(len(albums), 1)
def test_items_matches_title(self):
items = self.lib.items(u'beets')
self.assert_items_matched(items, [u'beets 4 eva'])
def test_items_does_not_match_year(self):
items = self.lib.items(u'2001')
self.assert_items_matched(items, [])
class NoneQueryTest(unittest.TestCase, TestHelper):
def setUp(self):
self.lib = Library(':memory:')
def test_match_singletons(self):
singleton = self.add_item()
album_item = self.add_album().items().get()
matched = self.lib.items(NoneQuery(u'album_id'))
self.assertInResult(singleton, matched)
self.assertNotInResult(album_item, matched)
def test_match_after_set_none(self):
item = self.add_item(rg_track_gain=0)
matched = self.lib.items(NoneQuery(u'rg_track_gain'))
self.assertNotInResult(item, matched)
item['rg_track_gain'] = None
item.store()
matched = self.lib.items(NoneQuery(u'rg_track_gain'))
self.assertInResult(item, matched)
def test_match_slow(self):
item = self.add_item()
matched = self.lib.items(NoneQuery(u'rg_track_peak', fast=False))
self.assertInResult(item, matched)
def test_match_slow_after_set_none(self):
item = self.add_item(rg_track_gain=0)
matched = self.lib.items(NoneQuery(u'rg_track_gain', fast=False))
self.assertNotInResult(item, matched)
item['rg_track_gain'] = None
item.store()
matched = self.lib.items(NoneQuery(u'rg_track_gain', fast=False))
self.assertInResult(item, matched)
class NotQueryMatchTest(_common.TestCase):
"""Test `query.NotQuery` matching against a single item, using the same
cases and assertions as on `MatchTest`, plus assertion on the negated
queries (ie. assertTrue(q) -> assertFalse(NotQuery(q))).
"""
def setUp(self):
super(NotQueryMatchTest, self).setUp()
self.item = _common.item()
def test_regex_match_positive(self):
q = dbcore.query.RegexpQuery(u'album', u'^the album$')
self.assertTrue(q.match(self.item))
self.assertFalse(dbcore.query.NotQuery(q).match(self.item))
def test_regex_match_negative(self):
q = dbcore.query.RegexpQuery(u'album', u'^album$')
self.assertFalse(q.match(self.item))
self.assertTrue(dbcore.query.NotQuery(q).match(self.item))
def test_regex_match_non_string_value(self):
q = dbcore.query.RegexpQuery(u'disc', u'^6$')
self.assertTrue(q.match(self.item))
self.assertFalse(dbcore.query.NotQuery(q).match(self.item))
def test_substring_match_positive(self):
q = dbcore.query.SubstringQuery(u'album', u'album')
self.assertTrue(q.match(self.item))
self.assertFalse(dbcore.query.NotQuery(q).match(self.item))
def test_substring_match_negative(self):
q = dbcore.query.SubstringQuery(u'album', u'ablum')
self.assertFalse(q.match(self.item))
self.assertTrue(dbcore.query.NotQuery(q).match(self.item))
def test_substring_match_non_string_value(self):
q = dbcore.query.SubstringQuery(u'disc', u'6')
self.assertTrue(q.match(self.item))
self.assertFalse(dbcore.query.NotQuery(q).match(self.item))
def test_year_match_positive(self):
q = dbcore.query.NumericQuery(u'year', u'1')
self.assertTrue(q.match(self.item))
self.assertFalse(dbcore.query.NotQuery(q).match(self.item))
def test_year_match_negative(self):
q = dbcore.query.NumericQuery(u'year', u'10')
self.assertFalse(q.match(self.item))
self.assertTrue(dbcore.query.NotQuery(q).match(self.item))
def test_bitrate_range_positive(self):
q = dbcore.query.NumericQuery(u'bitrate', u'100000..200000')
self.assertTrue(q.match(self.item))
self.assertFalse(dbcore.query.NotQuery(q).match(self.item))
def test_bitrate_range_negative(self):
q = dbcore.query.NumericQuery(u'bitrate', u'200000..300000')
self.assertFalse(q.match(self.item))
self.assertTrue(dbcore.query.NotQuery(q).match(self.item))
def test_open_range(self):
q = dbcore.query.NumericQuery(u'bitrate', u'100000..')
dbcore.query.NotQuery(q)
class NotQueryTest(DummyDataTestCase):
"""Test `query.NotQuery` against the dummy data:
- `test_type_xxx`: tests for the negation of a particular XxxQuery class.
- `test_get_yyy`: tests on query strings (similar to `GetTest`)
"""
def assertNegationProperties(self, q): # noqa
"""Given a Query `q`, assert that:
- q OR not(q) == all items
- q AND not(q) == 0
- not(not(q)) == q
"""
not_q = dbcore.query.NotQuery(q)
# assert using OrQuery, AndQuery
q_or = dbcore.query.OrQuery([q, not_q])
q_and = dbcore.query.AndQuery([q, not_q])
self.assert_items_matched_all(self.lib.items(q_or))
self.assert_items_matched(self.lib.items(q_and), [])
# assert manually checking the item titles
all_titles = set([i.title for i in self.lib.items()])
q_results = set([i.title for i in self.lib.items(q)])
not_q_results = set([i.title for i in self.lib.items(not_q)])
self.assertEqual(q_results.union(not_q_results), all_titles)
self.assertEqual(q_results.intersection(not_q_results), set())
# round trip
not_not_q = dbcore.query.NotQuery(not_q)
self.assertEqual(set([i.title for i in self.lib.items(q)]),
set([i.title for i in self.lib.items(not_not_q)]))
def test_type_and(self):
# not(a and b) <-> not(a) or not(b)
q = dbcore.query.AndQuery([
dbcore.query.BooleanQuery(u'comp', True),
dbcore.query.NumericQuery(u'year', u'2002')],
)
not_results = self.lib.items(dbcore.query.NotQuery(q))
self.assert_items_matched(not_results, [u'foo bar', u'beets 4 eva'])
self.assertNegationProperties(q)
def test_type_anyfield(self):
q = dbcore.query.AnyFieldQuery(u'foo', [u'title', u'artist', u'album'],
dbcore.query.SubstringQuery)
not_results = self.lib.items(dbcore.query.NotQuery(q))
self.assert_items_matched(not_results, [u'baz qux'])
self.assertNegationProperties(q)
def test_type_boolean(self):
q = dbcore.query.BooleanQuery(u'comp', True)
not_results = self.lib.items(dbcore.query.NotQuery(q))
self.assert_items_matched(not_results, [u'beets 4 eva'])
self.assertNegationProperties(q)
def test_type_date(self):
q = dbcore.query.DateQuery(u'added', u'2000-01-01')
not_results = self.lib.items(dbcore.query.NotQuery(q))
# query date is in the past, thus the 'not' results should contain all
# items
self.assert_items_matched(not_results, [u'foo bar', u'baz qux',
u'beets 4 eva'])
self.assertNegationProperties(q)
def test_type_false(self):
q = dbcore.query.FalseQuery()
not_results = self.lib.items(dbcore.query.NotQuery(q))
self.assert_items_matched_all(not_results)
self.assertNegationProperties(q)
def test_type_match(self):
q = dbcore.query.MatchQuery(u'year', u'2003')
not_results = self.lib.items(dbcore.query.NotQuery(q))
self.assert_items_matched(not_results, [u'foo bar', u'baz qux'])
self.assertNegationProperties(q)
def test_type_none(self):
q = dbcore.query.NoneQuery(u'rg_track_gain')
not_results = self.lib.items(dbcore.query.NotQuery(q))
self.assert_items_matched(not_results, [])
self.assertNegationProperties(q)
def test_type_numeric(self):
q = dbcore.query.NumericQuery(u'year', u'2001..2002')
not_results = self.lib.items(dbcore.query.NotQuery(q))
self.assert_items_matched(not_results, [u'beets 4 eva'])
self.assertNegationProperties(q)
def test_type_or(self):
# not(a or b) <-> not(a) and not(b)
q = dbcore.query.OrQuery([dbcore.query.BooleanQuery(u'comp', True),
dbcore.query.NumericQuery(u'year', u'2002')])
not_results = self.lib.items(dbcore.query.NotQuery(q))
self.assert_items_matched(not_results, [u'beets 4 eva'])
self.assertNegationProperties(q)
def test_type_regexp(self):
q = dbcore.query.RegexpQuery(u'artist', u'^t')
not_results = self.lib.items(dbcore.query.NotQuery(q))
self.assert_items_matched(not_results, [u'foo bar'])
self.assertNegationProperties(q)
def test_type_substring(self):
q = dbcore.query.SubstringQuery(u'album', u'ba')
not_results = self.lib.items(dbcore.query.NotQuery(q))
self.assert_items_matched(not_results, [u'beets 4 eva'])
self.assertNegationProperties(q)
def test_type_true(self):
q = dbcore.query.TrueQuery()
not_results = self.lib.items(dbcore.query.NotQuery(q))
self.assert_items_matched(not_results, [])
self.assertNegationProperties(q)
def test_get_prefixes_keyed(self):
"""Test both negation prefixes on a keyed query."""
q0 = u'-title:qux'
q1 = u'^title:qux'
results0 = self.lib.items(q0)
results1 = self.lib.items(q1)
self.assert_items_matched(results0, [u'foo bar', u'beets 4 eva'])
self.assert_items_matched(results1, [u'foo bar', u'beets 4 eva'])
def test_get_prefixes_unkeyed(self):
"""Test both negation prefixes on an unkeyed query."""
q0 = u'-qux'
q1 = u'^qux'
results0 = self.lib.items(q0)
results1 = self.lib.items(q1)
self.assert_items_matched(results0, [u'foo bar', u'beets 4 eva'])
self.assert_items_matched(results1, [u'foo bar', u'beets 4 eva'])
def test_get_one_keyed_regexp(self):
q = u'-artist::t.+r'
results = self.lib.items(q)
self.assert_items_matched(results, [u'foo bar', u'baz qux'])
def test_get_one_unkeyed_regexp(self):
q = u'-:x$'
results = self.lib.items(q)
self.assert_items_matched(results, [u'foo bar', u'beets 4 eva'])
def test_get_multiple_terms(self):
q = u'baz -bar'
results = self.lib.items(q)
self.assert_items_matched(results, [u'baz qux'])
def test_get_mixed_terms(self):
q = u'baz -title:bar'
results = self.lib.items(q)
self.assert_items_matched(results, [u'baz qux'])
def test_fast_vs_slow(self):
"""Test that the results are the same regardless of the `fast` flag
for negated `FieldQuery`s.
TODO: investigate NoneQuery(fast=False), as it is raising
AttributeError: type object 'NoneQuery' has no attribute 'field'
at NoneQuery.match() (due to being @classmethod, and no self?)
"""
classes = [(dbcore.query.DateQuery, [u'added', u'2001-01-01']),
(dbcore.query.MatchQuery, [u'artist', u'one']),
# (dbcore.query.NoneQuery, ['rg_track_gain']),
(dbcore.query.NumericQuery, [u'year', u'2002']),
(dbcore.query.StringFieldQuery, [u'year', u'2001']),
(dbcore.query.RegexpQuery, [u'album', u'^.a']),
(dbcore.query.SubstringQuery, [u'title', u'x'])]
for klass, args in classes:
q_fast = dbcore.query.NotQuery(klass(*(args + [True])))
q_slow = dbcore.query.NotQuery(klass(*(args + [False])))
try:
self.assertEqual([i.title for i in self.lib.items(q_fast)],
[i.title for i in self.lib.items(q_slow)])
except NotImplementedError:
# ignore classes that do not provide `fast` implementation
pass
def suite():
return unittest.TestLoader().loadTestsFromName(__name__)
if __name__ == '__main__':
unittest.main(defaultTest='suite')
|
trondhindenes/ansible
|
refs/heads/devel
|
test/units/modules/network/edgeos/test_edgeos_config.py
|
66
|
#
# (c) 2018 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from units.compat.mock import patch
from ansible.modules.network.edgeos import edgeos_config
from units.modules.utils import set_module_args
from .edgeos_module import TestEdgeosModule, load_fixture
class TestEdgeosConfigModule(TestEdgeosModule):
module = edgeos_config
def setUp(self):
super(TestEdgeosConfigModule, self).setUp()
self.mock_get_config = patch('ansible.modules.network.edgeos.edgeos_config.get_config')
self.get_config = self.mock_get_config.start()
self.mock_load_config = patch('ansible.modules.network.edgeos.edgeos_config.load_config')
self.load_config = self.mock_load_config.start()
self.mock_run_commands = patch('ansible.modules.network.edgeos.edgeos_config.run_commands')
self.run_commands = self.mock_run_commands.start()
def tearDown(self):
super(TestEdgeosConfigModule, self).tearDown()
self.mock_get_config.stop()
self.mock_load_config.stop()
self.mock_run_commands.stop()
def load_fixtures(self, commands=None):
config_file = 'edgeos_config_config.cfg'
self.get_config.return_value = load_fixture(config_file)
self.load_config.return_value = None
def test_edgeos_config_unchanged(self):
src = load_fixture('edgeos_config_config.cfg')
set_module_args(dict(src=src))
self.execute_module()
def test_edgeos_config_src(self):
src = load_fixture('edgeos_config_src.cfg')
set_module_args(dict(src=src))
commands = ['set system host-name er01', 'delete interfaces ethernet eth0 address']
self.execute_module(changed=True, commands=commands)
def test_edgeos_config_src_brackets(self):
src = load_fixture('edgeos_config_src_brackets.cfg')
set_module_args(dict(src=src))
commands = ['set interfaces ethernet eth0 address 10.10.10.10/24', 'set system host-name er01']
self.execute_module(changed=True, commands=commands)
def test_edgeos_config_backup(self):
set_module_args(dict(backup=True))
result = self.execute_module()
self.assertIn('__backup__', result)
def test_edgeos_config_lines(self):
commands = ['set system host-name er01']
set_module_args(dict(lines=commands))
self.execute_module(changed=True, commands=commands)
def test_edgeos_config_config(self):
config = 'set system host-name localhost'
new_config = ['set system host-name er01']
set_module_args(dict(lines=new_config, config=config))
self.execute_module(changed=True, commands=new_config)
def test_edgeos_config_match_none(self):
lines = ['set system interfaces ethernet eth0 address 1.2.3.4/24',
'set system interfaces ethernet eth0 description Outside']
set_module_args(dict(lines=lines, match='none'))
self.execute_module(changed=True, commands=lines, sort=False)
|
isaachenrion/jets
|
refs/heads/master
|
src/data_ops/SupervisedDataset.py
|
1
|
import numpy as np
from torch.utils.data import Dataset
class SupervisedDataset(Dataset):
def __init__(self, x, y):
super().__init__()
self.x = x
self.y = y
def shuffle(self):
perm = np.random.permutation(len(self.x))
self.x = [self.x[i] for i in perm]
self.y = [self.y[i] for i in perm]
@classmethod
def concatenate(cls, dataset1, dataset2):
return cls(dataset1.x + dataset2.x, dataset1.y + dataset2.y)
@property
def dim(self):
return self.x[0].size()[1]
def __len__(self):
return len(self.x)
def __getitem__(self, idx):
return self.x[idx], self.y[idx]
def extend(self, new_dataset):
self.x = self.x + new_dataset.x
self.y = self.y + new_dataset.y
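# Minimal usage sketch (assumes x and y are lists of torch tensors; the names
# below are placeholders, not part of this module):
#   train = SupervisedDataset(x_list, y_list)
#   train.shuffle()
#   first_x, first_y = train[0]
#   merged = SupervisedDataset.concatenate(train, SupervisedDataset(x2, y2))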
|
k3nnyfr/s2a_fr-nsis
|
refs/heads/master
|
s2a/Python/Lib/xml/parsers/expat.py
|
230
|
"""Interface to the Expat non-validating XML parser."""
__version__ = '$Revision: 17640 $'
from pyexpat import *
|
ankurankan/scikit-learn
|
refs/heads/master
|
sklearn/neighbors/tests/test_kde.py
|
17
|
import numpy as np
from sklearn.utils.testing import (assert_allclose, assert_raises,
assert_equal)
from sklearn.neighbors import KernelDensity, KDTree, NearestNeighbors
from sklearn.neighbors.ball_tree import kernel_norm
from sklearn.pipeline import make_pipeline
from sklearn.datasets import make_blobs
from sklearn.grid_search import GridSearchCV
from sklearn.preprocessing import StandardScaler
def compute_kernel_slow(Y, X, kernel, h):
d = np.sqrt(((Y[:, None, :] - X) ** 2).sum(-1))
norm = kernel_norm(h, X.shape[1], kernel) / X.shape[0]
if kernel == 'gaussian':
return norm * np.exp(-0.5 * (d * d) / (h * h)).sum(-1)
elif kernel == 'tophat':
return norm * (d < h).sum(-1)
elif kernel == 'epanechnikov':
return norm * ((1.0 - (d * d) / (h * h)) * (d < h)).sum(-1)
elif kernel == 'exponential':
return norm * (np.exp(-d / h)).sum(-1)
elif kernel == 'linear':
return norm * ((1 - d / h) * (d < h)).sum(-1)
elif kernel == 'cosine':
return norm * (np.cos(0.5 * np.pi * d / h) * (d < h)).sum(-1)
else:
raise ValueError('kernel not recognized')
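# compute_kernel_slow is the brute-force reference used below: for every point
# in Y it sums the chosen kernel over all of X and scales by
# kernel_norm(h, n_features, kernel) / n_samples, so KernelDensity scores can
# be checked against it directly.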
def test_kernel_density(n_samples=100, n_features=3):
rng = np.random.RandomState(0)
X = rng.randn(n_samples, n_features)
Y = rng.randn(n_samples, n_features)
for kernel in ['gaussian', 'tophat', 'epanechnikov',
'exponential', 'linear', 'cosine']:
for bandwidth in [0.01, 0.1, 1]:
dens_true = compute_kernel_slow(Y, X, kernel, bandwidth)
def check_results(kernel, bandwidth, atol, rtol):
kde = KernelDensity(kernel=kernel, bandwidth=bandwidth,
atol=atol, rtol=rtol)
log_dens = kde.fit(X).score_samples(Y)
assert_allclose(np.exp(log_dens), dens_true,
atol=atol, rtol=max(1E-7, rtol))
assert_allclose(np.exp(kde.score(Y)),
np.prod(dens_true),
atol=atol, rtol=max(1E-7, rtol))
for rtol in [0, 1E-5]:
for atol in [1E-6, 1E-2]:
for breadth_first in (True, False):
yield (check_results, kernel, bandwidth, atol, rtol)
def test_kernel_density_sampling(n_samples=100, n_features=3):
rng = np.random.RandomState(0)
X = rng.randn(n_samples, n_features)
bandwidth = 0.2
for kernel in ['gaussian', 'tophat']:
# draw a tophat sample
kde = KernelDensity(bandwidth, kernel=kernel).fit(X)
samp = kde.sample(100)
assert_equal(X.shape, samp.shape)
# check that samples are in the right range
nbrs = NearestNeighbors(n_neighbors=1).fit(X)
dist, ind = nbrs.kneighbors(X, return_distance=True)
if kernel == 'tophat':
assert np.all(dist < bandwidth)
elif kernel == 'gaussian':
# 5 standard deviations is safe for 100 samples, but there's a
# very small chance this test could fail.
assert np.all(dist < 5 * bandwidth)
# check unsupported kernels
for kernel in ['epanechnikov', 'exponential', 'linear', 'cosine']:
kde = KernelDensity(bandwidth, kernel=kernel).fit(X)
assert_raises(NotImplementedError, kde.sample, 100)
# non-regression test: used to return a scalar
X = rng.randn(4, 1)
kde = KernelDensity(kernel="gaussian").fit(X)
assert_equal(kde.sample().shape, (1, 1))
def test_kde_algorithm_metric_choice():
"""Smoke test for various metrics and algorithms"""
rng = np.random.RandomState(0)
X = rng.randn(10, 2) # 2 features required for haversine dist.
Y = rng.randn(10, 2)
for algorithm in ['auto', 'ball_tree', 'kd_tree']:
for metric in ['euclidean', 'minkowski', 'manhattan',
'chebyshev', 'haversine']:
if algorithm == 'kd_tree' and metric not in KDTree.valid_metrics:
assert_raises(ValueError, KernelDensity,
algorithm=algorithm, metric=metric)
else:
kde = KernelDensity(algorithm=algorithm, metric=metric)
kde.fit(X)
y_dens = kde.score_samples(Y)
assert_equal(y_dens.shape, Y.shape[:1])
def test_kde_score(n_samples=100, n_features=3):
pass
#FIXME
#np.random.seed(0)
#X = np.random.random((n_samples, n_features))
#Y = np.random.random((n_samples, n_features))
def test_kde_badargs():
assert_raises(ValueError, KernelDensity,
algorithm='blah')
assert_raises(ValueError, KernelDensity,
bandwidth=0)
assert_raises(ValueError, KernelDensity,
kernel='blah')
assert_raises(ValueError, KernelDensity,
metric='blah')
assert_raises(ValueError, KernelDensity,
algorithm='kd_tree', metric='blah')
def test_kde_pipeline_gridsearch():
# test that kde plays nice in pipelines and grid-searches
X, _ = make_blobs(cluster_std=.1, random_state=1,
centers=[[0, 1], [1, 0], [0, 0]])
pipe1 = make_pipeline(StandardScaler(with_mean=False, with_std=False),
KernelDensity(kernel="gaussian"))
params = dict(kerneldensity__bandwidth=[0.001, 0.01, 0.1, 1, 10])
search = GridSearchCV(pipe1, param_grid=params, cv=5)
search.fit(X)
assert_equal(search.best_params_['kerneldensity__bandwidth'], .1)
if __name__ == '__main__':
import nose
nose.runmodule()
|
filipposantovito/suds-jurko
|
refs/heads/master
|
suds/umx/core.py
|
18
|
# This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( [email protected] )
"""
Provides base classes for XML->object I{unmarshalling}.
"""
from suds import *
from suds.umx import *
from suds.umx.attrlist import AttrList
from suds.sax.text import Text
from suds.sudsobject import Factory, merge
reserved = {'class':'cls', 'def':'dfn'}
class Core:
"""
The abstract XML I{node} unmarshaller. This class provides the
I{core} unmarshalling functionality.
"""
def process(self, content):
"""
Process an object graph representation of the xml I{node}.
@param content: The current content being unmarshalled.
@type content: L{Content}
@return: A suds object.
@rtype: L{Object}
"""
self.reset()
return self.append(content)
def append(self, content):
"""
Process the specified node and convert the XML document into
a I{suds} L{object}.
@param content: The current content being unmarshalled.
@type content: L{Content}
@return: A I{append-result} tuple as: (L{Object}, I{value})
@rtype: I{append-result}
@note: This is not the proper entry point.
@see: L{process()}
"""
self.start(content)
self.append_attributes(content)
self.append_children(content)
self.append_text(content)
self.end(content)
return self.postprocess(content)
def postprocess(self, content):
"""
Perform final processing of the resulting data structure as follows:
- Mixed values (children and text) will have a result of the I{content.node}.
- Semi-simple values (attributes, no-children and text) will have a result of a
property object.
- Simple values (no-attributes, no-children with text nodes) will have a string
result equal to the value of the content.node.getText().
@param content: The current content being unmarshalled.
@type content: L{Content}
@return: The post-processed result.
@rtype: I{any}
"""
node = content.node
if len(node.children) and node.hasText():
return node
attributes = AttrList(node.attributes)
if attributes.rlen() and \
not len(node.children) and \
node.hasText():
p = Factory.property(node.name, node.getText())
return merge(content.data, p)
if len(content.data):
return content.data
lang = attributes.lang()
if content.node.isnil():
return None
if not len(node.children) and content.text is None:
if self.nillable(content):
return None
else:
return Text('', lang=lang)
if isinstance(content.text, basestring):
return Text(content.text, lang=lang)
else:
return content.text
def append_attributes(self, content):
"""
Append attribute nodes into L{Content.data}.
Attributes in the I{schema} or I{xml} namespaces are skipped.
@param content: The current content being unmarshalled.
@type content: L{Content}
"""
attributes = AttrList(content.node.attributes)
for attr in attributes.real():
name = attr.name
value = attr.value
self.append_attribute(name, value, content)
def append_attribute(self, name, value, content):
"""
Append an attribute name/value into L{Content.data}.
@param name: The attribute name
@type name: basestring
@param value: The attribute's value
@type value: basestring
@param content: The current content being unmarshalled.
@type content: L{Content}
"""
key = name
key = '_%s' % reserved.get(key, key)
setattr(content.data, key, value)
def append_children(self, content):
"""
Append child nodes into L{Content.data}
@param content: The current content being unmarshalled.
@type content: L{Content}
"""
for child in content.node:
cont = Content(child)
cval = self.append(cont)
key = reserved.get(child.name, child.name)
if key in content.data:
v = getattr(content.data, key)
if isinstance(v, list):
v.append(cval)
else:
setattr(content.data, key, [v, cval])
continue
if self.multi_occurrence(cont):
if cval is None:
setattr(content.data, key, [])
else:
setattr(content.data, key, [cval,])
else:
setattr(content.data, key, cval)
def append_text(self, content):
"""
Append text nodes into L{Content.data}
@param content: The current content being unmarshalled.
@type content: L{Content}
"""
if content.node.hasText():
content.text = content.node.getText()
def reset(self):
pass
def start(self, content):
"""
Processing on I{node} has started. Build and return
the proper object.
@param content: The current content being unmarshalled.
@type content: L{Content}
@return: A subclass of Object.
@rtype: L{Object}
"""
content.data = Factory.object(content.node.name)
def end(self, content):
"""
Processing on I{node} has ended.
@param content: The current content being unmarshalled.
@type content: L{Content}
"""
pass
def single_occurrence(self, content):
"""
Get whether the content has at most a single occurrence (not a list).
@param content: The current content being unmarshalled.
@type content: L{Content}
@return: True if content has at most a single occurrence, else False.
@rtype: boolean
'"""
return not self.multi_occurrence(content)
def multi_occurrence(self, content):
"""
Get whether the content has more than one occurrence (a list).
@param content: The current content being unmarshalled.
@type content: L{Content}
@return: True if content has more than one occurrence, else False.
@rtype: boolean
'"""
return False
def nillable(self, content):
"""
Get whether the object is nillable.
@param content: The current content being unmarshalled.
@type content: L{Content}
@return: True if nillable, else False
@rtype: boolean
'"""
return False
|
ESS-LLP/erpnext-healthcare
|
refs/heads/master
|
erpnext/patches/v4_2/update_project_milestones.py
|
121
|
from __future__ import unicode_literals
import frappe
def execute():
for project in frappe.db.sql_list("select name from tabProject"):
frappe.reload_doc("projects", "doctype", "project")
p = frappe.get_doc("Project", project)
p.update_milestones_completed()
p.db_set("percent_milestones_completed", p.percent_milestones_completed)
|
peterfpeterson/mantid
|
refs/heads/master
|
Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/SANS/SANSLoad.py
|
3
|
# Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
# pylint: disable=invalid-name
""" SANSLoad algorithm which handles loading SANS files"""
from mantid.api import (ParallelDataProcessorAlgorithm, MatrixWorkspaceProperty, AlgorithmFactory, PropertyMode,
Progress, WorkspaceProperty)
from mantid.kernel import (Direction, FloatArrayProperty)
from sans.algorithm_detail.load_data import SANSLoadDataFactory
from sans.algorithm_detail.move_sans_instrument_component import move_component, MoveTypes
from sans.common.enums import SANSDataType
from sans.state.Serializer import Serializer
class SANSLoad(ParallelDataProcessorAlgorithm):
def category(self):
return 'SANS\\Load'
def summary(self):
return 'Load SANS data'
def PyInit(self):
# ----------
# INPUT
# ----------
self.declareProperty('SANSState', "",
doc='A JSON String which fulfills the SANSState contract.')
self.declareProperty("PublishToCache", True, direction=Direction.Input,
doc="Publish the calibration workspace to a cache, in order to avoid reloading "
"for subsequent runs.")
self.declareProperty("UseCached", True, direction=Direction.Input,
doc="Checks if there are loaded files available. If they are, those files are used.")
# Beam coordinates if an initial move of the workspace is requested
self.declareProperty(FloatArrayProperty(name='BeamCoordinates', values=[]),
doc='The coordinates which are used to position the instrument component(s). '
'If the workspaces should be loaded with an initial move, then this '
'needs to be specified.')
# Components which are to be moved
self.declareProperty('Component', '', direction=Direction.Input,
doc='Component that should be moved. '
'If the workspaces should be loaded with an initial move, then this '
'needs to be specified.')
# ------------
# OUTPUT
# ------------
default_number_of_workspaces = 0
# Sample Scatter Workspaces
self.declareProperty(WorkspaceProperty('SampleScatterWorkspace', '',
optional=PropertyMode.Optional, direction=Direction.Output),
doc='The sample scatter workspace. This workspace does not contain monitors.')
self.declareProperty(WorkspaceProperty('SampleScatterMonitorWorkspace', '',
optional=PropertyMode.Optional, direction=Direction.Output),
doc='The sample scatter monitor workspace. This workspace only contains monitors.')
self.declareProperty(MatrixWorkspaceProperty('SampleTransmissionWorkspace', '',
optional=PropertyMode.Optional, direction=Direction.Output),
doc='The sample transmission workspace.')
self.declareProperty(MatrixWorkspaceProperty('SampleDirectWorkspace', '',
optional=PropertyMode.Optional, direction=Direction.Output),
                             doc='The sample direct workspace.')
self.setPropertyGroup("SampleScatterWorkspace", 'Sample')
self.setPropertyGroup("SampleScatterMonitorWorkspace", 'Sample')
self.setPropertyGroup("SampleTransmissionWorkspace", 'Sample')
self.setPropertyGroup("SampleDirectWorkspace", 'Sample')
# Number of sample workspaces
self.declareProperty('NumberOfSampleScatterWorkspaces', defaultValue=default_number_of_workspaces,
direction=Direction.Output,
                             doc='The number of workspaces for sample scatter.')
self.declareProperty('NumberOfSampleTransmissionWorkspaces', defaultValue=default_number_of_workspaces,
direction=Direction.Output,
                             doc='The number of workspaces for sample transmission.')
self.declareProperty('NumberOfSampleDirectWorkspaces', defaultValue=default_number_of_workspaces,
direction=Direction.Output,
                             doc='The number of workspaces for sample direct.')
self.declareProperty(MatrixWorkspaceProperty('CanScatterWorkspace', '',
optional=PropertyMode.Optional, direction=Direction.Output),
doc='The can scatter workspace. This workspace does not contain monitors.')
self.declareProperty(MatrixWorkspaceProperty('CanScatterMonitorWorkspace', '',
optional=PropertyMode.Optional, direction=Direction.Output),
doc='The can scatter monitor workspace. This workspace only contains monitors.')
self.declareProperty(MatrixWorkspaceProperty('CanTransmissionWorkspace', '',
optional=PropertyMode.Optional, direction=Direction.Output),
doc='The can transmission workspace.')
self.declareProperty(MatrixWorkspaceProperty('CanDirectWorkspace', '',
optional=PropertyMode.Optional, direction=Direction.Output),
                             doc='The can direct workspace.')
self.setPropertyGroup("CanScatterWorkspace", 'Can')
self.setPropertyGroup("CanScatterMonitorWorkspace", 'Can')
self.setPropertyGroup("CanTransmissionWorkspace", 'Can')
self.setPropertyGroup("CanDirectWorkspace", 'Can')
self.declareProperty('NumberOfCanScatterWorkspaces', defaultValue=default_number_of_workspaces,
direction=Direction.Output,
                             doc='The number of workspaces for can scatter.')
self.declareProperty('NumberOfCanTransmissionWorkspaces', defaultValue=default_number_of_workspaces,
direction=Direction.Output,
                             doc='The number of workspaces for can transmission.')
self.declareProperty('NumberOfCanDirectWorkspaces', defaultValue=default_number_of_workspaces,
direction=Direction.Output,
                             doc='The number of workspaces for can direct.')
def PyExec(self):
# Read the state
state_property_manager = self.getProperty("SANSState").value
state = Serializer.from_json(state_property_manager)
# Run the appropriate SANSLoader and get the workspaces and the workspace monitors
        # Note that cache optimization is only applied to the calibration workspace, since it is not available as a
        # return property and is unlikely to change between different reductions.
use_cached = self.getProperty("UseCached").value
publish_to_ads = self.getProperty("PublishToCache").value
data = state.data
state_adjustment = state.adjustment
progress = self._get_progress_for_file_loading(data, state_adjustment)
# Get the correct SANSLoader from the SANSLoaderFactory
load_factory = SANSLoadDataFactory()
loader = load_factory.create_loader(state)
workspaces, workspace_monitors = loader.execute(data_info=data, use_cached=use_cached,
publish_to_ads=publish_to_ads, progress=progress,
parent_alg=self, adjustment_info=state.adjustment)
progress.report("Loaded the data.")
progress_move = Progress(self, start=0.8, end=1.0, nreports=2)
progress_move.report("Starting to move the workspaces.")
self._perform_initial_move(workspaces, state)
progress_move.report("Finished moving the workspaces.")
# Set output workspaces
for workspace_type, workspace in workspaces.items():
self.set_output_for_workspaces(workspace_type, workspace)
# Set the output monitor workspaces
for workspace_type, workspace in workspace_monitors.items():
self.set_output_for_monitor_workspaces(workspace_type, workspace)
def validateInputs(self):
errors = dict()
# Check that the input can be converted into the right state object
state_json = self.getProperty("SANSState").value
try:
state = Serializer.from_json(state_json)
state.validate()
except ValueError as err:
errors.update({"SANSState": str(err)})
return errors
        # We need to validate that, for each expected output workspace of the SANSState, an output workspace name
        # was supplied in PyInit.
# For sample scatter
sample_scatter = self.getProperty("SampleScatterWorkspace").value
sample_scatter_as_string = self.getProperty("SampleScatterWorkspace").valueAsStr
if sample_scatter is None and not sample_scatter_as_string:
errors.update({"SampleScatterWorkspace": "A sample scatter output workspace needs to be specified."})
# For sample scatter monitor
sample_scatter_monitor = self.getProperty("SampleScatterMonitorWorkspace").value
sample_scatter_monitor_as_string = self.getProperty("SampleScatterMonitorWorkspace").valueAsStr
if sample_scatter_monitor is None and not sample_scatter_monitor_as_string:
errors.update({"SampleScatterMonitorWorkspace": "A sample scatter output workspace needs to be specified."})
# ------------------------------------
# Check the optional output workspaces
# If they are specified in the SANSState, then we require them to be set on the output as well.
data_info = state.data
# For sample transmission
sample_transmission = self.getProperty("SampleTransmissionWorkspace").value
sample_transmission_as_string = self.getProperty("SampleTransmissionWorkspace").valueAsStr
sample_transmission_was_set = sample_transmission is not None or len(sample_transmission_as_string) > 0
sample_transmission_from_state = data_info.sample_transmission
if not sample_transmission_was_set and sample_transmission_from_state is not None:
errors.update({"SampleTransmissionWorkspace": "You need to set the output for the sample transmission"
" workspace since it is specified to be loaded in your "
"reduction configuration."})
if sample_transmission_was_set and sample_transmission_from_state is None:
errors.update({"SampleTransmissionWorkspace": "You set an output workspace for sample transmission, "
"although none is specified in the reduction configuration."})
# For sample direct
sample_direct = self.getProperty("SampleDirectWorkspace").value
sample_direct_as_string = self.getProperty("SampleDirectWorkspace").valueAsStr
sample_direct_was_set = sample_direct is not None or len(sample_direct_as_string) > 0
sample_direct_from_state = data_info.sample_direct
if not sample_direct_was_set and sample_direct_from_state is not None:
errors.update({"SampleDirectWorkspace": "You need to set the output for the sample direct"
" workspace since it is specified to be loaded in your "
"reduction configuration."})
if sample_direct_was_set and sample_direct_from_state is None:
errors.update({"SampleDirectWorkspace": "You set an output workspace for sample direct, "
"although none is specified in the reduction configuration."})
# For can scatter + monitor
can_scatter = self.getProperty("CanScatterWorkspace").value
can_scatter_as_string = self.getProperty("CanScatterWorkspace").valueAsStr
can_scatter_was_set = can_scatter is not None or len(can_scatter_as_string) > 0
can_scatter_from_state = data_info.can_scatter
if not can_scatter_was_set and can_scatter_from_state is not None:
errors.update({"CanScatterWorkspace": "You need to set the output for the can scatter"
" workspace since it is specified to be loaded in your "
"reduction configuration."})
if can_scatter_was_set and can_scatter_from_state is None:
errors.update({"CanScatterWorkspace": "You set an output workspace for can scatter, "
"although none is specified in the reduction configuration."})
# For can scatter monitor
can_scatter_monitor = self.getProperty("CanScatterMonitorWorkspace").value
can_scatter_monitor_as_string = self.getProperty("CanScatterMonitorWorkspace").valueAsStr
can_scatter_monitor_was_set = can_scatter_monitor is not None or len(can_scatter_monitor_as_string) > 0
if not can_scatter_monitor_was_set and can_scatter_from_state is not None:
errors.update({"CanScatterMonitorWorkspace": "You need to set the output for the can scatter monitor"
" workspace since it is specified to be loaded in your "
"reduction configuration."})
if can_scatter_monitor_was_set and can_scatter_from_state is None:
errors.update({"CanScatterMonitorWorkspace": "You set an output workspace for can scatter monitor, "
"although none is specified in the reduction configuration."})
# For sample transmission
can_transmission = self.getProperty("CanTransmissionWorkspace").value
can_transmission_as_string = self.getProperty("CanTransmissionWorkspace").valueAsStr
can_transmission_was_set = can_transmission is not None or len(can_transmission_as_string) > 0
can_transmission_from_state = data_info.can_transmission
if not can_transmission_was_set and can_transmission_from_state is not None:
errors.update({"CanTransmissionWorkspace": "You need to set the output for the can transmission"
" workspace since it is specified to be loaded in your "
"reduction configuration."})
if can_transmission_was_set and can_transmission_from_state is None:
errors.update({"CanTransmissionWorkspace": "You set an output workspace for can transmission, "
"although none is specified in the reduction configuration."})
# For can direct
can_direct = self.getProperty("CanDirectWorkspace").value
can_direct_as_string = self.getProperty("CanDirectWorkspace").valueAsStr
can_direct_was_set = can_direct is not None or len(can_direct_as_string) > 0
can_direct_from_state = data_info.can_direct
if not can_direct_was_set and can_direct_from_state is not None:
errors.update({"CanDirectWorkspace": "You need to set the output for the can direct"
" workspace since it is specified to be loaded in your "
"reduction configuration."})
if can_direct_was_set and can_direct_from_state is None:
errors.update({"CanDirectWorkspace": "You set an output workspace for can direct, "
"although none is specified in the reduction configuration."})
return errors
def set_output_for_workspaces(self, workspace_type, workspaces):
if workspace_type is SANSDataType.SAMPLE_SCATTER:
self.set_property_with_number_of_workspaces("SampleScatterWorkspace", workspaces)
elif workspace_type is SANSDataType.SAMPLE_TRANSMISSION:
self.set_property_with_number_of_workspaces("SampleTransmissionWorkspace", workspaces)
elif workspace_type is SANSDataType.SAMPLE_DIRECT:
self.set_property_with_number_of_workspaces("SampleDirectWorkspace", workspaces)
elif workspace_type is SANSDataType.CAN_SCATTER:
self.set_property_with_number_of_workspaces("CanScatterWorkspace", workspaces)
elif workspace_type is SANSDataType.CAN_TRANSMISSION:
self.set_property_with_number_of_workspaces("CanTransmissionWorkspace", workspaces)
elif workspace_type is SANSDataType.CAN_DIRECT:
self.set_property_with_number_of_workspaces("CanDirectWorkspace", workspaces)
else:
raise RuntimeError("SANSLoad: Unknown data output workspace format: {0}".format(str(workspace_type)))
def set_output_for_monitor_workspaces(self, workspace_type, workspaces):
if workspace_type is SANSDataType.SAMPLE_SCATTER:
self.set_property("SampleScatterMonitorWorkspace", workspaces)
elif workspace_type is SANSDataType.CAN_SCATTER:
self.set_property("CanScatterMonitorWorkspace", workspaces)
else:
raise RuntimeError("SANSLoad: Unknown data output workspace format: {0}".format(str(workspace_type)))
def set_property(self, name, workspace_collection):
"""
        We receive a name for a property and a collection of workspaces. If the workspace is a group workspace, then
        we dynamically create output properties; the caller then needs to query the output workspaces individually,
        so we also need to communicate how many there are.
        :param name: The name of the output property
        :param workspace_collection: A list of workspaces which corresponds to the name. Note that normally
                                     there will be only one element in this list. Only when dealing with multiperiod
                                     data can we expect to see more workspaces in the list.
"""
if len(workspace_collection) > 1:
# Note that the first output is the same as we have set above.
counter = 1
for workspace in workspace_collection:
output_name = name + "_" + str(counter)
self.declareProperty(MatrixWorkspaceProperty(output_name, '',
optional=PropertyMode.Optional,
direction=Direction.Output),
doc='A child workspace of a multi-period file.')
# We need to set a name on here if one was set
user_specified_name = self.getProperty(name).valueAsStr
if user_specified_name:
user_specified_name += "_" + str(counter)
self.setProperty(output_name, user_specified_name)
self.setProperty(output_name, workspace)
counter += 1
else:
self.setProperty(name, workspace_collection[0])
return len(workspace_collection)
def set_property_with_number_of_workspaces(self, name, workspace_collection):
counter = self.set_property(name, workspace_collection)
# The property name for the number of workspaces
number_of_workspaces_name = "NumberOf" + name + "s"
self.setProperty(number_of_workspaces_name, counter)
def _perform_initial_move(self, workspaces, state):
# If beam centre was specified then use it
beam_coordinates = self.getProperty("BeamCoordinates").value
# The workspaces are stored in a dict: workspace_names (sample_scatter, etc) : ListOfWorkspaces
for key, workspace_list in workspaces.items():
is_trans = key in [SANSDataType.CAN_DIRECT, SANSDataType.CAN_TRANSMISSION,
SANSDataType.SAMPLE_TRANSMISSION, SANSDataType.SAMPLE_DIRECT]
for workspace in workspace_list:
move_component(component_name="", state=state,
workspace=workspace, move_type=MoveTypes.RESET_POSITION)
move_component(component_name="LAB", state=state,
beam_coordinates=beam_coordinates, move_type=MoveTypes.INITIAL_MOVE,
workspace=workspace, is_transmission_workspace=is_trans)
def _get_progress_for_file_loading(self, data, state_adjustment):
# Get the number of workspaces which are to be loaded
number_of_files_to_load = sum(x is not None for x in [data.sample_scatter, data.sample_transmission,
                                                              data.sample_direct, data.can_scatter,
data.can_transmission, data.can_direct,
state_adjustment.calibration])
progress_steps = number_of_files_to_load + 1
        # Loading reports over the full progress range here; the initial move performed in PyExec
        # reports separately over the 0.8-1.0 range.
        end = 1.0
progress = Progress(self, start=0.0, end=end, nreports=progress_steps)
return progress
# Register algorithm with Mantid
AlgorithmFactory.subscribe(SANSLoad)
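# Illustrative call from a user script (a sketch, not part of this algorithm; assumes
# "state_json" holds a serialized SANSState and that Mantid's simpleapi exposes the
# registered algorithm as usual):
#
#   from mantid.simpleapi import SANSLoad
#   outputs = SANSLoad(SANSState=state_json,
#                      SampleScatterWorkspace="sample_scatter",
#                      SampleScatterMonitorWorkspace="sample_scatter_monitors")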
|
santisiri/popego
|
refs/heads/master
|
envs/ALPHA-POPEGO/lib/python2.5/site-packages/twisted/trial/test/weird.py
|
82
|
from twisted.trial import unittest
from twisted.internet import defer
# Used in test_tests.TestUnhandledDeferred
class TestBleeding(unittest.TestCase):
"""This test creates an unhandled Deferred and leaves it in a cycle.
The Deferred is left in a cycle so that the garbage collector won't pick it
up immediately. We were having some problems where unhandled Deferreds in
one test were failing random other tests. (See #1507, #1213)
"""
def test_unhandledDeferred(self):
try:
1/0
except ZeroDivisionError:
f = defer.fail()
# these two lines create the cycle. don't remove them
l = [f]
l.append(l)
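            # Intentionally no errback is added: the failure is only reported when the
            # Deferred is garbage collected, and the self-referencing list above delays
            # collection so the error surfaces during a later test, which is the scenario
            # exercised by test_tests.TestUnhandledDeferred.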
|
sinkuri256/python-for-android
|
refs/heads/master
|
python3-alpha/python3-src/Lib/encodings/cp037.py
|
266
|
""" Python Character Mapping Codec cp037 generated from 'MAPPINGS/VENDORS/MICSFT/EBCDIC/CP037.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='cp037',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
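### Usage example
# Illustrative round trip (the codec is registered under the name 'cp037' by the
# standard encodings package search function, so it can be used directly):
#
#   ebcdic = 'hello'.encode('cp037')   # -> b'\x88\x85\x93\x93\x96'
#   text = ebcdic.decode('cp037')      # -> 'hello'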
### Decoding Table
decoding_table = (
'\x00' # 0x00 -> NULL
'\x01' # 0x01 -> START OF HEADING
'\x02' # 0x02 -> START OF TEXT
'\x03' # 0x03 -> END OF TEXT
'\x9c' # 0x04 -> CONTROL
'\t' # 0x05 -> HORIZONTAL TABULATION
'\x86' # 0x06 -> CONTROL
'\x7f' # 0x07 -> DELETE
'\x97' # 0x08 -> CONTROL
'\x8d' # 0x09 -> CONTROL
'\x8e' # 0x0A -> CONTROL
'\x0b' # 0x0B -> VERTICAL TABULATION
'\x0c' # 0x0C -> FORM FEED
'\r' # 0x0D -> CARRIAGE RETURN
'\x0e' # 0x0E -> SHIFT OUT
'\x0f' # 0x0F -> SHIFT IN
'\x10' # 0x10 -> DATA LINK ESCAPE
'\x11' # 0x11 -> DEVICE CONTROL ONE
'\x12' # 0x12 -> DEVICE CONTROL TWO
'\x13' # 0x13 -> DEVICE CONTROL THREE
'\x9d' # 0x14 -> CONTROL
'\x85' # 0x15 -> CONTROL
'\x08' # 0x16 -> BACKSPACE
'\x87' # 0x17 -> CONTROL
'\x18' # 0x18 -> CANCEL
'\x19' # 0x19 -> END OF MEDIUM
'\x92' # 0x1A -> CONTROL
'\x8f' # 0x1B -> CONTROL
'\x1c' # 0x1C -> FILE SEPARATOR
'\x1d' # 0x1D -> GROUP SEPARATOR
'\x1e' # 0x1E -> RECORD SEPARATOR
'\x1f' # 0x1F -> UNIT SEPARATOR
'\x80' # 0x20 -> CONTROL
'\x81' # 0x21 -> CONTROL
'\x82' # 0x22 -> CONTROL
'\x83' # 0x23 -> CONTROL
'\x84' # 0x24 -> CONTROL
'\n' # 0x25 -> LINE FEED
'\x17' # 0x26 -> END OF TRANSMISSION BLOCK
'\x1b' # 0x27 -> ESCAPE
'\x88' # 0x28 -> CONTROL
'\x89' # 0x29 -> CONTROL
'\x8a' # 0x2A -> CONTROL
'\x8b' # 0x2B -> CONTROL
'\x8c' # 0x2C -> CONTROL
'\x05' # 0x2D -> ENQUIRY
'\x06' # 0x2E -> ACKNOWLEDGE
'\x07' # 0x2F -> BELL
'\x90' # 0x30 -> CONTROL
'\x91' # 0x31 -> CONTROL
'\x16' # 0x32 -> SYNCHRONOUS IDLE
'\x93' # 0x33 -> CONTROL
'\x94' # 0x34 -> CONTROL
'\x95' # 0x35 -> CONTROL
'\x96' # 0x36 -> CONTROL
'\x04' # 0x37 -> END OF TRANSMISSION
'\x98' # 0x38 -> CONTROL
'\x99' # 0x39 -> CONTROL
'\x9a' # 0x3A -> CONTROL
'\x9b' # 0x3B -> CONTROL
'\x14' # 0x3C -> DEVICE CONTROL FOUR
'\x15' # 0x3D -> NEGATIVE ACKNOWLEDGE
'\x9e' # 0x3E -> CONTROL
'\x1a' # 0x3F -> SUBSTITUTE
' ' # 0x40 -> SPACE
'\xa0' # 0x41 -> NO-BREAK SPACE
'\xe2' # 0x42 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
'\xe4' # 0x43 -> LATIN SMALL LETTER A WITH DIAERESIS
'\xe0' # 0x44 -> LATIN SMALL LETTER A WITH GRAVE
'\xe1' # 0x45 -> LATIN SMALL LETTER A WITH ACUTE
'\xe3' # 0x46 -> LATIN SMALL LETTER A WITH TILDE
'\xe5' # 0x47 -> LATIN SMALL LETTER A WITH RING ABOVE
'\xe7' # 0x48 -> LATIN SMALL LETTER C WITH CEDILLA
'\xf1' # 0x49 -> LATIN SMALL LETTER N WITH TILDE
'\xa2' # 0x4A -> CENT SIGN
'.' # 0x4B -> FULL STOP
'<' # 0x4C -> LESS-THAN SIGN
'(' # 0x4D -> LEFT PARENTHESIS
'+' # 0x4E -> PLUS SIGN
'|' # 0x4F -> VERTICAL LINE
'&' # 0x50 -> AMPERSAND
'\xe9' # 0x51 -> LATIN SMALL LETTER E WITH ACUTE
'\xea' # 0x52 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
'\xeb' # 0x53 -> LATIN SMALL LETTER E WITH DIAERESIS
'\xe8' # 0x54 -> LATIN SMALL LETTER E WITH GRAVE
'\xed' # 0x55 -> LATIN SMALL LETTER I WITH ACUTE
'\xee' # 0x56 -> LATIN SMALL LETTER I WITH CIRCUMFLEX
'\xef' # 0x57 -> LATIN SMALL LETTER I WITH DIAERESIS
'\xec' # 0x58 -> LATIN SMALL LETTER I WITH GRAVE
'\xdf' # 0x59 -> LATIN SMALL LETTER SHARP S (GERMAN)
'!' # 0x5A -> EXCLAMATION MARK
'$' # 0x5B -> DOLLAR SIGN
'*' # 0x5C -> ASTERISK
')' # 0x5D -> RIGHT PARENTHESIS
';' # 0x5E -> SEMICOLON
'\xac' # 0x5F -> NOT SIGN
'-' # 0x60 -> HYPHEN-MINUS
'/' # 0x61 -> SOLIDUS
'\xc2' # 0x62 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
'\xc4' # 0x63 -> LATIN CAPITAL LETTER A WITH DIAERESIS
'\xc0' # 0x64 -> LATIN CAPITAL LETTER A WITH GRAVE
'\xc1' # 0x65 -> LATIN CAPITAL LETTER A WITH ACUTE
'\xc3' # 0x66 -> LATIN CAPITAL LETTER A WITH TILDE
'\xc5' # 0x67 -> LATIN CAPITAL LETTER A WITH RING ABOVE
'\xc7' # 0x68 -> LATIN CAPITAL LETTER C WITH CEDILLA
'\xd1' # 0x69 -> LATIN CAPITAL LETTER N WITH TILDE
'\xa6' # 0x6A -> BROKEN BAR
',' # 0x6B -> COMMA
'%' # 0x6C -> PERCENT SIGN
'_' # 0x6D -> LOW LINE
'>' # 0x6E -> GREATER-THAN SIGN
'?' # 0x6F -> QUESTION MARK
'\xf8' # 0x70 -> LATIN SMALL LETTER O WITH STROKE
'\xc9' # 0x71 -> LATIN CAPITAL LETTER E WITH ACUTE
'\xca' # 0x72 -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
'\xcb' # 0x73 -> LATIN CAPITAL LETTER E WITH DIAERESIS
'\xc8' # 0x74 -> LATIN CAPITAL LETTER E WITH GRAVE
'\xcd' # 0x75 -> LATIN CAPITAL LETTER I WITH ACUTE
'\xce' # 0x76 -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
'\xcf' # 0x77 -> LATIN CAPITAL LETTER I WITH DIAERESIS
'\xcc' # 0x78 -> LATIN CAPITAL LETTER I WITH GRAVE
'`' # 0x79 -> GRAVE ACCENT
':' # 0x7A -> COLON
'#' # 0x7B -> NUMBER SIGN
'@' # 0x7C -> COMMERCIAL AT
"'" # 0x7D -> APOSTROPHE
'=' # 0x7E -> EQUALS SIGN
'"' # 0x7F -> QUOTATION MARK
'\xd8' # 0x80 -> LATIN CAPITAL LETTER O WITH STROKE
'a' # 0x81 -> LATIN SMALL LETTER A
'b' # 0x82 -> LATIN SMALL LETTER B
'c' # 0x83 -> LATIN SMALL LETTER C
'd' # 0x84 -> LATIN SMALL LETTER D
'e' # 0x85 -> LATIN SMALL LETTER E
'f' # 0x86 -> LATIN SMALL LETTER F
'g' # 0x87 -> LATIN SMALL LETTER G
'h' # 0x88 -> LATIN SMALL LETTER H
'i' # 0x89 -> LATIN SMALL LETTER I
'\xab' # 0x8A -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
'\xbb' # 0x8B -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
'\xf0' # 0x8C -> LATIN SMALL LETTER ETH (ICELANDIC)
'\xfd' # 0x8D -> LATIN SMALL LETTER Y WITH ACUTE
'\xfe' # 0x8E -> LATIN SMALL LETTER THORN (ICELANDIC)
'\xb1' # 0x8F -> PLUS-MINUS SIGN
'\xb0' # 0x90 -> DEGREE SIGN
'j' # 0x91 -> LATIN SMALL LETTER J
'k' # 0x92 -> LATIN SMALL LETTER K
'l' # 0x93 -> LATIN SMALL LETTER L
'm' # 0x94 -> LATIN SMALL LETTER M
'n' # 0x95 -> LATIN SMALL LETTER N
'o' # 0x96 -> LATIN SMALL LETTER O
'p' # 0x97 -> LATIN SMALL LETTER P
'q' # 0x98 -> LATIN SMALL LETTER Q
'r' # 0x99 -> LATIN SMALL LETTER R
'\xaa' # 0x9A -> FEMININE ORDINAL INDICATOR
'\xba' # 0x9B -> MASCULINE ORDINAL INDICATOR
'\xe6' # 0x9C -> LATIN SMALL LIGATURE AE
'\xb8' # 0x9D -> CEDILLA
'\xc6' # 0x9E -> LATIN CAPITAL LIGATURE AE
'\xa4' # 0x9F -> CURRENCY SIGN
'\xb5' # 0xA0 -> MICRO SIGN
'~' # 0xA1 -> TILDE
's' # 0xA2 -> LATIN SMALL LETTER S
't' # 0xA3 -> LATIN SMALL LETTER T
'u' # 0xA4 -> LATIN SMALL LETTER U
'v' # 0xA5 -> LATIN SMALL LETTER V
'w' # 0xA6 -> LATIN SMALL LETTER W
'x' # 0xA7 -> LATIN SMALL LETTER X
'y' # 0xA8 -> LATIN SMALL LETTER Y
'z' # 0xA9 -> LATIN SMALL LETTER Z
'\xa1' # 0xAA -> INVERTED EXCLAMATION MARK
'\xbf' # 0xAB -> INVERTED QUESTION MARK
'\xd0' # 0xAC -> LATIN CAPITAL LETTER ETH (ICELANDIC)
'\xdd' # 0xAD -> LATIN CAPITAL LETTER Y WITH ACUTE
'\xde' # 0xAE -> LATIN CAPITAL LETTER THORN (ICELANDIC)
'\xae' # 0xAF -> REGISTERED SIGN
'^' # 0xB0 -> CIRCUMFLEX ACCENT
'\xa3' # 0xB1 -> POUND SIGN
'\xa5' # 0xB2 -> YEN SIGN
'\xb7' # 0xB3 -> MIDDLE DOT
'\xa9' # 0xB4 -> COPYRIGHT SIGN
'\xa7' # 0xB5 -> SECTION SIGN
'\xb6' # 0xB6 -> PILCROW SIGN
'\xbc' # 0xB7 -> VULGAR FRACTION ONE QUARTER
'\xbd' # 0xB8 -> VULGAR FRACTION ONE HALF
'\xbe' # 0xB9 -> VULGAR FRACTION THREE QUARTERS
'[' # 0xBA -> LEFT SQUARE BRACKET
']' # 0xBB -> RIGHT SQUARE BRACKET
'\xaf' # 0xBC -> MACRON
'\xa8' # 0xBD -> DIAERESIS
'\xb4' # 0xBE -> ACUTE ACCENT
'\xd7' # 0xBF -> MULTIPLICATION SIGN
'{' # 0xC0 -> LEFT CURLY BRACKET
'A' # 0xC1 -> LATIN CAPITAL LETTER A
'B' # 0xC2 -> LATIN CAPITAL LETTER B
'C' # 0xC3 -> LATIN CAPITAL LETTER C
'D' # 0xC4 -> LATIN CAPITAL LETTER D
'E' # 0xC5 -> LATIN CAPITAL LETTER E
'F' # 0xC6 -> LATIN CAPITAL LETTER F
'G' # 0xC7 -> LATIN CAPITAL LETTER G
'H' # 0xC8 -> LATIN CAPITAL LETTER H
'I' # 0xC9 -> LATIN CAPITAL LETTER I
'\xad' # 0xCA -> SOFT HYPHEN
'\xf4' # 0xCB -> LATIN SMALL LETTER O WITH CIRCUMFLEX
'\xf6' # 0xCC -> LATIN SMALL LETTER O WITH DIAERESIS
'\xf2' # 0xCD -> LATIN SMALL LETTER O WITH GRAVE
'\xf3' # 0xCE -> LATIN SMALL LETTER O WITH ACUTE
'\xf5' # 0xCF -> LATIN SMALL LETTER O WITH TILDE
'}' # 0xD0 -> RIGHT CURLY BRACKET
'J' # 0xD1 -> LATIN CAPITAL LETTER J
'K' # 0xD2 -> LATIN CAPITAL LETTER K
'L' # 0xD3 -> LATIN CAPITAL LETTER L
'M' # 0xD4 -> LATIN CAPITAL LETTER M
'N' # 0xD5 -> LATIN CAPITAL LETTER N
'O' # 0xD6 -> LATIN CAPITAL LETTER O
'P' # 0xD7 -> LATIN CAPITAL LETTER P
'Q' # 0xD8 -> LATIN CAPITAL LETTER Q
'R' # 0xD9 -> LATIN CAPITAL LETTER R
'\xb9' # 0xDA -> SUPERSCRIPT ONE
'\xfb' # 0xDB -> LATIN SMALL LETTER U WITH CIRCUMFLEX
'\xfc' # 0xDC -> LATIN SMALL LETTER U WITH DIAERESIS
'\xf9' # 0xDD -> LATIN SMALL LETTER U WITH GRAVE
'\xfa' # 0xDE -> LATIN SMALL LETTER U WITH ACUTE
'\xff' # 0xDF -> LATIN SMALL LETTER Y WITH DIAERESIS
'\\' # 0xE0 -> REVERSE SOLIDUS
'\xf7' # 0xE1 -> DIVISION SIGN
'S' # 0xE2 -> LATIN CAPITAL LETTER S
'T' # 0xE3 -> LATIN CAPITAL LETTER T
'U' # 0xE4 -> LATIN CAPITAL LETTER U
'V' # 0xE5 -> LATIN CAPITAL LETTER V
'W' # 0xE6 -> LATIN CAPITAL LETTER W
'X' # 0xE7 -> LATIN CAPITAL LETTER X
'Y' # 0xE8 -> LATIN CAPITAL LETTER Y
'Z' # 0xE9 -> LATIN CAPITAL LETTER Z
'\xb2' # 0xEA -> SUPERSCRIPT TWO
'\xd4' # 0xEB -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
'\xd6' # 0xEC -> LATIN CAPITAL LETTER O WITH DIAERESIS
'\xd2' # 0xED -> LATIN CAPITAL LETTER O WITH GRAVE
'\xd3' # 0xEE -> LATIN CAPITAL LETTER O WITH ACUTE
'\xd5' # 0xEF -> LATIN CAPITAL LETTER O WITH TILDE
'0' # 0xF0 -> DIGIT ZERO
'1' # 0xF1 -> DIGIT ONE
'2' # 0xF2 -> DIGIT TWO
'3' # 0xF3 -> DIGIT THREE
'4' # 0xF4 -> DIGIT FOUR
'5' # 0xF5 -> DIGIT FIVE
'6' # 0xF6 -> DIGIT SIX
'7' # 0xF7 -> DIGIT SEVEN
'8' # 0xF8 -> DIGIT EIGHT
'9' # 0xF9 -> DIGIT NINE
'\xb3' # 0xFA -> SUPERSCRIPT THREE
'\xdb' # 0xFB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
'\xdc' # 0xFC -> LATIN CAPITAL LETTER U WITH DIAERESIS
'\xd9' # 0xFD -> LATIN CAPITAL LETTER U WITH GRAVE
'\xda' # 0xFE -> LATIN CAPITAL LETTER U WITH ACUTE
'\x9f' # 0xFF -> CONTROL
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
|
HPPTECH/hpp_IOSTressTest
|
refs/heads/master
|
IOST_0.23/Libs/IOST_WRun/IOST_WRun_StationInfo.py
|
3
|
#!/usr/bin/python
#======================================================================
#
# Project : hpp_IOStressTest
# File : Libs/IOST_WRun/IOST_WRun_StationInfo.py
# Date : Oct 25, 2016
# Author : HuuHoang Nguyen
# Contact : [email protected]
# : [email protected]
# License : MIT License
# Copyright : 2016
# Description: The hpp_IOStressTest is under the MIT License, a copy of license which may be found in LICENSE
#
#======================================================================
import io
import os
import sys
import time
from IOST_Basic import *
from IOST_Config import *
import gtk
import gobject
import gtk.glade
import vte
#======================================================================
try:
IOST_DBG_EN
if IOST_DBG_EN:
IOST_WRunStationInfo_DebugEnable =0
else:
IOST_WRunStationInfo_DebugEnable =0
except:
IOST_DBG_EN = False
IOST_WRunStationInfo_DebugEnable =0
#======================================================================
class IOST_WRun_StationInfo():
"""
    This is the class to get all information of the Station object from the IOST_WRun_Skylark window and to control
    these components
"""
#----------------------------------------------------------------------
def __init__(self, glade_filename, window_name, builder=None):
self.IOST_WRunStationInfo_WindowName = window_name
if not builder:
self.IOST_WRunStationInfo_Builder = gtk.Builder()
self.IOST_WRunStationInfo_Builder.add_from_file(glade_filename)
self.IOST_WRunStationInfo_Builder.connect_signals(self)
else:
self.IOST_WRunStationInfo_Builder = builder
#----------------------------------------------------------------------
def WRun_GetStationInfo_Obj(self, window_name):
"Get all Station info objecs on WRun window and store into self.IOST_Objs"
# self.IOST_Objs[window_name]["_Summary_StationInfo_ConsoleIP_Value_L"] = self.IOST_WRunStationInfo_Builder.get_object(self.IOST_Objs[window_name]["_Summary_StationInfo_ConsoleIP_Value_L"])
# self.IOST_Objs[window_name]["_Summary_StationInfo_ConsolePort_Value_L"] = self.IOST_WRunStationInfo_Builder.get_object(self.IOST_Objs[window_name]["_Summary_StationInfo_ConsolePort_Value_L"])
# self.IOST_Objs[window_name]["_Summary_StationInfo_SlimproPort_Value_L"] = self.IOST_WRunStationInfo_Builder.get_object(self.IOST_Objs[window_name]["_Summary_StationInfo_SlimproPort_Value_L"])
# self.IOST_Objs[window_name]["_Summary_StationInfo_NPS_IP_Value_L"] = self.IOST_WRunStationInfo_Builder.get_object(self.IOST_Objs[window_name]["_Summary_StationInfo_NPS_IP_Value_L"])
# self.IOST_Objs[window_name]["_Summary_StationInfo_NPS_Port_Value_L"] = self.IOST_WRunStationInfo_Builder.get_object(self.IOST_Objs[window_name]["_Summary_StationInfo_NPS_Port_Value_L"])
# self.IOST_Objs[window_name]["_Summary_StationInfo_ThermalIP_Value_L"] = self.IOST_WRunStationInfo_Builder.get_object(self.IOST_Objs[window_name]["_Summary_StationInfo_ThermalIP_Value_L"])
# self.IOST_Objs[window_name]["_Summary_StationInfo_ThermalPort_Value_L"] = self.IOST_WRunStationInfo_Builder.get_object(self.IOST_Objs[window_name]["_Summary_StationInfo_ThermalPort_Value_L"])
# self.IOST_Objs[window_name]["_Summary_StationInfo_Temperature_Value_L"] = self.IOST_WRunStationInfo_Builder.get_object(self.IOST_Objs[window_name]["_Summary_StationInfo_Temperature_Value_L"])
# self.IOST_Objs[window_name]["_Summary_StationInfo_TimeRun_Value_L"] = self.IOST_WRunStationInfo_Builder.get_object(self.IOST_Objs[window_name]["_Summary_StationInfo_TimeRun_Value_L"])
# self.IOST_Objs[window_name]["_Summary_StationInfo_ServerIP_Value_L"] = self.IOST_WRunStationInfo_Builder.get_object(self.IOST_Objs[window_name]["_Summary_StationInfo_ServerIP_Value_L"])
# self.IOST_Objs[window_name]["_Summary_StationInfo_OCD_Enable_L"] = self.IOST_WRunStationInfo_Builder.get_object(self.IOST_Objs[window_name]["_Summary_StationInfo_OCD_Enable_L"])
# self.IOST_Objs[window_name]["_Summary_StationInfo_OCD_IP_Value_L"] = self.IOST_WRunStationInfo_Builder.get_object(self.IOST_Objs[window_name]["_Summary_StationInfo_OCD_IP_Value_L"])
# self.IOST_Objs[window_name]["_Summary_StationInfo_BDI_Enable_L"] = self.IOST_WRunStationInfo_Builder.get_object(self.IOST_Objs[window_name]["_Summary_StationInfo_BDI_Enable_L"])
# self.IOST_Objs[window_name]["_Summary_StationInfo_BDI_IP_Value_L"] = self.IOST_WRunStationInfo_Builder.get_object(self.IOST_Objs[window_name]["_Summary_StationInfo_BDI_IP_Value_L"])
#----------------------------------------------------------------------
def WRun_InitStationInfo_Obj(self, window_name):
"Initialization all Station info objects when WRun start"
self.IOST_Objs[window_name]["_Summary_StationInfo_ConsoleIP_Value_L"].set_text(self.IOST_Data["StationInfo"]["ConsoleIP"])
FormatText(self.IOST_Objs[window_name]["_Summary_StationInfo_ConsoleIP_Value_L"], color=WRUN_STATION_INFO_COLOR)
self.IOST_Objs[window_name]["_Summary_StationInfo_ConsolePort_Value_L"].set_text(self.IOST_Data["StationInfo"]["ConsolePort"])
FormatText(self.IOST_Objs[window_name]["_Summary_StationInfo_ConsolePort_Value_L"], color=WRUN_STATION_INFO_COLOR)
self.IOST_Objs[window_name]["_Summary_StationInfo_SlimproPort_Value_L"].set_text(self.IOST_Data["StationInfo"]["SlimproPort"])
FormatText(self.IOST_Objs[window_name]["_Summary_StationInfo_SlimproPort_Value_L"], color=WRUN_STATION_INFO_COLOR)
self.IOST_Objs[window_name]["_Summary_StationInfo_NPS_IP_Value_L"].set_text(self.IOST_Data["StationInfo"]["NPS_IP"])
FormatText(self.IOST_Objs[window_name]["_Summary_StationInfo_NPS_IP_Value_L"], color=WRUN_STATION_INFO_COLOR)
self.IOST_Objs[window_name]["_Summary_StationInfo_NPS_Port_Value_L"].set_text(self.IOST_Data["StationInfo"]["NPS_Port"])
FormatText(self.IOST_Objs[window_name]["_Summary_StationInfo_NPS_Port_Value_L"], color=WRUN_STATION_INFO_COLOR)
self.IOST_Objs[window_name]["_Summary_StationInfo_ThermalIP_Value_L"].set_text(self.IOST_Data["StationInfo"]["ThermalIP"])
FormatText(self.IOST_Objs[window_name]["_Summary_StationInfo_ThermalIP_Value_L"], color=WRUN_STATION_INFO_COLOR)
self.IOST_Objs[window_name]["_Summary_StationInfo_ThermalPort_Value_L"].set_text(self.IOST_Data["StationInfo"]["ThermalPort"])
FormatText(self.IOST_Objs[window_name]["_Summary_StationInfo_ThermalPort_Value_L"], color=WRUN_STATION_INFO_COLOR)
self.IOST_Objs[window_name]["_Summary_StationInfo_Temperature_Value_L"].set_text(self.IOST_Data["StationInfo"]["Temperature"])
FormatText(self.IOST_Objs[window_name]["_Summary_StationInfo_Temperature_Value_L"], color=WRUN_STATION_INFO_COLOR)
self.IOST_Objs[window_name]["_Summary_StationInfo_TimeRun_Value_L"].set_text(self.IOST_Data["StationInfo"]["TimeRunHour"])
FormatText(self.IOST_Objs[window_name]["_Summary_StationInfo_TimeRun_Value_L"], color=WRUN_STATION_INFO_COLOR)
self.IOST_Objs[window_name]["_Summary_StationInfo_ServerIP_Value_L"].set_text(self.IOST_Data["StationInfo"]["ServerIP"])
FormatText(self.IOST_Objs[window_name]["_Summary_StationInfo_ServerIP_Value_L"], color=WRUN_STATION_INFO_COLOR)
if self.IOST_Data["StationInfo"]["OCD_Enable"] == STATUS_ENABLE:
self.IOST_Objs[window_name]["_Summary_StationInfo_OCD_Enable_L"].set_text(STATUS_AVAIL)
FormatText(self.IOST_Objs[window_name]["_Summary_StationInfo_OCD_Enable_L"], color=WRUN_STATION_INFO_COLOR)
self.IOST_Objs[window_name]["_Summary_StationInfo_OCD_IP_Value_L"].set_text(self.IOST_Data["StationInfo"]["OCD_IP"])
FormatText(self.IOST_Objs[window_name]["_Summary_StationInfo_OCD_IP_Value_L"], color=WRUN_STATION_INFO_COLOR)
else:
self.IOST_Objs[window_name]["_Summary_StationInfo_OCD_Enable_L"].set_text(STATUS_NOT_AVAIL)
FormatText(self.IOST_Objs[window_name]["_Summary_StationInfo_OCD_Enable_L"], color="red")
self.IOST_Objs[window_name]["_Summary_StationInfo_OCD_IP_Value_L"].set_text(STATUS_N_A)
if self.IOST_Data["StationInfo"]["BDI_Enable"] == STATUS_ENABLE:
self.IOST_Objs[window_name]["_Summary_StationInfo_BDI_Enable_L"].set_text(STATUS_AVAIL)
FormatText(self.IOST_Objs[window_name]["_Summary_StationInfo_BDI_Enable_L"], color=WRUN_STATION_INFO_COLOR)
self.IOST_Objs[window_name]["_Summary_StationInfo_BDI_IP_Value_L"].set_text(self.IOST_Data["StationInfo"]["BDI_IP"])
FormatText(self.IOST_Objs[window_name]["_Summary_StationInfo_BDI_IP_Value_L"], color=WRUN_STATION_INFO_COLOR)
else:
self.IOST_Objs[window_name]["_Summary_StationInfo_BDI_Enable_L"].set_text(STATUS_NOT_AVAIL)
FormatText(self.IOST_Objs[window_name]["_Summary_StationInfo_BDI_Enable_L"], color="red")
self.IOST_Objs[window_name]["_Summary_StationInfo_BDI_IP_Value_L"].set_text(STATUS_N_A)
#
station_info = self.IOST_WRunStationInfo_Builder.get_object("IOST_WRun_Summary_StationInfo_L").get_text()
station_info += " : \n\tStation Name : " + self.IOST_Data["StationInfo"]["StationName"] \
+ "\t| Board Number : " + self.IOST_Data["StationInfo"]["StationBoardNumber"] \
+ "\t| Chip Number : " + self.IOST_Data["StationInfo"]["StationChipNumber"] + "\n"
FormatText(self.IOST_WRunStationInfo_Builder.get_object("IOST_WRun_Summary_StationInfo_L"), color=WRUN_IP_COLOR_DEFAULT, bold=True, text=station_info)
# #0099ff
FormatText(self.IOST_WRunStationInfo_Builder.get_object("IOST_WRun_Summary_StationInfo_ConsoleIP_L"), color=WRUN_STATION_INFO_LABLE_COLOR)
FormatText(self.IOST_WRunStationInfo_Builder.get_object("IOST_WRun_Summary_StationInfo_ConsolePort_L"), color=WRUN_STATION_INFO_LABLE_COLOR)
FormatText(self.IOST_WRunStationInfo_Builder.get_object("IOST_WRun_Summary_StationInfo_SlimproPort_L"), color=WRUN_STATION_INFO_LABLE_COLOR)
FormatText(self.IOST_WRunStationInfo_Builder.get_object("IOST_WRun_Summary_StationInfo_NPS_IP_L"), color=WRUN_STATION_INFO_LABLE_COLOR)
FormatText(self.IOST_WRunStationInfo_Builder.get_object("IOST_WRun_Summary_StationInfo_NPS_Port_L"), color=WRUN_STATION_INFO_LABLE_COLOR)
FormatText(self.IOST_WRunStationInfo_Builder.get_object("IOST_WRun_Summary_StationInfo_ThermalIP_L"), color=WRUN_STATION_INFO_LABLE_COLOR)
FormatText(self.IOST_WRunStationInfo_Builder.get_object("IOST_WRun_Summary_StationInfo_ThermalPort_L"), color=WRUN_STATION_INFO_LABLE_COLOR)
FormatText(self.IOST_WRunStationInfo_Builder.get_object("IOST_WRun_Summary_StationInfo_Temperature_L"), color=WRUN_STATION_INFO_LABLE_COLOR, text=TEMPERATURE_STR)
FormatText(self.IOST_WRunStationInfo_Builder.get_object("IOST_WRun_Summary_StationInfo_TimeRun_L"), color=WRUN_STATION_INFO_LABLE_COLOR)
FormatText(self.IOST_WRunStationInfo_Builder.get_object("IOST_WRun_Summary_StationInfo_ServerIP_L"), color=WRUN_STATION_INFO_LABLE_COLOR)
FormatText(self.IOST_WRunStationInfo_Builder.get_object("IOST_WRun_Summary_StationInfo_OCD_L"), color=WRUN_STATION_INFO_LABLE_COLOR)
FormatText(self.IOST_WRunStationInfo_Builder.get_object("IOST_WRun_Summary_StationInfo_OCD_IP_L"), color=WRUN_STATION_INFO_LABLE_COLOR)
FormatText(self.IOST_WRunStationInfo_Builder.get_object("IOST_WRun_Summary_StationInfo_BDI_L"), color=WRUN_STATION_INFO_LABLE_COLOR)
FormatText(self.IOST_WRunStationInfo_Builder.get_object("IOST_WRun_Summary_StationInfo_BDI_IP_L"), color=WRUN_STATION_INFO_LABLE_COLOR)
|
erdc/proteus
|
refs/heads/cutfem_update
|
scripts/cobras_saj_embankment.py
|
1
|
#! /usr/bin/env python
from builtins import range
import math
def genPoly(polyfileBase = "cobras_saj_embankment",
lengthBousDomain = 170.0,ransDomainStop=1000.00,
ransDomainHeight = 10.0,
inflowLength = 10.0,
inflowPad = 15.0,
outflowLength= 1.0,
outflowPad= 1.0):
"""
try to generate Jeff Melby's domain
points from sajlevee.bat file
    lengthBousDomain -- point in bathymetry data after which RANS domain is considered
    ransDomainStop -- point in bathymetry data after which RANS domain stops
    inflowLength -- size of region for inflow boundary
    inflowPad -- how much to add to domain to accommodate inflow (make sure on flat region)
    outflowLength -- size of region for outflow boundary
    outflowPad -- how much to add to domain to accommodate outflow (make sure on flat region)
"""
#lengthBousDomain = 170.0# where Boussinesq domain stops
allBathymetryPoints = [(-100, -8.53),
(101.71, -8.53),
(120.00, -7.92),
(132.19, -7.52),
(184.01, -5.79),
(190.41, -3.66),
(196.81, -1.52),
(204.43, -1.52),
(204.43, -1.22),
(205.34, -1.22),
(205.34, -0.91),
(206.26, -0.91),
(206.26, -0.61),
(207.17, -0.61),
(207.17, -0.30),
(208.09, -0.30),
(208.09, 0.00),
(209.00, 0.00),
(209.00, 0.30),
(209.92, 0.30),
(209.92, 0.61),
(210.83, 0.61),
(210.83, 0.91),
(215.10, 0.91),
(215.10, -1.00),
(250.00, -1.25)]
#pop off points in Boussinesq domain
bathymetryPoints = []
for p in allBathymetryPoints:
if p[0] >= lengthBousDomain and p[0] <= ransDomainStop:
bathymetryPoints.append(p)
backHeight = 4.54
#how much to pad domain for inflow and outflow
#inflowPad = 5.0 #m
#outflowPad= 1.0
#inflowLength = 1.0
#outflowLength= 1.0
#have to assume points in correct order
#pad for inflow outflow
pin = bathymetryPoints[0]
pout= bathymetryPoints[-1]
#now get corners of domain
pSW = (pin[0]-inflowPad,pin[1])
pSE = (pout[0]+outflowPad,pout[1])
ransDomainLength= pSE[0]-pSW[0]
#domain
L = (ransDomainLength,ransDomainHeight,1.0)
#assume height of RANS domain gives enough vertical padding
minY = min(pSW[1],pSE[1])
pNW = (pSW[0],minY+ransDomainHeight)
pNE = (pSE[0],minY+ransDomainHeight)
#check vertical coordinates
    tmp = sorted(bathymetryPoints, key=lambda p: p[1])  # sort by y (elevation); cmp= is unavailable in Python 3
    assert minY <= tmp[0][1], "found point below proposed block floor minY=%s tmp[0]=%s " % (minY,tmp[0])
    assert minY+ransDomainHeight > tmp[-1][1], "found point above proposed block ceiling maxY=%s tmp[-1]=%s " % (minY+ransDomainHeight,
                                                                                                                 tmp[-1])
#make our domain start at origin
xshift=0.0-pSW[0]
yshift=0.0-minY
vertices = [p for p in bathymetryPoints]
#start with NW corner and work way around
#left
vertices.insert(0,pNW)
vertices.insert(1,pSW)
#add midpoint to make sure some points are inflow labelled
#inflow is on bottom
vertices.insert(2,(pSW[0]+0.5*inflowLength,pSW[1]))
vertices.insert(3,(pSW[0]+inflowLength,pSW[1]))
#
#vertices.append((bathymetryPoints[-1][0]+outflowPad-outflowLength,bathymetryPoints[-1][1]))
vertices.append((bathymetryPoints[-1][0]+outflowPad-0.5*outflowLength,bathymetryPoints[-1][1]))
#right
vertices.append(pSE)
vertices.append(pNE)
nvertices = len(vertices)
segmentLabels = {'left': 1,
'bottom' : 2,
'right' : 3,
'top' : 4,
'inflow' : 5,
'outflow': 6}
segments = []
segments.append((0,1,segmentLabels['left']))
segments.append((1,2,segmentLabels['inflow']))
segments.append((2,3,segmentLabels['inflow']))
for i in range(3,nvertices-3):
segments.append((i,i+1,segmentLabels['bottom']))
segments.append((nvertices-4,nvertices-3,segmentLabels['outflow']))
segments.append((nvertices-3,nvertices-2,segmentLabels['outflow']))
segments.append((nvertices-2,nvertices-1,segmentLabels['right']))
segments.append((nvertices-1,0,segmentLabels['top']))
poly = open(polyfileBase+'.poly','w')
poly.write('%d %d %d %d \n' % (nvertices,2,0,0))
#write vertices
poly.write("#vertices \n")
for i,p in enumerate(vertices):
poly.write('%d %12.5e %12.5e \n' % (i+1,xshift+p[0],yshift+p[1]))
#write segments
nSegments = len(segments)
poly.write('%d %d \n' % (nSegments,1))
poly.write("#segments \n")
for sN,s in enumerate(segments):
poly.write('%d %d %d %d \n' % (sN+1,s[0]+1,s[1]+1,s[2]))
    #if meshing just the outside of the structure, insert holes here
nholes = 0
poly.write('%d \n' % (nholes,))
poly.write("#holes \n")
nsolid_regions = 0
    poly.write('%d \n' % (nsolid_regions,))
poly.write("#solid regions \n")
poly.close()
return L,segmentLabels,backHeight
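# Illustrative invocation (a sketch; the keyword value below is simply the default
# documented above):
#
#   if __name__ == "__main__":
#       L, segmentLabels, backHeight = genPoly(polyfileBase="cobras_saj_embankment")
#       print(L, segmentLabels, backHeight)
#
# This writes "cobras_saj_embankment.poly" for use with a triangle-style mesh generator
# and returns the RANS domain extents, the segment boundary labels, and the structure
# back height.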
|
yanheven/nova
|
refs/heads/master
|
nova/tests/unit/virt/test_block_device.py
|
6
|
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import mock
from oslo_serialization import jsonutils
from nova import block_device
from nova import context
from nova import test
from nova.tests.unit import fake_instance
from nova.tests.unit import matchers
from nova.virt import block_device as driver_block_device
from nova.virt import driver
from nova.volume import cinder
from nova.volume import encryptors
class TestDriverBlockDevice(test.NoDBTestCase):
driver_classes = {
'swap': driver_block_device.DriverSwapBlockDevice,
'ephemeral': driver_block_device.DriverEphemeralBlockDevice,
'volume': driver_block_device.DriverVolumeBlockDevice,
'snapshot': driver_block_device.DriverSnapshotBlockDevice,
'image': driver_block_device.DriverImageBlockDevice,
'blank': driver_block_device.DriverBlankBlockDevice
}
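    # Fixture layout used by the tests below: for each block device type there is a
    # DB-style BlockDeviceDict (<name>_bdm), the expected driver-facing mapping
    # (<name>_driver_bdm) and the expected legacy mapping (<name>_legacy_driver_bdm).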
swap_bdm = block_device.BlockDeviceDict(
{'id': 1, 'instance_uuid': 'fake-instance',
'device_name': '/dev/sdb1',
'source_type': 'blank',
'destination_type': 'local',
'delete_on_termination': True,
'guest_format': 'swap',
'disk_bus': 'scsi',
'volume_size': 2,
'boot_index': -1})
swap_driver_bdm = {
'device_name': '/dev/sdb1',
'swap_size': 2,
'disk_bus': 'scsi'}
swap_legacy_driver_bdm = {
'device_name': '/dev/sdb1',
'swap_size': 2}
ephemeral_bdm = block_device.BlockDeviceDict(
{'id': 2, 'instance_uuid': 'fake-instance',
'device_name': '/dev/sdc1',
'source_type': 'blank',
'destination_type': 'local',
'disk_bus': 'scsi',
'device_type': 'disk',
'volume_size': 4,
'guest_format': 'ext4',
'delete_on_termination': True,
'boot_index': -1})
ephemeral_driver_bdm = {
'device_name': '/dev/sdc1',
'size': 4,
'device_type': 'disk',
'guest_format': 'ext4',
'disk_bus': 'scsi'}
ephemeral_legacy_driver_bdm = {
'device_name': '/dev/sdc1',
'size': 4,
'virtual_name': 'ephemeral0',
'num': 0}
volume_bdm = block_device.BlockDeviceDict(
{'id': 3, 'instance_uuid': 'fake-instance',
'device_name': '/dev/sda1',
'source_type': 'volume',
'disk_bus': 'scsi',
'device_type': 'disk',
'volume_size': 8,
'destination_type': 'volume',
'volume_id': 'fake-volume-id-1',
'guest_format': 'ext4',
'connection_info': '{"fake": "connection_info"}',
'delete_on_termination': False,
'boot_index': 0})
volume_driver_bdm = {
'mount_device': '/dev/sda1',
'connection_info': {"fake": "connection_info"},
'delete_on_termination': False,
'disk_bus': 'scsi',
'device_type': 'disk',
'guest_format': 'ext4',
'boot_index': 0}
volume_legacy_driver_bdm = {
'mount_device': '/dev/sda1',
'connection_info': {"fake": "connection_info"},
'delete_on_termination': False}
snapshot_bdm = block_device.BlockDeviceDict(
{'id': 4, 'instance_uuid': 'fake-instance',
'device_name': '/dev/sda2',
'delete_on_termination': True,
'volume_size': 3,
'disk_bus': 'scsi',
'device_type': 'disk',
'source_type': 'snapshot',
'destination_type': 'volume',
'connection_info': '{"fake": "connection_info"}',
'snapshot_id': 'fake-snapshot-id-1',
'volume_id': 'fake-volume-id-2',
'boot_index': -1})
snapshot_driver_bdm = {
'mount_device': '/dev/sda2',
'connection_info': {"fake": "connection_info"},
'delete_on_termination': True,
'disk_bus': 'scsi',
'device_type': 'disk',
'guest_format': None,
'boot_index': -1}
snapshot_legacy_driver_bdm = {
'mount_device': '/dev/sda2',
'connection_info': {"fake": "connection_info"},
'delete_on_termination': True}
image_bdm = block_device.BlockDeviceDict(
{'id': 5, 'instance_uuid': 'fake-instance',
'device_name': '/dev/sda2',
'delete_on_termination': True,
'volume_size': 1,
'disk_bus': 'scsi',
'device_type': 'disk',
'source_type': 'image',
'destination_type': 'volume',
'connection_info': '{"fake": "connection_info"}',
'image_id': 'fake-image-id-1',
'volume_id': 'fake-volume-id-2',
'boot_index': -1})
image_driver_bdm = {
'mount_device': '/dev/sda2',
'connection_info': {"fake": "connection_info"},
'delete_on_termination': True,
'disk_bus': 'scsi',
'device_type': 'disk',
'guest_format': None,
'boot_index': -1}
image_legacy_driver_bdm = {
'mount_device': '/dev/sda2',
'connection_info': {"fake": "connection_info"},
'delete_on_termination': True}
blank_bdm = block_device.BlockDeviceDict(
{'id': 6, 'instance_uuid': 'fake-instance',
'device_name': '/dev/sda2',
'delete_on_termination': True,
'volume_size': 3,
'disk_bus': 'scsi',
'device_type': 'disk',
'source_type': 'blank',
'destination_type': 'volume',
'connection_info': '{"fake": "connection_info"}',
'snapshot_id': 'fake-snapshot-id-1',
'volume_id': 'fake-volume-id-2',
'boot_index': -1})
blank_driver_bdm = {
'mount_device': '/dev/sda2',
'connection_info': {"fake": "connection_info"},
'delete_on_termination': True,
'disk_bus': 'scsi',
'device_type': 'disk',
'guest_format': None,
'boot_index': -1}
blank_legacy_driver_bdm = {
'mount_device': '/dev/sda2',
'connection_info': {"fake": "connection_info"},
'delete_on_termination': True}
def setUp(self):
super(TestDriverBlockDevice, self).setUp()
self.volume_api = self.mox.CreateMock(cinder.API)
self.virt_driver = self.mox.CreateMock(driver.ComputeDriver)
self.context = context.RequestContext('fake_user',
'fake_project')
def test_no_device_raises(self):
for name, cls in self.driver_classes.items():
self.assertRaises(driver_block_device._NotTransformable,
cls, {'no_device': True})
def _test_driver_device(self, name):
db_bdm = getattr(self, "%s_bdm" % name)
test_bdm = self.driver_classes[name](db_bdm)
self.assertThat(test_bdm, matchers.DictMatches(
getattr(self, "%s_driver_bdm" % name)))
for k, v in db_bdm.iteritems():
field_val = getattr(test_bdm._bdm_obj, k)
if isinstance(field_val, bool):
v = bool(v)
self.assertEqual(field_val, v)
self.assertThat(test_bdm.legacy(),
matchers.DictMatches(
getattr(self, "%s_legacy_driver_bdm" % name)))
# Test passthru attributes
for passthru in test_bdm._proxy_as_attr:
self.assertEqual(getattr(test_bdm, passthru),
getattr(test_bdm._bdm_obj, passthru))
        # Make sure that all others raise _InvalidType
for other_name, cls in self.driver_classes.iteritems():
if other_name == name:
continue
self.assertRaises(driver_block_device._InvalidType,
cls,
getattr(self, '%s_bdm' % name))
# Test the save method
with mock.patch.object(test_bdm._bdm_obj, 'save') as save_mock:
for fld, alias in test_bdm._update_on_save.iteritems():
test_bdm[alias or fld] = 'fake_changed_value'
test_bdm.save()
for fld, alias in test_bdm._update_on_save.iteritems():
self.assertEqual(test_bdm[alias or fld],
getattr(test_bdm._bdm_obj, fld))
save_mock.assert_called_once_with()
def check_save():
self.assertEqual(set([]), test_bdm._bdm_obj.obj_what_changed())
# Test that nothing is set on the object if there are no actual changes
test_bdm._bdm_obj.obj_reset_changes()
with mock.patch.object(test_bdm._bdm_obj, 'save') as save_mock:
save_mock.side_effect = check_save
test_bdm.save()
def _test_driver_default_size(self, name):
size = 'swap_size' if name == 'swap' else 'size'
no_size_bdm = getattr(self, "%s_bdm" % name).copy()
no_size_bdm['volume_size'] = None
driver_bdm = self.driver_classes[name](no_size_bdm)
self.assertEqual(driver_bdm[size], 0)
del no_size_bdm['volume_size']
driver_bdm = self.driver_classes[name](no_size_bdm)
self.assertEqual(driver_bdm[size], 0)
def test_driver_swap_block_device(self):
self._test_driver_device("swap")
def test_driver_swap_default_size(self):
self._test_driver_default_size('swap')
def test_driver_ephemeral_block_device(self):
self._test_driver_device("ephemeral")
def test_driver_ephemeral_default_size(self):
self._test_driver_default_size('ephemeral')
def test_driver_volume_block_device(self):
self._test_driver_device("volume")
test_bdm = self.driver_classes['volume'](
self.volume_bdm)
self.assertEqual(test_bdm['connection_info'],
jsonutils.loads(test_bdm._bdm_obj.connection_info))
self.assertEqual(test_bdm._bdm_obj.id, 3)
self.assertEqual(test_bdm.volume_id, 'fake-volume-id-1')
self.assertEqual(test_bdm.volume_size, 8)
def test_driver_snapshot_block_device(self):
self._test_driver_device("snapshot")
test_bdm = self.driver_classes['snapshot'](
self.snapshot_bdm)
self.assertEqual(test_bdm._bdm_obj.id, 4)
self.assertEqual(test_bdm.snapshot_id, 'fake-snapshot-id-1')
self.assertEqual(test_bdm.volume_id, 'fake-volume-id-2')
self.assertEqual(test_bdm.volume_size, 3)
def test_driver_image_block_device(self):
self._test_driver_device('image')
test_bdm = self.driver_classes['image'](
self.image_bdm)
self.assertEqual(test_bdm._bdm_obj.id, 5)
self.assertEqual(test_bdm.image_id, 'fake-image-id-1')
self.assertEqual(test_bdm.volume_size, 1)
def test_driver_image_block_device_destination_local(self):
self._test_driver_device('image')
bdm = self.image_bdm.copy()
bdm['destination_type'] = 'local'
self.assertRaises(driver_block_device._InvalidType,
self.driver_classes['image'], bdm)
def test_driver_blank_block_device(self):
self._test_driver_device('blank')
test_bdm = self.driver_classes['blank'](
self.blank_bdm)
self.assertEqual(6, test_bdm._bdm_obj.id)
self.assertEqual('fake-volume-id-2', test_bdm.volume_id)
self.assertEqual(3, test_bdm.volume_size)
def _test_volume_attach(self, driver_bdm, bdm_dict,
fake_volume, check_attach=True,
fail_check_attach=False, driver_attach=False,
fail_driver_attach=False, volume_attach=True,
fail_volume_attach=False, access_mode='rw'):
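        # Records the mox expectations for a volume attach; depending on the fail_* flags
        # only the calls expected up to the failure point are recorded and the method
        # returns early, so callers can assert that attach() raises at that point.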
elevated_context = self.context.elevated()
self.stubs.Set(self.context, 'elevated',
lambda: elevated_context)
self.mox.StubOutWithMock(driver_bdm._bdm_obj, 'save')
self.mox.StubOutWithMock(encryptors, 'get_encryption_metadata')
instance_detail = {'id': '123', 'uuid': 'fake_uuid'}
instance = fake_instance.fake_instance_obj(self.context,
**instance_detail)
connector = {'ip': 'fake_ip', 'host': 'fake_host'}
connection_info = {'data': {'access_mode': access_mode}}
expected_conn_info = {'data': {'access_mode': access_mode},
'serial': fake_volume['id']}
enc_data = {'fake': 'enc_data'}
self.volume_api.get(self.context,
fake_volume['id']).AndReturn(fake_volume)
if check_attach:
if not fail_check_attach:
self.volume_api.check_attach(self.context, fake_volume,
instance=instance).AndReturn(None)
else:
self.volume_api.check_attach(self.context, fake_volume,
instance=instance).AndRaise(
test.TestingException)
driver_bdm._bdm_obj.save().AndReturn(None)
return instance, expected_conn_info
self.virt_driver.get_volume_connector(instance).AndReturn(connector)
self.volume_api.initialize_connection(
elevated_context, fake_volume['id'],
connector).AndReturn(connection_info)
if driver_attach:
encryptors.get_encryption_metadata(
elevated_context, self.volume_api, fake_volume['id'],
connection_info).AndReturn(enc_data)
if not fail_driver_attach:
self.virt_driver.attach_volume(
elevated_context, expected_conn_info, instance,
bdm_dict['device_name'],
disk_bus=bdm_dict['disk_bus'],
device_type=bdm_dict['device_type'],
encryption=enc_data).AndReturn(None)
else:
self.virt_driver.attach_volume(
elevated_context, expected_conn_info, instance,
bdm_dict['device_name'],
disk_bus=bdm_dict['disk_bus'],
device_type=bdm_dict['device_type'],
encryption=enc_data).AndRaise(test.TestingException)
self.volume_api.terminate_connection(
elevated_context, fake_volume['id'],
connector).AndReturn(None)
driver_bdm._bdm_obj.save().AndReturn(None)
return instance, expected_conn_info
if volume_attach:
driver_bdm._bdm_obj.save().AndReturn(None)
if not fail_volume_attach:
self.volume_api.attach(elevated_context, fake_volume['id'],
'fake_uuid', bdm_dict['device_name'],
mode=access_mode).AndReturn(None)
else:
self.volume_api.attach(elevated_context, fake_volume['id'],
'fake_uuid', bdm_dict['device_name'],
mode=access_mode).AndRaise(
test.TestingException)
driver_bdm._bdm_obj.save().AndReturn(None)
return instance, expected_conn_info
def test_volume_attach(self):
test_bdm = self.driver_classes['volume'](
self.volume_bdm)
volume = {'id': 'fake-volume-id-1',
'attach_status': 'detached'}
instance, expected_conn_info = self._test_volume_attach(
test_bdm, self.volume_bdm, volume)
self.mox.ReplayAll()
test_bdm.attach(self.context, instance,
self.volume_api, self.virt_driver)
self.assertThat(test_bdm['connection_info'],
matchers.DictMatches(expected_conn_info))
def test_volume_attach_ro(self):
test_bdm = self.driver_classes['volume'](self.volume_bdm)
volume = {'id': 'fake-volume-id-1',
'attach_status': 'detached'}
instance, expected_conn_info = self._test_volume_attach(
test_bdm, self.volume_bdm, volume, access_mode='ro')
self.mox.ReplayAll()
test_bdm.attach(self.context, instance,
self.volume_api, self.virt_driver)
self.assertThat(test_bdm['connection_info'],
matchers.DictMatches(expected_conn_info))
def test_volume_attach_check_attach_fails(self):
test_bdm = self.driver_classes['volume'](
self.volume_bdm)
volume = {'id': 'fake-volume-id-1'}
instance, _ = self._test_volume_attach(
test_bdm, self.volume_bdm, volume, fail_check_attach=True)
self.mox.ReplayAll()
self.assertRaises(test.TestingException, test_bdm.attach, self.context,
instance, self.volume_api, self.virt_driver)
def test_volume_no_volume_attach(self):
test_bdm = self.driver_classes['volume'](
self.volume_bdm)
volume = {'id': 'fake-volume-id-1',
'attach_status': 'detached'}
instance, expected_conn_info = self._test_volume_attach(
test_bdm, self.volume_bdm, volume, check_attach=False,
driver_attach=False)
self.mox.ReplayAll()
test_bdm.attach(self.context, instance,
self.volume_api, self.virt_driver,
do_check_attach=False, do_driver_attach=False)
self.assertThat(test_bdm['connection_info'],
matchers.DictMatches(expected_conn_info))
def test_volume_attach_no_check_driver_attach(self):
test_bdm = self.driver_classes['volume'](
self.volume_bdm)
volume = {'id': 'fake-volume-id-1',
'attach_status': 'detached'}
instance, expected_conn_info = self._test_volume_attach(
test_bdm, self.volume_bdm, volume, check_attach=False,
driver_attach=True)
self.mox.ReplayAll()
test_bdm.attach(self.context, instance,
self.volume_api, self.virt_driver,
do_check_attach=False, do_driver_attach=True)
self.assertThat(test_bdm['connection_info'],
matchers.DictMatches(expected_conn_info))
def test_volume_attach_driver_attach_fails(self):
test_bdm = self.driver_classes['volume'](
self.volume_bdm)
volume = {'id': 'fake-volume-id-1'}
instance, _ = self._test_volume_attach(
test_bdm, self.volume_bdm, volume, driver_attach=True,
fail_driver_attach=True)
self.mox.ReplayAll()
self.assertRaises(test.TestingException, test_bdm.attach, self.context,
instance, self.volume_api, self.virt_driver,
do_driver_attach=True)
def test_volume_attach_volume_attach_fails(self):
test_bdm = self.driver_classes['volume'](
self.volume_bdm)
volume = {'id': 'fake-volume-id-1',
'attach_status': 'detached'}
instance, _ = self._test_volume_attach(
test_bdm, self.volume_bdm, volume, driver_attach=True,
fail_volume_attach=True)
self.mox.ReplayAll()
self.assertRaises(test.TestingException, test_bdm.attach, self.context,
instance, self.volume_api, self.virt_driver,
do_driver_attach=True)
def test_refresh_connection(self):
test_bdm = self.driver_classes['snapshot'](
self.snapshot_bdm)
instance = {'id': 'fake_id', 'uuid': 'fake_uuid'}
connector = {'ip': 'fake_ip', 'host': 'fake_host'}
connection_info = {'data': {'multipath_id': 'fake_multipath_id'}}
expected_conn_info = {'data': {'multipath_id': 'fake_multipath_id'},
'serial': 'fake-volume-id-2'}
self.mox.StubOutWithMock(test_bdm._bdm_obj, 'save')
self.virt_driver.get_volume_connector(instance).AndReturn(connector)
self.volume_api.initialize_connection(
self.context, test_bdm.volume_id,
connector).AndReturn(connection_info)
test_bdm._bdm_obj.save().AndReturn(None)
self.mox.ReplayAll()
test_bdm.refresh_connection_info(self.context, instance,
self.volume_api, self.virt_driver)
self.assertThat(test_bdm['connection_info'],
matchers.DictMatches(expected_conn_info))
def test_snapshot_attach_no_volume(self):
no_volume_snapshot = self.snapshot_bdm.copy()
no_volume_snapshot['volume_id'] = None
test_bdm = self.driver_classes['snapshot'](no_volume_snapshot)
snapshot = {'id': 'fake-volume-id-1',
'attach_status': 'detached'}
volume = {'id': 'fake-volume-id-2',
'attach_status': 'detached'}
wait_func = self.mox.CreateMockAnything()
self.volume_api.get_snapshot(self.context,
'fake-snapshot-id-1').AndReturn(snapshot)
self.volume_api.create(self.context, 3, '', '', snapshot,
availability_zone=None).AndReturn(volume)
wait_func(self.context, 'fake-volume-id-2').AndReturn(None)
instance, expected_conn_info = self._test_volume_attach(
test_bdm, no_volume_snapshot, volume)
self.mox.ReplayAll()
test_bdm.attach(self.context, instance, self.volume_api,
self.virt_driver, wait_func)
self.assertEqual(test_bdm.volume_id, 'fake-volume-id-2')
def test_snapshot_attach_volume(self):
test_bdm = self.driver_classes['snapshot'](
self.snapshot_bdm)
instance = {'id': 'fake_id', 'uuid': 'fake_uuid'}
volume_class = self.driver_classes['volume']
self.mox.StubOutWithMock(volume_class, 'attach')
        # Make sure these are not called
self.mox.StubOutWithMock(self.volume_api, 'get_snapshot')
self.mox.StubOutWithMock(self.volume_api, 'create')
volume_class.attach(self.context, instance, self.volume_api,
self.virt_driver, do_check_attach=True
).AndReturn(None)
self.mox.ReplayAll()
test_bdm.attach(self.context, instance, self.volume_api,
self.virt_driver)
self.assertEqual(test_bdm.volume_id, 'fake-volume-id-2')
def test_image_attach_no_volume(self):
no_volume_image = self.image_bdm.copy()
no_volume_image['volume_id'] = None
test_bdm = self.driver_classes['image'](no_volume_image)
image = {'id': 'fake-image-id-1'}
volume = {'id': 'fake-volume-id-2',
'attach_status': 'detached'}
wait_func = self.mox.CreateMockAnything()
self.volume_api.create(self.context, 1, '', '', image_id=image['id'],
availability_zone=None).AndReturn(volume)
wait_func(self.context, 'fake-volume-id-2').AndReturn(None)
instance, expected_conn_info = self._test_volume_attach(
test_bdm, no_volume_image, volume)
self.mox.ReplayAll()
test_bdm.attach(self.context, instance, self.volume_api,
self.virt_driver, wait_func)
self.assertEqual(test_bdm.volume_id, 'fake-volume-id-2')
def test_image_attach_volume(self):
test_bdm = self.driver_classes['image'](
self.image_bdm)
instance = {'id': 'fake_id', 'uuid': 'fake_uuid'}
volume_class = self.driver_classes['volume']
self.mox.StubOutWithMock(volume_class, 'attach')
        # Make sure these are not called
self.mox.StubOutWithMock(self.volume_api, 'get_snapshot')
self.mox.StubOutWithMock(self.volume_api, 'create')
volume_class.attach(self.context, instance, self.volume_api,
self.virt_driver, do_check_attach=True
).AndReturn(None)
self.mox.ReplayAll()
test_bdm.attach(self.context, instance, self.volume_api,
self.virt_driver)
self.assertEqual(test_bdm.volume_id, 'fake-volume-id-2')
def test_blank_attach_volume(self):
no_blank_volume = self.blank_bdm.copy()
no_blank_volume['volume_id'] = None
test_bdm = self.driver_classes['blank'](no_blank_volume)
instance = fake_instance.fake_instance_obj(mock.sentinel.ctx,
**{'uuid': 'fake-uuid'})
volume_class = self.driver_classes['volume']
volume = {'id': 'fake-volume-id-2',
'display_name': 'fake-uuid-blank-vol'}
with contextlib.nested(
mock.patch.object(self.volume_api, 'create', return_value=volume),
mock.patch.object(volume_class, 'attach')
) as (vol_create, vol_attach):
test_bdm.attach(self.context, instance, self.volume_api,
self.virt_driver)
vol_create.assert_called_once_with(
self.context, test_bdm.volume_size, 'fake-uuid-blank-vol',
'', availability_zone=instance.availability_zone)
vol_attach.assert_called_once_with(self.context, instance,
self.volume_api,
self.virt_driver,
do_check_attach=True)
self.assertEqual('fake-volume-id-2', test_bdm.volume_id)
def test_convert_block_devices(self):
converted = driver_block_device._convert_block_devices(
self.driver_classes['volume'],
[self.volume_bdm, self.ephemeral_bdm])
self.assertEqual(converted, [self.volume_driver_bdm])
def test_convert_all_volumes(self):
converted = driver_block_device.convert_all_volumes()
self.assertEqual([], converted)
converted = driver_block_device.convert_all_volumes(
self.volume_bdm, self.ephemeral_bdm, self.image_bdm)
self.assertEqual(converted, [self.volume_driver_bdm,
self.image_driver_bdm])
def test_convert_volume(self):
self.assertIsNone(driver_block_device.convert_volume(self.swap_bdm))
self.assertEqual(self.volume_driver_bdm,
driver_block_device.convert_volume(self.volume_bdm))
self.assertEqual(self.snapshot_driver_bdm,
driver_block_device.convert_volume(self.snapshot_bdm))
def test_legacy_block_devices(self):
test_snapshot = self.driver_classes['snapshot'](
self.snapshot_bdm)
block_device_mapping = [test_snapshot, test_snapshot]
legacy_bdm = driver_block_device.legacy_block_devices(
block_device_mapping)
self.assertEqual(legacy_bdm, [self.snapshot_legacy_driver_bdm,
self.snapshot_legacy_driver_bdm])
# Test that the ephemerals work as expected
test_ephemerals = [self.driver_classes['ephemeral'](
self.ephemeral_bdm) for _ in xrange(2)]
expected = [self.ephemeral_legacy_driver_bdm.copy()
for _ in xrange(2)]
expected[0]['virtual_name'] = 'ephemeral0'
expected[0]['num'] = 0
expected[1]['virtual_name'] = 'ephemeral1'
expected[1]['num'] = 1
legacy_ephemerals = driver_block_device.legacy_block_devices(
test_ephemerals)
self.assertEqual(expected, legacy_ephemerals)
def test_get_swap(self):
swap = [self.swap_driver_bdm]
legacy_swap = [self.swap_legacy_driver_bdm]
no_swap = [self.volume_driver_bdm]
self.assertEqual(swap[0], driver_block_device.get_swap(swap))
self.assertEqual(legacy_swap[0],
driver_block_device.get_swap(legacy_swap))
self.assertIsNone(driver_block_device.get_swap(no_swap))
self.assertIsNone(driver_block_device.get_swap([]))
def test_is_implemented(self):
for bdm in (self.image_bdm, self.volume_bdm, self.swap_bdm,
self.ephemeral_bdm, self.snapshot_bdm):
self.assertTrue(driver_block_device.is_implemented(bdm))
local_image = self.image_bdm.copy()
local_image['destination_type'] = 'local'
self.assertFalse(driver_block_device.is_implemented(local_image))
def test_is_block_device_mapping(self):
test_swap = self.driver_classes['swap'](self.swap_bdm)
test_ephemeral = self.driver_classes['ephemeral'](self.ephemeral_bdm)
test_image = self.driver_classes['image'](self.image_bdm)
test_snapshot = self.driver_classes['snapshot'](self.snapshot_bdm)
test_volume = self.driver_classes['volume'](self.volume_bdm)
test_blank = self.driver_classes['blank'](self.blank_bdm)
for bdm in (test_image, test_snapshot, test_volume, test_blank):
self.assertTrue(driver_block_device.is_block_device_mapping(
bdm._bdm_obj))
for bdm in (test_swap, test_ephemeral):
self.assertFalse(driver_block_device.is_block_device_mapping(
bdm._bdm_obj))
|
adcentury/electron
|
refs/heads/master
|
script/upload-checksums.py
|
131
|
#!/usr/bin/env python
import argparse
import hashlib
import os
import tempfile
from lib.config import s3_config
from lib.util import download, rm_rf, s3put
DIST_URL = 'https://atom.io/download/atom-shell/'
def main():
args = parse_args()
url = DIST_URL + args.version + '/'
directory, files = download_files(url, get_files_list(args.version))
checksums = [
create_checksum('sha1', directory, 'SHASUMS.txt', files),
create_checksum('sha256', directory, 'SHASUMS256.txt', files)
]
bucket, access_key, secret_key = s3_config()
s3put(bucket, access_key, secret_key, directory,
'atom-shell/dist/{0}'.format(args.version), checksums)
rm_rf(directory)
def parse_args():
  parser = argparse.ArgumentParser(description='upload SHASUMS checksum files')
parser.add_argument('-v', '--version', help='Specify the version',
required=True)
return parser.parse_args()
def get_files_list(version):
return [
'node-{0}.tar.gz'.format(version),
'iojs-{0}.tar.gz'.format(version),
'iojs-{0}-headers.tar.gz'.format(version),
'node.lib',
'x64/node.lib',
'win-x86/iojs.lib',
'win-x64/iojs.lib',
]
def download_files(url, files):
directory = tempfile.mkdtemp(prefix='electron-tmp')
return directory, [
download(f, url + f, os.path.join(directory, f))
for f in files
]
def create_checksum(algorithm, directory, filename, files):
lines = []
for path in files:
h = hashlib.new(algorithm)
    with open(path, 'rb') as f:
h.update(f.read())
lines.append(h.hexdigest() + ' ' + os.path.relpath(path, directory))
checksum_file = os.path.join(directory, filename)
with open(checksum_file, 'w') as f:
f.write('\n'.join(lines) + '\n')
return checksum_file
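# Illustrative note (added for clarity, not part of the original script): each
# generated checksum file holds one line per downloaded artifact, pairing the
# hex digest with the file's path relative to the temp directory, as assembled
# in create_checksum() above.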
if __name__ == '__main__':
import sys
sys.exit(main())
|
B-MOOC/edx-platform
|
refs/heads/master
|
common/lib/xmodule/xmodule/fields.py
|
144
|
import time
import logging
import re
from xblock.fields import JSONField
import datetime
import dateutil.parser
from pytz import UTC
log = logging.getLogger(__name__)
class Date(JSONField):
'''
Date fields know how to parse and produce json (iso) compatible formats. Converts to tz aware datetimes.
'''
# See note below about not defaulting these
CURRENT_YEAR = datetime.datetime.now(UTC).year
PREVENT_DEFAULT_DAY_MON_SEED1 = datetime.datetime(CURRENT_YEAR, 1, 1, tzinfo=UTC)
PREVENT_DEFAULT_DAY_MON_SEED2 = datetime.datetime(CURRENT_YEAR, 2, 2, tzinfo=UTC)
MUTABLE = False
def _parse_date_wo_default_month_day(self, field):
"""
        Parse the field as an ISO string but prevent dateutil from defaulting the day or month while
allowing it to default the other fields.
"""
# It's not trivial to replace dateutil b/c parsing timezones as Z, +03:30, -400 is hard in python
# however, we don't want dateutil to default the month or day (but some tests at least expect
# us to default year); so, we'll see if dateutil uses the defaults for these the hard way
result = dateutil.parser.parse(field, default=self.PREVENT_DEFAULT_DAY_MON_SEED1)
result_other = dateutil.parser.parse(field, default=self.PREVENT_DEFAULT_DAY_MON_SEED2)
if result != result_other:
log.warning("Field {0} is missing month or day".format(self.name))
return None
if result.tzinfo is None:
result = result.replace(tzinfo=UTC)
return result
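    # A minimal sketch of the two-seed trick above (assumed, illustrative
    # values): parsing the same string against two defaults whose month and
    # day differ reveals whether the string itself supplied those parts.
    #
    #   import datetime
    #   import dateutil.parser
    #   from pytz import UTC
    #   seed1 = datetime.datetime(2013, 1, 1, tzinfo=UTC)
    #   seed2 = datetime.datetime(2013, 2, 2, tzinfo=UTC)
    #   dateutil.parser.parse('2013-07', default=seed1)  # -> 2013-07-01
    #   dateutil.parser.parse('2013-07', default=seed2)  # -> 2013-07-02, so the day was defaulted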
def from_json(self, field):
"""
Parse an optional metadata key containing a time: if present, complain
if it doesn't parse.
Return None if not present or invalid.
"""
if field is None:
return field
        elif field == "":
return None
elif isinstance(field, basestring):
return self._parse_date_wo_default_month_day(field)
elif isinstance(field, (int, long, float)):
return datetime.datetime.fromtimestamp(field / 1000, UTC)
elif isinstance(field, time.struct_time):
return datetime.datetime.fromtimestamp(time.mktime(field), UTC)
elif isinstance(field, datetime.datetime):
return field
else:
msg = "Field {0} has bad value '{1}'".format(
self.name, field)
raise TypeError(msg)
def to_json(self, value):
"""
Convert a time struct to a string
"""
if value is None:
return None
if isinstance(value, time.struct_time):
# struct_times are always utc
return time.strftime('%Y-%m-%dT%H:%M:%SZ', value)
elif isinstance(value, datetime.datetime):
if value.tzinfo is None or value.utcoffset().total_seconds() == 0:
# isoformat adds +00:00 rather than Z
return value.strftime('%Y-%m-%dT%H:%M:%SZ')
else:
return value.isoformat()
else:
raise TypeError("Cannot convert {!r} to json".format(value))
enforce_type = from_json
TIMEDELTA_REGEX = re.compile(r'^((?P<days>\d+?) day(?:s?))?(\s)?((?P<hours>\d+?) hour(?:s?))?(\s)?((?P<minutes>\d+?) minute(?:s)?)?(\s)?((?P<seconds>\d+?) second(?:s)?)?$')
class Timedelta(JSONField):
# Timedeltas are immutable, see http://docs.python.org/2/library/datetime.html#available-types
MUTABLE = False
def from_json(self, time_str):
"""
time_str: A string with the following components:
<D> day[s] (optional)
<H> hour[s] (optional)
<M> minute[s] (optional)
<S> second[s] (optional)
Returns a datetime.timedelta parsed from the string
"""
if time_str is None:
return None
if isinstance(time_str, datetime.timedelta):
return time_str
parts = TIMEDELTA_REGEX.match(time_str)
if not parts:
return
parts = parts.groupdict()
time_params = {}
for (name, param) in parts.iteritems():
if param:
time_params[name] = int(param)
return datetime.timedelta(**time_params)
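    # Illustrative example of the accepted format (a sketch only; it assumes
    # the field can be instantiated standalone, which is not shown here):
    #   Timedelta().from_json("1 day 12 hours 59 minutes 59 seconds")
    #   # -> datetime.timedelta(days=1, hours=12, minutes=59, seconds=59)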
def to_json(self, value):
if value is None:
return None
values = []
for attr in ('days', 'hours', 'minutes', 'seconds'):
cur_value = getattr(value, attr, 0)
if cur_value > 0:
values.append("%d %s" % (cur_value, attr))
return ' '.join(values)
def enforce_type(self, value):
"""
Ensure that when set explicitly the Field is set to a timedelta
"""
if isinstance(value, datetime.timedelta) or value is None:
return value
return self.from_json(value)
class RelativeTime(JSONField):
"""
Field for start_time and end_time video module properties.
    The Python representation of start_time and end_time is a
    datetime.timedelta object, to be consistent with the common time
    representation.
    At the same time, the serialized representation is "HH:MM:SS".
    This format is convenient to use in XML (and it is used now), and it is
    also the format used for the start and end time fields in the front-end
    Studio editor of the video module.
    The database previously stored start_time and end_time as floats, so we
    check for that as well.
Python object of RelativeTime is datetime.timedelta.
JSONed representation of RelativeTime is "HH:MM:SS"
"""
# Timedeltas are immutable, see http://docs.python.org/2/library/datetime.html#available-types
MUTABLE = False
@classmethod
def isotime_to_timedelta(cls, value):
"""
        Validate that the value is in "HH:MM:SS" format and convert it to a timedelta.
        This checks that a user editing the XML uses the proper format, and
        that the maximum value that can be used is "23:59:59".
"""
try:
obj_time = time.strptime(value, '%H:%M:%S')
except ValueError as e:
raise ValueError(
"Incorrect RelativeTime value {!r} was set in XML or serialized. "
"Original parse message is {}".format(value, e.message)
)
return datetime.timedelta(
hours=obj_time.tm_hour,
minutes=obj_time.tm_min,
seconds=obj_time.tm_sec
)
def from_json(self, value):
"""
        Convert a value in 'HH:MM:SS' format to datetime.timedelta.
        If the value is falsy, return a zero timedelta.
        If the value is a float (backward compatibility issue), convert it to a timedelta.
"""
if not value:
return datetime.timedelta(seconds=0)
if isinstance(value, datetime.timedelta):
return value
# We've seen serialized versions of float in this field
if isinstance(value, float):
return datetime.timedelta(seconds=value)
if isinstance(value, basestring):
return self.isotime_to_timedelta(value)
msg = "RelativeTime Field {0} has bad value '{1!r}'".format(self.name, value)
raise TypeError(msg)
def to_json(self, value):
"""
Convert datetime.timedelta to "HH:MM:SS" format.
        If the value is falsy, return "00:00:00".
        Backward compatibility: if the value is a float, convert it; no exceptions are raised here.
        If the value is not a float but exceeds 23:59:59, raise an exception.
"""
if not value:
return "00:00:00"
if isinstance(value, float): # backward compatibility
value = min(value, 86400)
return self.timedelta_to_string(datetime.timedelta(seconds=value))
if isinstance(value, datetime.timedelta):
if value.total_seconds() > 86400: # sanity check
raise ValueError(
"RelativeTime max value is 23:59:59=86400.0 seconds, "
"but {} seconds is passed".format(value.total_seconds())
)
return self.timedelta_to_string(value)
raise TypeError("RelativeTime: cannot convert {!r} to json".format(value))
def timedelta_to_string(self, value):
"""
Makes first 'H' in str representation non-optional.
str(timedelta) has [H]H:MM:SS format, which is not suitable
for front-end (and ISO time standard), so we force HH:MM:SS format.
"""
stringified = str(value)
if len(stringified) == 7:
stringified = '0' + stringified
return stringified
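    # Illustrative example: str(datetime.timedelta(minutes=1, seconds=5)) is
    # '0:01:05' (7 characters), so the padding above produces '00:01:05'.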
def enforce_type(self, value):
"""
Ensure that when set explicitly the Field is set to a timedelta
"""
if isinstance(value, datetime.timedelta) or value is None:
return value
return self.from_json(value)
|
40223125/w16btest1
|
refs/heads/master
|
static/Brython3.1.1-20150328-091302/Lib/site-packages/spur.py
|
291
|
#coding: utf-8
import math
# After importing the math module, the circular constant is math.pi
# deg is the conversion factor from degrees to radians
deg = math.pi/180.
class Spur(object):
def __init__(self, ctx):
self.ctx = ctx
def create_line(self, x1, y1, x2, y2, width=3, fill="red"):
self.ctx.beginPath()
self.ctx.lineWidth = width
self.ctx.moveTo(x1, y1)
self.ctx.lineTo(x2, y2)
self.ctx.strokeStyle = fill
self.ctx.stroke()
#
    # Below: the spur gear drawing routine and the main tkinter canvas drawing
#
    # Define a function that draws a spur gear
    # midx is the x coordinate of the gear center
    # midy is the y coordinate of the gear center
    # rp is the pitch circle radius, n is the number of teeth
    # pa is the pressure angle (deg)
    # rot is the rotation angle (deg)
    # Note: drawing fails when n is 52 because the relative sizes of the base circle and the root circle are not checked; this must be fixed
def Gear(self, midx, midy, rp, n=20, pa=20, color="black"):
        # The gear involute is drawn as 15 line segments
imax = 15
        # On the given canvas, draw a straight line from the gear center to the top of the pitch circle on the y axis
self.create_line(midx, midy, midx, midy-rp)
        # Draw the rp circle (the circle-drawing helper is not defined yet)
#create_oval(midx-rp, midy-rp, midx+rp, midy+rp, width=2)
        # a is the module (the metric measure of tooth size): the pitch diameter divided by the number of teeth
        # The module also equals the addendum height
a=2*rp/n
        # d is the dedendum, 1.157 or 1.25 times the module; 1.25 is used here
d=2.5*rp/n
        # ra is the outer (addendum) radius of the gear
ra=rp+a
        # Draw the ra circle (the circle-drawing helper is not defined yet)
#create_oval(midx-ra, midy-ra, midx+ra, midy+ra, width=1)
        # rb is the base circle radius of the gear
        # The base circle is the reference circle from which the involute tooth profile is generated
rb=rp*math.cos(pa*deg)
        # Draw the rb (base) circle (the circle-drawing helper is not defined yet)
#create_oval(midx-rb, midy-rb, midx+rb, midy+rb, width=1)
        # rd is the root (dedendum) circle radius
rd=rp-d
        # When rd is greater than rb, the involute is drawn down to rd instead of rb
        # Draw the rd (root) circle (the circle-drawing helper is not defined yet)
#create_oval(midx-rd, midy-rd, midx+rd, midy+rd, width=1)
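        # Worked numeric example (assumed, illustrative values): with rp = 100,
        # n = 20 and pa = 20, the module a = 2*100/20 = 10, the dedendum
        # d = 2.5*100/20 = 12.5, so ra = 110, rb = 100*cos(20*deg) ~= 93.97 and
        # rd = 87.5; here rd < rb, so the involute starts at the base circle.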
        # dr is the radial increment of each step when the span from the base circle to the addendum circle is divided into imax segments
        # The involute is drawn by splitting that span into imax segments
        # When rd is greater than rb, the involute is drawn down to rd instead of rb
if rd>rb:
dr = (ra-rd)/imax
else:
dr=(ra-rb)/imax
        # tan(pa*deg) - pa*deg is the involute function
sigma=math.pi/(2*n)+math.tan(pa*deg)-pa*deg
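        # Background for the loops below (added for clarity): for a point on
        # the involute at radius r, cos(phi) = rb/r for its pressure angle phi,
        # so tan(phi) = sqrt(r*r/(rb*rb) - 1), the quantity called theta in the
        # loop; the involute (roll) angle is inv(phi) = tan(phi) - phi =
        # theta - atan(theta), called alpha below. sigma is half the angular
        # tooth thickness at the pitch circle plus the involute function of the
        # pressure angle, and is used to center each tooth flank.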
for j in range(n):
ang=-2.*j*math.pi/n+sigma
ang2=2.*j*math.pi/n+sigma
lxd=midx+rd*math.sin(ang2-2.*math.pi/n)
lyd=midy-rd*math.cos(ang2-2.*math.pi/n)
for i in range(imax+1):
                # When rd is greater than rb, the involute is drawn down to rd instead of rb
if rd>rb:
r=rd+i*dr
else:
r=rb+i*dr
theta=math.sqrt((r*r)/(rb*rb)-1.)
alpha=theta-math.atan(theta)
xpt=r*math.sin(alpha-ang)
ypt=r*math.cos(alpha-ang)
xd=rd*math.sin(-ang)
yd=rd*math.cos(-ang)
                # When i == 0 the line starts from the point on the root circle
if(i==0):
last_x = midx+xd
last_y = midy-yd
                # Starting from the left-hand root circle: except for the first point (xd, yd) on the root circle, the (xpt, ypt) points are the segment points on the involute
self.create_line((midx+xpt),(midy-ypt),(last_x),(last_y),fill=color)
                # The last point lies on the addendum circle
if(i==imax):
lfx=midx+xpt
lfy=midy-ypt
last_x = midx+xpt
last_y = midy-ypt
            # The line from the previous tooth's root point to the current
            # tooth's root point
            # lxd is the x coordinate of the left-hand point on the root circle, lyd is its y coordinate
            # The following straight line approximates the root circle arc
self.create_line((lxd),(lyd),(midx+xd),(midy-yd),fill=color)
for i in range(imax+1):
                # When rd is greater than rb, the involute is drawn down to rd instead of rb
if rd>rb:
r=rd+i*dr
else:
r=rb+i*dr
theta=math.sqrt((r*r)/(rb*rb)-1.)
alpha=theta-math.atan(theta)
xpt=r*math.sin(ang2-alpha)
ypt=r*math.cos(ang2-alpha)
xd=rd*math.sin(ang2)
yd=rd*math.cos(ang2)
                # When i == 0 the line starts from the point on the root circle
if(i==0):
last_x = midx+xd
last_y = midy-yd
                # Starting from the right-hand root circle: except for the first point (xd, yd) on the root circle, the (xpt, ypt) points are the segment points on the involute
self.create_line((midx+xpt),(midy-ypt),(last_x),(last_y),fill=color)
                # The last point lies on the addendum circle
if(i==imax):
rfx=midx+xpt
rfy=midy-ypt
last_x = midx+xpt
last_y = midy-ypt
            # lfx is the x coordinate of the left-hand point on the addendum circle, lfy is its y coordinate
            # The following straight line approximates the addendum (tip) circle arc
self.create_line(lfx,lfy,rfx,rfy,fill=color)
|
stvstnfrd/edx-platform
|
refs/heads/master
|
openedx/core/djangoapps/oauth_dispatch/tests/test_dot_overrides.py
|
1
|
"""
Test of custom django-oauth-toolkit behavior
"""
# pylint: disable=protected-access
import datetime
import unittest
from django.conf import settings
from django.contrib.auth.models import User # lint-amnesty, pylint: disable=imported-auth-user
from django.test import RequestFactory, TestCase
from django.utils import timezone
from common.djangoapps.student.tests.factories import UserFactory
# oauth_dispatch is not in CMS' INSTALLED_APPS so these imports will error during test collection
if settings.ROOT_URLCONF == 'lms.urls':
from oauth2_provider import models as dot_models
from .. import adapters
from .. import models
from ..dot_overrides.validators import EdxOAuth2Validator
from .constants import DUMMY_REDIRECT_URL
@unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in lms')
class AuthenticateTestCase(TestCase):
"""
Test that users can authenticate with either username or email
"""
def setUp(self):
super(AuthenticateTestCase, self).setUp() # lint-amnesty, pylint: disable=super-with-arguments
self.user = User.objects.create_user(
username='darkhelmet',
password='12345',
email='darkhelmet@spaceball_one.org',
)
self.validator = EdxOAuth2Validator()
def test_authenticate_with_username(self):
user = self.validator._authenticate(username='darkhelmet', password='12345')
assert self.user == user
def test_authenticate_with_email(self):
user = self.validator._authenticate(username='darkhelmet@spaceball_one.org', password='12345')
assert self.user == user
@unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in lms')
class CustomValidationTestCase(TestCase):
"""
Test custom user validation works.
In particular, inactive users should be able to validate.
"""
def setUp(self):
super(CustomValidationTestCase, self).setUp() # lint-amnesty, pylint: disable=super-with-arguments
self.user = User.objects.create_user(
username='darkhelmet',
password='12345',
email='darkhelmet@spaceball_one.org',
)
self.validator = EdxOAuth2Validator()
self.request_factory = RequestFactory()
def test_active_user_validates(self):
assert self.user.is_active
request = self.request_factory.get('/')
assert self.validator.validate_user('darkhelmet', '12345', client=None, request=request)
def test_inactive_user_validates(self):
self.user.is_active = False
self.user.save()
request = self.request_factory.get('/')
assert self.validator.validate_user('darkhelmet', '12345', client=None, request=request)
@unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in lms')
class CustomAuthorizationViewTestCase(TestCase):
"""
Test custom authorization view works.
In particular, users should not be re-prompted to approve
an application even if the access token is expired.
(This is a temporary override until Auth Scopes is implemented.)
"""
def setUp(self):
super(CustomAuthorizationViewTestCase, self).setUp() # lint-amnesty, pylint: disable=super-with-arguments
self.dot_adapter = adapters.DOTAdapter()
self.user = UserFactory()
self.client.login(username=self.user.username, password='test')
self.restricted_dot_app = self._create_restricted_app()
self._create_expired_token(self.restricted_dot_app)
def _create_restricted_app(self): # lint-amnesty, pylint: disable=missing-function-docstring
restricted_app = self.dot_adapter.create_confidential_client(
name='test restricted dot application',
user=self.user,
redirect_uri=DUMMY_REDIRECT_URL,
client_id='dot-restricted-app-client-id',
)
models.RestrictedApplication.objects.create(application=restricted_app)
return restricted_app
def _create_expired_token(self, application):
date_in_the_past = timezone.now() + datetime.timedelta(days=-100)
dot_models.AccessToken.objects.create(
user=self.user,
token='1234567890',
application=application,
expires=date_in_the_past,
scope='profile',
)
def _get_authorize(self, scope):
authorize_url = '/oauth2/authorize/'
return self.client.get(
authorize_url,
{
'client_id': self.restricted_dot_app.client_id,
'response_type': 'code',
'state': 'random_state_string',
'redirect_uri': DUMMY_REDIRECT_URL,
'scope': scope,
},
)
def test_no_reprompting(self):
response = self._get_authorize(scope='profile')
assert response.status_code == 302
assert response.url.startswith(DUMMY_REDIRECT_URL)
def test_prompting_with_new_scope(self):
response = self._get_authorize(scope='email')
assert response.status_code == 200
self.assertContains(response, settings.OAUTH2_PROVIDER['SCOPES']['email'])
self.assertNotContains(response, settings.OAUTH2_PROVIDER['SCOPES']['profile'])
|
saeki-masaki/glance
|
refs/heads/master
|
glance/tests/unit/v2/test_registry_client.py
|
7
|
# Copyright 2013 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests for Glance Registry's client.
These tests are temporary and will be removed once
the registry's driver tests are added.
"""
import copy
import datetime
import os
import uuid
from mock import patch
from oslo_utils import timeutils
from glance.common import config
from glance.common import exception
from glance import context
from glance.db.sqlalchemy import api as db_api
from glance import i18n
from glance.registry.api import v2 as rserver
import glance.registry.client.v2.api as rapi
from glance.registry.client.v2.api import client as rclient
from glance.tests.unit import base
from glance.tests import utils as test_utils
_ = i18n._
_gen_uuid = lambda: str(uuid.uuid4())
UUID1 = str(uuid.uuid4())
UUID2 = str(uuid.uuid4())
# NOTE(bcwaldon): needed to init config_dir cli opt
config.parse_args(args=[])
class TestRegistryV2Client(base.IsolatedUnitTest,
test_utils.RegistryAPIMixIn):
"""Test proper actions made against a registry service.
Test for both valid and invalid requests.
"""
    # Registry server to use
# in the stub.
registry = rserver
def setUp(self):
"""Establish a clean test environment"""
super(TestRegistryV2Client, self).setUp()
db_api.get_engine()
self.context = context.RequestContext(is_admin=True)
uuid1_time = timeutils.utcnow()
uuid2_time = uuid1_time + datetime.timedelta(seconds=5)
self.FIXTURES = [
self.get_extra_fixture(
id=UUID1, name='fake image #1', is_public=False,
disk_format='ami', container_format='ami', size=13,
virtual_size=26, properties={'type': 'kernel'},
location="swift://user:passwd@acct/container/obj.tar.0",
created_at=uuid1_time),
self.get_extra_fixture(id=UUID2, name='fake image #2',
properties={}, size=19, virtual_size=38,
location="file:///tmp/glance-tests/2",
created_at=uuid2_time)]
self.destroy_fixtures()
self.create_fixtures()
self.client = rclient.RegistryClient("0.0.0.0")
def tearDown(self):
"""Clear the test environment"""
super(TestRegistryV2Client, self).tearDown()
self.destroy_fixtures()
def test_image_get_index(self):
"""Test correct set of public image returned"""
images = self.client.image_get_all()
self.assertEqual(2, len(images))
def test_create_image_with_null_min_disk_min_ram(self):
UUID3 = _gen_uuid()
extra_fixture = self.get_fixture(id=UUID3, name='asdf', min_disk=None,
min_ram=None)
db_api.image_create(self.context, extra_fixture)
image = self.client.image_get(image_id=UUID3)
self.assertEqual(0, image["min_ram"])
self.assertEqual(0, image["min_disk"])
def test_get_index_sort_name_asc(self):
"""Tests that the registry API returns list of public images.
Must be sorted alphabetically by name in ascending order.
"""
UUID3 = _gen_uuid()
extra_fixture = self.get_fixture(id=UUID3, name='asdf')
db_api.image_create(self.context, extra_fixture)
UUID4 = _gen_uuid()
extra_fixture = self.get_fixture(id=UUID4, name='xyz')
db_api.image_create(self.context, extra_fixture)
images = self.client.image_get_all(sort_key=['name'],
sort_dir=['asc'])
self.assertEqualImages(images, (UUID3, UUID1, UUID2, UUID4),
unjsonify=False)
def test_get_index_sort_status_desc(self):
"""Tests that the registry API returns list of public images.
Must be sorted alphabetically by status in descending order.
"""
uuid4_time = timeutils.utcnow() + datetime.timedelta(seconds=10)
UUID3 = _gen_uuid()
extra_fixture = self.get_fixture(id=UUID3, name='asdf',
status='queued')
db_api.image_create(self.context, extra_fixture)
UUID4 = _gen_uuid()
extra_fixture = self.get_fixture(id=UUID4, name='xyz',
created_at=uuid4_time)
db_api.image_create(self.context, extra_fixture)
images = self.client.image_get_all(sort_key=['status'],
sort_dir=['desc'])
self.assertEqualImages(images, (UUID3, UUID4, UUID2, UUID1),
unjsonify=False)
def test_get_index_sort_disk_format_asc(self):
"""Tests that the registry API returns list of public images.
        Must be sorted alphabetically by disk_format in ascending order.
"""
UUID3 = _gen_uuid()
extra_fixture = self.get_fixture(id=UUID3, name='asdf',
disk_format='ami',
container_format='ami')
db_api.image_create(self.context, extra_fixture)
UUID4 = _gen_uuid()
extra_fixture = self.get_fixture(id=UUID4, name='xyz',
disk_format='vdi')
db_api.image_create(self.context, extra_fixture)
images = self.client.image_get_all(sort_key=['disk_format'],
sort_dir=['asc'])
self.assertEqualImages(images, (UUID1, UUID3, UUID4, UUID2),
unjsonify=False)
def test_get_index_sort_container_format_desc(self):
"""Tests that the registry API returns list of public images.
Must be sorted alphabetically by container_format in descending order.
"""
UUID3 = _gen_uuid()
extra_fixture = self.get_fixture(id=UUID3, name='asdf',
disk_format='ami',
container_format='ami')
db_api.image_create(self.context, extra_fixture)
UUID4 = _gen_uuid()
extra_fixture = self.get_fixture(id=UUID4, name='xyz',
disk_format='iso',
container_format='bare')
db_api.image_create(self.context, extra_fixture)
images = self.client.image_get_all(sort_key=['container_format'],
sort_dir=['desc'])
self.assertEqualImages(images, (UUID2, UUID4, UUID3, UUID1),
unjsonify=False)
def test_get_index_sort_size_asc(self):
"""Tests that the registry API returns list of public images.
Must be sorted by size in ascending order.
"""
UUID3 = _gen_uuid()
extra_fixture = self.get_fixture(id=UUID3, name='asdf',
disk_format='ami',
container_format='ami',
size=100, virtual_size=200)
db_api.image_create(self.context, extra_fixture)
UUID4 = _gen_uuid()
extra_fixture = self.get_fixture(id=UUID4, name='asdf',
disk_format='iso',
container_format='bare',
size=2, virtual_size=4)
db_api.image_create(self.context, extra_fixture)
images = self.client.image_get_all(sort_key=['size'], sort_dir=['asc'])
self.assertEqualImages(images, (UUID4, UUID1, UUID2, UUID3),
unjsonify=False)
def test_get_index_sort_created_at_asc(self):
"""Tests that the registry API returns list of public images.
Must be sorted by created_at in ascending order.
"""
uuid4_time = timeutils.utcnow() + datetime.timedelta(seconds=10)
uuid3_time = uuid4_time + datetime.timedelta(seconds=5)
UUID3 = _gen_uuid()
extra_fixture = self.get_fixture(id=UUID3, created_at=uuid3_time)
db_api.image_create(self.context, extra_fixture)
UUID4 = _gen_uuid()
extra_fixture = self.get_fixture(id=UUID4, created_at=uuid4_time)
db_api.image_create(self.context, extra_fixture)
images = self.client.image_get_all(sort_key=['created_at'],
sort_dir=['asc'])
self.assertEqualImages(images, (UUID1, UUID2, UUID4, UUID3),
unjsonify=False)
def test_get_index_sort_updated_at_desc(self):
"""Tests that the registry API returns list of public images.
Must be sorted by updated_at in descending order.
"""
uuid4_time = timeutils.utcnow() + datetime.timedelta(seconds=10)
uuid3_time = uuid4_time + datetime.timedelta(seconds=5)
UUID3 = _gen_uuid()
extra_fixture = self.get_fixture(id=UUID3, created_at=None,
updated_at=uuid3_time)
db_api.image_create(self.context, extra_fixture)
UUID4 = _gen_uuid()
extra_fixture = self.get_fixture(id=UUID4, created_at=None,
updated_at=uuid4_time)
db_api.image_create(self.context, extra_fixture)
images = self.client.image_get_all(sort_key=['updated_at'],
sort_dir=['desc'])
self.assertEqualImages(images, (UUID3, UUID4, UUID2, UUID1),
unjsonify=False)
def test_get_image_details_sort_multiple_keys(self):
"""
Tests that a detailed call returns list of
public images sorted by name-size and
size-name in ascending order.
"""
UUID3 = _gen_uuid()
extra_fixture = self.get_fixture(id=UUID3, name='asdf',
size=19)
db_api.image_create(self.context, extra_fixture)
UUID4 = _gen_uuid()
extra_fixture = self.get_fixture(id=UUID4, name=u'xyz',
size=20)
db_api.image_create(self.context, extra_fixture)
UUID5 = _gen_uuid()
extra_fixture = self.get_fixture(id=UUID5, name=u'asdf',
size=20)
db_api.image_create(self.context, extra_fixture)
images = self.client.image_get_all(sort_key=['name', 'size'],
sort_dir=['asc'])
self.assertEqualImages(images, (UUID3, UUID5, UUID1, UUID2, UUID4),
unjsonify=False)
images = self.client.image_get_all(sort_key=['size', 'name'],
sort_dir=['asc'])
self.assertEqualImages(images, (UUID1, UUID3, UUID2, UUID5, UUID4),
unjsonify=False)
def test_get_image_details_sort_multiple_dirs(self):
"""
Tests that a detailed call returns list of
public images sorted by name-size and
size-name in ascending and descending orders.
"""
UUID3 = _gen_uuid()
extra_fixture = self.get_fixture(id=UUID3, name='asdf',
size=19)
db_api.image_create(self.context, extra_fixture)
UUID4 = _gen_uuid()
extra_fixture = self.get_fixture(id=UUID4, name='xyz',
size=20)
db_api.image_create(self.context, extra_fixture)
UUID5 = _gen_uuid()
extra_fixture = self.get_fixture(id=UUID5, name='asdf',
size=20)
db_api.image_create(self.context, extra_fixture)
images = self.client.image_get_all(sort_key=['name', 'size'],
sort_dir=['asc', 'desc'])
self.assertEqualImages(images, (UUID5, UUID3, UUID1, UUID2, UUID4),
unjsonify=False)
images = self.client.image_get_all(sort_key=['name', 'size'],
sort_dir=['desc', 'asc'])
self.assertEqualImages(images, (UUID4, UUID2, UUID1, UUID3, UUID5),
unjsonify=False)
images = self.client.image_get_all(sort_key=['size', 'name'],
sort_dir=['asc', 'desc'])
self.assertEqualImages(images, (UUID1, UUID2, UUID3, UUID4, UUID5),
unjsonify=False)
images = self.client.image_get_all(sort_key=['size', 'name'],
sort_dir=['desc', 'asc'])
self.assertEqualImages(images, (UUID5, UUID4, UUID3, UUID2, UUID1),
unjsonify=False)
def test_image_get_index_marker(self):
"""Test correct set of images returned with marker param."""
uuid4_time = timeutils.utcnow() + datetime.timedelta(seconds=10)
uuid3_time = uuid4_time + datetime.timedelta(seconds=5)
UUID3 = _gen_uuid()
extra_fixture = self.get_fixture(id=UUID3, name='new name! #123',
status='saving',
created_at=uuid3_time)
db_api.image_create(self.context, extra_fixture)
UUID4 = _gen_uuid()
extra_fixture = self.get_fixture(id=UUID4, name='new name! #125',
status='saving',
created_at=uuid4_time)
db_api.image_create(self.context, extra_fixture)
images = self.client.image_get_all(marker=UUID3)
self.assertEqualImages(images, (UUID4, UUID2, UUID1), unjsonify=False)
def test_image_get_index_limit(self):
"""Test correct number of images returned with limit param."""
extra_fixture = self.get_fixture(id=_gen_uuid(),
name='new name! #123',
status='saving')
db_api.image_create(self.context, extra_fixture)
extra_fixture = self.get_fixture(id=_gen_uuid(),
name='new name! #125',
status='saving')
db_api.image_create(self.context, extra_fixture)
images = self.client.image_get_all(limit=2)
self.assertEqual(2, len(images))
def test_image_get_index_marker_limit(self):
"""Test correct set of images returned with marker/limit params."""
uuid4_time = timeutils.utcnow() + datetime.timedelta(seconds=10)
uuid3_time = uuid4_time + datetime.timedelta(seconds=5)
UUID3 = _gen_uuid()
extra_fixture = self.get_fixture(id=UUID3, name='new name! #123',
status='saving',
created_at=uuid3_time)
db_api.image_create(self.context, extra_fixture)
UUID4 = _gen_uuid()
extra_fixture = self.get_fixture(id=UUID4, name='new name! #125',
status='saving',
created_at=uuid4_time)
db_api.image_create(self.context, extra_fixture)
images = self.client.image_get_all(marker=UUID4, limit=1)
self.assertEqualImages(images, (UUID2,), unjsonify=False)
def test_image_get_index_limit_None(self):
"""Test correct set of images returned with limit param == None."""
extra_fixture = self.get_fixture(id=_gen_uuid(),
name='new name! #123',
status='saving')
db_api.image_create(self.context, extra_fixture)
extra_fixture = self.get_fixture(id=_gen_uuid(),
name='new name! #125',
status='saving')
db_api.image_create(self.context, extra_fixture)
images = self.client.image_get_all(limit=None)
self.assertEqual(4, len(images))
def test_image_get_index_by_name(self):
"""Test correct set of public, name-filtered image returned.
This is just a sanity check, we test the details call more in-depth.
"""
extra_fixture = self.get_fixture(id=_gen_uuid(),
name='new name! #123')
db_api.image_create(self.context, extra_fixture)
images = self.client.image_get_all(filters={'name': 'new name! #123'})
self.assertEqual(1, len(images))
for image in images:
self.assertEqual('new name! #123', image['name'])
def test_image_get_is_public_v2(self):
"""Tests that a detailed call can be filtered by a property"""
extra_fixture = self.get_fixture(id=_gen_uuid(), status='saving',
properties={'is_public': 'avalue'})
context = copy.copy(self.context)
db_api.image_create(context, extra_fixture)
filters = {'is_public': 'avalue'}
images = self.client.image_get_all(filters=filters)
self.assertEqual(1, len(images))
for image in images:
self.assertEqual('avalue', image['properties'][0]['value'])
def test_image_get(self):
"""Tests that the detailed info about an image returned"""
fixture = self.get_fixture(id=UUID1, name='fake image #1',
is_public=False, size=13, virtual_size=26,
disk_format='ami', container_format='ami')
data = self.client.image_get(image_id=UUID1)
for k, v in fixture.items():
el = data[k]
self.assertEqual(v, data[k],
"Failed v != data[k] where v = %(v)s and "
"k = %(k)s and data[k] = %(el)s" %
dict(v=v, k=k, el=el))
def test_image_get_non_existing(self):
"""Tests that NotFound is raised when getting a non-existing image"""
self.assertRaises(exception.NotFound,
self.client.image_get,
image_id=_gen_uuid())
def test_image_create_basic(self):
"""Tests that we can add image metadata and returns the new id"""
fixture = self.get_fixture()
new_image = self.client.image_create(values=fixture)
# Test all other attributes set
data = self.client.image_get(image_id=new_image['id'])
for k, v in fixture.items():
self.assertEqual(v, data[k])
# Test status was updated properly
self.assertIn('status', data)
self.assertEqual('active', data['status'])
def test_image_create_with_properties(self):
"""Tests that we can add image metadata with properties"""
fixture = self.get_fixture(location="file:///tmp/glance-tests/2",
properties={'distro': 'Ubuntu 10.04 LTS'})
new_image = self.client.image_create(values=fixture)
self.assertIn('properties', new_image)
self.assertEqual(new_image['properties'][0]['value'],
fixture['properties']['distro'])
del fixture['location']
del fixture['properties']
for k, v in fixture.items():
self.assertEqual(v, new_image[k])
# Test status was updated properly
self.assertIn('status', new_image.keys())
self.assertEqual('active', new_image['status'])
def test_image_create_already_exists(self):
"""Tests proper exception is raised if image with ID already exists"""
fixture = self.get_fixture(id=UUID2,
location="file:///tmp/glance-tests/2")
self.assertRaises(exception.Duplicate,
self.client.image_create,
values=fixture)
def test_image_create_with_bad_status(self):
"""Tests proper exception is raised if a bad status is set"""
fixture = self.get_fixture(status='bad status',
location="file:///tmp/glance-tests/2")
self.assertRaises(exception.Invalid,
self.client.image_create,
values=fixture)
def test_image_update(self):
"""Tests that the registry API updates the image"""
fixture = {'name': 'fake public image #2',
'disk_format': 'vmdk',
'status': 'saving'}
self.assertTrue(self.client.image_update(image_id=UUID2,
values=fixture))
# Test all other attributes set
data = self.client.image_get(image_id=UUID2)
for k, v in fixture.items():
self.assertEqual(v, data[k])
def test_image_update_conflict(self):
"""Tests that the registry API updates the image"""
next_state = 'saving'
fixture = {'name': 'fake public image #2',
'disk_format': 'vmdk',
'status': next_state}
image = self.client.image_get(image_id=UUID2)
current = image['status']
self.assertEqual('active', current)
# image is in 'active' state so this should cause a failure.
from_state = 'saving'
self.assertRaises(exception.Conflict, self.client.image_update,
image_id=UUID2, values=fixture,
from_state=from_state)
try:
self.client.image_update(image_id=UUID2, values=fixture,
from_state=from_state)
except exception.Conflict as exc:
msg = (_('cannot transition from %(current)s to '
'%(next)s in update (wanted '
'from_state=%(from)s)') %
{'current': current, 'next': next_state,
'from': from_state})
self.assertEqual(str(exc), msg)
def test_image_update_with_invalid_min_disk(self):
"""Tests that the registry API updates the image"""
next_state = 'saving'
fixture = {'name': 'fake image',
'disk_format': 'vmdk',
'min_disk': str(2 ** 31 + 1),
'status': next_state}
image = self.client.image_get(image_id=UUID2)
current = image['status']
self.assertEqual('active', current)
# image is in 'active' state so this should cause a failure.
from_state = 'saving'
self.assertRaises(exception.Invalid, self.client.image_update,
image_id=UUID2, values=fixture,
from_state=from_state)
def test_image_update_with_invalid_min_ram(self):
"""Tests that the registry API updates the image"""
next_state = 'saving'
fixture = {'name': 'fake image',
'disk_format': 'vmdk',
'min_ram': str(2 ** 31 + 1),
'status': next_state}
image = self.client.image_get(image_id=UUID2)
current = image['status']
self.assertEqual('active', current)
# image is in 'active' state so this should cause a failure.
from_state = 'saving'
self.assertRaises(exception.Invalid, self.client.image_update,
image_id=UUID2, values=fixture,
from_state=from_state)
def _test_image_update_not_existing(self):
"""Tests non existing image update doesn't work"""
fixture = self.get_fixture(status='bad status')
self.assertRaises(exception.NotFound,
self.client.image_update,
image_id=_gen_uuid(),
values=fixture)
def test_image_destroy(self):
"""Tests that image metadata is deleted properly"""
# Grab the original number of images
orig_num_images = len(self.client.image_get_all())
# Delete image #2
image = self.FIXTURES[1]
deleted_image = self.client.image_destroy(image_id=image['id'])
self.assertTrue(deleted_image)
self.assertEqual(image['id'], deleted_image['id'])
self.assertTrue(deleted_image['deleted'])
self.assertTrue(deleted_image['deleted_at'])
# Verify one less image
filters = {'deleted': False}
new_num_images = len(self.client.image_get_all(filters=filters))
self.assertEqual(new_num_images, orig_num_images - 1)
def test_image_destroy_not_existing(self):
"""Tests cannot delete non-existing image"""
self.assertRaises(exception.NotFound,
self.client.image_destroy,
image_id=_gen_uuid())
def test_image_get_members(self):
"""Tests getting image members"""
memb_list = self.client.image_member_find(image_id=UUID2)
num_members = len(memb_list)
self.assertEqual(0, num_members)
def test_image_get_members_not_existing(self):
"""Tests getting non-existent image members"""
self.assertRaises(exception.NotFound,
self.client.image_get_members,
image_id=_gen_uuid())
def test_image_member_find(self):
"""Tests getting member images"""
memb_list = self.client.image_member_find(member='pattieblack')
num_members = len(memb_list)
self.assertEqual(0, num_members)
def test_add_update_members(self):
"""Tests updating image members"""
values = dict(image_id=UUID2, member='pattieblack')
member = self.client.image_member_create(values=values)
self.assertTrue(member)
values['member'] = 'pattieblack2'
self.assertTrue(self.client.image_member_update(memb_id=member['id'],
values=values))
def test_add_delete_member(self):
"""Tests deleting image members"""
values = dict(image_id=UUID2, member='pattieblack')
member = self.client.image_member_create(values=values)
self.client.image_member_delete(memb_id=member['id'])
memb_list = self.client.image_member_find(member='pattieblack')
self.assertEqual(0, len(memb_list))
class TestRegistryV2ClientApi(base.IsolatedUnitTest):
"""Test proper actions made against a registry service.
Test for both valid and invalid requests.
"""
def setUp(self):
"""Establish a clean test environment"""
super(TestRegistryV2ClientApi, self).setUp()
reload(rapi)
def tearDown(self):
"""Clear the test environment"""
super(TestRegistryV2ClientApi, self).tearDown()
def test_configure_registry_client_not_using_use_user_token(self):
self.config(use_user_token=False)
with patch.object(rapi,
'configure_registry_admin_creds') as mock_rapi:
rapi.configure_registry_client()
mock_rapi.assert_called_once_with()
def _get_fake_config_creds(self, auth_url='auth_url', strategy='keystone'):
return {
'user': 'user',
'password': 'password',
'username': 'user',
'tenant': 'tenant',
'auth_url': auth_url,
'strategy': strategy,
'region': 'region'
}
def test_configure_registry_admin_creds(self):
expected = self._get_fake_config_creds(auth_url=None,
strategy='configured_strategy')
self.config(admin_user=expected['user'])
self.config(admin_password=expected['password'])
self.config(admin_tenant_name=expected['tenant'])
self.config(auth_strategy=expected['strategy'])
self.config(auth_region=expected['region'])
self.stubs.Set(os, 'getenv', lambda x: None)
self.assertIsNone(rapi._CLIENT_CREDS)
rapi.configure_registry_admin_creds()
self.assertEqual(expected, rapi._CLIENT_CREDS)
def test_configure_registry_admin_creds_with_auth_url(self):
expected = self._get_fake_config_creds()
self.config(admin_user=expected['user'])
self.config(admin_password=expected['password'])
self.config(admin_tenant_name=expected['tenant'])
self.config(auth_url=expected['auth_url'])
self.config(auth_strategy='test_strategy')
self.config(auth_region=expected['region'])
self.assertIsNone(rapi._CLIENT_CREDS)
rapi.configure_registry_admin_creds()
self.assertEqual(expected, rapi._CLIENT_CREDS)
|
alex/boto
|
refs/heads/develop
|
tests/unit/cloudsearch2/test_search.py
|
114
|
#!/usr/bin/env python
from boto.cloudsearch2.domain import Domain
from boto.cloudsearch2.layer1 import CloudSearchConnection
from tests.compat import mock, unittest
from httpretty import HTTPretty
import json
from boto.cloudsearch2.search import SearchConnection, SearchServiceException
from boto.compat import six, map
from tests.unit import AWSMockServiceTestCase
from tests.unit.cloudsearch2 import DEMO_DOMAIN_DATA
from tests.unit.cloudsearch2.test_connection import TestCloudSearchCreateDomain
HOSTNAME = "search-demo-userdomain.us-east-1.cloudsearch.amazonaws.com"
FULL_URL = 'http://%s/2013-01-01/search' % HOSTNAME
class CloudSearchSearchBaseTest(unittest.TestCase):
hits = [
{
'id': '12341',
'fields': {
'title': 'Document 1',
'rank': 1
}
},
{
'id': '12342',
'fields': {
'title': 'Document 2',
'rank': 2
}
},
{
'id': '12343',
'fields': {
'title': 'Document 3',
'rank': 3
}
},
{
'id': '12344',
'fields': {
'title': 'Document 4',
'rank': 4
}
},
{
'id': '12345',
'fields': {
'title': 'Document 5',
'rank': 5
}
},
{
'id': '12346',
'fields': {
'title': 'Document 6',
'rank': 6
}
},
{
'id': '12347',
'fields': {
'title': 'Document 7',
'rank': 7
}
},
]
content_type = "text/xml"
response_status = 200
def get_args(self, requestline):
(_, request, _) = requestline.split(b" ")
(_, request) = request.split(b"?", 1)
args = six.moves.urllib.parse.parse_qs(request)
return args
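    # Illustrative example (assumed request line, not taken from the fixtures):
    # for b"GET /2013-01-01/search?q=Test&size=10 HTTP/1.1" this returns
    # {b'q': [b'Test'], b'size': [b'10']}.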
def setUp(self):
HTTPretty.enable()
body = self.response
if not isinstance(body, bytes):
body = json.dumps(body).encode('utf-8')
HTTPretty.register_uri(HTTPretty.GET, FULL_URL,
body=body,
content_type=self.content_type,
status=self.response_status)
def tearDown(self):
HTTPretty.disable()
class CloudSearchSearchTest(CloudSearchSearchBaseTest):
response = {
'rank': '-text_relevance',
'match-expr': "Test",
'hits': {
'found': 30,
'start': 0,
'hit': CloudSearchSearchBaseTest.hits
},
'status': {
'rid': 'b7c167f6c2da6d93531b9a7b314ad030b3a74803b4b7797edb905ba5a6a08',
'time-ms': 2,
'cpu-time-ms': 0
}
}
def test_cloudsearch_qsearch(self):
search = SearchConnection(endpoint=HOSTNAME)
search.search(q='Test', options='TestOptions')
args = self.get_args(HTTPretty.last_request.raw_requestline)
self.assertEqual(args[b'q'], [b"Test"])
self.assertEqual(args[b'q.options'], [b"TestOptions"])
self.assertEqual(args[b'start'], [b"0"])
self.assertEqual(args[b'size'], [b"10"])
def test_cloudsearch_search_details(self):
search = SearchConnection(endpoint=HOSTNAME)
search.search(q='Test', size=50, start=20)
args = self.get_args(HTTPretty.last_request.raw_requestline)
self.assertEqual(args[b'q'], [b"Test"])
self.assertEqual(args[b'size'], [b"50"])
self.assertEqual(args[b'start'], [b"20"])
def test_cloudsearch_facet_constraint_single(self):
search = SearchConnection(endpoint=HOSTNAME)
search.search(
q='Test',
facet={'author': "'John Smith','Mark Smith'"})
args = self.get_args(HTTPretty.last_request.raw_requestline)
self.assertEqual(args[b'facet.author'],
[b"'John Smith','Mark Smith'"])
def test_cloudsearch_facet_constraint_multiple(self):
search = SearchConnection(endpoint=HOSTNAME)
search.search(
q='Test',
facet={'author': "'John Smith','Mark Smith'",
'category': "'News','Reviews'"})
args = self.get_args(HTTPretty.last_request.raw_requestline)
self.assertEqual(args[b'facet.author'],
[b"'John Smith','Mark Smith'"])
self.assertEqual(args[b'facet.category'],
[b"'News','Reviews'"])
def test_cloudsearch_facet_sort_single(self):
search = SearchConnection(endpoint=HOSTNAME)
search.search(q='Test', facet={'author': {'sort': 'alpha'}})
args = self.get_args(HTTPretty.last_request.raw_requestline)
print(args)
self.assertEqual(args[b'facet.author'], [b'{"sort": "alpha"}'])
def test_cloudsearch_facet_sort_multiple(self):
search = SearchConnection(endpoint=HOSTNAME)
search.search(q='Test', facet={'author': {'sort': 'alpha'},
'cat': {'sort': 'count'}})
args = self.get_args(HTTPretty.last_request.raw_requestline)
self.assertEqual(args[b'facet.author'], [b'{"sort": "alpha"}'])
self.assertEqual(args[b'facet.cat'], [b'{"sort": "count"}'])
def test_cloudsearch_result_fields_single(self):
search = SearchConnection(endpoint=HOSTNAME)
search.search(q='Test', return_fields=['author'])
args = self.get_args(HTTPretty.last_request.raw_requestline)
self.assertEqual(args[b'return'], [b'author'])
def test_cloudsearch_result_fields_multiple(self):
search = SearchConnection(endpoint=HOSTNAME)
search.search(q='Test', return_fields=['author', 'title'])
args = self.get_args(HTTPretty.last_request.raw_requestline)
self.assertEqual(args[b'return'], [b'author,title'])
def test_cloudsearch_results_meta(self):
"""Check returned metadata is parsed correctly"""
search = SearchConnection(endpoint=HOSTNAME)
results = search.search(q='Test')
# These rely on the default response which is fed into HTTPretty
self.assertEqual(results.hits, 30)
self.assertEqual(results.docs[0]['fields']['rank'], 1)
def test_cloudsearch_results_info(self):
"""Check num_pages_needed is calculated correctly"""
search = SearchConnection(endpoint=HOSTNAME)
results = search.search(q='Test')
# This relies on the default response which is fed into HTTPretty
self.assertEqual(results.num_pages_needed, 3.0)
def test_cloudsearch_results_matched(self):
"""
Check that information objects are passed back through the API
correctly.
"""
search = SearchConnection(endpoint=HOSTNAME)
query = search.build_query(q='Test')
results = search(query)
self.assertEqual(results.search_service, search)
self.assertEqual(results.query, query)
def test_cloudsearch_results_hits(self):
"""Check that documents are parsed properly from AWS"""
search = SearchConnection(endpoint=HOSTNAME)
results = search.search(q='Test')
hits = list(map(lambda x: x['id'], results.docs))
# This relies on the default response which is fed into HTTPretty
self.assertEqual(
hits, ["12341", "12342", "12343", "12344",
"12345", "12346", "12347"])
def test_cloudsearch_results_iterator(self):
"""Check the results iterator"""
search = SearchConnection(endpoint=HOSTNAME)
results = search.search(q='Test')
results_correct = iter(["12341", "12342", "12343", "12344",
"12345", "12346", "12347"])
for x in results:
self.assertEqual(x['id'], next(results_correct))
def test_cloudsearch_results_internal_consistancy(self):
"""Check the documents length matches the iterator details"""
search = SearchConnection(endpoint=HOSTNAME)
results = search.search(q='Test')
self.assertEqual(len(results), len(results.docs))
def test_cloudsearch_search_nextpage(self):
"""Check next page query is correct"""
search = SearchConnection(endpoint=HOSTNAME)
query1 = search.build_query(q='Test')
query2 = search.build_query(q='Test')
results = search(query2)
self.assertEqual(results.next_page().query.start,
query1.start + query1.size)
self.assertEqual(query1.q, query2.q)
class CloudSearchSearchFacetTest(CloudSearchSearchBaseTest):
response = {
'rank': '-text_relevance',
'match-expr': "Test",
'hits': {
'found': 30,
'start': 0,
'hit': CloudSearchSearchBaseTest.hits
},
'status': {
'rid': 'b7c167f6c2da6d93531b9a7b314ad030b3a74803b4b7797edb905ba5a6a08',
'time-ms': 2,
'cpu-time-ms': 0
},
'facets': {
'tags': {},
'animals': {'buckets': [{'count': '2', 'value': 'fish'}, {'count': '1', 'value': 'lions'}]},
}
}
def test_cloudsearch_search_facets(self):
#self.response['facets'] = {'tags': {}}
search = SearchConnection(endpoint=HOSTNAME)
results = search.search(q='Test', facet={'tags': {}})
self.assertTrue('tags' not in results.facets)
self.assertEqual(results.facets['animals'], {u'lions': u'1', u'fish': u'2'})
class CloudSearchNonJsonTest(CloudSearchSearchBaseTest):
response = b'<html><body><h1>500 Internal Server Error</h1></body></html>'
response_status = 500
content_type = 'text/xml'
def test_response(self):
search = SearchConnection(endpoint=HOSTNAME)
with self.assertRaises(SearchServiceException):
search.search(q='Test')
class CloudSearchUnauthorizedTest(CloudSearchSearchBaseTest):
response = b'<html><body><h1>403 Forbidden</h1>foo bar baz</body></html>'
response_status = 403
content_type = 'text/html'
def test_response(self):
search = SearchConnection(endpoint=HOSTNAME)
with self.assertRaisesRegexp(SearchServiceException, 'foo bar baz'):
search.search(q='Test')
class FakeResponse(object):
status_code = 405
content = b''
class CloudSearchConnectionTest(AWSMockServiceTestCase):
cloudsearch = True
connection_class = CloudSearchConnection
def setUp(self):
super(CloudSearchConnectionTest, self).setUp()
self.conn = SearchConnection(
endpoint='test-domain.cloudsearch.amazonaws.com'
)
def test_expose_additional_error_info(self):
mpo = mock.patch.object
fake = FakeResponse()
fake.content = b'Nopenopenope'
# First, in the case of a non-JSON, non-403 error.
with mpo(self.conn.session, 'get', return_value=fake) as mock_request:
with self.assertRaises(SearchServiceException) as cm:
self.conn.search(q='not_gonna_happen')
self.assertTrue('non-json response' in str(cm.exception))
self.assertTrue('Nopenopenope' in str(cm.exception))
# Then with JSON & an 'error' key within.
fake.content = json.dumps({
'error': "Something went wrong. Oops."
}).encode('utf-8')
with mpo(self.conn.session, 'get', return_value=fake) as mock_request:
with self.assertRaises(SearchServiceException) as cm:
self.conn.search(q='no_luck_here')
self.assertTrue('Unknown error' in str(cm.exception))
self.assertTrue('went wrong. Oops' in str(cm.exception))
def test_proxy(self):
conn = self.service_connection
conn.proxy = "127.0.0.1"
conn.proxy_user = "john.doe"
        conn.proxy_pass = "p4ssw0rd"
        conn.proxy_port = "8180"
conn.use_proxy = True
domain = Domain(conn, DEMO_DOMAIN_DATA)
search = SearchConnection(domain=domain)
self.assertEqual(search.session.proxies, {'http': 'http://john.doe:[email protected]:8180'})
|
satoshinm/NetCraft
|
refs/heads/master
|
server.py
|
1
|
#!/usr/bin/env python
#
import sys, time, socket, re
from math import floor
from world import World
import Queue
import SocketServer
import datetime
import random
import requests
import sqlite3
import threading
import traceback
DEFAULT_HOST = '0.0.0.0'
DEFAULT_PORT = 4080
DB_PATH = 'craft.db'
LOG_PATH = 'log.txt'
CHUNK_SIZE = 32
BUFFER_SIZE = 4096
COMMIT_INTERVAL = 5
AUTH_REQUIRED = True
AUTH_URL = 'https://craft.michaelfogleman.com/api/1/access'
DAY_LENGTH = 600
SPAWN_POINT = (0, 0, 0, 0, 0)
RATE_LIMIT = False
RECORD_HISTORY = False
INDESTRUCTIBLE_ITEMS = set([16])
ALLOWED_ITEMS = set([
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
17, 18, 19, 20, 21, 22, 23,
32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63])
AUTHENTICATE = 'A'
BLOCK = 'B'
CHUNK = 'C'
DISCONNECT = 'D'
KEY = 'K'
LIGHT = 'L'
NICK = 'N'
POSITION = 'P'
REDRAW = 'R'
SIGN = 'S'
TALK = 'T'
TIME = 'E'
VERSION = 'V'
YOU = 'U'
try:
from config import *
except ImportError:
pass
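# An optional, local config.py can override any of the module-level defaults
# above via the wildcard import. A hypothetical sketch (values are examples
# only, not shipped with the server):
#
#     DEFAULT_PORT = 4080
#     AUTH_REQUIRED = False
#     RATE_LIMIT = True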
def log(*args):
now = datetime.datetime.utcnow()
line = ' '.join(map(str, (now,) + args))
print line
with open(LOG_PATH, 'a') as fp:
fp.write('%s\n' % line)
def chunked(x):
return int(floor(round(x) / CHUNK_SIZE))
def packet(*args):
return '%s\n' % ','.join(map(str, args))
class RateLimiter(object):
def __init__(self, rate, per):
self.rate = float(rate)
self.per = float(per)
self.allowance = self.rate
self.last_check = time.time()
def tick(self):
if not RATE_LIMIT:
return False
now = time.time()
elapsed = now - self.last_check
self.last_check = now
self.allowance += elapsed * (self.rate / self.per)
if self.allowance > self.rate:
self.allowance = self.rate
if self.allowance < 1:
return True # too fast
else:
self.allowance -= 1
return False # okay
class Server(SocketServer.ThreadingMixIn, SocketServer.TCPServer):
allow_reuse_address = True
daemon_threads = True
class Handler(SocketServer.BaseRequestHandler):
def setup(self):
self.position_limiter = RateLimiter(100, 5)
self.limiter = RateLimiter(1000, 10)
self.version = None
self.client_id = None
self.user_id = None
self.nick = None
self.queue = Queue.Queue()
self.running = True
self.start()
def handle(self):
model = self.server.model
model.enqueue(model.on_connect, self)
try:
buf = []
while True:
data = self.request.recv(BUFFER_SIZE)
if not data:
break
buf.extend(data.replace('\r\n', '\n'))
while '\n' in buf:
index = buf.index('\n')
line = ''.join(buf[:index])
buf = buf[index + 1:]
if not line:
continue
if line[0] == POSITION:
if self.position_limiter.tick():
log('RATE', self.client_id)
self.stop()
return
else:
if self.limiter.tick():
log('RATE', self.client_id)
self.stop()
return
model.enqueue(model.on_data, self, line)
finally:
model.enqueue(model.on_disconnect, self)
def finish(self):
self.running = False
def stop(self):
self.request.close()
def start(self):
thread = threading.Thread(target=self.run)
thread.setDaemon(True)
thread.start()
def run(self):
while self.running:
try:
buf = []
try:
buf.append(self.queue.get(timeout=5))
try:
while True:
buf.append(self.queue.get(False))
except Queue.Empty:
pass
except Queue.Empty:
continue
data = ''.join(buf)
self.request.sendall(data)
except Exception:
self.request.close()
raise
def send_raw(self, data):
if data:
self.queue.put(data)
def send(self, *args):
self.send_raw(packet(*args))
class Model(object):
def __init__(self, seed, lan_mode=False, force_hour=None, db_path=DB_PATH):
self.world = World(seed)
self.clients = []
self.lan_mode = lan_mode
self.db_path = db_path
if force_hour is not None:
            # make the day progress VERY slowly, and start at the indicated time
dl = 60000
force_hour %= 24
self.time_config = lambda: (force_hour*dl/24., dl)
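            # e.g. force_hour=12 gives (30000.0, 60000): a fixed "noon" start on a
            # 60000-second day, so the in-game time of day barely changes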
else:
self.time_config = lambda: (time.time(), DAY_LENGTH)
self.queue = Queue.Queue()
self.commands = {
AUTHENTICATE: self.on_authenticate,
CHUNK: self.on_chunk,
BLOCK: self.on_block,
LIGHT: self.on_light,
POSITION: self.on_position,
TALK: self.on_talk,
SIGN: self.on_sign,
VERSION: self.on_version,
}
self.patterns = [
(re.compile(r'^/nick(?:\s+([^,\s]+))?$'), self.on_nick),
(re.compile(r'^/spawn$'), self.on_spawn),
(re.compile(r'^/goto(?:\s+(\S+))?$'), self.on_goto),
(re.compile(r'^/pq\s+(-?[0-9]+)\s*,?\s*(-?[0-9]+)$'), self.on_pq),
(re.compile(r'^/help(?:\s+(\S+))?$'), self.on_help),
(re.compile(r'^/list$'), self.on_list),
]
        if self.lan_mode:
            # in LAN mode, /nick should reach on_set_nick; insert it ahead of the
            # generic /nick pattern above, which would otherwise match first
            self.patterns.insert(0,
                (re.compile(r'^/nick(?:\s+(\S+))?$'), self.on_set_nick))
def start(self):
thread = threading.Thread(target=self.run)
thread.setDaemon(True)
thread.start()
def run(self):
self.connection = sqlite3.connect(DB_PATH)
self.create_tables()
self.commit()
while True:
try:
if time.time() - self.last_commit > COMMIT_INTERVAL:
self.commit()
self.dequeue()
except Exception:
traceback.print_exc()
def enqueue(self, func, *args, **kwargs):
self.queue.put((func, args, kwargs))
def dequeue(self):
try:
func, args, kwargs = self.queue.get(timeout=5)
func(*args, **kwargs)
except Queue.Empty:
pass
def execute(self, *args, **kwargs):
return self.connection.execute(*args, **kwargs)
def commit(self):
self.last_commit = time.time()
self.connection.commit()
def create_tables(self):
queries = [
'create table if not exists block ('
' p int not null,'
' q int not null,'
' x int not null,'
' y int not null,'
' z int not null,'
' w int not null'
');',
'create unique index if not exists block_pqxyz_idx on '
' block (p, q, x, y, z);',
'create table if not exists light ('
' p int not null,'
' q int not null,'
' x int not null,'
' y int not null,'
' z int not null,'
' w int not null'
');',
'create unique index if not exists light_pqxyz_idx on '
' light (p, q, x, y, z);',
'create table if not exists sign ('
' p int not null,'
' q int not null,'
' x int not null,'
' y int not null,'
' z int not null,'
' face int not null,'
' text text not null'
');',
'create index if not exists sign_pq_idx on sign (p, q);',
'create unique index if not exists sign_xyzface_idx on '
' sign (x, y, z, face);',
'create table if not exists block_history ('
' timestamp real not null,'
' user_id int not null,'
' x int not null,'
' y int not null,'
' z int not null,'
' w int not null'
');',
]
for query in queries:
self.execute(query)
def get_default_block(self, x, y, z):
p, q = chunked(x), chunked(z)
chunk = self.world.get_chunk(p, q)
return chunk.get((x, y, z), 0)
def get_block(self, x, y, z):
query = (
'select w from block where '
'p = :p and q = :q and x = :x and y = :y and z = :z;'
)
p, q = chunked(x), chunked(z)
rows = list(self.execute(query, dict(p=p, q=q, x=x, y=y, z=z)))
if rows:
return rows[0][0]
return self.get_default_block(x, y, z)
def next_client_id(self):
result = 1
client_ids = set(x.client_id for x in self.clients)
while result in client_ids:
result += 1
return result
def on_connect(self, client):
client.client_id = self.next_client_id()
client.nick = 'guest%d' % client.client_id
log('CONN', client.client_id, *client.client_address)
client.position = SPAWN_POINT
self.clients.append(client)
client.send(YOU, client.client_id, *client.position)
client.send(TIME, *self.time_config())
client.send(TALK, 'Welcome to Craft!')
client.send(TALK, 'Type "/help" for a list of commands.')
self.send_position(client)
self.send_positions(client)
self.send_nick(client)
self.send_nicks(client)
def on_data(self, client, data):
#log('RECV', client.client_id, data)
args = data.split(',')
command, args = args[0], args[1:]
if command in self.commands:
func = self.commands[command]
func(client, *args)
def on_disconnect(self, client):
log('DISC', client.client_id, *client.client_address)
self.clients.remove(client)
self.send_disconnect(client)
self.send_talk('%s has disconnected from the server.' % client.nick)
def on_version(self, client, version):
if client.version is not None:
return
version = int(version)
if version != 1:
client.stop()
return
client.version = version
# TODO: client.start() here
def on_authenticate(self, client, username, access_token):
user_id = None
if self.lan_mode:
# Use the IP and port as a starting point.
ip, port = client.request.getpeername()
if not username:
try:
username = socket.gethostbyaddr(ip)[0] + ':%s' % port
except:
username = '%s:%s' % (ip, port)
client.nick = username
client.send(TALK, 'Welcome %s' % client.nick)
user_id = ((int(ip.replace('.',''))*65536) + port) % 2**30
client.user_id = user_id
else:
if username and access_token:
payload = {
'username': username,
'access_token': access_token,
}
response = requests.post(AUTH_URL, data=payload)
if response.status_code == 200 and response.text.isdigit():
user_id = int(response.text)
client.user_id = user_id
if user_id is None:
client.nick = 'guest%d' % client.client_id
client.send(TALK, 'Visit craft.michaelfogleman.com to register!')
else:
client.nick = username
self.send_nick(client)
# TODO: has left message if was already authenticated
self.send_talk('%s has joined the game.' % client.nick)
def on_chunk(self, client, p, q, key=0):
packets = []
p, q, key = map(int, (p, q, key))
query = (
'select rowid, x, y, z, w from block where '
'p = :p and q = :q and rowid > :key;'
)
rows = self.execute(query, dict(p=p, q=q, key=key))
max_rowid = 0
blocks = 0
for rowid, x, y, z, w in rows:
blocks += 1
packets.append(packet(BLOCK, p, q, x, y, z, w))
max_rowid = max(max_rowid, rowid)
query = (
'select x, y, z, w from light where '
'p = :p and q = :q;'
)
rows = self.execute(query, dict(p=p, q=q))
lights = 0
for x, y, z, w in rows:
lights += 1
packets.append(packet(LIGHT, p, q, x, y, z, w))
query = (
'select x, y, z, face, text from sign where '
'p = :p and q = :q;'
)
rows = self.execute(query, dict(p=p, q=q))
signs = 0
for x, y, z, face, text in rows:
signs += 1
packets.append(packet(SIGN, p, q, x, y, z, face, text))
if blocks:
packets.append(packet(KEY, p, q, max_rowid))
if blocks or lights or signs:
packets.append(packet(REDRAW, p, q))
packets.append(packet(CHUNK, p, q))
client.send_raw(''.join(packets))
def on_block(self, client, x, y, z, w):
x, y, z, w = map(int, (x, y, z, w))
p, q = chunked(x), chunked(z)
previous = self.get_block(x, y, z)
message = None
if AUTH_REQUIRED and client.user_id is None:
message = 'Only logged in users are allowed to build.'
elif y <= 0 or y > 255:
message = 'Invalid block coordinates.'
elif w not in ALLOWED_ITEMS:
message = 'That item is not allowed.'
elif w and previous:
message = 'Cannot create blocks in a non-empty space.'
elif not w and not previous:
message = 'That space is already empty.'
elif previous in INDESTRUCTIBLE_ITEMS:
message = 'Cannot destroy that type of block.'
if message is not None:
client.send(BLOCK, p, q, x, y, z, previous)
client.send(REDRAW, p, q)
client.send(TALK, message)
return
query = (
'insert into block_history (timestamp, user_id, x, y, z, w) '
'values (:timestamp, :user_id, :x, :y, :z, :w);'
)
if RECORD_HISTORY:
self.execute(query, dict(timestamp=time.time(),
user_id=client.user_id, x=x, y=y, z=z, w=w))
query = (
'insert or replace into block (p, q, x, y, z, w) '
'values (:p, :q, :x, :y, :z, :w);'
)
self.execute(query, dict(p=p, q=q, x=x, y=y, z=z, w=w))
self.send_block(client, p, q, x, y, z, w)
for dx in range(-1, 2):
for dz in range(-1, 2):
if dx == 0 and dz == 0:
continue
if dx and chunked(x + dx) == p:
continue
if dz and chunked(z + dz) == q:
continue
np, nq = p + dx, q + dz
self.execute(query, dict(p=np, q=nq, x=x, y=y, z=z, w=-w))
self.send_block(client, np, nq, x, y, z, -w)
if w == 0:
query = (
'delete from sign where '
'x = :x and y = :y and z = :z;'
)
self.execute(query, dict(x=x, y=y, z=z))
query = (
'update light set w = 0 where '
'x = :x and y = :y and z = :z;'
)
self.execute(query, dict(x=x, y=y, z=z))
def on_light(self, client, x, y, z, w):
x, y, z, w = map(int, (x, y, z, w))
p, q = chunked(x), chunked(z)
block = self.get_block(x, y, z)
message = None
if AUTH_REQUIRED and client.user_id is None:
message = 'Only logged in users are allowed to build.'
elif block == 0:
message = 'Lights must be placed on a block.'
elif w < 0 or w > 15:
message = 'Invalid light value.'
if message is not None:
# TODO: client.send(LIGHT, p, q, x, y, z, previous)
client.send(REDRAW, p, q)
client.send(TALK, message)
return
query = (
'insert or replace into light (p, q, x, y, z, w) '
'values (:p, :q, :x, :y, :z, :w);'
)
self.execute(query, dict(p=p, q=q, x=x, y=y, z=z, w=w))
self.send_light(client, p, q, x, y, z, w)
def on_sign(self, client, x, y, z, face, *args):
if AUTH_REQUIRED and client.user_id is None:
client.send(TALK, 'Only logged in users are allowed to build.')
return
text = ','.join(args)
x, y, z, face = map(int, (x, y, z, face))
if y <= 0 or y > 255:
return
if face < 0 or face > 7:
return
if len(text) > 48:
return
p, q = chunked(x), chunked(z)
if text:
query = (
'insert or replace into sign (p, q, x, y, z, face, text) '
'values (:p, :q, :x, :y, :z, :face, :text);'
)
self.execute(query,
dict(p=p, q=q, x=x, y=y, z=z, face=face, text=text))
else:
query = (
'delete from sign where '
'x = :x and y = :y and z = :z and face = :face;'
)
self.execute(query, dict(x=x, y=y, z=z, face=face))
self.send_sign(client, p, q, x, y, z, face, text)
def on_position(self, client, x, y, z, rx, ry):
x, y, z, rx, ry = map(float, (x, y, z, rx, ry))
client.position = (x, y, z, rx, ry)
self.send_position(client)
def on_talk(self, client, *args):
text = ','.join(args)
if text.startswith('/'):
for pattern, func in self.patterns:
match = pattern.match(text)
if match:
func(client, *match.groups())
break
else:
client.send(TALK, 'Unrecognized command: "%s"' % text)
elif text.startswith('@'):
nick = text[1:].split(' ', 1)[0]
for other in self.clients:
if other.nick == nick:
client.send(TALK, '%s> %s' % (client.nick, text))
other.send(TALK, '%s> %s' % (client.nick, text))
break
else:
client.send(TALK, 'Unrecognized nick: "%s"' % nick)
else:
self.send_talk('%s> %s' % (client.nick, text))
def on_nick(self, client, nick=None):
if AUTH_REQUIRED:
client.send(TALK, 'You cannot change your nick on this server.')
return
if nick is None:
client.send(TALK, 'Your nickname is %s' % client.nick)
else:
self.send_talk('%s is now known as %s' % (client.nick, nick))
client.nick = nick
self.send_nick(client)
def on_spawn(self, client):
client.position = SPAWN_POINT
client.send(YOU, client.client_id, *client.position)
self.send_position(client)
def on_set_nick(self, client, new_nick=None):
if new_nick is None:
return self.on_help(client, 'nick')
        NICK_RE = r'^[A-Za-z0-9_]{3,32}$'
        if not re.match(NICK_RE, new_nick):
            client.send(TALK, 'Nicknames are 3 to 32 chars: A-Z, a-z, 0-9 or _')
return
if new_nick.lower() in [i.nick.lower() for i in self.clients
if client != i]:
client.send(TALK, 'That nickname is taken already.')
return
old_nick = client.nick
client.nick = new_nick
self.send_nick(client)
self.send_nicks(client)
for other in self.clients:
other.send(TALK, '%s is now nick-named: %s' % (old_nick, new_nick))
def on_goto(self, client, nick=None):
if nick is None:
clients = [x for x in self.clients if x != client]
other = random.choice(clients) if clients else None
else:
nicks = dict((client.nick, client) for client in self.clients)
other = nicks.get(nick)
if other:
client.position = other.position
client.send(YOU, client.client_id, *client.position)
self.send_position(client)
def on_pq(self, client, p, q):
p, q = map(int, (p, q))
if abs(p) > 1000 or abs(q) > 1000:
return
client.position = (p * CHUNK_SIZE, 0, q * CHUNK_SIZE, 0, 0)
client.send(YOU, client.client_id, *client.position)
self.send_position(client)
def on_help(self, client, topic=None):
if topic is None:
client.send(TALK, 'Type "t" to chat. Type "/" to type commands:')
client.send(TALK, '/goto [NAME], /help [TOPIC], /list, /login NAME, /logout, /nick')
client.send(TALK, '/offline [FILE], /online HOST [PORT], /pq P Q, /spawn, /view N')
if self.lan_mode:
client.send(TALK, '/nick NEW_NICKNAME')
return
topic = topic.lower().strip()
if topic == 'goto':
client.send(TALK, 'Help: /goto [NAME]')
client.send(TALK, 'Teleport to another user.')
client.send(TALK, 'If NAME is unspecified, a random user is chosen.')
elif topic == 'list':
client.send(TALK, 'Help: /list')
client.send(TALK, 'Display a list of connected users.')
elif topic == 'login':
client.send(TALK, 'Help: /login NAME')
client.send(TALK, 'Switch to another registered username.')
client.send(TALK, 'The login server will be re-contacted. The username is case-sensitive.')
elif topic == 'logout':
client.send(TALK, 'Help: /logout')
client.send(TALK, 'Unauthenticate and become a guest user.')
client.send(TALK, 'Automatic logins will not occur again until the /login command is re-issued.')
elif topic == 'offline':
client.send(TALK, 'Help: /offline [FILE]')
client.send(TALK, 'Switch to offline mode.')
client.send(TALK, 'FILE specifies the save file to use and defaults to "craft".')
elif topic == 'online':
client.send(TALK, 'Help: /online HOST [PORT]')
client.send(TALK, 'Connect to the specified server.')
        elif topic == 'nick':
            if self.lan_mode:
                client.send(TALK, 'Help: /nick NEW_NICKNAME')
                client.send(TALK, 'Set your nickname.')
            else:
                client.send(TALK, 'Help: /nick [NICK]')
                client.send(TALK, 'Get or set your nickname.')
        elif topic == 'pq':
            client.send(TALK, 'Help: /pq P Q')
            client.send(TALK, 'Teleport to the specified chunk.')
        elif topic == 'spawn':
            client.send(TALK, 'Help: /spawn')
            client.send(TALK, 'Teleport back to the spawn point.')
        elif topic == 'view':
            client.send(TALK, 'Help: /view N')
            client.send(TALK, 'Set viewing distance, 1 - 24.')
def on_list(self, client):
client.send(TALK,
'Players: %s' % ', '.join(x.nick for x in self.clients))
def send_positions(self, client):
for other in self.clients:
if other == client:
continue
client.send(POSITION, other.client_id, *other.position)
def send_position(self, client):
for other in self.clients:
if other == client:
continue
other.send(POSITION, client.client_id, *client.position)
def send_nicks(self, client):
for other in self.clients:
if other == client:
continue
client.send(NICK, other.client_id, other.nick)
def send_nick(self, client):
for other in self.clients:
other.send(NICK, client.client_id, client.nick)
def send_disconnect(self, client):
for other in self.clients:
if other == client:
continue
other.send(DISCONNECT, client.client_id)
def send_block(self, client, p, q, x, y, z, w):
log("Send block: %s" %
','.join([str(i) for i in (p, q, x, y, z, w)]))
for other in self.clients:
if other == client:
continue
other.send(BLOCK, p, q, x, y, z, w)
other.send(REDRAW, p, q)
def send_light(self, client, p, q, x, y, z, w):
for other in self.clients:
if other == client:
continue
other.send(LIGHT, p, q, x, y, z, w)
other.send(REDRAW, p, q)
def send_sign(self, client, p, q, x, y, z, face, text):
for other in self.clients:
if other == client:
continue
other.send(SIGN, p, q, x, y, z, face, text)
def send_talk(self, text):
log(text)
for client in self.clients:
client.send(TALK, text)
def cleanup():
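    # Prints a SQL transaction (to stdout) that deletes redundant block rows:
    # rows whose stored value matches what the world generator would produce
    # anyway, or rows overriding an indestructible generated block. The most
    # recently placed block is always kept. The output is plain SQL, so it can
    # be applied with e.g. "sqlite3 craft.db < cleanup.sql" (hypothetical usage).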
world = World(None)
conn = sqlite3.connect(DB_PATH)
query = 'select x, y, z from block order by rowid desc limit 1;'
last = list(conn.execute(query))[0]
query = 'select distinct p, q from block;'
chunks = list(conn.execute(query))
count = 0
total = 0
delete_query = 'delete from block where x = %d and y = %d and z = %d;'
print 'begin;'
for p, q in chunks:
chunk = world.create_chunk(p, q)
query = 'select x, y, z, w from block where p = :p and q = :q;'
rows = conn.execute(query, {'p': p, 'q': q})
for x, y, z, w in rows:
if chunked(x) != p or chunked(z) != q:
continue
total += 1
if (x, y, z) == last:
continue
original = chunk.get((x, y, z), 0)
if w == original or original in INDESTRUCTIBLE_ITEMS:
count += 1
print delete_query % (x, y, z)
conn.close()
print 'commit;'
print >> sys.stderr, '%d of %d blocks will be cleaned up' % (count, total)
def main():
import argparse
parser = argparse.ArgumentParser(description='Craft multi-user server')
parser.add_argument('--cleanup', action='store_true',
help="Wipe all existing world data")
parser.add_argument('--seed', default=None, type=int)
parser.add_argument('--host', default=DEFAULT_HOST)
parser.add_argument('--db', default=DB_PATH)
parser.add_argument('--port', '-p', default=DEFAULT_PORT, type=int,
help="Port to bind to. Default: %s" % DEFAULT_PORT)
parser.add_argument('--hour', default=None, type=int,
help="Make it the same time of day all the time (hour: 0...23).")
parser.add_argument('--lan', action='store_true',
help="No passwords, just hostname/ip. Lan use only!")
args = parser.parse_args()
host, port = args.host, args.port
if args.cleanup:
cleanup()
return
log('SERV', host, port)
model = Model(args.seed, lan_mode=args.lan, force_hour=args.hour, db_path=args.db)
model.start()
server = Server((host, port), Handler)
server.model = model
try:
server.serve_forever()
except KeyboardInterrupt:
log("Stopped")
if __name__ == '__main__':
sys.exit(main() or 0)
|
czpython/aldryn-newsblog
|
refs/heads/master
|
aldryn_newsblog/migrations/0001_initial.py
|
1
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import taggit.managers
import aldryn_categories.fields
import aldryn_newsblog.models
import filer.fields.image
from django.conf import settings
import sortedm2m.fields
import django.utils.timezone
import djangocms_text_ckeditor.fields
import cms.models.fields
import app_data.fields
import aldryn_apphooks_config.fields
import django.core.validators
class Migration(migrations.Migration):
dependencies = [
('taggit', '0001_initial'),
('cms', '0001_initial'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('aldryn_people', '0001_initial'),
('filer', '0001_initial'),
('aldryn_categories', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Article',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('publishing_date', models.DateTimeField(default=django.utils.timezone.now, verbose_name='publishing date')),
('is_published', models.BooleanField(default=True, db_index=True, verbose_name='is published')),
('is_featured', models.BooleanField(default=False, db_index=True, verbose_name='is featured')),
],
options={
'ordering': ['-publishing_date'],
},
bases=(models.Model,),
),
migrations.CreateModel(
name='ArticleTranslation',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('language_code', models.CharField(max_length=15, verbose_name='Language', db_index=True)),
('title', models.CharField(max_length=234, verbose_name='title')),
('slug', models.SlugField(help_text='Used in the URL. If changed, the URL will change. Clear it to have it re-created automatically.', max_length=255, verbose_name='slug', blank=True)),
('lead_in', djangocms_text_ckeditor.fields.HTMLField(default='', help_text='Will be displayed in lists, and at the start of the detail page (in bold)', verbose_name='Optional lead-in', blank=True)),
('meta_title', models.CharField(default='', max_length=255, verbose_name='meta title', blank=True)),
('meta_description', models.TextField(default='', verbose_name='meta description', blank=True)),
('meta_keywords', models.TextField(default='', verbose_name='meta keywords', blank=True)),
('search_data', models.TextField(editable=False, blank=True)),
('master', models.ForeignKey(related_name='translations', editable=False, to='aldryn_newsblog.Article', null=True)),
],
options={
'managed': True,
'db_table': 'aldryn_newsblog_article_translation',
'db_tablespace': '',
'default_permissions': (),
'verbose_name': 'article Translation',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='NewsBlogArchivePlugin',
fields=[
('cmsplugin_ptr', models.OneToOneField(parent_link=True, related_name='+', primary_key=True, serialize=False, to='cms.CMSPlugin')),
],
options={
'abstract': False,
},
bases=(aldryn_newsblog.models.PluginEditModeMixin, 'cms.cmsplugin'),
),
migrations.CreateModel(
name='NewsBlogArticleSearchPlugin',
fields=[
('cmsplugin_ptr', models.OneToOneField(parent_link=True, related_name='+', primary_key=True, serialize=False, to='cms.CMSPlugin')),
('max_articles', models.PositiveIntegerField(default=10, help_text='The maximum number of found articles display.', verbose_name='max articles', validators=[django.core.validators.MinValueValidator(1)])),
],
options={
'abstract': False,
},
bases=('cms.cmsplugin',),
),
migrations.CreateModel(
name='NewsBlogAuthorsPlugin',
fields=[
('cmsplugin_ptr', models.OneToOneField(parent_link=True, related_name='+', primary_key=True, serialize=False, to='cms.CMSPlugin')),
],
options={
'abstract': False,
},
bases=(aldryn_newsblog.models.PluginEditModeMixin, 'cms.cmsplugin'),
),
migrations.CreateModel(
name='NewsBlogCategoriesPlugin',
fields=[
('cmsplugin_ptr', models.OneToOneField(parent_link=True, related_name='+', primary_key=True, serialize=False, to='cms.CMSPlugin')),
],
options={
'abstract': False,
},
bases=(aldryn_newsblog.models.PluginEditModeMixin, 'cms.cmsplugin'),
),
migrations.CreateModel(
name='NewsBlogConfig',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('type', models.CharField(max_length=100, verbose_name='type')),
('namespace', models.CharField(default=None, unique=True, max_length=100, verbose_name='instance namespace')),
('app_data', app_data.fields.AppDataField(default=dict, editable=False)),
('permalink_type', models.CharField(default='slug', help_text='Choose the style of urls to use from the examples. (Note, all types are relative to apphook)', max_length=8, verbose_name='permalink type', choices=[('s', 'the-eagle-has-landed/'), ('ys', '1969/the-eagle-has-landed/'), ('yms', '1969/07/the-eagle-has-landed/'), ('ymds', '1969/07/16/the-eagle-has-landed/'), ('ymdi', '1969/07/16/11/')])),
('non_permalink_handling', models.SmallIntegerField(default=302, help_text='How to handle non-permalink urls?', verbose_name='non-permalink handling', choices=[(200, 'Allow'), (302, 'Redirect to permalink (default)'), (301, 'Permanent redirect to permalink'), (404, 'Return 404: Not Found')])),
('paginate_by', models.PositiveIntegerField(default=5, help_text='When paginating list views, how many articles per page?', verbose_name='Paginate size')),
('create_authors', models.BooleanField(default=True, help_text='Automatically create authors from logged-in user?', verbose_name='Auto-create authors?')),
('search_indexed', models.BooleanField(default=True, help_text='Include articles in search indexes?', verbose_name='Include in search index?')),
('placeholder_base_sidebar', cms.models.fields.PlaceholderField(related_name='aldryn_newsblog_base_sidebar', slotname='newsblog_base_sidebar', editable=False, to='cms.Placeholder', null=True)),
('placeholder_base_top', cms.models.fields.PlaceholderField(related_name='aldryn_newsblog_base_top', slotname='newsblog_base_top', editable=False, to='cms.Placeholder', null=True)),
('placeholder_detail_bottom', cms.models.fields.PlaceholderField(related_name='aldryn_newsblog_detail_bottom', slotname='newsblog_detail_bottom', editable=False, to='cms.Placeholder', null=True)),
('placeholder_detail_footer', cms.models.fields.PlaceholderField(related_name='aldryn_newsblog_detail_footer', slotname='newsblog_detail_footer', editable=False, to='cms.Placeholder', null=True)),
('placeholder_detail_top', cms.models.fields.PlaceholderField(related_name='aldryn_newsblog_detail_top', slotname='newsblog_detail_top', editable=False, to='cms.Placeholder', null=True)),
('placeholder_list_footer', cms.models.fields.PlaceholderField(related_name='aldryn_newsblog_list_footer', slotname='newsblog_list_footer', editable=False, to='cms.Placeholder', null=True)),
('placeholder_list_top', cms.models.fields.PlaceholderField(related_name='aldryn_newsblog_list_top', slotname='newsblog_list_top', editable=False, to='cms.Placeholder', null=True)),
],
options={
'abstract': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='NewsBlogConfigTranslation',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('language_code', models.CharField(max_length=15, verbose_name='Language', db_index=True)),
('app_title', models.CharField(max_length=234, verbose_name='application title')),
('master', models.ForeignKey(related_name='translations', editable=False, to='aldryn_newsblog.NewsBlogConfig', null=True)),
],
options={
'managed': True,
'db_table': 'aldryn_newsblog_newsblogconfig_translation',
'db_tablespace': '',
'default_permissions': (),
'verbose_name': 'news blog config Translation',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='NewsBlogFeaturedArticlesPlugin',
fields=[
('cmsplugin_ptr', models.OneToOneField(parent_link=True, related_name='+', primary_key=True, serialize=False, to='cms.CMSPlugin')),
('article_count', models.PositiveIntegerField(default=1, help_text='The maximum number of featured articles display.', validators=[django.core.validators.MinValueValidator(1)])),
('app_config', models.ForeignKey(to='aldryn_newsblog.NewsBlogConfig')),
],
options={
'abstract': False,
},
bases=(aldryn_newsblog.models.PluginEditModeMixin, 'cms.cmsplugin'),
),
migrations.CreateModel(
name='NewsBlogLatestArticlesPlugin',
fields=[
('cmsplugin_ptr', models.OneToOneField(parent_link=True, related_name='+', primary_key=True, serialize=False, to='cms.CMSPlugin')),
('latest_articles', models.IntegerField(default=5, help_text='The maximum number of latest articles to display.')),
('app_config', models.ForeignKey(to='aldryn_newsblog.NewsBlogConfig')),
],
options={
'abstract': False,
},
bases=(aldryn_newsblog.models.PluginEditModeMixin, 'cms.cmsplugin'),
),
migrations.CreateModel(
name='NewsBlogRelatedPlugin',
fields=[
('cmsplugin_ptr', models.OneToOneField(parent_link=True, related_name='+', primary_key=True, serialize=False, to='cms.CMSPlugin')),
],
options={
'abstract': False,
},
bases=(aldryn_newsblog.models.PluginEditModeMixin, 'cms.cmsplugin'),
),
migrations.CreateModel(
name='NewsBlogTagsPlugin',
fields=[
('cmsplugin_ptr', models.OneToOneField(parent_link=True, related_name='+', primary_key=True, serialize=False, to='cms.CMSPlugin')),
('app_config', models.ForeignKey(to='aldryn_newsblog.NewsBlogConfig')),
],
options={
'abstract': False,
},
bases=(aldryn_newsblog.models.PluginEditModeMixin, 'cms.cmsplugin'),
),
migrations.AlterUniqueTogether(
name='newsblogconfigtranslation',
unique_together=set([('language_code', 'master')]),
),
migrations.AddField(
model_name='newsblogcategoriesplugin',
name='app_config',
field=models.ForeignKey(to='aldryn_newsblog.NewsBlogConfig'),
preserve_default=True,
),
migrations.AddField(
model_name='newsblogauthorsplugin',
name='app_config',
field=models.ForeignKey(to='aldryn_newsblog.NewsBlogConfig'),
preserve_default=True,
),
migrations.AddField(
model_name='newsblogarticlesearchplugin',
name='app_config',
field=models.ForeignKey(to='aldryn_newsblog.NewsBlogConfig'),
preserve_default=True,
),
migrations.AddField(
model_name='newsblogarchiveplugin',
name='app_config',
field=models.ForeignKey(to='aldryn_newsblog.NewsBlogConfig'),
preserve_default=True,
),
migrations.AlterUniqueTogether(
name='articletranslation',
unique_together=set([('language_code', 'master'), ('language_code', 'slug')]),
),
migrations.AddField(
model_name='article',
name='app_config',
field=aldryn_apphooks_config.fields.AppHookConfigField(verbose_name='app. config', to='aldryn_newsblog.NewsBlogConfig', help_text='When selecting a value, the form is reloaded to get the updated default'),
preserve_default=True,
),
migrations.AddField(
model_name='article',
name='author',
field=models.ForeignKey(verbose_name='author', blank=True, to='aldryn_people.Person', null=True),
preserve_default=True,
),
migrations.AddField(
model_name='article',
name='categories',
field=aldryn_categories.fields.CategoryManyToManyField(to='aldryn_categories.Category', verbose_name='categories', blank=True),
preserve_default=True,
),
migrations.AddField(
model_name='article',
name='content',
field=cms.models.fields.PlaceholderField(related_name='newsblog_article_content', slotname='newsblog_article_content', editable=False, to='cms.Placeholder', null=True),
preserve_default=True,
),
migrations.AddField(
model_name='article',
name='featured_image',
field=filer.fields.image.FilerImageField(blank=True, to='filer.Image', null=True),
preserve_default=True,
),
migrations.AddField(
model_name='article',
name='owner',
field=models.ForeignKey(verbose_name='owner', to=settings.AUTH_USER_MODEL),
preserve_default=True,
),
migrations.AddField(
model_name='article',
name='related',
field=sortedm2m.fields.SortedManyToManyField(help_text=None, related_name='related_rel_+', verbose_name='related articles', to='aldryn_newsblog.Article', blank=True),
preserve_default=True,
),
migrations.AddField(
model_name='article',
name='tags',
field=taggit.managers.TaggableManager(to='taggit.Tag', through='taggit.TaggedItem', blank=True, help_text='A comma-separated list of tags.', verbose_name='Tags'),
preserve_default=True,
),
]
|
wang1352083/pythontool
|
refs/heads/master
|
python-2.7.12-lib/user.py
|
313
|
"""Hook to allow user-specified customization code to run.
As a policy, Python doesn't run user-specified code on startup of
Python programs (interactive sessions execute the script specified in
the PYTHONSTARTUP environment variable if it exists).
However, some programs or sites may find it convenient to allow users
to have a standard customization file, which gets run when a program
requests it. This module implements such a mechanism. A program
that wishes to use the mechanism must execute the statement
import user
The user module looks for a file .pythonrc.py in the user's home
directory and if it can be opened, execfile()s it in its own global
namespace. Errors during this phase are not caught; that's up to the
program that imports the user module, if it wishes.
The user's .pythonrc.py could conceivably test for sys.version if it
wishes to do different things depending on the Python version.
"""
from warnings import warnpy3k
warnpy3k("the user module has been removed in Python 3.0", stacklevel=2)
del warnpy3k
import os
home = os.curdir # Default
if 'HOME' in os.environ:
home = os.environ['HOME']
elif os.name == 'posix':
home = os.path.expanduser("~/")
elif os.name == 'nt': # Contributed by Jeff Bauer
if 'HOMEPATH' in os.environ:
if 'HOMEDRIVE' in os.environ:
home = os.environ['HOMEDRIVE'] + os.environ['HOMEPATH']
else:
home = os.environ['HOMEPATH']
pythonrc = os.path.join(home, ".pythonrc.py")
try:
f = open(pythonrc)
except IOError:
pass
else:
f.close()
execfile(pythonrc)
|
glenn-edgar/local_controller_3
|
refs/heads/master
|
__backup__/flask_web/werkzeug-master/docs/makearchive.py
|
50
|
import os
import conf
name = "werkzeug-docs-" + conf.version
os.chdir("_build")
os.rename("html", name)
os.system("tar czf %s.tar.gz %s" % (name, name))
os.rename(name, "html")
|
nicobustillos/odoo
|
refs/heads/8.0
|
addons/website_google_map/controllers/main.py
|
161
|
# -*- coding: utf-8 -*-
import json
from openerp import SUPERUSER_ID
from openerp.addons.web import http
from openerp.addons.web.http import request
class google_map(http.Controller):
'''
This class generates on-the-fly partner maps that can be reused in every
website page. To do so, just use an ``<iframe ...>`` whose ``src``
attribute points to ``/google_map`` (this controller generates a complete
HTML5 page).
URL query parameters:
- ``partner_ids``: a comma-separated list of ids (partners to be shown)
    - ``partner_url``: the base URL used to display a partner
        (e.g. if ``partner_url`` is ``/partners/``, clicking a partner on the
        map redirects the user to <myodoo>.com/partners/<id>)
In order to resize the map, simply resize the ``iframe`` with CSS
directives ``width`` and ``height``.
'''
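    # A hypothetical page embedding this controller (the query parameters are
    # the ones documented above; the ids and sizes are examples only):
    #
    #     <iframe src="/google_map?partner_ids=3,7,12&partner_url=/partners/"
    #             style="width: 100%; height: 360px; border: 0;"></iframe>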
@http.route(['/google_map'], type='http', auth="public", website=True)
def google_map(self, *arg, **post):
cr, uid, context = request.cr, request.uid, request.context
partner_obj = request.registry['res.partner']
# filter real ints from query parameters and build a domain
clean_ids = []
for s in post.get('partner_ids', "").split(","):
try:
i = int(s)
clean_ids.append(i)
except ValueError:
pass
# search for partners that can be displayed on a map
domain = [("id", "in", clean_ids), ('website_published', '=', True), ('is_company', '=', True)]
partners_ids = partner_obj.search(cr, SUPERUSER_ID, domain, context=context)
# browse and format data
partner_data = {
"counter": len(partners_ids),
"partners": []
}
request.context.update({'show_address': True})
for partner in partner_obj.browse(cr, SUPERUSER_ID, partners_ids, context=context):
partner_data["partners"].append({
'id': partner.id,
'name': partner.name,
'address': '\n'.join(partner.name_get()[0][1].split('\n')[1:]),
'latitude': partner.partner_latitude,
'longitude': partner.partner_longitude,
})
# generate the map
values = {
'partner_url': post.get('partner_url'),
'partner_data': json.dumps(partner_data)
}
return request.website.render("website_google_map.google_map", values)
|
orekyuu/intellij-community
|
refs/heads/master
|
python/testData/resolve/multiFile/moduleValueCollision/boo.py
|
83
|
BOO = "only kidding"
|
IsaacYangSLA/nuxeo-drive
|
refs/heads/master
|
nuxeo-drive-client/nxdrive/tests/test_security_updates.py
|
1
|
import time
import sys
from nxdrive.tests.common_unit_test import UnitTestCase
from nose.plugins.skip import SkipTest
class TestSecurityUpdates(UnitTestCase):
def test_synchronize_denying_read_access(self):
if sys.platform != 'win32':
raise SkipTest("WIP in https://jira.nuxeo.com/browse/NXDRIVE-170")
"""Test that denying Read access server side is impacted client side
Use cases:
- Deny Read access on a regular folder
=> Folder should be locally deleted
- Grant Read access back
=> Folder should be locally re-created
- Deny Read access on a synchronization root
=> Synchronization root should be locally deleted
- Grant Read access back
=> Synchronization root should be locally re-created
See TestIntegrationRemoteDeletion.test_synchronize_remote_deletion
        as the same use cases are tested
"""
# Bind the server and root workspace
self.engine_1.start()
# Get local and remote clients
local = self.local_client_1
remote = self.remote_document_client_1
# Create documents in the remote root workspace
# then synchronize
remote.make_folder('/', 'Test folder')
remote.make_file('/Test folder', 'joe.txt', 'Some content')
self.wait_sync()
self.assertTrue(local.exists('/Test folder'))
self.assertTrue(local.exists('/Test folder/joe.txt'))
# Remove Read permission for test user on a regular folder
# then synchronize
self._set_read_permission("nuxeoDriveTestUser_user_1",
self.TEST_WORKSPACE_PATH + '/Test folder',
False)
self.wait_sync()
self.assertFalse(local.exists('/Test folder'))
# Add Read permission back for test user then synchronize
self._set_read_permission("nuxeoDriveTestUser_user_1",
self.TEST_WORKSPACE_PATH + '/Test folder',
True)
self.wait_sync()
self.assertTrue(local.exists('/Test folder'))
self.assertTrue(local.exists('/Test folder/joe.txt'))
# Remove Read permission for test user on a sync root
# then synchronize
self._set_read_permission("nuxeoDriveTestUser_user_1",
self.TEST_WORKSPACE_PATH,
False)
self.wait_sync()
self.assertFalse(local.exists('/'))
# Add Read permission back for test user then synchronize
self._set_read_permission("nuxeoDriveTestUser_user_1",
self.TEST_WORKSPACE_PATH,
True)
self.wait_sync()
self.assertTrue(local.exists('/'))
self.assertTrue(local.exists('/Test folder'))
self.assertTrue(local.exists('/Test folder/joe.txt'))
def test_synchronize_denying_read_access_local_modification(self):
# TO_REVIEW
raise SkipTest("WIP in https://jira.nuxeo.com/browse/NXDRIVE-170")
"""Test denying Read access with concurrent local modification
Use cases:
- Deny Read access on a regular folder and make some
local and remote changes concurrently.
=> Only locally modified content should be kept
and should be marked as 'unsynchronized',
other content should be deleted.
Remote changes should not be impacted client side.
Local changes should not be impacted server side.
- Grant Read access back.
=> Remote documents should be merged with
locally modified content which should be unmarked
as 'unsynchronized' and therefore synchronized upstream.
See TestIntegrationRemoteDeletion
.test_synchronize_remote_deletion_local_modification
        as the same use cases are tested.
Note that we use the .odt extension for test files to make sure
that they are created as File and not Note documents on the server
when synchronized upstream, as the current implementation of
RemoteDocumentClient is File oriented.
"""
# Bind the server and root workspace
self.engine_1.start()
# Get local and remote clients
local = self.local_client_1
remote = self.remote_document_client_1
root_remote = self.root_remote_client
# Create documents in the remote root workspace
# then synchronize
remote.make_folder('/', 'Test folder')
remote.make_file('/Test folder', 'joe.odt', 'Some content')
remote.make_file('/Test folder', 'jack.odt', 'Some content')
remote.make_folder('/Test folder', 'Sub folder 1')
remote.make_file('/Test folder/Sub folder 1', 'sub file 1.txt',
'Content')
self.wait_sync()
self.assertTrue(local.exists('/Test folder'))
self.assertTrue(local.exists('/Test folder/joe.odt'))
self.assertTrue(local.exists('/Test folder/jack.odt'))
self.assertTrue(local.exists('/Test folder/Sub folder 1'))
self.assertTrue(local.exists(
'/Test folder/Sub folder 1/sub file 1.txt'))
# Remove Read permission for test user on a regular folder
# and make some local and remote changes concurrently then synchronize
test_folder_path = self.TEST_WORKSPACE_PATH + '/Test folder'
self._set_read_permission("nuxeoDriveTestUser_user_1",
test_folder_path, False)
# Local changes
time.sleep(self.OS_STAT_MTIME_RESOLUTION)
# Create new file
local.make_file('/Test folder', 'local.odt', 'New local content')
# Create new folder with files
local.make_folder('/Test folder', 'Local sub folder 2')
local.make_file('/Test folder/Local sub folder 2',
'local sub file 2.txt', 'Other local content')
# Update file
local.update_content('/Test folder/joe.odt',
'Some locally updated content')
# Remote changes
# Create new file
root_remote.make_file(test_folder_path, 'remote.odt',
'New remote content')
# Create new folder with files
root_remote.make_folder(test_folder_path, 'Remote sub folder 2')
root_remote.make_file(test_folder_path + '/Remote sub folder 2',
'remote sub file 2.txt', 'Other remote content')
# Update file
root_remote.update_content(test_folder_path + '/joe.odt',
'Some remotely updated content')
self.wait_sync()
# Only locally modified content should exist
# and should be marked as 'unsynchronized', other content should
# have been deleted.
# Remote changes should not be impacted client side.
# Local changes should not be impacted server side.
# Local check
self.assertTrue(local.exists('/Test folder'))
self.assertEquals(len(local.get_children_info('/Test folder')), 3)
self.assertTrue(local.exists('/Test folder/joe.odt'))
self.assertEquals(local.get_content('/Test folder/joe.odt'),
'Some locally updated content')
self.assertTrue(local.exists('/Test folder/local.odt'))
self.assertTrue(local.exists('/Test folder/Local sub folder 2'))
self.assertTrue(local.exists(
'/Test folder/Local sub folder 2/local sub file 2.txt'))
self.assertFalse(local.exists('/Test folder/jack.odt'))
self.assertFalse(local.exists('/Test folder/remote.odt'))
self.assertFalse(local.exists('/Test folder/Sub folder 1'))
self.assertFalse(local.exists(
'/Test folder/Sub folder 1/sub file 1.txt'))
        self.assertFalse(local.exists('/Test folder/Remote sub folder 2'))
        self.assertFalse(local.exists(
            '/Test folder/Remote sub folder 2/remote sub file 2.txt'))
# State check
self._check_pair_state('/Test folder', 'unsynchronized')
self._check_pair_state('/Test folder/joe.odt',
'unsynchronized')
self._check_pair_state('/Test folder/local.odt',
'unsynchronized')
self._check_pair_state('/Test folder/Local sub folder 2',
'unsynchronized')
self._check_pair_state('/Test folder/Local sub folder 2/local sub file 2.txt',
'unsynchronized')
# Remote check
test_folder_uid = root_remote.get_info(test_folder_path).uid
self.assertEquals(len(root_remote.get_children_info(
test_folder_uid)), 5)
self.assertTrue(root_remote.exists(test_folder_path + '/joe.odt'))
self.assertEquals(root_remote.get_content(
test_folder_path + '/joe.odt'),
'Some remotely updated content')
self.assertTrue(root_remote.exists(test_folder_path + '/jack.odt'))
self.assertTrue(root_remote.exists(test_folder_path + '/remote.odt'))
self.assertTrue(root_remote.exists(test_folder_path + '/Sub folder 1'))
self.assertTrue(root_remote.exists(
test_folder_path + '/Sub folder 1/sub file 1.txt'))
self.assertTrue(root_remote.exists(
test_folder_path + '/Remote sub folder 2'))
self.assertTrue(root_remote.exists(
test_folder_path + '/Remote sub folder 2/remote sub file 2.txt'))
self.assertFalse(root_remote.exists(test_folder_path + '/local.odt'))
self.assertFalse(root_remote.exists(
test_folder_path + '/Local sub folder 2'))
self.assertFalse(root_remote.exists(
            test_folder_path + '/Local sub folder 2/local sub file 2.txt'))
# Add Read permission back for test user then synchronize
self._set_read_permission("nuxeoDriveTestUser_user_1",
self.TEST_WORKSPACE_PATH + '/Test folder',
True)
self.wait_sync()
# Remote documents should be merged with locally modified content
# which should be unmarked as 'unsynchronized' and therefore
# synchronized upstream.
# Local check
self.assertTrue(local.exists('/Test folder'))
children_info = local.get_children_info('/Test folder')
self.assertEquals(len(children_info), 8)
for info in children_info:
if info.name == 'joe.odt':
remote_version = info
elif info.name.startswith('joe (') and info.name.endswith(').odt'):
local_version = info
self.assertTrue(remote_version is not None)
self.assertTrue(local_version is not None)
self.assertTrue(local.exists(remote_version.path))
self.assertEquals(local.get_content(remote_version.path),
'Some remotely updated content')
self.assertTrue(local.exists(local_version.path))
self.assertEquals(local.get_content(local_version.path),
'Some locally updated content')
self.assertTrue(local.exists('/Test folder/jack.odt'))
self.assertTrue(local.exists('/Test folder/local.odt'))
self.assertTrue(local.exists('/Test folder/remote.odt'))
self.assertTrue(local.exists('/Test folder/Sub folder 1'))
self.assertTrue(local.exists(
'/Test folder/Sub folder 1/sub file 1.txt'))
self.assertTrue(local.exists('/Test folder/Local sub folder 2'))
self.assertTrue(local.exists(
'/Test folder/Local sub folder 2/local sub file 2.txt'))
self.assertTrue(local.exists('/Test folder/Remote sub folder 2'))
self.assertTrue(local.exists(
'/Test folder/Remote sub folder 2/remote sub file 2.txt'))
# State check
self._check_pair_state('/Test folder', 'synchronized')
self._check_pair_state('/Test folder/joe.odt',
'synchronized')
self._check_pair_state('/Test folder/local.odt',
'synchronized')
self._check_pair_state('/Test folder/Local sub folder 2',
'synchronized')
self._check_pair_state('/Test folder/Local sub folder 2/local sub file 2.txt',
'synchronized')
# Remote check
self.assertTrue(remote.exists('/Test folder'))
children_info = remote.get_children_info(test_folder_uid)
self.assertEquals(len(children_info), 8)
for info in children_info:
if info.name == 'joe.odt':
remote_version = info
elif info.name.startswith('joe (') and info.name.endswith(').odt'):
local_version = info
self.assertTrue(remote_version is not None)
self.assertTrue(local_version is not None)
remote_version_ref_length = (len(remote_version.path)
- len(self.TEST_WORKSPACE_PATH))
remote_version_ref = remote_version.path[-remote_version_ref_length:]
self.assertTrue(remote.exists(remote_version_ref))
self.assertEquals(remote.get_content(remote_version_ref),
'Some remotely updated content')
local_version_ref_length = (len(local_version.path)
- len(self.TEST_WORKSPACE_PATH))
local_version_ref = local_version.path[-local_version_ref_length:]
self.assertTrue(remote.exists(local_version_ref))
self.assertEquals(remote.get_content(local_version_ref),
'Some locally updated content')
self.assertTrue(remote.exists('/Test folder/jack.odt'))
self.assertTrue(remote.exists('/Test folder/local.odt'))
self.assertTrue(remote.exists('/Test folder/remote.odt'))
self.assertTrue(remote.exists('/Test folder/Sub folder 1'))
self.assertTrue(remote.exists(
'/Test folder/Sub folder 1/sub file 1.txt'))
self.assertTrue(remote.exists('/Test folder/Local sub folder 2'))
self.assertTrue(remote.exists(
'/Test folder/Local sub folder 2/local sub file 2.txt'))
self.assertTrue(remote.exists('/Test folder/Remote sub folder 2'))
self.assertTrue(remote.exists(
'/Test folder/Remote sub folder 2/remote sub file 2.txt'))
def _set_read_permission(self, user, doc_path, grant):
op_input = "doc:" + doc_path
if grant:
self.root_remote_client.execute("Document.SetACE",
op_input=op_input,
user=user,
permission="Read",
grant="true")
else:
self.root_remote_client.block_inheritance(doc_path)
    def _check_pair_state(self, local_path, pair_state):
local_path = '/' + self.workspace_title + local_path
doc_pair = self.engine_1.get_dao().get_state_from_local(local_path)
self.assertEquals(doc_pair.pair_state, pair_state)
|
virt2real/linux-davinci
|
refs/heads/master
|
tools/perf/tests/attr.py
|
58
|
#! /usr/bin/python
import os
import sys
import glob
import optparse
import tempfile
import logging
import shutil
import ConfigParser
class Fail(Exception):
def __init__(self, test, msg):
self.msg = msg
self.test = test
def getMsg(self):
return '\'%s\' - %s' % (self.test.path, self.msg)
class Unsup(Exception):
def __init__(self, test):
self.test = test
def getMsg(self):
return '\'%s\'' % self.test.path
class Event(dict):
terms = [
'flags',
'type',
'size',
'config',
'sample_period',
'sample_type',
'read_format',
'disabled',
'inherit',
'pinned',
'exclusive',
'exclude_user',
'exclude_kernel',
'exclude_hv',
'exclude_idle',
'mmap',
'comm',
'freq',
'inherit_stat',
'enable_on_exec',
'task',
'watermark',
'precise_ip',
'mmap_data',
'sample_id_all',
'exclude_host',
'exclude_guest',
'exclude_callchain_kernel',
'exclude_callchain_user',
'wakeup_events',
'bp_type',
'config1',
'config2',
'branch_sample_type',
'sample_regs_user',
'sample_stack_user',
]
def add(self, data):
for key, val in data:
log.debug(" %s = %s" % (key, val))
self[key] = val
def __init__(self, name, data, base):
log.debug(" Event %s" % name);
self.name = name;
self.group = ''
self.add(base)
self.add(data)
def compare_data(self, a, b):
# Allow multiple values in assignment separated by '|'
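        # e.g. compare_data('0|1', '1') -> True, compare_data('*', '7') -> True,
        # compare_data('2', '3') -> False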
a_list = a.split('|')
b_list = b.split('|')
for a_item in a_list:
for b_item in b_list:
if (a_item == b_item):
return True
elif (a_item == '*') or (b_item == '*'):
return True
return False
def equal(self, other):
for t in Event.terms:
log.debug(" [%s] %s %s" % (t, self[t], other[t]));
if not self.has_key(t) or not other.has_key(t):
return False
if not self.compare_data(self[t], other[t]):
return False
return True
def diff(self, other):
for t in Event.terms:
if not self.has_key(t) or not other.has_key(t):
continue
if not self.compare_data(self[t], other[t]):
log.warning("expected %s=%s, got %s" % (t, self[t], other[t]))
# A test file description needs to have the following sections:
# [config]
# - just single instance in file
# - needs to specify:
# 'command' - perf command name
# 'args' - special command arguments
# 'ret' - expected command return value (0 by default)
#
# [eventX:base]
# - one or multiple instances in file
# - expected values assignments
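#
# A hypothetical test file following this layout (section names, keys and
# values below are illustrative only, not taken from the perf test suite):
#
#   [config]
#   command = record
#   args    = -e cycles sleep 1
#   ret     = 0
#
#   [event:base-record]
#   sample_period = 4000
#   sample_type   = 7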
class Test(object):
def __init__(self, path, options):
parser = ConfigParser.SafeConfigParser()
parser.read(path)
log.debug("running '%s'" % path)
self.path = path
self.test_dir = options.test_dir
self.perf = options.perf
self.command = parser.get('config', 'command')
self.args = parser.get('config', 'args')
try:
self.ret = parser.get('config', 'ret')
except:
self.ret = 0
self.expect = {}
self.result = {}
log.debug(" loading expected events");
self.load_events(path, self.expect)
def is_event(self, name):
if name.find("event") == -1:
return False
else:
return True
def load_events(self, path, events):
parser_event = ConfigParser.SafeConfigParser()
parser_event.read(path)
        # The event record section header contains the 'event' word,
        # optionally followed by ':' allowing the 'parent
        # event' to be loaded first as a base
for section in filter(self.is_event, parser_event.sections()):
parser_items = parser_event.items(section);
base_items = {}
# Read parent event if there's any
if (':' in section):
base = section[section.index(':') + 1:]
parser_base = ConfigParser.SafeConfigParser()
parser_base.read(self.test_dir + '/' + base)
base_items = parser_base.items('event')
e = Event(section, parser_items, base_items)
events[section] = e
def run_cmd(self, tempdir):
cmd = "PERF_TEST_ATTR=%s %s %s -o %s/perf.data %s" % (tempdir,
self.perf, self.command, tempdir, self.args)
ret = os.WEXITSTATUS(os.system(cmd))
log.warning(" running '%s' ret %d " % (cmd, ret))
if ret != int(self.ret):
raise Unsup(self)
def compare(self, expect, result):
match = {}
log.debug(" compare");
# For each expected event find all matching
# events in result. Fail if there's not any.
for exp_name, exp_event in expect.items():
exp_list = []
log.debug(" matching [%s]" % exp_name)
for res_name, res_event in result.items():
log.debug(" to [%s]" % res_name)
if (exp_event.equal(res_event)):
exp_list.append(res_name)
log.debug(" ->OK")
else:
log.debug(" ->FAIL");
log.debug(" match: [%s] matches %s" % (exp_name, str(exp_list)))
            # we did not find any matching event - fail
if (not exp_list):
exp_event.diff(res_event)
raise Fail(self, 'match failure');
match[exp_name] = exp_list
# For each defined group in the expected events
# check we match the same group in the result.
for exp_name, exp_event in expect.items():
group = exp_event.group
if (group == ''):
continue
for res_name in match[exp_name]:
res_group = result[res_name].group
if res_group not in match[group]:
raise Fail(self, 'group failure')
log.debug(" group: [%s] matches group leader %s" %
(exp_name, str(match[group])))
log.debug(" matched")
def resolve_groups(self, events):
for name, event in events.items():
group_fd = event['group_fd'];
if group_fd == '-1':
continue;
for iname, ievent in events.items():
if (ievent['fd'] == group_fd):
event.group = iname
log.debug('[%s] has group leader [%s]' % (name, iname))
break;
def run(self):
tempdir = tempfile.mkdtemp();
try:
# run the test script
self.run_cmd(tempdir);
            # load the result events produced by the test run
log.debug(" loading result events");
for f in glob.glob(tempdir + '/event*'):
self.load_events(f, self.result);
# resolve group_fd to event names
self.resolve_groups(self.expect);
self.resolve_groups(self.result);
# do the expectation - results matching - both ways
self.compare(self.expect, self.result)
self.compare(self.result, self.expect)
finally:
# cleanup
shutil.rmtree(tempdir)
def run_tests(options):
for f in glob.glob(options.test_dir + '/' + options.test):
try:
Test(f, options).run()
except Unsup, obj:
log.warning("unsupp %s" % obj.getMsg())
def setup_log(verbose):
global log
level = logging.CRITICAL
if verbose == 1:
level = logging.WARNING
if verbose == 2:
level = logging.INFO
if verbose >= 3:
level = logging.DEBUG
log = logging.getLogger('test')
log.setLevel(level)
ch = logging.StreamHandler()
ch.setLevel(level)
formatter = logging.Formatter('%(message)s')
ch.setFormatter(formatter)
log.addHandler(ch)
USAGE = '''%s [OPTIONS]
-d dir # tests dir
-p path # perf binary
-t test # single test
-v # verbose level
''' % sys.argv[0]
def main():
parser = optparse.OptionParser(usage=USAGE)
parser.add_option("-t", "--test",
action="store", type="string", dest="test")
parser.add_option("-d", "--test-dir",
action="store", type="string", dest="test_dir")
parser.add_option("-p", "--perf",
action="store", type="string", dest="perf")
parser.add_option("-v", "--verbose",
action="count", dest="verbose")
options, args = parser.parse_args()
if args:
parser.error('FAILED wrong arguments %s' % ' '.join(args))
return -1
setup_log(options.verbose)
if not options.test_dir:
print 'FAILED no -d option specified'
sys.exit(-1)
if not options.test:
options.test = 'test*'
try:
run_tests(options)
except Fail, obj:
print "FAILED %s" % obj.getMsg();
sys.exit(-1)
sys.exit(0)
if __name__ == '__main__':
main()
|
BrunaNayara/django_webapp
|
refs/heads/master
|
qa/urls.py
|
1
|
from django.conf.urls import patterns, url
from qa import views
urlpatterns = patterns('',
url(r'^$', views.index, name = 'index'),
)
|
staute/shinken_package
|
refs/heads/master
|
shinken/objects/satellitelink.py
|
9
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2014:
# Gabes Jean, [email protected]
# Gerhard Lausser, [email protected]
# Gregory Starck, [email protected]
# Hartmut Goebel, [email protected]
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
import time
import cPickle
from shinken.util import get_obj_name_two_args_and_void
from shinken.objects.item import Item, Items
from shinken.property import BoolProp, IntegerProp, StringProp, ListProp, DictProp, AddrProp
from shinken.log import logger
from shinken.http_client import HTTPClient, HTTPExceptions
class SatelliteLink(Item):
"""SatelliteLink is a common Class for link to satellite for
Arbiter with Conf Dispatcher.
"""
# id = 0 each Class will have it's own id
properties = Item.properties.copy()
properties.update({
'address': StringProp(default='localhost', fill_brok=['full_status']),
'timeout': IntegerProp(default=3, fill_brok=['full_status']),
'data_timeout': IntegerProp(default=120, fill_brok=['full_status']),
'check_interval': IntegerProp(default=60, fill_brok=['full_status']),
'max_check_attempts': IntegerProp(default=3, fill_brok=['full_status']),
'spare': BoolProp(default=False, fill_brok=['full_status']),
'manage_sub_realms': BoolProp(default=True, fill_brok=['full_status']),
'manage_arbiters': BoolProp(default=False, fill_brok=['full_status'], to_send=True),
'modules': ListProp(default=[''], to_send=True, split_on_coma=True),
'polling_interval': IntegerProp(default=1, fill_brok=['full_status'], to_send=True),
'use_timezone': StringProp(default='NOTSET', to_send=True),
'realm': StringProp(default='', fill_brok=['full_status'],
brok_transformation=get_obj_name_two_args_and_void),
'satellitemap': DictProp(default={}, elts_prop=AddrProp, to_send=True, override=True),
'use_ssl': BoolProp(default=False, fill_brok=['full_status']),
'hard_ssl_name_check': BoolProp(default=True, fill_brok=['full_status']),
'passive': BoolProp(default=False, fill_brok=['full_status'], to_send=True),
})
running_properties = Item.running_properties.copy()
running_properties.update({
'con': StringProp(default=None),
'alive': BoolProp(default=True, fill_brok=['full_status']),
'broks': StringProp(default=[]),
# the number of failed attempts
'attempt': StringProp(default=0, fill_brok=['full_status']),
# whether the satellite can be reached over the network (not dead, timed out or in error)
'reachable': BoolProp(default=False, fill_brok=['full_status']),
'last_check': IntegerProp(default=0, fill_brok=['full_status']),
'managed_confs': StringProp(default={}),
})
def __init__(self, *args, **kwargs):
super(SatelliteLink, self).__init__(*args, **kwargs)
self.arb_satmap = {'address': '0.0.0.0', 'port': 0}
if hasattr(self, 'address'):
self.arb_satmap['address'] = self.address
if hasattr(self, 'port'):
try:
self.arb_satmap['port'] = int(self.port)
except Exception:
pass
def set_arbiter_satellitemap(self, satellitemap):
"""
arb_satmap is the satellitemap in current context:
- A SatelliteLink is owned by an Arbiter
- satellitemap attribute of SatelliteLink is the map
defined IN THE satellite configuration
but for creating connections, we need to have the satellitemap of the Arbiter
"""
self.arb_satmap = {'address': self.address, 'port': self.port, 'use_ssl': self.use_ssl,
'hard_ssl_name_check': self.hard_ssl_name_check}
self.arb_satmap.update(satellitemap)
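# Hypothetical illustration of the merge above (values are made up): with
# address='10.0.0.5', port=7771 and satellitemap={'address': '192.168.1.5'},
# arb_satmap ends up as {'address': '192.168.1.5', 'port': 7771, 'use_ssl': ...,
# 'hard_ssl_name_check': ...}, i.e. the arbiter-side satellitemap entries
# override the link's own values.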
def create_connection(self):
self.con = HTTPClient(address=self.arb_satmap['address'], port=self.arb_satmap['port'],
timeout=self.timeout, data_timeout=self.data_timeout,
use_ssl=self.use_ssl,
strong_ssl=self.hard_ssl_name_check
)
self.uri = self.con.uri
def put_conf(self, conf):
if self.con is None:
self.create_connection()
# Maybe the connection was not ok, bail out
if not self.con:
return False
try:
self.con.get('ping')
self.con.post('put_conf', {'conf': conf}, wait='long')
print "PUT CONF SUCESS", self.get_name()
return True
except HTTPExceptions, exp:
self.con = None
logger.error("Failed sending configuration for %s: %s", self.get_name(), str(exp))
return False
# Get and clean all of our broks
def get_all_broks(self):
res = self.broks
self.broks = []
return res
# Set alive, reachable, and reset attempts.
# If we change state, raise a status brok update
def set_alive(self):
was_alive = self.alive
self.alive = True
self.attempt = 0
self.reachable = True
# We came from dead to alive
# so we must add a brok update
if not was_alive:
b = self.get_update_status_brok()
self.broks.append(b)
def set_dead(self):
was_alive = self.alive
self.alive = False
self.con = None
# We are dead now. Must raise
# a brok to say it
if was_alive:
logger.warning("Setting the satellite %s to a dead state.", self.get_name())
b = self.get_update_status_brok()
self.broks.append(b)
# Go in reachable=False and add a failed attempt
# if we reach the max, go dead
def add_failed_check_attempt(self, reason=''):
self.reachable = False
self.attempt += 1
self.attempt = min(self.attempt, self.max_check_attempts)
# Don't need to warn again and again if the satellite is already dead
if self.alive:
logger.warning("Add failed attempt to %s (%d/%d) %s",
self.get_name(), self.attempt, self.max_check_attempts, reason)
# check when we just go HARD (dead)
if self.attempt == self.max_check_attempts:
self.set_dead()
# Update satellite info every self.check_interval seconds
# so we smooth arbiter actions down to just the useful ones
# and do not overreact to a small timeout
def update_infos(self):
# First look if it's not too early to ping
now = time.time()
since_last_check = now - self.last_check
if since_last_check < self.check_interval:
return
self.last_check = now
# We ping and update the managed list
self.ping()
self.update_managed_list()
# Update the state of this element
b = self.get_update_status_brok()
self.broks.append(b)
# The element just got a new conf_id; we put it in our list
# because the satellite may be too busy to answer right now
def known_conf_managed_push(self, cfg_id, push_flavor):
self.managed_confs[cfg_id] = push_flavor
def ping(self):
logger.debug("Pinging %s", self.get_name())
try:
if self.con is None:
self.create_connection()
logger.debug(" (%s)", self.uri)
# If the connection failed to initialize, bail out
if self.con is None:
self.add_failed_check_attempt()
return
r = self.con.get('ping')
# Should return us pong string
if r == 'pong':
self.set_alive()
else:
self.add_failed_check_attempt()
except HTTPExceptions, exp:
self.add_failed_check_attempt(reason=str(exp))
def wait_new_conf(self):
if self.con is None:
self.create_connection()
try:
r = self.con.get('wait_new_conf')
return True
except HTTPExceptions, exp:
self.con = None
return False
# To know if the satellite has a conf (magic_hash = None)
# OR to know if the satellite has THIS conf (magic_hash != None)
# Magic_hash is for the arbiter check only
def have_conf(self, magic_hash=None):
if self.con is None:
self.create_connection()
# If the connection failed to initialize, bail out
if self.con is None:
return False
try:
if magic_hash is None:
r = self.con.get('have_conf')
else:
r = self.con.get('have_conf', {'magic_hash': magic_hash})
print "have_conf RAW CALL", r, type(r)
if not isinstance(r, bool):
return False
return r
except HTTPExceptions, exp:
self.con = None
return False
# To know if a receiver got a conf or not
def got_conf(self):
if self.con is None:
self.create_connection()
# If the connection failed to initialize, bail out
if self.con is None:
return False
try:
r = self.con.get('got_conf')
# Protect against bad return
if not isinstance(r, bool):
return False
return r
except HTTPExceptions, exp:
self.con = None
return False
def remove_from_conf(self, sched_id):
if self.con is None:
self.create_connection()
# If the connection failed to initialize, bail out
if self.con is None:
return
try:
self.con.get('remove_from_conf', {'sched_id': sched_id})
return True
except HTTPExceptions, exp:
self.con = None
return False
def update_managed_list(self):
if self.con is None:
self.create_connection()
# If the connection failed to initialize, bail out
if self.con is None:
self.managed_confs = {}
return
try:
tab = self.con.get('what_i_managed')
print "[%s]What i managed raw value is %s" % (self.get_name(), tab)
# Protect against bad return
if not isinstance(tab, dict):
print "[%s]What i managed: Got exception: bad what_i_managed returns" % \
self.get_name(), tab
self.con = None
self.managed_confs = {}
return
# Ok, protect against JSON that is changing keys to strings instead of ints
tab_cleaned = {}
for (k, v) in tab.iteritems():
try:
tab_cleaned[int(k)] = v
except ValueError:
print "[%s]What i managed: Got exception: bad what_i_managed returns" % \
self.get_name(), tab
# We can update our list now
self.managed_confs = tab_cleaned
except HTTPExceptions, exp:
print "EXCEPTION INwhat_i_managed", str(exp)
# A timeout is not a crime, put this case aside
# TODO : fix the timeout part?
self.con = None
print "[%s]What i managed: Got exception: %s %s %s" % \
(self.get_name(), exp, type(exp), exp.__dict__)
self.managed_confs = {}
# Return True if the satellite says it manages this configuration
def do_i_manage(self, cfg_id, push_flavor):
# If not even the cfg_id in the managed_conf, bail out
if cfg_id not in self.managed_confs:
return False
# maybe it's in but with a false push_flavor. check it :)
return self.managed_confs[cfg_id] == push_flavor
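# Hypothetical example of the check above: with managed_confs == {1: 'abcd'},
# do_i_manage(1, 'abcd') -> True, do_i_manage(1, 'efgh') -> False,
# do_i_manage(2, 'abcd') -> False.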
def push_broks(self, broks):
if self.con is None:
self.create_connection()
# If the connection failed to initialize, bail out
if self.con is None:
return False
try:
# Always do a simple ping to avoid a LOOOONG lock
self.con.get('ping')
self.con.post('push_broks', {'broks': broks}, wait='long')
return True
except HTTPExceptions, exp:
self.con = None
return False
def get_external_commands(self):
if self.con is None:
self.create_connection()
# If the connection failed to initialize, bail out
if self.con is None:
return []
try:
self.con.get('ping')
tab = self.con.get('get_external_commands', wait='long')
tab = cPickle.loads(str(tab))
# Protect against bad return
if not isinstance(tab, list):
self.con = None
return []
return tab
except HTTPExceptions, exp:
self.con = None
return []
except AttributeError:
self.con = None
return []
except:
self.con = None
return []
def prepare_for_conf(self):
self.cfg = {'global': {}, 'schedulers': {}, 'arbiters': {}}
properties = self.__class__.properties
for prop, entry in properties.items():
if entry.to_send:
self.cfg['global'][prop] = getattr(self, prop)
cls = self.__class__
# Also add global values
self.cfg['global']['api_key'] = cls.api_key
self.cfg['global']['secret'] = cls.secret
self.cfg['global']['http_proxy'] = cls.http_proxy
self.cfg['global']['statsd_host'] = cls.statsd_host
self.cfg['global']['statsd_port'] = cls.statsd_port
self.cfg['global']['statsd_prefix'] = cls.statsd_prefix
self.cfg['global']['statsd_enabled'] = cls.statsd_enabled
self.cfg['global']['statsd_interval'] = cls.statsd_interval
self.cfg['global']['statsd_types'] = cls.statsd_types
self.cfg['global']['statsd_pattern'] = cls.statsd_pattern
# Some parameters for satellites are not defined in the satellites conf
# but in the global configuration. We can pass them in the global
# property
def add_global_conf_parameters(self, params):
for prop in params:
self.cfg['global'][prop] = params[prop]
def get_my_type(self):
return self.__class__.my_type
# Here for poller and reactionner. Scheduler have its own function
def give_satellite_cfg(self):
return {'port': self.port,
'address': self.address,
'use_ssl': self.use_ssl,
'hard_ssl_name_check': self.hard_ssl_name_check,
'name': self.get_name(),
'instance_id': self.id,
'active': True,
'passive': self.passive,
'poller_tags': getattr(self, 'poller_tags', []),
'reactionner_tags': getattr(self, 'reactionner_tags', []),
'api_key': self.__class__.api_key,
'secret': self.__class__.secret,
}
# Called by pickle to turn the object into plain data,
# because we DO NOT WANT references kept in this pickling!
def __getstate__(self):
cls = self.__class__
# id is not in *_properties
res = {'id': self.id}
for prop in cls.properties:
if prop != 'realm':
if hasattr(self, prop):
res[prop] = getattr(self, prop)
for prop in cls.running_properties:
if prop != 'con':
if hasattr(self, prop):
res[prop] = getattr(self, prop)
return res
# Inverse of __getstate__
def __setstate__(self, state):
cls = self.__class__
self.id = state['id']
for prop in cls.properties:
if prop in state:
setattr(self, prop, state[prop])
for prop in cls.running_properties:
if prop in state:
setattr(self, prop, state[prop])
# con needs to be explicitly set:
self.con = None
class SatelliteLinks(Items):
"""Please Add a Docstring to describe the class here"""
# name_property = "name"
# inner_class = SchedulerLink
# We must have a realm property, so we find our realm
def linkify(self, realms, modules):
self.linkify_s_by_p(realms)
self.linkify_s_by_plug(modules)
def linkify_s_by_p(self, realms):
for s in self:
p_name = s.realm.strip()
# If no realm name, take the default one
if p_name == '':
p = realms.get_default()
s.realm = p
else: # find the realm one
p = realms.find_by_name(p_name)
s.realm = p
# Check if what we get is OK or not
if p is not None:
s.register_to_my_realm()
else:
err = "The %s %s got a unknown realm '%s'" % \
(s.__class__.my_type, s.get_name(), p_name)
s.configuration_errors.append(err)
|
piiswrong/mxnet
|
refs/heads/master
|
plugin/opencv/__init__.py
|
61
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
# pylint: disable=wildcard-import
"""Opencv plugin for mxnet"""
from .opencv import *
|
overtherain/scriptfile
|
refs/heads/master
|
software/googleAppEngine/lib/django_1_2/tests/regressiontests/forms/localflavor/nl.py
|
89
|
from django.contrib.localflavor.nl.forms import (NLPhoneNumberField,
NLZipCodeField, NLSoFiNumberField, NLProvinceSelect)
from utils import LocalFlavorTestCase
class NLLocalFlavorTests(LocalFlavorTestCase):
def test_NLProvinceSelect(self):
f = NLProvinceSelect()
out = u'''<select name="provinces">
<option value="DR">Drenthe</option>
<option value="FL">Flevoland</option>
<option value="FR">Friesland</option>
<option value="GL">Gelderland</option>
<option value="GR">Groningen</option>
<option value="LB">Limburg</option>
<option value="NB">Noord-Brabant</option>
<option value="NH">Noord-Holland</option>
<option value="OV" selected="selected">Overijssel</option>
<option value="UT">Utrecht</option>
<option value="ZE">Zeeland</option>
<option value="ZH">Zuid-Holland</option>
</select>'''
self.assertEqual(f.render('provinces', 'OV'), out)
def test_NLPhoneNumberField(self):
error_invalid = [u'Enter a valid phone number']
valid = {
'012-3456789': '012-3456789',
'0123456789': '0123456789',
'+31-12-3456789': '+31-12-3456789',
'(0123) 456789': '(0123) 456789',
}
invalid = {
'foo': error_invalid,
}
self.assertFieldOutput(NLPhoneNumberField, valid, invalid)
def test_NLZipCodeField(self):
error_invalid = [u'Enter a valid postal code']
valid = {
'1234ab': '1234 AB',
'1234 ab': '1234 AB',
'1234 AB': '1234 AB',
}
invalid = {
'0123AB': error_invalid,
'foo': error_invalid,
}
self.assertFieldOutput(NLZipCodeField, valid, invalid)
def test_NLSoFiNumberField(self):
error_invalid = [u'Enter a valid SoFi number']
valid = {
'123456782': '123456782',
}
invalid = {
'000000000': error_invalid,
'123456789': error_invalid,
'foo': error_invalid,
}
self.assertFieldOutput(NLSoFiNumberField, valid, invalid)
|
autosportlabs/RaceCapture_App
|
refs/heads/master
|
autosportlabs/racecapture/views/configuration/rcp/canconfigview.py
|
1
|
#
# Race Capture App
#
# Copyright (C) 2014-2017 Autosport Labs
#
# This file is part of the Race Capture App
#
# This is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# See the GNU General Public License for more details. You should
# have received a copy of the GNU General Public License along with
# this code. If not, see <http://www.gnu.org/licenses/>.
import kivy
kivy.require('1.10.0')
from kivy.metrics import dp
from settingsview import SettingsSwitch
from mappedspinner import MappedSpinner
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.switch import Switch
from kivy.uix.button import Button
from kivy.properties import NumericProperty, ListProperty
from kivy.app import Builder
from utils import *
from settingsview import SettingsView
from autosportlabs.racecapture.views.configuration.baseconfigview import BaseConfigView
from fieldlabel import FieldLabel
class CANBaudRateSpinner(MappedSpinner):
channel_id = NumericProperty(0)
def __init__(self, **kwargs):
super(CANBaudRateSpinner, self).__init__(**kwargs)
self.setValueMap({50000: '50K Baud', 100000: '100K Baud', 125000: '125K Baud', 250000:'250K Baud', 500000:'500K Baud', 1000000:'1M Baud'}, '500000')
class CANTerminationSwitch(Switch):
channel_id = NumericProperty(0)
CAN_CONFIG_VIEW_KV = """
<CANConfigView>:
spacing: dp(20)
orientation: 'vertical'
#row_default_height: dp(40)
id: cansettings
HSeparator:
text: 'CAN bus Configuration'
halign: 'left'
size_hint_y: 0.10
SettingsView:
id: can_enabled
label_text: 'CAN bus'
help_text: 'CAN interface for OBDII and custom CAN channels'
size_hint_y: 0.27
HLineSeparator:
size_hint_y: 0.01
GridLayout:
size_hint_y: 0.05
cols: 3
FieldLabel:
halign: 'center'
text: 'Channel'
FieldLabel:
halign: 'center'
text: 'Baud Rate'
FieldLabel:
halign: 'center'
id: can_term_header
text: 'Termination'
ScrollContainer:
size_hint_y: 0.57
do_scroll_x: False
do_scroll_y: True
size_hint_y: 1
size_hint_x: 1
GridLayout:
cols: 3
id: can_settings
row_force_default: True
padding: [0, dp(20)]
spacing: [0, dp(10)]
row_default_height: dp(50)
size_hint_y: None
height: self.minimum_height
"""
class CANConfigView(BaseConfigView):
Builder.load_string(CAN_CONFIG_VIEW_KV)
def __init__(self, **kwargs):
super(CANConfigView, self).__init__(**kwargs)
self.can_config = None
self.view_loaded = False
self.register_event_type('on_config_updated')
btEnable = self.ids.can_enabled
btEnable.bind(on_setting=self.on_can_enabled)
btEnable.setControl(SettingsSwitch())
def on_can_enabled(self, instance, value):
if self.view_loaded:
self.can_config.enabled = value
self.can_config.stale = True
self.dispatch('on_modified')
def on_can_baud(self, instance, value):
if self.view_loaded:
channel_id = instance.channel_id
self.can_config.baudRate[channel_id] = instance.getValueFromKey(value)
self.can_config.stale = True
self.dispatch('on_modified')
def on_can_termination(self, instance, value):
if self.view_loaded:
channel_id = instance.channel_id
self.can_config.termination_enabled[channel_id] = 1 if value == True else 0
self.can_config.stale = True
self.dispatch('on_modified')
def on_config_updated(self, cfg):
try:
self.view_loaded = False
can_config = cfg.canConfig
self.ids.can_enabled.setValue(can_config.enabled)
self._update_can_settings(cfg)
self.can_config = can_config
self.ids.can_term_header.text = 'Termination' if cfg.capabilities.has_can_term else ''
finally:
self.view_loaded = True
def _update_can_settings(self, cfg):
self.can_config = cfg.canConfig
capabilities = cfg.capabilities
can_settings = self.ids.can_settings
can_settings.clear_widgets()
can_channel_count = capabilities.channels.can
for i in range(0, can_channel_count):
can_settings.add_widget(FieldLabel(text=str(i + 1), halign='center'))
baud_rate = CANBaudRateSpinner()
baud_rate.channel_id = i
baud_rate.bind(text=self.on_can_baud)
baud_rate.setFromValue(self.can_config.baudRate[i])
can_settings.add_widget(baud_rate)
if capabilities.has_can_term:
termination = CANTerminationSwitch()
termination.channel_id = i
termination.active = self.can_config.termination_enabled[i]
termination.bind(active=self.on_can_termination)
can_settings.add_widget(termination)
else:
can_settings.add_widget(BoxLayout())
|
texastribune/tx_salaries
|
refs/heads/master
|
tx_salaries/utils/transformers/texas_state_university.py
|
1
|
from . import base
from . import mixins
from datetime import date
from .. import cleaver
# --row=4
class TransformedRecord(
mixins.GenericCompensationMixin,
mixins.GenericDepartmentMixin, mixins.GenericIdentifierMixin,
mixins.GenericJobTitleMixin, mixins.GenericPersonMixin,
mixins.MembershipMixin, mixins.OrganizationMixin, mixins.PostMixin,
mixins.RaceMixin, mixins.LinkMixin, base.BaseTransformedRecord):
MAP = {
'last_name': 'Last name',
'first_name': 'First name',
'middle_name': 'Middle name',
# 'suffix': '', if needed
'department': 'Organizational Unit',
'job_title': 'Job Title',
'hire_date': 'Hire Date',
'compensation': 'Annual',
'employee_status': 'Personnel subarea',
'gender': 'Gender',
'race': 'Ethnicity',
}
# The order of the name fields to build a full name.
# If `full_name` is in MAP, you don't need this at all.
NAME_FIELDS = ('first_name', 'middle_name', 'last_name', )
# The name of the organization. This WILL SHOW UP ON THE SITE,
# so double check it!
ORGANIZATION_NAME = 'Texas State University'
# What type of organization is this?
# This MUST match what we use on the site,
# double check against salaries.texastribune.org
ORGANIZATION_CLASSIFICATION = 'University'
# When did you receive the data? NOT when we added it to the site.
DATE_PROVIDED = date(2017, 2, 2)
# The URL to find the raw data in our S3 bucket.
URL = ('https://s3.amazonaws.com/raw.texastribune.org/'
'texas_state_university/2017-02/texas-state.xlsx')
# How do they track gender? We need to map what they use to `F` and `M`.
gender_map = {'Female': 'F', 'Male': 'M'}
# This is how the loader checks for valid people.
# Defaults to checking to see if `last_name` is empty.
@property
def is_valid(self):
# Adjust to return False on invalid fields. For example:
return self.last_name.strip() != ''
@property
def compensation_type(self):
employee_type = self.employee_status.strip()
if 'PT' in employee_type:
return 'PT'
elif 'FT' in employee_type:
return 'FT'
@property
def description(self):
employee_type = self.employee_status.strip()
if 'PT' in employee_type:
return "Part-time annual pay"
elif 'FT' in employee_type:
return "Annual pay"
@property
def person(self):
name = self.get_name()
r = {
'family_name': name.last,
'given_name': name.first,
'additional_name': name.middle,
'name': unicode(name),
'gender': self.gender_map[self.get_mapped_value('gender')]
}
return r
transform = base.transform_factory(TransformedRecord)
|
40223202/2015cdb_g2
|
refs/heads/master
|
static/Brython3.1.1-20150328-091302/Lib/xml/sax/_exceptions.py
|
625
|
"""Different kinds of SAX Exceptions"""
#in brython the 4 lines below causes an $globals['Exception'] error
#import sys
#if sys.platform[:4] == "java":
# from java.lang import Exception
#del sys
# ===== SAXEXCEPTION =====
class SAXException(Exception):
"""Encapsulate an XML error or warning. This class can contain
basic error or warning information from either the XML parser or
the application: you can subclass it to provide additional
functionality, or to add localization. Note that although you will
receive a SAXException as the argument to the handlers in the
ErrorHandler interface, you are not actually required to raise
the exception; instead, you can simply read the information in
it."""
def __init__(self, msg, exception=None):
"""Creates an exception. The message is required, but the exception
is optional."""
self._msg = msg
self._exception = exception
Exception.__init__(self, msg)
def getMessage(self):
"Return a message for this exception."
return self._msg
def getException(self):
"Return the embedded exception, or None if there was none."
return self._exception
def __str__(self):
"Create a string representation of the exception."
return self._msg
def __getitem__(self, ix):
"""Avoids weird error messages if someone does exception[ix] by
mistake, since Exception has __getitem__ defined."""
raise AttributeError("__getitem__")
# ===== SAXPARSEEXCEPTION =====
class SAXParseException(SAXException):
"""Encapsulate an XML parse error or warning.
This exception will include information for locating the error in
the original XML document. Note that although the application will
receive a SAXParseException as the argument to the handlers in the
ErrorHandler interface, the application is not actually required
to raise the exception; instead, it can simply read the
information in it and take a different action.
Since this exception is a subclass of SAXException, it inherits
the ability to wrap another exception."""
def __init__(self, msg, exception, locator):
"Creates the exception. The exception parameter is allowed to be None."
SAXException.__init__(self, msg, exception)
self._locator = locator
# We need to cache this stuff at construction time.
# If this exception is raised, the objects through which we must
# traverse to get this information may be deleted by the time
# it gets caught.
self._systemId = self._locator.getSystemId()
self._colnum = self._locator.getColumnNumber()
self._linenum = self._locator.getLineNumber()
def getColumnNumber(self):
"""The column number of the end of the text where the exception
occurred."""
return self._colnum
def getLineNumber(self):
"The line number of the end of the text where the exception occurred."
return self._linenum
def getPublicId(self):
"Get the public identifier of the entity where the exception occurred."
return self._locator.getPublicId()
def getSystemId(self):
"Get the system identifier of the entity where the exception occurred."
return self._systemId
def __str__(self):
"Create a string representation of the exception."
sysid = self.getSystemId()
if sysid is None:
sysid = "<unknown>"
linenum = self.getLineNumber()
if linenum is None:
linenum = "?"
colnum = self.getColumnNumber()
if colnum is None:
colnum = "?"
return "%s:%s:%s: %s" % (sysid, linenum, colnum, self._msg)
# ===== SAXNOTRECOGNIZEDEXCEPTION =====
class SAXNotRecognizedException(SAXException):
"""Exception class for an unrecognized identifier.
An XMLReader will raise this exception when it is confronted with an
unrecognized feature or property. SAX applications and extensions may
use this class for similar purposes."""
pass
# ===== SAXNOTSUPPORTEDEXCEPTION =====
class SAXNotSupportedException(SAXException):
"""Exception class for an unsupported operation.
An XMLReader will raise this exception when a service it cannot
perform is requested (specifically setting a state or value). SAX
applications and extensions may use this class for similar
purposes."""
pass
# ===== SAXNOTSUPPORTEDEXCEPTION =====
class SAXReaderNotAvailable(SAXNotSupportedException):
"""Exception class for a missing driver.
An XMLReader module (driver) should raise this exception when it
is first imported, e.g. when a support module cannot be imported.
It also may be raised during parsing, e.g. if executing an external
program is not permitted."""
pass
|
idahu29/payment-redis
|
refs/heads/master
|
manage.py
|
1
|
from flask_script import Server, Manager
# from flask_migrate import Migrate, MigrateCommand
from application.app import app
import logging
manager = Manager(app)
# manager.add_command("runserver", Server(host="0.0.0.0", port=5000, ssl_crt='/Users/biao/Documents/myprojects/payment-redis/myselfsigned.cer', ssl_key='/Users/biao/Documents/myprojects/payment-redis/myselfsigned.key'))
# migrations
# manager.add_command('db', MigrateCommand)
# @manager.command
# def create_db():
# """Creates the db tables."""
# db.create_all()
if __name__ == '__main__':
manager.run()
|
bblay/iris
|
refs/heads/master
|
docs/iris/example_tests/test_global_map.py
|
3
|
# (C) British Crown Copyright 2010 - 2013, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
# Import Iris tests first so that some things can be initialised before importing anything else.
import iris.tests as tests
import extest_util
with extest_util.add_examples_to_path():
import global_map
class TestGlobalMap(tests.GraphicsTest):
"""Test the global_map example code."""
def test_global_map(self):
with extest_util.show_replaced_by_check_graphic(self):
global_map.main()
if __name__ == '__main__':
tests.main()
|
dexterx17/nodoSocket
|
refs/heads/master
|
clients/Python-2.7.6/Demo/scripts/lpwatch.py
|
10
|
#! /usr/bin/env python
# Watch line printer queue(s).
# Intended for BSD 4.3 lpq.
import os
import sys
import time
DEF_PRINTER = 'psc'
DEF_DELAY = 10
def main():
delay = DEF_DELAY # XXX Use getopt() later
try:
thisuser = os.environ['LOGNAME']
except:
thisuser = os.environ['USER']
printers = sys.argv[1:]
if printers:
# Strip '-P' from printer names just in case
# the user specified it...
for i, name in enumerate(printers):
if name[:2] == '-P':
printers[i] = name[2:]
else:
if os.environ.has_key('PRINTER'):
printers = [os.environ['PRINTER']]
else:
printers = [DEF_PRINTER]
clearhome = os.popen('clear', 'r').read()
while True:
text = clearhome
for name in printers:
text += makestatus(name, thisuser) + '\n'
print text
time.sleep(delay)
def makestatus(name, thisuser):
pipe = os.popen('lpq -P' + name + ' 2>&1', 'r')
lines = []
users = {}
aheadbytes = 0
aheadjobs = 0
userseen = False
totalbytes = 0
totaljobs = 0
for line in pipe:
fields = line.split()
n = len(fields)
if len(fields) >= 6 and fields[n-1] == 'bytes':
rank, user, job = fields[0:3]
files = fields[3:-2]
bytes = int(fields[n-2])
if user == thisuser:
userseen = True
elif not userseen:
aheadbytes += bytes
aheadjobs += 1
totalbytes += bytes
totaljobs += 1
ujobs, ubytes = users.get(user, (0, 0))
ujobs += 1
ubytes += bytes
users[user] = ujobs, ubytes
else:
if fields and fields[0] != 'Rank':
line = line.strip()
if line == 'no entries':
line = name + ': idle'
elif line[-22:] == ' is ready and printing':
line = name
lines.append(line)
if totaljobs:
line = '%d K' % ((totalbytes+1023) // 1024)
if totaljobs != len(users):
line += ' (%d jobs)' % totaljobs
if len(users) == 1:
line += ' for %s' % (users.keys()[0],)
else:
line += ' for %d users' % len(users)
if userseen:
if aheadjobs == 0:
line += ' (%s first)' % thisuser
else:
line += ' (%d K before %s)' % (
(aheadbytes+1023) // 1024, thisuser)
lines.append(line)
sts = pipe.close()
if sts:
lines.append('lpq exit status %r' % (sts,))
return ': '.join(lines)
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass
|
obnoxxx/samba
|
refs/heads/master
|
buildtools/wafsamba/tests/test_bundled.py
|
45
|
# Copyright (C) 2012 Jelmer Vernooij <[email protected]>
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 2.1 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
from wafsamba.tests import TestCase
from wafsamba.samba_bundled import (
tuplize_version,
)
class TuplizeVersionTests(TestCase):
def test_simple(self):
self.assertEquals((1, 2, 10), tuplize_version("1.2.10"))
|
yourcelf/cmsplugin-filer
|
refs/heads/master
|
cmsplugin_filer_folder/models.py
|
17
|
from django.utils.translation import ugettext_lazy as _
from django.db import models
from cms.models import CMSPlugin, Page
from django.utils.translation import ugettext_lazy as _
from posixpath import join, basename, splitext, exists
from filer.fields.folder import FilerFolderField
from django.conf import settings
from cmsplugin_filer_utils import FilerPluginManager
VIEW_OPTIONS = getattr(settings, 'CMSPLUGIN_FILER_FOLDER_VIEW_OPTIONS', (("list", _("List")),("slideshow",_("Slideshow"))))
class FilerFolder(CMSPlugin):
"""
Plugin for storing any type of Folder.
Default template displays files store inside this folder.
"""
title = models.CharField(_("title"), max_length=255, null=True, blank=True)
view_option = models.CharField(_("view option"),max_length=10,
choices=VIEW_OPTIONS, default="list")
folder = FilerFolderField()
objects = FilerPluginManager(select_related=('folder',))
def __unicode__(self):
if self.title:
return self.title
elif self.folder.name:
# added if, because it raised an attribute error when the folder name wasn't defined
return self.folder.name
return "<empty>"
search_fields = ('title',)
|
LightStage-Aber/LightStage-Repo
|
refs/heads/master
|
src/sequences/spherical_gradient.py
|
1
|
from __future__ import division
from collections import deque
import math
import numpy as np
### Calculate X-Axis Gradient White Value.
class BaseSequenceContainer:
def __init__(self, led_vertex, x_value, index, intensity_baseline):
self.led_vertex = led_vertex
self.x_value = x_value
self.index = index
self.intensity_baseline = intensity_baseline
self.intensity_gradient = None
def apply_gradient(self, f) :
self.intensity_gradient = f
def get_index(self):
return self.index
def get_intensity(self):
assert self.intensity_baseline is not None and self.intensity_gradient is not None , "LED index at "+str(self.index)+": baseline intensity, gradient intensity or proportional (rotation) position have not been set."
return self.intensity_baseline * self.intensity_gradient #watch out for accumulated/ power loss, e.g. when 0.5*0.5. Max values should be rescaled up to 1.0 (full) wattage.
class BaseGradientSequence:
"""
The basic sequence generator uses individual lights as intervals.
- This produces a number of sequence lighting steps equal to the number of lights, which has a very specific and lengthy use case.
Public methods:
get_next_sequence()
get_sequence_number()
__init__()
"""
def __init__(self, leds_vertices, intensity_baselines, axis="x", scaled_range=[0.5, 1.0]):
self.leds_vertices = leds_vertices
self.intensity_baselines = intensity_baselines
self.from_value = scaled_range[1] #1.0
self.to_value = scaled_range[0] #0.5
self.axis = ord(axis)-120 if axis in ["x","y","z"] else None # Default to assertion failure.
self.loop_number = 0
self.sequence_counter = 0
assert self.axis in [0,1,2], "`Axis` argument must be specified as either \"x\", \"y\" or \"z\"."
assert self.leds_vertices is not None and self.intensity_baselines is not None, "Requires led vertex and corresponding intensity data in a sequence data type."
assert len(self.intensity_baselines) == len(self.leds_vertices), "The quantities of LED vertices and Intensity Baselines should be identical."
assert self.from_value > self.to_value, "From value (e.g. 1.0) should be larger than To value (e.g. 0.5)."
self.dequeue_Ls = self.__initialise()
def __initialise(self):
self.dequeue_Ls = []
self.__collect()
self.__order_by_axis()
self.dequeue_Ls = deque(self.dequeue_Ls)
return self.dequeue_Ls
def __collect(self):
if len(self.dequeue_Ls) == 0: # Avoid repeated processing for a collection.
for i in range(len(self.leds_vertices)):
vertex = self.leds_vertices[i]
intensity = self.intensity_baselines[i]
c = BaseSequenceContainer( vertex, vertex[self.axis], i, intensity )
self.dequeue_Ls.append( c )
return self.dequeue_Ls
def __order_by_axis(self):
self.dequeue_Ls.sort( key=lambda c: c.x_value )
def __apply_gradient(self):
interval = self.__get_interval()
curr_interval = self.from_value
for i in range(len(self.dequeue_Ls)):
L = self.dequeue_Ls[i]
L.apply_gradient( curr_interval )
curr_interval -= interval
def __get_interval(self):
grad_range = self.from_value-self.to_value
interval = grad_range / (len(self.dequeue_Ls)-1)
return interval
def __rotate(self):
Ls = self.dequeue_Ls
end = self.dequeue_Ls.pop() # Pop end (right).
self.dequeue_Ls.appendleft( end ) # Insert end at start (left).
def get_sequence_number(self):
return self.sequence_counter, self.loop_number
def get_next_sequence(self):
def __handle_sequence_count():
# handle index wrap around. Max = len(Ls)
if self.sequence_counter == len(self.dequeue_Ls): # Reached end of loop, therefore reset/update counters.
self.loop_number += 1
self.sequence_counter = 1
else: # start or midway through loop, update counter.
self.sequence_counter +=1
if self.sequence_counter == 0 and self.loop_number == 0: # First ever call
self.__apply_gradient()
else: # All subsequent calls
self.__rotate()
self.__apply_gradient()
__handle_sequence_count()
return self.dequeue_Ls
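# Minimal usage sketch (hypothetical LED data, not from the original module):
#   leds = [(0.0, 0.0, 0.0), (1.0, 0.0, 0.0), (2.0, 0.0, 0.0)]
#   seq = BaseGradientSequence(leds, [1.0, 1.0, 1.0], axis="x", scaled_range=[0.5, 1.0])
#   for c in seq.get_next_sequence():
#       print(c.get_index(), c.get_intensity())
# Each call to get_next_sequence() rotates the ordering and re-applies the gradient.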
class GradientSequence_IntervalSpecified(BaseGradientSequence):
"""
The Interval Specified Sequence Generator uses a specified quantity of interval lighting steps, proportionally illuminated based on X-axis position.
- The quantity of sequence lighting steps is specified by user.
Public methods:
get_next_sequence()
get_sequence_number()
__init__()
"""
def __init__(self, leds_vertices, intensity_baselines, axis="x", scaled_range=[0.5, 1.0], quantity_of_intervals=10):
BaseGradientSequence.__init__(self, leds_vertices, intensity_baselines, axis, scaled_range)
self.quantity_of_intervals = quantity_of_intervals
assert leds_vertices is not None and isinstance(leds_vertices, list) and len(leds_vertices) > 0, "Sequence LED vertices must be valid, list type and quantity GT 0."
assert intensity_baselines is not None and isinstance(intensity_baselines, list) and len(intensity_baselines) > 0, "Sequence LED intensities must be valid, list type and quantity GT 0."
assert self.quantity_of_intervals > 0, "Quantity intervals ({}) must be GT 0.".format(self.quantity_of_intervals)
assert self.quantity_of_intervals <= len(leds_vertices), "Quantity intervals ({},({})) must be LTEQ quantity of lights ({},({})), result is {}.".format(
self.quantity_of_intervals, type(self.quantity_of_intervals),
len(leds_vertices), type(len(leds_vertices)),
(self.quantity_of_intervals <= len(leds_vertices)))
def __apply_gradient(self):
def __renormalize(n, range1=[0.0,1.0]):
delta1 = max(n) - min(n)
delta2 = range1[1] - range1[0]
return (delta2 * (np.array(n) - min(n)) / delta1) + range1[0]
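# Hypothetical worked example of the helper above: __renormalize([0.0, 1.0, 3.0], [0.5, 1.0])
# gives delta1 = 3.0, delta2 = 0.5, hence approximately [0.5, 0.667, 1.0].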
def __get_proportion_list():
x_axis = [ c.x_value for c in self.dequeue_Ls ]
proportions = __renormalize(x_axis) # 2a. proportional positions , scale above 0.
assert all( [ 0.0 <= x <= 1.0 for x in proportions ] ), "Proportions should be between 0.0 and 1.0 only. Found: "+str(proportions)
assert all( [ c.x_value == x for c,x in zip( self.dequeue_Ls, x_axis ) ] ), "Proportions list and dequeue_Ls list should remain in identical order to maintain trackability."
gradients = __renormalize(proportions, range1=[self.to_value, self.from_value])
assert all( [ self.to_value <= x <= self.from_value for x in gradients ] ), "Gradients should be between "+str(self.to_value)+" and "+str(self.from_value)+" only. Found: "+str(gradients)
return gradients
props = __get_proportion_list() # 2a. proportional positions (as ratio)
for i in range(len(self.dequeue_Ls)):
c = self.dequeue_Ls[i]
p = props[i]
c.apply_gradient(p)
def __rotate(self):
steps = int( math.floor(len(self.dequeue_Ls) / self.quantity_of_intervals) )
for i in range( steps ):
end = self.dequeue_Ls.pop() # Pop end (right).
self.dequeue_Ls.appendleft( end ) # Insert end at start (left).
def get_next_sequence(self):
def __handle_sequence_count():
# handle index wrap around. Max = len(Ls)
if self.sequence_counter == self.quantity_of_intervals: # Reached end of loop, therefore reset/update counters.
self.loop_number += 1
self.sequence_counter = 1
else: # start or midway through loop, update counter.
self.sequence_counter +=1
if self.sequence_counter == 0 and self.loop_number == 0: # First ever call
self.__apply_gradient()
else: # All subsequent calls
self.__rotate()
self.__apply_gradient()
__handle_sequence_count()
return self.dequeue_Ls
|
calexil/FightstickDisplay
|
refs/heads/master
|
pyglet/window/mouse.py
|
1
|
# ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# Copyright (c) 2008-2021 pyglet contributors
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
"""Mouse constants and utilities for pyglet.window.
"""
class MouseStateHandler(dict):
"""Simple handler that tracks the state of buttons from the mouse. If a
button is pressed then this handler holds a True value for it.
For example::
>>> win = window.Window()
>>> mousebuttons = mouse.MouseStateHandler()
>>> win.push_handlers(mousebuttons)
# Hold down the "left" button...
>>> mousebuttons[mouse.LEFT]
True
>>> mousebuttons[mouse.RIGHT]
False
"""
def __init__(self):
self["x"] = 0
self["y"] = 0
def on_mouse_press(self, x, y, button, modifiers):
self[button] = True
def on_mouse_release(self, x, y, button, modifiers):
self[button] = False
def on_mouse_motion(self, x, y, dx, dy):
self["x"] = x
self["y"] = y
def on_mouse_drag(self, x, y, dx, dy, buttons, modifiers):
self["x"] = x
self["y"] = y
def __getitem__(self, key):
return self.get(key, False)
def buttons_string(buttons):
"""Return a string describing a set of active mouse buttons.
Example::
>>> buttons_string(LEFT | RIGHT)
'LEFT|RIGHT'
:Parameters:
`buttons` : int
Bitwise combination of mouse button constants.
:rtype: str
"""
button_names = []
if buttons & LEFT:
button_names.append('LEFT')
if buttons & MIDDLE:
button_names.append('MIDDLE')
if buttons & RIGHT:
button_names.append('RIGHT')
return '|'.join(button_names)
# Symbolic names for the mouse buttons
LEFT = 1 << 0
MIDDLE = 1 << 1
RIGHT = 1 << 2
|
pyspeckit/pyspeckit
|
refs/heads/master
|
pyspeckit/cubes/__init__.py
|
8
|
"""
:Author: Adam Ginsburg <[email protected]>
"""
from .SpectralCube import Cube,CubeStack
|
LarsFronius/ansible
|
refs/heads/devel
|
lib/ansible/modules/cloud/amazon/efs.py
|
51
|
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'curated'}
DOCUMENTATION = '''
---
module: efs
short_description: create and maintain EFS file systems
description:
- Module allows create, search and destroy Amazon EFS file systems
version_added: "2.2"
requirements: [ boto3 ]
author:
- "Ryan Sydnor (@ryansydnor)"
- "Artem Kazakov (@akazakov)"
options:
state:
description:
- Allows to create, search and destroy Amazon EFS file system
required: false
default: 'present'
choices: ['present', 'absent']
name:
description:
- Creation Token of Amazon EFS file system. Required for create. Either name or ID required for delete.
required: false
default: None
id:
description:
- ID of Amazon EFS. Either name or ID required for delete.
required: false
default: None
performance_mode:
description:
- File system's performance mode to use. Only takes effect during creation.
required: false
default: 'general_purpose'
choices: ['general_purpose', 'max_io']
tags:
description:
- "List of tags of Amazon EFS. Should be defined as dictionary
In case of 'present' state with list of tags and existing EFS (matched by 'name'), tags of EFS will be replaced with provided data."
required: false
default: None
targets:
description:
- "List of mounted targets. It should be a list of dictionaries, every dictionary should include next attributes:
- subnet_id - Mandatory. The ID of the subnet to add the mount target in.
- ip_address - Optional. A valid IPv4 address within the address range of the specified subnet.
- security_groups - Optional. List of security group IDs, of the form 'sg-xxxxxxxx'. These must be for the same VPC as subnet specified
This data may be modified for existing EFS using state 'present' and new list of mount targets."
required: false
default: None
wait:
description:
- "In case of 'present' state should wait for EFS 'available' life cycle state (of course, if current state not 'deleting' or 'deleted')
In case of 'absent' state should wait for EFS 'deleted' life cycle state"
required: false
default: "no"
choices: ["yes", "no"]
wait_timeout:
description:
- How long the module should wait (in seconds) for desired state before returning. Zero means wait as long as necessary.
required: false
default: 0
extends_documentation_fragment:
- aws
'''
EXAMPLES = '''
# EFS provisioning
- efs:
state: present
name: myTestEFS
tags:
name: myTestNameTag
purpose: file-storage
targets:
- subnet_id: subnet-748c5d03
security_groups: [ "sg-1a2b3c4d" ]
# Modifying EFS data
- efs:
state: present
name: myTestEFS
tags:
name: myAnotherTestTag
targets:
- subnet_id: subnet-7654fdca
security_groups: [ "sg-4c5d6f7a" ]
# Deleting EFS
- efs:
state: absent
name: myTestEFS
'''
RETURN = '''
creation_time:
description: timestamp of creation date
returned: always
type: string
sample: "2015-11-16 07:30:57-05:00"
creation_token:
description: EFS creation token
returned: always
type: string
sample: "console-88609e04-9a0e-4a2e-912c-feaa99509961"
file_system_id:
description: ID of the file system
returned: always
type: string
sample: "fs-xxxxxxxx"
life_cycle_state:
description: state of the EFS file system
returned: always
type: string
sample: "creating, available, deleting, deleted"
mount_point:
description: url of file system
returned: always
type: string
sample: ".fs-xxxxxxxx.efs.us-west-2.amazonaws.com:/"
mount_targets:
description: list of mount targets
returned: always
type: list
sample:
[
{
"file_system_id": "fs-a7ad440e",
"ip_address": "172.31.17.173",
"life_cycle_state": "available",
"mount_target_id": "fsmt-d8907871",
"network_interface_id": "eni-6e387e26",
"owner_id": "740748460359",
"security_groups": [
"sg-a30b22c6"
],
"subnet_id": "subnet-e265c895"
},
...
]
name:
description: name of the file system
returned: always
type: string
sample: "my-efs"
number_of_mount_targets:
description: the number of targets mounted
returned: always
type: int
sample: 3
owner_id:
description: AWS account ID of EFS owner
returned: always
type: string
sample: "XXXXXXXXXXXX"
size_in_bytes:
description: size of the file system in bytes as of a timestamp
returned: always
type: dict
sample:
{
"timestamp": "2015-12-21 13:59:59-05:00",
"value": 12288
}
performance_mode:
description: performance mode of the file system
returned: always
type: string
sample: "generalPurpose"
tags:
description: tags on the efs instance
returned: always
type: dict
sample:
{
"name": "my-efs",
"key": "Value"
}
'''
import sys
from time import sleep
from time import time as timestamp
from collections import defaultdict
try:
from botocore.exceptions import ClientError
import boto3
HAS_BOTO3 = True
except ImportError as e:
HAS_BOTO3 = False
class EFSConnection(object):
DEFAULT_WAIT_TIMEOUT_SECONDS = 0
STATE_CREATING = 'creating'
STATE_AVAILABLE = 'available'
STATE_DELETING = 'deleting'
STATE_DELETED = 'deleted'
def __init__(self, module, region, **aws_connect_params):
try:
self.connection = boto3_conn(module, conn_type='client',
resource='efs', region=region,
**aws_connect_params)
except Exception as e:
module.fail_json(msg="Failed to connect to AWS: %s" % str(e))
self.region = region
self.wait = module.params.get('wait')
self.wait_timeout = module.params.get('wait_timeout')
def get_file_systems(self, **kwargs):
"""
Returns generator of file systems including all attributes of FS
"""
items = iterate_all(
'FileSystems',
self.connection.describe_file_systems,
**kwargs
)
for item in items:
item['Name'] = item['CreationToken']
item['CreationTime'] = str(item['CreationTime'])
"""
Suffix of network path to be used as NFS device for mount. More detail here:
http://docs.aws.amazon.com/efs/latest/ug/gs-step-three-connect-to-ec2-instance.html
"""
item['MountPoint'] = '.%s.efs.%s.amazonaws.com:/' % (item['FileSystemId'], self.region)
if 'Timestamp' in item['SizeInBytes']:
item['SizeInBytes']['Timestamp'] = str(item['SizeInBytes']['Timestamp'])
if item['LifeCycleState'] == self.STATE_AVAILABLE:
item['Tags'] = self.get_tags(FileSystemId=item['FileSystemId'])
item['MountTargets'] = list(self.get_mount_targets(FileSystemId=item['FileSystemId']))
else:
item['Tags'] = {}
item['MountTargets'] = []
yield item
def get_tags(self, **kwargs):
"""
Returns tag list for selected instance of EFS
"""
tags = iterate_all(
'Tags',
self.connection.describe_tags,
**kwargs
)
return dict((tag['Key'], tag['Value']) for tag in tags)
def get_mount_targets(self, **kwargs):
"""
Returns mount targets for selected instance of EFS
"""
targets = iterate_all(
'MountTargets',
self.connection.describe_mount_targets,
**kwargs
)
for target in targets:
if target['LifeCycleState'] == self.STATE_AVAILABLE:
target['SecurityGroups'] = list(self.get_security_groups(
MountTargetId=target['MountTargetId']
))
else:
target['SecurityGroups'] = []
yield target
def get_security_groups(self, **kwargs):
"""
Returns security groups for selected instance of EFS
"""
return iterate_all(
'SecurityGroups',
self.connection.describe_mount_target_security_groups,
**kwargs
)
def get_file_system_id(self, name):
"""
Returns ID of instance by instance name
"""
info = first_or_default(iterate_all(
'FileSystems',
self.connection.describe_file_systems,
CreationToken=name
))
return info and info['FileSystemId'] or None
def get_file_system_state(self, name, file_system_id=None):
"""
Returns state of filesystem by EFS id/name
"""
info = first_or_default(iterate_all(
'FileSystems',
self.connection.describe_file_systems,
CreationToken=name,
FileSystemId=file_system_id
))
return info and info['LifeCycleState'] or self.STATE_DELETED
def get_mount_targets_in_state(self, file_system_id, states=None):
"""
Returns states of mount targets of selected EFS with selected state(s) (optional)
"""
targets = iterate_all(
'MountTargets',
self.connection.describe_mount_targets,
FileSystemId=file_system_id
)
if states:
if not isinstance(states, list):
states = [states]
targets = filter(lambda target: target['LifeCycleState'] in states, targets)
return list(targets)
def create_file_system(self, name, performance_mode):
"""
Creates new filesystem with selected name
"""
changed = False
state = self.get_file_system_state(name)
if state in [self.STATE_DELETING, self.STATE_DELETED]:
wait_for(
lambda: self.get_file_system_state(name),
self.STATE_DELETED
)
self.connection.create_file_system(CreationToken=name, PerformanceMode=performance_mode)
changed = True
# we always wait for the state to be available when creating.
# if we try to take any actions on the file system before it's available
# we'll throw errors
wait_for(
lambda: self.get_file_system_state(name),
self.STATE_AVAILABLE,
self.wait_timeout
)
return changed
def converge_file_system(self, name, tags, targets):
"""
Change attributes (mount targets and tags) of filesystem by name
"""
result = False
fs_id = self.get_file_system_id(name)
if tags is not None:
tags_to_create, _, tags_to_delete = dict_diff(self.get_tags(FileSystemId=fs_id), tags)
if tags_to_delete:
self.connection.delete_tags(
FileSystemId=fs_id,
TagKeys=[item[0] for item in tags_to_delete]
)
result = True
if tags_to_create:
self.connection.create_tags(
FileSystemId=fs_id,
Tags=[{'Key': item[0], 'Value': item[1]} for item in tags_to_create]
)
result = True
if targets is not None:
incomplete_states = [self.STATE_CREATING, self.STATE_DELETING]
wait_for(
lambda: len(self.get_mount_targets_in_state(fs_id, incomplete_states)),
0
)
index_by_subnet_id = lambda items: dict((item['SubnetId'], item) for item in items)
current_targets = index_by_subnet_id(self.get_mount_targets(FileSystemId=fs_id))
targets = index_by_subnet_id(targets)
targets_to_create, intersection, targets_to_delete = dict_diff(current_targets,
targets, True)
""" To modify mount target it should be deleted and created again """
changed = filter(
lambda sid: not targets_equal(['SubnetId', 'IpAddress', 'NetworkInterfaceId'],
current_targets[sid], targets[sid]), intersection)
targets_to_delete = list(targets_to_delete) + changed
targets_to_create = list(targets_to_create) + changed
if targets_to_delete:
for sid in targets_to_delete:
self.connection.delete_mount_target(
MountTargetId=current_targets[sid]['MountTargetId']
)
wait_for(
lambda: len(self.get_mount_targets_in_state(fs_id, incomplete_states)),
0
)
result = True
if targets_to_create:
for sid in targets_to_create:
self.connection.create_mount_target(
FileSystemId=fs_id,
**targets[sid]
)
wait_for(
lambda: len(self.get_mount_targets_in_state(fs_id, incomplete_states)),
0,
self.wait_timeout
)
result = True
security_groups_to_update = filter(
lambda sid: 'SecurityGroups' in targets[sid] and
current_targets[sid]['SecurityGroups'] != targets[sid]['SecurityGroups'],
intersection
)
if security_groups_to_update:
for sid in security_groups_to_update:
self.connection.modify_mount_target_security_groups(
MountTargetId=current_targets[sid]['MountTargetId'],
SecurityGroups=targets[sid]['SecurityGroups']
)
result = True
return result
def delete_file_system(self, name, file_system_id=None):
"""
Removes EFS instance by id/name
"""
result = False
state = self.get_file_system_state(name, file_system_id)
if state in [self.STATE_CREATING, self.STATE_AVAILABLE]:
wait_for(
lambda: self.get_file_system_state(name),
self.STATE_AVAILABLE
)
if not file_system_id:
file_system_id = self.get_file_system_id(name)
self.delete_mount_targets(file_system_id)
self.connection.delete_file_system(FileSystemId=file_system_id)
result = True
if self.wait:
wait_for(
lambda: self.get_file_system_state(name),
self.STATE_DELETED,
self.wait_timeout
)
return result
def delete_mount_targets(self, file_system_id):
"""
Removes mount targets by EFS id
"""
wait_for(
lambda: len(self.get_mount_targets_in_state(file_system_id, self.STATE_CREATING)),
0
)
targets = self.get_mount_targets_in_state(file_system_id, self.STATE_AVAILABLE)
for target in targets:
self.connection.delete_mount_target(MountTargetId=target['MountTargetId'])
wait_for(
lambda: len(self.get_mount_targets_in_state(file_system_id, self.STATE_DELETING)),
0
)
return len(targets) > 0
def iterate_all(attr, map_method, **kwargs):
"""
Method creates iterator from boto result set
"""
args = dict((key, value) for (key, value) in kwargs.items() if value is not None)
wait = 1
while True:
try:
data = map_method(**args)
for elm in data[attr]:
yield elm
if 'NextMarker' in data:
                args['Marker'] = data['NextMarker']
continue
break
except ClientError as e:
if e.response['Error']['Code'] == "ThrottlingException" and wait < 600:
sleep(wait)
wait = wait * 2
continue
else:
raise
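# Illustrative usage (sketch, not part of the original module; assumes a boto3 EFS client
# whose paginated describe_file_systems call returns a 'FileSystems' list and a 'NextMarker'):
#
#   for fs in iterate_all('FileSystems', connection.describe_file_systems, CreationToken=name):
#       process(fs['FileSystemId'])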
def targets_equal(keys, a, b):
"""
    Compares two mount targets by the specified attributes
"""
for key in keys:
if key in b and a[key] != b[key]:
return False
return True
def dict_diff(dict1, dict2, by_key=False):
"""
Helper method to calculate difference of two dictionaries
"""
keys1 = set(dict1.keys() if by_key else dict1.items())
keys2 = set(dict2.keys() if by_key else dict2.items())
intersection = keys1 & keys2
return keys2 ^ intersection, intersection, keys1 ^ intersection
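# Illustrative behaviour (not part of the original module):
#   dict_diff({'a': 1}, {'a': 2, 'b': 3})
#   -> ({('a', 2), ('b', 3)}, set(), {('a', 1)})        # (to create, unchanged, to delete)
#   dict_diff({'a': 1}, {'a': 2, 'b': 3}, by_key=True)
#   -> ({'b'}, {'a'}, set())                            # compares key sets only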
def first_or_default(items, default=None):
"""
    Helper method to fetch the first element of an iterable (if it exists)
"""
for item in items:
return item
return default
def wait_for(callback, value, timeout=EFSConnection.DEFAULT_WAIT_TIMEOUT_SECONDS):
"""
Helper method to wait for desired value returned by callback method
"""
wait_start = timestamp()
while True:
if callback() != value:
if timeout != 0 and (timestamp() - wait_start > timeout):
raise RuntimeError('Wait timeout exceeded (' + str(timeout) + ' sec)')
else:
sleep(5)
continue
break
def main():
"""
Module action handler
"""
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
state=dict(required=False, type='str', choices=["present", "absent"], default="present"),
id=dict(required=False, type='str', default=None),
name=dict(required=False, type='str', default=None),
tags=dict(required=False, type="dict", default={}),
targets=dict(required=False, type="list", default=[]),
performance_mode=dict(required=False, type='str', choices=["general_purpose", "max_io"], default="general_purpose"),
wait=dict(required=False, type="bool", default=False),
wait_timeout=dict(required=False, type="int", default=0)
))
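    # Illustrative playbook task (sketch only; the module name and all values are hypothetical):
    #
    #   - efs:
    #       state: present
    #       name: myTestEFS
    #       tags:
    #         purpose: file-storage
    #       targets:
    #         - subnet_id: subnet-12345678
    #           security_groups: ["sg-87654321"]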
module = AnsibleModule(argument_spec=argument_spec)
if not HAS_BOTO3:
module.fail_json(msg='boto3 required for this module')
region, _, aws_connect_params = get_aws_connection_info(module, boto3=True)
connection = EFSConnection(module, region, **aws_connect_params)
name = module.params.get('name')
fs_id = module.params.get('id')
tags = module.params.get('tags')
target_translations = {
'ip_address': 'IpAddress',
'security_groups': 'SecurityGroups',
'subnet_id': 'SubnetId'
}
targets = [dict((target_translations[key], value) for (key, value) in x.items()) for x in module.params.get('targets')]
performance_mode_translations = {
'general_purpose': 'generalPurpose',
'max_io': 'maxIO'
}
performance_mode = performance_mode_translations[module.params.get('performance_mode')]
changed = False
state = str(module.params.get('state')).lower()
if state == 'present':
if not name:
module.fail_json(msg='Name parameter is required for create')
changed = connection.create_file_system(name, performance_mode)
changed = connection.converge_file_system(name=name, tags=tags, targets=targets) or changed
result = first_or_default(connection.get_file_systems(CreationToken=name))
elif state == 'absent':
if not name and not fs_id:
module.fail_json(msg='Either name or id parameter is required for delete')
changed = connection.delete_file_system(name, fs_id)
result = None
if result:
result = camel_dict_to_snake_dict(result)
module.exit_json(changed=changed, efs=result)
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
if __name__ == '__main__':
main()
|
CyanogenMod/motorola-kernel-stingray
|
refs/heads/cm-10.1
|
tools/perf/scripts/python/sctop.py
|
11180
|
# system call top
# (c) 2010, Tom Zanussi <[email protected]>
# Licensed under the terms of the GNU GPL License version 2
#
# Periodically displays system-wide system call totals, broken down by
# syscall. If a [comm] arg is specified, only syscalls called by
# [comm] are displayed. If an [interval] arg is specified, the display
# will be refreshed every [interval] seconds. The default interval is
# 3 seconds.
import os, sys, thread, time
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
usage = "perf script -s sctop.py [comm] [interval]\n";
for_comm = None
default_interval = 3
interval = default_interval
if len(sys.argv) > 3:
sys.exit(usage)
if len(sys.argv) > 2:
for_comm = sys.argv[1]
interval = int(sys.argv[2])
elif len(sys.argv) > 1:
try:
interval = int(sys.argv[1])
except ValueError:
for_comm = sys.argv[1]
interval = default_interval
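# Example invocations (illustrative):
#   perf script -s sctop.py              # all comms, refresh every 3 seconds
#   perf script -s sctop.py firefox 5    # only syscalls made by 'firefox', refresh every 5 seconds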
syscalls = autodict()
def trace_begin():
thread.start_new_thread(print_syscall_totals, (interval,))
pass
def raw_syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, args):
if for_comm is not None:
if common_comm != for_comm:
return
try:
syscalls[id] += 1
except TypeError:
syscalls[id] = 1
def print_syscall_totals(interval):
while 1:
clear_term()
if for_comm is not None:
print "\nsyscall events for %s:\n\n" % (for_comm),
else:
print "\nsyscall events:\n\n",
print "%-40s %10s\n" % ("event", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"----------"),
for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
reverse = True):
try:
print "%-40s %10d\n" % (syscall_name(id), val),
except TypeError:
pass
syscalls.clear()
time.sleep(interval)
|
nikushx/AlexaUrbanDictionaryWOTD
|
refs/heads/master
|
libraries/requests/utils.py
|
177
|
# -*- coding: utf-8 -*-
"""
requests.utils
~~~~~~~~~~~~~~
This module provides utility functions that are used within Requests
that are also useful for external consumption.
"""
import cgi
import codecs
import collections
import io
import os
import platform
import re
import sys
import socket
import struct
import warnings
from . import __version__
from . import certs
from .compat import parse_http_list as _parse_list_header
from .compat import (quote, urlparse, bytes, str, OrderedDict, unquote, is_py2,
builtin_str, getproxies, proxy_bypass, urlunparse,
basestring)
from .cookies import RequestsCookieJar, cookiejar_from_dict
from .structures import CaseInsensitiveDict
from .exceptions import InvalidURL, FileModeWarning
_hush_pyflakes = (RequestsCookieJar,)
NETRC_FILES = ('.netrc', '_netrc')
DEFAULT_CA_BUNDLE_PATH = certs.where()
def dict_to_sequence(d):
"""Returns an internal sequence dictionary update."""
if hasattr(d, 'items'):
d = d.items()
return d
def super_len(o):
total_length = 0
current_position = 0
if hasattr(o, '__len__'):
total_length = len(o)
elif hasattr(o, 'len'):
total_length = o.len
elif hasattr(o, 'getvalue'):
# e.g. BytesIO, cStringIO.StringIO
total_length = len(o.getvalue())
elif hasattr(o, 'fileno'):
try:
fileno = o.fileno()
except io.UnsupportedOperation:
pass
else:
total_length = os.fstat(fileno).st_size
# Having used fstat to determine the file length, we need to
# confirm that this file was opened up in binary mode.
if 'b' not in o.mode:
warnings.warn((
"Requests has determined the content-length for this "
"request using the binary size of the file: however, the "
"file has been opened in text mode (i.e. without the 'b' "
"flag in the mode). This may lead to an incorrect "
"content-length. In Requests 3.0, support will be removed "
"for files in text mode."),
FileModeWarning
)
if hasattr(o, 'tell'):
current_position = o.tell()
return max(0, total_length - current_position)
def get_netrc_auth(url, raise_errors=False):
"""Returns the Requests tuple auth for a given url from netrc."""
try:
from netrc import netrc, NetrcParseError
netrc_path = None
for f in NETRC_FILES:
try:
loc = os.path.expanduser('~/{0}'.format(f))
except KeyError:
# os.path.expanduser can fail when $HOME is undefined and
# getpwuid fails. See http://bugs.python.org/issue20164 &
# https://github.com/kennethreitz/requests/issues/1846
return
if os.path.exists(loc):
netrc_path = loc
break
# Abort early if there isn't one.
if netrc_path is None:
return
ri = urlparse(url)
# Strip port numbers from netloc. This weird `if...encode`` dance is
# used for Python 3.2, which doesn't support unicode literals.
splitstr = b':'
if isinstance(url, str):
splitstr = splitstr.decode('ascii')
host = ri.netloc.split(splitstr)[0]
try:
_netrc = netrc(netrc_path).authenticators(host)
if _netrc:
# Return with login / password
login_i = (0 if _netrc[0] else 1)
return (_netrc[login_i], _netrc[2])
except (NetrcParseError, IOError):
# If there was a parsing error or a permissions issue reading the file,
# we'll just skip netrc auth unless explicitly asked to raise errors.
if raise_errors:
raise
# AppEngine hackiness.
except (ImportError, AttributeError):
pass
def guess_filename(obj):
"""Tries to guess the filename of the given object."""
name = getattr(obj, 'name', None)
if (name and isinstance(name, basestring) and name[0] != '<' and
name[-1] != '>'):
return os.path.basename(name)
def from_key_val_list(value):
"""Take an object and test to see if it can be represented as a
    dictionary. If it can be, return an OrderedDict, e.g.,
::
>>> from_key_val_list([('key', 'val')])
OrderedDict([('key', 'val')])
>>> from_key_val_list('string')
ValueError: need more than 1 value to unpack
>>> from_key_val_list({'key': 'val'})
OrderedDict([('key', 'val')])
"""
if value is None:
return None
if isinstance(value, (str, bytes, bool, int)):
raise ValueError('cannot encode objects that are not 2-tuples')
return OrderedDict(value)
def to_key_val_list(value):
"""Take an object and test to see if it can be represented as a
dictionary. If it can be, return a list of tuples, e.g.,
::
>>> to_key_val_list([('key', 'val')])
[('key', 'val')]
>>> to_key_val_list({'key': 'val'})
[('key', 'val')]
>>> to_key_val_list('string')
ValueError: cannot encode objects that are not 2-tuples.
"""
if value is None:
return None
if isinstance(value, (str, bytes, bool, int)):
raise ValueError('cannot encode objects that are not 2-tuples')
if isinstance(value, collections.Mapping):
value = value.items()
return list(value)
# From mitsuhiko/werkzeug (used with permission).
def parse_list_header(value):
"""Parse lists as described by RFC 2068 Section 2.
In particular, parse comma-separated lists where the elements of
the list may include quoted-strings. A quoted-string could
contain a comma. A non-quoted string could have quotes in the
middle. Quotes are removed automatically after parsing.
It basically works like :func:`parse_set_header` just that items
may appear multiple times and case sensitivity is preserved.
The return value is a standard :class:`list`:
>>> parse_list_header('token, "quoted value"')
['token', 'quoted value']
To create a header from the :class:`list` again, use the
:func:`dump_header` function.
:param value: a string with a list header.
:return: :class:`list`
"""
result = []
for item in _parse_list_header(value):
if item[:1] == item[-1:] == '"':
item = unquote_header_value(item[1:-1])
result.append(item)
return result
# From mitsuhiko/werkzeug (used with permission).
def parse_dict_header(value):
"""Parse lists of key, value pairs as described by RFC 2068 Section 2 and
convert them into a python dict:
>>> d = parse_dict_header('foo="is a fish", bar="as well"')
>>> type(d) is dict
True
>>> sorted(d.items())
[('bar', 'as well'), ('foo', 'is a fish')]
If there is no value for a key it will be `None`:
>>> parse_dict_header('key_without_value')
{'key_without_value': None}
To create a header from the :class:`dict` again, use the
:func:`dump_header` function.
:param value: a string with a dict header.
:return: :class:`dict`
"""
result = {}
for item in _parse_list_header(value):
if '=' not in item:
result[item] = None
continue
name, value = item.split('=', 1)
if value[:1] == value[-1:] == '"':
value = unquote_header_value(value[1:-1])
result[name] = value
return result
# From mitsuhiko/werkzeug (used with permission).
def unquote_header_value(value, is_filename=False):
r"""Unquotes a header value. (Reversal of :func:`quote_header_value`).
This does not use the real unquoting but what browsers are actually
using for quoting.
:param value: the header value to unquote.
"""
if value and value[0] == value[-1] == '"':
# this is not the real unquoting, but fixing this so that the
# RFC is met will result in bugs with internet explorer and
# probably some other browsers as well. IE for example is
# uploading files with "C:\foo\bar.txt" as filename
value = value[1:-1]
# if this is a filename and the starting characters look like
# a UNC path, then just return the value without quotes. Using the
# replace sequence below on a UNC path has the effect of turning
# the leading double slash into a single slash and then
# _fix_ie_filename() doesn't work correctly. See #458.
if not is_filename or value[:2] != '\\\\':
return value.replace('\\\\', '\\').replace('\\"', '"')
return value
def dict_from_cookiejar(cj):
"""Returns a key/value dictionary from a CookieJar.
:param cj: CookieJar object to extract cookies from.
"""
cookie_dict = {}
for cookie in cj:
cookie_dict[cookie.name] = cookie.value
return cookie_dict
def add_dict_to_cookiejar(cj, cookie_dict):
"""Returns a CookieJar from a key/value dictionary.
:param cj: CookieJar to insert cookies into.
:param cookie_dict: Dict of key/values to insert into CookieJar.
"""
cj2 = cookiejar_from_dict(cookie_dict)
cj.update(cj2)
return cj
def get_encodings_from_content(content):
"""Returns encodings from given content string.
:param content: bytestring to extract encodings from.
"""
warnings.warn((
'In requests 3.0, get_encodings_from_content will be removed. For '
'more information, please see the discussion on issue #2266. (This'
' warning should only appear once.)'),
DeprecationWarning)
charset_re = re.compile(r'<meta.*?charset=["\']*(.+?)["\'>]', flags=re.I)
pragma_re = re.compile(r'<meta.*?content=["\']*;?charset=(.+?)["\'>]', flags=re.I)
xml_re = re.compile(r'^<\?xml.*?encoding=["\']*(.+?)["\'>]')
return (charset_re.findall(content) +
pragma_re.findall(content) +
xml_re.findall(content))
def get_encoding_from_headers(headers):
"""Returns encodings from given HTTP Header Dict.
:param headers: dictionary to extract encoding from.
"""
content_type = headers.get('content-type')
if not content_type:
return None
content_type, params = cgi.parse_header(content_type)
if 'charset' in params:
return params['charset'].strip("'\"")
if 'text' in content_type:
return 'ISO-8859-1'
def stream_decode_response_unicode(iterator, r):
"""Stream decodes a iterator."""
if r.encoding is None:
for item in iterator:
yield item
return
decoder = codecs.getincrementaldecoder(r.encoding)(errors='replace')
for chunk in iterator:
rv = decoder.decode(chunk)
if rv:
yield rv
rv = decoder.decode(b'', final=True)
if rv:
yield rv
def iter_slices(string, slice_length):
"""Iterate over slices of a string."""
pos = 0
while pos < len(string):
yield string[pos:pos + slice_length]
pos += slice_length
def get_unicode_from_response(r):
"""Returns the requested content back in unicode.
:param r: Response object to get unicode content from.
    Tries:
1. charset from content-type
2. fall back and replace all unicode characters
"""
warnings.warn((
'In requests 3.0, get_unicode_from_response will be removed. For '
'more information, please see the discussion on issue #2266. (This'
' warning should only appear once.)'),
DeprecationWarning)
tried_encodings = []
# Try charset from content-type
encoding = get_encoding_from_headers(r.headers)
if encoding:
try:
return str(r.content, encoding)
except UnicodeError:
tried_encodings.append(encoding)
# Fall back:
try:
return str(r.content, encoding, errors='replace')
except TypeError:
return r.content
# The unreserved URI characters (RFC 3986)
UNRESERVED_SET = frozenset(
"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
+ "0123456789-._~")
def unquote_unreserved(uri):
"""Un-escape any percent-escape sequences in a URI that are unreserved
characters. This leaves all reserved, illegal and non-ASCII bytes encoded.
"""
parts = uri.split('%')
for i in range(1, len(parts)):
h = parts[i][0:2]
if len(h) == 2 and h.isalnum():
try:
c = chr(int(h, 16))
except ValueError:
raise InvalidURL("Invalid percent-escape sequence: '%s'" % h)
if c in UNRESERVED_SET:
parts[i] = c + parts[i][2:]
else:
parts[i] = '%' + parts[i]
else:
parts[i] = '%' + parts[i]
return ''.join(parts)
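# Illustrative behaviour (not part of the original module):
#   unquote_unreserved('http://example.com/%7Euser%2Ffile')
#   -> 'http://example.com/~user%2Ffile'    # '~' (unreserved) is decoded, '/' (%2F) stays encoded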
def requote_uri(uri):
"""Re-quote the given URI.
This function passes the given URI through an unquote/quote cycle to
ensure that it is fully and consistently quoted.
"""
safe_with_percent = "!#$%&'()*+,/:;=?@[]~"
safe_without_percent = "!#$&'()*+,/:;=?@[]~"
try:
# Unquote only the unreserved characters
# Then quote only illegal characters (do not quote reserved,
# unreserved, or '%')
return quote(unquote_unreserved(uri), safe=safe_with_percent)
except InvalidURL:
# We couldn't unquote the given URI, so let's try quoting it, but
# there may be unquoted '%'s in the URI. We need to make sure they're
# properly quoted so they do not cause issues elsewhere.
return quote(uri, safe=safe_without_percent)
def address_in_network(ip, net):
"""
    This function allows you to check if an IP belongs to a network subnet
Example: returns True if ip = 192.168.1.1 and net = 192.168.1.0/24
returns False if ip = 192.168.1.1 and net = 192.168.100.0/24
"""
ipaddr = struct.unpack('=L', socket.inet_aton(ip))[0]
netaddr, bits = net.split('/')
netmask = struct.unpack('=L', socket.inet_aton(dotted_netmask(int(bits))))[0]
network = struct.unpack('=L', socket.inet_aton(netaddr))[0] & netmask
return (ipaddr & netmask) == (network & netmask)
def dotted_netmask(mask):
"""
Converts mask from /xx format to xxx.xxx.xxx.xxx
Example: if mask is 24 function returns 255.255.255.0
"""
bits = 0xffffffff ^ (1 << 32 - mask) - 1
return socket.inet_ntoa(struct.pack('>I', bits))
def is_ipv4_address(string_ip):
try:
socket.inet_aton(string_ip)
except socket.error:
return False
return True
def is_valid_cidr(string_network):
"""Very simple check of the cidr format in no_proxy variable"""
if string_network.count('/') == 1:
try:
mask = int(string_network.split('/')[1])
except ValueError:
return False
if mask < 1 or mask > 32:
return False
try:
socket.inet_aton(string_network.split('/')[0])
except socket.error:
return False
else:
return False
return True
def should_bypass_proxies(url):
"""
Returns whether we should bypass proxies or not.
"""
get_proxy = lambda k: os.environ.get(k) or os.environ.get(k.upper())
# First check whether no_proxy is defined. If it is, check that the URL
# we're getting isn't in the no_proxy list.
no_proxy = get_proxy('no_proxy')
netloc = urlparse(url).netloc
if no_proxy:
# We need to check whether we match here. We need to see if we match
# the end of the netloc, both with and without the port.
no_proxy = (
host for host in no_proxy.replace(' ', '').split(',') if host
)
ip = netloc.split(':')[0]
if is_ipv4_address(ip):
for proxy_ip in no_proxy:
if is_valid_cidr(proxy_ip):
if address_in_network(ip, proxy_ip):
return True
else:
for host in no_proxy:
if netloc.endswith(host) or netloc.split(':')[0].endswith(host):
# The URL does match something in no_proxy, so we don't want
# to apply the proxies on this URL.
return True
# If the system proxy settings indicate that this URL should be bypassed,
# don't proxy.
# The proxy_bypass function is incredibly buggy on OS X in early versions
# of Python 2.6, so allow this call to fail. Only catch the specific
# exceptions we've seen, though: this call failing in other ways can reveal
# legitimate problems.
try:
bypass = proxy_bypass(netloc)
except (TypeError, socket.gaierror):
bypass = False
if bypass:
return True
return False
def get_environ_proxies(url):
"""Return a dict of environment proxies."""
if should_bypass_proxies(url):
return {}
else:
return getproxies()
def select_proxy(url, proxies):
"""Select a proxy for the url, if applicable.
    :param url: The url being requested
:param proxies: A dictionary of schemes or schemes and hosts to proxy URLs
"""
proxies = proxies or {}
urlparts = urlparse(url)
proxy = proxies.get(urlparts.scheme+'://'+urlparts.hostname)
if proxy is None:
proxy = proxies.get(urlparts.scheme)
return proxy
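# Illustrative behaviour (not part of the original module):
#   select_proxy('http://example.org/path', {'http': 'http://proxy:3128'})
#   -> 'http://proxy:3128'    # no scheme+host entry matches, so the bare scheme entry is used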
def default_user_agent(name="python-requests"):
"""Return a string representing the default user agent."""
return '%s/%s' % (name, __version__)
def default_headers():
return CaseInsensitiveDict({
'User-Agent': default_user_agent(),
'Accept-Encoding': ', '.join(('gzip', 'deflate')),
'Accept': '*/*',
'Connection': 'keep-alive',
})
def parse_header_links(value):
"""Return a dict of parsed link headers proxies.
i.e. Link: <http:/.../front.jpeg>; rel=front; type="image/jpeg",<http://.../back.jpeg>; rel=back;type="image/jpeg"
"""
links = []
replace_chars = " '\""
for val in re.split(", *<", value):
try:
url, params = val.split(";", 1)
except ValueError:
url, params = val, ''
link = {}
link["url"] = url.strip("<> '\"")
for param in params.split(";"):
try:
key, value = param.split("=")
except ValueError:
break
link[key.strip(replace_chars)] = value.strip(replace_chars)
links.append(link)
return links
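# Illustrative behaviour (not part of the original module):
#   parse_header_links('<http://example.com/page2>; rel="next"')
#   -> [{'url': 'http://example.com/page2', 'rel': 'next'}]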
# Null bytes; no need to recreate these on each call to guess_json_utf
_null = '\x00'.encode('ascii') # encoding to ASCII for Python 3
_null2 = _null * 2
_null3 = _null * 3
def guess_json_utf(data):
# JSON always starts with two ASCII characters, so detection is as
# easy as counting the nulls and from their location and count
# determine the encoding. Also detect a BOM, if present.
sample = data[:4]
    if sample in (codecs.BOM_UTF32_LE, codecs.BOM_UTF32_BE):
return 'utf-32' # BOM included
if sample[:3] == codecs.BOM_UTF8:
return 'utf-8-sig' # BOM included, MS style (discouraged)
if sample[:2] in (codecs.BOM_UTF16_LE, codecs.BOM_UTF16_BE):
return 'utf-16' # BOM included
nullcount = sample.count(_null)
if nullcount == 0:
return 'utf-8'
if nullcount == 2:
if sample[::2] == _null2: # 1st and 3rd are null
return 'utf-16-be'
if sample[1::2] == _null2: # 2nd and 4th are null
return 'utf-16-le'
# Did not detect 2 valid UTF-16 ascii-range characters
if nullcount == 3:
if sample[:3] == _null3:
return 'utf-32-be'
if sample[1:] == _null3:
return 'utf-32-le'
# Did not detect a valid UTF-32 ascii-range character
return None
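# Illustrative behaviour (not part of the original module):
#   guess_json_utf(b'{"a": 1}')              -> 'utf-8'      # no null bytes in the first 4 bytes
#   guess_json_utf('{}'.encode('utf-16-le')) -> 'utf-16-le'  # 2nd and 4th bytes are null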
def prepend_scheme_if_needed(url, new_scheme):
'''Given a URL that may or may not have a scheme, prepend the given scheme.
Does not replace a present scheme with the one provided as an argument.'''
scheme, netloc, path, params, query, fragment = urlparse(url, new_scheme)
# urlparse is a finicky beast, and sometimes decides that there isn't a
# netloc present. Assume that it's being over-cautious, and switch netloc
# and path if urlparse decided there was no netloc.
if not netloc:
netloc, path = path, netloc
return urlunparse((scheme, netloc, path, params, query, fragment))
def get_auth_from_url(url):
"""Given a url with authentication components, extract them into a tuple of
username,password."""
parsed = urlparse(url)
try:
auth = (unquote(parsed.username), unquote(parsed.password))
except (AttributeError, TypeError):
auth = ('', '')
return auth
def to_native_string(string, encoding='ascii'):
"""
Given a string object, regardless of type, returns a representation of that
string in the native string type, encoding and decoding where necessary.
This assumes ASCII unless told otherwise.
"""
out = None
if isinstance(string, builtin_str):
out = string
else:
if is_py2:
out = string.encode(encoding)
else:
out = string.decode(encoding)
return out
def urldefragauth(url):
"""
Given a url remove the fragment and the authentication part
"""
scheme, netloc, path, params, query, fragment = urlparse(url)
# see func:`prepend_scheme_if_needed`
if not netloc:
netloc, path = path, netloc
netloc = netloc.rsplit('@', 1)[-1]
return urlunparse((scheme, netloc, path, params, query, ''))
|
akatsoulas/mozillians
|
refs/heads/master
|
mozillians/users/migrations/0025_auto_20171011_0859.py
|
3
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0024_auto_20171011_0320'),
]
operations = [
migrations.AddField(
model_name='idpprofile',
name='email',
field=models.EmailField(default=b'', max_length=254, blank=True),
),
migrations.AddField(
model_name='idpprofile',
name='privacy',
field=models.PositiveIntegerField(default=3, choices=[(3, 'Mozillians'), (4, 'Public')]),
),
migrations.AlterUniqueTogether(
name='idpprofile',
unique_together=set([('profile', 'type', 'email')]),
),
]
|
guewen/OpenUpgrade
|
refs/heads/master
|
addons/mass_mailing/models/mail_thread.py
|
65
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013-Today OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
import logging
from openerp import tools
from openerp.addons.mail.mail_message import decode
from openerp.addons.mail.mail_thread import decode_header
from openerp.osv import osv
_logger = logging.getLogger(__name__)
class MailThread(osv.AbstractModel):
""" Update MailThread to add the feature of bounced emails and replied emails
in message_process. """
_name = 'mail.thread'
_inherit = ['mail.thread']
def message_route_check_bounce(self, cr, uid, message, context=None):
""" Override to verify that the email_to is the bounce alias. If it is the
case, log the bounce, set the parent and related document as bounced and
return False to end the routing process. """
bounce_alias = self.pool['ir.config_parameter'].get_param(cr, uid, "mail.bounce.alias", context=context)
message_id = message.get('Message-Id')
email_from = decode_header(message, 'From')
email_to = decode_header(message, 'To')
# 0. Verify whether this is a bounced email (wrong destination,...) -> use it to collect data, such as dead leads
if bounce_alias in email_to:
bounce_match = tools.bounce_re.search(email_to)
if bounce_match:
bounced_model, bounced_thread_id = None, False
bounced_mail_id = bounce_match.group(1)
stat_ids = self.pool['mail.mail.statistics'].set_bounced(cr, uid, mail_mail_ids=[bounced_mail_id], context=context)
for stat in self.pool['mail.mail.statistics'].browse(cr, uid, stat_ids, context=context):
bounced_model = stat.model
bounced_thread_id = stat.res_id
_logger.info('Routing mail from %s to %s with Message-Id %s: bounced mail from mail %s, model: %s, thread_id: %s',
email_from, email_to, message_id, bounced_mail_id, bounced_model, bounced_thread_id)
if bounced_model and bounced_model in self.pool and hasattr(self.pool[bounced_model], 'message_receive_bounce') and bounced_thread_id:
self.pool[bounced_model].message_receive_bounce(cr, uid, [bounced_thread_id], mail_id=bounced_mail_id, context=context)
return False
return True
def message_route(self, cr, uid, message, message_dict, model=None, thread_id=None,
custom_values=None, context=None):
if not self.message_route_check_bounce(cr, uid, message, context=context):
return []
return super(MailThread, self).message_route(cr, uid, message, message_dict, model, thread_id, custom_values, context)
def message_receive_bounce(self, cr, uid, ids, mail_id=None, context=None):
"""Called by ``message_process`` when a bounce email (such as Undelivered
Mail Returned to Sender) is received for an existing thread. The default
        behavior is to check if an integer ``message_bounce`` column exists.
If it is the case, its content is incremented. """
if self._all_columns.get('message_bounce'):
for obj in self.browse(cr, uid, ids, context=context):
self.write(cr, uid, [obj.id], {'message_bounce': obj.message_bounce + 1}, context=context)
def message_route_process(self, cr, uid, message, message_dict, routes, context=None):
""" Override to update the parent mail statistics. The parent is found
by using the References header of the incoming message and looking for
matching message_id in mail.mail.statistics. """
if message.get('References'):
message_ids = [x.strip() for x in decode(message['References']).split()]
self.pool['mail.mail.statistics'].set_replied(cr, uid, mail_message_ids=message_ids, context=context)
return super(MailThread, self).message_route_process(cr, uid, message, message_dict, routes, context=context)
|
conan-io/conan
|
refs/heads/develop
|
conan/tools/microsoft/msbuild.py
|
1
|
import os
from conans.errors import ConanException
def msbuild_verbosity_cmd_line_arg(conanfile):
verbosity = conanfile.conf["tools.microsoft.msbuild:verbosity"]
if verbosity:
if verbosity not in ("Quiet", "Minimal", "Normal", "Detailed", "Diagnostic"):
raise ConanException("Unknown msbuild verbosity: {}".format(verbosity))
return '/verbosity:{}'.format(verbosity)
def msbuild_max_cpu_count_cmd_line_arg(conanfile):
max_cpu_count = conanfile.conf["tools.microsoft.msbuild:max_cpu_count"] or \
conanfile.conf["tools.build:processes"]
if max_cpu_count:
return "/m:{}".format(max_cpu_count)
def msbuild_arch(arch):
return {'x86': 'x86',
'x86_64': 'x64',
'armv7': 'ARM',
'armv8': 'ARM64'}.get(str(arch))
class MSBuild(object):
def __init__(self, conanfile):
self._conanfile = conanfile
self.build_type = conanfile.settings.get_safe("build_type")
# if platforms:
# msvc_arch.update(platforms)
arch = conanfile.settings.get_safe("arch")
msvc_arch = msbuild_arch(arch)
if conanfile.settings.get_safe("os") == "WindowsCE":
msvc_arch = conanfile.settings.get_safe("os.platform")
self.platform = msvc_arch
def command(self, sln):
cmd = ('msbuild "%s" /p:Configuration=%s /p:Platform=%s'
% (sln, self.build_type, self.platform))
verbosity = msbuild_verbosity_cmd_line_arg(self._conanfile)
if verbosity:
cmd += " {}".format(verbosity)
max_cpu_count = msbuild_max_cpu_count_cmd_line_arg(self._conanfile)
if max_cpu_count:
cmd += " {}".format(max_cpu_count)
return cmd
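    # Illustrative result (sketch; assumes build_type 'Release', arch 'x86_64', and no
    # verbosity/max_cpu_count conf values set):
    #   command("MyProject.sln")
    #   -> 'msbuild "MyProject.sln" /p:Configuration=Release /p:Platform=x64'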
def build(self, sln):
cmd = self.command(sln)
vcvars = os.path.join(self._conanfile.generators_folder, "conanvcvars")
self._conanfile.run(cmd, env=["conanbuildenv", vcvars])
@staticmethod
def get_version(_):
        raise NotImplementedError("get_version() method is not supported in MSBuild "
                                  "toolchain helper")
|
xiaojunwu/crosswalk-test-suite
|
refs/heads/master
|
wrt/wrt-securitymanu-tizen-tests/inst.xpk.py
|
187
|
#!/usr/bin/env python
import os
import shutil
import glob
import time
import sys
import subprocess
import string
from optparse import OptionParser, make_option
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
PKG_NAME = os.path.basename(SCRIPT_DIR)
PARAMETERS = None
XW_ENV = "export DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/5000/dbus/user_bus_socket"
SRC_DIR = "/home/app/content"
PKG_SRC_DIR = "%s/tct/opt/%s" % (SRC_DIR, PKG_NAME)
def doCMD(cmd):
# Do not need handle timeout in this short script, let tool do it
print "-->> \"%s\"" % cmd
output = []
cmd_return_code = 1
cmd_proc = subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)
while True:
output_line = cmd_proc.stdout.readline().strip("\r\n")
cmd_return_code = cmd_proc.poll()
if output_line == '' and cmd_return_code != None:
break
sys.stdout.write("%s\n" % output_line)
sys.stdout.flush()
output.append(output_line)
return (cmd_return_code, output)
def updateCMD(cmd=None):
if "xwalkctl" in cmd:
cmd = "su - app -c '%s;%s'" % (XW_ENV, cmd)
return cmd
def getPKGID(pkg_name=None):
if PARAMETERS.mode == "SDB":
cmd = "sdb -s %s shell %s" % (
PARAMETERS.device, updateCMD('xwalkctl'))
else:
cmd = "ssh %s \"%s\"" % (
PARAMETERS.device, updateCMD('xwalkctl'))
(return_code, output) = doCMD(cmd)
if return_code != 0:
return None
test_app_id = None
for line in output:
pkg_infos = line.split()
if len(pkg_infos) == 1:
continue
name = pkg_infos[1]
if pkg_name == name:
test_app_id = pkg_infos[0]
print test_app_id
break
return test_app_id
def doRemoteCMD(cmd=None):
if PARAMETERS.mode == "SDB":
cmd = "sdb -s %s shell %s" % (PARAMETERS.device, updateCMD(cmd))
else:
cmd = "ssh %s \"%s\"" % (PARAMETERS.device, updateCMD(cmd))
return doCMD(cmd)
def doRemoteCopy(src=None, dest=None):
if PARAMETERS.mode == "SDB":
cmd_prefix = "sdb -s %s push" % PARAMETERS.device
cmd = "%s %s %s" % (cmd_prefix, src, dest)
else:
cmd = "scp -r %s %s:/%s" % (src, PARAMETERS.device, dest)
(return_code, output) = doCMD(cmd)
doRemoteCMD("sync")
if return_code != 0:
return True
else:
return False
def uninstPKGs():
action_status = True
for root, dirs, files in os.walk(SCRIPT_DIR):
if root.endswith("mediasrc"):
continue
for file in files:
if file.endswith(".xpk"):
pkg_id = getPKGID(os.path.basename(os.path.splitext(file)[0]))
if not pkg_id:
action_status = False
continue
(return_code, output) = doRemoteCMD(
"xwalkctl -u %s" % pkg_id)
for line in output:
if "Failure" in line:
action_status = False
break
(return_code, output) = doRemoteCMD(
"rm -rf %s" % PKG_SRC_DIR)
if return_code != 0:
action_status = False
return action_status
def instPKGs():
action_status = True
(return_code, output) = doRemoteCMD(
"mkdir -p %s" % PKG_SRC_DIR)
if return_code != 0:
action_status = False
for root, dirs, files in os.walk(SCRIPT_DIR):
if root.endswith("mediasrc"):
continue
for file in files:
if file.endswith(".xpk"):
if not doRemoteCopy(os.path.join(root, file), "%s/%s" % (SRC_DIR, file)):
action_status = False
(return_code, output) = doRemoteCMD(
"xwalkctl -i %s/%s" % (SRC_DIR, file))
doRemoteCMD("rm -rf %s/%s" % (SRC_DIR, file))
for line in output:
if "Failure" in line:
action_status = False
break
# Do some special copy/delete... steps
'''
(return_code, output) = doRemoteCMD(
"mkdir -p %s/tests" % PKG_SRC_DIR)
if return_code != 0:
action_status = False
if not doRemoteCopy("specname/tests", "%s/tests" % PKG_SRC_DIR):
action_status = False
'''
return action_status
def main():
try:
usage = "usage: inst.py -i"
opts_parser = OptionParser(usage=usage)
opts_parser.add_option(
"-m", dest="mode", action="store", help="Specify mode")
opts_parser.add_option(
"-s", dest="device", action="store", help="Specify device")
opts_parser.add_option(
"-i", dest="binstpkg", action="store_true", help="Install package")
opts_parser.add_option(
"-u", dest="buninstpkg", action="store_true", help="Uninstall package")
global PARAMETERS
(PARAMETERS, args) = opts_parser.parse_args()
except Exception, e:
print "Got wrong option: %s, exit ..." % e
sys.exit(1)
if not PARAMETERS.mode:
PARAMETERS.mode = "SDB"
if PARAMETERS.mode == "SDB":
if not PARAMETERS.device:
(return_code, output) = doCMD("sdb devices")
for line in output:
if str.find(line, "\tdevice") != -1:
PARAMETERS.device = line.split("\t")[0]
break
else:
PARAMETERS.mode = "SSH"
if not PARAMETERS.device:
print "No device provided"
sys.exit(1)
if PARAMETERS.binstpkg and PARAMETERS.buninstpkg:
print "-i and -u are conflict"
sys.exit(1)
if PARAMETERS.buninstpkg:
if not uninstPKGs():
sys.exit(1)
else:
if not instPKGs():
sys.exit(1)
if __name__ == "__main__":
main()
sys.exit(0)
|
Planigle/planigle
|
refs/heads/master
|
node_modules/angular-cli/node_modules/node-gyp/gyp/pylib/gyp/generator/ninja.py
|
1284
|
# Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import collections
import copy
import hashlib
import json
import multiprocessing
import os.path
import re
import signal
import subprocess
import sys
import gyp
import gyp.common
from gyp.common import OrderedSet
import gyp.msvs_emulation
import gyp.MSVSUtil as MSVSUtil
import gyp.xcode_emulation
from cStringIO import StringIO
from gyp.common import GetEnvironFallback
import gyp.ninja_syntax as ninja_syntax
generator_default_variables = {
'EXECUTABLE_PREFIX': '',
'EXECUTABLE_SUFFIX': '',
'STATIC_LIB_PREFIX': 'lib',
'STATIC_LIB_SUFFIX': '.a',
'SHARED_LIB_PREFIX': 'lib',
# Gyp expects the following variables to be expandable by the build
# system to the appropriate locations. Ninja prefers paths to be
# known at gyp time. To resolve this, introduce special
# variables starting with $! and $| (which begin with a $ so gyp knows it
# should be treated specially, but is otherwise an invalid
# ninja/shell variable) that are passed to gyp here but expanded
# before writing out into the target .ninja files; see
# ExpandSpecial.
# $! is used for variables that represent a path and that can only appear at
# the start of a string, while $| is used for variables that can appear
# anywhere in a string.
'INTERMEDIATE_DIR': '$!INTERMEDIATE_DIR',
'SHARED_INTERMEDIATE_DIR': '$!PRODUCT_DIR/gen',
'PRODUCT_DIR': '$!PRODUCT_DIR',
'CONFIGURATION_NAME': '$|CONFIGURATION_NAME',
# Special variables that may be used by gyp 'rule' targets.
# We generate definitions for these variables on the fly when processing a
# rule.
'RULE_INPUT_ROOT': '${root}',
'RULE_INPUT_DIRNAME': '${dirname}',
'RULE_INPUT_PATH': '${source}',
'RULE_INPUT_EXT': '${ext}',
'RULE_INPUT_NAME': '${name}',
}
# Placates pylint.
generator_additional_non_configuration_keys = []
generator_additional_path_sections = []
generator_extra_sources_for_rules = []
generator_filelist_paths = None
generator_supports_multiple_toolsets = gyp.common.CrossCompileRequested()
def StripPrefix(arg, prefix):
if arg.startswith(prefix):
return arg[len(prefix):]
return arg
def QuoteShellArgument(arg, flavor):
"""Quote a string such that it will be interpreted as a single argument
by the shell."""
# Rather than attempting to enumerate the bad shell characters, just
# whitelist common OK ones and quote anything else.
if re.match(r'^[a-zA-Z0-9_=.\\/-]+$', arg):
return arg # No quoting necessary.
if flavor == 'win':
return gyp.msvs_emulation.QuoteForRspFile(arg)
return "'" + arg.replace("'", "'" + '"\'"' + "'") + "'"
def Define(d, flavor):
"""Takes a preprocessor define and returns a -D parameter that's ninja- and
shell-escaped."""
if flavor == 'win':
    # cl.exe replaces literal # characters with = in preprocessor definitions for
# some reason. Octal-encode to work around that.
d = d.replace('#', '\\%03o' % ord('#'))
return QuoteShellArgument(ninja_syntax.escape('-D' + d), flavor)
def AddArch(output, arch):
"""Adds an arch string to an output path."""
output, extension = os.path.splitext(output)
return '%s.%s%s' % (output, arch, extension)
class Target(object):
"""Target represents the paths used within a single gyp target.
Conceptually, building a single target A is a series of steps:
1) actions/rules/copies generates source/resources/etc.
2) compiles generates .o files
3) link generates a binary (library/executable)
4) bundle merges the above in a mac bundle
(Any of these steps can be optional.)
From a build ordering perspective, a dependent target B could just
depend on the last output of this series of steps.
But some dependent commands sometimes need to reach inside the box.
For example, when linking B it needs to get the path to the static
library generated by A.
This object stores those paths. To keep things simple, member
variables only store concrete paths to single files, while methods
compute derived values like "the last output of the target".
"""
def __init__(self, type):
# Gyp type ("static_library", etc.) of this target.
self.type = type
# File representing whether any input dependencies necessary for
# dependent actions have completed.
self.preaction_stamp = None
# File representing whether any input dependencies necessary for
# dependent compiles have completed.
self.precompile_stamp = None
# File representing the completion of actions/rules/copies, if any.
self.actions_stamp = None
# Path to the output of the link step, if any.
self.binary = None
# Path to the file representing the completion of building the bundle,
# if any.
self.bundle = None
# On Windows, incremental linking requires linking against all the .objs
# that compose a .lib (rather than the .lib itself). That list is stored
# here. In this case, we also need to save the compile_deps for the target,
    # so that the target that directly depends on the .objs can also depend
# on those.
self.component_objs = None
self.compile_deps = None
# Windows only. The import .lib is the output of a build step, but
# because dependents only link against the lib (not both the lib and the
# dll) we keep track of the import library here.
self.import_lib = None
def Linkable(self):
"""Return true if this is a target that can be linked against."""
return self.type in ('static_library', 'shared_library')
def UsesToc(self, flavor):
"""Return true if the target should produce a restat rule based on a TOC
file."""
# For bundles, the .TOC should be produced for the binary, not for
# FinalOutput(). But the naive approach would put the TOC file into the
# bundle, so don't do this for bundles for now.
if flavor == 'win' or self.bundle:
return False
return self.type in ('shared_library', 'loadable_module')
def PreActionInput(self, flavor):
"""Return the path, if any, that should be used as a dependency of
any dependent action step."""
if self.UsesToc(flavor):
return self.FinalOutput() + '.TOC'
return self.FinalOutput() or self.preaction_stamp
def PreCompileInput(self):
"""Return the path, if any, that should be used as a dependency of
any dependent compile step."""
return self.actions_stamp or self.precompile_stamp
def FinalOutput(self):
"""Return the last output of the target, which depends on all prior
steps."""
return self.bundle or self.binary or self.actions_stamp
# A small discourse on paths as used within the Ninja build:
# All files we produce (both at gyp and at build time) appear in the
# build directory (e.g. out/Debug).
#
# Paths within a given .gyp file are always relative to the directory
# containing the .gyp file. Call these "gyp paths". This includes
# sources as well as the starting directory a given gyp rule/action
# expects to be run from. We call the path from the source root to
# the gyp file the "base directory" within the per-.gyp-file
# NinjaWriter code.
#
# All paths as written into the .ninja files are relative to the build
# directory. Call these paths "ninja paths".
#
# We translate between these two notions of paths with two helper
# functions:
#
# - GypPathToNinja translates a gyp path (i.e. relative to the .gyp file)
# into the equivalent ninja path.
#
# - GypPathToUniqueOutput translates a gyp path into a ninja path to write
# an output file; the result can be namespaced such that it is unique
# to the input file name as well as the output target name.
class NinjaWriter(object):
def __init__(self, hash_for_rules, target_outputs, base_dir, build_dir,
output_file, toplevel_build, output_file_name, flavor,
toplevel_dir=None):
"""
base_dir: path from source root to directory containing this gyp file,
by gyp semantics, all input paths are relative to this
build_dir: path from source root to build output
toplevel_dir: path to the toplevel directory
"""
self.hash_for_rules = hash_for_rules
self.target_outputs = target_outputs
self.base_dir = base_dir
self.build_dir = build_dir
self.ninja = ninja_syntax.Writer(output_file)
self.toplevel_build = toplevel_build
self.output_file_name = output_file_name
self.flavor = flavor
self.abs_build_dir = None
if toplevel_dir is not None:
self.abs_build_dir = os.path.abspath(os.path.join(toplevel_dir,
build_dir))
self.obj_ext = '.obj' if flavor == 'win' else '.o'
if flavor == 'win':
# See docstring of msvs_emulation.GenerateEnvironmentFiles().
self.win_env = {}
for arch in ('x86', 'x64'):
self.win_env[arch] = 'environment.' + arch
# Relative path from build output dir to base dir.
build_to_top = gyp.common.InvertRelativePath(build_dir, toplevel_dir)
self.build_to_base = os.path.join(build_to_top, base_dir)
# Relative path from base dir to build dir.
base_to_top = gyp.common.InvertRelativePath(base_dir, toplevel_dir)
self.base_to_build = os.path.join(base_to_top, build_dir)
def ExpandSpecial(self, path, product_dir=None):
"""Expand specials like $!PRODUCT_DIR in |path|.
If |product_dir| is None, assumes the cwd is already the product
dir. Otherwise, |product_dir| is the relative path to the product
dir.
"""
PRODUCT_DIR = '$!PRODUCT_DIR'
if PRODUCT_DIR in path:
if product_dir:
path = path.replace(PRODUCT_DIR, product_dir)
else:
path = path.replace(PRODUCT_DIR + '/', '')
path = path.replace(PRODUCT_DIR + '\\', '')
path = path.replace(PRODUCT_DIR, '.')
INTERMEDIATE_DIR = '$!INTERMEDIATE_DIR'
if INTERMEDIATE_DIR in path:
int_dir = self.GypPathToUniqueOutput('gen')
# GypPathToUniqueOutput generates a path relative to the product dir,
# so insert product_dir in front if it is provided.
path = path.replace(INTERMEDIATE_DIR,
os.path.join(product_dir or '', int_dir))
CONFIGURATION_NAME = '$|CONFIGURATION_NAME'
path = path.replace(CONFIGURATION_NAME, self.config_name)
return path
def ExpandRuleVariables(self, path, root, dirname, source, ext, name):
if self.flavor == 'win':
path = self.msvs_settings.ConvertVSMacros(
path, config=self.config_name)
path = path.replace(generator_default_variables['RULE_INPUT_ROOT'], root)
path = path.replace(generator_default_variables['RULE_INPUT_DIRNAME'],
dirname)
path = path.replace(generator_default_variables['RULE_INPUT_PATH'], source)
path = path.replace(generator_default_variables['RULE_INPUT_EXT'], ext)
path = path.replace(generator_default_variables['RULE_INPUT_NAME'], name)
return path
def GypPathToNinja(self, path, env=None):
"""Translate a gyp path to a ninja path, optionally expanding environment
variable references in |path| with |env|.
See the above discourse on path conversions."""
if env:
if self.flavor == 'mac':
path = gyp.xcode_emulation.ExpandEnvVars(path, env)
elif self.flavor == 'win':
path = gyp.msvs_emulation.ExpandMacros(path, env)
if path.startswith('$!'):
expanded = self.ExpandSpecial(path)
if self.flavor == 'win':
expanded = os.path.normpath(expanded)
return expanded
if '$|' in path:
path = self.ExpandSpecial(path)
assert '$' not in path, path
return os.path.normpath(os.path.join(self.build_to_base, path))
def GypPathToUniqueOutput(self, path, qualified=True):
"""Translate a gyp path to a ninja path for writing output.
If qualified is True, qualify the resulting filename with the name
of the target. This is necessary when e.g. compiling the same
path twice for two separate output targets.
See the above discourse on path conversions."""
path = self.ExpandSpecial(path)
assert not path.startswith('$'), path
# Translate the path following this scheme:
# Input: foo/bar.gyp, target targ, references baz/out.o
# Output: obj/foo/baz/targ.out.o (if qualified)
# obj/foo/baz/out.o (otherwise)
# (and obj.host instead of obj for cross-compiles)
#
# Why this scheme and not some other one?
# 1) for a given input, you can compute all derived outputs by matching
# its path, even if the input is brought via a gyp file with '..'.
# 2) simple files like libraries and stamps have a simple filename.
obj = 'obj'
if self.toolset != 'target':
obj += '.' + self.toolset
path_dir, path_basename = os.path.split(path)
assert not os.path.isabs(path_dir), (
"'%s' can not be absolute path (see crbug.com/462153)." % path_dir)
if qualified:
path_basename = self.name + '.' + path_basename
return os.path.normpath(os.path.join(obj, self.base_dir, path_dir,
path_basename))
def WriteCollapsedDependencies(self, name, targets, order_only=None):
"""Given a list of targets, return a path for a single file
representing the result of building all the targets or None.
Uses a stamp file if necessary."""
assert targets == filter(None, targets), targets
if len(targets) == 0:
assert not order_only
return None
if len(targets) > 1 or order_only:
stamp = self.GypPathToUniqueOutput(name + '.stamp')
targets = self.ninja.build(stamp, 'stamp', targets, order_only=order_only)
self.ninja.newline()
return targets[0]
def _SubninjaNameForArch(self, arch):
output_file_base = os.path.splitext(self.output_file_name)[0]
return '%s.%s.ninja' % (output_file_base, arch)
def WriteSpec(self, spec, config_name, generator_flags):
"""The main entry point for NinjaWriter: write the build rules for a spec.
Returns a Target object, which represents the output paths for this spec.
Returns None if there are no outputs (e.g. a settings-only 'none' type
target)."""
self.config_name = config_name
self.name = spec['target_name']
self.toolset = spec['toolset']
config = spec['configurations'][config_name]
self.target = Target(spec['type'])
self.is_standalone_static_library = bool(
spec.get('standalone_static_library', 0))
# Track if this target contains any C++ files, to decide if gcc or g++
# should be used for linking.
self.uses_cpp = False
self.is_mac_bundle = gyp.xcode_emulation.IsMacBundle(self.flavor, spec)
self.xcode_settings = self.msvs_settings = None
if self.flavor == 'mac':
self.xcode_settings = gyp.xcode_emulation.XcodeSettings(spec)
if self.flavor == 'win':
self.msvs_settings = gyp.msvs_emulation.MsvsSettings(spec,
generator_flags)
arch = self.msvs_settings.GetArch(config_name)
self.ninja.variable('arch', self.win_env[arch])
self.ninja.variable('cc', '$cl_' + arch)
self.ninja.variable('cxx', '$cl_' + arch)
self.ninja.variable('cc_host', '$cl_' + arch)
self.ninja.variable('cxx_host', '$cl_' + arch)
self.ninja.variable('asm', '$ml_' + arch)
if self.flavor == 'mac':
self.archs = self.xcode_settings.GetActiveArchs(config_name)
if len(self.archs) > 1:
self.arch_subninjas = dict(
(arch, ninja_syntax.Writer(
OpenOutput(os.path.join(self.toplevel_build,
self._SubninjaNameForArch(arch)),
'w')))
for arch in self.archs)
# Compute predepends for all rules.
# actions_depends is the dependencies this target depends on before running
# any of its action/rule/copy steps.
# compile_depends is the dependencies this target depends on before running
# any of its compile steps.
actions_depends = []
compile_depends = []
# TODO(evan): it is rather confusing which things are lists and which
# are strings. Fix these.
if 'dependencies' in spec:
for dep in spec['dependencies']:
if dep in self.target_outputs:
target = self.target_outputs[dep]
actions_depends.append(target.PreActionInput(self.flavor))
compile_depends.append(target.PreCompileInput())
actions_depends = filter(None, actions_depends)
compile_depends = filter(None, compile_depends)
actions_depends = self.WriteCollapsedDependencies('actions_depends',
actions_depends)
compile_depends = self.WriteCollapsedDependencies('compile_depends',
compile_depends)
self.target.preaction_stamp = actions_depends
self.target.precompile_stamp = compile_depends
# Write out actions, rules, and copies. These must happen before we
# compile any sources, so compute a list of predependencies for sources
# while we do it.
extra_sources = []
mac_bundle_depends = []
self.target.actions_stamp = self.WriteActionsRulesCopies(
spec, extra_sources, actions_depends, mac_bundle_depends)
# If we have actions/rules/copies, we depend directly on those, but
# otherwise we depend on dependent target's actions/rules/copies etc.
# We never need to explicitly depend on previous target's link steps,
# because no compile ever depends on them.
compile_depends_stamp = (self.target.actions_stamp or compile_depends)
# Write out the compilation steps, if any.
link_deps = []
sources = extra_sources + spec.get('sources', [])
if sources:
if self.flavor == 'mac' and len(self.archs) > 1:
# Write subninja file containing compile and link commands scoped to
# a single arch if a fat binary is being built.
for arch in self.archs:
self.ninja.subninja(self._SubninjaNameForArch(arch))
pch = None
if self.flavor == 'win':
gyp.msvs_emulation.VerifyMissingSources(
sources, self.abs_build_dir, generator_flags, self.GypPathToNinja)
pch = gyp.msvs_emulation.PrecompiledHeader(
self.msvs_settings, config_name, self.GypPathToNinja,
self.GypPathToUniqueOutput, self.obj_ext)
else:
pch = gyp.xcode_emulation.MacPrefixHeader(
self.xcode_settings, self.GypPathToNinja,
lambda path, lang: self.GypPathToUniqueOutput(path + '-' + lang))
link_deps = self.WriteSources(
self.ninja, config_name, config, sources, compile_depends_stamp, pch,
spec)
# Some actions/rules output 'sources' that are already object files.
obj_outputs = [f for f in sources if f.endswith(self.obj_ext)]
if obj_outputs:
if self.flavor != 'mac' or len(self.archs) == 1:
link_deps += [self.GypPathToNinja(o) for o in obj_outputs]
else:
print "Warning: Actions/rules writing object files don't work with " \
"multiarch targets, dropping. (target %s)" % spec['target_name']
elif self.flavor == 'mac' and len(self.archs) > 1:
link_deps = collections.defaultdict(list)
compile_deps = self.target.actions_stamp or actions_depends
if self.flavor == 'win' and self.target.type == 'static_library':
self.target.component_objs = link_deps
self.target.compile_deps = compile_deps
# Write out a link step, if needed.
output = None
is_empty_bundle = not link_deps and not mac_bundle_depends
if link_deps or self.target.actions_stamp or actions_depends:
output = self.WriteTarget(spec, config_name, config, link_deps,
compile_deps)
if self.is_mac_bundle:
mac_bundle_depends.append(output)
# Bundle all of the above together, if needed.
if self.is_mac_bundle:
output = self.WriteMacBundle(spec, mac_bundle_depends, is_empty_bundle)
if not output:
return None
assert self.target.FinalOutput(), output
return self.target
def _WinIdlRule(self, source, prebuild, outputs):
"""Handle the implicit VS .idl rule for one source file. Fills |outputs|
with files that are generated."""
outdir, output, vars, flags = self.msvs_settings.GetIdlBuildData(
source, self.config_name)
outdir = self.GypPathToNinja(outdir)
def fix_path(path, rel=None):
path = os.path.join(outdir, path)
dirname, basename = os.path.split(source)
root, ext = os.path.splitext(basename)
path = self.ExpandRuleVariables(
path, root, dirname, source, ext, basename)
if rel:
path = os.path.relpath(path, rel)
return path
vars = [(name, fix_path(value, outdir)) for name, value in vars]
output = [fix_path(p) for p in output]
vars.append(('outdir', outdir))
vars.append(('idlflags', flags))
input = self.GypPathToNinja(source)
self.ninja.build(output, 'idl', input,
variables=vars, order_only=prebuild)
outputs.extend(output)
def WriteWinIdlFiles(self, spec, prebuild):
"""Writes rules to match MSVS's implicit idl handling."""
assert self.flavor == 'win'
if self.msvs_settings.HasExplicitIdlRulesOrActions(spec):
return []
outputs = []
for source in filter(lambda x: x.endswith('.idl'), spec['sources']):
self._WinIdlRule(source, prebuild, outputs)
return outputs
def WriteActionsRulesCopies(self, spec, extra_sources, prebuild,
mac_bundle_depends):
"""Write out the Actions, Rules, and Copies steps. Return a path
representing the outputs of these steps."""
outputs = []
if self.is_mac_bundle:
mac_bundle_resources = spec.get('mac_bundle_resources', [])[:]
else:
mac_bundle_resources = []
extra_mac_bundle_resources = []
if 'actions' in spec:
outputs += self.WriteActions(spec['actions'], extra_sources, prebuild,
extra_mac_bundle_resources)
if 'rules' in spec:
outputs += self.WriteRules(spec['rules'], extra_sources, prebuild,
mac_bundle_resources,
extra_mac_bundle_resources)
if 'copies' in spec:
outputs += self.WriteCopies(spec['copies'], prebuild, mac_bundle_depends)
if 'sources' in spec and self.flavor == 'win':
outputs += self.WriteWinIdlFiles(spec, prebuild)
stamp = self.WriteCollapsedDependencies('actions_rules_copies', outputs)
if self.is_mac_bundle:
xcassets = self.WriteMacBundleResources(
extra_mac_bundle_resources + mac_bundle_resources, mac_bundle_depends)
partial_info_plist = self.WriteMacXCassets(xcassets, mac_bundle_depends)
self.WriteMacInfoPlist(partial_info_plist, mac_bundle_depends)
return stamp
def GenerateDescription(self, verb, message, fallback):
"""Generate and return a description of a build step.
|verb| is the short summary, e.g. ACTION or RULE.
|message| is a hand-written description, or None if not available.
|fallback| is the gyp-level name of the step, usable as a fallback.
"""
if self.toolset != 'target':
verb += '(%s)' % self.toolset
if message:
return '%s %s' % (verb, self.ExpandSpecial(message))
else:
return '%s %s: %s' % (verb, self.name, fallback)
def WriteActions(self, actions, extra_sources, prebuild,
extra_mac_bundle_resources):
# Actions cd into the base directory.
env = self.GetToolchainEnv()
all_outputs = []
for action in actions:
# First write out a rule for the action.
name = '%s_%s' % (action['action_name'], self.hash_for_rules)
description = self.GenerateDescription('ACTION',
action.get('message', None),
name)
is_cygwin = (self.msvs_settings.IsRuleRunUnderCygwin(action)
if self.flavor == 'win' else False)
args = action['action']
depfile = action.get('depfile', None)
if depfile:
depfile = self.ExpandSpecial(depfile, self.base_to_build)
pool = 'console' if int(action.get('ninja_use_console', 0)) else None
rule_name, _ = self.WriteNewNinjaRule(name, args, description,
is_cygwin, env, pool,
depfile=depfile)
inputs = [self.GypPathToNinja(i, env) for i in action['inputs']]
if int(action.get('process_outputs_as_sources', False)):
extra_sources += action['outputs']
if int(action.get('process_outputs_as_mac_bundle_resources', False)):
extra_mac_bundle_resources += action['outputs']
outputs = [self.GypPathToNinja(o, env) for o in action['outputs']]
# Then write out an edge using the rule.
self.ninja.build(outputs, rule_name, inputs,
order_only=prebuild)
all_outputs += outputs
self.ninja.newline()
return all_outputs
def WriteRules(self, rules, extra_sources, prebuild,
mac_bundle_resources, extra_mac_bundle_resources):
env = self.GetToolchainEnv()
all_outputs = []
for rule in rules:
# Skip a rule with no action and no inputs.
if 'action' not in rule and not rule.get('rule_sources', []):
continue
# First write out a rule for the rule action.
name = '%s_%s' % (rule['rule_name'], self.hash_for_rules)
args = rule['action']
description = self.GenerateDescription(
'RULE',
rule.get('message', None),
('%s ' + generator_default_variables['RULE_INPUT_PATH']) % name)
is_cygwin = (self.msvs_settings.IsRuleRunUnderCygwin(rule)
if self.flavor == 'win' else False)
pool = 'console' if int(rule.get('ninja_use_console', 0)) else None
rule_name, args = self.WriteNewNinjaRule(
name, args, description, is_cygwin, env, pool)
# TODO: if the command references the outputs directly, we should
# simplify it to just use $out.
# Rules can potentially make use of some special variables which
# must vary per source file.
# Compute the list of variables we'll need to provide.
special_locals = ('source', 'root', 'dirname', 'ext', 'name')
needed_variables = set(['source'])
for argument in args:
for var in special_locals:
if '${%s}' % var in argument:
needed_variables.add(var)
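# e.g. an argument like '${dirname}/${root}_generated.cc' (illustrative)
# pulls 'dirname' and 'root' into needed_variables; 'source' is always
# provided.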
def cygwin_munge(path):
# pylint: disable=cell-var-from-loop
if is_cygwin:
return path.replace('\\', '/')
return path
inputs = [self.GypPathToNinja(i, env) for i in rule.get('inputs', [])]
# If there are n source files matching the rule, and m additional rule
# inputs, then adding 'inputs' to each build edge written below will
# write m * n inputs. Collapsing reduces this to m + n.
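# For instance, 10 rule sources with 3 extra inputs would otherwise emit
# 30 input references across the edges below; a single collapsed stamp
# brings that down to 10 + 3.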
sources = rule.get('rule_sources', [])
num_inputs = len(inputs)
if prebuild:
num_inputs += 1
if num_inputs > 2 and len(sources) > 2:
inputs = [self.WriteCollapsedDependencies(
rule['rule_name'], inputs, order_only=prebuild)]
prebuild = []
# For each source file, write an edge that generates all the outputs.
for source in sources:
source = os.path.normpath(source)
dirname, basename = os.path.split(source)
root, ext = os.path.splitext(basename)
# Gather the list of inputs and outputs, expanding $vars if possible.
outputs = [self.ExpandRuleVariables(o, root, dirname,
source, ext, basename)
for o in rule['outputs']]
if int(rule.get('process_outputs_as_sources', False)):
extra_sources += outputs
was_mac_bundle_resource = source in mac_bundle_resources
if was_mac_bundle_resource or \
int(rule.get('process_outputs_as_mac_bundle_resources', False)):
extra_mac_bundle_resources += outputs
# Note: This is n_resources * n_outputs_in_rule. Put to-be-removed
# items in a set and remove them all in a single pass if this becomes
# a performance issue.
if was_mac_bundle_resource:
mac_bundle_resources.remove(source)
extra_bindings = []
for var in needed_variables:
if var == 'root':
extra_bindings.append(('root', cygwin_munge(root)))
elif var == 'dirname':
# '$dirname' is a parameter to the rule action, which means
# it shouldn't be converted to a Ninja path. But we don't
# want $!PRODUCT_DIR in there either.
dirname_expanded = self.ExpandSpecial(dirname, self.base_to_build)
extra_bindings.append(('dirname', cygwin_munge(dirname_expanded)))
elif var == 'source':
# '$source' is a parameter to the rule action, which means
# it shouldn't be converted to a Ninja path. But we don't
# want $!PRODUCT_DIR in there either.
source_expanded = self.ExpandSpecial(source, self.base_to_build)
extra_bindings.append(('source', cygwin_munge(source_expanded)))
elif var == 'ext':
extra_bindings.append(('ext', ext))
elif var == 'name':
extra_bindings.append(('name', cygwin_munge(basename)))
else:
assert var == None, repr(var)
outputs = [self.GypPathToNinja(o, env) for o in outputs]
if self.flavor == 'win':
# WriteNewNinjaRule uses unique_name for creating an rsp file on win.
extra_bindings.append(('unique_name',
hashlib.md5(outputs[0]).hexdigest()))
self.ninja.build(outputs, rule_name, self.GypPathToNinja(source),
implicit=inputs,
order_only=prebuild,
variables=extra_bindings)
all_outputs.extend(outputs)
return all_outputs
def WriteCopies(self, copies, prebuild, mac_bundle_depends):
outputs = []
env = self.GetToolchainEnv()
for copy in copies:
for path in copy['files']:
# Normalize the path so trailing slashes don't confuse us.
path = os.path.normpath(path)
basename = os.path.split(path)[1]
src = self.GypPathToNinja(path, env)
dst = self.GypPathToNinja(os.path.join(copy['destination'], basename),
env)
outputs += self.ninja.build(dst, 'copy', src, order_only=prebuild)
if self.is_mac_bundle:
# gyp has mac_bundle_resources to copy things into a bundle's
# Resources folder, but there's no built-in way to copy files to other
# places in the bundle. Hence, some targets use copies for this. Check
# if this file is copied into the current bundle, and if so add it to
# the bundle depends so that dependent targets get rebuilt if the copy
# input changes.
if dst.startswith(self.xcode_settings.GetBundleContentsFolderPath()):
mac_bundle_depends.append(dst)
return outputs
def WriteMacBundleResources(self, resources, bundle_depends):
"""Writes ninja edges for 'mac_bundle_resources'."""
xcassets = []
for output, res in gyp.xcode_emulation.GetMacBundleResources(
generator_default_variables['PRODUCT_DIR'],
self.xcode_settings, map(self.GypPathToNinja, resources)):
output = self.ExpandSpecial(output)
if os.path.splitext(output)[-1] != '.xcassets':
isBinary = self.xcode_settings.IsBinaryOutputFormat(self.config_name)
self.ninja.build(output, 'mac_tool', res,
variables=[('mactool_cmd', 'copy-bundle-resource'), \
('binary', isBinary)])
bundle_depends.append(output)
else:
xcassets.append(res)
return xcassets
def WriteMacXCassets(self, xcassets, bundle_depends):
"""Writes ninja edges for 'mac_bundle_resources' .xcassets files.
This adds an invocation of 'actool' via the 'mac_tool.py' helper script.
It assumes that the assets catalogs define at least one imageset and
thus an Assets.car file will be generated in the application resources
directory. If this is not the case, ninja will consider the declared output
missing and re-run this step on every invocation."""
if not xcassets:
return
extra_arguments = {}
settings_to_arg = {
'XCASSETS_APP_ICON': 'app-icon',
'XCASSETS_LAUNCH_IMAGE': 'launch-image',
}
settings = self.xcode_settings.xcode_settings[self.config_name]
for settings_key, arg_name in settings_to_arg.iteritems():
value = settings.get(settings_key)
if value:
extra_arguments[arg_name] = value
partial_info_plist = None
if extra_arguments:
partial_info_plist = self.GypPathToUniqueOutput(
'assetcatalog_generated_info.plist')
extra_arguments['output-partial-info-plist'] = partial_info_plist
outputs = []
outputs.append(
os.path.join(
self.xcode_settings.GetBundleResourceFolder(),
'Assets.car'))
if partial_info_plist:
outputs.append(partial_info_plist)
keys = QuoteShellArgument(json.dumps(extra_arguments), self.flavor)
extra_env = self.xcode_settings.GetPerTargetSettings()
env = self.GetSortedXcodeEnv(additional_settings=extra_env)
env = self.ComputeExportEnvString(env)
bundle_depends.extend(self.ninja.build(
outputs, 'compile_xcassets', xcassets,
variables=[('env', env), ('keys', keys)]))
return partial_info_plist
def WriteMacInfoPlist(self, partial_info_plist, bundle_depends):
"""Write build rules for bundle Info.plist files."""
info_plist, out, defines, extra_env = gyp.xcode_emulation.GetMacInfoPlist(
generator_default_variables['PRODUCT_DIR'],
self.xcode_settings, self.GypPathToNinja)
if not info_plist:
return
out = self.ExpandSpecial(out)
if defines:
# Create an intermediate file to store preprocessed results.
intermediate_plist = self.GypPathToUniqueOutput(
os.path.basename(info_plist))
defines = ' '.join([Define(d, self.flavor) for d in defines])
info_plist = self.ninja.build(
intermediate_plist, 'preprocess_infoplist', info_plist,
variables=[('defines',defines)])
env = self.GetSortedXcodeEnv(additional_settings=extra_env)
env = self.ComputeExportEnvString(env)
if partial_info_plist:
intermediate_plist = self.GypPathToUniqueOutput('merged_info.plist')
info_plist = self.ninja.build(
intermediate_plist, 'merge_infoplist',
[partial_info_plist, info_plist])
keys = self.xcode_settings.GetExtraPlistItems(self.config_name)
keys = QuoteShellArgument(json.dumps(keys), self.flavor)
isBinary = self.xcode_settings.IsBinaryOutputFormat(self.config_name)
self.ninja.build(out, 'copy_infoplist', info_plist,
variables=[('env', env), ('keys', keys),
('binary', isBinary)])
bundle_depends.append(out)
def WriteSources(self, ninja_file, config_name, config, sources, predepends,
precompiled_header, spec):
"""Write build rules to compile all of |sources|."""
if self.toolset == 'host':
self.ninja.variable('ar', '$ar_host')
self.ninja.variable('cc', '$cc_host')
self.ninja.variable('cxx', '$cxx_host')
self.ninja.variable('ld', '$ld_host')
self.ninja.variable('ldxx', '$ldxx_host')
self.ninja.variable('nm', '$nm_host')
self.ninja.variable('readelf', '$readelf_host')
if self.flavor != 'mac' or len(self.archs) == 1:
return self.WriteSourcesForArch(
self.ninja, config_name, config, sources, predepends,
precompiled_header, spec)
else:
return dict((arch, self.WriteSourcesForArch(
self.arch_subninjas[arch], config_name, config, sources, predepends,
precompiled_header, spec, arch=arch))
for arch in self.archs)
def WriteSourcesForArch(self, ninja_file, config_name, config, sources,
predepends, precompiled_header, spec, arch=None):
"""Write build rules to compile all of |sources|."""
extra_defines = []
if self.flavor == 'mac':
cflags = self.xcode_settings.GetCflags(config_name, arch=arch)
cflags_c = self.xcode_settings.GetCflagsC(config_name)
cflags_cc = self.xcode_settings.GetCflagsCC(config_name)
cflags_objc = ['$cflags_c'] + \
self.xcode_settings.GetCflagsObjC(config_name)
cflags_objcc = ['$cflags_cc'] + \
self.xcode_settings.GetCflagsObjCC(config_name)
elif self.flavor == 'win':
asmflags = self.msvs_settings.GetAsmflags(config_name)
cflags = self.msvs_settings.GetCflags(config_name)
cflags_c = self.msvs_settings.GetCflagsC(config_name)
cflags_cc = self.msvs_settings.GetCflagsCC(config_name)
extra_defines = self.msvs_settings.GetComputedDefines(config_name)
# See the comment at cc_command for why there are two .pdb files.
pdbpath_c = pdbpath_cc = self.msvs_settings.GetCompilerPdbName(
config_name, self.ExpandSpecial)
if not pdbpath_c:
obj = 'obj'
if self.toolset != 'target':
obj += '.' + self.toolset
pdbpath = os.path.normpath(os.path.join(obj, self.base_dir, self.name))
pdbpath_c = pdbpath + '.c.pdb'
pdbpath_cc = pdbpath + '.cc.pdb'
self.WriteVariableList(ninja_file, 'pdbname_c', [pdbpath_c])
self.WriteVariableList(ninja_file, 'pdbname_cc', [pdbpath_cc])
self.WriteVariableList(ninja_file, 'pchprefix', [self.name])
else:
cflags = config.get('cflags', [])
cflags_c = config.get('cflags_c', [])
cflags_cc = config.get('cflags_cc', [])
# Respect environment variables related to the build, but let target-specific
# flags override them.
if self.toolset == 'target':
cflags_c = (os.environ.get('CPPFLAGS', '').split() +
os.environ.get('CFLAGS', '').split() + cflags_c)
cflags_cc = (os.environ.get('CPPFLAGS', '').split() +
os.environ.get('CXXFLAGS', '').split() + cflags_cc)
elif self.toolset == 'host':
cflags_c = (os.environ.get('CPPFLAGS_host', '').split() +
os.environ.get('CFLAGS_host', '').split() + cflags_c)
cflags_cc = (os.environ.get('CPPFLAGS_host', '').split() +
os.environ.get('CXXFLAGS_host', '').split() + cflags_cc)
defines = config.get('defines', []) + extra_defines
self.WriteVariableList(ninja_file, 'defines',
[Define(d, self.flavor) for d in defines])
if self.flavor == 'win':
self.WriteVariableList(ninja_file, 'asmflags',
map(self.ExpandSpecial, asmflags))
self.WriteVariableList(ninja_file, 'rcflags',
[QuoteShellArgument(self.ExpandSpecial(f), self.flavor)
for f in self.msvs_settings.GetRcflags(config_name,
self.GypPathToNinja)])
include_dirs = config.get('include_dirs', [])
env = self.GetToolchainEnv()
if self.flavor == 'win':
include_dirs = self.msvs_settings.AdjustIncludeDirs(include_dirs,
config_name)
self.WriteVariableList(ninja_file, 'includes',
[QuoteShellArgument('-I' + self.GypPathToNinja(i, env), self.flavor)
for i in include_dirs])
if self.flavor == 'win':
midl_include_dirs = config.get('midl_include_dirs', [])
midl_include_dirs = self.msvs_settings.AdjustMidlIncludeDirs(
midl_include_dirs, config_name)
self.WriteVariableList(ninja_file, 'midl_includes',
[QuoteShellArgument('-I' + self.GypPathToNinja(i, env), self.flavor)
for i in midl_include_dirs])
pch_commands = precompiled_header.GetPchBuildCommands(arch)
if self.flavor == 'mac':
# Most targets use no precompiled headers, so only write these if needed.
for ext, var in [('c', 'cflags_pch_c'), ('cc', 'cflags_pch_cc'),
('m', 'cflags_pch_objc'), ('mm', 'cflags_pch_objcc')]:
include = precompiled_header.GetInclude(ext, arch)
if include: ninja_file.variable(var, include)
arflags = config.get('arflags', [])
self.WriteVariableList(ninja_file, 'cflags',
map(self.ExpandSpecial, cflags))
self.WriteVariableList(ninja_file, 'cflags_c',
map(self.ExpandSpecial, cflags_c))
self.WriteVariableList(ninja_file, 'cflags_cc',
map(self.ExpandSpecial, cflags_cc))
if self.flavor == 'mac':
self.WriteVariableList(ninja_file, 'cflags_objc',
map(self.ExpandSpecial, cflags_objc))
self.WriteVariableList(ninja_file, 'cflags_objcc',
map(self.ExpandSpecial, cflags_objcc))
self.WriteVariableList(ninja_file, 'arflags',
map(self.ExpandSpecial, arflags))
ninja_file.newline()
outputs = []
has_rc_source = False
for source in sources:
filename, ext = os.path.splitext(source)
ext = ext[1:]
obj_ext = self.obj_ext
if ext in ('cc', 'cpp', 'cxx'):
command = 'cxx'
self.uses_cpp = True
elif ext == 'c' or (ext == 'S' and self.flavor != 'win'):
command = 'cc'
elif ext == 's' and self.flavor != 'win': # Doesn't generate .o.d files.
command = 'cc_s'
elif (self.flavor == 'win' and ext == 'asm' and
not self.msvs_settings.HasExplicitAsmRules(spec)):
command = 'asm'
# Add the _asm suffix as msvs is capable of handling .cc and
# .asm files of the same name without collision.
obj_ext = '_asm.obj'
elif self.flavor == 'mac' and ext == 'm':
command = 'objc'
elif self.flavor == 'mac' and ext == 'mm':
command = 'objcxx'
self.uses_cpp = True
elif self.flavor == 'win' and ext == 'rc':
command = 'rc'
obj_ext = '.res'
has_rc_source = True
else:
# Ignore unhandled extensions.
continue
input = self.GypPathToNinja(source)
output = self.GypPathToUniqueOutput(filename + obj_ext)
if arch is not None:
output = AddArch(output, arch)
implicit = precompiled_header.GetObjDependencies([input], [output], arch)
variables = []
if self.flavor == 'win':
variables, output, implicit = precompiled_header.GetFlagsModifications(
input, output, implicit, command, cflags_c, cflags_cc,
self.ExpandSpecial)
ninja_file.build(output, command, input,
implicit=[gch for _, _, gch in implicit],
order_only=predepends, variables=variables)
outputs.append(output)
if has_rc_source:
resource_include_dirs = config.get('resource_include_dirs', include_dirs)
self.WriteVariableList(ninja_file, 'resource_includes',
[QuoteShellArgument('-I' + self.GypPathToNinja(i, env), self.flavor)
for i in resource_include_dirs])
self.WritePchTargets(ninja_file, pch_commands)
ninja_file.newline()
return outputs
def WritePchTargets(self, ninja_file, pch_commands):
"""Writes ninja rules to compile prefix headers."""
if not pch_commands:
return
for gch, lang_flag, lang, input in pch_commands:
var_name = {
'c': 'cflags_pch_c',
'cc': 'cflags_pch_cc',
'm': 'cflags_pch_objc',
'mm': 'cflags_pch_objcc',
}[lang]
map = { 'c': 'cc', 'cc': 'cxx', 'm': 'objc', 'mm': 'objcxx', }
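# e.g. a C prefix header is compiled with the plain 'cc' rule and a C++ one
# with 'cxx'; each build edge passes its language's flag through the matching
# cflags_pch_* variable.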
cmd = map.get(lang)
ninja_file.build(gch, cmd, input, variables=[(var_name, lang_flag)])
def WriteLink(self, spec, config_name, config, link_deps):
"""Write out a link step. Fills out target.binary. """
if self.flavor != 'mac' or len(self.archs) == 1:
return self.WriteLinkForArch(
self.ninja, spec, config_name, config, link_deps)
else:
output = self.ComputeOutput(spec)
inputs = [self.WriteLinkForArch(self.arch_subninjas[arch], spec,
config_name, config, link_deps[arch],
arch=arch)
for arch in self.archs]
extra_bindings = []
build_output = output
if not self.is_mac_bundle:
self.AppendPostbuildVariable(extra_bindings, spec, output, output)
# TODO(yyanagisawa): more work needed to fix:
# https://code.google.com/p/gyp/issues/detail?id=411
if (spec['type'] in ('shared_library', 'loadable_module') and
not self.is_mac_bundle):
extra_bindings.append(('lib', output))
self.ninja.build([output, output + '.TOC'], 'solipo', inputs,
variables=extra_bindings)
else:
self.ninja.build(build_output, 'lipo', inputs, variables=extra_bindings)
return output
def WriteLinkForArch(self, ninja_file, spec, config_name, config,
link_deps, arch=None):
"""Write out a link step. Fills out target.binary. """
command = {
'executable': 'link',
'loadable_module': 'solink_module',
'shared_library': 'solink',
}[spec['type']]
command_suffix = ''
implicit_deps = set()
solibs = set()
order_deps = set()
if 'dependencies' in spec:
# Two kinds of dependencies:
# - Linkable dependencies (like a .a or a .so): add them to the link line.
# - Non-linkable dependencies (like a rule that generates a file
# and writes a stamp file): add them to implicit_deps
extra_link_deps = set()
for dep in spec['dependencies']:
target = self.target_outputs.get(dep)
if not target:
continue
linkable = target.Linkable()
if linkable:
new_deps = []
if (self.flavor == 'win' and
target.component_objs and
self.msvs_settings.IsUseLibraryDependencyInputs(config_name)):
new_deps = target.component_objs
if target.compile_deps:
order_deps.add(target.compile_deps)
elif self.flavor == 'win' and target.import_lib:
new_deps = [target.import_lib]
elif target.UsesToc(self.flavor):
solibs.add(target.binary)
implicit_deps.add(target.binary + '.TOC')
else:
new_deps = [target.binary]
for new_dep in new_deps:
if new_dep not in extra_link_deps:
extra_link_deps.add(new_dep)
link_deps.append(new_dep)
final_output = target.FinalOutput()
if not linkable or final_output != target.binary:
implicit_deps.add(final_output)
extra_bindings = []
if self.uses_cpp and self.flavor != 'win':
extra_bindings.append(('ld', '$ldxx'))
output = self.ComputeOutput(spec, arch)
if arch is None and not self.is_mac_bundle:
self.AppendPostbuildVariable(extra_bindings, spec, output, output)
is_executable = spec['type'] == 'executable'
# The ldflags config key is not used on mac or win. On those platforms
# linker flags are set via xcode_settings and msvs_settings, respectively.
env_ldflags = os.environ.get('LDFLAGS', '').split()
if self.flavor == 'mac':
ldflags = self.xcode_settings.GetLdflags(config_name,
self.ExpandSpecial(generator_default_variables['PRODUCT_DIR']),
self.GypPathToNinja, arch)
ldflags = env_ldflags + ldflags
elif self.flavor == 'win':
manifest_base_name = self.GypPathToUniqueOutput(
self.ComputeOutputFileName(spec))
ldflags, intermediate_manifest, manifest_files = \
self.msvs_settings.GetLdflags(config_name, self.GypPathToNinja,
self.ExpandSpecial, manifest_base_name,
output, is_executable,
self.toplevel_build)
ldflags = env_ldflags + ldflags
self.WriteVariableList(ninja_file, 'manifests', manifest_files)
implicit_deps = implicit_deps.union(manifest_files)
if intermediate_manifest:
self.WriteVariableList(
ninja_file, 'intermediatemanifest', [intermediate_manifest])
command_suffix = _GetWinLinkRuleNameSuffix(
self.msvs_settings.IsEmbedManifest(config_name))
def_file = self.msvs_settings.GetDefFile(self.GypPathToNinja)
if def_file:
implicit_deps.add(def_file)
else:
# Respect environment variables related to the build, but let target-specific
# flags override them.
ldflags = env_ldflags + config.get('ldflags', [])
if is_executable and len(solibs):
rpath = 'lib/'
if self.toolset != 'target':
rpath += self.toolset
ldflags.append(r'-Wl,-rpath=\$$ORIGIN/%s' % rpath)
ldflags.append('-Wl,-rpath-link=%s' % rpath)
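# In the generated command, ninja expands '$$' to a literal '$' and the
# backslash keeps the shell from expanding it, so the linker ultimately sees
# e.g. -Wl,-rpath=$ORIGIN/lib/ for target-toolset executables.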
self.WriteVariableList(ninja_file, 'ldflags',
map(self.ExpandSpecial, ldflags))
library_dirs = config.get('library_dirs', [])
if self.flavor == 'win':
library_dirs = [self.msvs_settings.ConvertVSMacros(l, config_name)
for l in library_dirs]
library_dirs = ['/LIBPATH:' + QuoteShellArgument(self.GypPathToNinja(l),
self.flavor)
for l in library_dirs]
else:
library_dirs = [QuoteShellArgument('-L' + self.GypPathToNinja(l),
self.flavor)
for l in library_dirs]
libraries = gyp.common.uniquer(map(self.ExpandSpecial,
spec.get('libraries', [])))
if self.flavor == 'mac':
libraries = self.xcode_settings.AdjustLibraries(libraries, config_name)
elif self.flavor == 'win':
libraries = self.msvs_settings.AdjustLibraries(libraries)
self.WriteVariableList(ninja_file, 'libs', library_dirs + libraries)
linked_binary = output
if command in ('solink', 'solink_module'):
extra_bindings.append(('soname', os.path.split(output)[1]))
extra_bindings.append(('lib',
gyp.common.EncodePOSIXShellArgument(output)))
if self.flavor != 'win':
link_file_list = output
if self.is_mac_bundle:
# 'Dependency Framework.framework/Versions/A/Dependency Framework' ->
# 'Dependency Framework.framework.rsp'
link_file_list = self.xcode_settings.GetWrapperName()
if arch:
link_file_list += '.' + arch
link_file_list += '.rsp'
# If an rspfile path contains spaces, ninja wraps the filename in quotes
# and then passes that quoted name to open(), creating a file with quotes
# in its name (while the command that later looks for the rsp file goes
# through bash, which strips the quotes, so the two names don't match) :-/
link_file_list = link_file_list.replace(' ', '_')
extra_bindings.append(
('link_file_list',
gyp.common.EncodePOSIXShellArgument(link_file_list)))
if self.flavor == 'win':
extra_bindings.append(('binary', output))
if ('/NOENTRY' not in ldflags and
not self.msvs_settings.GetNoImportLibrary(config_name)):
self.target.import_lib = output + '.lib'
extra_bindings.append(('implibflag',
'/IMPLIB:%s' % self.target.import_lib))
pdbname = self.msvs_settings.GetPDBName(
config_name, self.ExpandSpecial, output + '.pdb')
output = [output, self.target.import_lib]
if pdbname:
output.append(pdbname)
elif not self.is_mac_bundle:
output = [output, output + '.TOC']
else:
command = command + '_notoc'
elif self.flavor == 'win':
extra_bindings.append(('binary', output))
pdbname = self.msvs_settings.GetPDBName(
config_name, self.ExpandSpecial, output + '.pdb')
if pdbname:
output = [output, pdbname]
if len(solibs):
extra_bindings.append(('solibs', gyp.common.EncodePOSIXShellList(solibs)))
ninja_file.build(output, command + command_suffix, link_deps,
implicit=list(implicit_deps),
order_only=list(order_deps),
variables=extra_bindings)
return linked_binary
def WriteTarget(self, spec, config_name, config, link_deps, compile_deps):
extra_link_deps = any(self.target_outputs.get(dep).Linkable()
for dep in spec.get('dependencies', [])
if dep in self.target_outputs)
if spec['type'] == 'none' or (not link_deps and not extra_link_deps):
# TODO(evan): don't call this function for 'none' target types, as
# it doesn't do anything, and we fake out a 'binary' with a stamp file.
self.target.binary = compile_deps
self.target.type = 'none'
elif spec['type'] == 'static_library':
self.target.binary = self.ComputeOutput(spec)
if (self.flavor not in ('mac', 'openbsd', 'netbsd', 'win') and not
self.is_standalone_static_library):
self.ninja.build(self.target.binary, 'alink_thin', link_deps,
order_only=compile_deps)
else:
variables = []
if self.xcode_settings:
libtool_flags = self.xcode_settings.GetLibtoolflags(config_name)
if libtool_flags:
variables.append(('libtool_flags', libtool_flags))
if self.msvs_settings:
libflags = self.msvs_settings.GetLibFlags(config_name,
self.GypPathToNinja)
variables.append(('libflags', libflags))
if self.flavor != 'mac' or len(self.archs) == 1:
self.AppendPostbuildVariable(variables, spec,
self.target.binary, self.target.binary)
self.ninja.build(self.target.binary, 'alink', link_deps,
order_only=compile_deps, variables=variables)
else:
inputs = []
for arch in self.archs:
output = self.ComputeOutput(spec, arch)
self.arch_subninjas[arch].build(output, 'alink', link_deps[arch],
order_only=compile_deps,
variables=variables)
inputs.append(output)
# TODO: It's not clear if libtool_flags should be passed to the alink
# call that combines single-arch .a files into a fat .a file.
self.AppendPostbuildVariable(variables, spec,
self.target.binary, self.target.binary)
self.ninja.build(self.target.binary, 'alink', inputs,
# FIXME: test proving order_only=compile_deps isn't
# needed.
variables=variables)
else:
self.target.binary = self.WriteLink(spec, config_name, config, link_deps)
return self.target.binary
def WriteMacBundle(self, spec, mac_bundle_depends, is_empty):
assert self.is_mac_bundle
package_framework = spec['type'] in ('shared_library', 'loadable_module')
output = self.ComputeMacBundleOutput()
if is_empty:
output += '.stamp'
variables = []
self.AppendPostbuildVariable(variables, spec, output, self.target.binary,
is_command_start=not package_framework)
if package_framework and not is_empty:
variables.append(('version', self.xcode_settings.GetFrameworkVersion()))
self.ninja.build(output, 'package_framework', mac_bundle_depends,
variables=variables)
else:
self.ninja.build(output, 'stamp', mac_bundle_depends,
variables=variables)
self.target.bundle = output
return output
def GetToolchainEnv(self, additional_settings=None):
"""Returns the variables toolchain would set for build steps."""
env = self.GetSortedXcodeEnv(additional_settings=additional_settings)
if self.flavor == 'win':
env = self.GetMsvsToolchainEnv(
additional_settings=additional_settings)
return env
def GetMsvsToolchainEnv(self, additional_settings=None):
"""Returns the variables Visual Studio would set for build steps."""
return self.msvs_settings.GetVSMacroEnv('$!PRODUCT_DIR',
config=self.config_name)
def GetSortedXcodeEnv(self, additional_settings=None):
"""Returns the variables Xcode would set for build steps."""
assert self.abs_build_dir
abs_build_dir = self.abs_build_dir
return gyp.xcode_emulation.GetSortedXcodeEnv(
self.xcode_settings, abs_build_dir,
os.path.join(abs_build_dir, self.build_to_base), self.config_name,
additional_settings)
def GetSortedXcodePostbuildEnv(self):
"""Returns the variables Xcode would set for postbuild steps."""
postbuild_settings = {}
# CHROMIUM_STRIP_SAVE_FILE is a chromium-specific hack.
# TODO(thakis): It would be nice to have some general mechanism instead.
strip_save_file = self.xcode_settings.GetPerTargetSetting(
'CHROMIUM_STRIP_SAVE_FILE')
if strip_save_file:
postbuild_settings['CHROMIUM_STRIP_SAVE_FILE'] = strip_save_file
return self.GetSortedXcodeEnv(additional_settings=postbuild_settings)
def AppendPostbuildVariable(self, variables, spec, output, binary,
is_command_start=False):
"""Adds a 'postbuild' variable if there is a postbuild for |output|."""
postbuild = self.GetPostbuildCommand(spec, output, binary, is_command_start)
if postbuild:
variables.append(('postbuilds', postbuild))
def GetPostbuildCommand(self, spec, output, output_binary, is_command_start):
"""Returns a shell command that runs all the postbuilds, and removes
|output| if any of them fails. If |is_command_start| is False, then the
returned string will start with ' && '."""
if not self.xcode_settings or spec['type'] == 'none' or not output:
return ''
output = QuoteShellArgument(output, self.flavor)
postbuilds = gyp.xcode_emulation.GetSpecPostbuildCommands(spec, quiet=True)
if output_binary is not None:
postbuilds = self.xcode_settings.AddImplicitPostbuilds(
self.config_name,
os.path.normpath(os.path.join(self.base_to_build, output)),
QuoteShellArgument(
os.path.normpath(os.path.join(self.base_to_build, output_binary)),
self.flavor),
postbuilds, quiet=True)
if not postbuilds:
return ''
# Postbuilds expect to be run in the gyp file's directory, so insert an
# implicit postbuild to cd to there.
postbuilds.insert(0, gyp.common.EncodePOSIXShellList(
['cd', self.build_to_base]))
env = self.ComputeExportEnvString(self.GetSortedXcodePostbuildEnv())
# G will be non-zero if any postbuild fails. Run all postbuilds in a
# subshell.
commands = env + ' (' + \
' && '.join([ninja_syntax.escape(command) for command in postbuilds])
command_string = (commands + '); G=$$?; '
# Remove the final output if any postbuild failed.
'((exit $$G) || rm -rf %s) ' % output + '&& exit $$G)')
if is_command_start:
return '(' + command_string + ' && '
else:
return '$ && (' + command_string
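# Illustratively (command and output names made up), with two postbuilds and
# is_command_start=False the returned fragment looks roughly like:
#   $ && (export FOO=foo; (cmd1 && cmd2); G=$$?;
#         ((exit $$G) || rm -rf out/Thing.app) && exit $$G)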
def ComputeExportEnvString(self, env):
"""Given an environment, returns a string looking like
'export FOO=foo; export BAR="${FOO} bar";'
that exports |env| to the shell."""
export_str = []
for k, v in env:
export_str.append('export %s=%s;' %
(k, ninja_syntax.escape(gyp.common.EncodePOSIXShellArgument(v))))
return ' '.join(export_str)
def ComputeMacBundleOutput(self):
"""Return the 'output' (full output path) to a bundle output directory."""
assert self.is_mac_bundle
path = generator_default_variables['PRODUCT_DIR']
return self.ExpandSpecial(
os.path.join(path, self.xcode_settings.GetWrapperName()))
def ComputeOutputFileName(self, spec, type=None):
"""Compute the filename of the final output for the current target."""
if not type:
type = spec['type']
default_variables = copy.copy(generator_default_variables)
CalculateVariables(default_variables, {'flavor': self.flavor})
# Compute filename prefix: the product prefix, or a default for
# the product type.
DEFAULT_PREFIX = {
'loadable_module': default_variables['SHARED_LIB_PREFIX'],
'shared_library': default_variables['SHARED_LIB_PREFIX'],
'static_library': default_variables['STATIC_LIB_PREFIX'],
'executable': default_variables['EXECUTABLE_PREFIX'],
}
prefix = spec.get('product_prefix', DEFAULT_PREFIX.get(type, ''))
# Compute filename extension: the product extension, or a default
# for the product type.
DEFAULT_EXTENSION = {
'loadable_module': default_variables['SHARED_LIB_SUFFIX'],
'shared_library': default_variables['SHARED_LIB_SUFFIX'],
'static_library': default_variables['STATIC_LIB_SUFFIX'],
'executable': default_variables['EXECUTABLE_SUFFIX'],
}
extension = spec.get('product_extension')
if extension:
extension = '.' + extension
else:
extension = DEFAULT_EXTENSION.get(type, '')
if 'product_name' in spec:
# If we were given an explicit name, use that.
target = spec['product_name']
else:
# Otherwise, derive a name from the target name.
target = spec['target_name']
if prefix == 'lib':
# Snip out an extra 'lib' from libs if appropriate.
target = StripPrefix(target, 'lib')
if type in ('static_library', 'loadable_module', 'shared_library',
'executable'):
return '%s%s%s' % (prefix, target, extension)
elif type == 'none':
return '%s.stamp' % target
else:
raise Exception('Unhandled output type %s' % type)
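# e.g. with the default prefixes/suffixes a Linux 'shared_library' named
# 'foo' becomes 'libfoo.so', a Windows 'executable' becomes 'foo.exe', and a
# 'none' target becomes 'foo.stamp'.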
def ComputeOutput(self, spec, arch=None):
"""Compute the path for the final output of the spec."""
type = spec['type']
if self.flavor == 'win':
override = self.msvs_settings.GetOutputName(self.config_name,
self.ExpandSpecial)
if override:
return override
if arch is None and self.flavor == 'mac' and type in (
'static_library', 'executable', 'shared_library', 'loadable_module'):
filename = self.xcode_settings.GetExecutablePath()
else:
filename = self.ComputeOutputFileName(spec, type)
if arch is None and 'product_dir' in spec:
path = os.path.join(spec['product_dir'], filename)
return self.ExpandSpecial(path)
# Some products go into the output root, libraries go into shared library
# dir, and everything else goes into the normal place.
type_in_output_root = ['executable', 'loadable_module']
if self.flavor == 'mac' and self.toolset == 'target':
type_in_output_root += ['shared_library', 'static_library']
elif self.flavor == 'win' and self.toolset == 'target':
type_in_output_root += ['shared_library']
if arch is not None:
# Make sure partial executables don't end up in a bundle or the regular
# output directory.
archdir = 'arch'
if self.toolset != 'target':
archdir = os.path.join('arch', '%s' % self.toolset)
return os.path.join(archdir, AddArch(filename, arch))
elif type in type_in_output_root or self.is_standalone_static_library:
return filename
elif type == 'shared_library':
libdir = 'lib'
if self.toolset != 'target':
libdir = os.path.join('lib', '%s' % self.toolset)
return os.path.join(libdir, filename)
else:
return self.GypPathToUniqueOutput(filename, qualified=False)
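# e.g. on Linux a target-toolset shared library lands in 'lib/libfoo.so', a
# host-toolset one in 'lib/host/libfoo.so', and per-arch mac partial outputs
# go under 'arch/' so they stay out of bundles and the output root.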
def WriteVariableList(self, ninja_file, var, values):
assert not isinstance(values, str)
if values is None:
values = []
ninja_file.variable(var, ' '.join(values))
def WriteNewNinjaRule(self, name, args, description, is_cygwin, env, pool,
depfile=None):
"""Write out a new ninja "rule" statement for a given command.
Returns the name of the new rule, and a copy of |args| with variables
expanded."""
if self.flavor == 'win':
args = [self.msvs_settings.ConvertVSMacros(
arg, self.base_to_build, config=self.config_name)
for arg in args]
description = self.msvs_settings.ConvertVSMacros(
description, config=self.config_name)
elif self.flavor == 'mac':
# |env| is an empty list on non-mac.
args = [gyp.xcode_emulation.ExpandEnvVars(arg, env) for arg in args]
description = gyp.xcode_emulation.ExpandEnvVars(description, env)
# TODO: we shouldn't need to qualify names; we do it because
# currently the ninja rule namespace is global, but it really
# should be scoped to the subninja.
rule_name = self.name
if self.toolset == 'target':
rule_name += '.' + self.toolset
rule_name += '.' + name
rule_name = re.sub('[^a-zA-Z0-9_]', '_', rule_name)
# Remove variable references, but not if they refer to the magic rule
# variables. This is not quite right, as it also protects these for
# actions, not just for rules where they are valid. Good enough.
protect = [ '${root}', '${dirname}', '${source}', '${ext}', '${name}' ]
protect = '(?!' + '|'.join(map(re.escape, protect)) + ')'
description = re.sub(protect + r'\$', '_', description)
# gyp dictates that commands are run from the base directory.
# cd into the directory before running, and adjust paths in
# the arguments to point to the proper locations.
rspfile = None
rspfile_content = None
args = [self.ExpandSpecial(arg, self.base_to_build) for arg in args]
if self.flavor == 'win':
rspfile = rule_name + '.$unique_name.rsp'
# The cygwin case handles this inside the bash sub-shell.
run_in = '' if is_cygwin else ' ' + self.build_to_base
if is_cygwin:
rspfile_content = self.msvs_settings.BuildCygwinBashCommandLine(
args, self.build_to_base)
else:
rspfile_content = gyp.msvs_emulation.EncodeRspFileList(args)
command = ('%s gyp-win-tool action-wrapper $arch ' % sys.executable +
rspfile + run_in)
else:
env = self.ComputeExportEnvString(env)
command = gyp.common.EncodePOSIXShellList(args)
command = 'cd %s; ' % self.build_to_base + env + command
# GYP rules/actions express being no-ops by not touching their outputs.
# Avoid executing downstream dependencies in this case by specifying
# restat=1 to ninja.
self.ninja.rule(rule_name, command, description, depfile=depfile,
restat=True, pool=pool,
rspfile=rspfile, rspfile_content=rspfile_content)
self.ninja.newline()
return rule_name, args
def CalculateVariables(default_variables, params):
"""Calculate additional variables for use in the build (called by gyp)."""
global generator_additional_non_configuration_keys
global generator_additional_path_sections
flavor = gyp.common.GetFlavor(params)
if flavor == 'mac':
default_variables.setdefault('OS', 'mac')
default_variables.setdefault('SHARED_LIB_SUFFIX', '.dylib')
default_variables.setdefault('SHARED_LIB_DIR',
generator_default_variables['PRODUCT_DIR'])
default_variables.setdefault('LIB_DIR',
generator_default_variables['PRODUCT_DIR'])
# Copy additional generator configuration data from Xcode, which is shared
# by the Mac Ninja generator.
import gyp.generator.xcode as xcode_generator
generator_additional_non_configuration_keys = getattr(xcode_generator,
'generator_additional_non_configuration_keys', [])
generator_additional_path_sections = getattr(xcode_generator,
'generator_additional_path_sections', [])
global generator_extra_sources_for_rules
generator_extra_sources_for_rules = getattr(xcode_generator,
'generator_extra_sources_for_rules', [])
elif flavor == 'win':
exts = gyp.MSVSUtil.TARGET_TYPE_EXT
default_variables.setdefault('OS', 'win')
default_variables['EXECUTABLE_SUFFIX'] = '.' + exts['executable']
default_variables['STATIC_LIB_PREFIX'] = ''
default_variables['STATIC_LIB_SUFFIX'] = '.' + exts['static_library']
default_variables['SHARED_LIB_PREFIX'] = ''
default_variables['SHARED_LIB_SUFFIX'] = '.' + exts['shared_library']
# Copy additional generator configuration data from VS, which is shared
# by the Windows Ninja generator.
import gyp.generator.msvs as msvs_generator
generator_additional_non_configuration_keys = getattr(msvs_generator,
'generator_additional_non_configuration_keys', [])
generator_additional_path_sections = getattr(msvs_generator,
'generator_additional_path_sections', [])
gyp.msvs_emulation.CalculateCommonVariables(default_variables, params)
else:
operating_system = flavor
if flavor == 'android':
operating_system = 'linux' # Keep this legacy behavior for now.
default_variables.setdefault('OS', operating_system)
default_variables.setdefault('SHARED_LIB_SUFFIX', '.so')
default_variables.setdefault('SHARED_LIB_DIR',
os.path.join('$!PRODUCT_DIR', 'lib'))
default_variables.setdefault('LIB_DIR',
os.path.join('$!PRODUCT_DIR', 'obj'))
def ComputeOutputDir(params):
"""Returns the path from the toplevel_dir to the build output directory."""
# generator_dir: relative path from pwd to where make puts build files.
# Makes migrating from make to ninja easier; ninja doesn't put anything here.
generator_dir = os.path.relpath(params['options'].generator_output or '.')
# output_dir: relative path from generator_dir to the build directory.
output_dir = params.get('generator_flags', {}).get('output_dir', 'out')
# Relative path from source root to our output files. e.g. "out"
return os.path.normpath(os.path.join(generator_dir, output_dir))
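# e.g. with no generator_output and the default output_dir this returns
# 'out', so per-config build files end up in 'out/Debug', 'out/Release', etc.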
def CalculateGeneratorInputInfo(params):
"""Called by __init__ to initialize generator values based on params."""
# E.g. "out/gypfiles"
toplevel = params['options'].toplevel_dir
qualified_out_dir = os.path.normpath(os.path.join(
toplevel, ComputeOutputDir(params), 'gypfiles'))
global generator_filelist_paths
generator_filelist_paths = {
'toplevel': toplevel,
'qualified_out_dir': qualified_out_dir,
}
def OpenOutput(path, mode='w'):
"""Open |path| for writing, creating directories if necessary."""
gyp.common.EnsureDirExists(path)
return open(path, mode)
def CommandWithWrapper(cmd, wrappers, prog):
wrapper = wrappers.get(cmd, '')
if wrapper:
return wrapper + ' ' + prog
return prog
def GetDefaultConcurrentLinks():
"""Returns a best-guess for a number of concurrent links."""
pool_size = int(os.environ.get('GYP_LINK_CONCURRENCY', 0))
if pool_size:
return pool_size
if sys.platform in ('win32', 'cygwin'):
import ctypes
class MEMORYSTATUSEX(ctypes.Structure):
_fields_ = [
("dwLength", ctypes.c_ulong),
("dwMemoryLoad", ctypes.c_ulong),
("ullTotalPhys", ctypes.c_ulonglong),
("ullAvailPhys", ctypes.c_ulonglong),
("ullTotalPageFile", ctypes.c_ulonglong),
("ullAvailPageFile", ctypes.c_ulonglong),
("ullTotalVirtual", ctypes.c_ulonglong),
("ullAvailVirtual", ctypes.c_ulonglong),
("sullAvailExtendedVirtual", ctypes.c_ulonglong),
]
stat = MEMORYSTATUSEX()
stat.dwLength = ctypes.sizeof(stat)
ctypes.windll.kernel32.GlobalMemoryStatusEx(ctypes.byref(stat))
# VS 2015 uses 20% more working set than VS 2013 and can consume all RAM
# on a 64 GB machine.
mem_limit = max(1, stat.ullTotalPhys / (5 * (2 ** 30))) # total / 5GB
hard_cap = max(1, int(os.environ.get('GYP_LINK_CONCURRENCY_MAX', 2**32)))
return min(mem_limit, hard_cap)
elif sys.platform.startswith('linux'):
if os.path.exists("/proc/meminfo"):
with open("/proc/meminfo") as meminfo:
memtotal_re = re.compile(r'^MemTotal:\s*(\d*)\s*kB')
for line in meminfo:
match = memtotal_re.match(line)
if not match:
continue
# Allow 8 GB per link on Linux because Gold is quite memory hungry.
return max(1, int(match.group(1)) / (8 * (2 ** 20)))
return 1
elif sys.platform == 'darwin':
try:
avail_bytes = int(subprocess.check_output(['sysctl', '-n', 'hw.memsize']))
# A static library debug build of Chromium's unit_tests takes ~2.7GB, so
# 4GB per ld process allows for some more bloat.
return max(1, avail_bytes / (4 * (2 ** 30))) # total / 4GB
except:
return 1
else:
# TODO(scottmg): Implement this for other platforms.
return 1
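# Rough examples of the heuristic above: a 32 GB Windows machine allows
# 32/5 -> 6 concurrent links, a 16 GB Linux machine 16/8 -> 2, and an 8 GB
# mac 8/4 -> 2; GYP_LINK_CONCURRENCY always takes precedence when set.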
def _GetWinLinkRuleNameSuffix(embed_manifest):
"""Returns the suffix used to select an appropriate linking rule depending on
whether the manifest embedding is enabled."""
return '_embed' if embed_manifest else ''
def _AddWinLinkRules(master_ninja, embed_manifest):
"""Adds link rules for Windows platform to |master_ninja|."""
def FullLinkCommand(ldcmd, out, binary_type):
resource_name = {
'exe': '1',
'dll': '2',
}[binary_type]
return '%(python)s gyp-win-tool link-with-manifests $arch %(embed)s ' \
'%(out)s "%(ldcmd)s" %(resname)s $mt $rc "$intermediatemanifest" ' \
'$manifests' % {
'python': sys.executable,
'out': out,
'ldcmd': ldcmd,
'resname': resource_name,
'embed': embed_manifest }
rule_name_suffix = _GetWinLinkRuleNameSuffix(embed_manifest)
use_separate_mspdbsrv = (
int(os.environ.get('GYP_USE_SEPARATE_MSPDBSRV', '0')) != 0)
dlldesc = 'LINK%s(DLL) $binary' % rule_name_suffix.upper()
dllcmd = ('%s gyp-win-tool link-wrapper $arch %s '
'$ld /nologo $implibflag /DLL /OUT:$binary '
'@$binary.rsp' % (sys.executable, use_separate_mspdbsrv))
dllcmd = FullLinkCommand(dllcmd, '$binary', 'dll')
master_ninja.rule('solink' + rule_name_suffix,
description=dlldesc, command=dllcmd,
rspfile='$binary.rsp',
rspfile_content='$libs $in_newline $ldflags',
restat=True,
pool='link_pool')
master_ninja.rule('solink_module' + rule_name_suffix,
description=dlldesc, command=dllcmd,
rspfile='$binary.rsp',
rspfile_content='$libs $in_newline $ldflags',
restat=True,
pool='link_pool')
# Note that ldflags goes at the end so that it has the option of
# overriding default settings earlier in the command line.
exe_cmd = ('%s gyp-win-tool link-wrapper $arch %s '
'$ld /nologo /OUT:$binary @$binary.rsp' %
(sys.executable, use_separate_mspdbsrv))
exe_cmd = FullLinkCommand(exe_cmd, '$binary', 'exe')
master_ninja.rule('link' + rule_name_suffix,
description='LINK%s $binary' % rule_name_suffix.upper(),
command=exe_cmd,
rspfile='$binary.rsp',
rspfile_content='$in_newline $libs $ldflags',
pool='link_pool')
def GenerateOutputForConfig(target_list, target_dicts, data, params,
config_name):
options = params['options']
flavor = gyp.common.GetFlavor(params)
generator_flags = params.get('generator_flags', {})
# build_dir: relative path from source root to our output files.
# e.g. "out/Debug"
build_dir = os.path.normpath(
os.path.join(ComputeOutputDir(params), config_name))
toplevel_build = os.path.join(options.toplevel_dir, build_dir)
master_ninja_file = OpenOutput(os.path.join(toplevel_build, 'build.ninja'))
master_ninja = ninja_syntax.Writer(master_ninja_file, width=120)
# Put build-time support tools in out/{config_name}.
gyp.common.CopyTool(flavor, toplevel_build)
# Grab make settings for CC/CXX.
# The rules are:
# - The priority, from lowest to highest, is: the default gcc/g++, the
#   'make_global_settings' in gyp, then the environment variables.
# - If there is no 'make_global_settings' for CC.host/CXX.host and no
#   'CC_host'/'CXX_host' environment variable, cc_host/cxx_host should be set
#   to cc/cxx.
if flavor == 'win':
ar = 'lib.exe'
# cc and cxx must be set to the correct architecture by overriding with one
# of cl_x86 or cl_x64 below.
cc = 'UNSET'
cxx = 'UNSET'
ld = 'link.exe'
ld_host = '$ld'
else:
ar = 'ar'
cc = 'cc'
cxx = 'c++'
ld = '$cc'
ldxx = '$cxx'
ld_host = '$cc_host'
ldxx_host = '$cxx_host'
ar_host = 'ar'
cc_host = None
cxx_host = None
cc_host_global_setting = None
cxx_host_global_setting = None
clang_cl = None
nm = 'nm'
nm_host = 'nm'
readelf = 'readelf'
readelf_host = 'readelf'
build_file, _, _ = gyp.common.ParseQualifiedTarget(target_list[0])
make_global_settings = data[build_file].get('make_global_settings', [])
build_to_root = gyp.common.InvertRelativePath(build_dir,
options.toplevel_dir)
wrappers = {}
for key, value in make_global_settings:
if key == 'AR':
ar = os.path.join(build_to_root, value)
if key == 'AR.host':
ar_host = os.path.join(build_to_root, value)
if key == 'CC':
cc = os.path.join(build_to_root, value)
if cc.endswith('clang-cl'):
clang_cl = cc
if key == 'CXX':
cxx = os.path.join(build_to_root, value)
if key == 'CC.host':
cc_host = os.path.join(build_to_root, value)
cc_host_global_setting = value
if key == 'CXX.host':
cxx_host = os.path.join(build_to_root, value)
cxx_host_global_setting = value
if key == 'LD':
ld = os.path.join(build_to_root, value)
if key == 'LD.host':
ld_host = os.path.join(build_to_root, value)
if key == 'NM':
nm = os.path.join(build_to_root, value)
if key == 'NM.host':
nm_host = os.path.join(build_to_root, value)
if key == 'READELF':
readelf = os.path.join(build_to_root, value)
if key == 'READELF.host':
readelf_host = os.path.join(build_to_root, value)
if key.endswith('_wrapper'):
wrappers[key[:-len('_wrapper')]] = os.path.join(build_to_root, value)
# Support wrappers from environment variables too.
for key, value in os.environ.iteritems():
if key.lower().endswith('_wrapper'):
key_prefix = key[:-len('_wrapper')]
key_prefix = re.sub(r'\.HOST$', '.host', key_prefix)
wrappers[key_prefix] = os.path.join(build_to_root, value)
if flavor == 'win':
configs = [target_dicts[qualified_target]['configurations'][config_name]
for qualified_target in target_list]
shared_system_includes = None
if not generator_flags.get('ninja_use_custom_environment_files', 0):
shared_system_includes = \
gyp.msvs_emulation.ExtractSharedMSVSSystemIncludes(
configs, generator_flags)
cl_paths = gyp.msvs_emulation.GenerateEnvironmentFiles(
toplevel_build, generator_flags, shared_system_includes, OpenOutput)
for arch, path in cl_paths.iteritems():
if clang_cl:
# If we have selected clang-cl, use that instead.
path = clang_cl
command = CommandWithWrapper('CC', wrappers,
QuoteShellArgument(path, 'win'))
if clang_cl:
# Use clang-cl to cross-compile for x86 or x86_64.
command += (' -m32' if arch == 'x86' else ' -m64')
master_ninja.variable('cl_' + arch, command)
cc = GetEnvironFallback(['CC_target', 'CC'], cc)
master_ninja.variable('cc', CommandWithWrapper('CC', wrappers, cc))
cxx = GetEnvironFallback(['CXX_target', 'CXX'], cxx)
master_ninja.variable('cxx', CommandWithWrapper('CXX', wrappers, cxx))
if flavor == 'win':
master_ninja.variable('ld', ld)
master_ninja.variable('idl', 'midl.exe')
master_ninja.variable('ar', ar)
master_ninja.variable('rc', 'rc.exe')
master_ninja.variable('ml_x86', 'ml.exe')
master_ninja.variable('ml_x64', 'ml64.exe')
master_ninja.variable('mt', 'mt.exe')
else:
master_ninja.variable('ld', CommandWithWrapper('LINK', wrappers, ld))
master_ninja.variable('ldxx', CommandWithWrapper('LINK', wrappers, ldxx))
master_ninja.variable('ar', GetEnvironFallback(['AR_target', 'AR'], ar))
if flavor != 'mac':
# Mac does not use readelf/nm for .TOC generation, so avoid polluting
# the master ninja with extra unused variables.
master_ninja.variable(
'nm', GetEnvironFallback(['NM_target', 'NM'], nm))
master_ninja.variable(
'readelf', GetEnvironFallback(['READELF_target', 'READELF'], readelf))
if generator_supports_multiple_toolsets:
if not cc_host:
cc_host = cc
if not cxx_host:
cxx_host = cxx
master_ninja.variable('ar_host', GetEnvironFallback(['AR_host'], ar_host))
master_ninja.variable('nm_host', GetEnvironFallback(['NM_host'], nm_host))
master_ninja.variable('readelf_host',
GetEnvironFallback(['READELF_host'], readelf_host))
cc_host = GetEnvironFallback(['CC_host'], cc_host)
cxx_host = GetEnvironFallback(['CXX_host'], cxx_host)
# Environment variables may be referenced in 'make_global_settings', e.g.
# ['CC.host', '$(CC)'] or ['CXX.host', '$(CXX)']; expand them here.
if '$(CC)' in cc_host and cc_host_global_setting:
cc_host = cc_host_global_setting.replace('$(CC)', cc)
if '$(CXX)' in cxx_host and cxx_host_global_setting:
cxx_host = cxx_host_global_setting.replace('$(CXX)', cxx)
master_ninja.variable('cc_host',
CommandWithWrapper('CC.host', wrappers, cc_host))
master_ninja.variable('cxx_host',
CommandWithWrapper('CXX.host', wrappers, cxx_host))
if flavor == 'win':
master_ninja.variable('ld_host', ld_host)
else:
master_ninja.variable('ld_host', CommandWithWrapper(
'LINK', wrappers, ld_host))
master_ninja.variable('ldxx_host', CommandWithWrapper(
'LINK', wrappers, ldxx_host))
master_ninja.newline()
master_ninja.pool('link_pool', depth=GetDefaultConcurrentLinks())
master_ninja.newline()
deps = 'msvc' if flavor == 'win' else 'gcc'
if flavor != 'win':
master_ninja.rule(
'cc',
description='CC $out',
command=('$cc -MMD -MF $out.d $defines $includes $cflags $cflags_c '
'$cflags_pch_c -c $in -o $out'),
depfile='$out.d',
deps=deps)
master_ninja.rule(
'cc_s',
description='CC $out',
command=('$cc $defines $includes $cflags $cflags_c '
'$cflags_pch_c -c $in -o $out'))
master_ninja.rule(
'cxx',
description='CXX $out',
command=('$cxx -MMD -MF $out.d $defines $includes $cflags $cflags_cc '
'$cflags_pch_cc -c $in -o $out'),
depfile='$out.d',
deps=deps)
else:
# TODO(scottmg) Separate pdb names is a test to see if it works around
# http://crbug.com/142362. It seems there's a race between the creation of
# the .pdb by the precompiled header step for .cc and the compilation of
# .c files. This should be handled by mspdbsrv, but rarely errors out with
# c1xx : fatal error C1033: cannot open program database
# By making the rules target separate pdb files this might be avoided.
cc_command = ('ninja -t msvc -e $arch ' +
'-- '
'$cc /nologo /showIncludes /FC '
'@$out.rsp /c $in /Fo$out /Fd$pdbname_c ')
cxx_command = ('ninja -t msvc -e $arch ' +
'-- '
'$cxx /nologo /showIncludes /FC '
'@$out.rsp /c $in /Fo$out /Fd$pdbname_cc ')
master_ninja.rule(
'cc',
description='CC $out',
command=cc_command,
rspfile='$out.rsp',
rspfile_content='$defines $includes $cflags $cflags_c',
deps=deps)
master_ninja.rule(
'cxx',
description='CXX $out',
command=cxx_command,
rspfile='$out.rsp',
rspfile_content='$defines $includes $cflags $cflags_cc',
deps=deps)
master_ninja.rule(
'idl',
description='IDL $in',
command=('%s gyp-win-tool midl-wrapper $arch $outdir '
'$tlb $h $dlldata $iid $proxy $in '
'$midl_includes $idlflags' % sys.executable))
master_ninja.rule(
'rc',
description='RC $in',
# Note: $in must be last, otherwise rc.exe complains.
command=('%s gyp-win-tool rc-wrapper '
'$arch $rc $defines $resource_includes $rcflags /fo$out $in' %
sys.executable))
master_ninja.rule(
'asm',
description='ASM $out',
command=('%s gyp-win-tool asm-wrapper '
'$arch $asm $defines $includes $asmflags /c /Fo $out $in' %
sys.executable))
if flavor != 'mac' and flavor != 'win':
master_ninja.rule(
'alink',
description='AR $out',
command='rm -f $out && $ar rcs $arflags $out $in')
master_ninja.rule(
'alink_thin',
description='AR $out',
command='rm -f $out && $ar rcsT $arflags $out $in')
# This allows targets that only need to depend on $lib's API to declare an
# order-only dependency on $lib.TOC and avoid relinking such downstream
# dependencies when $lib changes only in non-public ways.
# The resulting string leaves an uninterpolated %(suffix)s placeholder which
# is filled in by the final substitution below.
mtime_preserving_solink_base = (
'if [ ! -e $lib -o ! -e $lib.TOC ]; then '
'%(solink)s && %(extract_toc)s > $lib.TOC; else '
'%(solink)s && %(extract_toc)s > $lib.tmp && '
'if ! cmp -s $lib.tmp $lib.TOC; then mv $lib.tmp $lib.TOC ; '
'fi; fi'
% { 'solink':
'$ld -shared $ldflags -o $lib -Wl,-soname=$soname %(suffix)s',
'extract_toc':
('{ $readelf -d $lib | grep SONAME ; '
'$nm -gD -f p $lib | cut -f1-2 -d\' \'; }')})
master_ninja.rule(
'solink',
description='SOLINK $lib',
restat=True,
command=mtime_preserving_solink_base % {'suffix': '@$link_file_list'},
rspfile='$link_file_list',
rspfile_content=
'-Wl,--whole-archive $in $solibs -Wl,--no-whole-archive $libs',
pool='link_pool')
master_ninja.rule(
'solink_module',
description='SOLINK(module) $lib',
restat=True,
command=mtime_preserving_solink_base % {'suffix': '@$link_file_list'},
rspfile='$link_file_list',
rspfile_content='-Wl,--start-group $in -Wl,--end-group $solibs $libs',
pool='link_pool')
master_ninja.rule(
'link',
description='LINK $out',
command=('$ld $ldflags -o $out '
'-Wl,--start-group $in -Wl,--end-group $solibs $libs'),
pool='link_pool')
elif flavor == 'win':
master_ninja.rule(
'alink',
description='LIB $out',
command=('%s gyp-win-tool link-wrapper $arch False '
'$ar /nologo /ignore:4221 /OUT:$out @$out.rsp' %
sys.executable),
rspfile='$out.rsp',
rspfile_content='$in_newline $libflags')
_AddWinLinkRules(master_ninja, embed_manifest=True)
_AddWinLinkRules(master_ninja, embed_manifest=False)
else:
master_ninja.rule(
'objc',
description='OBJC $out',
command=('$cc -MMD -MF $out.d $defines $includes $cflags $cflags_objc '
'$cflags_pch_objc -c $in -o $out'),
depfile='$out.d',
deps=deps)
master_ninja.rule(
'objcxx',
description='OBJCXX $out',
command=('$cxx -MMD -MF $out.d $defines $includes $cflags $cflags_objcc '
'$cflags_pch_objcc -c $in -o $out'),
depfile='$out.d',
deps=deps)
master_ninja.rule(
'alink',
description='LIBTOOL-STATIC $out, POSTBUILDS',
command='rm -f $out && '
'./gyp-mac-tool filter-libtool libtool $libtool_flags '
'-static -o $out $in'
'$postbuilds')
master_ninja.rule(
'lipo',
description='LIPO $out, POSTBUILDS',
command='rm -f $out && lipo -create $in -output $out$postbuilds')
master_ninja.rule(
'solipo',
description='SOLIPO $out, POSTBUILDS',
command=(
'rm -f $lib $lib.TOC && lipo -create $in -output $lib$postbuilds &&'
'%(extract_toc)s > $lib.TOC'
% { 'extract_toc':
'{ otool -l $lib | grep LC_ID_DYLIB -A 5; '
'nm -gP $lib | cut -f1-2 -d\' \' | grep -v U$$; true; }'}))
# Record the public interface of $lib in $lib.TOC. See the corresponding
# comment in the posix section above for details.
solink_base = '$ld %(type)s $ldflags -o $lib %(suffix)s'
mtime_preserving_solink_base = (
'if [ ! -e $lib -o ! -e $lib.TOC ] || '
# Always force dependent targets to relink if this library
# reexports something. Handling this correctly would require
# recursive TOC dumping but this is rare in practice, so punt.
'otool -l $lib | grep -q LC_REEXPORT_DYLIB ; then '
'%(solink)s && %(extract_toc)s > $lib.TOC; '
'else '
'%(solink)s && %(extract_toc)s > $lib.tmp && '
'if ! cmp -s $lib.tmp $lib.TOC; then '
'mv $lib.tmp $lib.TOC ; '
'fi; '
'fi'
% { 'solink': solink_base,
'extract_toc':
'{ otool -l $lib | grep LC_ID_DYLIB -A 5; '
'nm -gP $lib | cut -f1-2 -d\' \' | grep -v U$$; true; }'})
solink_suffix = '@$link_file_list$postbuilds'
master_ninja.rule(
'solink',
description='SOLINK $lib, POSTBUILDS',
restat=True,
command=mtime_preserving_solink_base % {'suffix': solink_suffix,
'type': '-shared'},
rspfile='$link_file_list',
rspfile_content='$in $solibs $libs',
pool='link_pool')
master_ninja.rule(
'solink_notoc',
description='SOLINK $lib, POSTBUILDS',
restat=True,
command=solink_base % {'suffix':solink_suffix, 'type': '-shared'},
rspfile='$link_file_list',
rspfile_content='$in $solibs $libs',
pool='link_pool')
master_ninja.rule(
'solink_module',
description='SOLINK(module) $lib, POSTBUILDS',
restat=True,
command=mtime_preserving_solink_base % {'suffix': solink_suffix,
'type': '-bundle'},
rspfile='$link_file_list',
rspfile_content='$in $solibs $libs',
pool='link_pool')
master_ninja.rule(
'solink_module_notoc',
description='SOLINK(module) $lib, POSTBUILDS',
restat=True,
command=solink_base % {'suffix': solink_suffix, 'type': '-bundle'},
rspfile='$link_file_list',
rspfile_content='$in $solibs $libs',
pool='link_pool')
master_ninja.rule(
'link',
description='LINK $out, POSTBUILDS',
command=('$ld $ldflags -o $out '
'$in $solibs $libs$postbuilds'),
pool='link_pool')
master_ninja.rule(
'preprocess_infoplist',
description='PREPROCESS INFOPLIST $out',
command=('$cc -E -P -Wno-trigraphs -x c $defines $in -o $out && '
'plutil -convert xml1 $out $out'))
master_ninja.rule(
'copy_infoplist',
description='COPY INFOPLIST $in',
command='$env ./gyp-mac-tool copy-info-plist $in $out $binary $keys')
master_ninja.rule(
'merge_infoplist',
description='MERGE INFOPLISTS $in',
command='$env ./gyp-mac-tool merge-info-plist $out $in')
master_ninja.rule(
'compile_xcassets',
description='COMPILE XCASSETS $in',
command='$env ./gyp-mac-tool compile-xcassets $keys $in')
master_ninja.rule(
'mac_tool',
description='MACTOOL $mactool_cmd $in',
command='$env ./gyp-mac-tool $mactool_cmd $in $out $binary')
master_ninja.rule(
'package_framework',
description='PACKAGE FRAMEWORK $out, POSTBUILDS',
command='./gyp-mac-tool package-framework $out $version$postbuilds '
'&& touch $out')
if flavor == 'win':
master_ninja.rule(
'stamp',
description='STAMP $out',
command='%s gyp-win-tool stamp $out' % sys.executable)
master_ninja.rule(
'copy',
description='COPY $in $out',
command='%s gyp-win-tool recursive-mirror $in $out' % sys.executable)
else:
master_ninja.rule(
'stamp',
description='STAMP $out',
command='${postbuilds}touch $out')
master_ninja.rule(
'copy',
description='COPY $in $out',
command='rm -rf $out && cp -af $in $out')
master_ninja.newline()
all_targets = set()
for build_file in params['build_files']:
for target in gyp.common.AllTargets(target_list,
target_dicts,
os.path.normpath(build_file)):
all_targets.add(target)
all_outputs = set()
# target_outputs is a map from qualified target name to a Target object.
target_outputs = {}
# target_short_names is a map from target short name to a list of Target
# objects.
target_short_names = {}
# Short names of targets that were skipped because they didn't contain anything
# interesting.
# NOTE: there may be overlap between this and non_empty_target_names.
empty_target_names = set()
# Set of non-empty short target names.
# NOTE: there may be overlap between this and empty_target_names.
non_empty_target_names = set()
for qualified_target in target_list:
# qualified_target is like: third_party/icu/icu.gyp:icui18n#target
build_file, name, toolset = \
gyp.common.ParseQualifiedTarget(qualified_target)
this_make_global_settings = data[build_file].get('make_global_settings', [])
assert make_global_settings == this_make_global_settings, (
"make_global_settings needs to be the same for all targets. %s vs. %s" %
(this_make_global_settings, make_global_settings))
spec = target_dicts[qualified_target]
if flavor == 'mac':
gyp.xcode_emulation.MergeGlobalXcodeSettingsToSpec(data[build_file], spec)
# If build_file is a symlink, we must not follow it because there's a chance
# it could point to a path above toplevel_dir, and we cannot correctly deal
# with that case at the moment.
build_file = gyp.common.RelativePath(build_file, options.toplevel_dir,
False)
qualified_target_for_hash = gyp.common.QualifiedTarget(build_file, name,
toolset)
hash_for_rules = hashlib.md5(qualified_target_for_hash).hexdigest()
base_path = os.path.dirname(build_file)
obj = 'obj'
if toolset != 'target':
obj += '.' + toolset
output_file = os.path.join(obj, base_path, name + '.ninja')
ninja_output = StringIO()
writer = NinjaWriter(hash_for_rules, target_outputs, base_path, build_dir,
ninja_output,
toplevel_build, output_file,
flavor, toplevel_dir=options.toplevel_dir)
target = writer.WriteSpec(spec, config_name, generator_flags)
if ninja_output.tell() > 0:
# Only create files for ninja files that actually have contents.
with OpenOutput(os.path.join(toplevel_build, output_file)) as ninja_file:
ninja_file.write(ninja_output.getvalue())
ninja_output.close()
master_ninja.subninja(output_file)
if target:
if name != target.FinalOutput() and spec['toolset'] == 'target':
target_short_names.setdefault(name, []).append(target)
target_outputs[qualified_target] = target
if qualified_target in all_targets:
all_outputs.add(target.FinalOutput())
non_empty_target_names.add(name)
else:
empty_target_names.add(name)
if target_short_names:
# Write a short name to build this target. This benefits both the
# "build chrome" case and the gyp tests, which expect to be able to
# run actions and build libraries by their short name.
master_ninja.newline()
master_ninja.comment('Short names for targets.')
for short_name in target_short_names:
master_ninja.build(short_name, 'phony', [x.FinalOutput() for x in
target_short_names[short_name]])
# Write phony targets for any empty targets that weren't written yet. As
# short names are not necessarily unique, only do this for short names that
# haven't already been output for another target.
empty_target_names = empty_target_names - non_empty_target_names
if empty_target_names:
master_ninja.newline()
master_ninja.comment('Empty targets (output for completeness).')
for name in sorted(empty_target_names):
master_ninja.build(name, 'phony')
if all_outputs:
master_ninja.newline()
master_ninja.build('all', 'phony', list(all_outputs))
master_ninja.default(generator_flags.get('default_target', 'all'))
master_ninja_file.close()
def PerformBuild(data, configurations, params):
options = params['options']
for config in configurations:
builddir = os.path.join(options.toplevel_dir, 'out', config)
arguments = ['ninja', '-C', builddir]
print 'Building [%s]: %s' % (config, arguments)
subprocess.check_call(arguments)
def CallGenerateOutputForConfig(arglist):
# Ignore the interrupt signal so that the parent process catches it and
# kills all multiprocessing children.
signal.signal(signal.SIGINT, signal.SIG_IGN)
(target_list, target_dicts, data, params, config_name) = arglist
GenerateOutputForConfig(target_list, target_dicts, data, params, config_name)
def GenerateOutput(target_list, target_dicts, data, params):
# Update target_dicts for iOS device builds.
target_dicts = gyp.xcode_emulation.CloneConfigurationForDeviceAndEmulator(
target_dicts)
user_config = params.get('generator_flags', {}).get('config', None)
if gyp.common.GetFlavor(params) == 'win':
target_list, target_dicts = MSVSUtil.ShardTargets(target_list, target_dicts)
target_list, target_dicts = MSVSUtil.InsertLargePdbShims(
target_list, target_dicts, generator_default_variables)
if user_config:
GenerateOutputForConfig(target_list, target_dicts, data, params,
user_config)
else:
config_names = target_dicts[target_list[0]]['configurations'].keys()
if params['parallel']:
try:
pool = multiprocessing.Pool(len(config_names))
arglists = []
for config_name in config_names:
arglists.append(
(target_list, target_dicts, data, params, config_name))
pool.map(CallGenerateOutputForConfig, arglists)
except KeyboardInterrupt, e:
pool.terminate()
raise e
else:
for config_name in config_names:
GenerateOutputForConfig(target_list, target_dicts, data, params,
config_name)
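# Hedged sketch (not part of gyp itself): a self-contained illustration of the
# per-config parallel dispatch used by GenerateOutput above. Each worker
# ignores SIGINT so the parent process can catch KeyboardInterrupt and
# terminate the pool; _DemoGenerateForConfig is a hypothetical stand-in for
# GenerateOutputForConfig and reuses the module's multiprocessing and signal
# imports.
def _DemoGenerateForConfig(config_name):
  signal.signal(signal.SIGINT, signal.SIG_IGN)
  return 'would write out/%s/build.ninja' % config_name

def _DemoParallelGenerate(config_names):
  pool = multiprocessing.Pool(len(config_names))
  try:
    return pool.map(_DemoGenerateForConfig, config_names)
  except KeyboardInterrupt, e:
    pool.terminate()
    raise e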
|
unicri/edx-platform
|
refs/heads/master
|
lms/djangoapps/notification_prefs/tests.py
|
137
|
import json
from django.contrib.auth.models import AnonymousUser
from django.core.exceptions import PermissionDenied
from django.http import Http404
from django.test import TestCase
from django.test.client import RequestFactory
from django.test.utils import override_settings
from mock import Mock, patch
from notification_prefs import NOTIFICATION_PREF_KEY
from notification_prefs.views import ajax_enable, ajax_disable, ajax_status, set_subscription, UsernameCipher
from student.tests.factories import UserFactory
from edxmako.tests import mako_middleware_process_request
from openedx.core.djangoapps.user_api.models import UserPreference
from util.testing import UrlResetMixin
@override_settings(SECRET_KEY="test secret key")
class NotificationPrefViewTest(UrlResetMixin, TestCase):
INITIALIZATION_VECTOR = "\x00" * 16
@patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
def setUp(self):
super(NotificationPrefViewTest, self).setUp()
self.user = UserFactory.create(username="testuser")
# Tokens are intentionally hard-coded instead of computed to help us
# avoid breaking existing links.
self.tokens = {
self.user: "AAAAAAAAAAAAAAAAAAAAAA8mMQo96FZfb1YKv1R5X6s=",
# Username with length equal to AES block length to test padding
UserFactory.create(username="sixteencharsuser"):
"AAAAAAAAAAAAAAAAAAAAAPxPWCuI2Ay9TATBVnfw7eIj-hUh6erQ_-VkbDqHqm8D",
# Even longer username
UserFactory.create(username="thisusernameissoveryverylong"):
"AAAAAAAAAAAAAAAAAAAAAPECbYqPI7_W4mRF8LbTaHuHt3tNXPggZ1Bke-zDyEiZ",
# Non-ASCII username
UserFactory.create(username=u"\u4e2d\u56fd"):
"AAAAAAAAAAAAAAAAAAAAAMjfGAhZKIZsI3L-Z7nflTA="
}
self.request_factory = RequestFactory()
def create_prefs(self):
"""Create all test preferences in the database"""
for (user, token) in self.tokens.items():
UserPreference.objects.create(user=user, key=NOTIFICATION_PREF_KEY, value=token)
def assertPrefValid(self, user):
"""Ensure that the correct preference for the user is persisted"""
pref = UserPreference.objects.get(user=user, key=NOTIFICATION_PREF_KEY)
self.assertTrue(pref) # check exists and only 1 (.get)
# now coerce username to utf-8 encoded str, since we test with non-ascii unicode above and
# the unittest framework has a hard time coercing to unicode.
# decrypt also can't take a unicode input, so coerce its input to str
self.assertEqual(str(user.username.encode('utf-8')), UsernameCipher().decrypt(str(pref.value)))
def assertNotPrefExists(self, user):
"""Ensure that the user does not have a persisted preference"""
self.assertFalse(
UserPreference.objects.filter(user=user, key=NOTIFICATION_PREF_KEY).exists()
)
# AJAX status view
def test_ajax_status_get_0(self):
request = self.request_factory.get("dummy")
request.user = self.user
response = ajax_status(request)
self.assertEqual(response.status_code, 200)
self.assertEqual(json.loads(response.content), {"status": 0})
def test_ajax_status_get_1(self):
self.create_prefs()
request = self.request_factory.get("dummy")
request.user = self.user
response = ajax_status(request)
self.assertEqual(response.status_code, 200)
self.assertEqual(json.loads(response.content), {"status": 1})
def test_ajax_status_post(self):
request = self.request_factory.post("dummy")
request.user = self.user
response = ajax_status(request)
self.assertEqual(response.status_code, 405)
def test_ajax_status_anon_user(self):
request = self.request_factory.get("dummy")
request.user = AnonymousUser()
self.assertRaises(PermissionDenied, ajax_status, request)
# AJAX enable view
def test_ajax_enable_get(self):
request = self.request_factory.get("dummy")
request.user = self.user
response = ajax_enable(request)
self.assertEqual(response.status_code, 405)
self.assertNotPrefExists(self.user)
def test_ajax_enable_anon_user(self):
request = self.request_factory.post("dummy")
request.user = AnonymousUser()
self.assertRaises(PermissionDenied, ajax_enable, request)
self.assertNotPrefExists(self.user)
@patch("Crypto.Random.new")
def test_ajax_enable_success(self, mock_random_new):
mock_stream = Mock()
mock_stream.read.return_value = self.INITIALIZATION_VECTOR
mock_random_new.return_value = mock_stream
def test_user(user):
request = self.request_factory.post("dummy")
request.user = user
response = ajax_enable(request)
self.assertEqual(response.status_code, 204)
self.assertPrefValid(user)
for user in self.tokens.keys():
test_user(user)
def test_ajax_enable_already_enabled(self):
self.create_prefs()
request = self.request_factory.post("dummy")
request.user = self.user
response = ajax_enable(request)
self.assertEqual(response.status_code, 204)
self.assertPrefValid(self.user)
def test_ajax_enable_distinct_values(self):
request = self.request_factory.post("dummy")
request.user = self.user
ajax_enable(request)
other_user = UserFactory.create()
request.user = other_user
ajax_enable(request)
self.assertNotEqual(
UserPreference.objects.get(user=self.user, key=NOTIFICATION_PREF_KEY).value,
UserPreference.objects.get(user=other_user, key=NOTIFICATION_PREF_KEY).value
)
# AJAX disable view
def test_ajax_disable_get(self):
self.create_prefs()
request = self.request_factory.get("dummy")
request.user = self.user
response = ajax_disable(request)
self.assertEqual(response.status_code, 405)
self.assertPrefValid(self.user)
def test_ajax_disable_anon_user(self):
self.create_prefs()
request = self.request_factory.post("dummy")
request.user = AnonymousUser()
self.assertRaises(PermissionDenied, ajax_disable, request)
self.assertPrefValid(self.user)
def test_ajax_disable_success(self):
self.create_prefs()
request = self.request_factory.post("dummy")
request.user = self.user
response = ajax_disable(request)
self.assertEqual(response.status_code, 204)
self.assertNotPrefExists(self.user)
def test_ajax_disable_already_disabled(self):
request = self.request_factory.post("dummy")
request.user = self.user
response = ajax_disable(request)
self.assertEqual(response.status_code, 204)
self.assertNotPrefExists(self.user)
# Unsubscribe view
def test_unsubscribe_post(self):
request = self.request_factory.post("dummy")
response = set_subscription(request, "dummy", subscribe=False)
self.assertEqual(response.status_code, 405)
def test_unsubscribe_invalid_token(self):
def test_invalid_token(token, message):
request = self.request_factory.get("dummy")
self.assertRaisesRegexp(Http404, "^{}$".format(message), set_subscription, request, token, False)
# Invalid base64 encoding
test_invalid_token("ZOMG INVALID BASE64 CHARS!!!", "base64url")
test_invalid_token("Non-ASCII\xff", "base64url")
test_invalid_token(self.tokens[self.user][:-1], "base64url")
# Token not long enough to contain initialization vector
test_invalid_token("AAAAAAAAAAA=", "initialization_vector")
# Token length not a multiple of AES block length
test_invalid_token(self.tokens[self.user][:-4], "aes")
# Invalid padding (ends in 0 byte)
# Encrypted value: "testuser" + "\x00" * 8
test_invalid_token("AAAAAAAAAAAAAAAAAAAAAMoazRI7ePLjEWXN1N7keLw=", "padding")
# Invalid padding (ends in byte > 16)
# Encrypted value: "testusertestuser"
test_invalid_token("AAAAAAAAAAAAAAAAAAAAAC6iLXGhjkFytJoJSBJZzJ4=", "padding")
# Invalid padding (entire string is padding)
# Encrypted value: "\x10" * 16
test_invalid_token("AAAAAAAAAAAAAAAAAAAAANRGw8HDEmlcLVFawgY9wI8=", "padding")
# Nonexistent user
# Encrypted value: "nonexistentuser\x01"
test_invalid_token("AAAAAAAAAAAAAAAAAAAAACpyUxTGIrUjnpuUsNi7mAY=", "username")
def test_unsubscribe_success(self):
self.create_prefs()
def test_user(user):
request = self.request_factory.get("dummy")
request.user = AnonymousUser()
mako_middleware_process_request(request)
response = set_subscription(request, self.tokens[user], subscribe=False)
self.assertEqual(response.status_code, 200)
self.assertNotPrefExists(user)
for user in self.tokens.keys():
test_user(user)
def test_unsubscribe_twice(self):
self.create_prefs()
request = self.request_factory.get("dummy")
request.user = AnonymousUser()
mako_middleware_process_request(request)
set_subscription(request, self.tokens[self.user], False)
response = set_subscription(request, self.tokens[self.user], subscribe=False)
self.assertEqual(response.status_code, 200)
self.assertNotPrefExists(self.user)
def test_resubscribe_success(self):
def test_user(user):
# start without a pref key
self.assertFalse(UserPreference.objects.filter(user=user, key=NOTIFICATION_PREF_KEY))
request = self.request_factory.get("dummy")
request.user = AnonymousUser()
mako_middleware_process_request(request)
response = set_subscription(request, self.tokens[user], subscribe=True)
self.assertEqual(response.status_code, 200)
self.assertPrefValid(user)
for user in self.tokens.keys():
test_user(user)
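# Hedged sketch (an assumption, not taken from UsernameCipher itself): the
# invalid-token cases above are consistent with PKCS#7-style padding over a
# 16-byte AES block -- the final byte gives the pad length (1-16), every pad
# byte must equal that length, and stripping the pad must leave a non-empty
# username. _demo_unpad is a hypothetical helper, not the production code.
def _demo_unpad(decrypted, block_size=16):
    pad_len = ord(decrypted[-1])
    if not 1 <= pad_len <= block_size:
        raise ValueError("padding")
    if decrypted[-pad_len:] != chr(pad_len) * pad_len:
        raise ValueError("padding")
    if pad_len == len(decrypted):
        raise ValueError("padding")  # the entire string was padding
    return decrypted[:-pad_len]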
|
geokala/cloudify-agent
|
refs/heads/master
|
system_tests/resources/ssh-agent-blueprint/plugins/mock-plugin/mock_plugin/tasks.py
|
5
|
#########
# Copyright (c) 2013 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from cloudify.decorators import operation
@operation
def run(**_):
pass
@operation
def get_env_variable(env_variable, **_):
return os.environ[env_variable]
|
pabloborrego93/edx-platform
|
refs/heads/master
|
openedx/core/djangoapps/session_inactivity_timeout/middleware.py
|
228
|
"""
Middleware to auto-expire inactive sessions after N seconds, which is configurable in
settings.
To enable this feature, set in a settings.py:
SESSION_INACTIVITY_TIMEOUT_IN_SECONDS = 300
This was taken from StackOverflow (http://stackoverflow.com/questions/14830669/how-to-expire-django-session-in-5minutes)
"""
from datetime import datetime, timedelta
from django.conf import settings
from django.contrib import auth
LAST_TOUCH_KEYNAME = 'SessionInactivityTimeout:last_touch'
class SessionInactivityTimeout(object):
"""
Middleware class to keep track of activity on a given session
"""
def process_request(self, request):
"""
Standard entry point for processing requests in Django
"""
if not hasattr(request, "user") or not request.user.is_authenticated():
# Can't log out if not logged in
return
timeout_in_seconds = getattr(settings, "SESSION_INACTIVITY_TIMEOUT_IN_SECONDS", None)
# Do we have this feature enabled?
if timeout_in_seconds:
# what time is it now?
utc_now = datetime.utcnow()
# Get the last time user made a request to server, which is stored in session data
last_touch = request.session.get(LAST_TOUCH_KEYNAME)
# have we stored a 'last visited' in session? NOTE: first time access after login
# this key will not be present in the session data
if last_touch:
# compute the delta since last time user came to the server
time_since_last_activity = utc_now - last_touch
# did we exceed the timeout limit?
if time_since_last_activity > timedelta(seconds=timeout_in_seconds):
# yes? Then log the user out
del request.session[LAST_TOUCH_KEYNAME]
auth.logout(request)
return
request.session[LAST_TOUCH_KEYNAME] = utc_now
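# Hedged usage sketch (an assumption, not part of this module): enabling the
# timeout in a Django settings file might look like the fragment below; the
# middleware ordering shown is illustrative, not prescriptive.
#
#   SESSION_INACTIVITY_TIMEOUT_IN_SECONDS = 300
#   MIDDLEWARE_CLASSES += (
#       'openedx.core.djangoapps.session_inactivity_timeout.middleware.SessionInactivityTimeout',
#   )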
|
mne-tools/mne-python
|
refs/heads/main
|
mne/utils/fetching.py
|
8
|
# -*- coding: utf-8 -*-
"""File downloading functions."""
# Authors: Alexandre Gramfort <[email protected]>
#
# License: BSD (3-clause)
import os
import shutil
import time
from .progressbar import ProgressBar
from .numerics import hashfunc
from .misc import sizeof_fmt
from ._logging import logger, verbose
# Adapted from nilearn
def _get_http(url, temp_file_name, initial_size, timeout, verbose_bool):
"""Safely (resume a) download to a file from http(s)."""
from urllib import request
from urllib.error import HTTPError, URLError
# Actually do the reading
response = None
extra = ''
if initial_size > 0:
logger.debug(' Resuming at %s' % (initial_size,))
req = request.Request(
url, headers={'Range': 'bytes=%s-' % (initial_size,)})
try:
response = request.urlopen(req, timeout=timeout)
content_range = response.info().get('Content-Range', None)
if (content_range is None or not content_range.startswith(
'bytes %s-' % (initial_size,))):
raise IOError('Server does not support resuming')
except (KeyError, HTTPError, URLError, IOError):
initial_size = 0
response = None
else:
extra = ', resuming at %s' % (sizeof_fmt(initial_size),)
if response is None:
response = request.urlopen(request.Request(url), timeout=timeout)
file_size = int(response.headers.get('Content-Length', '0').strip())
file_size += initial_size
url = response.geturl()
logger.info('Downloading %s (%s%s)' % (url, sizeof_fmt(file_size), extra))
del url
mode = 'ab' if initial_size > 0 else 'wb'
progress = ProgressBar(file_size, initial_size, unit='B',
mesg='Downloading', unit_scale=True,
unit_divisor=1024)
del file_size
chunk_size = 8192 # 2 ** 13
with open(temp_file_name, mode) as local_file:
while True:
t0 = time.time()
chunk = response.read(chunk_size)
dt = time.time() - t0
if dt < 0.01:
chunk_size *= 2
elif dt > 0.1 and chunk_size > 8192:
chunk_size = chunk_size // 2
if not chunk:
break
local_file.write(chunk)
progress.update_with_increment_value(len(chunk))
@verbose
def _fetch_file(url, file_name, print_destination=True, resume=True,
hash_=None, timeout=30., hash_type='md5', verbose=None):
"""Load requested file, downloading it if needed or requested.
Parameters
----------
url : string
The url of the file to be downloaded.
file_name : string
Name, along with the path, of where the downloaded file will be saved.
print_destination : bool, optional
If true, the destination where the file was saved will be printed after
the download finishes.
resume : bool, optional
If true, try to resume partially downloaded files.
hash_ : str | None
The hash of the file to check. If None, no checking is
performed.
timeout : float
The URL open timeout.
hash_type : str
The type of hashing to use, such as "md5" or "sha1".
%(verbose)s
"""
# Adapted from NISL:
# https://github.com/nisl/tutorial/blob/master/nisl/datasets.py
from urllib import parse
if hash_ is not None and (not isinstance(hash_, str) or
len(hash_) != 32) and hash_type == 'md5':
raise ValueError('Bad hash value given, should be a 32-character '
'string:\n%s' % (hash_,))
temp_file_name = file_name + ".part"
verbose_bool = (logger.level <= 20) # 20 is info
scheme = parse.urlparse(url).scheme
if scheme not in ('http', 'https'):
raise NotImplementedError('Cannot use scheme %r' % (scheme,))
try:
# Triage resume
if not os.path.exists(temp_file_name):
resume = False
if resume:
with open(temp_file_name, 'rb', buffering=0) as local_file:
local_file.seek(0, 2)
initial_size = local_file.tell()
del local_file
else:
initial_size = 0
_get_http(url, temp_file_name, initial_size, timeout, verbose_bool)
# check hash sum eg md5sum
if hash_ is not None:
logger.info('Verifying hash %s.' % (hash_,))
hashsum = hashfunc(temp_file_name, hash_type=hash_type)
if hash_ != hashsum:
raise RuntimeError('Hash mismatch for downloaded file %s, '
'expected %s but got %s'
% (temp_file_name, hash_, hashsum))
shutil.move(temp_file_name, file_name)
if print_destination is True:
logger.info('File saved as %s.\n' % file_name)
except Exception:
logger.error('Error while fetching file %s.'
' Dataset fetching aborted.' % url)
raise
def _url_to_local_path(url, path):
"""Mirror a url path in a local destination (keeping folder structure)."""
from urllib import parse, request
destination = parse.urlparse(url).path
# First char should be '/', and it needs to be discarded
if len(destination) < 2 or destination[0] != '/':
raise ValueError('Invalid URL')
destination = os.path.join(path, request.url2pathname(destination)[1:])
return destination
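# Hedged usage sketch (not part of the original module): one way the two
# private helpers above might be combined; the URL and destination directory
# are placeholders, not real datasets.
def _demo_fetch(url='https://example.com/mne-data/sample.fif', dest='/tmp/mne'):
    """Mirror ``url`` under ``dest`` and download it with resume support."""
    target = _url_to_local_path(url, dest)
    os.makedirs(os.path.dirname(target), exist_ok=True)
    _fetch_file(url, target, resume=True, timeout=30.)
    return target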
|
kenshay/ImageScripter
|
refs/heads/master
|
ProgramData/SystemFiles/Python/Lib/site-packages/pip/_vendor/chardet/langturkishmodel.py
|
269
|
# -*- coding: utf-8 -*-
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Özgür Baskın - Turkish Language Model
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# 255: Control characters that usually do not exist in any text
# 254: Carriage/Return
# 253: symbols (punctuation) that do not belong to words
# 252: 0 - 9
# Character Mapping Table:
Latin5_TurkishCharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
255, 23, 37, 47, 39, 29, 52, 36, 45, 53, 60, 16, 49, 20, 46, 42,
48, 69, 44, 35, 31, 51, 38, 62, 65, 43, 56,255,255,255,255,255,
255, 1, 21, 28, 12, 2, 18, 27, 25, 3, 24, 10, 5, 13, 4, 15,
26, 64, 7, 8, 9, 14, 32, 57, 58, 11, 22,255,255,255,255,255,
180,179,178,177,176,175,174,173,172,171,170,169,168,167,166,165,
164,163,162,161,160,159,101,158,157,156,155,154,153,152,151,106,
150,149,148,147,146,145,144,100,143,142,141,140,139,138,137,136,
94, 80, 93,135,105,134,133, 63,132,131,130,129,128,127,126,125,
124,104, 73, 99, 79, 85,123, 54,122, 98, 92,121,120, 91,103,119,
68,118,117, 97,116,115, 50, 90,114,113,112,111, 55, 41, 40, 86,
89, 70, 59, 78, 71, 82, 88, 33, 77, 66, 84, 83,110, 75, 61, 96,
30, 67,109, 74, 87,102, 34, 95, 81,108, 76, 72, 17, 6, 19,107,
)
TurkishLangModel = (
3,2,3,3,3,1,3,3,3,3,3,3,3,3,2,1,1,3,3,1,3,3,0,3,3,3,3,3,0,3,1,3,
3,2,1,0,0,1,1,0,0,0,1,0,0,1,1,1,1,0,0,0,0,0,0,0,2,2,0,0,1,0,0,1,
3,2,2,3,3,0,3,3,3,3,3,3,3,2,3,1,0,3,3,1,3,3,0,3,3,3,3,3,0,3,0,3,
3,1,1,0,1,0,1,0,0,0,0,0,0,1,1,1,1,0,0,0,0,0,0,0,2,2,0,0,0,1,0,1,
3,3,2,3,3,0,3,3,3,3,3,3,3,2,3,1,1,3,3,0,3,3,1,2,3,3,3,3,0,3,0,3,
3,1,1,0,0,0,1,0,0,0,0,1,1,0,1,2,1,0,0,0,1,0,0,0,0,2,0,0,0,0,0,1,
3,3,3,3,3,3,2,3,3,3,3,3,3,3,3,1,3,3,2,0,3,2,1,2,2,1,3,3,0,0,0,2,
2,2,0,1,0,0,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1,1,0,1,0,0,1,
3,3,3,2,3,3,1,2,3,3,3,3,3,3,3,1,3,2,1,0,3,2,0,1,2,3,3,2,1,0,0,2,
2,1,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,2,0,2,0,0,0,
1,0,1,3,3,1,3,3,3,3,3,3,3,1,2,0,0,2,3,0,2,3,0,0,2,2,2,3,0,3,0,1,
2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,0,3,3,3,0,3,2,0,2,3,2,3,3,1,0,0,2,
3,2,0,0,1,0,0,0,0,0,0,2,0,0,1,0,0,0,0,0,0,0,0,0,1,1,1,0,2,0,0,1,
3,3,3,2,3,3,2,3,3,3,3,2,3,3,3,0,3,3,0,0,2,1,0,0,2,3,2,2,0,0,0,2,
2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,0,0,0,1,0,1,0,2,0,0,1,
3,3,3,2,3,3,3,3,3,3,3,2,3,3,3,0,3,2,0,1,3,2,1,1,3,2,3,2,1,0,0,2,
2,2,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1,0,0,0,0,0,
3,3,3,2,3,3,3,3,3,3,3,2,3,3,3,0,3,2,2,0,2,3,0,0,2,2,2,2,0,0,0,2,
3,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,2,0,1,0,0,0,
3,3,3,3,3,3,3,2,2,2,2,3,2,3,3,0,3,3,1,1,2,2,0,0,2,2,3,2,0,0,1,3,
0,3,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1,0,0,0,0,1,
3,3,3,2,3,3,3,2,1,2,2,3,2,3,3,0,3,2,0,0,1,1,0,1,1,2,1,2,0,0,0,1,
0,3,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1,0,1,0,0,0,
3,3,3,2,3,3,2,3,2,2,2,3,3,3,3,1,3,1,1,0,3,2,1,1,3,3,2,3,1,0,0,1,
1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,2,0,0,1,
3,2,2,3,3,0,3,3,3,3,3,3,3,2,2,1,0,3,3,1,3,3,0,1,3,3,2,3,0,3,0,3,
2,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,
2,2,2,3,3,0,3,3,3,3,3,3,3,3,3,0,0,3,2,0,3,3,0,3,2,3,3,3,0,3,1,3,
2,0,0,0,0,0,0,0,0,0,0,1,0,1,2,0,1,0,0,0,0,0,0,0,2,2,0,0,1,0,0,1,
3,3,3,1,2,3,3,1,0,0,1,0,0,3,3,2,3,0,0,2,0,0,2,0,2,0,0,0,2,0,2,0,
0,3,1,0,1,0,0,0,2,2,1,0,1,1,2,1,2,2,2,0,2,1,1,0,0,0,2,0,0,0,0,0,
1,2,1,3,3,0,3,3,3,3,3,2,3,0,0,0,0,2,3,0,2,3,1,0,2,3,1,3,0,3,0,2,
3,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,1,3,3,2,2,3,2,2,0,1,2,3,0,1,2,1,0,1,0,0,0,1,0,2,2,0,0,0,1,
1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,1,0,0,1,0,0,0,
3,3,3,1,3,3,1,1,3,3,1,1,3,3,1,0,2,1,2,0,2,1,0,0,1,1,2,1,0,0,0,2,
2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,1,0,2,1,3,0,0,2,0,0,3,3,0,3,0,0,1,0,1,2,0,0,1,1,2,2,0,1,0,
0,1,2,1,1,0,1,0,1,1,1,1,1,0,1,1,1,2,2,1,2,0,1,0,0,0,0,0,0,1,0,0,
3,3,3,2,3,2,3,3,0,2,2,2,3,3,3,0,3,0,0,0,2,2,0,1,2,1,1,1,0,0,0,1,
0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,
3,3,3,3,3,3,2,1,2,2,3,3,3,3,2,0,2,0,0,0,2,2,0,0,2,1,3,3,0,0,1,1,
1,1,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1,0,0,0,
1,1,2,3,3,0,3,3,3,3,3,3,2,2,0,2,0,2,3,2,3,2,2,2,2,2,2,2,1,3,2,3,
2,0,2,1,2,2,2,2,1,1,2,2,1,2,2,1,2,0,0,2,1,1,0,2,1,0,0,1,0,0,0,1,
2,3,3,1,1,1,0,1,1,1,2,3,2,1,1,0,0,0,0,0,0,0,0,0,0,1,0,1,0,0,0,0,
0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,2,2,2,3,2,3,2,2,1,3,3,3,0,2,1,2,0,2,1,0,0,1,1,1,1,1,0,0,1,
2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1,0,2,0,1,0,0,0,
3,3,3,2,3,3,3,3,3,2,3,1,2,3,3,1,2,0,0,0,0,0,0,0,3,2,1,1,0,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,
3,3,3,2,2,3,3,2,1,1,1,1,1,3,3,0,3,1,0,0,1,1,0,0,3,1,2,1,0,0,0,0,
0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,
3,3,3,2,2,3,2,2,2,3,2,1,1,3,3,0,3,0,0,0,0,1,0,0,3,1,1,2,0,0,0,1,
1,0,0,1,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
1,1,1,3,3,0,3,3,3,3,3,2,2,2,1,2,0,2,1,2,2,1,1,0,1,2,2,2,2,2,2,2,
0,0,2,1,2,1,2,1,0,1,1,3,1,2,1,1,2,0,0,2,0,1,0,1,0,1,0,0,0,1,0,1,
3,3,3,1,3,3,3,0,1,1,0,2,2,3,1,0,3,0,0,0,1,0,0,0,1,0,0,1,0,1,0,0,
1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,2,0,0,2,2,1,0,0,1,0,0,3,3,1,3,0,0,1,1,0,2,0,3,0,0,0,2,0,1,1,
0,1,2,0,1,2,2,0,2,2,2,2,1,0,2,1,1,0,2,0,2,1,2,0,0,0,0,0,0,0,0,0,
3,3,3,1,3,2,3,2,0,2,2,2,1,3,2,0,2,1,2,0,1,2,0,0,1,0,2,2,0,0,0,2,
1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,0,1,0,0,0,
3,3,3,0,3,3,1,1,2,3,1,0,3,2,3,0,3,0,0,0,1,0,0,0,1,0,1,0,0,0,0,0,
1,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,3,3,0,3,3,2,3,3,2,2,0,0,0,0,1,2,0,1,3,0,0,0,3,1,1,0,3,0,2,
2,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,1,2,2,1,0,3,1,1,1,1,3,3,2,3,0,0,1,0,1,2,0,2,2,0,2,2,0,2,1,
0,2,2,1,1,1,1,0,2,1,1,0,1,1,1,1,2,1,2,1,2,0,1,0,1,0,0,0,0,0,0,0,
3,3,3,0,1,1,3,0,0,1,1,0,0,2,2,0,3,0,0,1,1,0,1,0,0,0,0,0,2,0,0,0,
0,3,1,0,1,0,1,0,2,0,0,1,0,1,0,1,1,1,2,1,1,0,2,0,0,0,0,0,0,0,0,0,
3,3,3,0,2,0,2,0,1,1,1,0,0,3,3,0,2,0,0,1,0,0,2,1,1,0,1,0,1,0,1,0,
0,2,0,1,2,0,2,0,2,1,1,0,1,0,2,1,1,0,2,1,1,0,1,0,0,0,1,1,0,0,0,0,
3,2,3,0,1,0,0,0,0,0,0,0,0,1,2,0,1,0,0,1,0,0,1,0,0,0,0,0,2,0,0,0,
0,0,1,1,0,0,1,0,1,0,0,1,0,0,0,2,1,0,1,0,2,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,0,0,2,3,0,0,1,0,1,0,2,3,2,3,0,0,1,3,0,2,1,0,0,0,0,2,0,1,0,
0,2,1,0,0,1,1,0,2,1,0,0,1,0,0,1,1,0,1,1,2,0,1,0,0,0,0,1,0,0,0,0,
3,2,2,0,0,1,1,0,0,0,0,0,0,3,1,1,1,0,0,0,0,0,1,0,0,0,0,0,2,0,1,0,
0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,1,0,1,0,0,0,0,0,0,0,0,0,
0,0,0,3,3,0,2,3,2,2,1,2,2,1,1,2,0,1,3,2,2,2,0,0,2,2,0,0,0,1,2,1,
3,0,2,1,1,0,1,1,1,0,1,2,2,2,1,1,2,0,0,0,0,1,0,1,1,0,0,0,0,0,0,0,
0,1,1,2,3,0,3,3,3,2,2,2,2,1,0,1,0,1,0,1,2,2,0,0,2,2,1,3,1,1,2,1,
0,0,1,1,2,0,1,1,0,0,1,2,0,2,1,1,2,0,0,1,0,0,0,1,0,1,0,1,0,0,0,0,
3,3,2,0,0,3,1,0,0,0,0,0,0,3,2,1,2,0,0,1,0,0,2,0,0,0,0,0,2,0,1,0,
0,2,1,1,0,0,1,0,1,2,0,0,1,1,0,0,2,1,1,1,1,0,2,0,0,0,0,0,0,0,0,0,
3,3,2,0,0,1,0,0,0,0,1,0,0,3,3,2,2,0,0,1,0,0,2,0,1,0,0,0,2,0,1,0,
0,0,1,1,0,0,2,0,2,1,0,0,1,1,2,1,2,0,2,1,2,1,1,1,0,0,1,1,0,0,0,0,
3,3,2,0,0,2,2,0,0,0,1,1,0,2,2,1,3,1,0,1,0,1,2,0,0,0,0,0,1,0,1,0,
0,1,1,0,0,0,0,0,1,0,0,1,0,0,0,1,1,0,1,0,1,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,2,0,0,0,1,0,0,1,0,0,2,3,1,2,0,0,1,0,0,2,0,0,0,1,0,2,0,2,0,
0,1,1,2,2,1,2,0,2,1,1,0,0,1,1,0,1,1,1,1,2,1,1,0,0,0,0,0,0,0,0,0,
3,3,3,0,2,1,2,1,0,0,1,1,0,3,3,1,2,0,0,1,0,0,2,0,2,0,1,1,2,0,0,0,
0,0,1,1,1,1,2,0,1,1,0,1,1,1,1,0,0,0,1,1,1,0,1,0,0,0,1,0,0,0,0,0,
3,3,3,0,2,2,3,2,0,0,1,0,0,2,3,1,0,0,0,0,0,0,2,0,2,0,0,0,2,0,0,0,
0,1,1,0,0,0,1,0,0,1,0,1,1,0,1,0,1,1,1,0,1,0,0,0,0,0,0,0,0,0,0,0,
3,2,3,0,0,0,0,0,0,0,1,0,0,2,2,2,2,0,0,1,0,0,2,0,0,0,0,0,2,0,1,0,
0,0,2,1,1,0,1,0,2,1,1,0,0,1,1,2,1,0,2,0,2,0,1,0,0,0,2,0,0,0,0,0,
0,0,0,2,2,0,2,1,1,1,1,2,2,0,0,1,0,1,0,0,1,3,0,0,0,0,1,0,0,2,1,0,
0,0,1,0,1,0,0,0,0,0,2,1,0,1,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,
2,0,0,2,3,0,2,3,1,2,2,0,2,0,0,2,0,2,1,1,1,2,1,0,0,1,2,1,1,2,1,0,
1,0,2,0,1,0,1,1,0,0,2,2,1,2,1,1,2,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,0,2,1,2,0,0,0,1,0,0,3,2,0,1,0,0,1,0,0,2,0,0,0,1,2,1,0,1,0,
0,0,0,0,1,0,1,0,0,1,0,0,0,0,1,0,1,0,1,1,1,0,1,0,0,0,0,0,0,0,0,0,
0,0,0,2,2,0,2,2,1,1,0,1,1,1,1,1,0,0,1,2,1,1,1,0,1,0,0,0,1,1,1,1,
0,0,2,1,0,1,1,1,0,1,1,2,1,2,1,1,2,0,1,1,2,1,0,2,0,0,0,0,0,0,0,0,
3,2,2,0,0,2,0,0,0,0,0,0,0,2,2,0,2,0,0,1,0,0,2,0,0,0,0,0,2,0,0,0,
0,2,1,0,0,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,1,0,1,0,0,0,0,0,0,0,0,0,
0,0,0,3,2,0,2,2,0,1,1,0,1,0,0,1,0,0,0,1,0,1,0,0,0,0,0,1,0,0,0,0,
2,0,1,0,1,0,1,1,0,0,1,2,0,1,0,1,1,0,0,1,0,1,0,2,0,0,0,0,0,0,0,0,
2,2,2,0,1,1,0,0,0,1,0,0,0,1,2,0,1,0,0,1,0,0,1,0,0,0,0,1,2,0,1,0,
0,0,1,0,0,0,1,0,0,1,0,0,0,0,0,0,1,0,1,0,2,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,2,1,0,1,1,1,0,0,0,0,1,2,0,0,1,0,0,0,1,0,0,1,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,
1,1,2,0,1,0,0,0,1,0,1,0,0,0,1,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,2,0,0,0,0,0,1,
0,0,1,2,2,0,2,1,2,1,1,2,2,0,0,0,0,1,0,0,1,1,0,0,2,0,0,0,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,
2,2,2,0,0,0,1,0,0,0,0,0,0,2,2,1,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,
0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,1,1,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,0,1,0,1,0,0,0,0,0,0,1,1,0,0,0,0,0,0,0,1,0,1,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,1,0,0,0,0,0,0,0,0,0,0,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
)
Latin5TurkishModel = {
'char_to_order_map': Latin5_TurkishCharToOrderMap,
'precedence_matrix': TurkishLangModel,
'typical_positive_ratio': 0.970290,
'keep_english_letter': True,
'charset_name': "ISO-8859-9",
'language': 'Turkish',
}
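# Hedged usage note (an assumption, not taken from this file): within chardet,
# a model dict like Latin5TurkishModel is typically passed to the single-byte
# charset prober, along the lines of:
#
#     from pip._vendor.chardet.sbcharsetprober import SingleByteCharSetProber
#     prober = SingleByteCharSetProber(Latin5TurkishModel)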
|
spark0001/spark2.1.1
|
refs/heads/master
|
examples/src/main/python/mllib/summary_statistics_example.py
|
128
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
from pyspark import SparkContext
# $example on$
import numpy as np
from pyspark.mllib.stat import Statistics
# $example off$
if __name__ == "__main__":
sc = SparkContext(appName="SummaryStatisticsExample") # SparkContext
# $example on$
mat = sc.parallelize(
[np.array([1.0, 10.0, 100.0]), np.array([2.0, 20.0, 200.0]), np.array([3.0, 30.0, 300.0])]
) # an RDD of Vectors
# Compute column summary statistics.
summary = Statistics.colStats(mat)
print(summary.mean()) # a dense vector containing the mean value for each column
print(summary.variance()) # column-wise variance
print(summary.numNonzeros()) # number of nonzeros in each column
# $example off$
sc.stop()
|
kfoss/keras
|
refs/heads/master
|
keras/models.py
|
1
|
from __future__ import absolute_import
from __future__ import print_function
import theano
import theano.tensor as T
import numpy as np
import warnings, time, copy
from . import optimizers
from . import objectives
from . import regularizers
from . import constraints
from . import callbacks as cbks
import time, copy, pprint
from .utils.generic_utils import Progbar, printv
from .layers import containers
from six.moves import range
def standardize_y(y):
if not hasattr(y, 'shape'):
y = np.asarray(y)
if len(y.shape) == 1:
y = np.expand_dims(y, 1)
return y
def make_batches(size, batch_size):
nb_batch = int(np.ceil(size/float(batch_size)))
return [(i*batch_size, min(size, (i+1)*batch_size)) for i in range(0, nb_batch)]
def standardize_X(X):
if type(X) == list:
return X
else:
return [X]
def slice_X(X, start=None, stop=None):
if type(X) == list:
if hasattr(start, '__len__'):
return [x[start] for x in X]
else:
return [x[start:stop] for x in X]
else:
if hasattr(start, '__len__'):
return X[start]
else:
return X[start:stop]
def weighted_objective(fn):
def weighted(y_true, y_pred, weights):
# it's important that 0 * Inf == 0, not NaN, so I need to mask first
masked_y_true = y_true[weights.nonzero()[:-1]]
masked_y_pred = y_pred[weights.nonzero()[:-1]]
masked_weights = weights[weights.nonzero()]
obj_output = fn(masked_y_true, masked_y_pred)
return (masked_weights.flatten() * obj_output.flatten()).mean()
return weighted
def standardize_weights(y, sample_weight=None, class_weight=None):
if sample_weight is not None:
return standardize_y(sample_weight)
elif isinstance(class_weight, dict):
if len(y.shape) > 2:
raise Exception('class_weight not supported for 3+ dimensional targets.')
if y.shape[1] > 1:
y_classes = y.argmax(axis=1)
elif y.shape[1] == 1:
y_classes = np.reshape(y, y.shape[0])
else:
y_classes = y
return np.expand_dims(np.array(list(map(lambda x: class_weight[x], y_classes))), 1)
else:
return np.ones(y.shape[:-1] + (1,))
class Model(object):
def _fit(self, f, ins, out_labels=[], batch_size=128, nb_epoch=100, verbose=1, callbacks=[], \
validation_split=0., val_f=None, val_ins=None, shuffle=True, metrics=[]):
'''
Abstract fit function for f(*ins). Assume that f returns a list, labelled by out_labels.
'''
do_validation = False
if val_f and val_ins:
do_validation = True
if verbose:
print("Train on %d samples, validate on %d samples" % (len(ins[0]), len(val_ins[0])))
else:
if 0 < validation_split < 1:
do_validation = True
split_at = int(len(ins[0]) * (1 - validation_split))
(ins, val_ins) = (slice_X(ins, 0, split_at), slice_X(ins, split_at))
if verbose:
print("Train on %d samples, validate on %d samples" % (len(ins[0]), len(val_ins[0])))
nb_train_sample = len(ins[0])
index_array = np.arange(nb_train_sample)
history = cbks.History()
if verbose:
callbacks = [history, cbks.BaseLogger()] + callbacks
else:
callbacks = [history] + callbacks
callbacks = cbks.CallbackList(callbacks)
callbacks._set_model(self)
callbacks._set_params({
'batch_size': batch_size,
'nb_epoch': nb_epoch,
'nb_sample': nb_train_sample,
'verbose': verbose,
'do_validation': do_validation,
'metrics':metrics,
})
callbacks.on_train_begin()
self.stop_training = False
for epoch in range(nb_epoch):
callbacks.on_epoch_begin(epoch)
if shuffle:
np.random.shuffle(index_array)
batches = make_batches(nb_train_sample, batch_size)
for batch_index, (batch_start, batch_end) in enumerate(batches):
batch_ids = index_array[batch_start:batch_end]
ins_batch = slice_X(ins, batch_ids)
batch_logs = {}
batch_logs['batch'] = batch_index
batch_logs['size'] = len(batch_ids)
callbacks.on_batch_begin(batch_index, batch_logs)
outs = f(*ins_batch)
if type(outs) != list:
outs = [outs]
for l, o in zip(out_labels, outs):
batch_logs[l] = o
callbacks.on_batch_end(batch_index, batch_logs)
if batch_index == len(batches) - 1: # last batch
# validation
epoch_logs = {}
if do_validation:
# replace with self._evaluate
val_outs = val_f(*val_ins)
if type(val_outs) != list:
val_outs = [val_outs]
# same labels assumed
for l, o in zip(out_labels, val_outs):
epoch_logs['val_' + l] = o
callbacks.on_epoch_end(epoch, epoch_logs)
if self.stop_training:
break
callbacks.on_train_end()
return history
def _predict_loop(self, f, ins, batch_size=128, verbose=0):
'''
Abstract method to loop over some data in batches.
'''
nb_sample = len(ins[0])
outs = []
if verbose == 1:
progbar = Progbar(target=nb_sample)
batches = make_batches(nb_sample, batch_size)
index_array = np.arange(nb_sample)
for batch_index, (batch_start, batch_end) in enumerate(batches):
batch_ids = index_array[batch_start:batch_end]
ins_batch = slice_X(ins, batch_ids)
batch_outs = f(*ins_batch)
if type(batch_outs) != list:
batch_outs = [batch_outs]
if batch_index == 0:
for batch_out in batch_outs:
shape = (nb_sample,) + batch_out.shape[1:]
outs.append(np.zeros(shape))
for i, batch_out in enumerate(batch_outs):
outs[i][batch_start:batch_end] = batch_out
if verbose == 1:
progbar.update(batch_end)
return outs
def _test_loop(self, f, ins, batch_size=128, verbose=0):
'''
Abstract method to loop over some data in batches.
'''
nb_sample = len(ins[0])
outs = []
if verbose == 1:
progbar = Progbar(target=nb_sample)
batches = make_batches(nb_sample, batch_size)
index_array = np.arange(nb_sample)
for batch_index, (batch_start, batch_end) in enumerate(batches):
batch_ids = index_array[batch_start:batch_end]
ins_batch = slice_X(ins, batch_ids)
batch_outs = f(*ins_batch)
if type(batch_outs) == list:
if batch_index == 0:
for batch_out in enumerate(batch_outs):
outs.append(0.)
for i, batch_out in enumerate(batch_outs):
outs[i] += batch_out * len(batch_ids)
else:
if batch_index == 0:
outs.append(0.)
outs[0] += batch_outs * len(batch_ids)
if verbose == 1:
progbar.update(batch_end)
for i, out in enumerate(outs):
outs[i] /= nb_sample
return outs
class Sequential(Model, containers.Sequential):
'''
Inherits from Model the following methods:
- _fit
- _predict
- _evaluate
Inherits from containers.Sequential the following methods:
- __init__
- add
- get_output
- get_input
- get_weights
- set_weights
'''
def compile(self, optimizer, loss, class_mode="categorical", theano_mode=None):
self.optimizer = optimizers.get(optimizer)
self.loss = weighted_objective(objectives.get(loss))
# input of model
self.X_train = self.get_input(train=True)
self.X_test = self.get_input(train=False)
self.y_train = self.get_output(train=True)
self.y_test = self.get_output(train=False)
# target of model
self.y = T.zeros_like(self.y_train)
self.weights = T.ones_like(self.y_train)
train_loss = self.loss(self.y, self.y_train, self.weights)
test_loss = self.loss(self.y, self.y_test, self.weights)
train_loss.name = 'train_loss'
test_loss.name = 'test_loss'
self.y.name = 'y'
if class_mode == "categorical":
train_accuracy = T.mean(T.eq(T.argmax(self.y, axis=-1), T.argmax(self.y_train, axis=-1)))
test_accuracy = T.mean(T.eq(T.argmax(self.y, axis=-1), T.argmax(self.y_test, axis=-1)))
elif class_mode == "binary":
train_accuracy = T.mean(T.eq(self.y, T.round(self.y_train)))
test_accuracy = T.mean(T.eq(self.y, T.round(self.y_test)))
else:
raise Exception("Invalid class mode:" + str(class_mode))
self.class_mode = class_mode
for r in self.regularizers:
train_loss = r(train_loss)
updates = self.optimizer.get_updates(self.params, self.constraints, train_loss)
if type(self.X_train) == list:
train_ins = self.X_train + [self.y, self.weights]
test_ins = self.X_test + [self.y, self.weights]
predict_ins = self.X_test
else:
train_ins = [self.X_train, self.y, self.weights]
test_ins = [self.X_test, self.y, self.weights]
predict_ins = [self.X_test]
self._train = theano.function(train_ins, train_loss,
updates=updates, allow_input_downcast=True, mode=theano_mode)
self._train_with_acc = theano.function(train_ins, [train_loss, train_accuracy],
updates=updates, allow_input_downcast=True, mode=theano_mode)
self._predict = theano.function(predict_ins, self.y_test,
allow_input_downcast=True, mode=theano_mode)
self._test = theano.function(test_ins, test_loss,
allow_input_downcast=True, mode=theano_mode)
self._test_with_acc = theano.function(test_ins, [test_loss, test_accuracy],
allow_input_downcast=True, mode=theano_mode)
def train(self, X, y, accuracy=False, sample_weight=None):
warnings.warn('The "train" method is deprecated, use "train_on_batch" instead.')
return self.train_on_batch(X, y, accuracy, sample_weight)
def test(self, X, y, accuracy=False):
warnings.warn('The "test" method is deprecated, use "test_on_batch" instead.')
return self.test_on_batch(X, y, accuracy)
def train_on_batch(self, X, y, accuracy=False, sample_weight=None):
X = standardize_X(X)
y = standardize_y(y)
if sample_weight is None:
sample_weight = np.ones(list(y.shape[0:-1]) + [1])
else:
sample_weight = standardize_y(sample_weight)
ins = X + [y, sample_weight]
if accuracy:
return self._train_with_acc(*ins)
else:
return self._train(*ins)
def test_on_batch(self, X, y, accuracy=False):
X = standardize_X(X)
y = standardize_y(y)
sample_weight = np.ones(y.shape[:-1] + (1,))
ins = X + [y, sample_weight]
if accuracy:
return self._test_with_acc(*ins)
else:
return self._test(*ins)
def predict_on_batch(self, X):
ins = standardize_X(X)
return self._predict(*ins)
def fit(self, X, y, batch_size=128, nb_epoch=100, verbose=1, callbacks=[],
validation_split=0., validation_data=None, shuffle=True, show_accuracy=False,
class_weight=None, sample_weight=None):
X = standardize_X(X)
y = standardize_y(y)
sample_weight = standardize_weights(y, class_weight=class_weight, sample_weight=sample_weight)
val_f = None
val_ins = None
if validation_data or validation_split:
if show_accuracy:
val_f = self._test_with_acc
else:
val_f = self._test
if validation_data:
try:
X_val, y_val = validation_data
except:
raise Exception("Invalid format for validation data; provide a tuple (X_val, y_val). \
X_val may be a numpy array or a list of numpy arrays depending on your model input.")
X_val = standardize_X(X_val)
y_val = standardize_y(y_val)
val_ins = X_val + [y_val, np.ones(y_val.shape[:-1] + (1,))]
if show_accuracy:
f = self._train_with_acc
out_labels = ['loss', 'acc']
else:
f = self._train
out_labels = ['loss']
ins = X + [y, sample_weight]
metrics = ['loss', 'acc', 'val_loss', 'val_acc']
return self._fit(f, ins, out_labels=out_labels, batch_size=batch_size, nb_epoch=nb_epoch, verbose=verbose, callbacks=callbacks, \
validation_split=validation_split, val_f=val_f, val_ins=val_ins, shuffle=shuffle, metrics=metrics)
def predict(self, X, batch_size=128, verbose=0):
X = standardize_X(X)
return self._predict_loop(self._predict, X, batch_size, verbose)[0]
def predict_proba(self, X, batch_size=128, verbose=1):
preds = self.predict(X, batch_size, verbose)
if preds.min() < 0 or preds.max() > 1:
warnings.warn("Network returning invalid probability values.")
return preds
def predict_classes(self, X, batch_size=128, verbose=1):
proba = self.predict(X, batch_size=batch_size, verbose=verbose)
if self.class_mode == "categorical":
return proba.argmax(axis=-1)
else:
return (proba > 0.5).astype('int32')
def evaluate(self, X, y, batch_size=128, show_accuracy=False, verbose=1, sample_weight=None):
X = standardize_X(X)
y = standardize_y(y)
sample_weight = standardize_weights(y, sample_weight=sample_weight)
ins = X + [y, sample_weight]
if show_accuracy:
f = self._test_with_acc
else:
f = self._test
outs = self._test_loop(f, ins, batch_size, verbose)
if show_accuracy:
return outs
else:
return outs[0]
def get_config(self, verbose=0):
layers = []
for i, l in enumerate(self.layers):
config = l.get_config()
layers.append(config)
if verbose:
printv(layers)
return layers
def save_weights(self, filepath, overwrite=False):
# Save weights from all layers to HDF5
import h5py
import os.path
# if file exists and should not be overwritten
if not overwrite and os.path.isfile(filepath):
import sys
get_input = input
if sys.version_info[:2] <= (2, 7):
get_input = raw_input
overwrite = get_input('[WARNING] %s already exists - overwrite? [y/n]' % (filepath))
while overwrite not in ['y', 'n']:
overwrite = get_input('Enter "y" (overwrite) or "n" (cancel).')
if overwrite == 'n':
return
print('[TIP] Next time specify overwrite=True in save_weights!')
f = h5py.File(filepath, 'w')
f.attrs['nb_layers'] = len(self.layers)
for k, l in enumerate(self.layers):
g = f.create_group('layer_{}'.format(k))
weights = l.get_weights()
g.attrs['nb_params'] = len(weights)
for n, param in enumerate(weights):
param_name = 'param_{}'.format(n)
param_dset = g.create_dataset(param_name, param.shape, dtype=param.dtype)
param_dset[:] = param
f.flush()
f.close()
def load_weights(self, filepath):
'''
This method does not make use of Sequential.set_weights()
for backwards compatibility.
'''
# Loads weights from HDF5 file
import h5py
f = h5py.File(filepath)
for k in range(f.attrs['nb_layers']):
g = f['layer_{}'.format(k)]
weights = [g['param_{}'.format(p)] for p in range(g.attrs['nb_params'])]
self.layers[k].set_weights(weights)
f.close()
class Graph(Model, containers.Graph):
def compile(self, optimizer, loss, theano_mode=None):
# loss is a dictionary mapping output name to loss functions
ys = []
ys_train = []
ys_test = []
train_loss = 0.
test_loss = 0.
for output_name in self.output_order:
loss_fn = loss[output_name]
output = self.outputs[output_name]
y_train = output.get_output(True)
y_test = output.get_output(False)
y = T.zeros_like(y_test)
ys.append(y)
ys_train.append(y_train)
ys_test.append(y_test)
train_loss += objectives.get(loss_fn)(y, y_train).mean()
test_loss += objectives.get(loss_fn)(y, y_test).mean()
train_loss.name = 'train_loss'
test_loss.name = 'test_loss'
ins = [self.inputs[name].input for name in self.input_order]
train_ins = ins + ys
test_ins = ins + ys
for r in self.regularizers:
train_loss = r(train_loss)
self.optimizer = optimizers.get(optimizer)
updates = self.optimizer.get_updates(self.params, self.constraints, train_loss)
self._train = theano.function(train_ins, train_loss,
updates=updates, allow_input_downcast=True, mode=theano_mode)
self._test = theano.function(test_ins, test_loss,
allow_input_downcast=True, mode=theano_mode)
self._predict = theano.function(inputs=ins, outputs=ys_test,
allow_input_downcast=True, mode=theano_mode)
def train_on_batch(self, data):
# data is a dictionary mapping output and input names to arrays
ins = [data[name] for name in self.input_order] + [standardize_y(data[name]) for name in self.output_order]
return self._train(*ins)
def test_on_batch(self, data):
# data is a dictionary mapping input names to arrays
ins = [data[name] for name in self.input_order] + [standardize_y(data[name]) for name in self.output_order]
return self._test(*ins)
def predict_on_batch(self, data):
# data is a dictionary mapping input names to arrays
ins = [data[name] for name in self.input_order]
return self._predict(*ins)
def fit(self, data, batch_size=128, nb_epoch=100, verbose=1, callbacks=[],
validation_split=0., validation_data=None, shuffle=True):
ins = [data[name] for name in self.input_order] + [standardize_y(data[name]) for name in self.output_order]
val_f = None
val_ins = None
if validation_data or validation_split:
val_f = self._test
if validation_data:
val_ins = [validation_data[name] for name in self.input_order] + [standardize_y(validation_data[name]) for name in self.output_order]
f = self._train
out_labels = self.output_order
metrics = self.output_order + ['val_' + m for m in self.output_order]
history = self._fit(f, ins, out_labels=out_labels, batch_size=batch_size, nb_epoch=nb_epoch, verbose=verbose, callbacks=callbacks, \
validation_split=validation_split, val_f=val_f, val_ins=val_ins, shuffle=shuffle, metrics=metrics)
return history
def evaluate(self, data, batch_size=128, verbose=0):
ins = [data[name] for name in self.input_order] + [standardize_y(data[name]) for name in self.output_order]
outs = self._test_loop(self._test, ins, batch_size, verbose)
return outs[0]
def predict(self, data, batch_size=128, verbose=0):
ins = [data[name] for name in self.input_order]
outs = self._predict_loop(self._predict, ins, batch_size, verbose)
return dict(zip(self.output_order, outs))
def save_weights(self, filepath, overwrite=False):
# Save weights from all layers to HDF5
import h5py
import os.path
# if file exists and should not be overwritten
if not overwrite and os.path.isfile(filepath):
import sys
get_input = input
if sys.version_info[:2] <= (2, 7):
get_input = raw_input
overwrite = get_input('[WARNING] %s already exists - overwrite? [y/n]' % (filepath))
while overwrite not in ['y', 'n']:
overwrite = get_input('Enter "y" (overwrite) or "n" (cancel).')
if overwrite == 'n':
return
print('[TIP] Next time specify overwrite=True in save_weights!')
f = h5py.File(filepath, 'w')
g = f.create_group('graph')
weights = self.get_weights()
g.attrs['nb_params'] = len(weights)
for n, param in enumerate(weights):
param_name = 'param_{}'.format(n)
param_dset = g.create_dataset(param_name, param.shape, dtype=param.dtype)
param_dset[:] = param
f.flush()
f.close()
def load_weights(self, filepath):
# Loads weights from HDF5 file
import h5py
f = h5py.File(filepath)
g = f['graph']
weights = [g['param_{}'.format(p)] for p in range(g.attrs['nb_params'])]
self.set_weights(weights)
f.close()
def get_config(self, verbose=1):
config = super(Graph, self).get_config()
if verbose:
pp = pprint.PrettyPrinter(indent=4)
pp.pprint(config)
return config
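# Hedged sketch (not part of the original module): a tiny self-contained
# demonstration of the batching helpers defined at the top of this file; the
# array sizes below are arbitrary.
if __name__ == "__main__":
    X_demo = standardize_X(np.arange(10).reshape(10, 1))
    y_demo = standardize_y(np.arange(10))  # -> shape (10, 1)
    for batch_start, batch_end in make_batches(10, batch_size=4):
        X_batch = slice_X(X_demo, batch_start, batch_end)
        print(batch_start, batch_end, X_batch[0].ravel(), y_demo[batch_start:batch_end].ravel())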
|
EduTechLabs/reckon
|
refs/heads/master
|
rocken_project/rocken_project/wsgi.py
|
1
|
"""
WSGI config for rocken_project project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/dev/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "rocken_project.settings")
application = get_wsgi_application()
|