repo_name | path | copies | size | content | license
---|---|---|---|---|---|
dgasmith/psi4
|
psi4/driver/qcdb/qcformat.py
|
3
|
4080
|
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2019 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
"""Parent classes for quantum chemistry program input and output file
formats.
"""
import re
class InputFormat(object):
def __init__(self, mem, mtd, bas, mol, sys, cast):
# total job memory in MB
self.memory = mem
# computational method
self.method = mtd.lower()
# qcdb.Molecule object
self.molecule = mol
# database member index
self.index = sys
# orbital basis set
self.basis = bas.lower()
# do cast up from sto-3g basis?
self.castup = cast
def corresponding_aux_basis(self):
"""For Dunning basis sets, returns strings from which auxiliary
basis sets and heavy-aug can be constructed. Note that
valence/core-valence/etc. is conserved and X-zeta/(X+d)zeta is
not, since this is the usual aux basis pattern.
*augbasis* is rounded up to the nearest aug-cc-pVXZ
*rootbasis* is rounded down to the nearest cc-pVXZ
*auxbasis* is rounded up to the nearest cc-pVXZ or aug-cc-pVXZ
"""
Dunmatch = re.compile(r'^(.*cc-)(pv|pcv|pwcv).*?([dtq56]).*z$').match(self.basis)
if Dunmatch:
rootbas = 'cc-' + Dunmatch.group(2) + Dunmatch.group(3) + 'z'
augbas = 'aug-cc-' + Dunmatch.group(2) + Dunmatch.group(3) + 'z'
if Dunmatch.group(1) == 'cc-':
auxbas = rootbas
else:
auxbas = augbas
else:
rootbas = None
augbas = None
auxbas = None
return [rootbas, augbas, auxbas]
class InputFormat2(object):
def __init__(self, mem, mol, mtd, der, opt):
# total job memory in MB
self.memory = mem
# qcdb.Molecule object
self.molecule = mol
# computational method
self.method = mtd.lower()
# computational derivative level
self.dertype = der
# options dictionary
self.options = opt
# orbital basis set
self.basis = opt['GLOBALS']['BASIS']['value'].lower()
# do cast up from sto-3g basis?
self.castup = opt['SCF']['BASIS_GUESS']['value']
def corresponding_aux_basis(self):
"""For Dunning basis sets, returns strings from which auxiliary
basis sets and heavy-aug can be constructed. Note that
valence/core-valence/etc. is conserved and X-zeta/(X+d)zeta is
not, since this is the usual aux basis pattern.
*augbasis* is rounded up to the nearest aug-cc-pVXZ
*rootbasis* is rounded down to the nearest cc-pVXZ
*auxbasis* is rounded up to the nearest cc-pVXZ or aug-cc-pVXZ
"""
Dunmatch = re.compile(r'^(.*cc-)(pv|pcv|pwcv).*?([dtq56]).*z$').match(self.basis)
if Dunmatch:
rootbas = 'cc-' + Dunmatch.group(2) + Dunmatch.group(3) + 'z'
augbas = 'aug-cc-' + Dunmatch.group(2) + Dunmatch.group(3) + 'z'
if Dunmatch.group(1) == 'cc-':
auxbas = rootbas
else:
auxbas = augbas
else:
rootbas = None
augbas = None
auxbas = None
return [rootbas, augbas, auxbas]
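# Illustrative sketch, not part of the original qcformat.py: shows the names
# produced by corresponding_aux_basis() for a Dunning basis. `mol` is assumed
# to be any qcdb.Molecule instance; the other values are placeholders.
def _example_corresponding_aux_basis(mol):
    job = InputFormat(mem=2000, mtd='mp2', bas='cc-pVTZ', mol=mol, sys=1, cast=False)
    # For 'cc-pvtz' this returns ['cc-pvtz', 'aug-cc-pvtz', 'cc-pvtz']:
    # root and aux stay unaugmented, while aug rounds up to aug-cc-pvtz.
    return job.corresponding_aux_basis()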
|
lgpl-3.0
|
shsingh/ansible
|
lib/ansible/modules/network/fortios/fortios_firewall_interface_policy.py
|
7
|
19948
|
#!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_firewall_interface_policy
short_description: Configure IPv4 interface policies in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the
user to set and modify the firewall feature and interface_policy category.
Examples include all parameters, and values need to be adjusted to datasources before usage.
Tested with FOS v6.0.5
version_added: "2.8"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate IP address.
type: str
required: false
username:
description:
- FortiOS or FortiGate username.
type: str
required: false
password:
description:
- FortiOS or FortiGate password.
type: str
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
type: str
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS protocol.
type: bool
default: true
ssl_verify:
description:
- Ensures the FortiGate certificate is verified by a proper CA.
type: bool
default: true
version_added: 2.9
state:
description:
- Indicates whether to create or remove the object.
In previous versions this attribute was present at a deeper level.
It has been moved out to this outer level.
type: str
required: false
choices:
- present
- absent
version_added: 2.9
firewall_interface_policy:
description:
- Configure IPv4 interface policies.
default: null
type: dict
suboptions:
state:
description:
- B(Deprecated)
- Starting with Ansible 2.9 we recommend using the top-level 'state' parameter.
- HORIZONTALLINE
- Indicates whether to create or remove the object.
type: str
required: false
choices:
- present
- absent
address_type:
description:
- Policy address type (IPv4 or IPv6).
type: str
choices:
- ipv4
- ipv6
application_list:
description:
- Application list name. Source application.list.name.
type: str
application_list_status:
description:
- Enable/disable application control.
type: str
choices:
- enable
- disable
av_profile:
description:
- Antivirus profile. Source antivirus.profile.name.
type: str
av_profile_status:
description:
- Enable/disable antivirus.
type: str
choices:
- enable
- disable
comments:
description:
- Comments.
type: str
dlp_sensor:
description:
- DLP sensor name. Source dlp.sensor.name.
type: str
dlp_sensor_status:
description:
- Enable/disable DLP.
type: str
choices:
- enable
- disable
dsri:
description:
- Enable/disable DSRI.
type: str
choices:
- enable
- disable
dstaddr:
description:
- Address object to limit traffic monitoring to network traffic sent to the specified address or range.
type: list
suboptions:
name:
description:
- Address name. Source firewall.address.name firewall.addrgrp.name.
required: true
type: str
interface:
description:
- Monitored interface name from available interfaces. Source system.zone.name system.interface.name.
type: str
ips_sensor:
description:
- IPS sensor name. Source ips.sensor.name.
type: str
ips_sensor_status:
description:
- Enable/disable IPS.
type: str
choices:
- enable
- disable
label:
description:
- Label.
type: str
logtraffic:
description:
- "Logging type to be used in this policy (Options: all | utm | disable)."
type: str
choices:
- all
- utm
- disable
policyid:
description:
- Policy ID.
required: true
type: int
scan_botnet_connections:
description:
- Enable/disable scanning for connections to Botnet servers.
type: str
choices:
- disable
- block
- monitor
service:
description:
- Service object from available options.
type: list
suboptions:
name:
description:
- Service name. Source firewall.service.custom.name firewall.service.group.name.
required: true
type: str
spamfilter_profile:
description:
- Antispam profile. Source spamfilter.profile.name.
type: str
spamfilter_profile_status:
description:
- Enable/disable antispam.
type: str
choices:
- enable
- disable
srcaddr:
description:
- Address object to limit traffic monitoring to network traffic sent from the specified address or range.
type: list
suboptions:
name:
description:
- Address name. Source firewall.address.name firewall.addrgrp.name.
required: true
type: str
status:
description:
- Enable/disable this policy.
type: str
choices:
- enable
- disable
webfilter_profile:
description:
- Web filter profile. Source webfilter.profile.name.
type: str
webfilter_profile_status:
description:
- Enable/disable web filtering.
type: str
choices:
- enable
- disable
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
ssl_verify: "False"
tasks:
- name: Configure IPv4 interface policies.
fortios_firewall_interface_policy:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
https: "False"
state: "present"
firewall_interface_policy:
address_type: "ipv4"
application_list: "<your_own_value> (source application.list.name)"
application_list_status: "enable"
av_profile: "<your_own_value> (source antivirus.profile.name)"
av_profile_status: "enable"
comments: "<your_own_value>"
dlp_sensor: "<your_own_value> (source dlp.sensor.name)"
dlp_sensor_status: "enable"
dsri: "enable"
dstaddr:
-
name: "default_name_13 (source firewall.address.name firewall.addrgrp.name)"
interface: "<your_own_value> (source system.zone.name system.interface.name)"
ips_sensor: "<your_own_value> (source ips.sensor.name)"
ips_sensor_status: "enable"
label: "<your_own_value>"
logtraffic: "all"
policyid: "19"
scan_botnet_connections: "disable"
service:
-
name: "default_name_22 (source firewall.service.custom.name firewall.service.group.name)"
spamfilter_profile: "<your_own_value> (source spamfilter.profile.name)"
spamfilter_profile_status: "enable"
srcaddr:
-
name: "default_name_26 (source firewall.address.name firewall.addrgrp.name)"
status: "enable"
webfilter_profile: "<your_own_value> (source webfilter.profile.name)"
webfilter_profile_status: "enable"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG
def login(data, fos):
host = data['host']
username = data['username']
password = data['password']
ssl_verify = data['ssl_verify']
fos.debug('on')
if 'https' in data and not data['https']:
fos.https('off')
else:
fos.https('on')
fos.login(host, username, password, verify=ssl_verify)
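# Illustrative sketch, not part of the original module: login() pulls its
# arguments from the module parameter dict, e.g. the hypothetical values below
# log in over HTTPS with certificate verification enabled.
def _example_login(fos):
    # `fos` is assumed to be a fortiosapi.FortiOSAPI instance.
    params = {'host': '192.0.2.10', 'username': 'admin', 'password': '',
              'https': True, 'ssl_verify': True}
    login(params, fos)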
def filter_firewall_interface_policy_data(json):
option_list = ['address_type', 'application_list', 'application_list_status',
'av_profile', 'av_profile_status', 'comments',
'dlp_sensor', 'dlp_sensor_status', 'dsri',
'dstaddr', 'interface', 'ips_sensor',
'ips_sensor_status', 'label', 'logtraffic',
'policyid', 'scan_botnet_connections', 'service',
'spamfilter_profile', 'spamfilter_profile_status', 'srcaddr',
'status', 'webfilter_profile', 'webfilter_profile_status']
dictionary = {}
for attribute in option_list:
if attribute in json and json[attribute] is not None:
dictionary[attribute] = json[attribute]
return dictionary
def underscore_to_hyphen(data):
if isinstance(data, list):
for i, elem in enumerate(data):
data[i] = underscore_to_hyphen(elem)
elif isinstance(data, dict):
new_data = {}
for k, v in data.items():
new_data[k.replace('_', '-')] = underscore_to_hyphen(v)
data = new_data
return data
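# Illustrative sketch, not part of the original module: underscore_to_hyphen
# recursively rewrites dict keys so Ansible-style names match the FortiOS API,
# e.g. for the hypothetical payload below.
def _example_underscore_to_hyphen():
    payload = {'av_profile_status': 'enable', 'dstaddr': [{'name': 'addr1'}]}
    # Returns {'av-profile-status': 'enable', 'dstaddr': [{'name': 'addr1'}]}
    return underscore_to_hyphen(payload)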
def firewall_interface_policy(data, fos):
vdom = data['vdom']
if 'state' in data and data['state']:
state = data['state']
elif 'state' in data['firewall_interface_policy'] and data['firewall_interface_policy']['state']:
state = data['firewall_interface_policy']['state']
else:
state = True
firewall_interface_policy_data = data['firewall_interface_policy']
filtered_data = underscore_to_hyphen(filter_firewall_interface_policy_data(firewall_interface_policy_data))
if state == "present":
return fos.set('firewall',
'interface-policy',
data=filtered_data,
vdom=vdom)
elif state == "absent":
return fos.delete('firewall',
'interface-policy',
mkey=filtered_data['policyid'],
vdom=vdom)
def is_successful_status(status):
return status['status'] == "success" or \
status['http_method'] == "DELETE" and status['http_status'] == 404
def fortios_firewall(data, fos):
if data['firewall_interface_policy']:
resp = firewall_interface_policy(data, fos)
return not is_successful_status(resp), \
resp['status'] == "success", \
resp
def main():
fields = {
"host": {"required": False, "type": "str"},
"username": {"required": False, "type": "str"},
"password": {"required": False, "type": "str", "default": "", "no_log": True},
"vdom": {"required": False, "type": "str", "default": "root"},
"https": {"required": False, "type": "bool", "default": True},
"ssl_verify": {"required": False, "type": "bool", "default": True},
"state": {"required": False, "type": "str",
"choices": ["present", "absent"]},
"firewall_interface_policy": {
"required": False, "type": "dict", "default": None,
"options": {
"state": {"required": False, "type": "str",
"choices": ["present", "absent"]},
"address_type": {"required": False, "type": "str",
"choices": ["ipv4", "ipv6"]},
"application_list": {"required": False, "type": "str"},
"application_list_status": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"av_profile": {"required": False, "type": "str"},
"av_profile_status": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"comments": {"required": False, "type": "str"},
"dlp_sensor": {"required": False, "type": "str"},
"dlp_sensor_status": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"dsri": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"dstaddr": {"required": False, "type": "list",
"options": {
"name": {"required": True, "type": "str"}
}},
"interface": {"required": False, "type": "str"},
"ips_sensor": {"required": False, "type": "str"},
"ips_sensor_status": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"label": {"required": False, "type": "str"},
"logtraffic": {"required": False, "type": "str",
"choices": ["all", "utm", "disable"]},
"policyid": {"required": True, "type": "int"},
"scan_botnet_connections": {"required": False, "type": "str",
"choices": ["disable", "block", "monitor"]},
"service": {"required": False, "type": "list",
"options": {
"name": {"required": True, "type": "str"}
}},
"spamfilter_profile": {"required": False, "type": "str"},
"spamfilter_profile_status": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"srcaddr": {"required": False, "type": "list",
"options": {
"name": {"required": True, "type": "str"}
}},
"status": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"webfilter_profile": {"required": False, "type": "str"},
"webfilter_profile_status": {"required": False, "type": "str",
"choices": ["enable", "disable"]}
}
}
}
module = AnsibleModule(argument_spec=fields,
supports_check_mode=False)
# legacy_mode refers to using fortiosapi instead of HTTPAPI
legacy_mode = 'host' in module.params and module.params['host'] is not None and \
'username' in module.params and module.params['username'] is not None and \
'password' in module.params and module.params['password'] is not None
if not legacy_mode:
if module._socket_path:
connection = Connection(module._socket_path)
fos = FortiOSHandler(connection)
is_error, has_changed, result = fortios_firewall(module.params, fos)
else:
module.fail_json(**FAIL_SOCKET_MSG)
else:
try:
from fortiosapi import FortiOSAPI
except ImportError:
module.fail_json(msg="fortiosapi module is required")
fos = FortiOSAPI()
login(module.params, fos)
is_error, has_changed, result = fortios_firewall(module.params, fos)
fos.logout()
if not is_error:
module.exit_json(changed=has_changed, meta=result)
else:
module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
main()
|
gpl-3.0
|
loopCM/chromium
|
native_client_sdk/src/build_tools/sdk_tools/command/info.py
|
4
|
1216
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import command_common
import logging
import manifest_util
def Info(manifest, bundle_names):
valid_bundles, invalid_bundles = command_common.GetValidBundles(manifest,
bundle_names)
if invalid_bundles:
logging.warn('Unknown bundle(s): %s\n' % (', '.join(invalid_bundles)))
if not valid_bundles:
logging.warn('No valid bundles given.')
return
for bundle_name in valid_bundles:
bundle = manifest.GetBundle(bundle_name)
print bundle.name
for key in sorted(bundle.iterkeys()):
value = bundle[key]
if key == manifest_util.ARCHIVES_KEY:
archive = bundle.GetHostOSArchive()
print ' Archive:'
if archive:
for archive_key in sorted(archive.iterkeys()):
print ' %s: %s' % (archive_key, archive[archive_key])
else:
print ' No archives for this host.'
elif key not in (manifest_util.ARCHIVES_KEY, manifest_util.NAME_KEY):
print ' %s: %s' % (key, value)
print
|
bsd-3-clause
|
webmull/phantomjs
|
src/qt/qtwebkit/Tools/Scripts/webkitpy/tool/commands/rebaselineserver.py
|
127
|
4570
|
# Copyright (c) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Starts a local HTTP server which displays layout test failures (given a test
results directory), provides comparisons of expected and actual results (both
images and text) and allows one-click rebaselining of tests."""
from webkitpy.common import system
from webkitpy.common.net.resultsjsonparser import for_each_test, JSONTestResult
from webkitpy.layout_tests.layout_package import json_results_generator
from webkitpy.tool.commands.abstractlocalservercommand import AbstractLocalServerCommand
from webkitpy.tool.servers.rebaselineserver import get_test_baselines, RebaselineHTTPServer, STATE_NEEDS_REBASELINE
class TestConfig(object):
def __init__(self, test_port, layout_tests_directory, results_directory, platforms, filesystem, scm):
self.test_port = test_port
self.layout_tests_directory = layout_tests_directory
self.results_directory = results_directory
self.platforms = platforms
self.filesystem = filesystem
self.scm = scm
class RebaselineServer(AbstractLocalServerCommand):
name = "rebaseline-server"
help_text = __doc__
argument_names = "/path/to/results/directory"
server = RebaselineHTTPServer
def _gather_baselines(self, results_json):
# The rebaseline server and its associated JavaScript expect the tests subtree to
# be key-value pairs instead of hierarchical.
# FIXME: make the rebaseline server use the hierarchical tree.
new_tests_subtree = {}
def gather_baselines_for_test(test_name, result_dict):
result = JSONTestResult(test_name, result_dict)
if result.did_pass_or_run_as_expected():
return
result_dict['state'] = STATE_NEEDS_REBASELINE
result_dict['baselines'] = get_test_baselines(test_name, self._test_config)
new_tests_subtree[test_name] = result_dict
for_each_test(results_json['tests'], gather_baselines_for_test)
results_json['tests'] = new_tests_subtree
def _prepare_config(self, options, args, tool):
results_directory = args[0]
filesystem = system.filesystem.FileSystem()
scm = self._tool.scm()
print 'Parsing full_results.json...'
results_json_path = filesystem.join(results_directory, 'full_results.json')
results_json = json_results_generator.load_json(filesystem, results_json_path)
port = tool.port_factory.get()
layout_tests_directory = port.layout_tests_dir()
platforms = filesystem.listdir(filesystem.join(layout_tests_directory, 'platform'))
self._test_config = TestConfig(port, layout_tests_directory, results_directory, platforms, filesystem, scm)
print 'Gathering current baselines...'
self._gather_baselines(results_json)
return {
'test_config': self._test_config,
"results_json": results_json,
"platforms_json": {
'platforms': platforms,
'defaultPlatform': port.name(),
},
}
|
bsd-3-clause
|
philipgian/pre-commit
|
pre_commit/output.py
|
1
|
2217
|
from __future__ import unicode_literals
import sys
from pre_commit import color
from pre_commit import five
def get_hook_message(
start,
postfix='',
end_msg=None,
end_len=0,
end_color=None,
use_color=None,
cols=80,
):
"""Prints a message for running a hook.
This currently supports three approaches:
# Print `start` followed by dots, leaving 6 characters at the end
>>> print_hook_message('start', end_len=6)
start...............................................................
# Print `start` followed by dots with the end message colored if coloring
# is specified and a newline afterwards
>>> print_hook_message(
'start',
end_msg='end',
end_color=color.RED,
use_color=True,
)
start...................................................................end
# Print `start` followed by dots, followed by the `postfix` message
# uncolored, followed by the `end_msg` colored if specified and a newline
# afterwards
>>> print_hook_message(
'start',
postfix='postfix ',
end_msg='end',
end_color=color.RED,
use_color=True,
)
start...........................................................postfix end
"""
if bool(end_msg) == bool(end_len):
raise ValueError('Expected one of (`end_msg`, `end_len`)')
if end_msg is not None and (end_color is None or use_color is None):
raise ValueError(
'`end_color` and `use_color` are required with `end_msg`'
)
if end_len:
return start + '.' * (cols - len(start) - end_len - 1)
else:
return '{}{}{}{}\n'.format(
start,
'.' * (cols - len(start) - len(postfix) - len(end_msg) - 1),
postfix,
color.format_color(end_msg, end_color, use_color),
)
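# Illustrative sketch, not part of the original file: with the default cols=80
# the dot padding is cols - len(start) - end_len - 1, so a 6-character start
# and end_len=6 leaves 80 - 6 - 6 - 1 = 67 dots.
def _example_hook_message():
    # 'flake8' is a hypothetical hook name; the caller later writes a
    # 6-character status (e.g. 'Passed') into the reserved space.
    return get_hook_message('flake8', end_len=6)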
stdout_byte_stream = getattr(sys.stdout, 'buffer', sys.stdout)
def write(s, stream=stdout_byte_stream):
stream.write(five.to_bytes(s))
stream.flush()
def write_line(s=None, stream=stdout_byte_stream):
if s is not None:
stream.write(five.to_bytes(s))
stream.write(b'\n')
stream.flush()
|
mit
|
Ryanglambert/pybrain
|
pybrain/structure/connections/fullnotself.py
|
31
|
1276
|
__author__ = 'Thomas Rueckstiess, [email protected]'
from scipy import reshape, dot, outer, eye
from pybrain.structure.connections import FullConnection
class FullNotSelfConnection(FullConnection):
"""Connection which connects every element from the first module's
output buffer to the second module's input buffer in a matrix multiplicative
manner, EXCEPT the corresponding elements with the same index of each buffer
(the diagonal of the parameter matrix is 0). Asserts that in and out dimensions
are equal. """
#:TODO: the values on the diagonal are counted as parameters but not used! FIX!
def __init__(self, *args, **kwargs):
FullConnection.__init__(self, *args, **kwargs)
assert self.indim == self.outdim, \
"Indim (%i) does not equal outdim (%i)" % (
self.indim, self.outdim)
def _forwardImplementation(self, inbuf, outbuf):
p = reshape(self.params, (self.outdim, self.indim)) * (1-eye(self.outdim))
outbuf += dot(p, inbuf)
def _backwardImplementation(self, outerr, inerr, inbuf):
p = reshape(self.params, (self.outdim, self.indim)) * (1-eye(self.outdim))
inerr += dot(p.T, outerr)
ds = self.derivs
ds += outer(inbuf, outerr).T.flatten()
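# Illustrative sketch, not part of the original file: the (1 - eye(n)) mask
# zeroes the diagonal of the reshaped parameter matrix, so unit i never
# feeds its own output back as input.
def _example_masked_params(conn):
    # `conn` is assumed to be a FullNotSelfConnection with indim == outdim == n.
    n = conn.outdim
    return reshape(conn.params, (n, n)) * (1 - eye(n))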
|
bsd-3-clause
|
SNAPPETITE/backend
|
flask/lib/python2.7/site-packages/tempita/_looper.py
|
140
|
4161
|
"""
Helper for looping over sequences, particularly in templates.
Often in a loop in a template it's handy to know what's next up,
previously up, if this is the first or last item in the sequence, etc.
These can be awkward to manage in a normal Python loop, but using the
looper you can get a better sense of the context. Use like::
>>> for loop, item in looper(['a', 'b', 'c']):
... print loop.number, item
... if not loop.last:
... print '---'
1 a
---
2 b
---
3 c
"""
import sys
from tempita.compat3 import basestring_
__all__ = ['looper']
class looper(object):
"""
Helper for looping (particularly in templates)
Use this like::
for loop, item in looper(seq):
if loop.first:
...
"""
def __init__(self, seq):
self.seq = seq
def __iter__(self):
return looper_iter(self.seq)
def __repr__(self):
return '<%s for %r>' % (
self.__class__.__name__, self.seq)
class looper_iter(object):
def __init__(self, seq):
self.seq = list(seq)
self.pos = 0
def __iter__(self):
return self
def __next__(self):
if self.pos >= len(self.seq):
raise StopIteration
result = loop_pos(self.seq, self.pos), self.seq[self.pos]
self.pos += 1
return result
if sys.version < "3":
next = __next__
class loop_pos(object):
def __init__(self, seq, pos):
self.seq = seq
self.pos = pos
def __repr__(self):
return '<loop pos=%r at %r>' % (
self.seq[self.pos], self.pos)
def index(self):
return self.pos
index = property(index)
def number(self):
return self.pos + 1
number = property(number)
def item(self):
return self.seq[self.pos]
item = property(item)
def __next__(self):
try:
return self.seq[self.pos + 1]
except IndexError:
return None
__next__ = property(__next__)
if sys.version < "3":
next = __next__
def previous(self):
if self.pos == 0:
return None
return self.seq[self.pos - 1]
previous = property(previous)
def odd(self):
return not self.pos % 2
odd = property(odd)
def even(self):
return self.pos % 2
even = property(even)
def first(self):
return self.pos == 0
first = property(first)
def last(self):
return self.pos == len(self.seq) - 1
last = property(last)
def length(self):
return len(self.seq)
length = property(length)
def first_group(self, getter=None):
"""
Returns true if this item is the start of a new group,
where groups mean that some attribute has changed. The getter
can be None (the item itself changes), an attribute name like
``'.attr'``, a function, or a dict key or list index.
"""
if self.first:
return True
return self._compare_group(self.item, self.previous, getter)
def last_group(self, getter=None):
"""
Returns true if this item is the end of a new group,
where groups mean that some attribute has changed. The getter
can be None (the item itself changes), an attribute name like
``'.attr'``, a function, or a dict key or list index.
"""
if self.last:
return True
return self._compare_group(self.item, self.__next__, getter)
def _compare_group(self, item, other, getter):
if getter is None:
return item != other
elif (isinstance(getter, basestring_)
and getter.startswith('.')):
getter = getter[1:]
if getter.endswith('()'):
getter = getter[:-2]
return getattr(item, getter)() != getattr(other, getter)()
else:
return getattr(item, getter) != getattr(other, getter)
elif hasattr(getter, '__call__'):
return getter(item) != getter(other)
else:
return item[getter] != other[getter]
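# Illustrative sketch, not part of the original module: first_group() marks
# the positions where a chosen attribute changes, e.g. grouping a sorted
# word list by its first letter.
def _example_first_group(words):
    # `words` is assumed sorted, e.g. ['apple', 'avocado', 'banana'].
    starts = []
    for loop, word in looper(words):
        if loop.first_group(lambda w: w[0]):
            starts.append(word)
    return starts  # ['apple', 'banana'] for the example above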
|
mit
|
alexteodor/odoo
|
addons/sales_team/__openerp__.py
|
51
|
1799
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Sale Team',
'version': '1.0',
'author': 'OpenERP SA',
'category': 'Sales Management',
'summary': 'Sales Team',
'description': """
Using this application you can manage Sales Team with CRM and/or Sales
=======================================================================
""",
'website': 'https://www.odoo.com/page/crm',
'depends': ['base','mail','web_kanban_sparkline',],
'data': ['security/sales_team_security.xml',
'security/ir.model.access.csv',
'res_config_view.xml',
'sales_team_data.xml',
'sales_team.xml',],
'demo': ['sales_team_demo.xml'],
'css': ['static/src/css/sales_team.css'],
'installable': True,
'auto_install': True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
sxjscience/tvm
|
tests/python/unittest/test_te_schedule.py
|
4
|
11434
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
import tvm
from tvm import te
import pickle as pkl
def test_schedule_create():
m = te.size_var("m")
n = te.size_var("n")
l = te.size_var("l")
A = te.placeholder((m, l), name="A")
B = te.placeholder((n, l), name="B")
AA = te.compute((m, l), lambda i, j: A[i, j])
T = te.compute((m, n, l), lambda i, j, k: AA(i, k) * B(j, k))
s = te.create_schedule(T.op)
s[AA].set_scope("shared")
xo, xi = s[T].split(T.op.axis[0], factor=10)
xi1, xi2 = s[T].split(xi, factor=2)
s[AA].compute_at(s[T], xi1)
xo, xi = s[AA].split(AA.op.axis[0], factor=10)
s[T].reorder(xi2, xi1)
assert T.op.axis[1] in s[T].leaf_iter_vars
# save load json
json_str = tvm.ir.save_json(s)
s_loaded = tvm.ir.load_json(json_str)
assert isinstance(s_loaded, tvm.te.schedule.Schedule)
assert str(s_loaded.outputs[0].body) == str(s.outputs[0].body)
# pickle unpickle
dump = pkl.dumps(s)
s_loaded = pkl.loads(dump)
assert isinstance(s_loaded, tvm.te.schedule.Schedule)
assert str(s_loaded.outputs[0].body) == str(s.outputs[0].body)
def test_reorder():
m = te.size_var("m")
A = te.placeholder((m,), name="A")
T = te.compute(m, lambda i: A[i + 1])
s = te.create_schedule(T.op)
xo, xi = s[T].split(T.op.axis[0], factor=10)
xi1, xi2 = s[T].split(xi, factor=2)
order = (xi2, xi1, xo)
assert tuple(s[T].leaf_iter_vars) != order
s[T].reorder(*order)
assert tuple(s[T].leaf_iter_vars) == order
try:
# pass duplicate IterVar
# must raise an error
s[T].reorder(xi2, xi1, xi2)
assert False
except tvm.error.TVMError:
pass
def test_split():
m = te.size_var("m")
A = te.placeholder((m,), name="A")
T = te.compute((m,), lambda i: A[i])
s = te.create_schedule(T.op)
xo, xi = s[T].split(T.op.axis[0], factor=10)
assert tuple(s[T].leaf_iter_vars) == (xo, xi)
def test_tile():
m = te.size_var("m")
n = te.size_var("n")
A = te.placeholder((m, n), name="A")
T = te.compute((m, n), lambda i, j: A[i, j])
s = te.create_schedule(T.op)
xo, yo, xi, yi = s[T].tile(T.op.axis[0], T.op.axis[1], x_factor=10, y_factor=5)
assert tuple(s[T].leaf_iter_vars) == (xo, yo, xi, yi)
def test_fuse():
m = te.size_var("m")
n = te.size_var("n")
A = te.placeholder((m, n), name="A")
T = te.compute((m, n), lambda i, j: A[i, j])
s = te.create_schedule(T.op)
xo, yo, xi, yi = s[T].tile(T.op.axis[0], T.op.axis[1], x_factor=10, y_factor=5)
fused = s[T].fuse(xo, yo)
assert any(isinstance(x, tvm.te.schedule.Fuse) for x in s[T].relations)
assert tuple(s[T].leaf_iter_vars) == (fused, xi, yi)
def test_fuse_with_split():
m = te.size_var("m")
n = te.size_var("n")
A = te.placeholder((m, n), name="A")
T = te.compute((m, n), lambda i, j: A[i, j])
s = te.create_schedule(T.op)
y = T.op.axis[1]
xo, xi = s[T].split(T.op.axis[0], factor=10)
fused = s[T].fuse(xi, y)
assert any(isinstance(x, tvm.te.schedule.Fuse) for x in s[T].relations)
assert tuple(s[T].leaf_iter_vars) == (xo, fused)
def test_fuse_with_out_of_order_axis():
m = te.size_var("m")
n = te.size_var("n")
A = te.placeholder((m, n), name="A")
T = te.compute((m, n), lambda i, j: A[i, j])
s = te.create_schedule(T.op)
y = T.op.axis[1]
xo, xi = s[T].split(T.op.axis[0], factor=10)
with pytest.raises(RuntimeError):
fused = s[T].fuse(xo, y) # should throw here
def test_fuse_with_out_of_order_axis_with_reorder():
m = te.size_var("m")
n = te.size_var("n")
A = te.placeholder((m, n), name="A")
T = te.compute((m, n), lambda i, j: A[i, j])
s = te.create_schedule(T.op)
y = T.op.axis[1]
xo, xi = s[T].split(T.op.axis[0], factor=10)
s[T].reorder(y, xo, xi)
fused = s[T].fuse(y, xo) # should be ok
s = te.create_schedule(T.op)
y = T.op.axis[1]
xo, xi = s[T].split(T.op.axis[0], factor=10)
s[T].reorder(y, xo, xi)
with pytest.raises(RuntimeError):
fused = s[T].fuse(y, xi) # should throw here
def test_singleton():
A = te.placeholder((), name="A")
T = te.compute((), lambda: A() + 1)
s = te.create_schedule(T.op)
fused = s[T].fuse()
assert any(isinstance(x, tvm.te.schedule.Singleton) for x in s[T].relations)
assert tuple(s[T].leaf_iter_vars) == (fused,)
dump = pkl.dumps(s)
s_loaded = pkl.loads(dump)
assert isinstance(s_loaded, tvm.te.schedule.Schedule)
def test_vectorize():
m = te.size_var("m")
n = te.size_var("n")
A = te.placeholder((m, n), name="A")
T = te.compute((m, n), lambda i, j: A[i, j])
s = te.create_schedule(T.op)
xo, yo, xi, yi = s[T].tile(T.op.axis[0], T.op.axis[1], x_factor=10, y_factor=5)
s[T].vectorize(yi)
s[T].unroll(xi)
UNROLL = tvm.te.schedule.IterVar.Unrolled
VECTORIZE = tvm.te.schedule.IterVar.Vectorized
assert s[T].iter_var_attrs[xi].iter_type == UNROLL
assert s[T].iter_var_attrs[yi].iter_type == VECTORIZE
def test_vectorize_commreduce():
V = te.placeholder((128,), name="V")
ax = te.reduce_axis((0, 128), name="ax")
O = te.compute((1,), lambda _: te.sum(V[ax], axis=[ax]))
s = te.create_schedule(O.op)
with pytest.raises(RuntimeError):
s[O].vectorize(ax) # should throw here
def test_pragma():
m = 100
A = te.placeholder((m,), name="A")
T = te.compute((m,), lambda i: A[i])
s = te.create_schedule(T.op)
xo, xi = s[T].split(T.op.axis[0], factor=10)
s[T].pragma(xo, "pragma1")
s[T].pragma(xi, "vectorize")
VECTORIZE = tvm.te.schedule.IterVar.Vectorized
assert s[T].iter_var_attrs[xo].pragma_keys[0].value == "pragma1"
assert s[T].iter_var_attrs[xi].iter_type == VECTORIZE
def test_rfactor():
n = te.size_var("n")
k1 = te.reduce_axis((0, n), name="k1")
k2 = te.reduce_axis((0, n), name="k2")
A = te.placeholder((n, n, n), name="A")
B = te.compute((n,), lambda i: te.sum(A[i, k1, k2], axis=[k1, k2]))
# normal schedule
s = te.create_schedule(B.op)
BF = s.rfactor(B, k1)
assert tuple(BF.shape) == (n, n)
assert set(BF.op.body[0].axis) == set([k2])
assert s[B].op.body[0].axis[0].dom.extent == n
assert len(s[B].all_iter_vars) == 2
# schedule with split
s = te.create_schedule(B.op)
ko, ki = s[B].split(k1, factor=4)
xo, xi = s[B].split(B.op.axis[0], factor=8)
BF = s.rfactor(B, ki)
assert BF.shape[0].value == 4
assert BF.shape[1] == n
assert BF.op.body[0].axis[0] == k2
assert BF.op.body[0].axis[1].var == ko.var
assert s[B].op.body[0].axis[0].dom.extent.value == 4
# schedule with factor_axis
s = te.create_schedule(B.op)
ko, ki = s[B].split(k1, factor=4)
xo, xi = s[B].split(B.op.axis[0], factor=8)
BF = s.rfactor(B, ki, 1)
assert n == BF.shape[0]
assert BF.shape[1].value == 4
assert BF.op.body[0].axis[0] == k2
assert BF.op.body[0].axis[1].var == ko.var
assert s[B].op.body[0].axis[0].dom.extent.value == 4
def test_tensor_intrin():
n = 16
x = te.placeholder((n,), name="x")
y = te.placeholder((n,), name="y")
z = te.compute(x.shape, lambda i: x[i] + y[i], name="z")
def intrin_func(ins, outs):
assert isinstance(ins[0], tvm.te.schedule.Buffer)
assert ins[0].shape[0].value == n
return tvm.tir.call_packed("vadd", ins[0].data, outs[0].data, ins[0].shape[0])
intrin = te.decl_tensor_intrin(z.op, intrin_func)
assert intrin.op == z.op
assert intrin.reduce_init is None
assert tuple(intrin.inputs) == tuple(z.op.input_tensors)
assert intrin.buffers[0].shape[0].value == n
m = 32
x = te.placeholder((m,), name="x")
y = te.placeholder((m,), name="y")
z = te.compute(x.shape, lambda i: x[i] + y[i], name="z")
s = te.create_schedule(z.op)
xo, xi = s[z].split(z.op.axis[0], factor=n)
s[z].tensorize(xi, intrin)
assert s[z].iter_var_attrs[xi].tensor_intrin == intrin
assert s[z].iter_var_attrs[xi].iter_type == tvm.te.schedule.IterVar.Tensorized
def test_tensor_intrin_scalar_params():
n = te.size_var("n")
x = te.placeholder((n,), name="x")
v = te.size_var("v")
w = te.size_var("w")
z = te.compute((n,), lambda i: x[i] * v + w, name="z")
def intrin_func(ins, outs, sp):
assert isinstance(ins[0], tvm.te.schedule.Buffer)
assert ins[0].shape[0] == n
assert sp[0] == v
assert sp[1] == w
return tvm.tir.call_packed("hw_func", ins[0].data, outs[0].data, sp[0], sp[1])
intrin = te.decl_tensor_intrin(
z.op, intrin_func, scalar_params=[v, w], default_buffer_params={"offset_factor": 1}
)
assert intrin.op == z.op
assert intrin.reduce_init is None
assert tuple(intrin.inputs) == tuple(z.op.input_tensors)
assert intrin.buffers[0].shape[0] == n
assert tuple(intrin.scalar_params) == tuple((v, w))
A = te.placeholder((10, 10), name="A")
# Pass scalar inputs to the TensorIntrin, interleaved with tensor inputs
C = te.compute((10, 10), lambda i, j: intrin(i * i, A[i, j], i + j), name="C")
s = te.create_schedule(C.op)
stmt = tvm.lower(s, [A, C])["main"].body
assert isinstance(stmt.body.body, tvm.tir.Evaluate)
assert len(stmt.body.body.value.args) == 5
assert str(stmt.body.body.value.args[3]) == "(i: int32*i)"
assert str(stmt.body.body.value.args[4]) == "(i: int32 + j: int32)"
def test_legalize_invalid_attach():
A = te.compute((10, 10), lambda i, j: 1.0, name="A")
B = te.compute((10, 10), lambda i, j: A[i][j], name="B")
# Case 1: Split an axis which is the target of a compute_at
s = te.create_schedule([B.op])
s[A].compute_at(s[B], B.op.axis[1])
s[B].split(B.op.axis[1], 2)
stmt = tvm.lower(s, [A, B], simple_mode=True)["main"].body
assert isinstance(stmt.body.body, tvm.tir.stmt.For)
# Case 2: Fuse an axis which is the target of a compute_at
s = te.create_schedule([B.op])
s[A].compute_at(s[B], B.op.axis[1])
s[B].fuse(B.op.axis[0], B.op.axis[1])
stmt = tvm.lower(s, [A, B], simple_mode=True)["main"].body
assert isinstance(stmt, tvm.tir.stmt.For)
if __name__ == "__main__":
test_singleton()
test_pragma()
test_tensor_intrin()
test_tensor_intrin_scalar_params()
test_rfactor()
test_schedule_create()
test_reorder()
test_tile()
test_split()
test_fuse()
test_fuse_with_split()
test_fuse_with_out_of_order_axis()
test_fuse_with_out_of_order_axis_with_reorder()
test_vectorize()
test_vectorize_commreduce()
test_legalize_invalid_attach()
|
apache-2.0
|
awkspace/ansible
|
lib/ansible/plugins/filter/json_query.py
|
197
|
1857
|
# (c) 2015, Filipe Niero Felisbino <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.errors import AnsibleError, AnsibleFilterError
try:
import jmespath
HAS_LIB = True
except ImportError:
HAS_LIB = False
def json_query(data, expr):
'''Query data using jmespath query language ( http://jmespath.org ). Example:
- debug: msg="{{ instance | json_query('tagged_instances[*].block_device_mapping.*.volume_id') }}"
'''
if not HAS_LIB:
raise AnsibleError('You need to install "jmespath" prior to running '
'json_query filter')
try:
return jmespath.search(expr, data)
except jmespath.exceptions.JMESPathError as e:
raise AnsibleFilterError('JMESPathError in json_query filter plugin:\n%s' % e)
except Exception as e:
# For older jmespath, we can get ValueError and TypeError without much info.
raise AnsibleFilterError('Error in jmespath.search in json_query filter plugin:\n%s' % e)
class FilterModule(object):
''' Query filter '''
def filters(self):
return {
'json_query': json_query
}
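# Illustrative sketch, not part of the original plugin: json_query() delegates
# to jmespath.search(), so the hypothetical call below returns ['i-1', 'i-2']
# when the jmespath library is installed.
def _example_json_query():
    data = {'instances': [{'id': 'i-1'}, {'id': 'i-2'}]}
    return json_query(data, 'instances[*].id')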
|
gpl-3.0
|
AkademieOlympia/sympy
|
sympy/physics/unitsystems/prefixes.py
|
91
|
4190
|
# -*- coding: utf-8 -*-
"""
Module defining the unit prefix class and some constants.
Constant dicts for SI and binary prefixes are defined as PREFIXES and
BIN_PREFIXES.
"""
from sympy import sympify
class Prefix(object):
"""
This class represents prefixes, with their name, symbol and factor.
Prefixes are used to create derived units from a given unit. They should
always be encapsulated into units.
The factor is constructed from a base (default is 10) to some power, and
it gives the total multiple or fraction. For example the kilometer km
is constructed from the meter (factor 1) and the kilo (10 to the power 3,
i.e. 1000). The base can be changed to allow e.g. binary prefixes.
A prefix multiplied by something will always return the product of this
other object times the factor, except if the other object:
- is a prefix and they can be combined into a new prefix;
- defines multiplication with prefixes (which is the case for the Unit
class).
"""
def __init__(self, name, abbrev, exponent, base=sympify(10)):
self.name = name
self.abbrev = abbrev
self.factor = base**exponent
def __str__(self):
return self.name
__repr__ = __str__
def __mul__(self, other):
fact = self.factor * other.factor
if fact == 1:
return 1
elif isinstance(other, Prefix):
# simplify prefix
for p in PREFIXES:
if PREFIXES[p].factor == fact:
return PREFIXES[p]
return fact
return self.factor * other
def __div__(self, other):
fact = self.factor / other.factor
if fact == 1:
return 1
elif isinstance(other, Prefix):
for p in PREFIXES:
if PREFIXES[p].factor == fact:
return PREFIXES[p]
return fact
return self.factor / other
__truediv__ = __div__
def __rdiv__(self, other):
if other == 1:
for p in PREFIXES:
if PREFIXES[p].factor == 1 / self.factor:
return PREFIXES[p]
return other / self.factor
__rtruediv__ = __rdiv__
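# Illustrative sketch, not part of the original module: a prefix factor is
# base**exponent, and multiplying complementary prefixes collapses to 1.
def _example_prefix_arithmetic():
    kilo = PREFIXES['k']    # factor 10**3
    milli = PREFIXES['m']   # factor 10**-3
    # kilo.factor == 1000 and kilo * milli == 1, since 10**3 * 10**-3 == 1.
    return kilo.factor, kilo * milli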
def prefix_unit(unit, prefixes):
"""
Return a list of all units formed by unit and the given prefixes.
You can use the predefined PREFIXES or BIN_PREFIXES, but you can also
pass as argument a subdict of them if you don't want all prefixed units.
>>> from sympy.physics.unitsystems.prefixes import (PREFIXES,
... prefix_unit)
>>> from sympy.physics.unitsystems.systems import mks
>>> m = mks["m"]
>>> pref = {"m": PREFIXES["m"], "c": PREFIXES["c"], "d": PREFIXES["d"]}
>>> prefix_unit(m, pref) #doctest: +SKIP
[cm, dm, mm]
"""
from sympy.physics.unitsystems.units import Unit
prefixed_units = []
for prefix in prefixes:
prefixed_units.append(Unit(unit, abbrev=unit.abbrev,
prefix=prefixes[prefix]))
return prefixed_units
# http://physics.nist.gov/cuu/Units/prefixes.html
PREFIXES = {
'Y': Prefix('yotta', 'Y', 24),
'Z': Prefix('zetta', 'Z', 21),
'E': Prefix('exa', 'E', 18),
'P': Prefix('peta', 'P', 15),
'T': Prefix('tera', 'T', 12),
'G': Prefix('giga', 'G', 9),
'M': Prefix('mega', 'M', 6),
'k': Prefix('kilo', 'k', 3),
'h': Prefix('hecto', 'h', 2),
'da': Prefix('deca', 'da', 1),
'd': Prefix('deci', 'd', -1),
'c': Prefix('centi', 'c', -2),
'm': Prefix('milli', 'm', -3),
'µ': Prefix('micro', 'µ', -6),
'n': Prefix('nano', 'n', -9),
'p': Prefix('pico', 'p', -12),
'f': Prefix('femto', 'f', -15),
'a': Prefix('atto', 'a', -18),
'z': Prefix('zepto', 'z', -21),
'y': Prefix('yocto', 'y', -24)
}
# http://physics.nist.gov/cuu/Units/binary.html
BIN_PREFIXES = {
'Ki': Prefix('kibi', 'Y', 10, 2),
'Mi': Prefix('mebi', 'Y', 20, 2),
'Gi': Prefix('gibi', 'Y', 30, 2),
'Ti': Prefix('tebi', 'Y', 40, 2),
'Pi': Prefix('pebi', 'Y', 50, 2),
'Ei': Prefix('exbi', 'Y', 60, 2)
}
|
bsd-3-clause
|
krisys/django
|
django/core/serializers/xml_serializer.py
|
17
|
15681
|
"""
XML serializer.
"""
from __future__ import unicode_literals
from collections import OrderedDict
from xml.dom import pulldom
from xml.sax import handler
from xml.sax.expatreader import ExpatParser as _ExpatParser
from django.apps import apps
from django.conf import settings
from django.core.serializers import base
from django.db import DEFAULT_DB_ALIAS, models
from django.utils.encoding import smart_text
from django.utils.xmlutils import (
SimplerXMLGenerator, UnserializableContentError,
)
class Serializer(base.Serializer):
"""
Serializes a QuerySet to XML.
"""
def indent(self, level):
if self.options.get('indent') is not None:
self.xml.ignorableWhitespace('\n' + ' ' * self.options.get('indent') * level)
def start_serialization(self):
"""
Start serialization -- open the XML document and the root element.
"""
self.xml = SimplerXMLGenerator(self.stream, self.options.get("encoding", settings.DEFAULT_CHARSET))
self.xml.startDocument()
self.xml.startElement("django-objects", {"version": "1.0"})
def end_serialization(self):
"""
End serialization -- end the document.
"""
self.indent(0)
self.xml.endElement("django-objects")
self.xml.endDocument()
def start_object(self, obj):
"""
Called as each object is handled.
"""
if not hasattr(obj, "_meta"):
raise base.SerializationError("Non-model object (%s) encountered during serialization" % type(obj))
self.indent(1)
model = obj._meta.proxy_for_model if obj._deferred else obj.__class__
attrs = OrderedDict([("model", smart_text(model._meta))])
if not self.use_natural_primary_keys or not hasattr(obj, 'natural_key'):
obj_pk = obj._get_pk_val()
if obj_pk is not None:
attrs['pk'] = smart_text(obj_pk)
self.xml.startElement("object", attrs)
def end_object(self, obj):
"""
Called after handling all fields for an object.
"""
self.indent(1)
self.xml.endElement("object")
def handle_field(self, obj, field):
"""
Called to handle each field on an object (except for ForeignKeys and
ManyToManyFields)
"""
self.indent(2)
self.xml.startElement("field", OrderedDict([
("name", field.name),
("type", field.get_internal_type()),
]))
# Get a "string version" of the object's data.
if getattr(obj, field.name) is not None:
try:
self.xml.characters(field.value_to_string(obj))
except UnserializableContentError:
raise ValueError("%s.%s (pk:%s) contains unserializable characters" % (
obj.__class__.__name__, field.name, obj._get_pk_val()))
else:
self.xml.addQuickElement("None")
self.xml.endElement("field")
def handle_fk_field(self, obj, field):
"""
Called to handle a ForeignKey (we need to treat them slightly
differently from regular fields).
"""
self._start_relational_field(field)
related_att = getattr(obj, field.get_attname())
if related_att is not None:
if self.use_natural_foreign_keys and hasattr(field.remote_field.model, 'natural_key'):
related = getattr(obj, field.name)
# If related object has a natural key, use it
related = related.natural_key()
# Iterable natural keys are rolled out as subelements
for key_value in related:
self.xml.startElement("natural", {})
self.xml.characters(smart_text(key_value))
self.xml.endElement("natural")
else:
self.xml.characters(smart_text(related_att))
else:
self.xml.addQuickElement("None")
self.xml.endElement("field")
def handle_m2m_field(self, obj, field):
"""
Called to handle a ManyToManyField. Related objects are only
serialized as references to the object's PK (i.e. the related *data*
is not dumped, just the relation).
"""
if field.remote_field.through._meta.auto_created:
self._start_relational_field(field)
if self.use_natural_foreign_keys and hasattr(field.remote_field.model, 'natural_key'):
# If the objects in the m2m have a natural key, use it
def handle_m2m(value):
natural = value.natural_key()
# Iterable natural keys are rolled out as subelements
self.xml.startElement("object", {})
for key_value in natural:
self.xml.startElement("natural", {})
self.xml.characters(smart_text(key_value))
self.xml.endElement("natural")
self.xml.endElement("object")
else:
def handle_m2m(value):
self.xml.addQuickElement("object", attrs={
'pk': smart_text(value._get_pk_val())
})
for relobj in getattr(obj, field.name).iterator():
handle_m2m(relobj)
self.xml.endElement("field")
def _start_relational_field(self, field):
"""
Helper to output the <field> element for relational fields
"""
self.indent(2)
self.xml.startElement("field", OrderedDict([
("name", field.name),
("rel", field.remote_field.__class__.__name__),
("to", smart_text(field.remote_field.model._meta)),
]))
class Deserializer(base.Deserializer):
"""
Deserialize XML.
"""
def __init__(self, stream_or_string, **options):
super(Deserializer, self).__init__(stream_or_string, **options)
self.event_stream = pulldom.parse(self.stream, self._make_parser())
self.db = options.pop('using', DEFAULT_DB_ALIAS)
self.ignore = options.pop('ignorenonexistent', False)
def _make_parser(self):
"""Create a hardened XML parser (no custom/external entities)."""
return DefusedExpatParser()
def __next__(self):
for event, node in self.event_stream:
if event == "START_ELEMENT" and node.nodeName == "object":
self.event_stream.expandNode(node)
return self._handle_object(node)
raise StopIteration
def _handle_object(self, node):
"""
Convert an <object> node to a DeserializedObject.
"""
# Look up the model using the model loading mechanism. If this fails,
# bail.
Model = self._get_model_from_node(node, "model")
# Start building a data dictionary from the object.
data = {}
if node.hasAttribute('pk'):
data[Model._meta.pk.attname] = Model._meta.pk.to_python(
node.getAttribute('pk'))
# Also start building a dict of m2m data (this is saved as
# {m2m_accessor_attribute : [list_of_related_objects]})
m2m_data = {}
field_names = {f.name for f in Model._meta.get_fields()}
# Deserialize each field.
for field_node in node.getElementsByTagName("field"):
# If the field is missing the name attribute, bail (are you
# sensing a pattern here?)
field_name = field_node.getAttribute("name")
if not field_name:
raise base.DeserializationError("<field> node is missing the 'name' attribute")
# Get the field from the Model. This will raise a
# FieldDoesNotExist if, well, the field doesn't exist, which will
# be propagated correctly unless ignorenonexistent=True is used.
if self.ignore and field_name not in field_names:
continue
field = Model._meta.get_field(field_name)
# As is usually the case, relation fields get the special treatment.
if field.remote_field and isinstance(field.remote_field, models.ManyToManyRel):
m2m_data[field.name] = self._handle_m2m_field_node(field_node, field)
elif field.remote_field and isinstance(field.remote_field, models.ManyToOneRel):
data[field.attname] = self._handle_fk_field_node(field_node, field)
else:
if field_node.getElementsByTagName('None'):
value = None
else:
value = field.to_python(getInnerText(field_node).strip())
data[field.name] = value
obj = base.build_instance(Model, data, self.db)
# Return a DeserializedObject so that the m2m data has a place to live.
return base.DeserializedObject(obj, m2m_data)
def _handle_fk_field_node(self, node, field):
"""
Handle a <field> node for a ForeignKey
"""
# Check if there is a child node named 'None', returning None if so.
if node.getElementsByTagName('None'):
return None
else:
model = field.remote_field.model
if hasattr(model._default_manager, 'get_by_natural_key'):
keys = node.getElementsByTagName('natural')
if keys:
# If there are 'natural' subelements, it must be a natural key
field_value = [getInnerText(k).strip() for k in keys]
obj = model._default_manager.db_manager(self.db).get_by_natural_key(*field_value)
obj_pk = getattr(obj, field.remote_field.field_name)
# If this is a natural foreign key to an object that
# has a FK/O2O as the foreign key, use the FK value
if field.remote_field.model._meta.pk.remote_field:
obj_pk = obj_pk.pk
else:
# Otherwise, treat like a normal PK
field_value = getInnerText(node).strip()
obj_pk = model._meta.get_field(field.remote_field.field_name).to_python(field_value)
return obj_pk
else:
field_value = getInnerText(node).strip()
return model._meta.get_field(field.remote_field.field_name).to_python(field_value)
def _handle_m2m_field_node(self, node, field):
"""
Handle a <field> node for a ManyToManyField.
"""
model = field.remote_field.model
default_manager = model._default_manager
if hasattr(default_manager, 'get_by_natural_key'):
def m2m_convert(n):
keys = n.getElementsByTagName('natural')
if keys:
# If there are 'natural' subelements, it must be a natural key
field_value = [getInnerText(k).strip() for k in keys]
obj_pk = default_manager.db_manager(self.db).get_by_natural_key(*field_value).pk
else:
# Otherwise, treat like a normal PK value.
obj_pk = model._meta.pk.to_python(n.getAttribute('pk'))
return obj_pk
else:
def m2m_convert(n):
return model._meta.pk.to_python(n.getAttribute('pk'))
return [m2m_convert(c) for c in node.getElementsByTagName("object")]
def _get_model_from_node(self, node, attr):
"""
Helper to look up a model from a <object model=...> or a <field
rel=... to=...> node.
"""
model_identifier = node.getAttribute(attr)
if not model_identifier:
raise base.DeserializationError(
"<%s> node is missing the required '%s' attribute"
% (node.nodeName, attr))
try:
return apps.get_model(model_identifier)
except (LookupError, TypeError):
raise base.DeserializationError(
"<%s> node has invalid model identifier: '%s'"
% (node.nodeName, model_identifier))
def getInnerText(node):
"""
Get all the inner text of a DOM node (recursively).
"""
# inspired by http://mail.python.org/pipermail/xml-sig/2005-March/011022.html
inner_text = []
for child in node.childNodes:
if child.nodeType == child.TEXT_NODE or child.nodeType == child.CDATA_SECTION_NODE:
inner_text.append(child.data)
elif child.nodeType == child.ELEMENT_NODE:
inner_text.extend(getInnerText(child))
else:
pass
return "".join(inner_text)
# Below code based on Christian Heimes' defusedxml
class DefusedExpatParser(_ExpatParser):
"""
An expat parser hardened against XML bomb attacks.
    Forbids DTDs and external entity references.
"""
def __init__(self, *args, **kwargs):
_ExpatParser.__init__(self, *args, **kwargs)
self.setFeature(handler.feature_external_ges, False)
self.setFeature(handler.feature_external_pes, False)
def start_doctype_decl(self, name, sysid, pubid, has_internal_subset):
raise DTDForbidden(name, sysid, pubid)
def entity_decl(self, name, is_parameter_entity, value, base,
sysid, pubid, notation_name):
raise EntitiesForbidden(name, value, base, sysid, pubid, notation_name)
def unparsed_entity_decl(self, name, base, sysid, pubid, notation_name):
# expat 1.2
raise EntitiesForbidden(name, None, base, sysid, pubid, notation_name)
def external_entity_ref_handler(self, context, base, sysid, pubid):
raise ExternalReferenceForbidden(context, base, sysid, pubid)
def reset(self):
_ExpatParser.reset(self)
parser = self._parser
parser.StartDoctypeDeclHandler = self.start_doctype_decl
parser.EntityDeclHandler = self.entity_decl
parser.UnparsedEntityDeclHandler = self.unparsed_entity_decl
parser.ExternalEntityRefHandler = self.external_entity_ref_handler
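# Illustrative sketch of how the hardened parser behaves (handler and file
# names here are hypothetical, not taken from the module above):
#
#     parser = DefusedExpatParser()
#     parser.setContentHandler(my_handler)   # any xml.sax ContentHandler
#     parser.parse(open("data.xml"))         # raises DTDForbidden on <!DOCTYPE ...>
#
# Any DTD, entity declaration, or external entity reference aborts parsing
# with one of the exceptions defined below.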
class DefusedXmlException(ValueError):
"""Base exception."""
def __repr__(self):
return str(self)
class DTDForbidden(DefusedXmlException):
"""Document type definition is forbidden."""
def __init__(self, name, sysid, pubid):
super(DTDForbidden, self).__init__()
self.name = name
self.sysid = sysid
self.pubid = pubid
def __str__(self):
tpl = "DTDForbidden(name='{}', system_id={!r}, public_id={!r})"
return tpl.format(self.name, self.sysid, self.pubid)
class EntitiesForbidden(DefusedXmlException):
"""Entity definition is forbidden."""
def __init__(self, name, value, base, sysid, pubid, notation_name):
super(EntitiesForbidden, self).__init__()
self.name = name
self.value = value
self.base = base
self.sysid = sysid
self.pubid = pubid
self.notation_name = notation_name
def __str__(self):
tpl = "EntitiesForbidden(name='{}', system_id={!r}, public_id={!r})"
return tpl.format(self.name, self.sysid, self.pubid)
class ExternalReferenceForbidden(DefusedXmlException):
"""Resolving an external reference is forbidden."""
def __init__(self, context, base, sysid, pubid):
super(ExternalReferenceForbidden, self).__init__()
self.context = context
self.base = base
self.sysid = sysid
self.pubid = pubid
def __str__(self):
tpl = "ExternalReferenceForbidden(system_id='{}', public_id={})"
return tpl.format(self.sysid, self.pubid)
|
bsd-3-clause
|
trivoldus28/pulsarch-verilog
|
tools/local/bas-release/bas,3.9/lib/python/lib/python2.3/hotshot/stats.py
|
252
|
2582
|
"""Statistics analyzer for HotShot."""
import profile
import pstats
import hotshot.log
from hotshot.log import ENTER, EXIT
def load(filename):
return StatsLoader(filename).load()
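# Hedged usage sketch (the file name is hypothetical):
#
#     stats = load("myscript.prof")        # a hotshot log produced elsewhere
#     stats.sort_stats("cumulative").print_stats(10)
#
# load() replays the hotshot event log through a profile.Profile instance and
# hands the result to pstats.Stats, so the usual pstats reporting API applies.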
class StatsLoader:
def __init__(self, logfn):
self._logfn = logfn
self._code = {}
self._stack = []
self.pop_frame = self._stack.pop
def load(self):
# The timer selected by the profiler should never be used, so make
# sure it doesn't work:
p = Profile()
p.get_time = _brokentimer
log = hotshot.log.LogReader(self._logfn)
taccum = 0
for event in log:
what, (filename, lineno, funcname), tdelta = event
if tdelta > 0:
taccum += tdelta
            # We multiply taccum to convert from the microseconds we
            # have into the seconds that the profile/pstats modules work
            # with; this allows the numbers to have some basis in
            # reality (ignoring calibration issues for now).
if what == ENTER:
frame = self.new_frame(filename, lineno, funcname)
p.trace_dispatch_call(frame, taccum * .000001)
taccum = 0
elif what == EXIT:
frame = self.pop_frame()
p.trace_dispatch_return(frame, taccum * .000001)
taccum = 0
# no further work for line events
assert not self._stack
return pstats.Stats(p)
def new_frame(self, *args):
# args must be filename, firstlineno, funcname
# our code objects are cached since we don't need to create
# new ones every time
try:
code = self._code[args]
except KeyError:
code = FakeCode(*args)
self._code[args] = code
        # frame objects are created fresh, since the back pointer will
        # vary considerably
if self._stack:
back = self._stack[-1]
else:
back = None
frame = FakeFrame(code, back)
self._stack.append(frame)
return frame
class Profile(profile.Profile):
def simulate_cmd_complete(self):
pass
class FakeCode:
def __init__(self, filename, firstlineno, funcname):
self.co_filename = filename
self.co_firstlineno = firstlineno
self.co_name = self.__name__ = funcname
class FakeFrame:
def __init__(self, code, back):
self.f_back = back
self.f_code = code
def _brokentimer():
raise RuntimeError, "this timer should not be called"
|
gpl-2.0
|
ZhangXinNan/tensorflow
|
tensorflow/python/kernel_tests/distributions/uniform_test.py
|
11
|
11353
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Uniform distribution."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import importlib
import numpy as np
from tensorflow.python.eager import backprop
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import errors
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.distributions import uniform as uniform_lib
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
def try_import(name): # pylint: disable=invalid-name
module = None
try:
module = importlib.import_module(name)
except ImportError as e:
tf_logging.warning("Could not import %s: %s" % (name, str(e)))
return module
stats = try_import("scipy.stats")
class UniformTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes
def testUniformRange(self):
with self.test_session():
a = 3.0
b = 10.0
uniform = uniform_lib.Uniform(low=a, high=b)
self.assertAllClose(a, self.evaluate(uniform.low))
self.assertAllClose(b, self.evaluate(uniform.high))
self.assertAllClose(b - a, self.evaluate(uniform.range()))
@test_util.run_in_graph_and_eager_modes
def testUniformPDF(self):
with self.test_session():
a = constant_op.constant([-3.0] * 5 + [15.0])
b = constant_op.constant([11.0] * 5 + [20.0])
uniform = uniform_lib.Uniform(low=a, high=b)
a_v = -3.0
b_v = 11.0
x = np.array([-10.5, 4.0, 0.0, 10.99, 11.3, 17.0], dtype=np.float32)
def _expected_pdf():
pdf = np.zeros_like(x) + 1.0 / (b_v - a_v)
pdf[x > b_v] = 0.0
pdf[x < a_v] = 0.0
pdf[5] = 1.0 / (20.0 - 15.0)
return pdf
expected_pdf = _expected_pdf()
pdf = uniform.prob(x)
self.assertAllClose(expected_pdf, self.evaluate(pdf))
log_pdf = uniform.log_prob(x)
self.assertAllClose(np.log(expected_pdf), self.evaluate(log_pdf))
@test_util.run_in_graph_and_eager_modes
def testUniformShape(self):
with self.test_session():
a = constant_op.constant([-3.0] * 5)
b = constant_op.constant(11.0)
uniform = uniform_lib.Uniform(low=a, high=b)
self.assertEqual(self.evaluate(uniform.batch_shape_tensor()), (5,))
self.assertEqual(uniform.batch_shape, tensor_shape.TensorShape([5]))
self.assertAllEqual(self.evaluate(uniform.event_shape_tensor()), [])
self.assertEqual(uniform.event_shape, tensor_shape.TensorShape([]))
@test_util.run_in_graph_and_eager_modes
def testUniformPDFWithScalarEndpoint(self):
with self.test_session():
a = constant_op.constant([0.0, 5.0])
b = constant_op.constant(10.0)
uniform = uniform_lib.Uniform(low=a, high=b)
x = np.array([0.0, 8.0], dtype=np.float32)
expected_pdf = np.array([1.0 / (10.0 - 0.0), 1.0 / (10.0 - 5.0)])
pdf = uniform.prob(x)
self.assertAllClose(expected_pdf, self.evaluate(pdf))
@test_util.run_in_graph_and_eager_modes
def testUniformCDF(self):
with self.test_session():
batch_size = 6
a = constant_op.constant([1.0] * batch_size)
b = constant_op.constant([11.0] * batch_size)
a_v = 1.0
b_v = 11.0
x = np.array([-2.5, 2.5, 4.0, 0.0, 10.99, 12.0], dtype=np.float32)
uniform = uniform_lib.Uniform(low=a, high=b)
def _expected_cdf():
cdf = (x - a_v) / (b_v - a_v)
cdf[x >= b_v] = 1
cdf[x < a_v] = 0
return cdf
cdf = uniform.cdf(x)
self.assertAllClose(_expected_cdf(), self.evaluate(cdf))
log_cdf = uniform.log_cdf(x)
self.assertAllClose(np.log(_expected_cdf()), self.evaluate(log_cdf))
@test_util.run_in_graph_and_eager_modes
def testUniformEntropy(self):
with self.test_session():
a_v = np.array([1.0, 1.0, 1.0])
b_v = np.array([[1.5, 2.0, 3.0]])
uniform = uniform_lib.Uniform(low=a_v, high=b_v)
expected_entropy = np.log(b_v - a_v)
self.assertAllClose(expected_entropy, self.evaluate(uniform.entropy()))
@test_util.run_in_graph_and_eager_modes
def testUniformAssertMaxGtMin(self):
with self.test_session():
a_v = np.array([1.0, 1.0, 1.0], dtype=np.float32)
b_v = np.array([1.0, 2.0, 3.0], dtype=np.float32)
with self.assertRaisesWithPredicateMatch(errors.InvalidArgumentError,
"x < y"):
uniform = uniform_lib.Uniform(low=a_v, high=b_v, validate_args=True)
self.evaluate(uniform.low)
@test_util.run_in_graph_and_eager_modes
def testUniformSample(self):
with self.test_session():
a = constant_op.constant([3.0, 4.0])
b = constant_op.constant(13.0)
a1_v = 3.0
a2_v = 4.0
b_v = 13.0
n = constant_op.constant(100000)
uniform = uniform_lib.Uniform(low=a, high=b)
samples = uniform.sample(n, seed=137)
sample_values = self.evaluate(samples)
self.assertEqual(sample_values.shape, (100000, 2))
self.assertAllClose(
sample_values[::, 0].mean(), (b_v + a1_v) / 2, atol=1e-1, rtol=0.)
self.assertAllClose(
sample_values[::, 1].mean(), (b_v + a2_v) / 2, atol=1e-1, rtol=0.)
self.assertFalse(
np.any(sample_values[::, 0] < a1_v) or np.any(sample_values >= b_v))
self.assertFalse(
np.any(sample_values[::, 1] < a2_v) or np.any(sample_values >= b_v))
@test_util.run_in_graph_and_eager_modes
def _testUniformSampleMultiDimensional(self):
# DISABLED: Please enable this test once b/issues/30149644 is resolved.
with self.test_session():
batch_size = 2
a_v = [3.0, 22.0]
b_v = [13.0, 35.0]
a = constant_op.constant([a_v] * batch_size)
b = constant_op.constant([b_v] * batch_size)
uniform = uniform_lib.Uniform(low=a, high=b)
n_v = 100000
n = constant_op.constant(n_v)
samples = uniform.sample(n)
self.assertEqual(samples.get_shape(), (n_v, batch_size, 2))
sample_values = self.evaluate(samples)
self.assertFalse(
np.any(sample_values[:, 0, 0] < a_v[0]) or
np.any(sample_values[:, 0, 0] >= b_v[0]))
self.assertFalse(
np.any(sample_values[:, 0, 1] < a_v[1]) or
np.any(sample_values[:, 0, 1] >= b_v[1]))
self.assertAllClose(
sample_values[:, 0, 0].mean(), (a_v[0] + b_v[0]) / 2, atol=1e-2)
self.assertAllClose(
sample_values[:, 0, 1].mean(), (a_v[1] + b_v[1]) / 2, atol=1e-2)
@test_util.run_in_graph_and_eager_modes
def testUniformMean(self):
with self.test_session():
a = 10.0
b = 100.0
uniform = uniform_lib.Uniform(low=a, high=b)
if not stats:
return
s_uniform = stats.uniform(loc=a, scale=b - a)
self.assertAllClose(self.evaluate(uniform.mean()), s_uniform.mean())
@test_util.run_in_graph_and_eager_modes
def testUniformVariance(self):
with self.test_session():
a = 10.0
b = 100.0
uniform = uniform_lib.Uniform(low=a, high=b)
if not stats:
return
s_uniform = stats.uniform(loc=a, scale=b - a)
self.assertAllClose(self.evaluate(uniform.variance()), s_uniform.var())
@test_util.run_in_graph_and_eager_modes
def testUniformStd(self):
with self.test_session():
a = 10.0
b = 100.0
uniform = uniform_lib.Uniform(low=a, high=b)
if not stats:
return
s_uniform = stats.uniform(loc=a, scale=b - a)
self.assertAllClose(self.evaluate(uniform.stddev()), s_uniform.std())
@test_util.run_in_graph_and_eager_modes
def testUniformNans(self):
with self.test_session():
a = 10.0
b = [11.0, 100.0]
uniform = uniform_lib.Uniform(low=a, high=b)
no_nans = constant_op.constant(1.0)
nans = constant_op.constant(0.0) / constant_op.constant(0.0)
self.assertTrue(self.evaluate(math_ops.is_nan(nans)))
with_nans = array_ops.stack([no_nans, nans])
pdf = uniform.prob(with_nans)
is_nan = self.evaluate(math_ops.is_nan(pdf))
self.assertFalse(is_nan[0])
self.assertTrue(is_nan[1])
@test_util.run_in_graph_and_eager_modes
def testUniformSamplePdf(self):
with self.test_session():
a = 10.0
b = [11.0, 100.0]
uniform = uniform_lib.Uniform(a, b)
self.assertTrue(
self.evaluate(
math_ops.reduce_all(uniform.prob(uniform.sample(10)) > 0)))
@test_util.run_in_graph_and_eager_modes
def testUniformBroadcasting(self):
with self.test_session():
a = 10.0
b = [11.0, 20.0]
uniform = uniform_lib.Uniform(a, b)
pdf = uniform.prob([[10.5, 11.5], [9.0, 19.0], [10.5, 21.0]])
expected_pdf = np.array([[1.0, 0.1], [0.0, 0.1], [1.0, 0.0]])
self.assertAllClose(expected_pdf, self.evaluate(pdf))
@test_util.run_in_graph_and_eager_modes
def testUniformSampleWithShape(self):
with self.test_session():
a = 10.0
b = [11.0, 20.0]
uniform = uniform_lib.Uniform(a, b)
pdf = uniform.prob(uniform.sample((2, 3)))
# pylint: disable=bad-continuation
expected_pdf = [
[[1.0, 0.1], [1.0, 0.1], [1.0, 0.1]],
[[1.0, 0.1], [1.0, 0.1], [1.0, 0.1]],
]
# pylint: enable=bad-continuation
self.assertAllClose(expected_pdf, self.evaluate(pdf))
pdf = uniform.prob(uniform.sample())
expected_pdf = [1.0, 0.1]
self.assertAllClose(expected_pdf, self.evaluate(pdf))
def testFullyReparameterized(self):
a = constant_op.constant(0.1)
b = constant_op.constant(0.8)
with backprop.GradientTape() as tape:
tape.watch(a)
tape.watch(b)
uniform = uniform_lib.Uniform(a, b)
samples = uniform.sample(100)
grad_a, grad_b = tape.gradient(samples, [a, b])
self.assertIsNotNone(grad_a)
self.assertIsNotNone(grad_b)
# Eager doesn't pass due to a type mismatch in one of the ops.
def testUniformFloat64(self):
uniform = uniform_lib.Uniform(
low=np.float64(0.), high=np.float64(1.))
self.assertAllClose(
[1., 1.],
self.evaluate(uniform.prob(np.array([0.5, 0.6], dtype=np.float64))))
self.assertAllClose(
[0.5, 0.6],
self.evaluate(uniform.cdf(np.array([0.5, 0.6], dtype=np.float64))))
self.assertAllClose(0.5, self.evaluate(uniform.mean()))
self.assertAllClose(1 / 12., self.evaluate(uniform.variance()))
self.assertAllClose(0., self.evaluate(uniform.entropy()))
if __name__ == "__main__":
test.main()
|
apache-2.0
|
dturner-tw/pants
|
tests/python/pants_test/android/tasks/test_zipalign.py
|
16
|
2041
|
# coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
from pants.backend.android.tasks.zipalign import Zipalign
from pants_test.android.test_android_base import TestAndroidBase, distribution
class TestZipalign(TestAndroidBase):
"""Test class for the Zipalign task."""
@classmethod
def task_type(cls):
return Zipalign
def test_zipalign_smoke(self):
task = self.create_task(self.context())
task.execute()
def test_zipalign_binary(self):
with distribution() as dist:
with self.android_binary() as android_binary:
self.set_options(sdk_path=dist)
task = self.create_task(self.context())
target = android_binary
self.assertEqual(task.zipalign_binary(target),
os.path.join(dist, 'build-tools', target.build_tools_version, 'zipalign'))
def test_zipalign_out(self):
with distribution() as dist:
with self.android_binary() as android_binary:
self.set_options(sdk_path=dist)
task = self.create_task(self.context())
target = android_binary
self.assertEqual(task.zipalign_out(target), os.path.join(task._distdir, target.name))
def test_render_args(self):
with distribution() as dist:
with self.android_binary() as android_binary:
self.set_options(sdk_path=dist)
task = self.create_task(self.context())
target = android_binary
expected_args = [os.path.join(dist, 'build-tools', target.build_tools_version, 'zipalign'),
'-f', '4', 'package/path',
os.path.join(task._distdir, target.name,
'{0}.signed.apk'.format(target.manifest.package_name))]
self.assertEqual(task._render_args('package/path', target), expected_args)
|
apache-2.0
|
ghyde/letsencrypt
|
letsencrypt/plugins/disco_test.py
|
1
|
9977
|
"""Tests for letsencrypt.plugins.disco."""
import pkg_resources
import unittest
import mock
import zope.interface
from letsencrypt import errors
from letsencrypt import interfaces
from letsencrypt.plugins import standalone
EP_SA = pkg_resources.EntryPoint(
"sa", "letsencrypt.plugins.standalone",
attrs=("Authenticator",),
dist=mock.MagicMock(key="letsencrypt"))
class PluginEntryPointTest(unittest.TestCase):
"""Tests for letsencrypt.plugins.disco.PluginEntryPoint."""
def setUp(self):
self.ep1 = pkg_resources.EntryPoint(
"ep1", "p1.ep1", dist=mock.MagicMock(key="p1"))
self.ep1prim = pkg_resources.EntryPoint(
"ep1", "p2.ep2", dist=mock.MagicMock(key="p2"))
# nested
self.ep2 = pkg_resources.EntryPoint(
"ep2", "p2.foo.ep2", dist=mock.MagicMock(key="p2"))
# project name != top-level package name
self.ep3 = pkg_resources.EntryPoint(
"ep3", "a.ep3", dist=mock.MagicMock(key="p3"))
from letsencrypt.plugins.disco import PluginEntryPoint
self.plugin_ep = PluginEntryPoint(EP_SA)
def test_entry_point_to_plugin_name(self):
from letsencrypt.plugins.disco import PluginEntryPoint
names = {
self.ep1: "p1:ep1",
self.ep1prim: "p2:ep1",
self.ep2: "p2:ep2",
self.ep3: "p3:ep3",
EP_SA: "sa",
}
for entry_point, name in names.iteritems():
self.assertEqual(
name, PluginEntryPoint.entry_point_to_plugin_name(entry_point))
def test_description(self):
self.assertEqual("Standalone Authenticator", self.plugin_ep.description)
def test_description_with_name(self):
self.plugin_ep.plugin_cls = mock.MagicMock(description="Desc")
self.assertEqual(
"Desc (sa)", self.plugin_ep.description_with_name)
def test_ifaces(self):
self.assertTrue(self.plugin_ep.ifaces((interfaces.IAuthenticator,)))
self.assertFalse(self.plugin_ep.ifaces((interfaces.IInstaller,)))
self.assertFalse(self.plugin_ep.ifaces((
interfaces.IInstaller, interfaces.IAuthenticator)))
def test__init__(self):
self.assertFalse(self.plugin_ep.initialized)
self.assertFalse(self.plugin_ep.prepared)
self.assertFalse(self.plugin_ep.misconfigured)
self.assertFalse(self.plugin_ep.available)
self.assertTrue(self.plugin_ep.problem is None)
self.assertTrue(self.plugin_ep.entry_point is EP_SA)
self.assertEqual("sa", self.plugin_ep.name)
self.assertTrue(self.plugin_ep.plugin_cls is standalone.Authenticator)
def test_init(self):
config = mock.MagicMock()
plugin = self.plugin_ep.init(config=config)
self.assertTrue(self.plugin_ep.initialized)
self.assertTrue(plugin.config is config)
# memoize!
self.assertTrue(self.plugin_ep.init() is plugin)
self.assertTrue(plugin.config is config)
# try to give different config
self.assertTrue(self.plugin_ep.init(123) is plugin)
self.assertTrue(plugin.config is config)
self.assertFalse(self.plugin_ep.prepared)
self.assertFalse(self.plugin_ep.misconfigured)
self.assertFalse(self.plugin_ep.available)
def test_verify(self):
iface1 = mock.MagicMock(__name__="iface1")
iface2 = mock.MagicMock(__name__="iface2")
iface3 = mock.MagicMock(__name__="iface3")
# pylint: disable=protected-access
self.plugin_ep._initialized = plugin = mock.MagicMock()
exceptions = zope.interface.exceptions
with mock.patch("letsencrypt.plugins."
"disco.zope.interface") as mock_zope:
mock_zope.exceptions = exceptions
def verify_object(iface, obj): # pylint: disable=missing-docstring
assert obj is plugin
assert iface is iface1 or iface is iface2 or iface is iface3
if iface is iface3:
raise mock_zope.exceptions.BrokenImplementation(None, None)
mock_zope.verify.verifyObject.side_effect = verify_object
self.assertTrue(self.plugin_ep.verify((iface1,)))
self.assertTrue(self.plugin_ep.verify((iface1, iface2)))
self.assertFalse(self.plugin_ep.verify((iface3,)))
self.assertFalse(self.plugin_ep.verify((iface1, iface3)))
def test_prepare(self):
config = mock.MagicMock()
self.plugin_ep.init(config=config)
self.plugin_ep.prepare()
self.assertTrue(self.plugin_ep.prepared)
self.assertFalse(self.plugin_ep.misconfigured)
# output doesn't matter that much, just test if it runs
str(self.plugin_ep)
def test_prepare_misconfigured(self):
plugin = mock.MagicMock()
plugin.prepare.side_effect = errors.MisconfigurationError
# pylint: disable=protected-access
self.plugin_ep._initialized = plugin
self.assertTrue(isinstance(self.plugin_ep.prepare(),
errors.MisconfigurationError))
self.assertTrue(self.plugin_ep.prepared)
self.assertTrue(self.plugin_ep.misconfigured)
self.assertTrue(isinstance(self.plugin_ep.problem,
errors.MisconfigurationError))
self.assertTrue(self.plugin_ep.available)
def test_prepare_no_installation(self):
plugin = mock.MagicMock()
plugin.prepare.side_effect = errors.NoInstallationError
# pylint: disable=protected-access
self.plugin_ep._initialized = plugin
self.assertTrue(isinstance(self.plugin_ep.prepare(),
errors.NoInstallationError))
self.assertTrue(self.plugin_ep.prepared)
self.assertFalse(self.plugin_ep.misconfigured)
self.assertFalse(self.plugin_ep.available)
def test_prepare_generic_plugin_error(self):
plugin = mock.MagicMock()
plugin.prepare.side_effect = errors.PluginError
# pylint: disable=protected-access
self.plugin_ep._initialized = plugin
self.assertTrue(isinstance(self.plugin_ep.prepare(), errors.PluginError))
self.assertTrue(self.plugin_ep.prepared)
self.assertFalse(self.plugin_ep.misconfigured)
self.assertFalse(self.plugin_ep.available)
def test_repr(self):
self.assertEqual("PluginEntryPoint#sa", repr(self.plugin_ep))
class PluginsRegistryTest(unittest.TestCase):
"""Tests for letsencrypt.plugins.disco.PluginsRegistry."""
def setUp(self):
from letsencrypt.plugins.disco import PluginsRegistry
self.plugin_ep = mock.MagicMock(name="mock")
self.plugin_ep.__hash__.side_effect = TypeError
self.plugins = {"mock": self.plugin_ep}
self.reg = PluginsRegistry(self.plugins)
def test_find_all(self):
from letsencrypt.plugins.disco import PluginsRegistry
with mock.patch("letsencrypt.plugins.disco.pkg_resources") as mock_pkg:
mock_pkg.iter_entry_points.return_value = iter([EP_SA])
plugins = PluginsRegistry.find_all()
self.assertTrue(plugins["sa"].plugin_cls is standalone.Authenticator)
self.assertTrue(plugins["sa"].entry_point is EP_SA)
def test_getitem(self):
self.assertEqual(self.plugin_ep, self.reg["mock"])
def test_iter(self):
self.assertEqual(["mock"], list(self.reg))
def test_len(self):
self.assertEqual(1, len(self.reg))
self.plugins.clear()
self.assertEqual(0, len(self.reg))
def test_init(self):
self.plugin_ep.init.return_value = "baz"
self.assertEqual(["baz"], self.reg.init("bar"))
self.plugin_ep.init.assert_called_once_with("bar")
def test_filter(self):
self.plugins.update({
"foo": "bar",
"bar": "foo",
"baz": "boo",
})
self.assertEqual(
{"foo": "bar", "baz": "boo"},
self.reg.filter(lambda p_ep: str(p_ep).startswith("b")))
def test_ifaces(self):
self.plugin_ep.ifaces.return_value = True
# pylint: disable=protected-access
self.assertEqual(self.plugins, self.reg.ifaces()._plugins)
self.plugin_ep.ifaces.return_value = False
self.assertEqual({}, self.reg.ifaces()._plugins)
def test_verify(self):
self.plugin_ep.verify.return_value = True
# pylint: disable=protected-access
self.assertEqual(
self.plugins, self.reg.verify(mock.MagicMock())._plugins)
self.plugin_ep.verify.return_value = False
self.assertEqual({}, self.reg.verify(mock.MagicMock())._plugins)
def test_prepare(self):
self.plugin_ep.prepare.return_value = "baz"
self.assertEqual(["baz"], self.reg.prepare())
self.plugin_ep.prepare.assert_called_once_with()
def test_available(self):
self.plugin_ep.available = True
# pylint: disable=protected-access
self.assertEqual(self.plugins, self.reg.available()._plugins)
self.plugin_ep.available = False
self.assertEqual({}, self.reg.available()._plugins)
def test_find_init(self):
self.assertTrue(self.reg.find_init(mock.Mock()) is None)
        self.plugin_ep.initialized = True
self.assertTrue(
self.reg.find_init(self.plugin_ep.init()) is self.plugin_ep)
def test_repr(self):
self.plugin_ep.__repr__ = lambda _: "PluginEntryPoint#mock"
self.assertEqual("PluginsRegistry(PluginEntryPoint#mock)",
repr(self.reg))
def test_str(self):
self.plugin_ep.__str__ = lambda _: "Mock"
self.plugins["foo"] = "Mock"
self.assertEqual("Mock\n\nMock", str(self.reg))
self.plugins.clear()
self.assertEqual("No plugins", str(self.reg))
if __name__ == "__main__":
unittest.main() # pragma: no cover
|
apache-2.0
|
neno1978/pelisalacarta
|
python/main-classic/lib/requests/packages/chardet/compat.py
|
2943
|
1157
|
######################## BEGIN LICENSE BLOCK ########################
# Contributor(s):
# Ian Cordasco - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import sys
if sys.version_info < (3, 0):
base_str = (str, unicode)
else:
base_str = (bytes, str)
def wrap_ord(a):
if sys.version_info < (3, 0) and isinstance(a, base_str):
return ord(a)
else:
return a
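# Illustrative behaviour (a sketch, not part of the original module):
#
#     wrap_ord('A')   # -> 65 on Python 2, where chardet iterates str/unicode chars
#     wrap_ord(65)    # -> 65 on Python 3, where iterating bytes already yields ints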
|
gpl-3.0
|
yujikato/DIRAC
|
src/DIRAC/Interfaces/scripts/dirac_admin_get_pilot_output.py
|
2
|
1395
|
#!/usr/bin/env python
########################################################################
# File : dirac-admin-get-pilot-output
# Author : Stuart Paterson
########################################################################
"""
Retrieve output of a Grid pilot
Usage:
dirac-admin-get-pilot-output [options] ... PilotID ...
Arguments:
PilotID: Grid ID of the pilot
Example:
$ dirac-admin-get-pilot-output https://marlb.in2p3.fr:9000/26KCLKBFtxXKHF4_ZrQjkw
$ ls -la
drwxr-xr-x 2 hamar marseill 2048 Feb 21 14:13 pilot_26KCLKBFtxXKHF4_ZrQjkw
"""
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
__RCSID__ = "$Id$"
from DIRAC.Core.Base import Script
from DIRAC.Core.Utilities.DIRACScript import DIRACScript
@DIRACScript()
def main():
Script.parseCommandLine(ignoreErrors=True)
args = Script.getPositionalArgs()
if len(args) < 1:
Script.showHelp()
from DIRAC import exit as DIRACExit
from DIRAC.Interfaces.API.DiracAdmin import DiracAdmin
diracAdmin = DiracAdmin()
exitCode = 0
errorList = []
for gridID in args:
result = diracAdmin.getPilotOutput(gridID)
if not result['OK']:
errorList.append((gridID, result['Message']))
exitCode = 2
for error in errorList:
print("ERROR %s: %s" % error)
DIRACExit(exitCode)
if __name__ == "__main__":
main()
|
gpl-3.0
|
balloob/home-assistant
|
homeassistant/components/deconz/sensor.py
|
5
|
8316
|
"""Support for deCONZ sensors."""
from pydeconz.sensor import (
Battery,
Consumption,
Daylight,
Humidity,
LightLevel,
Power,
Pressure,
Switch,
Temperature,
Thermostat,
)
from homeassistant.components.sensor import DOMAIN
from homeassistant.const import (
ATTR_TEMPERATURE,
ATTR_VOLTAGE,
DEVICE_CLASS_BATTERY,
DEVICE_CLASS_HUMIDITY,
DEVICE_CLASS_ILLUMINANCE,
DEVICE_CLASS_POWER,
DEVICE_CLASS_PRESSURE,
DEVICE_CLASS_TEMPERATURE,
ENERGY_KILO_WATT_HOUR,
LIGHT_LUX,
PERCENTAGE,
POWER_WATT,
PRESSURE_HPA,
TEMP_CELSIUS,
)
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import (
async_dispatcher_connect,
async_dispatcher_send,
)
from .const import ATTR_DARK, ATTR_ON, NEW_SENSOR
from .deconz_device import DeconzDevice
from .gateway import get_gateway_from_config_entry
ATTR_CURRENT = "current"
ATTR_POWER = "power"
ATTR_DAYLIGHT = "daylight"
ATTR_EVENT_ID = "event_id"
DEVICE_CLASS = {
Humidity: DEVICE_CLASS_HUMIDITY,
LightLevel: DEVICE_CLASS_ILLUMINANCE,
Power: DEVICE_CLASS_POWER,
Pressure: DEVICE_CLASS_PRESSURE,
Temperature: DEVICE_CLASS_TEMPERATURE,
}
ICON = {
Daylight: "mdi:white-balance-sunny",
Pressure: "mdi:gauge",
Temperature: "mdi:thermometer",
}
UNIT_OF_MEASUREMENT = {
Consumption: ENERGY_KILO_WATT_HOUR,
Humidity: PERCENTAGE,
LightLevel: LIGHT_LUX,
Power: POWER_WATT,
Pressure: PRESSURE_HPA,
Temperature: TEMP_CELSIUS,
}
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up the deCONZ sensors."""
gateway = get_gateway_from_config_entry(hass, config_entry)
gateway.entities[DOMAIN] = set()
battery_handler = DeconzBatteryHandler(gateway)
@callback
def async_add_sensor(sensors):
"""Add sensors from deCONZ.
Create DeconzBattery if sensor has a battery attribute.
Create DeconzSensor if not a battery, switch or thermostat and not a binary sensor.
"""
entities = []
for sensor in sensors:
if not gateway.option_allow_clip_sensor and sensor.type.startswith("CLIP"):
continue
if sensor.battery is not None:
battery_handler.remove_tracker(sensor)
known_batteries = set(gateway.entities[DOMAIN])
new_battery = DeconzBattery(sensor, gateway)
if new_battery.unique_id not in known_batteries:
entities.append(new_battery)
else:
battery_handler.create_tracker(sensor)
if (
not sensor.BINARY
and sensor.type
not in Battery.ZHATYPE + Switch.ZHATYPE + Thermostat.ZHATYPE
and sensor.uniqueid not in gateway.entities[DOMAIN]
):
entities.append(DeconzSensor(sensor, gateway))
if entities:
async_add_entities(entities)
gateway.listeners.append(
async_dispatcher_connect(
hass, gateway.async_signal_new_device(NEW_SENSOR), async_add_sensor
)
)
async_add_sensor(
[gateway.api.sensors[key] for key in sorted(gateway.api.sensors, key=int)]
)
class DeconzSensor(DeconzDevice):
"""Representation of a deCONZ sensor."""
TYPE = DOMAIN
@callback
def async_update_callback(self, force_update=False):
"""Update the sensor's state."""
keys = {"on", "reachable", "state"}
if force_update or self._device.changed_keys.intersection(keys):
super().async_update_callback(force_update=force_update)
@property
def state(self):
"""Return the state of the sensor."""
return self._device.state
@property
def device_class(self):
"""Return the class of the sensor."""
return DEVICE_CLASS.get(type(self._device))
@property
def icon(self):
"""Return the icon to use in the frontend."""
return ICON.get(type(self._device))
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this sensor."""
return UNIT_OF_MEASUREMENT.get(type(self._device))
@property
def device_state_attributes(self):
"""Return the state attributes of the sensor."""
attr = {}
if self._device.on is not None:
attr[ATTR_ON] = self._device.on
if self._device.secondary_temperature is not None:
attr[ATTR_TEMPERATURE] = self._device.secondary_temperature
if self._device.type in Consumption.ZHATYPE:
attr[ATTR_POWER] = self._device.power
elif self._device.type in Daylight.ZHATYPE:
attr[ATTR_DAYLIGHT] = self._device.daylight
elif self._device.type in LightLevel.ZHATYPE:
if self._device.dark is not None:
attr[ATTR_DARK] = self._device.dark
if self._device.daylight is not None:
attr[ATTR_DAYLIGHT] = self._device.daylight
elif self._device.type in Power.ZHATYPE:
attr[ATTR_CURRENT] = self._device.current
attr[ATTR_VOLTAGE] = self._device.voltage
return attr
class DeconzBattery(DeconzDevice):
"""Battery class for when a device is only represented as an event."""
TYPE = DOMAIN
@callback
def async_update_callback(self, force_update=False):
"""Update the battery's state, if needed."""
keys = {"battery", "reachable"}
if force_update or self._device.changed_keys.intersection(keys):
super().async_update_callback(force_update=force_update)
@property
def unique_id(self):
"""Return a unique identifier for this device."""
return f"{self.serial}-battery"
@property
def state(self):
"""Return the state of the battery."""
return self._device.battery
@property
def name(self):
"""Return the name of the battery."""
return f"{self._device.name} Battery Level"
@property
def device_class(self):
"""Return the class of the sensor."""
return DEVICE_CLASS_BATTERY
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this entity."""
return PERCENTAGE
@property
def device_state_attributes(self):
"""Return the state attributes of the battery."""
attr = {}
if self._device.type in Switch.ZHATYPE:
for event in self.gateway.events:
if self._device == event.device:
attr[ATTR_EVENT_ID] = event.event_id
return attr
class DeconzSensorStateTracker:
"""Track sensors without a battery state and signal when battery state exist."""
def __init__(self, sensor, gateway):
"""Set up tracker."""
self.sensor = sensor
self.gateway = gateway
sensor.register_callback(self.async_update_callback)
@callback
def close(self):
"""Clean up tracker."""
self.sensor.remove_callback(self.async_update_callback)
self.gateway = None
self.sensor = None
@callback
def async_update_callback(self, ignore_update=False):
"""Sensor state updated."""
if "battery" in self.sensor.changed_keys:
async_dispatcher_send(
self.gateway.hass,
self.gateway.async_signal_new_device(NEW_SENSOR),
[self.sensor],
)
class DeconzBatteryHandler:
"""Creates and stores trackers for sensors without a battery state."""
def __init__(self, gateway):
"""Set up battery handler."""
self.gateway = gateway
self._trackers = set()
@callback
def create_tracker(self, sensor):
"""Create new tracker for battery state."""
for tracker in self._trackers:
if sensor == tracker.sensor:
return
self._trackers.add(DeconzSensorStateTracker(sensor, self.gateway))
@callback
def remove_tracker(self, sensor):
"""Remove tracker of battery state."""
for tracker in self._trackers:
if sensor == tracker.sensor:
tracker.close()
self._trackers.remove(tracker)
break
|
apache-2.0
|
jgao54/airflow
|
airflow/migrations/versions/64de9cddf6c9_add_task_fails_journal_table.py
|
12
|
1697
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""add task fails journal table
Revision ID: 64de9cddf6c9
Revises: 211e584da130
Create Date: 2016-08-03 14:02:59.203021
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '64de9cddf6c9'
down_revision = '211e584da130'
branch_labels = None
depends_on = None
def upgrade():
op.create_table(
'task_fail',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('task_id', sa.String(length=250), nullable=False),
sa.Column('dag_id', sa.String(length=250), nullable=False),
sa.Column('execution_date', sa.DateTime(), nullable=False),
sa.Column('start_date', sa.DateTime(), nullable=True),
sa.Column('end_date', sa.DateTime(), nullable=True),
sa.Column('duration', sa.Integer(), nullable=True),
sa.PrimaryKeyConstraint('id'),
)
def downgrade():
op.drop_table('task_fail')
|
apache-2.0
|
dodocat/git-repo
|
progress.py
|
143
|
2036
|
#
# Copyright (C) 2009 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
from time import time
from trace import IsTrace
_NOT_TTY = not os.isatty(2)
class Progress(object):
def __init__(self, title, total=0, units=''):
self._title = title
self._total = total
self._done = 0
self._lastp = -1
self._start = time()
self._show = False
self._units = units
def update(self, inc=1):
self._done += inc
if _NOT_TTY or IsTrace():
return
if not self._show:
if 0.5 <= time() - self._start:
self._show = True
else:
return
if self._total <= 0:
sys.stderr.write('\r%s: %d, ' % (
self._title,
self._done))
sys.stderr.flush()
else:
p = (100 * self._done) / self._total
if self._lastp != p:
self._lastp = p
sys.stderr.write('\r%s: %3d%% (%d%s/%d%s) ' % (
self._title,
p,
self._done, self._units,
self._total, self._units))
sys.stderr.flush()
def end(self):
if _NOT_TTY or IsTrace() or not self._show:
return
if self._total <= 0:
sys.stderr.write('\r%s: %d, done. \n' % (
self._title,
self._done))
sys.stderr.flush()
else:
p = (100 * self._done) / self._total
sys.stderr.write('\r%s: %3d%% (%d%s/%d%s), done. \n' % (
self._title,
p,
self._done, self._units,
self._total, self._units))
sys.stderr.flush()
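# Hedged usage sketch (title and count are illustrative):
#
#     p = Progress('Fetching projects', total=3)
#     for project in ('a', 'b', 'c'):
#         # ... do work for one project ...
#         p.update()
#     p.end()
#
# Output is suppressed when stderr is not a tty or tracing is enabled, and it
# only appears once the operation has been running for at least half a second.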
|
apache-2.0
|
codervince/flashingredlight
|
env/lib/python2.7/site-packages/flask/views.py
|
782
|
5642
|
# -*- coding: utf-8 -*-
"""
flask.views
~~~~~~~~~~~
This module provides class-based views inspired by the ones in Django.
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
from .globals import request
from ._compat import with_metaclass
http_method_funcs = frozenset(['get', 'post', 'head', 'options',
'delete', 'put', 'trace', 'patch'])
class View(object):
"""Alternative way to use view functions. A subclass has to implement
:meth:`dispatch_request` which is called with the view arguments from
the URL routing system. If :attr:`methods` is provided the methods
do not have to be passed to the :meth:`~flask.Flask.add_url_rule`
method explicitly::
class MyView(View):
methods = ['GET']
def dispatch_request(self, name):
return 'Hello %s!' % name
app.add_url_rule('/hello/<name>', view_func=MyView.as_view('myview'))
When you want to decorate a pluggable view you will have to either do that
when the view function is created (by wrapping the return value of
:meth:`as_view`) or you can use the :attr:`decorators` attribute::
class SecretView(View):
methods = ['GET']
decorators = [superuser_required]
def dispatch_request(self):
...
The decorators stored in the decorators list are applied one after another
when the view function is created. Note that you can *not* use the class
based decorators since those would decorate the view class and not the
generated view function!
"""
    #: A list of methods this pluggable view can handle.
methods = None
    #: The canonical way to decorate class-based views is to decorate the
    #: return value of as_view(). However, since that moves parts of the
    #: logic from the class declaration to the place where it's hooked
    #: into the routing system, this attribute is provided as an alternative.
#:
#: You can place one or more decorators in this list and whenever the
#: view function is created the result is automatically decorated.
#:
#: .. versionadded:: 0.8
decorators = []
def dispatch_request(self):
"""Subclasses have to override this method to implement the
actual view function code. This method is called with all
the arguments from the URL rule.
"""
raise NotImplementedError()
@classmethod
def as_view(cls, name, *class_args, **class_kwargs):
"""Converts the class into an actual view function that can be used
with the routing system. Internally this generates a function on the
fly which will instantiate the :class:`View` on each request and call
the :meth:`dispatch_request` method on it.
The arguments passed to :meth:`as_view` are forwarded to the
constructor of the class.
"""
def view(*args, **kwargs):
self = view.view_class(*class_args, **class_kwargs)
return self.dispatch_request(*args, **kwargs)
if cls.decorators:
view.__name__ = name
view.__module__ = cls.__module__
for decorator in cls.decorators:
view = decorator(view)
# we attach the view class to the view function for two reasons:
# first of all it allows us to easily figure out what class-based
# view this thing came from, secondly it's also used for instantiating
# the view class so you can actually replace it with something else
# for testing purposes and debugging.
view.view_class = cls
view.__name__ = name
view.__doc__ = cls.__doc__
view.__module__ = cls.__module__
view.methods = cls.methods
return view
class MethodViewType(type):
def __new__(cls, name, bases, d):
rv = type.__new__(cls, name, bases, d)
if 'methods' not in d:
methods = set(rv.methods or [])
for key in d:
if key in http_method_funcs:
methods.add(key.upper())
# if we have no method at all in there we don't want to
# add a method list. (This is for instance the case for
# the baseclass or another subclass of a base method view
# that does not introduce new methods).
if methods:
rv.methods = sorted(methods)
return rv
class MethodView(with_metaclass(MethodViewType, View)):
"""Like a regular class-based view but that dispatches requests to
particular methods. For instance if you implement a method called
:meth:`get` it means you will response to ``'GET'`` requests and
the :meth:`dispatch_request` implementation will automatically
forward your request to that. Also :attr:`options` is set for you
automatically::
class CounterAPI(MethodView):
def get(self):
return session.get('counter', 0)
def post(self):
session['counter'] = session.get('counter', 0) + 1
return 'OK'
app.add_url_rule('/counter', view_func=CounterAPI.as_view('counter'))
"""
def dispatch_request(self, *args, **kwargs):
meth = getattr(self, request.method.lower(), None)
# if the request method is HEAD and we don't have a handler for it
# retry with GET
if meth is None and request.method == 'HEAD':
meth = getattr(self, 'get', None)
assert meth is not None, 'Unimplemented method %r' % request.method
return meth(*args, **kwargs)
|
mit
|
nevir/plexability
|
extern/gyp/pylib/gyp/generator/make.py
|
17
|
88238
|
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Notes:
#
# This is all roughly based on the Makefile system used by the Linux
# kernel, but is a non-recursive make -- we put the entire dependency
# graph in front of make and let it figure it out.
#
# The code below generates a separate .mk file for each target, but
# all are sourced by the top-level Makefile. This means that all
# variables in .mk-files clobber one another. Be careful to use :=
# where appropriate for immediate evaluation, and similarly to watch
# that you're not relying on a variable value to last between different
# .mk files.
#
# TODOs:
#
# Global settings and utility functions are currently stuffed in the
# toplevel Makefile. It may make sense to generate some .mk files on
# the side to keep the files readable.
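# (Illustrative note, not in the original: this generator is normally selected
# on the gyp command line, e.g. `gyp -f make foo.gyp`, which writes the
# top-level Makefile described above plus one .mk fragment per target.)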
import os
import re
import sys
import subprocess
import gyp
import gyp.common
import gyp.xcode_emulation
from gyp.common import GetEnvironFallback
generator_default_variables = {
'EXECUTABLE_PREFIX': '',
'EXECUTABLE_SUFFIX': '',
'STATIC_LIB_PREFIX': 'lib',
'SHARED_LIB_PREFIX': 'lib',
'STATIC_LIB_SUFFIX': '.a',
'INTERMEDIATE_DIR': '$(obj).$(TOOLSET)/$(TARGET)/geni',
'SHARED_INTERMEDIATE_DIR': '$(obj)/gen',
'PRODUCT_DIR': '$(builddir)',
'RULE_INPUT_ROOT': '%(INPUT_ROOT)s', # This gets expanded by Python.
'RULE_INPUT_DIRNAME': '%(INPUT_DIRNAME)s', # This gets expanded by Python.
'RULE_INPUT_PATH': '$(abspath $<)',
'RULE_INPUT_EXT': '$(suffix $<)',
'RULE_INPUT_NAME': '$(notdir $<)',
'CONFIGURATION_NAME': '$(BUILDTYPE)',
}
# Make supports multiple toolsets
generator_supports_multiple_toolsets = True
# Request sorted dependencies in the order from dependents to dependencies.
generator_wants_sorted_dependencies = False
# Placates pylint.
generator_additional_non_configuration_keys = []
generator_additional_path_sections = []
generator_extra_sources_for_rules = []
def CalculateVariables(default_variables, params):
"""Calculate additional variables for use in the build (called by gyp)."""
flavor = gyp.common.GetFlavor(params)
if flavor == 'mac':
default_variables.setdefault('OS', 'mac')
default_variables.setdefault('SHARED_LIB_SUFFIX', '.dylib')
default_variables.setdefault('SHARED_LIB_DIR',
generator_default_variables['PRODUCT_DIR'])
default_variables.setdefault('LIB_DIR',
generator_default_variables['PRODUCT_DIR'])
# Copy additional generator configuration data from Xcode, which is shared
# by the Mac Make generator.
import gyp.generator.xcode as xcode_generator
global generator_additional_non_configuration_keys
generator_additional_non_configuration_keys = getattr(xcode_generator,
'generator_additional_non_configuration_keys', [])
global generator_additional_path_sections
generator_additional_path_sections = getattr(xcode_generator,
'generator_additional_path_sections', [])
global generator_extra_sources_for_rules
generator_extra_sources_for_rules = getattr(xcode_generator,
'generator_extra_sources_for_rules', [])
COMPILABLE_EXTENSIONS.update({'.m': 'objc', '.mm' : 'objcxx'})
else:
operating_system = flavor
if flavor == 'android':
operating_system = 'linux' # Keep this legacy behavior for now.
default_variables.setdefault('OS', operating_system)
default_variables.setdefault('SHARED_LIB_SUFFIX', '.so')
default_variables.setdefault('SHARED_LIB_DIR','$(builddir)/lib.$(TOOLSET)')
default_variables.setdefault('LIB_DIR', '$(obj).$(TOOLSET)')
def CalculateGeneratorInputInfo(params):
"""Calculate the generator specific info that gets fed to input (called by
gyp)."""
generator_flags = params.get('generator_flags', {})
android_ndk_version = generator_flags.get('android_ndk_version', None)
# Android NDK requires a strict link order.
if android_ndk_version:
global generator_wants_sorted_dependencies
generator_wants_sorted_dependencies = True
def ensure_directory_exists(path):
dir = os.path.dirname(path)
if dir and not os.path.exists(dir):
os.makedirs(dir)
# The .d checking code below uses these functions:
# wildcard, sort, foreach, shell, wordlist
# wildcard can handle spaces, the rest can't.
# Since I could find no way to make foreach work with spaces in filenames
# correctly, the .d files have spaces replaced with another character. The .d
# file for
# Chromium\ Framework.framework/foo
# is for example
# out/Release/.deps/out/Release/Chromium?Framework.framework/foo
# This is the replacement character.
SPACE_REPLACEMENT = '?'
LINK_COMMANDS_LINUX = """\
quiet_cmd_alink = AR($(TOOLSET)) $@
cmd_alink = rm -f $@ && $(AR.$(TOOLSET)) crs $@ $(filter %.o,$^)
quiet_cmd_alink_thin = AR($(TOOLSET)) $@
cmd_alink_thin = rm -f $@ && $(AR.$(TOOLSET)) crsT $@ $(filter %.o,$^)
# Due to circular dependencies between libraries :(, we wrap the
# special "figure out circular dependencies" flags around the entire
# input list during linking.
quiet_cmd_link = LINK($(TOOLSET)) $@
cmd_link = $(LINK.$(TOOLSET)) $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ -Wl,--start-group $(LD_INPUTS) -Wl,--end-group $(LIBS)
# We support two kinds of shared objects (.so):
# 1) shared_library, which is just bundling together many dependent libraries
# into a link line.
# 2) loadable_module, which is generating a module intended for dlopen().
#
# They differ only slightly:
# In the former case, we want to package all dependent code into the .so.
# In the latter case, we want to package just the API exposed by the
# outermost module.
# This means shared_library uses --whole-archive, while loadable_module doesn't.
# (Note that --whole-archive is incompatible with the --start-group used in
# normal linking.)
# Other shared-object link notes:
# - Set SONAME to the library filename so our binaries don't reference
# the local, absolute paths used on the link command-line.
quiet_cmd_solink = SOLINK($(TOOLSET)) $@
cmd_solink = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -Wl,-soname=$(@F) -o $@ -Wl,--whole-archive $(LD_INPUTS) -Wl,--no-whole-archive $(LIBS)
quiet_cmd_solink_module = SOLINK_MODULE($(TOOLSET)) $@
cmd_solink_module = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -Wl,-soname=$(@F) -o $@ -Wl,--start-group $(filter-out FORCE_DO_CMD, $^) -Wl,--end-group $(LIBS)
"""
LINK_COMMANDS_MAC = """\
quiet_cmd_alink = LIBTOOL-STATIC $@
cmd_alink = rm -f $@ && ./gyp-mac-tool filter-libtool libtool $(GYP_LIBTOOLFLAGS) -static -o $@ $(filter %.o,$^)
quiet_cmd_link = LINK($(TOOLSET)) $@
cmd_link = $(LINK.$(TOOLSET)) $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o "$@" $(LD_INPUTS) $(LIBS)
# TODO(thakis): Find out and document the difference between shared_library and
# loadable_module on mac.
quiet_cmd_solink = SOLINK($(TOOLSET)) $@
cmd_solink = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o "$@" $(LD_INPUTS) $(LIBS)
# TODO(thakis): The solink_module rule is likely wrong. Xcode seems to pass
# -bundle -single_module here (for osmesa.so).
quiet_cmd_solink_module = SOLINK_MODULE($(TOOLSET)) $@
cmd_solink_module = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ $(filter-out FORCE_DO_CMD, $^) $(LIBS)
"""
LINK_COMMANDS_ANDROID = """\
quiet_cmd_alink = AR($(TOOLSET)) $@
cmd_alink = rm -f $@ && $(AR.$(TOOLSET)) crs $@ $(filter %.o,$^)
quiet_cmd_alink_thin = AR($(TOOLSET)) $@
cmd_alink_thin = rm -f $@ && $(AR.$(TOOLSET)) crsT $@ $(filter %.o,$^)
# Due to circular dependencies between libraries :(, we wrap the
# special "figure out circular dependencies" flags around the entire
# input list during linking.
quiet_cmd_link = LINK($(TOOLSET)) $@
quiet_cmd_link_host = LINK($(TOOLSET)) $@
cmd_link = $(LINK.$(TOOLSET)) $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ -Wl,--start-group $(LD_INPUTS) -Wl,--end-group $(LIBS)
cmd_link_host = $(LINK.$(TOOLSET)) $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ $(LD_INPUTS) $(LIBS)
# Other shared-object link notes:
# - Set SONAME to the library filename so our binaries don't reference
# the local, absolute paths used on the link command-line.
quiet_cmd_solink = SOLINK($(TOOLSET)) $@
cmd_solink = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -Wl,-soname=$(@F) -o $@ -Wl,--whole-archive $(LD_INPUTS) -Wl,--no-whole-archive $(LIBS)
quiet_cmd_solink_module = SOLINK_MODULE($(TOOLSET)) $@
cmd_solink_module = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -Wl,-soname=$(@F) -o $@ -Wl,--start-group $(filter-out FORCE_DO_CMD, $^) -Wl,--end-group $(LIBS)
quiet_cmd_solink_module_host = SOLINK_MODULE($(TOOLSET)) $@
cmd_solink_module_host = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -Wl,-soname=$(@F) -o $@ $(filter-out FORCE_DO_CMD, $^) $(LIBS)
"""
# Header of toplevel Makefile.
# This should go into the build tree, but it's easier to keep it here for now.
SHARED_HEADER = ("""\
# We borrow heavily from the kernel build setup, though we are simpler since
# we don't have Kconfig tweaking settings on us.
# The implicit make rules have it looking for RCS files, among other things.
# We instead explicitly write all the rules we care about.
# It's even quicker (saves ~200ms) to pass -r on the command line.
MAKEFLAGS=-r
# The source directory tree.
srcdir := %(srcdir)s
abs_srcdir := $(abspath $(srcdir))
# The name of the builddir.
builddir_name ?= %(builddir)s
# The V=1 flag on command line makes us verbosely print command lines.
ifdef V
quiet=
else
quiet=quiet_
endif
# Specify BUILDTYPE=Release on the command line for a release build.
BUILDTYPE ?= %(default_configuration)s
# Directory all our build output goes into.
# Note that this must be two directories beneath src/ for unit tests to pass,
# as they reach into the src/ directory for data with relative paths.
builddir ?= $(builddir_name)/$(BUILDTYPE)
abs_builddir := $(abspath $(builddir))
depsdir := $(builddir)/.deps
# Object output directory.
obj := $(builddir)/obj
abs_obj := $(abspath $(obj))
# We build up a list of every single one of the targets so we can slurp in the
# generated dependency rule Makefiles in one pass.
all_deps :=
%(make_global_settings)s
# C++ apps need to be linked with g++.
#
# Note: flock is used to serialize linking. Linking is a memory-intensive
# process so running parallel links can often lead to thrashing. To disable
# the serialization, override LINK via an environment variable as follows:
#
# export LINK=g++
#
# This will allow make to invoke N linker processes as specified in -jN.
LINK ?= %(flock)s $(builddir)/linker.lock $(CXX.target)
CC.target ?= %(CC.target)s
CFLAGS.target ?= $(CFLAGS)
CXX.target ?= %(CXX.target)s
CXXFLAGS.target ?= $(CXXFLAGS)
LINK.target ?= %(LINK.target)s
LDFLAGS.target ?= $(LDFLAGS)
AR.target ?= $(AR)
# TODO(evan): move all cross-compilation logic to gyp-time so we don't need
# to replicate this environment fallback in make as well.
CC.host ?= %(CC.host)s
CFLAGS.host ?=
CXX.host ?= %(CXX.host)s
CXXFLAGS.host ?=
LINK.host ?= %(LINK.host)s
LDFLAGS.host ?=
AR.host ?= %(AR.host)s
# Define a dir function that can handle spaces.
# http://www.gnu.org/software/make/manual/make.html#Syntax-of-Functions
# "leading spaces cannot appear in the text of the first argument as written.
# These characters can be put into the argument value by variable substitution."
empty :=
space := $(empty) $(empty)
# http://stackoverflow.com/questions/1189781/using-make-dir-or-notdir-on-a-path-with-spaces
replace_spaces = $(subst $(space),""" + SPACE_REPLACEMENT + """,$1)
unreplace_spaces = $(subst """ + SPACE_REPLACEMENT + """,$(space),$1)
dirx = $(call unreplace_spaces,$(dir $(call replace_spaces,$1)))
# Flags to make gcc output dependency info. Note that you need to be
# careful here to use the flags that ccache and distcc can understand.
# We write to a dep file on the side first and then rename at the end
# so we can't end up with a broken dep file.
depfile = $(depsdir)/$(call replace_spaces,$@).d
DEPFLAGS = -MMD -MF $(depfile).raw
# We have to fixup the deps output in a few ways.
# (1) the file output should mention the proper .o file.
# ccache or distcc lose the path to the target, so we convert a rule of
# the form:
# foobar.o: DEP1 DEP2
# into
# path/to/foobar.o: DEP1 DEP2
# (2) we want missing files not to cause us to fail to build.
# We want to rewrite
# foobar.o: DEP1 DEP2 \\
# DEP3
# to
# DEP1:
# DEP2:
# DEP3:
# so if the files are missing, they're just considered phony rules.
# We have to do some pretty insane escaping to get those backslashes
# and dollar signs past make, the shell, and sed at the same time.
# Doesn't work with spaces, but that's fine: .d files have spaces in
# their names replaced with other characters."""
r"""
define fixup_dep
# The depfile may not exist if the input file didn't have any #includes.
touch $(depfile).raw
# Fixup path as in (1).
sed -e "s|^$(notdir $@)|$@|" $(depfile).raw >> $(depfile)
# Add extra rules as in (2).
# We remove slashes and replace spaces with new lines;
# remove blank lines;
# delete the first line and append a colon to the remaining lines.
sed -e 's|\\||' -e 'y| |\n|' $(depfile).raw |\
grep -v '^$$' |\
sed -e 1d -e 's|$$|:|' \
>> $(depfile)
rm $(depfile).raw
endef
"""
"""
# Command definitions:
# - cmd_foo is the actual command to run;
# - quiet_cmd_foo is the brief-output summary of the command.
quiet_cmd_cc = CC($(TOOLSET)) $@
cmd_cc = $(CC.$(TOOLSET)) $(GYP_CFLAGS) $(DEPFLAGS) $(CFLAGS.$(TOOLSET)) -c -o $@ $<
quiet_cmd_cxx = CXX($(TOOLSET)) $@
cmd_cxx = $(CXX.$(TOOLSET)) $(GYP_CXXFLAGS) $(DEPFLAGS) $(CXXFLAGS.$(TOOLSET)) -c -o $@ $<
%(extra_commands)s
quiet_cmd_touch = TOUCH $@
cmd_touch = touch $@
quiet_cmd_copy = COPY $@
# send stderr to /dev/null to ignore messages when linking directories.
cmd_copy = ln -f "$<" "$@" 2>/dev/null || (rm -rf "$@" && cp -af "$<" "$@")
%(link_commands)s
"""
r"""
# Define an escape_quotes function to escape single quotes.
# This allows us to handle quotes properly as long as we always use
# use single quotes and escape_quotes.
escape_quotes = $(subst ','\'',$(1))
# This comment is here just to include a ' to unconfuse syntax highlighting.
# Define an escape_vars function to escape '$' variable syntax.
# This allows us to read/write command lines with shell variables (e.g.
# $LD_LIBRARY_PATH), without triggering make substitution.
escape_vars = $(subst $$,$$$$,$(1))
# Helper that expands to a shell command to echo a string exactly as it is in
# make. This uses printf instead of echo because printf's behaviour with respect
# to escape sequences is more portable than echo's across different shells
# (e.g., dash, bash).
exact_echo = printf '%%s\n' '$(call escape_quotes,$(1))'
"""
"""
# Helper to compare the command we're about to run against the command
# we logged the last time we ran the command. Produces an empty
# string (false) when the commands match.
# Tricky point: Make has no string-equality test function.
# The kernel uses the following, but it seems like it would have false
# positives when one string is a reordering of the other's arguments.
# arg_check = $(strip $(filter-out $(cmd_$(1)), $(cmd_$@)) \\
# $(filter-out $(cmd_$@), $(cmd_$(1))))
# We instead substitute each for the empty string into the other, and
# say they're equal if both substitutions produce the empty string.
# .d files contain """ + SPACE_REPLACEMENT + \
""" instead of spaces, take that into account.
command_changed = $(or $(subst $(cmd_$(1)),,$(cmd_$(call replace_spaces,$@))),\\
$(subst $(cmd_$(call replace_spaces,$@)),,$(cmd_$(1))))
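# For illustration (comment only, hypothetical commands): if the logged
# command was "gcc -c foo.c" and the current command is "gcc -O2 -c foo.c",
# neither substitution yields the empty string, so command_changed is
# non-empty and the rule is re-run.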
# Helper that is non-empty when a prerequisite changes.
# Normally make does this implicitly, but we force rules to always run
# so we can check their command lines.
# $? -- new prerequisites
# $| -- order-only dependencies
prereq_changed = $(filter-out FORCE_DO_CMD,$(filter-out $|,$?))
# Helper that executes all postbuilds until one fails.
define do_postbuilds
@E=0;\\
for p in $(POSTBUILDS); do\\
eval $$p;\\
E=$$?;\\
if [ $$E -ne 0 ]; then\\
break;\\
fi;\\
done;\\
if [ $$E -ne 0 ]; then\\
rm -rf "$@";\\
exit $$E;\\
fi
endef
# do_cmd: run a command via the above cmd_foo names, if necessary.
# Should always run for a given target to handle command-line changes.
# Second argument, if non-zero, makes it do asm/C/C++ dependency munging.
# Third argument, if non-zero, makes it do POSTBUILDS processing.
# Note: We intentionally do NOT call dirx for depfile, since it contains """ + \
SPACE_REPLACEMENT + """ for
# spaces already and dirx strips the """ + SPACE_REPLACEMENT + \
""" characters.
define do_cmd
$(if $(or $(command_changed),$(prereq_changed)),
@$(call exact_echo, $($(quiet)cmd_$(1)))
@mkdir -p "$(call dirx,$@)" "$(dir $(depfile))"
$(if $(findstring flock,$(word %(flock_index)d,$(cmd_$1))),
@$(cmd_$(1))
@echo " $(quiet_cmd_$(1)): Finished",
@$(cmd_$(1))
)
@$(call exact_echo,$(call escape_vars,cmd_$(call replace_spaces,$@) := $(cmd_$(1)))) > $(depfile)
@$(if $(2),$(fixup_dep))
$(if $(and $(3), $(POSTBUILDS)),
$(call do_postbuilds)
)
)
endef
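# For illustration (comment only): a typical compile recipe below is
# "@$(call do_cmd,cxx,1)" -- argument 1 selects cmd_cxx/quiet_cmd_cxx, and
# the non-empty second argument enables the fixup_dep dependency munging.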
# Declare the "%(default_target)s" target first so it is the default,
# even though we don't have the deps yet.
.PHONY: %(default_target)s
%(default_target)s:
# make looks for ways to re-generate included makefiles, but in our case, we
# don't have a direct way. Explicitly telling make that it has nothing to do
# for them makes it go faster.
%%.d: ;
# Use FORCE_DO_CMD to force a target to run. Should be coupled with
# do_cmd.
.PHONY: FORCE_DO_CMD
FORCE_DO_CMD:
""")
SHARED_HEADER_MAC_COMMANDS = """
quiet_cmd_objc = CXX($(TOOLSET)) $@
cmd_objc = $(CC.$(TOOLSET)) $(GYP_OBJCFLAGS) $(DEPFLAGS) -c -o $@ $<
quiet_cmd_objcxx = CXX($(TOOLSET)) $@
cmd_objcxx = $(CXX.$(TOOLSET)) $(GYP_OBJCXXFLAGS) $(DEPFLAGS) -c -o $@ $<
# Commands for precompiled header files.
quiet_cmd_pch_c = CXX($(TOOLSET)) $@
cmd_pch_c = $(CC.$(TOOLSET)) $(GYP_PCH_CFLAGS) $(DEPFLAGS) $(CXXFLAGS.$(TOOLSET)) -c -o $@ $<
quiet_cmd_pch_cc = CXX($(TOOLSET)) $@
cmd_pch_cc = $(CC.$(TOOLSET)) $(GYP_PCH_CXXFLAGS) $(DEPFLAGS) $(CXXFLAGS.$(TOOLSET)) -c -o $@ $<
quiet_cmd_pch_m = CXX($(TOOLSET)) $@
cmd_pch_m = $(CC.$(TOOLSET)) $(GYP_PCH_OBJCFLAGS) $(DEPFLAGS) -c -o $@ $<
quiet_cmd_pch_mm = CXX($(TOOLSET)) $@
cmd_pch_mm = $(CC.$(TOOLSET)) $(GYP_PCH_OBJCXXFLAGS) $(DEPFLAGS) -c -o $@ $<
# gyp-mac-tool is written next to the root Makefile by gyp.
# Use $(4) for the command, since $(2) and $(3) are used as flags by do_cmd
# already.
quiet_cmd_mac_tool = MACTOOL $(4) $<
cmd_mac_tool = ./gyp-mac-tool $(4) $< "$@"
quiet_cmd_mac_package_framework = PACKAGE FRAMEWORK $@
cmd_mac_package_framework = ./gyp-mac-tool package-framework "$@" $(4)
quiet_cmd_infoplist = INFOPLIST $@
cmd_infoplist = $(CC.$(TOOLSET)) -E -P -Wno-trigraphs -x c $(INFOPLIST_DEFINES) "$<" -o "$@"
"""
SHARED_HEADER_SUN_COMMANDS = """
# gyp-sun-tool is written next to the root Makefile by gyp.
# Use $(4) for the command, since $(2) and $(3) are used as flags by do_cmd
# already.
quiet_cmd_sun_tool = SUNTOOL $(4) $<
cmd_sun_tool = ./gyp-sun-tool $(4) $< "$@"
"""
def WriteRootHeaderSuffixRules(writer):
extensions = sorted(COMPILABLE_EXTENSIONS.keys(), key=str.lower)
writer.write('# Suffix rules, putting all outputs into $(obj).\n')
for ext in extensions:
writer.write('$(obj).$(TOOLSET)/%%.o: $(srcdir)/%%%s FORCE_DO_CMD\n' % ext)
writer.write('\t@$(call do_cmd,%s,1)\n' % COMPILABLE_EXTENSIONS[ext])
writer.write('\n# Try building from generated source, too.\n')
for ext in extensions:
writer.write(
'$(obj).$(TOOLSET)/%%.o: $(obj).$(TOOLSET)/%%%s FORCE_DO_CMD\n' % ext)
writer.write('\t@$(call do_cmd,%s,1)\n' % COMPILABLE_EXTENSIONS[ext])
writer.write('\n')
for ext in extensions:
writer.write('$(obj).$(TOOLSET)/%%.o: $(obj)/%%%s FORCE_DO_CMD\n' % ext)
writer.write('\t@$(call do_cmd,%s,1)\n' % COMPILABLE_EXTENSIONS[ext])
writer.write('\n')
SHARED_HEADER_SUFFIX_RULES_COMMENT1 = ("""\
# Suffix rules, putting all outputs into $(obj).
""")
SHARED_HEADER_SUFFIX_RULES_COMMENT2 = ("""\
# Try building from generated source, too.
""")
SHARED_FOOTER = """\
# "all" is a concatenation of the "all" targets from all the included
# sub-makefiles. This is just here to clarify.
all:
# Add in dependency-tracking rules. $(all_deps) is the list of every single
# target in our tree. Only consider the ones with .d (dependency) info:
d_files := $(wildcard $(foreach f,$(all_deps),$(depsdir)/$(f).d))
ifneq ($(d_files),)
include $(d_files)
endif
"""
header = """\
# This file is generated by gyp; do not edit.
"""
# Maps every compilable file extension to the do_cmd that compiles it.
COMPILABLE_EXTENSIONS = {
'.c': 'cc',
'.cc': 'cxx',
'.cpp': 'cxx',
'.cxx': 'cxx',
'.s': 'cc',
'.S': 'cc',
}
def Compilable(filename):
"""Return true if the file is compilable (should be in OBJS)."""
for res in (filename.endswith(e) for e in COMPILABLE_EXTENSIONS):
if res:
return True
return False
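# For illustration (hypothetical filenames): Compilable('foo.cc') and
# Compilable('foo.S') are True, while Compilable('foo.h') is False since
# '.h' is not listed in COMPILABLE_EXTENSIONS.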
def Linkable(filename):
"""Return true if the file is linkable (should be on the link line)."""
return filename.endswith('.o')
def Target(filename):
"""Translate a compilable filename to its .o target."""
return os.path.splitext(filename)[0] + '.o'
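# For illustration (hypothetical filename): Target('foo/bar.cc') -> 'foo/bar.o'.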
def EscapeShellArgument(s):
"""Quotes an argument so that it will be interpreted literally by a POSIX
shell. Taken from
http://stackoverflow.com/questions/35817/whats-the-best-way-to-escape-ossystem-calls-in-python
"""
return "'" + s.replace("'", "'\\''") + "'"
def EscapeMakeVariableExpansion(s):
"""Make has its own variable expansion syntax using $. We must escape it for
the string to be interpreted literally."""
return s.replace('$', '$$')
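# For illustration (hypothetical value): EscapeMakeVariableExpansion('$HOME/bin')
# returns '$$HOME/bin', which make in turn hands to the shell as '$HOME/bin'.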
def EscapeCppDefine(s):
"""Escapes a CPP define so that it will reach the compiler unaltered."""
s = EscapeShellArgument(s)
s = EscapeMakeVariableExpansion(s)
# '#' characters must be escaped, even when embedded in a string, or else
# Make will treat them as the start of a comment.
return s.replace('#', r'\#')
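# For illustration (hypothetical define): a define such as FOO="a b" is first
# single-quoted by EscapeShellArgument, then has '$' doubled by
# EscapeMakeVariableExpansion, and finally any '#' escaped as \# so make does
# not treat it as a comment.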
def QuoteIfNecessary(string):
"""TODO: Should this ideally be replaced with one or more of the above
functions?"""
if '"' in string:
string = '"' + string.replace('"', '\\"') + '"'
return string
def StringToMakefileVariable(string):
"""Convert a string to a value that is acceptable as a make variable name."""
return re.sub('[^a-zA-Z0-9_]', '_', string)
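# For illustration (hypothetical target name):
# StringToMakefileVariable('my-target.gyp:foo#host') -> 'my_target_gyp_foo_host'.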
srcdir_prefix = ''
def Sourceify(path):
"""Convert a path to its source directory form."""
if '$(' in path:
return path
if os.path.isabs(path):
return path
return srcdir_prefix + path
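# For illustration (hypothetical path): with srcdir_prefix set to '$(srcdir)/'
# (as done when generator_output is used), Sourceify('foo/bar.cc') returns
# '$(srcdir)/foo/bar.cc'; absolute paths and paths containing '$(' pass
# through unchanged.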
def QuoteSpaces(s, quote=r'\ '):
return s.replace(' ', quote)
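# For illustration (hypothetical path): QuoteSpaces('out/foo bar.o') returns
# 'out/foo\ bar.o' with the default quote; WriteDoCmd instead passes
# SPACE_REPLACEMENT for use in .d file bookkeeping.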
# Map from qualified target to path to output.
target_outputs = {}
# Map from qualified target to any linkable output. A subset
# of target_outputs. E.g. when mybinary depends on liba, we want to
# include liba in the linker line; when otherbinary depends on
# mybinary, we just want to build mybinary first.
target_link_deps = {}
class MakefileWriter:
"""MakefileWriter packages up the writing of one target-specific foobar.mk.
Its only real entry point is Write(); the class is mostly used for namespacing.
"""
def __init__(self, generator_flags, flavor):
self.generator_flags = generator_flags
self.flavor = flavor
self.suffix_rules_srcdir = {}
self.suffix_rules_objdir1 = {}
self.suffix_rules_objdir2 = {}
# Generate suffix rules for all compilable extensions.
for ext in COMPILABLE_EXTENSIONS.keys():
# Suffix rules for source folder.
self.suffix_rules_srcdir.update({ext: ("""\
$(obj).$(TOOLSET)/$(TARGET)/%%.o: $(srcdir)/%%%s FORCE_DO_CMD
@$(call do_cmd,%s,1)
""" % (ext, COMPILABLE_EXTENSIONS[ext]))})
# Suffix rules for generated source files.
self.suffix_rules_objdir1.update({ext: ("""\
$(obj).$(TOOLSET)/$(TARGET)/%%.o: $(obj).$(TOOLSET)/%%%s FORCE_DO_CMD
@$(call do_cmd,%s,1)
""" % (ext, COMPILABLE_EXTENSIONS[ext]))})
self.suffix_rules_objdir2.update({ext: ("""\
$(obj).$(TOOLSET)/$(TARGET)/%%.o: $(obj)/%%%s FORCE_DO_CMD
@$(call do_cmd,%s,1)
""" % (ext, COMPILABLE_EXTENSIONS[ext]))})
def Write(self, qualified_target, base_path, output_filename, spec, configs,
part_of_all):
"""The main entry point: writes a .mk file for a single target.
Arguments:
qualified_target: target we're generating
base_path: path relative to source root we're building in, used to resolve
target-relative paths
output_filename: output .mk file name to write
spec, configs: gyp info
part_of_all: flag indicating this target is part of 'all'
"""
ensure_directory_exists(output_filename)
self.fp = open(output_filename, 'w')
self.fp.write(header)
self.qualified_target = qualified_target
self.path = base_path
self.target = spec['target_name']
self.type = spec['type']
self.toolset = spec['toolset']
self.is_mac_bundle = gyp.xcode_emulation.IsMacBundle(self.flavor, spec)
if self.flavor == 'mac':
self.xcode_settings = gyp.xcode_emulation.XcodeSettings(spec)
else:
self.xcode_settings = None
deps, link_deps = self.ComputeDeps(spec)
# Some of the generation below can add extra output, sources, or
# link dependencies. All of the out params of the functions that
# follow use names like extra_foo.
extra_outputs = []
extra_sources = []
extra_link_deps = []
extra_mac_bundle_resources = []
mac_bundle_deps = []
if self.is_mac_bundle:
self.output = self.ComputeMacBundleOutput(spec)
self.output_binary = self.ComputeMacBundleBinaryOutput(spec)
else:
self.output = self.output_binary = self.ComputeOutput(spec)
self.is_standalone_static_library = bool(
spec.get('standalone_static_library', 0))
self._INSTALLABLE_TARGETS = ('executable', 'loadable_module',
'shared_library')
if (self.is_standalone_static_library or
self.type in self._INSTALLABLE_TARGETS):
self.alias = os.path.basename(self.output)
install_path = self._InstallableTargetInstallPath()
else:
self.alias = self.output
install_path = self.output
self.WriteLn("TOOLSET := " + self.toolset)
self.WriteLn("TARGET := " + self.target)
# Actions must come first, since they can generate more OBJs for use below.
if 'actions' in spec:
self.WriteActions(spec['actions'], extra_sources, extra_outputs,
extra_mac_bundle_resources, part_of_all)
# Rules must be early like actions.
if 'rules' in spec:
self.WriteRules(spec['rules'], extra_sources, extra_outputs,
extra_mac_bundle_resources, part_of_all)
if 'copies' in spec:
self.WriteCopies(spec['copies'], extra_outputs, part_of_all)
# Bundle resources.
if self.is_mac_bundle:
all_mac_bundle_resources = (
spec.get('mac_bundle_resources', []) + extra_mac_bundle_resources)
self.WriteMacBundleResources(all_mac_bundle_resources, mac_bundle_deps)
self.WriteMacInfoPlist(mac_bundle_deps)
# Sources.
all_sources = spec.get('sources', []) + extra_sources
if all_sources:
self.WriteSources(
configs, deps, all_sources, extra_outputs,
extra_link_deps, part_of_all,
gyp.xcode_emulation.MacPrefixHeader(
self.xcode_settings, lambda p: Sourceify(self.Absolutify(p)),
self.Pchify))
sources = filter(Compilable, all_sources)
if sources:
self.WriteLn(SHARED_HEADER_SUFFIX_RULES_COMMENT1)
extensions = set([os.path.splitext(s)[1] for s in sources])
for ext in extensions:
if ext in self.suffix_rules_srcdir:
self.WriteLn(self.suffix_rules_srcdir[ext])
self.WriteLn(SHARED_HEADER_SUFFIX_RULES_COMMENT2)
for ext in extensions:
if ext in self.suffix_rules_objdir1:
self.WriteLn(self.suffix_rules_objdir1[ext])
for ext in extensions:
if ext in self.suffix_rules_objdir2:
self.WriteLn(self.suffix_rules_objdir2[ext])
self.WriteLn('# End of this set of suffix rules')
# Add dependency from bundle to bundle binary.
if self.is_mac_bundle:
mac_bundle_deps.append(self.output_binary)
self.WriteTarget(spec, configs, deps, extra_link_deps + link_deps,
mac_bundle_deps, extra_outputs, part_of_all)
# Update global list of target outputs, used in dependency tracking.
target_outputs[qualified_target] = install_path
# Update global list of link dependencies.
if self.type in ('static_library', 'shared_library'):
target_link_deps[qualified_target] = self.output_binary
# Currently every android_ndk_version value has the same effect, but in the
# future the behavior could differ.
if self.generator_flags.get('android_ndk_version', None):
self.WriteAndroidNdkModuleRule(self.target, all_sources, link_deps)
self.fp.close()
def WriteSubMake(self, output_filename, makefile_path, targets, build_dir):
"""Write a "sub-project" Makefile.
This is a small wrapper Makefile that calls the top-level Makefile to build
the targets from a single gyp file (i.e. a sub-project).
Arguments:
output_filename: sub-project Makefile name to write
makefile_path: path to the top-level Makefile
targets: list of "all" targets for this sub-project
build_dir: build output directory, relative to the sub-project
"""
ensure_directory_exists(output_filename)
self.fp = open(output_filename, 'w')
self.fp.write(header)
# For consistency with other builders, put sub-project build output in the
# sub-project dir (see test/subdirectory/gyptest-subdir-all.py).
self.WriteLn('export builddir_name ?= %s' %
os.path.join(os.path.dirname(output_filename), build_dir))
self.WriteLn('.PHONY: all')
self.WriteLn('all:')
if makefile_path:
makefile_path = ' -C ' + makefile_path
self.WriteLn('\t$(MAKE)%s %s' % (makefile_path, ' '.join(targets)))
self.fp.close()
def WriteActions(self, actions, extra_sources, extra_outputs,
extra_mac_bundle_resources, part_of_all):
"""Write Makefile code for any 'actions' from the gyp input.
extra_sources: a list that will be filled in with newly generated source
files, if any
extra_outputs: a list that will be filled in with any outputs of these
actions (used to make other pieces dependent on these
actions)
part_of_all: flag indicating this target is part of 'all'
"""
env = self.GetSortedXcodeEnv()
for action in actions:
name = StringToMakefileVariable('%s_%s' % (self.qualified_target,
action['action_name']))
self.WriteLn('### Rules for action "%s":' % action['action_name'])
inputs = action['inputs']
outputs = action['outputs']
# Build up a list of outputs.
# Collect the output dirs we'll need.
dirs = set()
for out in outputs:
dir = os.path.split(out)[0]
if dir:
dirs.add(dir)
if int(action.get('process_outputs_as_sources', False)):
extra_sources += outputs
if int(action.get('process_outputs_as_mac_bundle_resources', False)):
extra_mac_bundle_resources += outputs
# Write the actual command.
action_commands = action['action']
if self.flavor == 'mac':
action_commands = [gyp.xcode_emulation.ExpandEnvVars(command, env)
for command in action_commands]
command = gyp.common.EncodePOSIXShellList(action_commands)
if 'message' in action:
self.WriteLn('quiet_cmd_%s = ACTION %s $@' % (name, action['message']))
else:
self.WriteLn('quiet_cmd_%s = ACTION %s $@' % (name, name))
if len(dirs) > 0:
command = 'mkdir -p %s' % ' '.join(dirs) + '; ' + command
cd_action = 'cd %s; ' % Sourceify(self.path or '.')
# command and cd_action get written to a toplevel variable called
# cmd_foo. Toplevel variables can't handle things that change per
# makefile like $(TARGET), so hardcode the target.
command = command.replace('$(TARGET)', self.target)
cd_action = cd_action.replace('$(TARGET)', self.target)
# Set LD_LIBRARY_PATH in case the action runs an executable from this
# build which links to shared libs from this build.
# actions run on the host, so they should in theory only use host
# libraries, but until everything is made cross-compile safe, also use
# target libraries.
# TODO(piman): when everything is cross-compile safe, remove lib.target
self.WriteLn('cmd_%s = LD_LIBRARY_PATH=$(builddir)/lib.host:'
'$(builddir)/lib.target:$$LD_LIBRARY_PATH; '
'export LD_LIBRARY_PATH; '
'%s%s'
% (name, cd_action, command))
self.WriteLn()
outputs = map(self.Absolutify, outputs)
# The makefile rules are all relative to the top dir, but the gyp actions
# are defined relative to their containing dir. This replaces the obj
# variable for the action rule with an absolute version so that the output
# goes in the right place.
# Only write the 'obj' and 'builddir' rules for the "primary" output (:1);
# it's superfluous for the "extra outputs", and this avoids accidentally
# writing duplicate dummy rules for those outputs.
# Same for environment.
self.WriteLn("%s: obj := $(abs_obj)" % QuoteSpaces(outputs[0]))
self.WriteLn("%s: builddir := $(abs_builddir)" % QuoteSpaces(outputs[0]))
self.WriteSortedXcodeEnv(outputs[0], self.GetSortedXcodeEnv())
for input in inputs:
assert ' ' not in input, (
"Spaces in action input filenames not supported (%s)" % input)
for output in outputs:
assert ' ' not in output, (
"Spaces in action output filenames not supported (%s)" % output)
# See the comment in WriteCopies about expanding env vars.
outputs = [gyp.xcode_emulation.ExpandEnvVars(o, env) for o in outputs]
inputs = [gyp.xcode_emulation.ExpandEnvVars(i, env) for i in inputs]
self.WriteDoCmd(outputs, map(Sourceify, map(self.Absolutify, inputs)),
part_of_all=part_of_all, command=name)
# Stuff the outputs in a variable so we can refer to them later.
outputs_variable = 'action_%s_outputs' % name
self.WriteLn('%s := %s' % (outputs_variable, ' '.join(outputs)))
extra_outputs.append('$(%s)' % outputs_variable)
self.WriteLn()
self.WriteLn()
def WriteRules(self, rules, extra_sources, extra_outputs,
extra_mac_bundle_resources, part_of_all):
"""Write Makefile code for any 'rules' from the gyp input.
extra_sources: a list that will be filled in with newly generated source
files, if any
extra_outputs: a list that will be filled in with any outputs of these
rules (used to make other pieces dependent on these rules)
part_of_all: flag indicating this target is part of 'all'
"""
env = self.GetSortedXcodeEnv()
for rule in rules:
name = StringToMakefileVariable('%s_%s' % (self.qualified_target,
rule['rule_name']))
count = 0
self.WriteLn('### Generated for rule %s:' % name)
all_outputs = []
for rule_source in rule.get('rule_sources', []):
dirs = set()
(rule_source_dirname, rule_source_basename) = os.path.split(rule_source)
(rule_source_root, rule_source_ext) = \
os.path.splitext(rule_source_basename)
outputs = [self.ExpandInputRoot(out, rule_source_root,
rule_source_dirname)
for out in rule['outputs']]
for out in outputs:
dir = os.path.dirname(out)
if dir:
dirs.add(dir)
if int(rule.get('process_outputs_as_sources', False)):
extra_sources += outputs
if int(rule.get('process_outputs_as_mac_bundle_resources', False)):
extra_mac_bundle_resources += outputs
inputs = map(Sourceify, map(self.Absolutify, [rule_source] +
rule.get('inputs', [])))
actions = ['$(call do_cmd,%s_%d)' % (name, count)]
if name == 'resources_grit':
# HACK: This is ugly. Grit intentionally doesn't touch the
# timestamp of its output file when the file doesn't change,
# which is fine in hash-based dependency systems like scons
# and forge, but not kosher in the make world. After some
# discussion, hacking around it here seems like the least
# amount of pain.
actions += ['@touch --no-create $@']
# See the comment in WriteCopies about expanding env vars.
outputs = [gyp.xcode_emulation.ExpandEnvVars(o, env) for o in outputs]
inputs = [gyp.xcode_emulation.ExpandEnvVars(i, env) for i in inputs]
outputs = map(self.Absolutify, outputs)
all_outputs += outputs
# Only write the 'obj' and 'builddir' rules for the "primary" output
# (:1); it's superfluous for the "extra outputs", and this avoids
# accidentally writing duplicate dummy rules for those outputs.
self.WriteLn('%s: obj := $(abs_obj)' % outputs[0])
self.WriteLn('%s: builddir := $(abs_builddir)' % outputs[0])
self.WriteMakeRule(outputs, inputs + ['FORCE_DO_CMD'], actions)
for output in outputs:
assert ' ' not in output, (
"Spaces in rule filenames not yet supported (%s)" % output)
self.WriteLn('all_deps += %s' % ' '.join(outputs))
action = [self.ExpandInputRoot(ac, rule_source_root,
rule_source_dirname)
for ac in rule['action']]
mkdirs = ''
if len(dirs) > 0:
mkdirs = 'mkdir -p %s; ' % ' '.join(dirs)
cd_action = 'cd %s; ' % Sourceify(self.path or '.')
# action, cd_action, and mkdirs get written to a toplevel variable
# called cmd_foo. Toplevel variables can't handle things that change
# per makefile like $(TARGET), so hardcode the target.
if self.flavor == 'mac':
action = [gyp.xcode_emulation.ExpandEnvVars(command, env)
for command in action]
action = gyp.common.EncodePOSIXShellList(action)
action = action.replace('$(TARGET)', self.target)
cd_action = cd_action.replace('$(TARGET)', self.target)
mkdirs = mkdirs.replace('$(TARGET)', self.target)
# Set LD_LIBRARY_PATH in case the rule runs an executable from this
# build which links to shared libs from this build.
# rules run on the host, so they should in theory only use host
# libraries, but until everything is made cross-compile safe, also use
# target libraries.
# TODO(piman): when everything is cross-compile safe, remove lib.target
self.WriteLn(
"cmd_%(name)s_%(count)d = LD_LIBRARY_PATH="
"$(builddir)/lib.host:$(builddir)/lib.target:$$LD_LIBRARY_PATH; "
"export LD_LIBRARY_PATH; "
"%(cd_action)s%(mkdirs)s%(action)s" % {
'action': action,
'cd_action': cd_action,
'count': count,
'mkdirs': mkdirs,
'name': name,
})
self.WriteLn(
'quiet_cmd_%(name)s_%(count)d = RULE %(name)s_%(count)d $@' % {
'count': count,
'name': name,
})
self.WriteLn()
count += 1
outputs_variable = 'rule_%s_outputs' % name
self.WriteList(all_outputs, outputs_variable)
extra_outputs.append('$(%s)' % outputs_variable)
self.WriteLn('### Finished generating for rule: %s' % name)
self.WriteLn()
self.WriteLn('### Finished generating for all rules')
self.WriteLn('')
def WriteCopies(self, copies, extra_outputs, part_of_all):
"""Write Makefile code for any 'copies' from the gyp input.
extra_outputs: a list that will be filled in with any outputs of this action
(used to make other pieces dependent on this action)
part_of_all: flag indicating this target is part of 'all'
"""
self.WriteLn('### Generated for copy rule.')
variable = StringToMakefileVariable(self.qualified_target + '_copies')
outputs = []
for copy in copies:
for path in copy['files']:
# Absolutify() may call normpath, and will strip trailing slashes.
path = Sourceify(self.Absolutify(path))
filename = os.path.split(path)[1]
output = Sourceify(self.Absolutify(os.path.join(copy['destination'],
filename)))
# If the output path has variables in it, which happens in practice for
# 'copies', writing the environment as target-local doesn't work,
# because the variables are already needed for the target name.
# Copying the environment variables into global make variables doesn't
# work either, because then the .d files will potentially contain spaces
# after variable expansion, and .d file handling cannot handle spaces.
# As a workaround, manually expand variables at gyp time. Since 'copies'
# can't run scripts, there's no need to write the env then.
# WriteDoCmd() will escape spaces for .d files.
env = self.GetSortedXcodeEnv()
output = gyp.xcode_emulation.ExpandEnvVars(output, env)
path = gyp.xcode_emulation.ExpandEnvVars(path, env)
self.WriteDoCmd([output], [path], 'copy', part_of_all)
outputs.append(output)
self.WriteLn('%s = %s' % (variable, ' '.join(map(QuoteSpaces, outputs))))
extra_outputs.append('$(%s)' % variable)
self.WriteLn()
def WriteMacBundleResources(self, resources, bundle_deps):
"""Writes Makefile code for 'mac_bundle_resources'."""
self.WriteLn('### Generated for mac_bundle_resources')
for output, res in gyp.xcode_emulation.GetMacBundleResources(
generator_default_variables['PRODUCT_DIR'], self.xcode_settings,
map(Sourceify, map(self.Absolutify, resources))):
self.WriteDoCmd([output], [res], 'mac_tool,,,copy-bundle-resource',
part_of_all=True)
bundle_deps.append(output)
def WriteMacInfoPlist(self, bundle_deps):
"""Write Makefile code for bundle Info.plist files."""
info_plist, out, defines, extra_env = gyp.xcode_emulation.GetMacInfoPlist(
generator_default_variables['PRODUCT_DIR'], self.xcode_settings,
lambda p: Sourceify(self.Absolutify(p)))
if not info_plist:
return
if defines:
# Create an intermediate file to store preprocessed results.
intermediate_plist = ('$(obj).$(TOOLSET)/$(TARGET)/' +
os.path.basename(info_plist))
self.WriteList(defines, intermediate_plist + ': INFOPLIST_DEFINES', '-D',
quoter=EscapeCppDefine)
self.WriteMakeRule([intermediate_plist], [info_plist],
['$(call do_cmd,infoplist)',
# "Convert" the plist so that any weird whitespace changes from the
# preprocessor do not affect the XML parser in mac_tool.
'@plutil -convert xml1 $@ $@'])
info_plist = intermediate_plist
# plists can contain envvars; write the env so they can be substituted into the file.
self.WriteSortedXcodeEnv(
out, self.GetSortedXcodeEnv(additional_settings=extra_env))
self.WriteDoCmd([out], [info_plist], 'mac_tool,,,copy-info-plist',
part_of_all=True)
bundle_deps.append(out)
def WriteSources(self, configs, deps, sources,
extra_outputs, extra_link_deps,
part_of_all, precompiled_header):
"""Write Makefile code for any 'sources' from the gyp input.
These are source files necessary to build the current target.
configs, deps, sources: input from gyp.
extra_outputs: a list of extra outputs this action should be dependent on;
used to serialize action/rules before compilation
extra_link_deps: a list that will be filled in with any outputs of
compilation (to be used in link lines)
part_of_all: flag indicating this target is part of 'all'
"""
# Write configuration-specific variables for CFLAGS, etc.
for configname in sorted(configs.keys()):
config = configs[configname]
self.WriteList(config.get('defines'), 'DEFS_%s' % configname, prefix='-D',
quoter=EscapeCppDefine)
if self.flavor == 'mac':
cflags = self.xcode_settings.GetCflags(configname)
cflags_c = self.xcode_settings.GetCflagsC(configname)
cflags_cc = self.xcode_settings.GetCflagsCC(configname)
cflags_objc = self.xcode_settings.GetCflagsObjC(configname)
cflags_objcc = self.xcode_settings.GetCflagsObjCC(configname)
else:
cflags = config.get('cflags')
cflags_c = config.get('cflags_c')
cflags_cc = config.get('cflags_cc')
self.WriteLn("# Flags passed to all source files.");
self.WriteList(cflags, 'CFLAGS_%s' % configname)
self.WriteLn("# Flags passed to only C files.");
self.WriteList(cflags_c, 'CFLAGS_C_%s' % configname)
self.WriteLn("# Flags passed to only C++ files.");
self.WriteList(cflags_cc, 'CFLAGS_CC_%s' % configname)
if self.flavor == 'mac':
self.WriteLn("# Flags passed to only ObjC files.");
self.WriteList(cflags_objc, 'CFLAGS_OBJC_%s' % configname)
self.WriteLn("# Flags passed to only ObjC++ files.");
self.WriteList(cflags_objcc, 'CFLAGS_OBJCC_%s' % configname)
includes = config.get('include_dirs')
if includes:
includes = map(Sourceify, map(self.Absolutify, includes))
self.WriteList(includes, 'INCS_%s' % configname, prefix='-I')
compilable = filter(Compilable, sources)
objs = map(self.Objectify, map(self.Absolutify, map(Target, compilable)))
self.WriteList(objs, 'OBJS')
for obj in objs:
assert ' ' not in obj, (
"Spaces in object filenames not supported (%s)" % obj)
self.WriteLn('# Add to the list of files we specially track '
'dependencies for.')
self.WriteLn('all_deps += $(OBJS)')
self.WriteLn()
# Make sure our dependencies are built first.
if deps:
self.WriteMakeRule(['$(OBJS)'], deps,
comment = 'Make sure our dependencies are built '
'before any of us.',
order_only = True)
# Make sure the actions and rules run first.
# If they generate any extra headers etc., the per-.o file dep tracking
# will catch the proper rebuilds, so order only is still ok here.
if extra_outputs:
self.WriteMakeRule(['$(OBJS)'], extra_outputs,
comment = 'Make sure our actions/rules run '
'before any of us.',
order_only = True)
pchdeps = precompiled_header.GetObjDependencies(compilable, objs)
if pchdeps:
self.WriteLn('# Dependencies from obj files to their precompiled headers')
for source, obj, gch in pchdeps:
self.WriteLn('%s: %s' % (obj, gch))
self.WriteLn('# End precompiled header dependencies')
if objs:
extra_link_deps.append('$(OBJS)')
self.WriteLn("""\
# CFLAGS et al overrides must be target-local.
# See "Target-specific Variable Values" in the GNU Make manual.""")
self.WriteLn("$(OBJS): TOOLSET := $(TOOLSET)")
self.WriteLn("$(OBJS): GYP_CFLAGS := "
"$(DEFS_$(BUILDTYPE)) "
"$(INCS_$(BUILDTYPE)) "
"%s " % precompiled_header.GetInclude('c') +
"$(CFLAGS_$(BUILDTYPE)) "
"$(CFLAGS_C_$(BUILDTYPE))")
self.WriteLn("$(OBJS): GYP_CXXFLAGS := "
"$(DEFS_$(BUILDTYPE)) "
"$(INCS_$(BUILDTYPE)) "
"%s " % precompiled_header.GetInclude('cc') +
"$(CFLAGS_$(BUILDTYPE)) "
"$(CFLAGS_CC_$(BUILDTYPE))")
if self.flavor == 'mac':
self.WriteLn("$(OBJS): GYP_OBJCFLAGS := "
"$(DEFS_$(BUILDTYPE)) "
"$(INCS_$(BUILDTYPE)) "
"%s " % precompiled_header.GetInclude('m') +
"$(CFLAGS_$(BUILDTYPE)) "
"$(CFLAGS_C_$(BUILDTYPE)) "
"$(CFLAGS_OBJC_$(BUILDTYPE))")
self.WriteLn("$(OBJS): GYP_OBJCXXFLAGS := "
"$(DEFS_$(BUILDTYPE)) "
"$(INCS_$(BUILDTYPE)) "
"%s " % precompiled_header.GetInclude('mm') +
"$(CFLAGS_$(BUILDTYPE)) "
"$(CFLAGS_CC_$(BUILDTYPE)) "
"$(CFLAGS_OBJCC_$(BUILDTYPE))")
self.WritePchTargets(precompiled_header.GetPchBuildCommands())
# If there are any object files in our input file list, link them into our
# output.
extra_link_deps += filter(Linkable, sources)
self.WriteLn()
def WritePchTargets(self, pch_commands):
"""Writes make rules to compile prefix headers."""
if not pch_commands:
return
for gch, lang_flag, lang, input in pch_commands:
extra_flags = {
'c': '$(CFLAGS_C_$(BUILDTYPE))',
'cc': '$(CFLAGS_CC_$(BUILDTYPE))',
'm': '$(CFLAGS_C_$(BUILDTYPE)) $(CFLAGS_OBJC_$(BUILDTYPE))',
'mm': '$(CFLAGS_CC_$(BUILDTYPE)) $(CFLAGS_OBJCC_$(BUILDTYPE))',
}[lang]
var_name = {
'c': 'GYP_PCH_CFLAGS',
'cc': 'GYP_PCH_CXXFLAGS',
'm': 'GYP_PCH_OBJCFLAGS',
'mm': 'GYP_PCH_OBJCXXFLAGS',
}[lang]
self.WriteLn("%s: %s := %s " % (gch, var_name, lang_flag) +
"$(DEFS_$(BUILDTYPE)) "
"$(INCS_$(BUILDTYPE)) "
"$(CFLAGS_$(BUILDTYPE)) " +
extra_flags)
self.WriteLn('%s: %s FORCE_DO_CMD' % (gch, input))
self.WriteLn('\t@$(call do_cmd,pch_%s,1)' % lang)
self.WriteLn('')
assert ' ' not in gch, (
"Spaces in gch filenames not supported (%s)" % gch)
self.WriteLn('all_deps += %s' % gch)
self.WriteLn('')
def ComputeOutputBasename(self, spec):
"""Return the 'output basename' of a gyp spec.
E.g., the loadable module 'foobar' in directory 'baz' will produce
'libfoobar.so'
"""
assert not self.is_mac_bundle
if self.flavor == 'mac' and self.type in (
'static_library', 'executable', 'shared_library', 'loadable_module'):
return self.xcode_settings.GetExecutablePath()
target = spec['target_name']
target_prefix = ''
target_ext = ''
if self.type == 'static_library':
if target[:3] == 'lib':
target = target[3:]
target_prefix = 'lib'
target_ext = '.a'
elif self.type in ('loadable_module', 'shared_library'):
if target[:3] == 'lib':
target = target[3:]
target_prefix = 'lib'
target_ext = '.so'
elif self.type == 'none':
target = '%s.stamp' % target
elif self.type != 'executable':
print ("ERROR: What output file should be generated?",
"type", self.type, "target", target)
target_prefix = spec.get('product_prefix', target_prefix)
target = spec.get('product_name', target)
product_ext = spec.get('product_extension')
if product_ext:
target_ext = '.' + product_ext
return target_prefix + target + target_ext
def _InstallImmediately(self):
return self.toolset == 'target' and self.flavor == 'mac' and self.type in (
'static_library', 'executable', 'shared_library', 'loadable_module')
def ComputeOutput(self, spec):
"""Return the 'output' (full output path) of a gyp spec.
E.g., the loadable module 'foobar' in directory 'baz' will produce
'$(obj)/baz/libfoobar.so'
"""
assert not self.is_mac_bundle
path = os.path.join('$(obj).' + self.toolset, self.path)
if self.type == 'executable' or self._InstallImmediately():
path = '$(builddir)'
path = spec.get('product_dir', path)
return os.path.join(path, self.ComputeOutputBasename(spec))
def ComputeMacBundleOutput(self, spec):
"""Return the 'output' (full output path) to a bundle output directory."""
assert self.is_mac_bundle
path = generator_default_variables['PRODUCT_DIR']
return os.path.join(path, self.xcode_settings.GetWrapperName())
def ComputeMacBundleBinaryOutput(self, spec):
"""Return the 'output' (full output path) to the binary in a bundle."""
path = generator_default_variables['PRODUCT_DIR']
return os.path.join(path, self.xcode_settings.GetExecutablePath())
def ComputeDeps(self, spec):
"""Compute the dependencies of a gyp spec.
Returns a tuple (deps, link_deps), where each is a list of
filenames that will need to be put in front of make for either
building (deps) or linking (link_deps).
"""
deps = []
link_deps = []
if 'dependencies' in spec:
deps.extend([target_outputs[dep] for dep in spec['dependencies']
if target_outputs[dep]])
for dep in spec['dependencies']:
if dep in target_link_deps:
link_deps.append(target_link_deps[dep])
deps.extend(link_deps)
# TODO: It seems we need to transitively link in libraries (e.g. -lfoo)?
# This hack makes it work:
# link_deps.extend(spec.get('libraries', []))
return (gyp.common.uniquer(deps), gyp.common.uniquer(link_deps))
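# For illustration (hypothetical targets): if a target depends on a static
# library 'liba' and an executable 'mybinary', deps ends up containing both
# of their outputs, while link_deps contains only liba's library, since only
# library targets register themselves in target_link_deps.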
def WriteDependencyOnExtraOutputs(self, target, extra_outputs):
self.WriteMakeRule([self.output_binary], extra_outputs,
comment = 'Build our special outputs first.',
order_only = True)
def WriteTarget(self, spec, configs, deps, link_deps, bundle_deps,
extra_outputs, part_of_all):
"""Write Makefile code to produce the final target of the gyp spec.
spec, configs: input from gyp.
deps, link_deps: dependency lists; see ComputeDeps()
extra_outputs: any extra outputs that our target should depend on
part_of_all: flag indicating this target is part of 'all'
"""
self.WriteLn('### Rules for final target.')
if extra_outputs:
self.WriteDependencyOnExtraOutputs(self.output_binary, extra_outputs)
self.WriteMakeRule(extra_outputs, deps,
comment=('Preserve order dependency of '
'special output on deps.'),
order_only = True)
target_postbuilds = {}
if self.type != 'none':
for configname in sorted(configs.keys()):
config = configs[configname]
if self.flavor == 'mac':
ldflags = self.xcode_settings.GetLdflags(configname,
generator_default_variables['PRODUCT_DIR'],
lambda p: Sourceify(self.Absolutify(p)))
# TARGET_POSTBUILDS_$(BUILDTYPE) is added to postbuilds later on.
gyp_to_build = gyp.common.InvertRelativePath(self.path)
target_postbuild = self.xcode_settings.GetTargetPostbuilds(
configname,
QuoteSpaces(os.path.normpath(os.path.join(gyp_to_build,
self.output))),
QuoteSpaces(os.path.normpath(os.path.join(gyp_to_build,
self.output_binary))))
if target_postbuild:
target_postbuilds[configname] = target_postbuild
else:
ldflags = config.get('ldflags', [])
# Compute an rpath for this output if needed.
if any(dep.endswith('.so') or '.so.' in dep for dep in deps):
# We want to get the literal string "$ORIGIN" into the link command,
# so we need lots of escaping.
ldflags.append(r'-Wl,-rpath=\$$ORIGIN/lib.%s/' % self.toolset)
ldflags.append(r'-Wl,-rpath-link=\$(builddir)/lib.%s/' %
self.toolset)
self.WriteList(ldflags, 'LDFLAGS_%s' % configname)
if self.flavor == 'mac':
self.WriteList(self.xcode_settings.GetLibtoolflags(configname),
'LIBTOOLFLAGS_%s' % configname)
libraries = spec.get('libraries')
if libraries:
# Remove duplicate entries
libraries = gyp.common.uniquer(libraries)
if self.flavor == 'mac':
libraries = self.xcode_settings.AdjustLibraries(libraries)
self.WriteList(libraries, 'LIBS')
self.WriteLn('%s: GYP_LDFLAGS := $(LDFLAGS_$(BUILDTYPE))' %
QuoteSpaces(self.output_binary))
self.WriteLn('%s: LIBS := $(LIBS)' % QuoteSpaces(self.output_binary))
if self.flavor == 'mac':
self.WriteLn('%s: GYP_LIBTOOLFLAGS := $(LIBTOOLFLAGS_$(BUILDTYPE))' %
QuoteSpaces(self.output_binary))
# Postbuild actions. Like actions, but implicitly depend on the target's
# output.
postbuilds = []
if self.flavor == 'mac':
if target_postbuilds:
postbuilds.append('$(TARGET_POSTBUILDS_$(BUILDTYPE))')
postbuilds.extend(
gyp.xcode_emulation.GetSpecPostbuildCommands(spec))
if postbuilds:
# Envvars may be referenced by TARGET_POSTBUILDS_$(BUILDTYPE),
# so we must output its definition first, since we declare variables
# using ":=".
self.WriteSortedXcodeEnv(self.output, self.GetSortedXcodePostbuildEnv())
for configname in target_postbuilds:
self.WriteLn('%s: TARGET_POSTBUILDS_%s := %s' %
(QuoteSpaces(self.output),
configname,
gyp.common.EncodePOSIXShellList(target_postbuilds[configname])))
# Postbuilds expect to be run in the gyp file's directory, so insert an
# implicit postbuild to cd to there.
postbuilds.insert(0, gyp.common.EncodePOSIXShellList(['cd', self.path]))
for i in xrange(len(postbuilds)):
if not postbuilds[i].startswith('$'):
postbuilds[i] = EscapeShellArgument(postbuilds[i])
self.WriteLn('%s: builddir := $(abs_builddir)' % QuoteSpaces(self.output))
self.WriteLn('%s: POSTBUILDS := %s' % (
QuoteSpaces(self.output), ' '.join(postbuilds)))
# A bundle directory depends on its dependencies such as bundle resources
# and bundle binary. When all dependencies have been built, the bundle
# needs to be packaged.
if self.is_mac_bundle:
# If the framework doesn't contain a binary, then nothing depends
# on the actions -- make the framework depend on them directly too.
self.WriteDependencyOnExtraOutputs(self.output, extra_outputs)
# Bundle dependencies. Note that the code below adds actions to this
# target, so if you move these two lines, move the lines below as well.
self.WriteList(map(QuoteSpaces, bundle_deps), 'BUNDLE_DEPS')
self.WriteLn('%s: $(BUNDLE_DEPS)' % QuoteSpaces(self.output))
# After the framework is built, package it. Needs to happen before
# postbuilds, since postbuilds depend on this.
if self.type in ('shared_library', 'loadable_module'):
self.WriteLn('\t@$(call do_cmd,mac_package_framework,,,%s)' %
self.xcode_settings.GetFrameworkVersion())
# Bundle postbuilds can depend on the whole bundle, so run them after
# the bundle is packaged, not merely after the bundle binary is done.
if postbuilds:
self.WriteLn('\t@$(call do_postbuilds)')
postbuilds = [] # Don't write postbuilds for target's output.
# Needed by test/mac/gyptest-rebuild.py.
self.WriteLn('\t@true # No-op, used by tests')
# Since this target depends on the binary and resources, which live in
# nested subfolders, the framework directory will usually be older than
# its dependencies. To prevent this rule from executing
# on every build (expensive, especially with postbuilds), explicitly
# update the time on the framework directory.
self.WriteLn('\t@touch -c %s' % QuoteSpaces(self.output))
if postbuilds:
assert not self.is_mac_bundle, ('Postbuilds for bundles should be done '
'on the bundle, not the binary (target \'%s\')' % self.target)
assert 'product_dir' not in spec, ('Postbuilds do not work with '
'custom product_dir')
if self.type == 'executable':
self.WriteLn('%s: LD_INPUTS := %s' % (
QuoteSpaces(self.output_binary),
' '.join(map(QuoteSpaces, link_deps))))
if self.toolset == 'host' and self.flavor == 'android':
self.WriteDoCmd([self.output_binary], link_deps, 'link_host',
part_of_all, postbuilds=postbuilds)
else:
self.WriteDoCmd([self.output_binary], link_deps, 'link', part_of_all,
postbuilds=postbuilds)
elif self.type == 'static_library':
for link_dep in link_deps:
assert ' ' not in link_dep, (
"Spaces in alink input filenames not supported (%s)" % link_dep)
if (self.flavor not in ('mac', 'openbsd', 'win') and not
self.is_standalone_static_library):
self.WriteDoCmd([self.output_binary], link_deps, 'alink_thin',
part_of_all, postbuilds=postbuilds)
else:
self.WriteDoCmd([self.output_binary], link_deps, 'alink', part_of_all,
postbuilds=postbuilds)
elif self.type == 'shared_library':
self.WriteLn('%s: LD_INPUTS := %s' % (
QuoteSpaces(self.output_binary),
' '.join(map(QuoteSpaces, link_deps))))
self.WriteDoCmd([self.output_binary], link_deps, 'solink', part_of_all,
postbuilds=postbuilds)
elif self.type == 'loadable_module':
for link_dep in link_deps:
assert ' ' not in link_dep, (
"Spaces in module input filenames not supported (%s)" % link_dep)
if self.toolset == 'host' and self.flavor == 'android':
self.WriteDoCmd([self.output_binary], link_deps, 'solink_module_host',
part_of_all, postbuilds=postbuilds)
else:
self.WriteDoCmd(
[self.output_binary], link_deps, 'solink_module', part_of_all,
postbuilds=postbuilds)
elif self.type == 'none':
# Write a stamp line.
self.WriteDoCmd([self.output_binary], deps, 'touch', part_of_all,
postbuilds=postbuilds)
else:
print "WARNING: no output for", self.type, target
# Add an alias for each target (if there are any outputs).
# Installable target aliases are created below.
if ((self.output and self.output != self.target) and
(self.type not in self._INSTALLABLE_TARGETS)):
self.WriteMakeRule([self.target], [self.output],
comment='Add target alias', phony = True)
if part_of_all:
self.WriteMakeRule(['all'], [self.target],
comment = 'Add target alias to "all" target.',
phony = True)
# Add special-case rules for our installable targets.
# 1) They need to install to the build dir or "product" dir.
# 2) They get shortcuts for building (e.g. "make chrome").
# 3) They are part of "make all".
if (self.type in self._INSTALLABLE_TARGETS or
self.is_standalone_static_library):
if self.type == 'shared_library':
file_desc = 'shared library'
elif self.type == 'static_library':
file_desc = 'static library'
else:
file_desc = 'executable'
install_path = self._InstallableTargetInstallPath()
installable_deps = [self.output]
if (self.flavor == 'mac' and not 'product_dir' in spec and
self.toolset == 'target'):
# On mac, products are created in install_path immediately.
assert install_path == self.output, '%s != %s' % (
install_path, self.output)
# Point the target alias to the final binary output.
self.WriteMakeRule([self.target], [install_path],
comment='Add target alias', phony = True)
if install_path != self.output:
assert not self.is_mac_bundle # See comment a few lines above.
self.WriteDoCmd([install_path], [self.output], 'copy',
comment = 'Copy this to the %s output path.' %
file_desc, part_of_all=part_of_all)
installable_deps.append(install_path)
if self.output != self.alias and self.alias != self.target:
self.WriteMakeRule([self.alias], installable_deps,
comment = 'Short alias for building this %s.' %
file_desc, phony = True)
if part_of_all:
self.WriteMakeRule(['all'], [install_path],
comment = 'Add %s to "all" target.' % file_desc,
phony = True)
def WriteList(self, value_list, variable=None, prefix='',
quoter=QuoteIfNecessary):
"""Write a variable definition that is a list of values.
E.g. WriteList(['a','b'], 'foo', prefix='blah') writes out
foo = blaha blahb
but in a pretty-printed style.
"""
values = ''
if value_list:
value_list = [quoter(prefix + l) for l in value_list]
values = ' \\\n\t' + ' \\\n\t'.join(value_list)
self.fp.write('%s :=%s\n\n' % (variable, values))
def WriteDoCmd(self, outputs, inputs, command, part_of_all, comment=None,
postbuilds=False):
"""Write a Makefile rule that uses do_cmd.
This makes the outputs dependent on the command line that was run,
as well as supporting the V= make command line flag.
"""
suffix = ''
if postbuilds:
assert ',' not in command
suffix = ',,1' # Tell do_cmd to honor $POSTBUILDS
self.WriteMakeRule(outputs, inputs,
actions = ['$(call do_cmd,%s%s)' % (command, suffix)],
comment = comment,
force = True)
# Add our outputs to the list of targets we read depfiles from.
# all_deps is only used for deps file reading, and for deps files we replace
# spaces with ? because escaping doesn't work with make's $(sort) and
# other functions.
outputs = [QuoteSpaces(o, SPACE_REPLACEMENT) for o in outputs]
self.WriteLn('all_deps += %s' % ' '.join(outputs))
def WriteMakeRule(self, outputs, inputs, actions=None, comment=None,
order_only=False, force=False, phony=False):
"""Write a Makefile rule, with some extra tricks.
outputs: a list of outputs for the rule (note: this is not directly
supported by make; see comments below)
inputs: a list of inputs for the rule
actions: a list of shell commands to run for the rule
comment: a comment to put in the Makefile above the rule (also useful
for making this Python script's code self-documenting)
order_only: if true, makes the dependency order-only
force: if true, include FORCE_DO_CMD as an order-only dep
phony: if true, the rule does not actually generate the named output, the
output is just a name to run the rule
"""
outputs = map(QuoteSpaces, outputs)
inputs = map(QuoteSpaces, inputs)
if comment:
self.WriteLn('# ' + comment)
if phony:
self.WriteLn('.PHONY: ' + ' '.join(outputs))
# TODO(evanm): just make order_only a list of deps instead of these hacks.
if order_only:
order_insert = '| '
pick_output = ' '.join(outputs)
else:
order_insert = ''
pick_output = outputs[0]
if force:
force_append = ' FORCE_DO_CMD'
else:
force_append = ''
if actions:
self.WriteLn("%s: TOOLSET := $(TOOLSET)" % outputs[0])
self.WriteLn('%s: %s%s%s' % (pick_output, order_insert, ' '.join(inputs),
force_append))
if actions:
for action in actions:
self.WriteLn('\t%s' % action)
if not order_only and len(outputs) > 1:
# If we have more than one output, a rule like
# foo bar: baz
# means that for *each* output we must run the action, potentially
# in parallel. That is not what we're trying to write -- what
# we want is that we run the action once and it generates all
# the files.
# http://www.gnu.org/software/hello/manual/automake/Multiple-Outputs.html
# discusses this problem and has this solution:
# 1) Write the naive rule that would produce parallel runs of
# the action.
# 2) Make the outputs serialized on each other, so we won't start
# a parallel run until the first run finishes, at which point
# we'll have generated all the outputs and we're done.
self.WriteLn('%s: %s' % (' '.join(outputs[1:]), outputs[0]))
# Add a dummy command to the "extra outputs" rule, otherwise make seems to
# think these outputs haven't (couldn't have?) changed, and thus doesn't
# flag them as changed (i.e. include in '$?') when evaluating dependent
# rules, which in turn causes do_cmd() to skip running dependent commands.
self.WriteLn('%s: ;' % (' '.join(outputs[1:])))
self.WriteLn()
def WriteAndroidNdkModuleRule(self, module_name, all_sources, link_deps):
"""Write a set of LOCAL_XXX definitions for Android NDK.
These variable definitions will be used by Android NDK but do nothing for
non-Android applications.
Arguments:
module_name: Android NDK module name, which must be unique among all
module names.
all_sources: A list of source files (will be filtered by Compilable).
link_deps: A list of link dependencies, which must be sorted in
the order from dependencies to dependents.
"""
if self.type not in ('executable', 'shared_library', 'static_library'):
return
self.WriteLn('# Variable definitions for Android applications')
self.WriteLn('include $(CLEAR_VARS)')
self.WriteLn('LOCAL_MODULE := ' + module_name)
self.WriteLn('LOCAL_CFLAGS := $(CFLAGS_$(BUILDTYPE)) '
'$(DEFS_$(BUILDTYPE)) '
# LOCAL_CFLAGS is applied to both C and C++. There is
# no way to specify $(CFLAGS_C_$(BUILDTYPE)) only for C
# sources.
'$(CFLAGS_C_$(BUILDTYPE)) '
# $(INCS_$(BUILDTYPE)) includes the prefix '-I' while
# LOCAL_C_INCLUDES does not expect it. So put it in
# LOCAL_CFLAGS.
'$(INCS_$(BUILDTYPE))')
# LOCAL_CXXFLAGS is obsolete and LOCAL_CPPFLAGS is preferred.
self.WriteLn('LOCAL_CPPFLAGS := $(CFLAGS_CC_$(BUILDTYPE))')
self.WriteLn('LOCAL_C_INCLUDES :=')
self.WriteLn('LOCAL_LDLIBS := $(LDFLAGS_$(BUILDTYPE)) $(LIBS)')
# Detect the C++ extension.
cpp_ext = {'.cc': 0, '.cpp': 0, '.cxx': 0}
default_cpp_ext = '.cpp'
for filename in all_sources:
ext = os.path.splitext(filename)[1]
if ext in cpp_ext:
cpp_ext[ext] += 1
if cpp_ext[ext] > cpp_ext[default_cpp_ext]:
default_cpp_ext = ext
self.WriteLn('LOCAL_CPP_EXTENSION := ' + default_cpp_ext)
self.WriteList(map(self.Absolutify, filter(Compilable, all_sources)),
'LOCAL_SRC_FILES')
# Filter out those which do not match prefix and suffix and produce
# the resulting list without prefix and suffix.
def DepsToModules(deps, prefix, suffix):
modules = []
for filepath in deps:
filename = os.path.basename(filepath)
if filename.startswith(prefix) and filename.endswith(suffix):
modules.append(filename[len(prefix):-len(suffix)])
return modules
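# For illustration (hypothetical deps): DepsToModules(['out/libfoo.so', 'bar.a'],
# 'lib', '.so') returns ['foo']; 'bar.a' is dropped because it does not match
# the prefix/suffix pair.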
# Retrieve the default value of 'SHARED_LIB_SUFFIX'
params = {'flavor': 'linux'}
default_variables = {}
CalculateVariables(default_variables, params)
self.WriteList(
DepsToModules(link_deps,
generator_default_variables['SHARED_LIB_PREFIX'],
default_variables['SHARED_LIB_SUFFIX']),
'LOCAL_SHARED_LIBRARIES')
self.WriteList(
DepsToModules(link_deps,
generator_default_variables['STATIC_LIB_PREFIX'],
generator_default_variables['STATIC_LIB_SUFFIX']),
'LOCAL_STATIC_LIBRARIES')
if self.type == 'executable':
self.WriteLn('include $(BUILD_EXECUTABLE)')
elif self.type == 'shared_library':
self.WriteLn('include $(BUILD_SHARED_LIBRARY)')
elif self.type == 'static_library':
self.WriteLn('include $(BUILD_STATIC_LIBRARY)')
self.WriteLn()
def WriteLn(self, text=''):
self.fp.write(text + '\n')
def GetSortedXcodeEnv(self, additional_settings=None):
return gyp.xcode_emulation.GetSortedXcodeEnv(
self.xcode_settings, "$(abs_builddir)",
os.path.join("$(abs_srcdir)", self.path), "$(BUILDTYPE)",
additional_settings)
def GetSortedXcodePostbuildEnv(self):
# CHROMIUM_STRIP_SAVE_FILE is a chromium-specific hack.
# TODO(thakis): It would be nice to have some general mechanism instead.
strip_save_file = self.xcode_settings.GetPerTargetSetting(
'CHROMIUM_STRIP_SAVE_FILE', '')
# Even if strip_save_file is empty, explicitly write it. Else a postbuild
# might pick up an export from an earlier target.
return self.GetSortedXcodeEnv(
additional_settings={'CHROMIUM_STRIP_SAVE_FILE': strip_save_file})
def WriteSortedXcodeEnv(self, target, env):
for k, v in env:
# For
# foo := a\ b
# the escaped space does the right thing. For
# export foo := a\ b
# it does not -- the backslash is written to the env as a literal character.
# So don't escape spaces in |env[k]|.
self.WriteLn('%s: export %s := %s' % (QuoteSpaces(target), k, v))
def Objectify(self, path):
"""Convert a path to its output directory form."""
if '$(' in path:
path = path.replace('$(obj)/', '$(obj).%s/$(TARGET)/' % self.toolset)
if not '$(obj)' in path:
path = '$(obj).%s/$(TARGET)/%s' % (self.toolset, path)
return path
def Pchify(self, path, lang):
"""Convert a prefix header path to its output directory form."""
path = self.Absolutify(path)
if '$(' in path:
path = path.replace('$(obj)/', '$(obj).%s/$(TARGET)/pch-%s' %
(self.toolset, lang))
return path
return '$(obj).%s/$(TARGET)/pch-%s/%s' % (self.toolset, lang, path)
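# For illustration (hypothetical paths, assuming self.path == 'foo' and the
# target toolset): Pchify('pch/prefix.h', 'cc') returns
# '$(obj).target/$(TARGET)/pch-cc/foo/pch/prefix.h'.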
def Absolutify(self, path):
"""Convert a subdirectory-relative path into a base-relative path.
Skips over paths that contain variables."""
if '$(' in path:
# Don't call normpath in this case, as it might collapse the
# path too aggressively if it features '..'. However it's still
# important to strip trailing slashes.
return path.rstrip('/')
return os.path.normpath(os.path.join(self.path, path))
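# For illustration (hypothetical paths): with self.path == 'foo/bar',
# Absolutify('baz.cc') returns 'foo/bar/baz.cc', while a path containing '$('
# such as '$(obj)/gen.cc' is only stripped of trailing slashes.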
def ExpandInputRoot(self, template, expansion, dirname):
if '%(INPUT_ROOT)s' not in template and '%(INPUT_DIRNAME)s' not in template:
return template
path = template % {
'INPUT_ROOT': expansion,
'INPUT_DIRNAME': dirname,
}
return path
def _InstallableTargetInstallPath(self):
"""Returns the location of the final output for an installable target."""
# Xcode puts shared_library results into PRODUCT_DIR, and some gyp files
# rely on this. Emulate this behavior for mac.
if (self.type == 'shared_library' and
(self.flavor != 'mac' or self.toolset != 'target')):
# Install all shared libs into a common directory (per toolset) for
# convenient access with LD_LIBRARY_PATH.
return '$(builddir)/lib.%s/%s' % (self.toolset, self.alias)
return '$(builddir)/' + self.alias
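# For illustration (hypothetical targets, non-mac flavor): a target-toolset
# shared library with alias 'libfoo.so' installs to
# '$(builddir)/lib.target/libfoo.so', while an executable 'foo' installs to
# '$(builddir)/foo'.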
def WriteAutoRegenerationRule(params, root_makefile, makefile_name,
build_files):
"""Write the target to regenerate the Makefile."""
options = params['options']
build_files_args = [gyp.common.RelativePath(filename, options.toplevel_dir)
for filename in params['build_files_arg']]
gyp_binary = gyp.common.FixIfRelativePath(params['gyp_binary'],
options.toplevel_dir)
if not gyp_binary.startswith(os.sep):
gyp_binary = os.path.join('.', gyp_binary)
root_makefile.write(
"quiet_cmd_regen_makefile = ACTION Regenerating $@\n"
"cmd_regen_makefile = %(cmd)s\n"
"%(makefile_name)s: %(deps)s\n"
"\t$(call do_cmd,regen_makefile)\n\n" % {
'makefile_name': makefile_name,
'deps': ' '.join(map(Sourceify, build_files)),
'cmd': gyp.common.EncodePOSIXShellList(
[gyp_binary, '-fmake'] +
gyp.RegenerateFlags(options) +
build_files_args)})
def PerformBuild(data, configurations, params):
options = params['options']
for config in configurations:
arguments = ['make']
if options.toplevel_dir and options.toplevel_dir != '.':
arguments += '-C', options.toplevel_dir
arguments.append('BUILDTYPE=' + config)
print 'Building [%s]: %s' % (config, arguments)
subprocess.check_call(arguments)
def GenerateOutput(target_list, target_dicts, data, params):
options = params['options']
flavor = gyp.common.GetFlavor(params)
generator_flags = params.get('generator_flags', {})
builddir_name = generator_flags.get('output_dir', 'out')
android_ndk_version = generator_flags.get('android_ndk_version', None)
default_target = generator_flags.get('default_target', 'all')
def CalculateMakefilePath(build_file, base_name):
"""Determine where to write a Makefile for a given gyp file."""
# Paths in gyp files are relative to the .gyp file, but we want
# paths relative to the source root for the master makefile. Grab
# the path of the .gyp file as the base to relativize against.
# E.g. "foo/bar" when we're constructing targets for "foo/bar/baz.gyp".
base_path = gyp.common.RelativePath(os.path.dirname(build_file),
options.depth)
# We write the file in the base_path directory.
output_file = os.path.join(options.depth, base_path, base_name)
if options.generator_output:
output_file = os.path.join(options.generator_output, output_file)
base_path = gyp.common.RelativePath(os.path.dirname(build_file),
options.toplevel_dir)
return base_path, output_file
# TODO: search for the first non-'Default' target. This can go
# away when we add verification that all targets have the
# necessary configurations.
default_configuration = None
toolsets = set([target_dicts[target]['toolset'] for target in target_list])
for target in target_list:
spec = target_dicts[target]
if spec['default_configuration'] != 'Default':
default_configuration = spec['default_configuration']
break
if not default_configuration:
default_configuration = 'Default'
srcdir = '.'
makefile_name = 'Makefile' + options.suffix
makefile_path = os.path.join(options.toplevel_dir, makefile_name)
if options.generator_output:
global srcdir_prefix
makefile_path = os.path.join(options.generator_output, makefile_path)
srcdir = gyp.common.RelativePath(srcdir, options.generator_output)
srcdir_prefix = '$(srcdir)/'
flock_command = 'flock'
header_params = {
'default_target': default_target,
'builddir': builddir_name,
'default_configuration': default_configuration,
'flock': flock_command,
'flock_index': 1,
'link_commands': LINK_COMMANDS_LINUX,
'extra_commands': '',
'srcdir': srcdir,
}
if flavor == 'mac':
flock_command = './gyp-mac-tool flock'
header_params.update({
'flock': flock_command,
'flock_index': 2,
'link_commands': LINK_COMMANDS_MAC,
'extra_commands': SHARED_HEADER_MAC_COMMANDS,
})
elif flavor == 'android':
header_params.update({
'link_commands': LINK_COMMANDS_ANDROID,
})
elif flavor == 'solaris':
header_params.update({
'flock': './gyp-sun-tool flock',
'flock_index': 2,
'extra_commands': SHARED_HEADER_SUN_COMMANDS,
})
elif flavor == 'freebsd':
# Note: OpenBSD has sysutils/flock. lockf seems to be FreeBSD specific.
header_params.update({
'flock': 'lockf',
})
header_params.update({
'CC.target': GetEnvironFallback(('CC_target', 'CC'), '$(CC)'),
'AR.target': GetEnvironFallback(('AR_target', 'AR'), '$(AR)'),
'CXX.target': GetEnvironFallback(('CXX_target', 'CXX'), '$(CXX)'),
'LINK.target': GetEnvironFallback(('LD_target', 'LD'), '$(LINK)'),
'CC.host': GetEnvironFallback(('CC_host',), 'gcc'),
'AR.host': GetEnvironFallback(('AR_host',), 'ar'),
'CXX.host': GetEnvironFallback(('CXX_host',), 'g++'),
'LINK.host': GetEnvironFallback(('LD_host',), 'g++'),
})
build_file, _, _ = gyp.common.ParseQualifiedTarget(target_list[0])
make_global_settings_array = data[build_file].get('make_global_settings', [])
wrappers = {}
wrappers['LINK'] = '%s $(builddir)/linker.lock' % flock_command
for key, value in make_global_settings_array:
if key.endswith('_wrapper'):
wrappers[key[:-len('_wrapper')]] = '$(abspath %s)' % value
make_global_settings = ''
for key, value in make_global_settings_array:
if re.match('.*_wrapper', key):
continue
if value[0] != '$':
value = '$(abspath %s)' % value
wrapper = wrappers.get(key)
if wrapper:
value = '%s %s' % (wrapper, value)
del wrappers[key]
if key in ('CC', 'CC.host', 'CXX', 'CXX.host'):
make_global_settings += (
'ifneq (,$(filter $(origin %s), undefined default))\n' % key)
# Let gyp-time envvars win over global settings.
if key in os.environ:
value = os.environ[key]
make_global_settings += ' %s = %s\n' % (key, value)
make_global_settings += 'endif\n'
else:
make_global_settings += '%s ?= %s\n' % (key, value)
# TODO(ukai): define cmd when only wrapper is specified in
# make_global_settings.
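# Illustrative example (hypothetical settings, not from any real project):
# with make_global_settings_array = [['CC', '/usr/bin/clang']] and CC not set
# in the gyp-time environment, the loop above emits roughly:
#
#   ifneq (,$(filter $(origin CC), undefined default))
#     CC = $(abspath /usr/bin/clang)
#   endif
#
# '*_wrapper' entries are skipped here; they only populate the wrappers dict
# and get prefixed onto the matching tool when that tool is also listed.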
header_params['make_global_settings'] = make_global_settings
ensure_directory_exists(makefile_path)
root_makefile = open(makefile_path, 'w')
root_makefile.write(SHARED_HEADER % header_params)
# Currently any versions have the same effect, but in future the behavior
# could be different.
if android_ndk_version:
root_makefile.write(
'# Define LOCAL_PATH for build of Android applications.\n'
'LOCAL_PATH := $(call my-dir)\n'
'\n')
for toolset in toolsets:
root_makefile.write('TOOLSET := %s\n' % toolset)
WriteRootHeaderSuffixRules(root_makefile)
# Put build-time support tools next to the root Makefile.
dest_path = os.path.dirname(makefile_path)
gyp.common.CopyTool(flavor, dest_path)
# Find the list of targets that derive from the gyp file(s) being built.
needed_targets = set()
for build_file in params['build_files']:
for target in gyp.common.AllTargets(target_list, target_dicts, build_file):
needed_targets.add(target)
build_files = set()
include_list = set()
for qualified_target in target_list:
build_file, target, toolset = gyp.common.ParseQualifiedTarget(
qualified_target)
this_make_global_settings = data[build_file].get('make_global_settings', [])
assert make_global_settings_array == this_make_global_settings, (
"make_global_settings needs to be the same for all targets.")
build_files.add(gyp.common.RelativePath(build_file, options.toplevel_dir))
included_files = data[build_file]['included_files']
for included_file in included_files:
# The included_files entries are relative to the dir of the build file
# that included them, so we have to undo that and then make them relative
# to the root dir.
relative_include_file = gyp.common.RelativePath(
gyp.common.UnrelativePath(included_file, build_file),
options.toplevel_dir)
abs_include_file = os.path.abspath(relative_include_file)
# If the include file is from the ~/.gyp dir, we should use absolute path
# so that relocating the src dir doesn't break the path.
if (params['home_dot_gyp'] and
abs_include_file.startswith(params['home_dot_gyp'])):
build_files.add(abs_include_file)
else:
build_files.add(relative_include_file)
base_path, output_file = CalculateMakefilePath(build_file,
target + '.' + toolset + options.suffix + '.mk')
spec = target_dicts[qualified_target]
configs = spec['configurations']
if flavor == 'mac':
gyp.xcode_emulation.MergeGlobalXcodeSettingsToSpec(data[build_file], spec)
writer = MakefileWriter(generator_flags, flavor)
writer.Write(qualified_target, base_path, output_file, spec, configs,
part_of_all=qualified_target in needed_targets)
# Our root_makefile lives at the source root. Compute the relative path
# from there to the output_file for including.
mkfile_rel_path = gyp.common.RelativePath(output_file,
os.path.dirname(makefile_path))
include_list.add(mkfile_rel_path)
# Write out per-gyp (sub-project) Makefiles.
depth_rel_path = gyp.common.RelativePath(options.depth, os.getcwd())
for build_file in build_files:
# The paths in build_files were relativized above, so undo that before
# testing against the non-relativized items in target_list and before
# calculating the Makefile path.
build_file = os.path.join(depth_rel_path, build_file)
gyp_targets = [target_dicts[target]['target_name'] for target in target_list
if target.startswith(build_file) and
target in needed_targets]
# Only generate Makefiles for gyp files with targets.
if not gyp_targets:
continue
base_path, output_file = CalculateMakefilePath(build_file,
os.path.splitext(os.path.basename(build_file))[0] + '.Makefile')
makefile_rel_path = gyp.common.RelativePath(os.path.dirname(makefile_path),
os.path.dirname(output_file))
writer.WriteSubMake(output_file, makefile_rel_path, gyp_targets,
builddir_name)
# Write out the sorted list of includes.
root_makefile.write('\n')
for include_file in sorted(include_list):
# We wrap each .mk include in an if statement so users can tell make to
# not load a file by setting NO_LOAD. The below make code says, only
# load the .mk file if the .mk filename doesn't start with a token in
# NO_LOAD.
root_makefile.write(
"ifeq ($(strip $(foreach prefix,$(NO_LOAD),\\\n"
" $(findstring $(join ^,$(prefix)),\\\n"
" $(join ^," + include_file + ")))),)\n")
root_makefile.write(" include " + include_file + "\n")
root_makefile.write("endif\n")
root_makefile.write('\n')
if (not generator_flags.get('standalone')
and generator_flags.get('auto_regeneration', True)):
WriteAutoRegenerationRule(params, root_makefile, makefile_name, build_files)
root_makefile.write(SHARED_FOOTER)
root_makefile.close()
|
gpl-2.0
|
sobercoder/gem5
|
src/arch/x86/isa/insts/x87/load_constants/load_0_1_or_pi.py
|
70
|
2454
|
# Copyright (c) 2007 The Hewlett-Packard Development Company
# Copyright (c) 2013 Mark D. Hill and David A. Wood
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
# Nilay Vaish
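# The macroops below each load a floating-point immediate into the temporary
# register ufp1 and push it onto the x87 stack; the spm=-1 operand on movfp
# adjusts the top-of-stack pointer so st(-1) becomes the new stack top,
# emulating the push performed by FLDZ/FLD1/FLDPI.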
microcode = '''
def macroop FLDZ {
lfpimm ufp1, 0.0
movfp st(-1), ufp1, spm=-1
};
def macroop FLD1 {
lfpimm ufp1, 1.0
movfp st(-1), ufp1, spm=-1
};
def macroop FLDPI {
lfpimm ufp1, 3.14159265359
movfp st(-1), ufp1, spm=-1
};
'''
|
bsd-3-clause
|
nekrut/tools-iuc
|
tools/vsnp/vsnp_determine_ref_from_data.py
|
12
|
9491
|
#!/usr/bin/env python
import argparse
import gzip
import os
from collections import OrderedDict
import yaml
from Bio.SeqIO.QualityIO import FastqGeneralIterator
OUTPUT_DBKEY_DIR = 'output_dbkey'
OUTPUT_METRICS_DIR = 'output_metrics'
def get_sample_name(file_path):
base_file_name = os.path.basename(file_path)
if base_file_name.find(".") > 0:
# Eliminate the extension.
return os.path.splitext(base_file_name)[0]
return base_file_name
def get_dbkey(dnaprints_dict, key, s):
# dnaprints_dict looks something like this:
# {'brucella': {'NC_002945v4': ['11001110', '11011110', '11001100']},
#  'bovis': {'NC_006895': ['11111110', '00010010', '01111011']}}
d = dnaprints_dict.get(key, {})
for data_table_value, v_list in d.items():
if s in v_list:
return data_table_value
return ""
def get_dnaprints_dict(dnaprint_fields):
# A dnaprint_fields entry looks something like this.
# [['AF2122', '/galaxy/tool-data/vsnp/AF2122/dnaprints/NC_002945v4.yml']]
dnaprints_dict = {}
for item in dnaprint_fields:
# Here item is a 2-element list of data table
# components, value and path.
value = item[0]
path = item[1].strip()
with open(path, "rt") as fh:
# The format of all dnaprints yaml
# files is something like this:
# brucella:
# - 0111111111111111
print_dict = yaml.load(fh, Loader=yaml.Loader)
for print_dict_k, print_dict_v in print_dict.items():
dnaprints_v_dict = dnaprints_dict.get(print_dict_k, {})
if len(dnaprints_v_dict) > 0:
# dnaprints_dict already contains k (e.g., 'brucella'),
# and dnaprints_v_dict will be a dictionary that
# looks something like this:
# {'NC_002945v4': ['11001110', '11011110', '11001100']}
value_list = dnaprints_v_dict.get(value, [])
value_list = value_list + print_dict_v
dnaprints_v_dict[value] = value_list
else:
# dnaprints_v_dict is an empty dictionary.
dnaprints_v_dict[value] = print_dict_v
dnaprints_dict[print_dict_k] = dnaprints_v_dict
# dnaprints_dict looks something like this:
# {'brucella': {'NC_002945v4': ['11001110', '11011110', '11001100']},
#  'bovis': {'NC_006895': ['11111110', '00010010', '01111011']}}
return dnaprints_dict
def get_group_and_dbkey(dnaprints_dict, brucella_string, brucella_sum, bovis_string, bovis_sum, para_string, para_sum):
if brucella_sum > 3:
group = "Brucella"
dbkey = get_dbkey(dnaprints_dict, "brucella", brucella_string)
elif bovis_sum > 3:
group = "TB"
dbkey = get_dbkey(dnaprints_dict, "bovis", bovis_string)
elif para_sum >= 1:
group = "paraTB"
dbkey = get_dbkey(dnaprints_dict, "para", para_string)
else:
group = ""
dbkey = ""
return group, dbkey
def get_oligo_dict():
oligo_dict = {}
oligo_dict["01_ab1"] = "AATTGTCGGATAGCCTGGCGATAACGACGC"
oligo_dict["02_ab3"] = "CACACGCGGGCCGGAACTGCCGCAAATGAC"
oligo_dict["03_ab5"] = "GCTGAAGCGGCAGACCGGCAGAACGAATAT"
oligo_dict["04_mel"] = "TGTCGCGCGTCAAGCGGCGTGAAATCTCTG"
oligo_dict["05_suis1"] = "TGCGTTGCCGTGAAGCTTAATTCGGCTGAT"
oligo_dict["06_suis2"] = "GGCAATCATGCGCAGGGCTTTGCATTCGTC"
oligo_dict["07_suis3"] = "CAAGGCAGATGCACATAATCCGGCGACCCG"
oligo_dict["08_ceti1"] = "GTGAATATAGGGTGAATTGATCTTCAGCCG"
oligo_dict["09_ceti2"] = "TTACAAGCAGGCCTATGAGCGCGGCGTGAA"
oligo_dict["10_canis4"] = "CTGCTACATAAAGCACCCGGCGACCGAGTT"
oligo_dict["11_canis"] = "ATCGTTTTGCGGCATATCGCTGACCACAGC"
oligo_dict["12_ovis"] = "CACTCAATCTTCTCTACGGGCGTGGTATCC"
oligo_dict["13_ether2"] = "CGAAATCGTGGTGAAGGACGGGACCGAACC"
oligo_dict["14_63B1"] = "CCTGTTTAAAAGAATCGTCGGAACCGCTCT"
oligo_dict["15_16M0"] = "TCCCGCCGCCATGCCGCCGAAAGTCGCCGT"
oligo_dict["16_mel1b"] = "TCTGTCCAAACCCCGTGACCGAACAATAGA"
oligo_dict["17_tb157"] = "CTCTTCGTATACCGTTCCGTCGTCACCATGGTCCT"
oligo_dict["18_tb7"] = "TCACGCAGCCAACGATATTCGTGTACCGCGACGGT"
oligo_dict["19_tbbov"] = "CTGGGCGACCCGGCCGACCTGCACACCGCGCATCA"
oligo_dict["20_tb5"] = "CCGTGGTGGCGTATCGGGCCCCTGGATCGCGCCCT"
oligo_dict["21_tb2"] = "ATGTCTGCGTAAAGAAGTTCCATGTCCGGGAAGTA"
oligo_dict["22_tb3"] = "GAAGACCTTGATGCCGATCTGGGTGTCGATCTTGA"
oligo_dict["23_tb4"] = "CGGTGTTGAAGGGTCCCCCGTTCCAGAAGCCGGTG"
oligo_dict["24_tb6"] = "ACGGTGATTCGGGTGGTCGACACCGATGGTTCAGA"
oligo_dict["25_para"] = "CCTTTCTTGAAGGGTGTTCG"
oligo_dict["26_para_sheep"] = "CGTGGTGGCGACGGCGGCGGGCCTGTCTAT"
oligo_dict["27_para_cattle"] = "TCTCCTCGGTCGGTGATTCGGGGGCGCGGT"
return oligo_dict
def get_seq_counts(value, fastq_list, gzipped):
count = 0
for fastq_file in fastq_list:
if gzipped:
with gzip.open(fastq_file, 'rt') as fh:
for title, seq, qual in FastqGeneralIterator(fh):
count += seq.count(value)
else:
with open(fastq_file, 'r') as fh:
for title, seq, qual in FastqGeneralIterator(fh):
count += seq.count(value)
return(value, count)
def get_species_counts(fastq_list, gzipped):
count_summary = {}
oligo_dict = get_oligo_dict()
for v1 in oligo_dict.values():
returned_value, count = get_seq_counts(v1, fastq_list, gzipped)
for key, v2 in oligo_dict.items():
if returned_value == v2:
count_summary.update({key: count})
count_list = []
for v in count_summary.values():
count_list.append(v)
brucella_sum = sum(count_list[:16])
bovis_sum = sum(count_list[16:24])
para_sum = sum(count_list[24:])
return count_summary, count_list, brucella_sum, bovis_sum, para_sum
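# How the slices above map to species groups (positions follow the insertion
# order of get_oligo_dict): count_list[:16] are the Brucella oligos,
# count_list[16:24] the TB oligos and count_list[24:] the paraTB oligos.
# For example, a hypothetical count_list of 27 zeros with count_list[17] == 12
# gives brucella_sum == 0, bovis_sum == 12 and para_sum == 0.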
def get_species_strings(count_summary):
binary_dictionary = {}
for k, v in count_summary.items():
if v > 1:
binary_dictionary.update({k: 1})
else:
binary_dictionary.update({k: 0})
binary_dictionary = OrderedDict(sorted(binary_dictionary.items()))
binary_list = []
for v in binary_dictionary.values():
binary_list.append(v)
brucella_binary = binary_list[:16]
brucella_string = ''.join(str(e) for e in brucella_binary)
bovis_binary = binary_list[16:24]
bovis_string = ''.join(str(e) for e in bovis_binary)
para_binary = binary_list[24:]
para_string = ''.join(str(e) for e in para_binary)
return brucella_string, bovis_string, para_string
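# Illustrative behaviour (hypothetical counts): only counts greater than 1
# become a '1' bit, and the keys are sorted first so bit positions are stable.
# A count_summary such as {'01_ab1': 7, '02_ab3': 0, '03_ab5': 1, ...}
# therefore contributes '100...' to brucella_string.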
def output_dbkey(file_name, dbkey, output_file):
# Output the dbkey.
with open(output_file, "w") as fh:
fh.write("%s" % dbkey)
def output_files(fastq_file, count_list, group, dbkey, dbkey_file, metrics_file):
base_file_name = get_sample_name(fastq_file)
output_dbkey(base_file_name, dbkey, dbkey_file)
output_metrics(base_file_name, count_list, group, dbkey, metrics_file)
def output_metrics(file_name, count_list, group, dbkey, output_file):
# Output the metrics.
with open(output_file, "w") as fh:
fh.write("Sample: %s\n" % file_name)
fh.write("Brucella counts: ")
for i in count_list[:16]:
fh.write("%d," % i)
fh.write("\nTB counts: ")
for i in count_list[16:24]:
fh.write("%d," % i)
fh.write("\nPara counts: ")
for i in count_list[24:]:
fh.write("%d," % i)
fh.write("\nGroup: %s" % group)
fh.write("\ndbkey: %s\n" % dbkey)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--dnaprint_fields', action='append', dest='dnaprint_fields', nargs=2, help="List of dnaprints data table value, name and path fields")
parser.add_argument('--read1', action='store', dest='read1', help='Required: single read')
parser.add_argument('--read2', action='store', dest='read2', required=False, default=None, help='Optional: paired read')
parser.add_argument('--gzipped', action='store_true', dest='gzipped', help='Input files are gzipped')
parser.add_argument('--output_dbkey', action='store', dest='output_dbkey', help='Output reference file')
parser.add_argument('--output_metrics', action='store', dest='output_metrics', help='Output metrics file')
args = parser.parse_args()
fastq_list = [args.read1]
if args.read2 is not None:
fastq_list.append(args.read2)
# The value of dnaprint_fields is a list of lists, where each list is
# the [value, name, path] components of the vsnp_dnaprints data table.
# The data_manager_vsnp_dnaprints tool assigns the dbkey column from the
# all_fasta data table to the value column in the vsnp_dnaprints data
# table to ensure a proper mapping for discovering the dbkey.
dnaprints_dict = get_dnaprints_dict(args.dnaprint_fields)
# Here fastq_list consists of either a single read
# or a set of paired reads, producing single outputs.
count_summary, count_list, brucella_sum, bovis_sum, para_sum = get_species_counts(fastq_list, args.gzipped)
brucella_string, bovis_string, para_string = get_species_strings(count_summary)
group, dbkey = get_group_and_dbkey(dnaprints_dict, brucella_string, brucella_sum, bovis_string, bovis_sum, para_string, para_sum)
output_files(args.read1, count_list, group, dbkey, dbkey_file=args.output_dbkey, metrics_file=args.output_metrics)
|
mit
|
robojukie/myrobotlab
|
src/resource/VirtualDevice/Arduino.py
|
3
|
3258
|
#############################################
# This is a basic script to emulate the hardware of
# an Arduino microcontroller. The VirtualDevice
# service will execute this script when
# createVirtualArduino(port) is called
import time
import math
import threading
from random import randint
from org.myrobotlab.codec import ArduinoMsgCodec
working = False
worker = None
analogReadPollingPins = []
digitalReadPollingPins = []
def work():
"""thread worker function"""
global working, analogReadPollingPins
x = 0
working = True
while(working):
x = x + 0.09
y = int(math.cos(x) * 100 + 150)
# retcmd = "publishPin/" + str(pin) + "/3/"+ str(y) +"\n"
# uart.write(codec.encode(retcmd))
for pinx in digitalReadPollingPins:
retcmd = "publishPin/" + str(pinx) + "/0/"+str(randint(0,1))+"\n"
uart.write(codec.encode(retcmd))
for pinx in analogReadPollingPins:
#retcmd = "publishPin/" + str(pinx) + "/4/"+ str(y) +"\n"
retcmd = "publishPin/" + str(pinx) + "/" + str(int(pinx)%4) + "/"+ str(y) +"\n"
uart.write(codec.encode(retcmd))
sleep(0.001)
#print (y)
# TODO -------
# if (digitalReadPollingPins.length() == 0 && analogReadPollingPins.length() == 0
# working = False
print("I am done !")
codec = ArduinoMsgCodec()
virtual = Runtime.getService("virtual")
uart = virtual.getUART()
uart.setCodec("arduino")
logic = virtual.getLogic()
logic.subscribe(uart, "publishRX", "onByte")
logic.subscribe(uart, "publishConnect", "onConnect")
logic.subscribe(uart, "publishPortNames", "onPortNames")
logic.subscribe(uart, "publishDisconnect", "onDisconnect")
def onByte(b):
global working, worker, analogReadPollingPins
print("onByte", b)
command = codec.decode(b)
if command is not None and len(command) > 0:
print("decoded", command)
# rstrip strips the \n from the record
command = command.rstrip()
clist = command.split('/')
if command == "getVersion":
uart.write(codec.encode("publishVersion/21\n"))
elif command.startswith("digitalReadPollingStart"):
print("digitalReadPollingStart")
pin = clist[1]
digitalReadPollingPins.append(pin)
if worker == None:
worker = threading.Thread(name='worker', target=work)
worker.setDaemon(True)
worker.start()
elif command.startswith("digitalReadPollingStop"):
print("digitalReadPollingStop")
pin = clist[1]
digitalReadPollingPins.remove(pin)
elif command.startswith("analogReadPollingStart"):
print("analogReadPollingStart")
pin = clist[1]
analogReadPollingPins.append(pin)
if worker == None:
worker = threading.Thread(name='worker', target=work)
worker.setDaemon(True)
worker.start()
elif command.startswith("analogReadPollingStop"):
print("analogReadPollingStop")
pin = clist[1]
analogReadPollingPins.remove(pin)
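# Example of the decoded records handled above (illustrative values): a
# record such as "analogReadPollingStart/15" adds pin 15 to
# analogReadPollingPins and starts the background worker if needed, while a
# bare "getVersion" is answered immediately with "publishVersion/21\n".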
def off():
# Declare globals so the assignments affect module state rather than locals.
global working, worker
working = False
worker = None
def onConnect(portName):
print("connected to ", portName)
def onPortNames(portName):
print("TODO - list portNames")
def onDisconnect(portName):
print("disconnected from ", portName)
|
apache-2.0
|
CouchPotato/CouchPotatoServer
|
libs/tornado/gen.py
|
64
|
35105
|
"""``tornado.gen`` is a generator-based interface to make it easier to
work in an asynchronous environment. Code using the ``gen`` module
is technically asynchronous, but it is written as a single generator
instead of a collection of separate functions.
For example, the following asynchronous handler::
class AsyncHandler(RequestHandler):
@asynchronous
def get(self):
http_client = AsyncHTTPClient()
http_client.fetch("http://example.com",
callback=self.on_fetch)
def on_fetch(self, response):
do_something_with_response(response)
self.render("template.html")
could be written with ``gen`` as::
class GenAsyncHandler(RequestHandler):
@gen.coroutine
def get(self):
http_client = AsyncHTTPClient()
response = yield http_client.fetch("http://example.com")
do_something_with_response(response)
self.render("template.html")
Most asynchronous functions in Tornado return a `.Future`;
yielding this object returns its `~.Future.result`.
You can also yield a list or dict of ``Futures``, which will be
started at the same time and run in parallel; a list or dict of results will
be returned when they are all finished::
@gen.coroutine
def get(self):
http_client = AsyncHTTPClient()
response1, response2 = yield [http_client.fetch(url1),
http_client.fetch(url2)]
response_dict = yield dict(response3=http_client.fetch(url3),
response4=http_client.fetch(url4))
response3 = response_dict['response3']
response4 = response_dict['response4']
If the `~functools.singledispatch` library is available (standard in
Python 3.4, available via the `singledispatch
<https://pypi.python.org/pypi/singledispatch>`_ package on older
versions), additional types of objects may be yielded. Tornado includes
support for ``asyncio.Future`` and Twisted's ``Deferred`` class when
``tornado.platform.asyncio`` and ``tornado.platform.twisted`` are imported.
See the `convert_yielded` function to extend this mechanism.
.. versionchanged:: 3.2
Dict support added.
.. versionchanged:: 4.1
Support added for yielding ``asyncio`` Futures and Twisted Deferreds
via ``singledispatch``.
"""
from __future__ import absolute_import, division, print_function, with_statement
import collections
import functools
import itertools
import sys
import types
import weakref
from tornado.concurrent import Future, TracebackFuture, is_future, chain_future
from tornado.ioloop import IOLoop
from tornado.log import app_log
from tornado import stack_context
try:
from functools import singledispatch # py34+
except ImportError as e:
try:
from singledispatch import singledispatch # backport
except ImportError:
singledispatch = None
class KeyReuseError(Exception):
pass
class UnknownKeyError(Exception):
pass
class LeakedCallbackError(Exception):
pass
class BadYieldError(Exception):
pass
class ReturnValueIgnoredError(Exception):
pass
class TimeoutError(Exception):
"""Exception raised by ``with_timeout``."""
def engine(func):
"""Callback-oriented decorator for asynchronous generators.
This is an older interface; for new code that does not need to be
compatible with versions of Tornado older than 3.0 the
`coroutine` decorator is recommended instead.
This decorator is similar to `coroutine`, except it does not
return a `.Future` and the ``callback`` argument is not treated
specially.
In most cases, functions decorated with `engine` should take
a ``callback`` argument and invoke it with their result when
they are finished. One notable exception is the
`~tornado.web.RequestHandler` :ref:`HTTP verb methods <verbs>`,
which use ``self.finish()`` in place of a callback argument.
"""
func = _make_coroutine_wrapper(func, replace_callback=False)
@functools.wraps(func)
def wrapper(*args, **kwargs):
future = func(*args, **kwargs)
def final_callback(future):
if future.result() is not None:
raise ReturnValueIgnoredError(
"@gen.engine functions cannot return values: %r" %
(future.result(),))
# The engine interface doesn't give us any way to return
# errors but to raise them into the stack context.
# Save the stack context here to use when the Future has resolved.
future.add_done_callback(stack_context.wrap(final_callback))
return wrapper
def coroutine(func, replace_callback=True):
"""Decorator for asynchronous generators.
Any generator that yields objects from this module must be wrapped
in either this decorator or `engine`.
Coroutines may "return" by raising the special exception
`Return(value) <Return>`. In Python 3.3+, it is also possible for
the function to simply use the ``return value`` statement (prior to
Python 3.3 generators were not allowed to also return values).
In all versions of Python a coroutine that simply wishes to exit
early may use the ``return`` statement without a value.
Functions with this decorator return a `.Future`. Additionally,
they may be called with a ``callback`` keyword argument, which
will be invoked with the future's result when it resolves. If the
coroutine fails, the callback will not be run and an exception
will be raised into the surrounding `.StackContext`. The
``callback`` argument is not visible inside the decorated
function; it is handled by the decorator itself.
From the caller's perspective, ``@gen.coroutine`` is similar to
the combination of ``@return_future`` and ``@gen.engine``.
.. warning::
When exceptions occur inside a coroutine, the exception
information will be stored in the `.Future` object. You must
examine the result of the `.Future` object, or the exception
may go unnoticed by your code. This means yielding the function
if called from another coroutine, using something like
`.IOLoop.run_sync` for top-level calls, or passing the `.Future`
to `.IOLoop.add_future`.
"""
return _make_coroutine_wrapper(func, replace_callback=True)
def _make_coroutine_wrapper(func, replace_callback):
"""The inner workings of ``@gen.coroutine`` and ``@gen.engine``.
The two decorators differ in their treatment of the ``callback``
argument, so we cannot simply implement ``@engine`` in terms of
``@coroutine``.
"""
@functools.wraps(func)
def wrapper(*args, **kwargs):
future = TracebackFuture()
if replace_callback and 'callback' in kwargs:
callback = kwargs.pop('callback')
IOLoop.current().add_future(
future, lambda future: callback(future.result()))
try:
result = func(*args, **kwargs)
except (Return, StopIteration) as e:
result = getattr(e, 'value', None)
except Exception:
future.set_exc_info(sys.exc_info())
return future
else:
if isinstance(result, types.GeneratorType):
# Inline the first iteration of Runner.run. This lets us
# avoid the cost of creating a Runner when the coroutine
# never actually yields, which in turn allows us to
# use "optional" coroutines in critical path code without
# performance penalty for the synchronous case.
try:
orig_stack_contexts = stack_context._state.contexts
yielded = next(result)
if stack_context._state.contexts is not orig_stack_contexts:
yielded = TracebackFuture()
yielded.set_exception(
stack_context.StackContextInconsistentError(
'stack_context inconsistency (probably caused '
'by yield within a "with StackContext" block)'))
except (StopIteration, Return) as e:
future.set_result(getattr(e, 'value', None))
except Exception:
future.set_exc_info(sys.exc_info())
else:
Runner(result, future, yielded)
try:
return future
finally:
# Subtle memory optimization: if next() raised an exception,
# the future's exc_info contains a traceback which
# includes this stack frame. This creates a cycle,
# which will be collected at the next full GC but has
# been shown to greatly increase memory usage of
# benchmarks (relative to the refcount-based scheme
# used in the absence of cycles). We can avoid the
# cycle by clearing the local variable after we return it.
future = None
future.set_result(result)
return future
return wrapper
class Return(Exception):
"""Special exception to return a value from a `coroutine`.
If this exception is raised, its value argument is used as the
result of the coroutine::
@gen.coroutine
def fetch_json(url):
response = yield AsyncHTTPClient().fetch(url)
raise gen.Return(json_decode(response.body))
In Python 3.3, this exception is no longer necessary: the ``return``
statement can be used directly to return a value (previously
``yield`` and ``return`` with a value could not be combined in the
same function).
By analogy with the return statement, the value argument is optional,
but it is never necessary to ``raise gen.Return()``. The ``return``
statement can be used with no arguments instead.
"""
def __init__(self, value=None):
super(Return, self).__init__()
self.value = value
class WaitIterator(object):
"""Provides an iterator to yield the results of futures as they finish.
Yielding a set of futures like this:
``results = yield [future1, future2]``
pauses the coroutine until both ``future1`` and ``future2``
return, and then restarts the coroutine with the results of both
futures. If either future is an exception, the expression will
raise that exception and all the results will be lost.
If you need to get the result of each future as soon as possible,
or if you need the result of some futures even if others produce
errors, you can use ``WaitIterator``:
::
wait_iterator = gen.WaitIterator(future1, future2)
while not wait_iterator.done():
try:
result = yield wait_iterator.next()
except Exception as e:
print "Error {} from {}".format(e, wait_iterator.current_future)
else:
print "Result {} recieved from {} at {}".format(
result, wait_iterator.current_future,
wait_iterator.current_index)
Because results are returned as soon as they are available the
output from the iterator *will not be in the same order as the
input arguments*. If you need to know which future produced the
current result, you can use the attributes
``WaitIterator.current_future``, or ``WaitIterator.current_index``
to get the index of the future from the input list. (if keyword
arguments were used in the construction of the `WaitIterator`,
``current_index`` will use the corresponding keyword).
.. versionadded:: 4.1
"""
def __init__(self, *args, **kwargs):
if args and kwargs:
raise ValueError(
"You must provide args or kwargs, not both")
if kwargs:
self._unfinished = dict((f, k) for (k, f) in kwargs.items())
futures = list(kwargs.values())
else:
self._unfinished = dict((f, i) for (i, f) in enumerate(args))
futures = args
self._finished = collections.deque()
self.current_index = self.current_future = None
self._running_future = None
self_ref = weakref.ref(self)
for future in futures:
future.add_done_callback(functools.partial(
self._done_callback, self_ref))
def done(self):
"""Returns True if this iterator has no more results."""
if self._finished or self._unfinished:
return False
# Clear the 'current' values when iteration is done.
self.current_index = self.current_future = None
return True
def next(self):
"""Returns a `.Future` that will yield the next available result.
Note that this `.Future` will not be the same object as any of
the inputs.
"""
self._running_future = TracebackFuture()
if self._finished:
self._return_result(self._finished.popleft())
return self._running_future
@staticmethod
def _done_callback(self_ref, done):
self = self_ref()
if self is not None:
if self._running_future and not self._running_future.done():
self._return_result(done)
else:
self._finished.append(done)
def _return_result(self, done):
"""Called set the returned future's state that of the future
we yielded, and set the current future for the iterator.
"""
chain_future(done, self._running_future)
self.current_future = done
self.current_index = self._unfinished.pop(done)
class YieldPoint(object):
"""Base class for objects that may be yielded from the generator.
.. deprecated:: 4.0
Use `Futures <.Future>` instead.
"""
def start(self, runner):
"""Called by the runner after the generator has yielded.
No other methods will be called on this object before ``start``.
"""
raise NotImplementedError()
def is_ready(self):
"""Called by the runner to determine whether to resume the generator.
Returns a boolean; may be called more than once.
"""
raise NotImplementedError()
def get_result(self):
"""Returns the value to use as the result of the yield expression.
This method will only be called once, and only after `is_ready`
has returned true.
"""
raise NotImplementedError()
class Callback(YieldPoint):
"""Returns a callable object that will allow a matching `Wait` to proceed.
The key may be any value suitable for use as a dictionary key, and is
used to match ``Callbacks`` to their corresponding ``Waits``. The key
must be unique among outstanding callbacks within a single run of the
generator function, but may be reused across different runs of the same
function (so constants generally work fine).
The callback may be called with zero or one arguments; if an argument
is given it will be returned by `Wait`.
.. deprecated:: 4.0
Use `Futures <.Future>` instead.
"""
def __init__(self, key):
self.key = key
def start(self, runner):
self.runner = runner
runner.register_callback(self.key)
def is_ready(self):
return True
def get_result(self):
return self.runner.result_callback(self.key)
class Wait(YieldPoint):
"""Returns the argument passed to the result of a previous `Callback`.
.. deprecated:: 4.0
Use `Futures <.Future>` instead.
"""
def __init__(self, key):
self.key = key
def start(self, runner):
self.runner = runner
def is_ready(self):
return self.runner.is_ready(self.key)
def get_result(self):
return self.runner.pop_result(self.key)
class WaitAll(YieldPoint):
"""Returns the results of multiple previous `Callbacks <Callback>`.
The argument is a sequence of `Callback` keys, and the result is
a list of results in the same order.
`WaitAll` is equivalent to yielding a list of `Wait` objects.
.. deprecated:: 4.0
Use `Futures <.Future>` instead.
"""
def __init__(self, keys):
self.keys = keys
def start(self, runner):
self.runner = runner
def is_ready(self):
return all(self.runner.is_ready(key) for key in self.keys)
def get_result(self):
return [self.runner.pop_result(key) for key in self.keys]
def Task(func, *args, **kwargs):
"""Adapts a callback-based asynchronous function for use in coroutines.
Takes a function (and optional additional arguments) and runs it with
those arguments plus a ``callback`` keyword argument. The argument passed
to the callback is returned as the result of the yield expression.
.. versionchanged:: 4.0
``gen.Task`` is now a function that returns a `.Future`, instead of
a subclass of `YieldPoint`. It still behaves the same way when
yielded.
"""
future = Future()
def handle_exception(typ, value, tb):
if future.done():
return False
future.set_exc_info((typ, value, tb))
return True
def set_result(result):
if future.done():
return
future.set_result(result)
with stack_context.ExceptionStackContext(handle_exception):
func(*args, callback=_argument_adapter(set_result), **kwargs)
return future
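# Minimal usage sketch (some_async_fn is a hypothetical callback-style API):
#
#   @coroutine
#   def caller():
#       result = yield Task(some_async_fn, arg)  # runs some_async_fn(arg, callback=...)
#
# Whatever value the callback receives becomes the result of the yield
# expression.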
class YieldFuture(YieldPoint):
def __init__(self, future, io_loop=None):
"""Adapts a `.Future` to the `YieldPoint` interface.
.. versionchanged:: 4.1
The ``io_loop`` argument is deprecated.
"""
self.future = future
self.io_loop = io_loop or IOLoop.current()
def start(self, runner):
if not self.future.done():
self.runner = runner
self.key = object()
runner.register_callback(self.key)
self.io_loop.add_future(self.future, runner.result_callback(self.key))
else:
self.runner = None
self.result = self.future.result()
def is_ready(self):
if self.runner is not None:
return self.runner.is_ready(self.key)
else:
return True
def get_result(self):
if self.runner is not None:
return self.runner.pop_result(self.key).result()
else:
return self.result
class Multi(YieldPoint):
"""Runs multiple asynchronous operations in parallel.
Takes a list of ``YieldPoints`` or ``Futures`` and returns a list of
their responses. It is not necessary to call `Multi` explicitly,
since the engine will do so automatically when the generator yields
a list of ``YieldPoints`` or a mixture of ``YieldPoints`` and ``Futures``.
Instead of a list, the argument may also be a dictionary whose values are
Futures, in which case a parallel dictionary is returned mapping the same
keys to their results.
"""
def __init__(self, children):
self.keys = None
if isinstance(children, dict):
self.keys = list(children.keys())
children = children.values()
self.children = []
for i in children:
if is_future(i):
i = YieldFuture(i)
self.children.append(i)
assert all(isinstance(i, YieldPoint) for i in self.children)
self.unfinished_children = set(self.children)
def start(self, runner):
for i in self.children:
i.start(runner)
def is_ready(self):
finished = list(itertools.takewhile(
lambda i: i.is_ready(), self.unfinished_children))
self.unfinished_children.difference_update(finished)
return not self.unfinished_children
def get_result(self):
result = (i.get_result() for i in self.children)
if self.keys is not None:
return dict(zip(self.keys, result))
else:
return list(result)
def multi_future(children):
"""Wait for multiple asynchronous futures in parallel.
Takes a list of ``Futures`` (but *not* other ``YieldPoints``) and returns
a new Future that resolves when all the other Futures are done.
If all the ``Futures`` succeeded, the returned Future's result is a list
of their results. If any failed, the returned Future raises the exception
of the first one to fail.
Instead of a list, the argument may also be a dictionary whose values are
Futures, in which case a parallel dictionary is returned mapping the same
keys to their results.
It is not necessary to call `multi_future` explicitly, since the engine will
do so automatically when the generator yields a list of `Futures`.
This function is faster than the `Multi` `YieldPoint` because it does not
require the creation of a stack context.
.. versionadded:: 4.0
"""
if isinstance(children, dict):
keys = list(children.keys())
children = children.values()
else:
keys = None
assert all(is_future(i) for i in children)
unfinished_children = set(children)
future = Future()
if not children:
future.set_result({} if keys is not None else [])
def callback(f):
unfinished_children.remove(f)
if not unfinished_children:
try:
result_list = [i.result() for i in children]
except Exception:
future.set_exc_info(sys.exc_info())
else:
if keys is not None:
future.set_result(dict(zip(keys, result_list)))
else:
future.set_result(result_list)
for f in children:
f.add_done_callback(callback)
return future
def maybe_future(x):
"""Converts ``x`` into a `.Future`.
If ``x`` is already a `.Future`, it is simply returned; otherwise
it is wrapped in a new `.Future`. This is suitable for use as
``result = yield gen.maybe_future(f())`` when you don't know whether
``f()`` returns a `.Future` or not.
"""
if is_future(x):
return x
else:
fut = Future()
fut.set_result(x)
return fut
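# Sketch: maybe_future(42) returns a Future already resolved to 42, while
# maybe_future(existing_future) returns the existing future unchanged.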
def with_timeout(timeout, future, io_loop=None, quiet_exceptions=()):
"""Wraps a `.Future` in a timeout.
Raises `TimeoutError` if the input future does not complete before
``timeout``, which may be specified in any form allowed by
`.IOLoop.add_timeout` (i.e. a `datetime.timedelta` or an absolute time
relative to `.IOLoop.time`)
If the wrapped `.Future` fails after it has timed out, the exception
will be logged unless it is of a type contained in ``quiet_exceptions``
(which may be an exception type or a sequence of types).
Currently only supports Futures, not other `YieldPoint` classes.
.. versionadded:: 4.0
.. versionchanged:: 4.1
Added the ``quiet_exceptions`` argument and the logging of unhandled
exceptions.
"""
# TODO: allow yield points in addition to futures?
# Tricky to do with stack_context semantics.
#
# It's tempting to optimize this by cancelling the input future on timeout
# instead of creating a new one, but A) we can't know if we are the only
# one waiting on the input future, so cancelling it might disrupt other
# callers and B) concurrent futures can only be cancelled while they are
# in the queue, so cancellation cannot reliably bound our waiting time.
result = Future()
chain_future(future, result)
if io_loop is None:
io_loop = IOLoop.current()
def error_callback(future):
try:
future.result()
except Exception as e:
if not isinstance(e, quiet_exceptions):
app_log.error("Exception in Future %r after timeout",
future, exc_info=True)
def timeout_callback():
result.set_exception(TimeoutError("Timeout"))
# In case the wrapped future goes on to fail, log it.
future.add_done_callback(error_callback)
timeout_handle = io_loop.add_timeout(
timeout, timeout_callback)
if isinstance(future, Future):
# We know this future will resolve on the IOLoop, so we don't
# need the extra thread-safety of IOLoop.add_future (and we also
# don't care about StackContext here.)
future.add_done_callback(
lambda future: io_loop.remove_timeout(timeout_handle))
else:
# concurrent.futures.Futures may resolve on any thread, so we
# need to route them back to the IOLoop.
io_loop.add_future(
future, lambda future: io_loop.remove_timeout(timeout_handle))
return result
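# Illustrative usage inside a coroutine (some_future is a placeholder and
# datetime is assumed to be imported at the call site):
#
#   result = yield with_timeout(datetime.timedelta(seconds=5), some_future)
#
# which raises TimeoutError if some_future has not resolved within 5 seconds.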
def sleep(duration):
"""Return a `.Future` that resolves after the given number of seconds.
When used with ``yield`` in a coroutine, this is a non-blocking
analogue to `time.sleep` (which should not be used in coroutines
because it is blocking)::
yield gen.sleep(0.5)
Note that calling this function on its own does nothing; you must
wait on the `.Future` it returns (usually by yielding it).
.. versionadded:: 4.1
"""
f = Future()
IOLoop.current().call_later(duration, lambda: f.set_result(None))
return f
_null_future = Future()
_null_future.set_result(None)
moment = Future()
moment.__doc__ = \
"""A special object which may be yielded to allow the IOLoop to run for
one iteration.
This is not needed in normal use but it can be helpful in long-running
coroutines that are likely to yield Futures that are ready instantly.
Usage: ``yield gen.moment``
.. versionadded:: 4.0
"""
moment.set_result(None)
class Runner(object):
"""Internal implementation of `tornado.gen.engine`.
Maintains information about pending callbacks and their results.
The results of the generator are stored in ``result_future`` (a
`.TracebackFuture`)
"""
def __init__(self, gen, result_future, first_yielded):
self.gen = gen
self.result_future = result_future
self.future = _null_future
self.yield_point = None
self.pending_callbacks = None
self.results = None
self.running = False
self.finished = False
self.had_exception = False
self.io_loop = IOLoop.current()
# For efficiency, we do not create a stack context until we
# reach a YieldPoint (stack contexts are required for the historical
# semantics of YieldPoints, but not for Futures). When we have
# done so, this field will be set and must be called at the end
# of the coroutine.
self.stack_context_deactivate = None
if self.handle_yield(first_yielded):
self.run()
def register_callback(self, key):
"""Adds ``key`` to the list of callbacks."""
if self.pending_callbacks is None:
# Lazily initialize the old-style YieldPoint data structures.
self.pending_callbacks = set()
self.results = {}
if key in self.pending_callbacks:
raise KeyReuseError("key %r is already pending" % (key,))
self.pending_callbacks.add(key)
def is_ready(self, key):
"""Returns true if a result is available for ``key``."""
if self.pending_callbacks is None or key not in self.pending_callbacks:
raise UnknownKeyError("key %r is not pending" % (key,))
return key in self.results
def set_result(self, key, result):
"""Sets the result for ``key`` and attempts to resume the generator."""
self.results[key] = result
if self.yield_point is not None and self.yield_point.is_ready():
try:
self.future.set_result(self.yield_point.get_result())
except:
self.future.set_exc_info(sys.exc_info())
self.yield_point = None
self.run()
def pop_result(self, key):
"""Returns the result for ``key`` and unregisters it."""
self.pending_callbacks.remove(key)
return self.results.pop(key)
def run(self):
"""Starts or resumes the generator, running until it reaches a
yield point that is not ready.
"""
if self.running or self.finished:
return
try:
self.running = True
while True:
future = self.future
if not future.done():
return
self.future = None
try:
orig_stack_contexts = stack_context._state.contexts
try:
value = future.result()
except Exception:
self.had_exception = True
yielded = self.gen.throw(*sys.exc_info())
else:
yielded = self.gen.send(value)
if stack_context._state.contexts is not orig_stack_contexts:
self.gen.throw(
stack_context.StackContextInconsistentError(
'stack_context inconsistency (probably caused '
'by yield within a "with StackContext" block)'))
except (StopIteration, Return) as e:
self.finished = True
self.future = _null_future
if self.pending_callbacks and not self.had_exception:
# If we ran cleanly without waiting on all callbacks
# raise an error (really more of a warning). If we
# had an exception then some callbacks may have been
# orphaned, so skip the check in that case.
raise LeakedCallbackError(
"finished without waiting for callbacks %r" %
self.pending_callbacks)
self.result_future.set_result(getattr(e, 'value', None))
self.result_future = None
self._deactivate_stack_context()
return
except Exception:
self.finished = True
self.future = _null_future
self.result_future.set_exc_info(sys.exc_info())
self.result_future = None
self._deactivate_stack_context()
return
if not self.handle_yield(yielded):
return
finally:
self.running = False
def handle_yield(self, yielded):
# Lists containing YieldPoints require stack contexts;
# other lists are handled via multi_future in convert_yielded.
if (isinstance(yielded, list) and
any(isinstance(f, YieldPoint) for f in yielded)):
yielded = Multi(yielded)
elif (isinstance(yielded, dict) and
any(isinstance(f, YieldPoint) for f in yielded.values())):
yielded = Multi(yielded)
if isinstance(yielded, YieldPoint):
# YieldPoints are too closely coupled to the Runner to go
# through the generic convert_yielded mechanism.
self.future = TracebackFuture()
def start_yield_point():
try:
yielded.start(self)
if yielded.is_ready():
self.future.set_result(
yielded.get_result())
else:
self.yield_point = yielded
except Exception:
self.future = TracebackFuture()
self.future.set_exc_info(sys.exc_info())
if self.stack_context_deactivate is None:
# Start a stack context if this is the first
# YieldPoint we've seen.
with stack_context.ExceptionStackContext(
self.handle_exception) as deactivate:
self.stack_context_deactivate = deactivate
def cb():
start_yield_point()
self.run()
self.io_loop.add_callback(cb)
return False
else:
start_yield_point()
else:
try:
self.future = convert_yielded(yielded)
except BadYieldError:
self.future = TracebackFuture()
self.future.set_exc_info(sys.exc_info())
if not self.future.done() or self.future is moment:
self.io_loop.add_future(
self.future, lambda f: self.run())
return False
return True
def result_callback(self, key):
return stack_context.wrap(_argument_adapter(
functools.partial(self.set_result, key)))
def handle_exception(self, typ, value, tb):
if not self.running and not self.finished:
self.future = TracebackFuture()
self.future.set_exc_info((typ, value, tb))
self.run()
return True
else:
return False
def _deactivate_stack_context(self):
if self.stack_context_deactivate is not None:
self.stack_context_deactivate()
self.stack_context_deactivate = None
Arguments = collections.namedtuple('Arguments', ['args', 'kwargs'])
def _argument_adapter(callback):
"""Returns a function that when invoked runs ``callback`` with one arg.
If the function returned by this function is called with exactly
one argument, that argument is passed to ``callback``. Otherwise
the args tuple and kwargs dict are wrapped in an `Arguments` object.
"""
def wrapper(*args, **kwargs):
if kwargs or len(args) > 1:
callback(Arguments(args, kwargs))
elif args:
callback(args[0])
else:
callback(None)
return wrapper
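# Behaviour sketch for the adapter above (cb is a hypothetical callable):
#   adapted = _argument_adapter(cb)
#   adapted(42)           # -> cb(42)
#   adapted(1, 2, key=3)  # -> cb(Arguments(args=(1, 2), kwargs={'key': 3}))
#   adapted()             # -> cb(None)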
def convert_yielded(yielded):
"""Convert a yielded object into a `.Future`.
The default implementation accepts lists, dictionaries, and Futures.
If the `~functools.singledispatch` library is available, this function
may be extended to support additional types. For example::
@convert_yielded.register(asyncio.Future)
def _(asyncio_future):
return tornado.platform.asyncio.to_tornado_future(asyncio_future)
.. versionadded:: 4.1
"""
# Lists and dicts containing YieldPoints were handled separately
# via Multi().
if isinstance(yielded, (list, dict)):
return multi_future(yielded)
elif is_future(yielded):
return yielded
else:
raise BadYieldError("yielded unknown object %r" % (yielded,))
if singledispatch is not None:
convert_yielded = singledispatch(convert_yielded)
|
gpl-3.0
|
KhalidGit/flask
|
Work/Trivia - Module 5/env/Lib/site-packages/setuptools/command/bdist_egg.py
|
286
|
18718
|
"""setuptools.command.bdist_egg
Build .egg distributions"""
# This module should be kept compatible with Python 2.3
import sys, os, marshal
from setuptools import Command
from distutils.dir_util import remove_tree, mkpath
try:
# Python 2.7 or >=3.2
from sysconfig import get_path, get_python_version
def _get_purelib():
return get_path("purelib")
except ImportError:
from distutils.sysconfig import get_python_lib, get_python_version
def _get_purelib():
return get_python_lib(False)
from distutils import log
from distutils.errors import DistutilsSetupError
from pkg_resources import get_build_platform, Distribution, ensure_directory
from pkg_resources import EntryPoint
from types import CodeType
from setuptools.compat import basestring, next
from setuptools.extension import Library
def strip_module(filename):
if '.' in filename:
filename = os.path.splitext(filename)[0]
if filename.endswith('module'):
filename = filename[:-6]
return filename
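# For illustration: strip_module('foomodule.so') and strip_module('foomodule')
# both return 'foo', while strip_module('bar.pyd') returns 'bar'. run() below
# uses this to name the .py stub loaders written next to C extensions.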
def write_stub(resource, pyfile):
f = open(pyfile,'w')
f.write('\n'.join([
"def __bootstrap__():",
" global __bootstrap__, __loader__, __file__",
" import sys, pkg_resources, imp",
" __file__ = pkg_resources.resource_filename(__name__,%r)"
% resource,
" __loader__ = None; del __bootstrap__, __loader__",
" imp.load_dynamic(__name__,__file__)",
"__bootstrap__()",
"" # terminal \n
]))
f.close()
# stub __init__.py for packages distributed without one
NS_PKG_STUB = '__import__("pkg_resources").declare_namespace(__name__)'
class bdist_egg(Command):
description = "create an \"egg\" distribution"
user_options = [
('bdist-dir=', 'b',
"temporary directory for creating the distribution"),
('plat-name=', 'p',
"platform name to embed in generated filenames "
"(default: %s)" % get_build_platform()),
('exclude-source-files', None,
"remove all .py files from the generated egg"),
('keep-temp', 'k',
"keep the pseudo-installation tree around after " +
"creating the distribution archive"),
('dist-dir=', 'd',
"directory to put final built distributions in"),
('skip-build', None,
"skip rebuilding everything (for testing/debugging)"),
]
boolean_options = [
'keep-temp', 'skip-build', 'exclude-source-files'
]
def initialize_options (self):
self.bdist_dir = None
self.plat_name = None
self.keep_temp = 0
self.dist_dir = None
self.skip_build = 0
self.egg_output = None
self.exclude_source_files = None
def finalize_options(self):
ei_cmd = self.ei_cmd = self.get_finalized_command("egg_info")
self.egg_info = ei_cmd.egg_info
if self.bdist_dir is None:
bdist_base = self.get_finalized_command('bdist').bdist_base
self.bdist_dir = os.path.join(bdist_base, 'egg')
if self.plat_name is None:
self.plat_name = get_build_platform()
self.set_undefined_options('bdist',('dist_dir', 'dist_dir'))
if self.egg_output is None:
# Compute filename of the output egg
basename = Distribution(
None, None, ei_cmd.egg_name, ei_cmd.egg_version,
get_python_version(),
self.distribution.has_ext_modules() and self.plat_name
).egg_name()
self.egg_output = os.path.join(self.dist_dir, basename+'.egg')
def do_install_data(self):
# Hack for packages that install data to install's --install-lib
self.get_finalized_command('install').install_lib = self.bdist_dir
site_packages = os.path.normcase(os.path.realpath(_get_purelib()))
old, self.distribution.data_files = self.distribution.data_files,[]
for item in old:
if isinstance(item,tuple) and len(item)==2:
if os.path.isabs(item[0]):
realpath = os.path.realpath(item[0])
normalized = os.path.normcase(realpath)
if normalized==site_packages or normalized.startswith(
site_packages+os.sep
):
item = realpath[len(site_packages)+1:], item[1]
# XXX else: raise ???
self.distribution.data_files.append(item)
try:
log.info("installing package data to %s" % self.bdist_dir)
self.call_command('install_data', force=0, root=None)
finally:
self.distribution.data_files = old
def get_outputs(self):
return [self.egg_output]
def call_command(self,cmdname,**kw):
"""Invoke reinitialized command `cmdname` with keyword args"""
for dirname in INSTALL_DIRECTORY_ATTRS:
kw.setdefault(dirname,self.bdist_dir)
kw.setdefault('skip_build',self.skip_build)
kw.setdefault('dry_run', self.dry_run)
cmd = self.reinitialize_command(cmdname, **kw)
self.run_command(cmdname)
return cmd
def run(self):
# Generate metadata first
self.run_command("egg_info")
# We run install_lib before install_data, because some data hacks
# pull their data path from the install_lib command.
log.info("installing library code to %s" % self.bdist_dir)
instcmd = self.get_finalized_command('install')
old_root = instcmd.root; instcmd.root = None
if self.distribution.has_c_libraries() and not self.skip_build:
self.run_command('build_clib')
cmd = self.call_command('install_lib', warn_dir=0)
instcmd.root = old_root
all_outputs, ext_outputs = self.get_ext_outputs()
self.stubs = []
to_compile = []
for (p,ext_name) in enumerate(ext_outputs):
filename,ext = os.path.splitext(ext_name)
pyfile = os.path.join(self.bdist_dir, strip_module(filename)+'.py')
self.stubs.append(pyfile)
log.info("creating stub loader for %s" % ext_name)
if not self.dry_run:
write_stub(os.path.basename(ext_name), pyfile)
to_compile.append(pyfile)
ext_outputs[p] = ext_name.replace(os.sep,'/')
to_compile.extend(self.make_init_files())
if to_compile:
cmd.byte_compile(to_compile)
if self.distribution.data_files:
self.do_install_data()
# Make the EGG-INFO directory
archive_root = self.bdist_dir
egg_info = os.path.join(archive_root,'EGG-INFO')
self.mkpath(egg_info)
if self.distribution.scripts:
script_dir = os.path.join(egg_info, 'scripts')
log.info("installing scripts to %s" % script_dir)
self.call_command('install_scripts',install_dir=script_dir,no_ep=1)
self.copy_metadata_to(egg_info)
native_libs = os.path.join(egg_info, "native_libs.txt")
if all_outputs:
log.info("writing %s" % native_libs)
if not self.dry_run:
ensure_directory(native_libs)
libs_file = open(native_libs, 'wt')
libs_file.write('\n'.join(all_outputs))
libs_file.write('\n')
libs_file.close()
elif os.path.isfile(native_libs):
log.info("removing %s" % native_libs)
if not self.dry_run:
os.unlink(native_libs)
write_safety_flag(
os.path.join(archive_root,'EGG-INFO'), self.zip_safe()
)
if os.path.exists(os.path.join(self.egg_info,'depends.txt')):
log.warn(
"WARNING: 'depends.txt' will not be used by setuptools 0.6!\n"
"Use the install_requires/extras_require setup() args instead."
)
if self.exclude_source_files:
self.zap_pyfiles()
# Make the archive
make_zipfile(self.egg_output, archive_root, verbose=self.verbose,
dry_run=self.dry_run, mode=self.gen_header())
if not self.keep_temp:
remove_tree(self.bdist_dir, dry_run=self.dry_run)
# Add to 'Distribution.dist_files' so that the "upload" command works
getattr(self.distribution,'dist_files',[]).append(
('bdist_egg',get_python_version(),self.egg_output))
def zap_pyfiles(self):
log.info("Removing .py files from temporary directory")
for base,dirs,files in walk_egg(self.bdist_dir):
for name in files:
if name.endswith('.py'):
path = os.path.join(base,name)
log.debug("Deleting %s", path)
os.unlink(path)
def zip_safe(self):
safe = getattr(self.distribution,'zip_safe',None)
if safe is not None:
return safe
log.warn("zip_safe flag not set; analyzing archive contents...")
return analyze_egg(self.bdist_dir, self.stubs)
def make_init_files(self):
"""Create missing package __init__ files"""
init_files = []
for base,dirs,files in walk_egg(self.bdist_dir):
if base==self.bdist_dir:
# don't put an __init__ in the root
continue
for name in files:
if name.endswith('.py'):
if '__init__.py' not in files:
pkg = base[len(self.bdist_dir)+1:].replace(os.sep,'.')
if self.distribution.has_contents_for(pkg):
log.warn("Creating missing __init__.py for %s",pkg)
filename = os.path.join(base,'__init__.py')
if not self.dry_run:
f = open(filename,'w'); f.write(NS_PKG_STUB)
f.close()
init_files.append(filename)
break
else:
# not a package, don't traverse to subdirectories
dirs[:] = []
return init_files
def gen_header(self):
epm = EntryPoint.parse_map(self.distribution.entry_points or '')
ep = epm.get('setuptools.installation',{}).get('eggsecutable')
if ep is None:
return 'w' # not an eggsecutable, do it the usual way.
if not ep.attrs or ep.extras:
raise DistutilsSetupError(
"eggsecutable entry point (%r) cannot have 'extras' "
"or refer to a module" % (ep,)
)
pyver = sys.version[:3]
pkg = ep.module_name
full = '.'.join(ep.attrs)
base = ep.attrs[0]
basename = os.path.basename(self.egg_output)
header = (
"#!/bin/sh\n"
'if [ `basename $0` = "%(basename)s" ]\n'
'then exec python%(pyver)s -c "'
"import sys, os; sys.path.insert(0, os.path.abspath('$0')); "
"from %(pkg)s import %(base)s; sys.exit(%(full)s())"
'" "$@"\n'
'else\n'
' echo $0 is not the correct name for this egg file.\n'
' echo Please rename it back to %(basename)s and try again.\n'
' exec false\n'
'fi\n'
) % locals()
if not self.dry_run:
mkpath(os.path.dirname(self.egg_output), dry_run=self.dry_run)
f = open(self.egg_output, 'w')
f.write(header)
f.close()
return 'a'
def copy_metadata_to(self, target_dir):
"Copy metadata (egg info) to the target_dir"
# normalize the path (so that a forward-slash in egg_info will
# match using startswith below)
norm_egg_info = os.path.normpath(self.egg_info)
prefix = os.path.join(norm_egg_info,'')
for path in self.ei_cmd.filelist.files:
if path.startswith(prefix):
target = os.path.join(target_dir, path[len(prefix):])
ensure_directory(target)
self.copy_file(path, target)
def get_ext_outputs(self):
"""Get a list of relative paths to C extensions in the output distro"""
all_outputs = []
ext_outputs = []
paths = {self.bdist_dir:''}
for base, dirs, files in os.walk(self.bdist_dir):
for filename in files:
if os.path.splitext(filename)[1].lower() in NATIVE_EXTENSIONS:
all_outputs.append(paths[base]+filename)
for filename in dirs:
paths[os.path.join(base,filename)] = paths[base]+filename+'/'
if self.distribution.has_ext_modules():
build_cmd = self.get_finalized_command('build_ext')
for ext in build_cmd.extensions:
if isinstance(ext,Library):
continue
fullname = build_cmd.get_ext_fullname(ext.name)
filename = build_cmd.get_ext_filename(fullname)
if not os.path.basename(filename).startswith('dl-'):
if os.path.exists(os.path.join(self.bdist_dir,filename)):
ext_outputs.append(filename)
return all_outputs, ext_outputs
NATIVE_EXTENSIONS = dict.fromkeys('.dll .so .dylib .pyd'.split())
def walk_egg(egg_dir):
"""Walk an unpacked egg's contents, skipping the metadata directory"""
walker = os.walk(egg_dir)
base,dirs,files = next(walker)
if 'EGG-INFO' in dirs:
dirs.remove('EGG-INFO')
yield base,dirs,files
for bdf in walker:
yield bdf
def analyze_egg(egg_dir, stubs):
# check for existing flag in EGG-INFO
for flag,fn in safety_flags.items():
if os.path.exists(os.path.join(egg_dir,'EGG-INFO',fn)):
return flag
if not can_scan(): return False
safe = True
for base, dirs, files in walk_egg(egg_dir):
for name in files:
if name.endswith('.py') or name.endswith('.pyw'):
continue
elif name.endswith('.pyc') or name.endswith('.pyo'):
# always scan, even if we already know we're not safe
safe = scan_module(egg_dir, base, name, stubs) and safe
return safe
def write_safety_flag(egg_dir, safe):
# Write or remove zip safety flag file(s)
for flag,fn in safety_flags.items():
fn = os.path.join(egg_dir, fn)
if os.path.exists(fn):
if safe is None or bool(safe) != flag:
os.unlink(fn)
elif safe is not None and bool(safe)==flag:
f=open(fn,'wt'); f.write('\n'); f.close()
safety_flags = {
True: 'zip-safe',
False: 'not-zip-safe',
}
def scan_module(egg_dir, base, name, stubs):
"""Check whether module possibly uses unsafe-for-zipfile stuff"""
filename = os.path.join(base,name)
if filename[:-1] in stubs:
return True # Extension module
pkg = base[len(egg_dir)+1:].replace(os.sep,'.')
module = pkg+(pkg and '.' or '')+os.path.splitext(name)[0]
if sys.version_info < (3, 3):
skip = 8 # skip magic & date
else:
skip = 12 # skip magic & date & file size
f = open(filename,'rb'); f.read(skip)
code = marshal.load(f); f.close()
safe = True
symbols = dict.fromkeys(iter_symbols(code))
for bad in ['__file__', '__path__']:
if bad in symbols:
log.warn("%s: module references %s", module, bad)
safe = False
if 'inspect' in symbols:
for bad in [
            'getsource', 'getabsfile', 'getsourcefile', 'getfile',
            'getsourcelines', 'findsource', 'getcomments', 'getframeinfo',
'getinnerframes', 'getouterframes', 'stack', 'trace'
]:
if bad in symbols:
log.warn("%s: module MAY be using inspect.%s", module, bad)
safe = False
if '__name__' in symbols and '__main__' in symbols and '.' not in module:
if sys.version[:3]=="2.4": # -m works w/zipfiles in 2.5
log.warn("%s: top-level module may be 'python -m' script", module)
safe = False
return safe
def iter_symbols(code):
"""Yield names and strings used by `code` and its nested code objects"""
for name in code.co_names: yield name
for const in code.co_consts:
if isinstance(const,basestring):
yield const
elif isinstance(const,CodeType):
for name in iter_symbols(const):
yield name
def can_scan():
if not sys.platform.startswith('java') and sys.platform != 'cli':
# CPython, PyPy, etc.
return True
log.warn("Unable to analyze compiled code on this platform.")
log.warn("Please ask the author to include a 'zip_safe'"
" setting (either True or False) in the package's setup.py")
# Attribute names of options for commands that might need to be convinced to
# install to the egg build directory
INSTALL_DIRECTORY_ATTRS = [
'install_lib', 'install_dir', 'install_data', 'install_base'
]
def make_zipfile(zip_filename, base_dir, verbose=0, dry_run=0, compress=None,
mode='w'
):
"""Create a zip file from all the files under 'base_dir'. The output
zip file will be named 'base_dir' + ".zip". Uses either the "zipfile"
Python module (if available) or the InfoZIP "zip" utility (if installed
and found on the default search path). If neither tool is available,
raises DistutilsExecError. Returns the name of the output zip file.
"""
import zipfile
mkpath(os.path.dirname(zip_filename), dry_run=dry_run)
log.info("creating '%s' and adding '%s' to it", zip_filename, base_dir)
def visit(z, dirname, names):
for name in names:
path = os.path.normpath(os.path.join(dirname, name))
if os.path.isfile(path):
p = path[len(base_dir)+1:]
if not dry_run:
z.write(path, p)
log.debug("adding '%s'" % p)
if compress is None:
compress = (sys.version>="2.4") # avoid 2.3 zipimport bug when 64 bits
compression = [zipfile.ZIP_STORED, zipfile.ZIP_DEFLATED][bool(compress)]
if not dry_run:
z = zipfile.ZipFile(zip_filename, mode, compression=compression)
for dirname, dirs, files in os.walk(base_dir):
visit(z, dirname, files)
z.close()
else:
for dirname, dirs, files in os.walk(base_dir):
visit(None, dirname, files)
return zip_filename
#
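# --- Illustrative usage sketch (not part of the original module) ---
# A minimal way make_zipfile() might be driven directly; the paths
# 'build/staging' and 'dist/example.egg' are hypothetical:
#
#   make_zipfile('dist/example.egg', 'build/staging', verbose=1, mode='w')
#
# Every regular file below 'build/staging' is added to the archive under its
# path relative to that directory, using ZIP_DEFLATED when compression is
# enabled and ZIP_STORED otherwise.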
|
apache-2.0
|
trishnaguha/ansible
|
lib/ansible/modules/network/meraki/meraki_device.py
|
43
|
15823
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Kevin Breit (@kbreit) <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = r'''
---
module: meraki_device
short_description: Manage devices in the Meraki cloud
version_added: "2.7"
description:
- Visibility into devices associated to a Meraki environment.
notes:
- This module does not support claiming of devices or licenses into a Meraki organization.
- More information about the Meraki API can be found at U(https://dashboard.meraki.com/api_docs).
- Some of the options are likely only used for developers within Meraki.
options:
state:
description:
- Query an organization.
choices: [absent, present, query]
default: query
org_name:
description:
- Name of organization.
- If C(clone) is specified, C(org_name) is the name of the new organization.
aliases: [ organization ]
org_id:
description:
- ID of organization.
net_name:
description:
- Name of a network.
aliases: [network]
net_id:
description:
- ID of a network.
serial:
description:
- Serial number of a device to query.
hostname:
description:
- Hostname of network device to search for.
aliases: [name]
model:
description:
- Model of network device to search for.
tags:
description:
- Space delimited list of tags to assign to device.
lat:
description:
- Latitude of device's geographic location.
- Use negative number for southern hemisphere.
aliases: [latitude]
lng:
description:
- Longitude of device's geographic location.
- Use negative number for western hemisphere.
aliases: [longitude]
address:
description:
- Postal address of device's location.
move_map_marker:
description:
- Whether or not to set the latitude and longitude of a device based on the new address.
- Only applies when C(lat) and C(lng) are not specified.
type: bool
serial_lldp_cdp:
description:
- Serial number of device to query LLDP/CDP information from.
lldp_cdp_timespan:
description:
- Timespan, in seconds, used to query LLDP and CDP information.
- Must be less than 1 month.
serial_uplink:
description:
- Serial number of device to query uplink information from.
author:
- Kevin Breit (@kbreit)
extends_documentation_fragment: meraki
'''
EXAMPLES = r'''
- name: Query all devices in an organization.
meraki_device:
auth_key: abc12345
org_name: YourOrg
state: query
delegate_to: localhost
- name: Query all devices in a network.
meraki_device:
auth_key: abc12345
org_name: YourOrg
net_name: YourNet
state: query
delegate_to: localhost
- name: Query a device by serial number.
meraki_device:
auth_key: abc12345
org_name: YourOrg
net_name: YourNet
serial: ABC-123
state: query
delegate_to: localhost
- name: Lookup uplink information about a device.
meraki_device:
auth_key: abc12345
org_name: YourOrg
net_name: YourNet
serial_uplink: ABC-123
state: query
delegate_to: localhost
- name: Lookup LLDP and CDP information about devices connected to specified device.
meraki_device:
auth_key: abc12345
org_name: YourOrg
net_name: YourNet
serial_lldp_cdp: ABC-123
state: query
delegate_to: localhost
- name: Lookup a device by hostname.
meraki_device:
auth_key: abc12345
org_name: YourOrg
net_name: YourNet
hostname: main-switch
state: query
delegate_to: localhost
- name: Query all devices of a specific model.
meraki_device:
auth_key: abc123
org_name: YourOrg
net_name: YourNet
model: MR26
state: query
delegate_to: localhost
- name: Update information about a device.
meraki_device:
auth_key: abc123
org_name: YourOrg
net_name: YourNet
state: present
serial: '{{serial}}'
name: mr26
address: 1060 W. Addison St., Chicago, IL
lat: 41.948038
lng: -87.65568
tags: recently-added
delegate_to: localhost
- name: Claim a device into a network.
meraki_device:
auth_key: abc123
org_name: YourOrg
net_name: YourNet
serial: ABC-123
state: present
delegate_to: localhost
- name: Remove a device from a network.
meraki_device:
auth_key: abc123
org_name: YourOrg
net_name: YourNet
serial: ABC-123
state: absent
delegate_to: localhost
'''
RETURN = r'''
response:
description: Data returned from Meraki dashboard.
type: dict
returned: info
'''
import os
from ansible.module_utils.basic import AnsibleModule, json, env_fallback
from ansible.module_utils._text import to_native
from ansible.module_utils.network.meraki.meraki import MerakiModule, meraki_argument_spec
def format_tags(tags):
return " {tags} ".format(tags=tags)
def is_device_valid(meraki, serial, data):
for device in data:
if device['serial'] == serial:
return True
return False
def get_org_devices(meraki, org_id):
path = meraki.construct_path('get_all_org', org_id=org_id)
response = meraki.request(path, method='GET')
if meraki.status != 200:
meraki.fail_json(msg='Failed to query all devices belonging to the organization')
return response
def main():
# define the available arguments/parameters that a user can pass to
# the module
argument_spec = meraki_argument_spec()
argument_spec.update(state=dict(type='str', choices=['absent', 'present', 'query'], default='query'),
net_name=dict(type='str', aliases=['network']),
net_id=dict(type='str'),
serial=dict(type='str'),
serial_uplink=dict(type='str'),
serial_lldp_cdp=dict(type='str'),
lldp_cdp_timespan=dict(type='int'),
hostname=dict(type='str', aliases=['name']),
model=dict(type='str'),
tags=dict(type='str'),
lat=dict(type='float', aliases=['latitude']),
lng=dict(type='float', aliases=['longitude']),
address=dict(type='str'),
move_map_marker=dict(type='bool'),
)
# seed the result dict in the object
# we primarily care about changed and state
# change is if this module effectively modified the target
# state will include any data that you want your module to pass back
# for consumption, for example, in a subsequent task
result = dict(
changed=False,
)
# the AnsibleModule object will be our abstraction working with Ansible
# this includes instantiation, a couple of common attr would be the
# args/params passed to the execution, as well as if the module
# supports check mode
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True,
)
meraki = MerakiModule(module, function='device')
if meraki.params['serial_lldp_cdp'] and not meraki.params['lldp_cdp_timespan']:
meraki.fail_json(msg='lldp_cdp_timespan is required when querying LLDP and CDP information')
if meraki.params['net_name'] and meraki.params['net_id']:
meraki.fail_json(msg='net_name and net_id are mutually exclusive')
meraki.params['follow_redirects'] = 'all'
query_urls = {'device': '/networks/{net_id}/devices'}
query_org_urls = {'device': '/organizations/{org_id}/inventory'}
query_device_urls = {'device': '/networks/{net_id}/devices/'}
claim_device_urls = {'device': '/networks/{net_id}/devices/claim'}
bind_org_urls = {'device': '/organizations/{org_id}/claim'}
update_device_urls = {'device': '/networks/{net_id}/devices/'}
delete_device_urls = {'device': '/networks/{net_id}/devices/'}
meraki.url_catalog['get_all'].update(query_urls)
meraki.url_catalog['get_all_org'] = query_org_urls
meraki.url_catalog['get_device'] = query_device_urls
meraki.url_catalog['create'] = claim_device_urls
meraki.url_catalog['bind_org'] = bind_org_urls
meraki.url_catalog['update'] = update_device_urls
meraki.url_catalog['delete'] = delete_device_urls
payload = None
# if the user is working with this module in only check mode we do not
# want to make any changes to the environment, just return the current
# state with no modifications
# FIXME: Work with Meraki so they can implement a check mode
if module.check_mode:
meraki.exit_json(**meraki.result)
# execute checks for argument completeness
# manipulate or modify the state as needed (this is going to be the
# part where your module will do what it needs to do)
org_id = meraki.params['org_id']
if org_id is None:
org_id = meraki.get_org_id(meraki.params['org_name'])
nets = meraki.get_nets(org_id=org_id)
net_id = None
if meraki.params['net_id'] or meraki.params['net_name']:
net_id = meraki.params['net_id']
if net_id is None:
net_id = meraki.get_net_id(net_name=meraki.params['net_name'], data=nets)
if meraki.params['state'] == 'query':
if meraki.params['net_name'] or meraki.params['net_id']:
device = []
if meraki.params['serial']:
path = meraki.construct_path('get_device', net_id=net_id) + meraki.params['serial']
request = meraki.request(path, method='GET')
device.append(request)
meraki.result['data'] = device
elif meraki.params['serial_uplink']:
path = meraki.construct_path('get_device', net_id=net_id) + meraki.params['serial_uplink'] + '/uplink'
meraki.result['data'] = (meraki.request(path, method='GET'))
elif meraki.params['serial_lldp_cdp']:
if meraki.params['lldp_cdp_timespan'] > 2592000:
meraki.fail_json(msg='LLDP/CDP timespan must be less than a month (2592000 seconds)')
path = meraki.construct_path('get_device', net_id=net_id) + meraki.params['serial_lldp_cdp'] + '/lldp_cdp'
path = path + '?timespan=' + str(meraki.params['lldp_cdp_timespan'])
device.append(meraki.request(path, method='GET'))
meraki.result['data'] = device
elif meraki.params['hostname']:
path = meraki.construct_path('get_all', net_id=net_id)
devices = meraki.request(path, method='GET')
for unit in devices:
if unit['name'] == meraki.params['hostname']:
device.append(unit)
meraki.result['data'] = device
elif meraki.params['model']:
path = meraki.construct_path('get_all', net_id=net_id)
devices = meraki.request(path, method='GET')
device_match = []
for device in devices:
if device['model'] == meraki.params['model']:
device_match.append(device)
meraki.result['data'] = device_match
else:
path = meraki.construct_path('get_all', net_id=net_id)
request = meraki.request(path, method='GET')
meraki.result['data'] = request
else:
path = meraki.construct_path('get_all_org', org_id=org_id)
devices = meraki.request(path, method='GET')
if meraki.params['serial']:
for device in devices:
if device['serial'] == meraki.params['serial']:
meraki.result['data'] = device
else:
meraki.result['data'] = devices
elif meraki.params['state'] == 'present':
device = []
if meraki.params['hostname']:
query_path = meraki.construct_path('get_all', net_id=net_id)
device_list = meraki.request(query_path, method='GET')
if is_device_valid(meraki, meraki.params['serial'], device_list):
payload = {'name': meraki.params['hostname'],
'tags': format_tags(meraki.params['tags']),
'lat': meraki.params['lat'],
'lng': meraki.params['lng'],
'address': meraki.params['address'],
'moveMapMarker': meraki.params['move_map_marker'],
}
query_path = meraki.construct_path('get_device', net_id=net_id) + meraki.params['serial']
device_data = meraki.request(query_path, method='GET')
ignore_keys = ['lanIp', 'serial', 'mac', 'model', 'networkId', 'moveMapMarker', 'wan1Ip', 'wan2Ip']
if meraki.is_update_required(device_data, payload, optional_ignore=ignore_keys):
path = meraki.construct_path('update', net_id=net_id) + meraki.params['serial']
updated_device = []
updated_device.append(meraki.request(path, method='PUT', payload=json.dumps(payload)))
meraki.result['data'] = updated_device
meraki.result['changed'] = True
else:
if net_id is None:
device_list = get_org_devices(meraki, org_id)
if is_device_valid(meraki, meraki.params['serial'], device_list) is False:
payload = {'serial': meraki.params['serial']}
path = meraki.construct_path('bind_org', org_id=org_id)
created_device = []
created_device.append(meraki.request(path, method='POST', payload=json.dumps(payload)))
meraki.result['data'] = created_device
meraki.result['changed'] = True
else:
query_path = meraki.construct_path('get_all', net_id=net_id)
device_list = meraki.request(query_path, method='GET')
if is_device_valid(meraki, meraki.params['serial'], device_list) is False:
if net_id:
payload = {'serial': meraki.params['serial']}
path = meraki.construct_path('create', net_id=net_id)
created_device = []
created_device.append(meraki.request(path, method='POST', payload=json.dumps(payload)))
meraki.result['data'] = created_device
meraki.result['changed'] = True
elif meraki.params['state'] == 'absent':
device = []
query_path = meraki.construct_path('get_all', net_id=net_id)
device_list = meraki.request(query_path, method='GET')
if is_device_valid(meraki, meraki.params['serial'], device_list) is True:
path = meraki.construct_path('delete', net_id=net_id)
path = path + meraki.params['serial'] + '/remove'
request = meraki.request(path, method='POST')
meraki.result['changed'] = True
# in the event of a successful module execution, you will want to
# simple AnsibleModule.exit_json(), passing the key/value results
meraki.exit_json(**meraki.result)
if __name__ == '__main__':
main()
|
gpl-3.0
|
aebrahim/cobrapy
|
cobra/solvers/esolver.py
|
5
|
6286
|
from subprocess import check_output, check_call, CalledProcessError
from os import unlink, devnull
from os.path import isfile
from tempfile import NamedTemporaryFile
from fractions import Fraction
from six.moves import zip
from . import cglpk
from .wrappers import *
# detect paths to system calls for esolver and gzip
with open(devnull, "w") as DEVNULL:
try:
ESOLVER_COMMAND = check_output(["which", "esolver"],
stderr=DEVNULL).strip()
__esolver_version__ = check_output(["esolver", "-v"], stderr=DEVNULL)
except CalledProcessError:
raise RuntimeError("esolver command not found")
try:
GZIP_COMMAND = check_output(["which", "gzip"], stderr=DEVNULL).strip()
except CalledProcessError:
raise RuntimeError("gzip command not found")
del DEVNULL
solver_name = "esolver"
class Esolver(cglpk.GLP):
"""contain an LP which will be solved through the QSopt_ex
The LP is stored using a GLPK object, and written out to an
LP file which is then solved by the esolver command."""
def __init__(self, cobra_model=None):
cglpk.GLP.__init__(self, cobra_model)
self.solution_filepath = None
self.basis_filepath = None
self.rational_solution = False
self.verbose = False
self.clean_up = True # clean up files
def _clean(self, filename):
"""remove old files"""
if self.clean_up and filename is not None and isfile(filename):
unlink(filename)
def set_parameter(self, parameter_name, value):
if parameter_name == "GLP":
raise Exception("can not be set this way")
if parameter_name == "objective_sense":
self.set_objective_sense(value)
if not hasattr(self, parameter_name):
raise ValueError("Unkonwn parameter '%s'" % parameter_name)
setattr(self, parameter_name, value)
def solve_problem(self, **solver_parameters):
if "objective_sense" in solver_parameters:
self.set_objective_sense(solver_parameters.pop("objective_sense"))
for key, value in solver_parameters.items():
self.set_parameter(key, value)
# remove the old solution file
self._clean(self.solution_filepath)
with NamedTemporaryFile(suffix=".lp", delete=False) as f:
lp_filepath = f.name
self.write(lp_filepath)
existing_basis = self.basis_filepath
with NamedTemporaryFile(suffix=".bas", delete=False) as f:
self.basis_filepath = f.name
with NamedTemporaryFile(suffix=".sol") as f:
self.solution_filepath = f.name
command = [ESOLVER_COMMAND, "-b", self.basis_filepath,
"-O", self.solution_filepath[:-4]]
if existing_basis is not None and isfile(existing_basis):
command.extend(["-B", existing_basis])
command.extend(["-L", lp_filepath])
command_kwargs = {}
if self.verbose:
print(" ".join(command))
DEVNULL = None
else:
DEVNULL = open(devnull, 'wb')
command_kwargs["stdout"] = DEVNULL
command_kwargs["stderr"] = DEVNULL
try:
check_call(command, **command_kwargs)
failed = False
except CalledProcessError as e:
failed = True
if failed:
self.basis_filepath = existing_basis
existing_basis = None
# Sometimes on failure a solution isn't written out
if not isfile(self.solution_filepath):
with open(self.solution_filepath, "w") as outfile:
outfile.write("=infeasible\n")
elif isfile(self.solution_filepath + ".gz"):
# the solution may be written out compressed
check_call([GZIP_COMMAND, "-d", self.solution_filepath + ".gz"])
if DEVNULL is not None:
DEVNULL.close()
self._clean(lp_filepath)
self._clean(existing_basis) # replaced with the new basis
def get_status(self):
with open(self.solution_filepath) as infile:
return infile.readline().split("=")[1].strip().lower()
def _format(self, value):
"""convert a string value into either a fraction or float"""
value = Fraction(value)
return value if self.rational_solution else float(value)
def get_objective_value(self):
with open(self.solution_filepath) as infile:
status = infile.readline().split("=")[1].strip().lower()
if status != "optimal":
raise RuntimeError("status not optimal")
infile.readline()
return self._format(infile.readline().split("=")[1].strip())
def format_solution(self, cobra_model):
m = cobra_model
solution = m.solution.__class__(None)
with open(self.solution_filepath) as infile:
solution.status = infile.readline().split("=")[1].strip().lower()
if solution.status != "optimal":
return solution
infile.readline()
solution.f = self._format(Fraction(infile.readline()
.split("=")[1].strip()))
infile.readline()
value_dict = {}
for line in infile:
if line.endswith(":\n"):
break
varname, value = line.split("=")
value_dict[varname.strip()] = self._format(value.strip())
dual_dict = {}
for line in infile:
if line.endswith(":\n"):
break
varname, value = line.split("=")
dual_dict[varname.strip()] = self._format(value.strip())
solution.x = [value_dict.get("x_%d" % (i + 1), 0)
for i in range(len(m.reactions))]
solution.x_dict = {r.id: v for r, v in zip(m.reactions, solution.x)}
solution.y = [dual_dict.get("r_%d" % (i + 1), 0)
for i in range(len(m.metabolites))]
solution.y_dict = {m.id: v for m, v in zip(m.metabolites, solution.y)}
return solution
# wrappers for the classmethods at the module level
create_problem = Esolver.create_problem
solve = Esolver.solve
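# --- Rough usage sketch (not part of the original file) ---
# Assuming a constructed cobra Model instance named `model`, the module-level
# wrappers above would typically be exercised along these lines:
#
#   lp = create_problem(model)                  # GLPK-backed LP for esolver
#   lp.rational_solution = True                 # report exact Fractions
#   lp.solve_problem(objective_sense="maximize")
#   if lp.get_status() == "optimal":
#       solution = lp.format_solution(model)
#
# Each solve writes the LP to a temporary .lp file, invokes the esolver
# binary (reusing the previous basis file when one exists), and then parses
# the .sol file that esolver writes back.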
|
lgpl-2.1
|
shawnadelic/shuup
|
shuup/front/apps/auth/urls.py
|
2
|
1132
|
# -*- coding: utf-8 -*-
# This file is part of Shuup.
#
# Copyright (c) 2012-2016, Shoop Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
from django.conf.urls import patterns, url
from .views import (
LoginView, LogoutView, RecoverPasswordCompleteView,
RecoverPasswordConfirmView, RecoverPasswordSentView, RecoverPasswordView
)
urlpatterns = patterns(
'',
url(r'^login/$',
LoginView.as_view(),
name='login'),
url(r'^logout/$',
LogoutView.as_view(),
name='logout'),
url(r'^recover-password/$',
RecoverPasswordView.as_view(),
name='recover_password'),
url(r'^recover-password/(?P<uidb64>.+)/(?P<token>.+)/$',
RecoverPasswordConfirmView.as_view(),
name='recover_password_confirm'),
url(r'^recover-password/sent/$',
RecoverPasswordSentView.as_view(),
name='recover_password_sent'),
url(r'^recover-password/complete/$',
RecoverPasswordCompleteView.as_view(),
name='recover_password_complete'),
)
|
agpl-3.0
|
tuxcoindev/tuxcoin
|
contrib/pyminer/pyminer.py
|
2
|
6435
|
#!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
ERR_SLEEP = 15
MAX_NONCE = 1000000L
settings = {}
pp = pprint.PrettyPrinter(indent=4)
class BitcoinRPC:
OBJID = 1
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def rpc(self, method, params=None):
self.OBJID += 1
obj = { 'version' : '1.1',
'method' : method,
'id' : self.OBJID }
if params is None:
obj['params'] = []
else:
obj['params'] = params
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print "JSON-RPC: no response"
return None
body = resp.read()
resp_obj = json.loads(body)
if resp_obj is None:
print "JSON-RPC: cannot JSON-decode body"
return None
if 'error' in resp_obj and resp_obj['error'] != None:
return resp_obj['error']
if 'result' not in resp_obj:
print "JSON-RPC: no result in object"
return None
return resp_obj['result']
def getblockcount(self):
return self.rpc('getblockcount')
def getwork(self, data=None):
return self.rpc('getwork', data)
def uint32(x):
return x & 0xffffffffL
def bytereverse(x):
return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
(((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
def bufreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
word = struct.unpack('@I', in_buf[i:i+4])[0]
out_words.append(struct.pack('@I', bytereverse(word)))
return ''.join(out_words)
def wordreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
out_words.append(in_buf[i:i+4])
out_words.reverse()
return ''.join(out_words)
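# Worked example (added for clarity, not in the original script): on a
# little-endian machine, bufreverse() byte-swaps each 32-bit word in place
# while wordreverse() reverses the order of the words themselves:
#
#   buf = '\x01\x02\x03\x04\x05\x06\x07\x08'
#   bufreverse(buf)   # -> '\x04\x03\x02\x01\x08\x07\x06\x05'
#   wordreverse(buf)  # -> '\x05\x06\x07\x08\x01\x02\x03\x04'
#
# The two are combined below when turning hash bytes into the 256-bit
# integer that is compared against the target.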
class Miner:
def __init__(self, id):
self.id = id
self.max_nonce = MAX_NONCE
def work(self, datastr, targetstr):
# decode work data hex string to binary
static_data = datastr.decode('hex')
static_data = bufreverse(static_data)
# the first 76b of 80b do not change
blk_hdr = static_data[:76]
# decode 256-bit target value
targetbin = targetstr.decode('hex')
targetbin = targetbin[::-1] # byte-swap and dword-swap
targetbin_str = targetbin.encode('hex')
target = long(targetbin_str, 16)
# pre-hash first 76b of block header
static_hash = hashlib.sha256()
static_hash.update(blk_hdr)
for nonce in xrange(self.max_nonce):
# encode 32-bit nonce value
nonce_bin = struct.pack("<I", nonce)
# hash final 4b, the nonce value
hash1_o = static_hash.copy()
hash1_o.update(nonce_bin)
hash1 = hash1_o.digest()
# sha256 hash of sha256 hash
hash_o = hashlib.sha256()
hash_o.update(hash1)
hash = hash_o.digest()
# quick test for winning solution: high 32 bits zero?
if hash[-4:] != '\0\0\0\0':
continue
# convert binary hash to 256-bit Python long
hash = bufreverse(hash)
hash = wordreverse(hash)
hash_str = hash.encode('hex')
l = long(hash_str, 16)
# proof-of-work test: hash < target
if l < target:
print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
return (nonce + 1, nonce_bin)
else:
print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
# return (nonce + 1, nonce_bin)
return (nonce + 1, None)
def submit_work(self, rpc, original_data, nonce_bin):
nonce_bin = bufreverse(nonce_bin)
nonce = nonce_bin.encode('hex')
solution = original_data[:152] + nonce + original_data[160:256]
param_arr = [ solution ]
result = rpc.getwork(param_arr)
print time.asctime(), "--> Upstream RPC result:", result
def iterate(self, rpc):
work = rpc.getwork()
if work is None:
time.sleep(ERR_SLEEP)
return
if 'data' not in work or 'target' not in work:
time.sleep(ERR_SLEEP)
return
time_start = time.time()
(hashes_done, nonce_bin) = self.work(work['data'],
work['target'])
time_end = time.time()
time_diff = time_end - time_start
self.max_nonce = long(
(hashes_done * settings['scantime']) / time_diff)
if self.max_nonce > 0xfffffffaL:
self.max_nonce = 0xfffffffaL
if settings['hashmeter']:
print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
self.id, hashes_done,
(hashes_done / 1000.0) / time_diff)
if nonce_bin is not None:
self.submit_work(rpc, work['data'], nonce_bin)
def loop(self):
rpc = BitcoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpass'])
if rpc is None:
return
while True:
self.iterate(rpc)
def miner_thread(id):
miner = Miner(id)
miner.loop()
if __name__ == '__main__':
if len(sys.argv) != 2:
print "Usage: pyminer.py CONFIG-FILE"
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 10337
if 'threads' not in settings:
settings['threads'] = 1
if 'hashmeter' not in settings:
settings['hashmeter'] = 0
if 'scantime' not in settings:
settings['scantime'] = 30L
if 'rpcuser' not in settings or 'rpcpass' not in settings:
print "Missing username and/or password in cfg file"
sys.exit(1)
settings['port'] = int(settings['port'])
settings['threads'] = int(settings['threads'])
settings['hashmeter'] = int(settings['hashmeter'])
settings['scantime'] = long(settings['scantime'])
thr_list = []
for thr_id in range(settings['threads']):
p = Process(target=miner_thread, args=(thr_id,))
p.start()
thr_list.append(p)
time.sleep(1) # stagger threads
print settings['threads'], "mining threads started"
print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
try:
for thr_proc in thr_list:
thr_proc.join()
except KeyboardInterrupt:
pass
print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
|
mit
|
seana7a7/node-socket.io-mongodb-example
|
node_modules/socket.io/node_modules/engine.io/node_modules/engine.io-parser/node_modules/utf8/tests/generate-test-data.py
|
2214
|
1347
|
#!/usr/bin/env python
import re
import json
# http://mathiasbynens.be/notes/javascript-encoding#surrogate-formulae
# http://stackoverflow.com/a/13436167/96656
def unisymbol(codePoint):
if codePoint >= 0x0000 and codePoint <= 0xFFFF:
return unichr(codePoint)
elif codePoint >= 0x010000 and codePoint <= 0x10FFFF:
highSurrogate = int((codePoint - 0x10000) / 0x400) + 0xD800
lowSurrogate = int((codePoint - 0x10000) % 0x400) + 0xDC00
return unichr(highSurrogate) + unichr(lowSurrogate)
else:
return 'Error'
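# Worked example (added for clarity, not in the original script): for the
# astral code point U+1F4A9 the surrogate formulae above give
#
#   high = (0x1F4A9 - 0x10000) / 0x400 + 0xD800 = 0xD83D
#   low  = (0x1F4A9 - 0x10000) % 0x400 + 0xDC00 = 0xDCA9
#
# so unisymbol(0x1F4A9) returns the two-code-unit string u'\ud83d\udca9'.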
def hexify(codePoint):
return 'U+' + hex(codePoint)[2:].upper().zfill(6)
def writeFile(filename, contents):
print filename
with open(filename, 'w') as f:
f.write(contents.strip() + '\n')
data = []
for codePoint in range(0x000000, 0x10FFFF + 1):
symbol = unisymbol(codePoint)
# http://stackoverflow.com/a/17199950/96656
bytes = symbol.encode('utf8').decode('latin1')
data.append({
'codePoint': codePoint,
'decoded': symbol,
'encoded': bytes
});
jsonData = json.dumps(data, sort_keys=False, indent=2, separators=(',', ': '))
# Use tabs instead of double spaces for indentation
jsonData = jsonData.replace(' ', '\t')
# Escape hexadecimal digits in escape sequences
jsonData = re.sub(
r'\\u([a-fA-F0-9]{4})',
lambda match: r'\u{}'.format(match.group(1).upper()),
jsonData
)
writeFile('data.json', jsonData)
|
mit
|
louietsai/python-for-android
|
python-modules/twisted/twisted/test/test_persisted.py
|
60
|
8648
|
# Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
# System Imports
import sys
from twisted.trial import unittest
try:
import cPickle as pickle
except ImportError:
import pickle
try:
import cStringIO as StringIO
except ImportError:
import StringIO
# Twisted Imports
from twisted.persisted import styles, aot, crefutil
class VersionTestCase(unittest.TestCase):
def testNullVersionUpgrade(self):
global NullVersioned
class NullVersioned:
ok = 0
pkcl = pickle.dumps(NullVersioned())
class NullVersioned(styles.Versioned):
persistenceVersion = 1
def upgradeToVersion1(self):
self.ok = 1
mnv = pickle.loads(pkcl)
styles.doUpgrade()
assert mnv.ok, "initial upgrade not run!"
def testVersionUpgrade(self):
global MyVersioned
class MyVersioned(styles.Versioned):
persistenceVersion = 2
persistenceForgets = ['garbagedata']
v3 = 0
v4 = 0
def __init__(self):
self.somedata = 'xxx'
self.garbagedata = lambda q: 'cant persist'
def upgradeToVersion3(self):
self.v3 += 1
def upgradeToVersion4(self):
self.v4 += 1
mv = MyVersioned()
assert not (mv.v3 or mv.v4), "hasn't been upgraded yet"
pickl = pickle.dumps(mv)
MyVersioned.persistenceVersion = 4
obj = pickle.loads(pickl)
styles.doUpgrade()
assert obj.v3, "didn't do version 3 upgrade"
assert obj.v4, "didn't do version 4 upgrade"
pickl = pickle.dumps(obj)
obj = pickle.loads(pickl)
styles.doUpgrade()
assert obj.v3 == 1, "upgraded unnecessarily"
assert obj.v4 == 1, "upgraded unnecessarily"
def testNonIdentityHash(self):
global ClassWithCustomHash
class ClassWithCustomHash(styles.Versioned):
def __init__(self, unique, hash):
self.unique = unique
self.hash = hash
def __hash__(self):
return self.hash
v1 = ClassWithCustomHash('v1', 0)
v2 = ClassWithCustomHash('v2', 0)
pkl = pickle.dumps((v1, v2))
del v1, v2
ClassWithCustomHash.persistenceVersion = 1
ClassWithCustomHash.upgradeToVersion1 = lambda self: setattr(self, 'upgraded', True)
v1, v2 = pickle.loads(pkl)
styles.doUpgrade()
self.assertEquals(v1.unique, 'v1')
self.assertEquals(v2.unique, 'v2')
self.failUnless(v1.upgraded)
self.failUnless(v2.upgraded)
def testUpgradeDeserializesObjectsRequiringUpgrade(self):
global ToyClassA, ToyClassB
class ToyClassA(styles.Versioned):
pass
class ToyClassB(styles.Versioned):
pass
x = ToyClassA()
y = ToyClassB()
pklA, pklB = pickle.dumps(x), pickle.dumps(y)
del x, y
ToyClassA.persistenceVersion = 1
def upgradeToVersion1(self):
self.y = pickle.loads(pklB)
styles.doUpgrade()
ToyClassA.upgradeToVersion1 = upgradeToVersion1
ToyClassB.persistenceVersion = 1
ToyClassB.upgradeToVersion1 = lambda self: setattr(self, 'upgraded', True)
x = pickle.loads(pklA)
styles.doUpgrade()
self.failUnless(x.y.upgraded)
class MyEphemeral(styles.Ephemeral):
def __init__(self, x):
self.x = x
class EphemeralTestCase(unittest.TestCase):
def testEphemeral(self):
o = MyEphemeral(3)
self.assertEquals(o.__class__, MyEphemeral)
self.assertEquals(o.x, 3)
pickl = pickle.dumps(o)
o = pickle.loads(pickl)
self.assertEquals(o.__class__, styles.Ephemeral)
self.assert_(not hasattr(o, 'x'))
class Pickleable:
def __init__(self, x):
self.x = x
def getX(self):
return self.x
class A:
"""
dummy class
"""
def amethod(self):
pass
class B:
"""
dummy class
"""
def bmethod(self):
pass
def funktion():
pass
class PicklingTestCase(unittest.TestCase):
"""Test pickling of extra object types."""
def testModule(self):
pickl = pickle.dumps(styles)
o = pickle.loads(pickl)
self.assertEquals(o, styles)
def testClassMethod(self):
pickl = pickle.dumps(Pickleable.getX)
o = pickle.loads(pickl)
self.assertEquals(o, Pickleable.getX)
def testInstanceMethod(self):
obj = Pickleable(4)
pickl = pickle.dumps(obj.getX)
o = pickle.loads(pickl)
self.assertEquals(o(), 4)
self.assertEquals(type(o), type(obj.getX))
def testStringIO(self):
f = StringIO.StringIO()
f.write("abc")
pickl = pickle.dumps(f)
o = pickle.loads(pickl)
self.assertEquals(type(o), type(f))
self.assertEquals(f.getvalue(), "abc")
class EvilSourceror:
def __init__(self, x):
self.a = self
self.a.b = self
self.a.b.c = x
class NonDictState:
def __getstate__(self):
return self.state
def __setstate__(self, state):
self.state = state
class AOTTestCase(unittest.TestCase):
def testSimpleTypes(self):
obj = (1, 2.0, 3j, True, slice(1, 2, 3), 'hello', u'world', sys.maxint + 1, None, Ellipsis)
rtObj = aot.unjellyFromSource(aot.jellyToSource(obj))
self.assertEquals(obj, rtObj)
def testMethodSelfIdentity(self):
a = A()
b = B()
a.bmethod = b.bmethod
b.a = a
im_ = aot.unjellyFromSource(aot.jellyToSource(b)).a.bmethod
self.assertEquals(im_.im_class, im_.im_self.__class__)
def test_methodNotSelfIdentity(self):
"""
        If a class changes after an instance has been created,
        L{aot.unjellyFromSource} should raise a C{TypeError} when trying to
unjelly the instance.
"""
a = A()
b = B()
a.bmethod = b.bmethod
b.a = a
savedbmethod = B.bmethod
del B.bmethod
try:
self.assertRaises(TypeError, aot.unjellyFromSource,
aot.jellyToSource(b))
finally:
B.bmethod = savedbmethod
def test_unsupportedType(self):
"""
L{aot.jellyToSource} should raise a C{TypeError} when trying to jelly
an unknown type.
"""
try:
set
except:
from sets import Set as set
self.assertRaises(TypeError, aot.jellyToSource, set())
def testBasicIdentity(self):
# Anyone wanting to make this datastructure more complex, and thus this
# test more comprehensive, is welcome to do so.
aj = aot.AOTJellier().jellyToAO
d = {'hello': 'world', "method": aj}
l = [1, 2, 3,
"he\tllo\n\n\"x world!",
u"goodbye \n\t\u1010 world!",
1, 1.0, 100 ** 100l, unittest, aot.AOTJellier, d,
funktion
]
t = tuple(l)
l.append(l)
l.append(t)
l.append(t)
uj = aot.unjellyFromSource(aot.jellyToSource([l, l]))
assert uj[0] is uj[1]
assert uj[1][0:5] == l[0:5]
def testNonDictState(self):
a = NonDictState()
a.state = "meringue!"
assert aot.unjellyFromSource(aot.jellyToSource(a)).state == a.state
def testCopyReg(self):
s = "foo_bar"
sio = StringIO.StringIO()
sio.write(s)
uj = aot.unjellyFromSource(aot.jellyToSource(sio))
# print repr(uj.__dict__)
assert uj.getvalue() == s
def testFunkyReferences(self):
o = EvilSourceror(EvilSourceror([]))
j1 = aot.jellyToAOT(o)
oj = aot.unjellyFromAOT(j1)
assert oj.a is oj
assert oj.a.b is oj.b
assert oj.c is not oj.c.c
class CrefUtilTestCase(unittest.TestCase):
"""
Tests for L{crefutil}.
"""
def test_dictUnknownKey(self):
"""
L{crefutil._DictKeyAndValue} only support keys C{0} and C{1}.
"""
d = crefutil._DictKeyAndValue({})
self.assertRaises(RuntimeError, d.__setitem__, 2, 3)
def test_deferSetMultipleTimes(self):
"""
L{crefutil._Defer} can be assigned a key only one time.
"""
d = crefutil._Defer()
d[0] = 1
self.assertRaises(RuntimeError, d.__setitem__, 0, 1)
testCases = [VersionTestCase, EphemeralTestCase, PicklingTestCase]
|
apache-2.0
|
ContinuumIO/ashiba
|
enaml/enaml/wx/wx_spin_box.py
|
1
|
16857
|
#------------------------------------------------------------------------------
# Copyright (c) 2013, Nucleic Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#------------------------------------------------------------------------------
import wx
import wx.lib.newevent
from atom.api import Int, Typed
from enaml.widgets.spin_box import ProxySpinBox
from .wx_control import WxControl
#: The changed event for the custom spin box
wxSpinBoxEvent, EVT_SPIN_BOX = wx.lib.newevent.NewEvent()
class wxProperSpinBox(wx.SpinCtrl):
""" A custom wx spin control that acts more like QSpinBox.
The standard wx.SpinCtrl doesn't support too many features, and
the ones it does support are (like wrapping) are limited. So,
this custom control hard codes the internal range to the maximum
range of the wx.SpinCtrl and implements wrapping manually.
For changed events, users should bind to EVT_SPIN_BOX rather than
EVT_SPINCTRL.
See the method docstrings for supported functionality.
This control is really a god awful hack and needs to be rewritten
    using a combination of wx.SpinButton and wx.TextCtrl.
"""
def __init__(self, *args, **kwargs):
""" CustomSpinCtrl constructor.
Parameters
----------
*args, **kwargs
The positional and keyword arguments for initializing a
wx.SpinCtrl.
"""
# The max range of the wx.SpinCtrl is the range of a signed
# 32bit integer. We don't care about wx's internal value of
        # the control, since we maintain our own internal counter, and
        # because the internal value of the widget gets reset to
# the minimum of the range whenever SetValueString is called.
self._hard_min = -(1 << 31)
self._hard_max = (1 << 31) - 1
self._internal_value = 0
self._low = 0
self._high = 100
self._step = 1
self._prefix = u''
self._suffix = u''
self._special_value_text = u''
self._value_string = unicode(self._low)
self._wrap = False
self._read_only = False
# Stores whether spin-up or spin-down was pressed.
self._spin_state = None
super(wxProperSpinBox, self).__init__(*args, **kwargs)
super(wxProperSpinBox, self).SetRange(self._hard_min, self._hard_max)
# Setting the spin control to process the enter key removes
# its processing of the Tab key. This is desired for two reasons:
# 1) It is consistent with the Qt version of the control.
# 2) The default tab processing is kinda wacky in that when
# tab is pressed, it emits a text event with the string
# representation of the integer value of the control,
# regardless of the value of the user supplied string.
# This is definitely not correct and so processing on
# Enter allows us to avoid the issue entirely.
self.WindowStyle |= wx.TE_PROCESS_ENTER
self.Bind(wx.EVT_SPIN_UP, self.OnSpinUp)
self.Bind(wx.EVT_SPIN_DOWN, self.OnSpinDown)
self.Bind(wx.EVT_SPINCTRL, self.OnSpinCtrl)
self.Bind(wx.EVT_TEXT, self.OnText)
self.Bind(wx.EVT_KILL_FOCUS, self.OnKillFocus)
self.Bind(wx.EVT_TEXT_ENTER, self.OnEnterPressed)
#--------------------------------------------------------------------------
# Event Handlers
#--------------------------------------------------------------------------
def OnEnterPressed(self, event):
""" The event handler for an enter key press. It forces an
interpretation of the current text control value.
"""
self.InterpretText()
def OnKillFocus(self, event):
""" Handles evaluating the text in the control when the control
loses focus.
"""
# The spin control doesn't emit a spin event when losing focus
# to process typed input change unless it results in a different
# value, so we have to handle it manually and update the control
# again after the event. It must be invoked on a CallAfter or it
# doesn't work properly. The lambda avoids a DeadObjectError if
# the app is exited before the callback executes.
wx.CallAfter(lambda: self.InterpretText() if self else None)
def OnText(self, event):
""" Handles the text event of the spin control to store away the
user typed text for later conversion.
"""
if self._read_only:
return
# Do not be tempted to try to implement the 'tracking' feature
# by adding logic to this method. Wx emits this event at weird
# times such as ctrl-a select all as well as when SetValueString
# is called. Granted, this can be avoided with a recursion guard,
# however, there is no way to get/set the caret position on the
# control and every call to SetValueString resets the caret
# position to Zero. So, there is really no possible way to
# implement 'tracking' without creating an entirely new custom
# control. So for now, the wx backend just lacks that feature.
self._value_string = event.GetString()
def OnSpinUp(self, event):
""" The event handler for the spin up event. We veto the spin
        event to prevent the control from changing its internal value.
Instead, we maintain complete control of the value.
"""
event.Veto()
if self._read_only:
return
self._spin_state = 'up'
self.OnSpinCtrl(event)
self._spin_state = None
def OnSpinDown(self, event):
""" The event handler for the spin down event. We veto the spin
        event to prevent the control from changing its internal value.
Instead, we maintain complete control of the value.
"""
event.Veto()
if self._read_only:
return
self._spin_state = 'down'
self.OnSpinCtrl(event)
self._spin_state = None
def OnSpinCtrl(self, event):
""" Handles the spin control being changed by user interaction.
All of the manual stepping and wrapping logic is computed by
this method.
"""
if self._read_only:
return
last = self._internal_value
low = self._low
high = self._high
step = self._step
wrap = self._wrap
spin_state = self._spin_state
if spin_state == 'down':
if last == low:
if wrap:
computed = high
else:
computed = low
else:
computed = last - step
if computed < low:
computed = low
self.SetValue(computed)
elif spin_state == 'up':
if last == high:
if wrap:
computed = low
else:
computed = high
else:
computed = last + step
if computed > high:
computed = high
self.SetValue(computed)
else:
            # A spurious spin event generated by wx when the widget loses
            # focus. We can safely ignore it.
pass
#--------------------------------------------------------------------------
# Getters/Setters
#--------------------------------------------------------------------------
def GetLow(self):
""" Returns the minimum value of the control.
"""
return self._low
def GetMin(self):
""" Equivalent to GetLow().
"""
return self._low
def SetLow(self, low):
""" Sets the minimum value of the control and changes the
value to the min if the current value would be out of range.
"""
if low < self._hard_min:
raise ValueError('%s is too low for wxProperSpinBox.' % low)
self._low = low
if self.GetValue() < low:
self.SetValue(low)
def GetHigh(self):
""" Returns the maximum value of the control.
"""
return self._high
def GetMax(self):
""" Equivalent to GetHigh().
"""
return self._high
def SetHigh(self, high):
""" Sets the maximum value of the control and changes the
value to the max if the current value would be out of range.
"""
if high > self._hard_max:
raise ValueError('%s is too high for wxProperSpinBox.' % high)
self._high = high
if self.GetValue() > high:
self.SetValue(high)
def SetRange(self, low, high):
""" Sets the low and high values of the control.
"""
self.SetLow(low)
self.SetHigh(high)
def GetStep(self):
""" Returns the step size of the control.
"""
return self._step
def SetStep(self, step):
""" Sets the step size of the control.
"""
self._step = step
def GetWrap(self):
""" Gets the wrap flag of the control.
"""
return self._wrap
def SetWrap(self, wrap):
""" Sets the wrap flag of the control.
"""
self._wrap = wrap
def GetPrefix(self):
""" Get the prefix text for the control.
Returns
-------
result : unicode
The unicode prefix text.
"""
return self._prefix
def SetPrefix(self, prefix):
""" Set the prefix text for the control.
Parameters
----------
prefix : unicode
The unicode prefix text for the control.
"""
self._prefix = prefix
def GetSuffix(self):
""" Get the suffix text for the control.
Returns
-------
result : unicode
The unicode suffix text.
"""
return self._suffix
def SetSuffix(self, suffix):
""" Set the suffix text for the control.
Parameters
----------
suffix : unicode
The unicode suffix text for the control.
"""
self._suffix = suffix
def GetSpecialValueText(self):
""" Returns the special value text for the spin box.
Returns
-------
result : unicode
The unicode special value text.
"""
return self._special_value_text
def SetSpecialValueText(self, text):
""" Set the special value text for the control.
Parameters
----------
text : unicode
The unicode special value text for the control.
"""
self._special_value_text = text
def GetReadOnly(self):
""" Get the read only flag for the control.
Returns
-------
result : bool
True if the control is read only, False otherwise.
"""
        return self._read_only
def SetReadOnly(self, read_only):
""" Set the read only flag for the control
Parameters
----------
read_only : bool
True if the control should be read only, False otherwise.
"""
self._read_only = read_only
def GetValue(self):
""" Returns the internal integer value of the control.
"""
return self._internal_value
def SetValue(self, value):
""" Sets the value of the control to the given value, provided
that the value is within the range of the control. If the
given value is within range, and is different from the current
value of the control, an EVT_SPIN_BOX will be emitted.
"""
different = False
if self._low <= value <= self._high:
different = (self._internal_value != value)
self._internal_value = value
# Always set the value string, just to be overly
# safe that we don't fall out of sync.
self._value_string = self.TextFromValue(self._internal_value)
self.SetValueString(self._value_string)
if different:
evt = wxSpinBoxEvent()
wx.PostEvent(self, evt)
#--------------------------------------------------------------------------
# Support Methods
#--------------------------------------------------------------------------
def InterpretText(self):
""" Interprets the user supplied text and updates the control.
"""
prefix = self._prefix
suffix = self._suffix
svt = self._special_value_text
text = self._value_string
if svt and text == svt:
self.SetValue(self._low)
return
if prefix and text.startswith(prefix):
text = text[len(prefix):]
if suffix and text.endswith(suffix):
text = text[:-len(suffix)]
try:
value = int(text)
except ValueError:
value = self._internal_value
self.SetValue(value)
def TextFromValue(self, value):
""" Converts the given integer to a string for display.
"""
prefix = self._prefix
suffix = self._suffix
svt = self._special_value_text
if value == self._low and svt:
return svt
text = unicode(value)
if prefix:
text = '%s%s' % (prefix, text)
if suffix:
text = '%s%s' % (text, suffix)
return text
#: Cyclic guard flag
VALUE_FLAG = 0x1
class WxSpinBox(WxControl, ProxySpinBox):
""" A Wx implementation of an Enaml ProxySpinBox.
"""
#: A reference to the widget created by the proxy.
widget = Typed(wxProperSpinBox)
#: Cyclic guard flags
_guard = Int(0)
#--------------------------------------------------------------------------
# Initialization API
#--------------------------------------------------------------------------
def create_widget(self):
""" Create the underlying wxProperSpinBox widget.
"""
self.widget = wxProperSpinBox(self.parent_widget())
    def init_widget(self):
        """ Create and initialize the spin box control.
"""
super(WxSpinBox, self).init_widget()
d = self.declaration
self.set_maximum(d.maximum)
self.set_minimum(d.minimum)
self.set_value(d.value)
self.set_prefix(d.prefix)
self.set_suffix(d.suffix)
self.set_special_value_text(d.special_value_text)
self.set_single_step(d.single_step)
self.set_read_only(d.read_only)
self.set_wrapping(d.wrapping)
self.widget.Bind(EVT_SPIN_BOX, self.on_value_changed)
#--------------------------------------------------------------------------
# Event Handlers
#--------------------------------------------------------------------------
def on_value_changed(self, event):
""" The event handler for the 'EVT_SPIN_BOX' event.
"""
if not self._guard & VALUE_FLAG:
self._guard |= VALUE_FLAG
try:
self.declaration.value = self.widget.GetValue()
finally:
self._guard &= ~VALUE_FLAG
#--------------------------------------------------------------------------
# ProxySpinBox API
#--------------------------------------------------------------------------
def set_maximum(self, maximum):
""" Set the widget's maximum value.
"""
self.widget.SetHigh(maximum)
def set_minimum(self, minimum):
""" Set the widget's minimum value.
"""
self.widget.SetLow(minimum)
def set_value(self, value):
""" Set the spin box's value.
"""
if not self._guard & VALUE_FLAG:
self._guard |= VALUE_FLAG
try:
self.widget.SetValue(value)
finally:
self._guard &= ~VALUE_FLAG
def set_prefix(self, prefix):
""" Set the prefix for the spin box.
"""
self.widget.SetPrefix(prefix)
def set_suffix(self, suffix):
""" Set the suffix for the spin box.
"""
self.widget.SetSuffix(suffix)
def set_special_value_text(self, text):
""" Set the special value text for the spin box.
"""
self.widget.SetSpecialValueText(text)
def set_single_step(self, step):
""" Set the widget's single step value.
"""
self.widget.SetStep(step)
def set_read_only(self, read_only):
""" Set the widget's read only flag.
"""
self.widget.SetReadOnly(read_only)
def set_wrapping(self, wrapping):
""" Set the widget's wrapping flag.
"""
self.widget.SetWrap(wrapping)
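# --- Minimal usage sketch (not part of the original file) ---
# How wxProperSpinBox is meant to be driven on its own; the frame below is a
# hypothetical parent and a running wx event loop is assumed:
#
#   app = wx.App(False)
#   frame = wx.Frame(None, title='spin box demo')
#   spin = wxProperSpinBox(frame)
#   spin.SetRange(0, 10)
#   spin.SetWrap(True)
#   spin.SetPrefix(u'level ')
#   spin.Bind(EVT_SPIN_BOX, lambda event: None)  # posted on real value changes
#   frame.Show()
#   app.MainLoop()
#
# Unlike wx.EVT_SPINCTRL, EVT_SPIN_BOX is only posted when SetValue()
# actually changes the internally tracked value.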
|
bsd-3-clause
|
Finntack/pootle
|
pootle/apps/staticpages/templatetags/staticpages.py
|
3
|
2425
|
# -*- coding: utf-8 -*-
#
# Copyright (C) Pootle contributors.
#
# This file is a part of the Pootle project. It is distributed under the GPL3
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
from django import template
from django.core.urlresolvers import reverse
from ..models import LegalPage
register = template.Library()
class LegalPageNode(template.Node):
def __init__(self, context_name):
self.context_name = context_name
def render(self, context):
lps = LegalPage.objects.live().all()
context[self.context_name] = lps
return ''
@register.tag
def get_legalpages(parser, token):
"""
Retrieves all active LegalPage objects.
Populates the template context with them in a variable
whose name is defined by the ``as`` clause.
Syntax::
{% get_legalpages as context_name %}
"""
bits = token.split_contents()
syntax_message = ("%(tag_name)s expects a syntax of %(tag_name)s "
"as context_name" %
dict(tag_name=bits[0]))
if len(bits) == 3:
if bits[1] != 'as':
raise template.TemplateSyntaxError(syntax_message)
context_name = bits[2]
return LegalPageNode(context_name)
else:
raise template.TemplateSyntaxError(syntax_message)
@register.tag
def staticpage_url(parser, token):
"""Returns the internal URL for a static page based on its virtual path.
Syntax::
{% staticpage_url 'virtual/path' %}
"""
bits = token.split_contents()
syntax_message = ("%(tag_name)s expects a syntax of %(tag_name)s "
"'virtual/path'" %
dict(tag_name=bits[0]))
quote_message = "%s tag's argument should be in quotes" % bits[0]
if len(bits) == 2:
virtual_path = bits[1]
if (not (virtual_path[0] == virtual_path[-1] and
virtual_path[0] in ('"', "'"))):
raise template.TemplateSyntaxError(quote_message)
return StaticPageURLNode(virtual_path[1:-1])
raise template.TemplateSyntaxError(syntax_message)
class StaticPageURLNode(template.Node):
def __init__(self, virtual_path):
self.virtual_path = virtual_path
def render(self, context):
return reverse('pootle-staticpages-display', args=[self.virtual_path])
|
gpl-3.0
|
eworm-de/systemd
|
test/sd-script.py
|
12
|
21508
|
#!/usr/bin/env python3
# SPDX-License-Identifier: LGPL-2.1-or-later
#
# sd-script.py: create LOTS of sd device entries in fake sysfs
#
# (C) 2018 Martin Wilck, SUSE Linux GmbH
#
# Run after sys-script.py
# Usage: sd-script.py <directory> <num>
# <num> is the number of device nodes (disks + partitions)
# to create in addition to what sys-script.py already did.
# The script can be run several times in a row if <num> is increased,
# adding yet more device entries.
# Tested up to 1000 entries, more are possible.
# Note that sys-script.py already creates 10 sd device nodes
# (sda+sdb and partitions). This script starts with sdc.
import re
import os
import errno
import sys
def d(path, mode):
os.mkdir(path, mode)
def l(path, src):
os.symlink(src, path)
def f(path, mode, contents):
with open(path, "wb") as f:
f.write(contents)
os.chmod(path, mode)
class SD(object):
sd_major = [8] + list(range(65, 72)) + list(range(128, 136))
_name_re = re.compile(r'sd(?P<f>[a-z]*)$')
def _init_from_name(self, name):
mt = self._name_re.match(name)
if mt is None:
raise RuntimeError("invalid name %s" % name)
nm = mt.group("f")
base = 1
ls = nm[-1]
nm = nm[:-1]
n = base * (ord(ls)-ord('a'))
while len(nm) > 0:
ls = nm[-1]
nm = nm[:-1]
base *= 26
n += base * (1 + ord(ls)-ord('a'))
self._num = n
def _init_from_dev(self, dev):
maj, min = dev.split(":")
maj = self.sd_major.index(int(maj, 10))
min = int(min, 10)
num = int(min / 16)
self._num = 16*maj + num%16 + 256*int(num/16)
@staticmethod
def _disk_num(a, b):
n = ord(a)-ord('a')
if b != '':
n = 26 * (n+1) + ord(b)-ord('a')
return n
@staticmethod
def _get_major(n):
return SD.sd_major[(n%256)//16]
@staticmethod
def _get_minor(n):
return 16 * (n % 16 + 16 * (n // 256))
@staticmethod
def _get_name(n):
# see sd_format_disk_name() (sd.c)
s = chr(n % 26 + ord('a'))
n = n // 26 - 1
while n >= 0:
s = chr(n % 26 + ord('a')) + s
n = n // 26 - 1
return "sd" + s
@staticmethod
def _get_dev_t(n):
maj = SD._get_major(n)
min = SD._get_minor(n)
return (maj << 20) + min
def __init__(self, arg):
if type(arg) is type(0):
self._num = arg
elif arg.startswith("sd"):
self._init_from_name(arg)
else:
self._init_from_dev(arg)
def __eq__(self, other):
return self._num == other._num
def __lt__(self, other):
return self._num < other._num
def __hash__(self):
return hash(self._num)
def __str__(self):
return "%s/%s" % (
self.devstr(),
self._get_name(self._num))
def major(self):
return self._get_major(self._num)
def minor(self):
return self._get_minor(self._num)
def devstr(self):
return "%d:%d" % (self._get_major(self._num),
self._get_minor(self._num))
def namestr(self):
return self._get_name(self._num)
def longstr(self):
return "%d\t%s\t%s\t%08x" % (self._num,
self.devstr(),
self.namestr(),
self._get_dev_t(self._num))
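# Worked examples (comments only, nothing here is executed): the SD helper maps
# a linear disk index to a kernel-style name and major:minor pair based on the
# sd_major table above.
#   SD(0).namestr()  -> 'sda'   SD(0).devstr() -> '8:0'
#   SD(2).namestr()  -> 'sdc'   SD(2).devstr() -> '8:32'
#   SD(26).namestr() -> 'sdaa'  (indexes past 25 get two-letter suffixes)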
class MySD(SD):
def subst(self, first_sg):
disk = {
"lun": self._num,
"major": self.major(),
"devnode": self.namestr(),
"disk_minor": self.minor(),
"sg_minor": first_sg + self._num,
}
return disk
disk_template = r"""\
l('sys/bus/scsi/drivers/sd/7:0:0:{lun}', '../../../../devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}')
l('sys/bus/scsi/devices/7:0:0:{lun}', '../../../devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}')
l('sys/dev/char/254:{sg_minor}', '../../devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/bsg/7:0:0:{lun}')
l('sys/dev/char/21:{sg_minor}', '../../devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/scsi_generic/sg{sg_minor}')
l('sys/class/scsi_disk/7:0:0:{lun}', '../../devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/scsi_disk/7:0:0:{lun}')
l('sys/class/scsi_generic/sg{sg_minor}', '../../devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/scsi_generic/sg{sg_minor}')
l('sys/class/bsg/7:0:0:{lun}', '../../devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/bsg/7:0:0:{lun}')
l('sys/class/scsi_device/7:0:0:{lun}', '../../devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/scsi_device/7:0:0:{lun}')
d('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}', 0o755)
l('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/generic', 'scsi_generic/sg{sg_minor}')
l('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/subsystem', '../../../../../../../../../bus/scsi')
l('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/driver', '../../../../../../../../../bus/scsi/drivers/sd')
f('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/iodone_cnt', 0o644, b'0xc3\n')
f('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/device_blocked', 0o644, b'0\n')
f('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/max_sectors', 0o644, b'240\n')
f('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/modalias', 0o644, b'scsi:t-0x00\n')
f('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/scsi_level', 0o644, b'3\n')
f('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/queue_depth', 0o644, b'1\n')
f('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/rev', 0o644, b'1.00\n')
f('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/type', 0o644, b'0\n')
f('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/iocounterbits', 0o644, b'32\n')
f('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/vendor', 0o644, b'Generic \n')
f('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/state', 0o644, b'running\n')
f('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/queue_type', 0o644, b'none\n')
f('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/iorequest_cnt', 0o644, b'0xc3\n')
f('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/evt_media_change', 0o644, b'0\n')
f('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/model', 0o644, b'USB Flash Drive \n')
f('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/ioerr_cnt', 0o644, b'0x2\n')
f('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/uevent', 0o644, b'''DEVTYPE=scsi_device
DRIVER=sd
MODALIAS=scsi:t-0x00
''')
f('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/timeout', 0o644, b'60\n')
d('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/scsi_disk', 0o755)
d('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/scsi_disk/7:0:0:{lun}', 0o755)
l('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/scsi_disk/7:0:0:{lun}/subsystem', '../../../../../../../../../../../class/scsi_disk')
l('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/scsi_disk/7:0:0:{lun}/device', '../../../7:0:0:{lun}')
f('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/scsi_disk/7:0:0:{lun}/app_tag_own', 0o644, b'0\n')
f('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/scsi_disk/7:0:0:{lun}/FUA', 0o644, b'0\n')
f('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/scsi_disk/7:0:0:{lun}/cache_type', 0o644, b'write through\n')
f('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/scsi_disk/7:0:0:{lun}/protection_type', 0o644, b'0\n')
f('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/scsi_disk/7:0:0:{lun}/manage_start_stop', 0o644, b'0\n')
f('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/scsi_disk/7:0:0:{lun}/allow_restart', 0o644, b'1\n')
f('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/scsi_disk/7:0:0:{lun}/uevent', 0o644, b'')
d('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/scsi_disk/7:0:0:{lun}/power', 0o755)
f('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/scsi_disk/7:0:0:{lun}/power/wakeup', 0o644, b'\n')
d('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/power', 0o755)
f('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/power/wakeup', 0o644, b'\n')
d('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/scsi_generic', 0o755)
d('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/scsi_generic/sg{sg_minor}', 0o755)
l('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/scsi_generic/sg{sg_minor}/subsystem', '../../../../../../../../../../../class/scsi_generic')
l('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/scsi_generic/sg{sg_minor}/device', '../../../7:0:0:{lun}')
f('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/scsi_generic/sg{sg_minor}/dev', 0o644, b'21:{sg_minor}\n')
f('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/scsi_generic/sg{sg_minor}/uevent', 0o644, b'''MAJOR=21
MINOR={sg_minor}
''')
d('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/scsi_generic/sg{sg_minor}/power', 0o755)
f('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/scsi_generic/sg{sg_minor}/power/wakeup', 0o644, b'\n')
d('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/bsg', 0o755)
d('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/bsg/7:0:0:{lun}', 0o755)
l('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/bsg/7:0:0:{lun}/subsystem', '../../../../../../../../../../../class/bsg')
l('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/bsg/7:0:0:{lun}/device', '../../../7:0:0:{lun}')
f('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/bsg/7:0:0:{lun}/dev', 0o644, b'254:{sg_minor}\n')
f('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/bsg/7:0:0:{lun}/uevent', 0o644, b'''MAJOR=254
MINOR={sg_minor}
''')
d('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/bsg/7:0:0:{lun}/power', 0o755)
f('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/bsg/7:0:0:{lun}/power/wakeup', 0o644, b'\n')
d('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/block', 0o755)
d('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/scsi_device', 0o755)
d('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/scsi_device/7:0:0:{lun}', 0o755)
l('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/scsi_device/7:0:0:{lun}/subsystem', '../../../../../../../../../../../class/scsi_device')
l('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/scsi_device/7:0:0:{lun}/device', '../../../7:0:0:{lun}')
f('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/scsi_device/7:0:0:{lun}/uevent', 0o644, b'')
d('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/scsi_device/7:0:0:{lun}/power', 0o755)
f('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/scsi_device/7:0:0:{lun}/power/wakeup', 0o644, b'\n')
l('sys/dev/block/{major}:{disk_minor}', '../../devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/block/{devnode}')
l('sys/class/block/{devnode}', '../../devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/block/{devnode}')
l('sys/block/{devnode}', '../devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/block/{devnode}')
d('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/block/{devnode}', 0o755)
l('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/block/{devnode}/subsystem', '../../../../../../../../../../../class/block')
l('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/block/{devnode}/bdi', '../../../../../../../../../../virtual/bdi/{major}:{disk_minor}')
l('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/block/{devnode}/device', '../../../7:0:0:{lun}')
f('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/block/{devnode}/capability', 0o644, b'13\n')
f('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/block/{devnode}/ro', 0o644, b'0\n')
f('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/block/{devnode}/make-it-fail', 0o644, b'0\n')
f('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/block/{devnode}/size', 0o644, b'257024\n')
f('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/block/{devnode}/dev', 0o644, b'{major}:{disk_minor}\n')
f('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/block/{devnode}/range', 0o644, b'16\n')
f('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/block/{devnode}/removable', 0o644, b'1\n')
f('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/block/{devnode}/stat', 0o644, b' 117 409 2103 272 0 0 0 0 0 194 272\n')
f('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/block/{devnode}/uevent', 0o644, b'''MAJOR={major}
MINOR={disk_minor}
DEVTYPE=disk
DEVNAME={devnode}
''')
d('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/block/{devnode}/queue', 0o755)
l('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/block/{devnode}/queue/bsg', '../../../bsg/7:0:0:{lun}')
f('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/block/{devnode}/queue/nr_requests', 0o644, b'128\n')
f('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/block/{devnode}/queue/nomerges', 0o644, b'0\n')
f('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/block/{devnode}/queue/scheduler', 0o644, b'noop anticipatory deadline [cfq] \n')
f('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/block/{devnode}/queue/hw_sector_size', 0o644, b'512\n')
f('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/block/{devnode}/queue/max_hw_sectors_kb', 0o644, b'120\n')
f('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/block/{devnode}/queue/read_ahead_kb', 0o644, b'128\n')
f('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/block/{devnode}/queue/max_sectors_kb', 0o644, b'120\n')
d('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/block/{devnode}/queue/iosched', 0o755)
f('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/block/{devnode}/queue/iosched/slice_async_rq', 0o644, b'2\n')
f('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/block/{devnode}/queue/iosched/back_seek_max', 0o644, b'16384\n')
f('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/block/{devnode}/queue/iosched/slice_sync', 0o644, b'100\n')
f('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/block/{devnode}/queue/iosched/slice_async', 0o644, b'40\n')
f('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/block/{devnode}/queue/iosched/fifo_expire_sync', 0o644, b'125\n')
f('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/block/{devnode}/queue/iosched/slice_idle', 0o644, b'8\n')
f('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/block/{devnode}/queue/iosched/back_seek_penalty', 0o644, b'2\n')
f('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/block/{devnode}/queue/iosched/fifo_expire_async', 0o644, b'250\n')
f('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/block/{devnode}/queue/iosched/quantum', 0o644, b'4\n')
d('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/block/{devnode}/power', 0o755)
f('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/block/{devnode}/power/wakeup', 0o644, b'\n')
"""
part_template = r"""\
l('sys/dev/block/{major}:{part_minor}', '../../devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/block/{devnode}/{devnode}{part_num}')
l('sys/class/block/{devnode}{part_num}', '../../devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/block/{devnode}/{devnode}{part_num}')
d('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/block/{devnode}/{devnode}{part_num}', 0o755)
l('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/block/{devnode}/{devnode}{part_num}/subsystem', '../../../../../../../../../../../../class/block')
f('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/block/{devnode}/{devnode}{part_num}/start', 0o644, b'32\n')
f('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/block/{devnode}/{devnode}{part_num}/make-it-fail', 0o644, b'0\n')
f('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/block/{devnode}/{devnode}{part_num}/size', 0o644, b'256992\n')
f('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/block/{devnode}/{devnode}{part_num}/dev', 0o644, b'{major}:{part_minor}\n')
f('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/block/{devnode}/{devnode}{part_num}/stat', 0o644, b' 109 392 1903 246 0 0 0 0 0 169 246\n')
f('sys/devices/pci0000:00/0000:00:1d.7/usb5/5-1/5-1:1.0/host7/target7:0:0/7:0:0:{lun}/block/{devnode}/{devnode}{part_num}/uevent', 0o644, b'''MAJOR={major}
MINOR={part_minor}
DEVTYPE=partition
DEVNAME={devnode}{part_num}
''')
"""
if len(sys.argv) != 3:
exit("Usage: {} <target dir> <number>".format(sys.argv[0]))
if not os.path.isdir(sys.argv[1]):
exit("Target dir {} not found".format(sys.argv[1]))
def create_part_sysfs(disk, sd, prt):
part = disk
part.update ({
"part_num": prt,
"part_minor": disk["disk_minor"] + prt,
})
try:
exec(part_template.format(**part))
except OSError:
si = sys.exc_info()[1]
if (si.errno == errno.EEXIST):
print("sysfs structures for %s%d exist" % (sd.namestr(), prt))
else:
print("error for %s%d: %s" % (sd.namestr(), prt, si[1]))
raise
else:
print("sysfs structures for %s%d created" % (sd.namestr(), prt))
def create_disk_sysfs(dsk, first_sg, n):
sd = MySD(dsk)
disk = sd.subst(first_sg)
try:
exec(disk_template.format(**disk))
except OSError:
si = sys.exc_info()[1]
if (si.errno == errno.EEXIST):
print("sysfs structures for %s exist" % sd.namestr())
elif (si.errno == errno.ENOENT):
print("error for %s: %s - have you run sys-script py first?" %
(sd.namestr(), si.strerror))
return -1
else:
print("error for %s: %s" % (sd.namestr(), si.strerror))
raise
else:
print("sysfs structures for %s created" % sd.namestr())
n += 1
if n >= last:
return n
for prt in range(1, 16):
create_part_sysfs(disk, sd, prt)
n += 1
if n >= last:
return n
return n
os.chdir(sys.argv[1])
n = 0
last = int(sys.argv[2])
first_sg = 2
for dsk in range(2, 1000):
n = create_disk_sysfs(dsk, first_sg, n)
if n >= last or n == -1:
break
|
gpl-2.0
|
MiLk/ansible
|
lib/ansible/modules/network/nxos/nxos_rollback.py
|
45
|
3780
|
#!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: nxos_rollback
extends_documentation_fragment: nxos
version_added: "2.2"
short_description: Set a checkpoint or roll back to a checkpoint.
description:
- This module offers the ability to set a configuration checkpoint
file or roll back to a configuration checkpoint file on Cisco NXOS
switches.
author:
- Jason Edelman (@jedelman8)
- Gabriele Gerbino (@GGabriele)
notes:
- Sometimes C(transport=nxapi) may cause a timeout error.
options:
checkpoint_file:
description:
- Name of checkpoint file to create. Mutually exclusive
with rollback_to.
required: false
default: null
rollback_to:
description:
- Name of checkpoint file to rollback to. Mutually exclusive
with checkpoint_file.
required: false
default: null
'''
EXAMPLES = '''
- nxos_rollback:
checkpoint_file: backup.cfg
username: "{{ un }}"
password: "{{ pwd }}"
host: "{{ inventory_hostname }}"
- nxos_rollback:
rollback_to: backup.cfg
username: "{{ un }}"
password: "{{ pwd }}"
host: "{{ inventory_hostname }}"
'''
RETURN = '''
filename:
description: The filename of the checkpoint/rollback file.
returned: success
type: string
sample: 'backup.cfg'
status:
description: Which operation took place and whether it was successful.
returned: success
type: string
sample: 'rollback executed'
'''
from ansible.module_utils.nxos import nxos_argument_spec, run_commands
from ansible.module_utils.basic import AnsibleModule
def checkpoint(filename, module):
commands = ['terminal dont-ask', 'checkpoint file %s' % filename]
run_commands(module, commands)
def rollback(filename, module):
commands = ['rollback running-config file %s' % filename]
run_commands(module, commands)
def main():
argument_spec = dict(
checkpoint_file=dict(required=False),
rollback_to=dict(required=False),
include_defaults=dict(default=True),
config=dict(),
save=dict(type='bool', default=False)
)
argument_spec.update(nxos_argument_spec)
module = AnsibleModule(argument_spec=argument_spec,
mutually_exclusive=[['checkpoint_file',
'rollback_to']],
supports_check_mode=False)
checkpoint_file = module.params['checkpoint_file']
rollback_to = module.params['rollback_to']
status = None
filename = None
changed = False
if checkpoint_file:
checkpoint(checkpoint_file, module)
status = 'checkpoint file created'
elif rollback_to:
rollback(rollback_to, module)
status = 'rollback executed'
changed = True
filename = rollback_to or checkpoint_file
module.exit_json(changed=changed, status=status, filename=filename)
if __name__ == '__main__':
main()
|
gpl-3.0
|
mandx/pyrax
|
tests/unit/test_cloud_blockstorage.py
|
10
|
21816
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import random
import unittest
from mock import patch
from mock import MagicMock as Mock
import pyrax.cloudblockstorage
from pyrax.cloudblockstorage import CloudBlockStorageClient
from pyrax.cloudblockstorage import CloudBlockStorageVolume
from pyrax.cloudblockstorage import CloudBlockStorageVolumeType
from pyrax.cloudblockstorage import CloudBlockStorageSnapshot
from pyrax.cloudblockstorage import CloudBlockStorageSnapshotManager
from pyrax.cloudblockstorage import _resolve_id
from pyrax.cloudblockstorage import _resolve_name
from pyrax.cloudblockstorage import assure_volume
from pyrax.cloudblockstorage import assure_snapshot
import pyrax.exceptions as exc
from pyrax.manager import BaseManager
import pyrax.utils as utils
from pyrax import fakes
example_uri = "http://example.com"
class CloudBlockStorageTest(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(CloudBlockStorageTest, self).__init__(*args, **kwargs)
def setUp(self):
self.client = fakes.FakeBlockStorageClient()
self.volume = fakes.FakeBlockStorageVolume()
self.snapshot = fakes.FakeBlockStorageSnapshot()
def tearDown(self):
pass
def test_resolve_id(self):
target = "test_id"
class Obj_with_id(object):
id = target
obj = Obj_with_id()
self.assertEqual(_resolve_id(obj), target)
self.assertEqual(_resolve_id(obj), target)
self.assertEqual(_resolve_id(obj.id), target)
def test_resolve_name(self):
target = "test_name"
class Obj_with_name(object):
name = target
obj = Obj_with_name()
self.assertEqual(_resolve_name(obj), target)
self.assertEqual(_resolve_name(obj), target)
self.assertEqual(_resolve_name(obj.name), target)
def test_assure_volume(self):
class TestClient(object):
_manager = fakes.FakeBlockStorageManager()
@assure_volume
def test_method(self, volume):
return volume
client = TestClient()
client._manager.get = Mock(return_value=self.volume)
# Pass the volume
ret = client.test_method(self.volume)
self.assertTrue(ret is self.volume)
# Pass the ID
ret = client.test_method(self.volume.id)
self.assertTrue(ret is self.volume)
def test_assure_snapshot(self):
class TestClient(object):
_snapshot_manager = fakes.FakeSnapshotManager()
@assure_snapshot
def test_method(self, snapshot):
return snapshot
client = TestClient()
client._snapshot_manager.get = Mock(return_value=self.snapshot)
# Pass the snapshot
ret = client.test_method(self.snapshot)
self.assertTrue(ret is self.snapshot)
# Pass the ID
ret = client.test_method(self.snapshot.id)
self.assertTrue(ret is self.snapshot)
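# The two tests above exercise the decorator contract: callers may pass either
# the resource object itself or just its ID, and the decorated method always
# receives the resolved object from the corresponding manager.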
def test_create_volume(self):
mgr = fakes.FakeManager()
mgr.api.region_name = "FAKE"
sav = pyrax.connect_to_cloudservers
fakenovavol = utils.random_unicode()
class FakeVol(object):
def __init__(self, *args, **kwargs):
self.volumes = fakenovavol
pyrax.connect_to_cloudservers = Mock(return_value=FakeVol())
vol = CloudBlockStorageVolume(mgr, {})
self.assertTrue(isinstance(vol, CloudBlockStorageVolume))
self.assertEqual(vol._nova_volumes, fakenovavol)
pyrax.connect_to_cloudservers = sav
def test_attach_to_instance(self):
vol = self.volume
inst = fakes.FakeServer()
mp = utils.random_unicode()
vol._nova_volumes.create_server_volume = Mock(return_value=vol)
vol.attach_to_instance(inst, mp)
vol._nova_volumes.create_server_volume.assert_called_once_with(inst.id,
vol.id, mp)
def test_attach_to_instance_fail(self):
vol = self.volume
inst = fakes.FakeServer()
mp = utils.random_unicode()
vol._nova_volumes.create_server_volume = Mock(
side_effect=Exception("test"))
self.assertRaises(exc.VolumeAttachmentFailed, vol.attach_to_instance,
inst, mp)
def test_detach_from_instance(self):
vol = self.volume
srv_id = utils.random_unicode()
att_id = utils.random_unicode()
vol.attachments = [{"server_id": srv_id, "id": att_id}]
vol._nova_volumes.delete_server_volume = Mock()
vol.detach()
vol._nova_volumes.delete_server_volume.assert_called_once_with(srv_id,
att_id)
def test_detach_from_instance_fail(self):
vol = self.volume
srv_id = utils.random_unicode()
att_id = utils.random_unicode()
vol.attachments = [{"server_id": srv_id, "id": att_id}]
vol._nova_volumes.delete_server_volume = Mock(
side_effect=Exception("test"))
self.assertRaises(exc.VolumeDetachmentFailed, vol.detach)
def test_detach_from_instance_no_attachment(self):
vol = self.volume
srv_id = utils.random_unicode()
att_id = utils.random_unicode()
vol.attachments = []
vol._nova_volumes.delete_server_volume = Mock()
ret = vol.detach()
self.assertTrue(ret is None)
self.assertFalse(vol._nova_volumes.delete_server_volume.called)
def test_create_snapshot(self):
vol = self.volume
vol.manager.create_snapshot = Mock()
name = utils.random_unicode()
desc = utils.random_unicode()
vol.create_snapshot(name=name, description=desc, force=False)
vol.manager.create_snapshot.assert_called_once_with(volume=vol,
name=name, description=desc, force=False)
def test_create_snapshot_bad_request(self):
vol = self.volume
sav = BaseManager.create
BaseManager.create = Mock(side_effect=exc.BadRequest(
"Invalid volume: must be available"))
name = utils.random_unicode()
desc = utils.random_unicode()
self.assertRaises(exc.VolumeNotAvailable, vol.create_snapshot,
name=name, description=desc, force=False)
BaseManager.create = sav
def test_create_snapshot_bad_request_other(self):
vol = self.volume
sav = BaseManager.create
BaseManager.create = Mock(side_effect=exc.BadRequest("FAKE"))
name = utils.random_unicode()
desc = utils.random_unicode()
self.assertRaises(exc.BadRequest, vol.create_snapshot,
name=name, description=desc, force=False)
BaseManager.create = sav
def test_vol_update_volume(self):
vol = self.volume
mgr = vol.manager
mgr.update = Mock()
nm = utils.random_unicode()
desc = utils.random_unicode()
vol.update(display_name=nm, display_description=desc)
mgr.update.assert_called_once_with(vol, display_name=nm,
display_description=desc)
def test_vol_rename(self):
vol = self.volume
nm = utils.random_unicode()
vol.update = Mock()
vol.rename(nm)
vol.update.assert_called_once_with(display_name=nm)
def test_mgr_update_volume(self):
clt = self.client
vol = self.volume
mgr = clt._manager
mgr.api.method_put = Mock(return_value=(None, None))
name = utils.random_unicode()
desc = utils.random_unicode()
exp_uri = "/%s/%s" % (mgr.uri_base, vol.id)
exp_body = {"volume": {"display_name": name,
"display_description": desc}}
mgr.update(vol, display_name=name, display_description=desc)
mgr.api.method_put.assert_called_once_with(exp_uri, body=exp_body)
def test_mgr_update_volume_empty(self):
clt = self.client
vol = self.volume
mgr = clt._manager
mgr.api.method_put = Mock(return_value=(None, None))
mgr.update(vol)
self.assertEqual(mgr.api.method_put.call_count, 0)
def test_list_types(self):
clt = self.client
clt._types_manager.list = Mock()
clt.list_types()
clt._types_manager.list.assert_called_once_with()
def test_list_snapshots(self):
clt = self.client
clt._snapshot_manager.list = Mock()
clt.list_snapshots()
clt._snapshot_manager.list.assert_called_once_with()
def test_vol_list_snapshots(self):
vol = self.volume
vol.manager.list_snapshots = Mock()
vol.list_snapshots()
vol.manager.list_snapshots.assert_called_once_with()
def test_vol_mgr_list_snapshots(self):
vol = self.volume
mgr = vol.manager
mgr.api.list_snapshots = Mock()
mgr.list_snapshots()
mgr.api.list_snapshots.assert_called_once_with()
def test_create_body_volume_bad_size(self):
mgr = self.client._manager
self.assertRaises(exc.InvalidSize, mgr._create_body, "name",
size='foo')
def test_create_volume_bad_clone_size(self):
mgr = self.client._manager
mgr._create = Mock(side_effect=exc.BadRequest(400,
"Clones currently must be >= original volume size"))
self.assertRaises(exc.VolumeCloneTooSmall, mgr.create, "name",
size=100, clone_id=utils.random_unicode())
def test_create_volume_fail_other(self):
mgr = self.client._manager
mgr._create = Mock(side_effect=exc.BadRequest(400, "FAKE"))
self.assertRaises(exc.BadRequest, mgr.create, "name",
size=100, clone_id=utils.random_unicode())
def test_create_body_volume(self):
mgr = self.client._manager
size = random.randint(100, 1024)
name = utils.random_unicode()
snapshot_id = utils.random_unicode()
clone_id = utils.random_unicode()
display_description = None
volume_type = None
metadata = None
availability_zone = utils.random_unicode()
fake_body = {"volume": {
"size": size,
"snapshot_id": snapshot_id,
"source_volid": clone_id,
"display_name": name,
"display_description": "",
"volume_type": "SATA",
"metadata": {},
"availability_zone": availability_zone,
"imageRef": None,
}}
ret = mgr._create_body(name=name, size=size, volume_type=volume_type,
description=display_description, metadata=metadata,
snapshot_id=snapshot_id, clone_id=clone_id,
availability_zone=availability_zone)
self.assertEqual(ret, fake_body)
def test_create_body_volume_defaults(self):
mgr = self.client._manager
size = random.randint(100, 1024)
name = utils.random_unicode()
snapshot_id = utils.random_unicode()
clone_id = utils.random_unicode()
display_description = utils.random_unicode()
volume_type = utils.random_unicode()
metadata = {}
availability_zone = utils.random_unicode()
fake_body = {"volume": {
"size": size,
"snapshot_id": snapshot_id,
"source_volid": clone_id,
"display_name": name,
"display_description": display_description,
"volume_type": volume_type,
"metadata": metadata,
"availability_zone": availability_zone,
"imageRef": None,
}}
ret = mgr._create_body(name=name, size=size, volume_type=volume_type,
description=display_description, metadata=metadata,
snapshot_id=snapshot_id, clone_id=clone_id,
availability_zone=availability_zone)
self.assertEqual(ret, fake_body)
def test_create_body_snapshot(self):
mgr = self.client._snapshot_manager
vol = self.volume
name = utils.random_unicode()
display_description = utils.random_unicode()
force = True
fake_body = {"snapshot": {
"display_name": name,
"display_description": display_description,
"volume_id": vol.id,
"force": str(force).lower(),
}}
ret = mgr._create_body(name=name, description=display_description,
volume=vol, force=force)
self.assertEqual(ret, fake_body)
def test_client_attach_to_instance(self):
clt = self.client
vol = self.volume
inst = fakes.FakeServer()
mp = utils.random_unicode()
vol.attach_to_instance = Mock()
clt.attach_to_instance(vol, inst, mp)
vol.attach_to_instance.assert_called_once_with(inst, mp)
def test_client_detach(self):
clt = self.client
vol = self.volume
vol.detach = Mock()
clt.detach(vol)
vol.detach.assert_called_once_with()
def test_client_delete_volume(self):
clt = self.client
vol = self.volume
vol.delete = Mock()
clt.delete_volume(vol)
vol.delete.assert_called_once_with(force=False)
def test_client_delete_volume_not_available(self):
clt = self.client
vol = self.volume
vol.manager.delete = Mock(side_effect=exc.VolumeNotAvailable(""))
self.assertRaises(exc.VolumeNotAvailable, clt.delete_volume, vol)
def test_client_delete_volume_force(self):
clt = self.client
vol = self.volume
vol.manager.delete = Mock()
vol.detach = Mock()
vol.delete_all_snapshots = Mock()
clt.delete_volume(vol, force=True)
vol.manager.delete.assert_called_once_with(vol)
vol.detach.assert_called_once_with()
vol.delete_all_snapshots.assert_called_once_with()
def test_volume_delete_all_snapshots(self):
vol = self.volume
snap = fakes.FakeBlockStorageSnapshot()
snap.delete = Mock()
vol.list_snapshots = Mock(return_value=[snap])
vol.delete_all_snapshots()
snap.delete.assert_called_once_with()
def test_client_snap_mgr_create_snapshot(self):
clt = self.client
vol = self.volume
name = utils.random_ascii()
description = utils.random_ascii()
mgr = clt._snapshot_manager
snap = fakes.FakeBlockStorageSnapshot()
mgr._create = Mock(return_value=snap)
ret = mgr.create(name, vol, description=description, force=True)
self.assertTrue(isinstance(ret, CloudBlockStorageSnapshot))
def test_client_create_snapshot(self):
clt = self.client
vol = self.volume
name = utils.random_unicode()
description = utils.random_unicode()
clt._snapshot_manager.create = Mock()
clt.create_snapshot(vol, name=name, description=description,
force=True)
clt._snapshot_manager.create.assert_called_once_with(volume=vol,
name=name, description=description, force=True)
def test_client_create_snapshot_not_available(self):
clt = self.client
vol = self.volume
name = utils.random_unicode()
description = utils.random_unicode()
cli_exc = exc.ClientException(409, "Request conflicts with in-progress")
sav = BaseManager.create
BaseManager.create = Mock(side_effect=cli_exc)
self.assertRaises(exc.VolumeNotAvailable, clt.create_snapshot, vol,
name=name, description=description)
BaseManager.create = sav
def test_client_create_snapshot_409_other(self):
clt = self.client
vol = self.volume
name = utils.random_unicode()
description = utils.random_unicode()
cli_exc = exc.ClientException(409, "FAKE")
sav = BaseManager.create
BaseManager.create = Mock(side_effect=cli_exc)
self.assertRaises(exc.ClientException, clt.create_snapshot, vol,
name=name, description=description)
BaseManager.create = sav
def test_client_create_snapshot_not_409(self):
clt = self.client
vol = self.volume
name = utils.random_unicode()
description = utils.random_unicode()
cli_exc = exc.ClientException(420, "FAKE")
sav = BaseManager.create
BaseManager.create = Mock(side_effect=cli_exc)
self.assertRaises(exc.ClientException, clt.create_snapshot, vol,
name=name, description=description)
BaseManager.create = sav
def test_client_delete_snapshot(self):
clt = self.client
snap = fakes.FakeBlockStorageSnapshot()
snap.delete = Mock()
clt.delete_snapshot(snap)
snap.delete.assert_called_once_with()
def test_snapshot_delete(self):
snap = self.snapshot
snap.manager.delete = Mock()
snap.delete()
snap.manager.delete.assert_called_once_with(snap)
def test_snapshot_delete_unavailable(self):
snap = self.snapshot
snap.status = "busy"
self.assertRaises(exc.SnapshotNotAvailable, snap.delete)
def test_snapshot_delete_retry(self):
snap = self.snapshot
snap.manager.delete = Mock(side_effect=exc.ClientException(
"Request conflicts with in-progress 'DELETE"))
pyrax.cloudblockstorage.RETRY_INTERVAL = 0.1
self.assertRaises(exc.ClientException, snap.delete)
def test_snapshot_update(self):
snap = self.snapshot
snap.manager.update = Mock()
nm = utils.random_unicode()
desc = utils.random_unicode()
snap.update(display_name=nm, display_description=desc)
snap.manager.update.assert_called_once_with(snap, display_name=nm,
display_description=desc)
def test_snapshot_rename(self):
snap = self.snapshot
snap.update = Mock()
nm = utils.random_unicode()
snap.rename(nm)
snap.update.assert_called_once_with(display_name=nm)
def test_volume_name_property(self):
vol = self.volume
nm = utils.random_unicode()
vol.display_name = nm
self.assertEqual(vol.name, vol.display_name)
nm = utils.random_unicode()
vol.name = nm
self.assertEqual(vol.name, vol.display_name)
def test_volume_description_property(self):
vol = self.volume
nm = utils.random_unicode()
vol.display_description = nm
self.assertEqual(vol.description, vol.display_description)
nm = utils.random_unicode()
vol.description = nm
self.assertEqual(vol.description, vol.display_description)
def test_snapshot_name_property(self):
snap = self.snapshot
nm = utils.random_unicode()
snap.display_name = nm
self.assertEqual(snap.name, snap.display_name)
nm = utils.random_unicode()
snap.name = nm
self.assertEqual(snap.name, snap.display_name)
def test_snapshot_description_property(self):
snap = self.snapshot
nm = utils.random_unicode()
snap.display_description = nm
self.assertEqual(snap.description, snap.display_description)
nm = utils.random_unicode()
snap.description = nm
self.assertEqual(snap.description, snap.display_description)
def test_mgr_update_snapshot(self):
clt = self.client
snap = self.snapshot
mgr = clt._snapshot_manager
mgr.api.method_put = Mock(return_value=(None, None))
name = utils.random_unicode()
desc = utils.random_unicode()
exp_uri = "/%s/%s" % (mgr.uri_base, snap.id)
exp_body = {"snapshot": {"display_name": name,
"display_description": desc}}
mgr.update(snap, display_name=name, display_description=desc)
mgr.api.method_put.assert_called_once_with(exp_uri, body=exp_body)
def test_mgr_update_snapshot_empty(self):
clt = self.client
snap = self.snapshot
mgr = clt._snapshot_manager
mgr.api.method_put = Mock(return_value=(None, None))
mgr.update(snap)
self.assertEqual(mgr.api.method_put.call_count, 0)
def test_clt_update_volume(self):
clt = self.client
vol = self.volume
name = utils.random_unicode()
desc = utils.random_unicode()
vol.update = Mock()
clt.update(vol, display_name=name, display_description=desc)
vol.update.assert_called_once_with(display_name=name,
display_description=desc)
def test_clt_rename(self):
clt = self.client
vol = self.volume
nm = utils.random_unicode()
clt.update = Mock()
clt.rename(vol, nm)
clt.update.assert_called_once_with(vol, display_name=nm)
def test_clt_update_snapshot(self):
clt = self.client
snap = self.snapshot
name = utils.random_unicode()
desc = utils.random_unicode()
snap.update = Mock()
clt.update_snapshot(snap, display_name=name, display_description=desc)
snap.update.assert_called_once_with(display_name=name,
display_description=desc)
def test_clt_rename_snapshot(self):
clt = self.client
snap = self.snapshot
nm = utils.random_unicode()
clt.update_snapshot = Mock()
clt.rename_snapshot(snap, nm)
clt.update_snapshot.assert_called_once_with(snap, display_name=nm)
def test_get_snapshot(self):
clt = self.client
mgr = clt._snapshot_manager
mgr.get = Mock()
snap = utils.random_unicode()
clt.get_snapshot(snap)
mgr.get.assert_called_once_with(snap)
if __name__ == "__main__":
unittest.main()
|
apache-2.0
|
browseinfo/odoo_saas3_nicolas
|
addons/purchase/report/request_quotation.py
|
3
|
1571
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.report import report_sxw
class request_quotation(report_sxw.rml_parse):
def __init__(self, cr, uid, name, context):
super(request_quotation, self).__init__(cr, uid, name, context=context)
self.localcontext.update({
'time': time,
'user': self.pool.get('res.users').browse(cr, uid, uid, context)
})
report_sxw.report_sxw('report.purchase.quotation','purchase.order','addons/purchase/report/request_quotation.rml',parser=request_quotation)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
ShashaQin/erpnext
|
erpnext/setup/doctype/authorization_rule/authorization_rule.py
|
121
|
2292
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import cstr, flt
from frappe import _, msgprint
from frappe.model.document import Document
class AuthorizationRule(Document):
def check_duplicate_entry(self):
exists = frappe.db.sql("""select name, docstatus from `tabAuthorization Rule`
where transaction = %s and based_on = %s and system_user = %s
and system_role = %s and approving_user = %s and approving_role = %s
and to_emp =%s and to_designation=%s and name != %s""",
(self.transaction, self.based_on, cstr(self.system_user),
cstr(self.system_role), cstr(self.approving_user),
cstr(self.approving_role), cstr(self.to_emp),
cstr(self.to_designation), self.name))
auth_exists = exists and exists[0][0] or ''
if auth_exists:
frappe.throw(_("Duplicate Entry. Please check Authorization Rule {0}").format(auth_exists))
def validate_rule(self):
if self.transaction != 'Appraisal':
if not self.approving_role and not self.approving_user:
frappe.throw(_("Please enter Approving Role or Approving User"))
elif self.system_user and self.system_user == self.approving_user:
frappe.throw(_("Approving User cannot be same as user the rule is Applicable To"))
elif self.system_role and self.system_role == self.approving_role:
frappe.throw(_("Approving Role cannot be same as role the rule is Applicable To"))
elif self.transaction in ['Purchase Order', 'Purchase Receipt', \
'Purchase Invoice', 'Stock Entry'] and self.based_on \
in ['Average Discount', 'Customerwise Discount', 'Itemwise Discount']:
frappe.throw(_("Cannot set authorization on basis of Discount for {0}").format(self.transaction))
elif self.based_on == 'Average Discount' and flt(self.value) > 100.00:
frappe.throw(_("Discount must be less than 100"))
elif self.based_on == 'Customerwise Discount' and not self.master_name:
frappe.throw(_("Customer required for 'Customerwise Discount'"))
else:
if self.transaction == 'Appraisal':
self.based_on = "Not Applicable"
def validate(self):
self.check_duplicate_entry()
self.validate_rule()
if not self.value: self.value = 0.0
|
agpl-3.0
|
ChemiKhazi/Sprytile
|
rx/concurrency/virtualtimescheduler.py
|
2
|
4699
|
import logging
from rx.internal import PriorityQueue, ArgumentOutOfRangeException
from .schedulerbase import SchedulerBase
from .scheduleditem import ScheduledItem
from .scheduleperiodic import SchedulePeriodic
log = logging.getLogger("Rx")
class VirtualTimeScheduler(SchedulerBase):
"""Virtual Scheduler. This scheduler should work with either
datetime/timespan or ticks as int/int"""
def __init__(self, initial_clock=0):
"""Creates a new virtual time scheduler with the specified initial
clock value and absolute time comparer.
Keyword arguments:
initial_clock -- Initial value for the clock.
comparer -- Comparer to determine causality of events based on absolute
time.
"""
self.clock = initial_clock
self.is_enabled = False
self.queue = PriorityQueue(1024)
super(VirtualTimeScheduler, self).__init__()
@property
def now(self):
"""Gets the schedulers absolute time clock value as datetime offset."""
return self.to_datetime(self.clock)
def schedule(self, action, state=None):
"""Schedules an action to be executed."""
return self.schedule_absolute(self.clock, action, state)
def schedule_relative(self, duetime, action, state=None):
"""Schedules an action to be executed at duetime. Return the disposable
object used to cancel the scheduled action (best effort)
Keyword arguments:
duetime -- Relative time after which to execute the action.
action -- Action to be executed.
state -- [Optional] State passed to the action to be executed."""
runat = self.add(self.clock, self.to_relative(duetime))
return self.schedule_absolute(duetime=runat, action=action, state=state)
def schedule_absolute(self, duetime, action, state=None):
"""Schedules an action to be executed at duetime."""
si = ScheduledItem(self, state, action, duetime)
self.queue.enqueue(si)
return si.disposable
def schedule_periodic(self, period, action, state=None):
scheduler = SchedulePeriodic(self, period, action, state)
return scheduler.start()
def start(self):
"""Starts the virtual time scheduler."""
if self.is_enabled:
return
self.is_enabled = True
while self.is_enabled:
next = self.get_next()
if not next:
break
if next.duetime > self.clock:
self.clock = next.duetime
next.invoke()
self.is_enabled = False
def stop(self):
"""Stops the virtual time scheduler."""
self.is_enabled = False
def advance_to(self, time):
"""Advances the schedulers clock to the specified time, running all
work til that point.
Keyword arguments:
time -- Absolute time to advance the schedulers clock to."""
if self.clock > time:
raise ArgumentOutOfRangeException()
if self.clock == time:
return
if self.is_enabled:
return
self.is_enabled = True
while self.is_enabled:
next = self.get_next()
if not next:
break
if next.duetime > time:
self.queue.enqueue(next)
break
if next.duetime > self.clock:
self.clock = next.duetime
next.invoke()
self.is_enabled = False
self.clock = time
def advance_by(self, time):
"""Advances the schedulers clock by the specified relative time,
running all work scheduled for that timespan.
Keyword arguments:
time -- Relative time to advance the schedulers clock by."""
log.debug("VirtualTimeScheduler.advance_by(time=%s)", time)
dt = self.add(self.clock, time)
if self.clock > dt:
raise ArgumentOutOfRangeException()
return self.advance_to(dt)
def sleep(self, time):
"""Advances the schedulers clock by the specified relative time.
Keyword arguments:
time -- Relative time to advance the schedulers clock by."""
dt = self.add(self.clock, time)
if self.clock > dt:
raise ArgumentOutOfRangeException()
self.clock = dt
def get_next(self):
"""Returns the next scheduled item to be executed."""
while len(self.queue):
next = self.queue.dequeue()
if not next.is_cancelled():
return next
return None
@staticmethod
def add(absolute, relative):
raise NotImplementedError
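# Illustrative usage sketch (comments only; the subclass name is hypothetical,
# because a concrete scheduler has to supply add()/to_datetime()/to_relative(),
# and the action signature assumed here is the usual RxPY (scheduler, state)):
#
#   scheduler = MyTickScheduler()  # hypothetical VirtualTimeScheduler subclass
#   scheduler.schedule_relative(100, lambda sch, state: print("fired"))
#   scheduler.advance_by(100)      # the clock jumps forward and due work runs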
|
mit
|
alqfahad/odoo
|
addons/account/wizard/account_tax_chart.py
|
385
|
3247
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class account_tax_chart(osv.osv_memory):
"""
For Chart of taxes
"""
_name = "account.tax.chart"
_description = "Account tax chart"
_columns = {
'period_id': fields.many2one('account.period', \
'Period', \
),
'target_move': fields.selection([('posted', 'All Posted Entries'),
('all', 'All Entries'),
], 'Target Moves', required=True),
}
def _get_period(self, cr, uid, context=None):
"""Return default period value"""
period_ids = self.pool.get('account.period').find(cr, uid, context=context)
return period_ids and period_ids[0] or False
def account_tax_chart_open_window(self, cr, uid, ids, context=None):
"""
Opens chart of Accounts
@param cr: the current row, from the database cursor,
@param uid: the current user's ID for security checks,
@param ids: list of account chart IDs
@return: dictionary describing the action that opens the account chart window for the given fiscal year, with all entries or only posted entries
"""
mod_obj = self.pool.get('ir.model.data')
act_obj = self.pool.get('ir.actions.act_window')
if context is None:
context = {}
data = self.browse(cr, uid, ids, context=context)[0]
result = mod_obj.get_object_reference(cr, uid, 'account', 'action_tax_code_tree')
id = result and result[1] or False
result = act_obj.read(cr, uid, [id], context=context)[0]
if data.period_id:
result['context'] = str({'period_id': data.period_id.id, \
'fiscalyear_id': data.period_id.fiscalyear_id.id, \
'state': data.target_move})
period_code = data.period_id.code
result['name'] += period_code and (':' + period_code) or ''
else:
result['context'] = str({'state': data.target_move})
return result
_defaults = {
'period_id': _get_period,
'target_move': 'posted'
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
kuri65536/python-for-android
|
python-build/python-libs/gdata/src/gdata/tlslite/integration/POP3_TLS.py
|
271
|
5466
|
"""TLS Lite + poplib."""
import socket
from poplib import POP3
from gdata.tlslite.TLSConnection import TLSConnection
from gdata.tlslite.integration.ClientHelper import ClientHelper
# POP TLS PORT
POP3_TLS_PORT = 995
class POP3_TLS(POP3, ClientHelper):
"""This class extends L{poplib.POP3} with TLS support."""
def __init__(self, host, port = POP3_TLS_PORT,
username=None, password=None, sharedKey=None,
certChain=None, privateKey=None,
cryptoID=None, protocol=None,
x509Fingerprint=None,
x509TrustList=None, x509CommonName=None,
settings=None):
"""Create a new POP3_TLS.
For client authentication, use one of these argument
combinations:
- username, password (SRP)
- username, sharedKey (shared-key)
- certChain, privateKey (certificate)
For server authentication, you can either rely on the
implicit mutual authentication performed by SRP or
shared-keys, or you can do certificate-based server
authentication with one of these argument combinations:
- cryptoID[, protocol] (requires cryptoIDlib)
- x509Fingerprint
- x509TrustList[, x509CommonName] (requires cryptlib_py)
Certificate-based server authentication is compatible with
SRP or certificate-based client authentication. It is
not compatible with shared-keys.
The caller should be prepared to handle TLS-specific
exceptions. See the client handshake functions in
L{tlslite.TLSConnection.TLSConnection} for details on which
exceptions might be raised.
@type host: str
@param host: Server to connect to.
@type port: int
@param port: Port to connect to.
@type username: str
@param username: SRP or shared-key username. Requires the
'password' or 'sharedKey' argument.
@type password: str
@param password: SRP password for mutual authentication.
Requires the 'username' argument.
@type sharedKey: str
@param sharedKey: Shared key for mutual authentication.
Requires the 'username' argument.
@type certChain: L{tlslite.X509CertChain.X509CertChain} or
L{cryptoIDlib.CertChain.CertChain}
@param certChain: Certificate chain for client authentication.
Requires the 'privateKey' argument. Excludes the SRP or
shared-key related arguments.
@type privateKey: L{tlslite.utils.RSAKey.RSAKey}
@param privateKey: Private key for client authentication.
Requires the 'certChain' argument. Excludes the SRP or
shared-key related arguments.
@type cryptoID: str
@param cryptoID: cryptoID for server authentication. Mutually
exclusive with the 'x509...' arguments.
@type protocol: str
@param protocol: cryptoID protocol URI for server
authentication. Requires the 'cryptoID' argument.
@type x509Fingerprint: str
@param x509Fingerprint: Hex-encoded X.509 fingerprint for
server authentication. Mutually exclusive with the 'cryptoID'
and 'x509TrustList' arguments.
@type x509TrustList: list of L{tlslite.X509.X509}
@param x509TrustList: A list of trusted root certificates. The
other party must present a certificate chain which extends to
one of these root certificates. The cryptlib_py module must be
installed to use this parameter. Mutually exclusive with the
'cryptoID' and 'x509Fingerprint' arguments.
@type x509CommonName: str
@param x509CommonName: The end-entity certificate's 'CN' field
must match this value. For a web server, this is typically a
server name such as 'www.amazon.com'. Mutually exclusive with
the 'cryptoID' and 'x509Fingerprint' arguments. Requires the
'x509TrustList' argument.
@type settings: L{tlslite.HandshakeSettings.HandshakeSettings}
@param settings: Various settings which can be used to control
the ciphersuites, certificate types, and SSL/TLS versions
offered by the client.
"""
self.host = host
self.port = port
msg = "getaddrinfo returns an empty list"
self.sock = None
for res in socket.getaddrinfo(self.host, self.port, 0, socket.SOCK_STREAM):
af, socktype, proto, canonname, sa = res
try:
self.sock = socket.socket(af, socktype, proto)
self.sock.connect(sa)
except socket.error, msg:
if self.sock:
self.sock.close()
self.sock = None
continue
break
if not self.sock:
raise socket.error, msg
### New code below (all else copied from poplib)
ClientHelper.__init__(self,
username, password, sharedKey,
certChain, privateKey,
cryptoID, protocol,
x509Fingerprint,
x509TrustList, x509CommonName,
settings)
self.sock = TLSConnection(self.sock)
self.sock.closeSocket = True
ClientHelper._handshake(self, self.sock)
###
self.file = self.sock.makefile('rb')
self._debugging = 0
self.welcome = self._getresp()
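# Illustrative connection sketch (comments only; the host name and credentials
# are placeholders, not values this module defines):
#
#   pop = POP3_TLS('mail.example.com', username='alice', password='secret')
#   print(pop.getwelcome())
#   pop.quit()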
|
apache-2.0
|
TeMPO-Consulting/mediadrop
|
mediacore/model/settings.py
|
1
|
4239
|
# This file is a part of MediaDrop (http://www.mediadrop.net),
# Copyright 2009-2013 MediaCore Inc., Felix Schwarz and other contributors.
# For the exact contribution history, see the git revision log.
# The source code contained in this file is licensed under the GPLv3 or
# (at your option) any later version.
# See LICENSE.txt in the main project directory, for more information.
"""
Settings Model
A very rudimentary settings implementation which is intended to store our
non-mission-critical options which can be edited via the admin UI.
.. todo:
Rather than fetch one option at a time, load all settings into an object
with attribute-style access.
"""
from sqlalchemy import Table, ForeignKey, Column
from sqlalchemy.exc import IntegrityError, ProgrammingError
from sqlalchemy.types import Unicode, UnicodeText, Integer, Boolean, Float
from sqlalchemy.orm import mapper, relation, backref, synonym, interfaces, validates
from urlparse import urlparse
from mediacore.model.meta import DBSession, metadata
from mediacore.plugin import events
settings = Table('settings', metadata,
Column('id', Integer, autoincrement=True, primary_key=True),
Column('key', Unicode(255), nullable=False, unique=True),
Column('value', UnicodeText),
mysql_engine='InnoDB',
mysql_charset='utf8',
)
multisettings = Table('settings_multi', metadata,
Column('id', Integer, autoincrement=True, primary_key=True),
Column('key', Unicode(255), nullable=False),
Column('value', UnicodeText, nullable=False),
mysql_engine='InnoDB',
mysql_charset='utf8',
)
class Setting(object):
"""
A Single Setting
"""
query = DBSession.query_property()
def __init__(self, key=None, value=None):
self.key = key or None
self.value = value or None
def __repr__(self):
return '<Setting: %s = %r>' % (self.key, self.value)
def __unicode__(self):
return self.value
class MultiSetting(object):
"""
A MultiSetting
"""
query = DBSession.query_property()
def __init__(self, key=None, value=None):
self.key = key or None
self.value = value or None
def __repr__(self):
return '<MultiSetting: %s = %r>' % (self.key, self.value)
def __unicode__(self):
return self.value
mapper(Setting, settings, extension=events.MapperObserver(events.Setting))
mapper(MultiSetting, multisettings, extension=events.MapperObserver(events.MultiSetting))
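# Illustrative sketch (not part of the original module): creating and reading a
# Setting through the mapped classes above. The key used here is hypothetical.
#
#     DBSession.add(Setting(u'my_plugin_greeting', u'hello'))
#     DBSession.commit()
#     greeting = Setting.query.filter(Setting.key == u'my_plugin_greeting').one()
#     print greeting.value  # u'hello'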
def insert_settings(defaults):
"""Insert the given setting if they don't exist yet.
XXX: Does not include any support for MultiSetting. This approach
won't work for that. We'll need to use sqlalchemy-migrate.
:type defaults: list
:param defaults: Key and value pairs
:rtype: list
:returns: Any settings that have just been created.
"""
inserted = []
try:
settings_query = DBSession.query(Setting.key)\
.filter(Setting.key.in_([key for key, value in defaults]))
existing_settings = set(x[0] for x in settings_query)
except ProgrammingError:
# If we are running paster setup-app on a fresh database with a
# plugin which tries to use this function every time the
# Environment.loaded event fires, the settings table will not
        # exist and this exception will be thrown, but it's safe to ignore.
# The settings will be created the next time the event fires,
# which will likely be the first time the app server starts up.
return inserted
for key, value in defaults:
if key in existing_settings:
continue
transaction = DBSession.begin_nested()
try:
s = Setting(key, value)
DBSession.add(s)
transaction.commit()
inserted.append(s)
except IntegrityError:
transaction.rollback()
if inserted:
DBSession.commit()
return inserted
def fetch_and_create_multi_setting(key, value):
multisettings = MultiSetting.query\
.filter(MultiSetting.key==key)\
.all()
for ms in multisettings:
if ms.value == value:
return ms
ms = MultiSetting(key, value)
DBSession.add(ms)
return ms
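# Illustrative sketch (not part of the original module): how a plugin might seed
# default settings and attach a multi-value setting. The keys are hypothetical.
#
#     created = insert_settings([
#         (u'my_plugin_enabled', u'true'),
#         (u'my_plugin_api_key', u''),
#     ])
#     tag = fetch_and_create_multi_setting(u'my_plugin_tag', u'featured')
#     DBSession.commit()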
|
gpl-3.0
|
JeremyRubin/bitcoin
|
test/functional/p2p_segwit.py
|
1
|
95731
|
#!/usr/bin/env python3
# Copyright (c) 2016-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test segwit transactions and blocks on P2P network."""
import math
import random
import struct
import time
from test_framework.blocktools import create_block, create_coinbase, add_witness_commitment, get_witness_script, WITNESS_COMMITMENT_HEADER
from test_framework.key import ECKey
from test_framework.messages import (
BIP125_SEQUENCE_NUMBER,
CBlock,
CBlockHeader,
CInv,
COutPoint,
CTransaction,
CTxIn,
CTxInWitness,
CTxOut,
CTxWitness,
MAX_BLOCK_BASE_SIZE,
MSG_BLOCK,
MSG_TX,
MSG_WITNESS_FLAG,
NODE_NETWORK,
NODE_WITNESS,
msg_no_witness_block,
msg_getdata,
msg_headers,
msg_inv,
msg_tx,
msg_block,
msg_no_witness_tx,
ser_uint256,
ser_vector,
sha256,
uint256_from_str,
FromHex,
)
from test_framework.mininode import (
P2PInterface,
mininode_lock,
)
from test_framework.script import (
CScript,
CScriptNum,
CScriptOp,
MAX_SCRIPT_ELEMENT_SIZE,
OP_0,
OP_1,
OP_16,
OP_2DROP,
OP_CHECKMULTISIG,
OP_CHECKSIG,
OP_DROP,
OP_DUP,
OP_ELSE,
OP_ENDIF,
OP_EQUAL,
OP_EQUALVERIFY,
OP_HASH160,
OP_IF,
OP_RETURN,
OP_TRUE,
SIGHASH_ALL,
SIGHASH_ANYONECANPAY,
SIGHASH_NONE,
SIGHASH_SINGLE,
SegwitV0SignatureHash,
LegacySignatureHash,
hash160,
)
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
connect_nodes,
disconnect_nodes,
softfork_active,
hex_str_to_bytes,
assert_raises_rpc_error,
)
# The versionbit bit used to signal activation of SegWit
VB_WITNESS_BIT = 1
VB_PERIOD = 144
VB_TOP_BITS = 0x20000000
MAX_SIGOP_COST = 80000
SEGWIT_HEIGHT = 120
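# For reference: blocks that signal segwit via BIP9 in the subtests below set
# nVersion = VB_TOP_BITS | (1 << VB_WITNESS_BIT) == 0x20000002.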
class UTXO():
"""Used to keep track of anyone-can-spend outputs that we can use in the tests."""
def __init__(self, sha256, n, value):
self.sha256 = sha256
self.n = n
self.nValue = value
def get_p2pkh_script(pubkeyhash):
"""Get the script associated with a P2PKH."""
return CScript([CScriptOp(OP_DUP), CScriptOp(OP_HASH160), pubkeyhash, CScriptOp(OP_EQUALVERIFY), CScriptOp(OP_CHECKSIG)])
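# For example, with a 20-byte pubkeyhash the result is the standard 25-byte
# P2PKH script: OP_DUP OP_HASH160 <push 20> <pubkeyhash> OP_EQUALVERIFY OP_CHECKSIG.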
def sign_p2pk_witness_input(script, tx_to, in_idx, hashtype, value, key):
"""Add signature for a P2PK witness program."""
tx_hash = SegwitV0SignatureHash(script, tx_to, in_idx, hashtype, value)
signature = key.sign_ecdsa(tx_hash) + chr(hashtype).encode('latin-1')
tx_to.wit.vtxinwit[in_idx].scriptWitness.stack = [signature, script]
tx_to.rehash()
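# Illustrative usage (mirrors the calls in the uncompressed-pubkey and
# signature-version subtests below; `spend_tx` and `value` are placeholders):
#
#     key = ECKey()
#     key.generate()
#     witness_program = CScript([key.get_pubkey().get_bytes(), CScriptOp(OP_CHECKSIG)])
#     sign_p2pk_witness_input(witness_program, spend_tx, 0, SIGHASH_ALL, value, key)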
def get_virtual_size(witness_block):
"""Calculate the virtual size of a witness block.
Virtual size is base + witness/4."""
base_size = len(witness_block.serialize(with_witness=False))
total_size = len(witness_block.serialize())
# the "+3" is so we round up
vsize = int((3 * base_size + total_size + 3) / 4)
return vsize
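# Worked example of the formula above (illustrative numbers only): a block whose
# stripped serialization is 999,000 bytes and whose full serialization is
# 1,400,000 bytes has vsize = (3 * 999000 + 1400000 + 3) // 4 = 1,099,250,
# which exceeds MAX_BLOCK_BASE_SIZE (1,000,000), so it would be rejected.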
def test_transaction_acceptance(node, p2p, tx, with_witness, accepted, reason=None):
"""Send a transaction to the node and check that it's accepted to the mempool
- Submit the transaction over the p2p interface
- use the getrawmempool rpc to check for acceptance."""
reason = [reason] if reason else []
with node.assert_debug_log(expected_msgs=reason):
p2p.send_and_ping(msg_tx(tx) if with_witness else msg_no_witness_tx(tx))
assert_equal(tx.hash in node.getrawmempool(), accepted)
def test_witness_block(node, p2p, block, accepted, with_witness=True, reason=None):
"""Send a block to the node and check that it's accepted
- Submit the block over the p2p interface
- use the getbestblockhash rpc to check for acceptance."""
reason = [reason] if reason else []
with node.assert_debug_log(expected_msgs=reason):
p2p.send_and_ping(msg_block(block) if with_witness else msg_no_witness_block(block))
assert_equal(node.getbestblockhash() == block.hash, accepted)
class TestP2PConn(P2PInterface):
def __init__(self):
super().__init__()
self.getdataset = set()
# Avoid sending out msg_getdata in the mininode thread as a reply to invs.
# They are not needed and would only lead to races because we send msg_getdata out in the test thread
def on_inv(self, message):
pass
def on_getdata(self, message):
for inv in message.inv:
self.getdataset.add(inv.hash)
def announce_tx_and_wait_for_getdata(self, tx, timeout=60, success=True):
with mininode_lock:
self.last_message.pop("getdata", None)
self.send_message(msg_inv(inv=[CInv(MSG_TX, tx.sha256)]))
if success:
self.wait_for_getdata([tx.sha256], timeout)
else:
time.sleep(timeout)
assert not self.last_message.get("getdata")
def announce_block_and_wait_for_getdata(self, block, use_header, timeout=60):
with mininode_lock:
self.last_message.pop("getdata", None)
self.last_message.pop("getheaders", None)
msg = msg_headers()
msg.headers = [CBlockHeader(block)]
if use_header:
self.send_message(msg)
else:
self.send_message(msg_inv(inv=[CInv(MSG_BLOCK, block.sha256)]))
self.wait_for_getheaders()
self.send_message(msg)
self.wait_for_getdata([block.sha256])
def request_block(self, blockhash, inv_type, timeout=60):
with mininode_lock:
self.last_message.pop("block", None)
self.send_message(msg_getdata(inv=[CInv(inv_type, blockhash)]))
self.wait_for_block(blockhash, timeout)
return self.last_message["block"].block
class SegWitTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 3
# This test tests SegWit both pre and post-activation, so use the normal BIP9 activation.
self.extra_args = [
["-acceptnonstdtxn=1", "-segwitheight={}".format(SEGWIT_HEIGHT), "[email protected]"],
["-acceptnonstdtxn=0", "-segwitheight={}".format(SEGWIT_HEIGHT)],
["-acceptnonstdtxn=1", "-segwitheight=-1"],
]
self.supports_cli = False
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def setup_network(self):
self.setup_nodes()
connect_nodes(self.nodes[0], 1)
connect_nodes(self.nodes[0], 2)
self.sync_all()
# Helper functions
def build_next_block(self, version=4):
"""Build a block on top of node0's tip."""
tip = self.nodes[0].getbestblockhash()
height = self.nodes[0].getblockcount() + 1
block_time = self.nodes[0].getblockheader(tip)["mediantime"] + 1
block = create_block(int(tip, 16), create_coinbase(height), block_time)
block.nVersion = version
block.rehash()
return block
def update_witness_block_with_transactions(self, block, tx_list, nonce=0):
"""Add list of transactions to block, adds witness commitment, then solves."""
block.vtx.extend(tx_list)
add_witness_commitment(block, nonce)
block.solve()
def run_test(self):
# Setup the p2p connections
# self.test_node sets NODE_WITNESS|NODE_NETWORK
self.test_node = self.nodes[0].add_p2p_connection(TestP2PConn(), services=NODE_NETWORK | NODE_WITNESS)
# self.old_node sets only NODE_NETWORK
self.old_node = self.nodes[0].add_p2p_connection(TestP2PConn(), services=NODE_NETWORK)
# self.std_node is for testing node1 (fRequireStandard=true)
self.std_node = self.nodes[1].add_p2p_connection(TestP2PConn(), services=NODE_NETWORK | NODE_WITNESS)
assert self.test_node.nServices & NODE_WITNESS != 0
# Keep a place to store utxo's that can be used in later tests
self.utxo = []
self.log.info("Starting tests before segwit activation")
self.segwit_active = False
self.test_non_witness_transaction()
self.test_v0_outputs_arent_spendable()
self.test_block_relay()
self.test_getblocktemplate_before_lockin()
self.test_unnecessary_witness_before_segwit_activation()
self.test_witness_tx_relay_before_segwit_activation()
self.test_standardness_v0()
self.log.info("Advancing to segwit activation")
self.advance_to_segwit_active()
# Segwit status 'active'
self.test_p2sh_witness()
self.test_witness_commitments()
self.test_block_malleability()
self.test_witness_block_size()
self.test_submit_block()
self.test_extra_witness_data()
self.test_max_witness_push_length()
self.test_max_witness_program_length()
self.test_witness_input_length()
self.test_block_relay()
self.test_tx_relay_after_segwit_activation()
self.test_standardness_v0()
self.test_segwit_versions()
self.test_premature_coinbase_witness_spend()
self.test_uncompressed_pubkey()
self.test_signature_version_1()
self.test_non_standard_witness_blinding()
self.test_non_standard_witness()
self.test_upgrade_after_activation()
self.test_witness_sigops()
self.test_superfluous_witness()
# Individual tests
def subtest(func): # noqa: N805
"""Wraps the subtests for logging and state assertions."""
def func_wrapper(self, *args, **kwargs):
self.log.info("Subtest: {} (Segwit active = {})".format(func.__name__, self.segwit_active))
# Assert segwit status is as expected
assert_equal(softfork_active(self.nodes[0], 'segwit'), self.segwit_active)
func(self, *args, **kwargs)
# Each subtest should leave some utxos for the next subtest
assert self.utxo
self.sync_blocks()
# Assert segwit status is as expected at end of subtest
assert_equal(softfork_active(self.nodes[0], 'segwit'), self.segwit_active)
return func_wrapper
@subtest # type: ignore
def test_non_witness_transaction(self):
"""See if sending a regular transaction works, and create a utxo to use in later tests."""
# Mine a block with an anyone-can-spend coinbase,
# let it mature, then try to spend it.
block = self.build_next_block(version=1)
block.solve()
self.test_node.send_and_ping(msg_no_witness_block(block)) # make sure the block was processed
txid = block.vtx[0].sha256
self.nodes[0].generate(99) # let the block mature
# Create a transaction that spends the coinbase
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(txid, 0), b""))
tx.vout.append(CTxOut(49 * 100000000, CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE])))
tx.calc_sha256()
# Check that serializing it with or without witness is the same
# This is a sanity check of our testing framework.
assert_equal(msg_no_witness_tx(tx).serialize(), msg_tx(tx).serialize())
        self.test_node.send_and_ping(msg_tx(tx)) # make sure the transaction was processed
assert tx.hash in self.nodes[0].getrawmempool()
# Save this transaction for later
self.utxo.append(UTXO(tx.sha256, 0, 49 * 100000000))
self.nodes[0].generate(1)
@subtest # type: ignore
def test_unnecessary_witness_before_segwit_activation(self):
"""Verify that blocks with witnesses are rejected before activation."""
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, CScript([OP_TRUE])))
tx.wit.vtxinwit.append(CTxInWitness())
tx.wit.vtxinwit[0].scriptWitness.stack = [CScript([CScriptNum(1)])]
# Verify the hash with witness differs from the txid
# (otherwise our testing framework must be broken!)
tx.rehash()
assert tx.sha256 != tx.calc_sha256(with_witness=True)
# Construct a segwit-signaling block that includes the transaction.
block = self.build_next_block(version=(VB_TOP_BITS | (1 << VB_WITNESS_BIT)))
self.update_witness_block_with_transactions(block, [tx])
# Sending witness data before activation is not allowed (anti-spam
# rule).
test_witness_block(self.nodes[0], self.test_node, block, accepted=False, reason='unexpected-witness')
# But it should not be permanently marked bad...
# Resend without witness information.
self.test_node.send_and_ping(msg_no_witness_block(block)) # make sure the block was processed
assert_equal(self.nodes[0].getbestblockhash(), block.hash)
# Update our utxo list; we spent the first entry.
self.utxo.pop(0)
self.utxo.append(UTXO(tx.sha256, 0, tx.vout[0].nValue))
@subtest # type: ignore
def test_block_relay(self):
"""Test that block requests to NODE_WITNESS peer are with MSG_WITNESS_FLAG.
This is true regardless of segwit activation.
Also test that we don't ask for blocks from unupgraded peers."""
        blocktype = 2 | MSG_WITNESS_FLAG # 2 == MSG_BLOCK
# test_node has set NODE_WITNESS, so all getdata requests should be for
# witness blocks.
# Test announcing a block via inv results in a getdata, and that
# announcing a version 4 or random VB block with a header results in a getdata
block1 = self.build_next_block()
block1.solve()
self.test_node.announce_block_and_wait_for_getdata(block1, use_header=False)
assert self.test_node.last_message["getdata"].inv[0].type == blocktype
test_witness_block(self.nodes[0], self.test_node, block1, True)
block2 = self.build_next_block(version=4)
block2.solve()
self.test_node.announce_block_and_wait_for_getdata(block2, use_header=True)
assert self.test_node.last_message["getdata"].inv[0].type == blocktype
test_witness_block(self.nodes[0], self.test_node, block2, True)
block3 = self.build_next_block(version=(VB_TOP_BITS | (1 << 15)))
block3.solve()
self.test_node.announce_block_and_wait_for_getdata(block3, use_header=True)
assert self.test_node.last_message["getdata"].inv[0].type == blocktype
test_witness_block(self.nodes[0], self.test_node, block3, True)
# Check that we can getdata for witness blocks or regular blocks,
# and the right thing happens.
if not self.segwit_active:
# Before activation, we should be able to request old blocks with
# or without witness, and they should be the same.
chain_height = self.nodes[0].getblockcount()
# Pick 10 random blocks on main chain, and verify that getdata's
# for MSG_BLOCK, MSG_WITNESS_BLOCK, and rpc getblock() are equal.
all_heights = list(range(chain_height + 1))
random.shuffle(all_heights)
all_heights = all_heights[0:10]
for height in all_heights:
block_hash = self.nodes[0].getblockhash(height)
rpc_block = self.nodes[0].getblock(block_hash, False)
block_hash = int(block_hash, 16)
block = self.test_node.request_block(block_hash, 2)
wit_block = self.test_node.request_block(block_hash, 2 | MSG_WITNESS_FLAG)
assert_equal(block.serialize(), wit_block.serialize())
assert_equal(block.serialize(), hex_str_to_bytes(rpc_block))
else:
# After activation, witness blocks and non-witness blocks should
# be different. Verify rpc getblock() returns witness blocks, while
# getdata respects the requested type.
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [])
# This gives us a witness commitment.
assert len(block.vtx[0].wit.vtxinwit) == 1
assert len(block.vtx[0].wit.vtxinwit[0].scriptWitness.stack) == 1
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
# Now try to retrieve it...
rpc_block = self.nodes[0].getblock(block.hash, False)
non_wit_block = self.test_node.request_block(block.sha256, 2)
wit_block = self.test_node.request_block(block.sha256, 2 | MSG_WITNESS_FLAG)
assert_equal(wit_block.serialize(), hex_str_to_bytes(rpc_block))
assert_equal(wit_block.serialize(False), non_wit_block.serialize())
assert_equal(wit_block.serialize(), block.serialize())
# Test size, vsize, weight
rpc_details = self.nodes[0].getblock(block.hash, True)
assert_equal(rpc_details["size"], len(block.serialize()))
assert_equal(rpc_details["strippedsize"], len(block.serialize(False)))
weight = 3 * len(block.serialize(False)) + len(block.serialize())
assert_equal(rpc_details["weight"], weight)
# Upgraded node should not ask for blocks from unupgraded
block4 = self.build_next_block(version=4)
block4.solve()
self.old_node.getdataset = set()
# Blocks can be requested via direct-fetch (immediately upon processing the announcement)
# or via parallel download (with an indeterminate delay from processing the announcement)
# so to test that a block is NOT requested, we could guess a time period to sleep for,
# and then check. We can avoid the sleep() by taking advantage of transaction getdata's
# being processed after block getdata's, and announce a transaction as well,
# and then check to see if that particular getdata has been received.
# Since 0.14, inv's will only be responded to with a getheaders, so send a header
# to announce this block.
msg = msg_headers()
msg.headers = [CBlockHeader(block4)]
self.old_node.send_message(msg)
self.old_node.announce_tx_and_wait_for_getdata(block4.vtx[0])
assert block4.sha256 not in self.old_node.getdataset
@subtest # type: ignore
def test_v0_outputs_arent_spendable(self):
"""Test that v0 outputs aren't spendable before segwit activation.
~6 months after segwit activation, the SCRIPT_VERIFY_WITNESS flag was
backdated so that it applies to all blocks, going back to the genesis
block.
Consequently, version 0 witness outputs are never spendable without
witness, and so can't be spent before segwit activation (the point at which
blocks are permitted to contain witnesses)."""
# node2 doesn't need to be connected for this test.
# (If it's connected, node0 may propagate an invalid block to it over
# compact blocks and the nodes would have inconsistent tips.)
disconnect_nodes(self.nodes[0], 2)
# Create two outputs, a p2wsh and p2sh-p2wsh
witness_program = CScript([OP_TRUE])
witness_hash = sha256(witness_program)
script_pubkey = CScript([OP_0, witness_hash])
p2sh_pubkey = hash160(script_pubkey)
p2sh_script_pubkey = CScript([OP_HASH160, p2sh_pubkey, OP_EQUAL])
value = self.utxo[0].nValue // 3
tx = CTransaction()
tx.vin = [CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b'')]
tx.vout = [CTxOut(value, script_pubkey), CTxOut(value, p2sh_script_pubkey)]
tx.vout.append(CTxOut(value, CScript([OP_TRUE])))
tx.rehash()
txid = tx.sha256
# Add it to a block
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
# Verify that segwit isn't activated. A block serialized with witness
# should be rejected prior to activation.
test_witness_block(self.nodes[0], self.test_node, block, accepted=False, with_witness=True, reason='unexpected-witness')
# Now send the block without witness. It should be accepted
test_witness_block(self.nodes[0], self.test_node, block, accepted=True, with_witness=False)
# Now try to spend the outputs. This should fail since SCRIPT_VERIFY_WITNESS is always enabled.
p2wsh_tx = CTransaction()
p2wsh_tx.vin = [CTxIn(COutPoint(txid, 0), b'')]
p2wsh_tx.vout = [CTxOut(value, CScript([OP_TRUE]))]
p2wsh_tx.wit.vtxinwit.append(CTxInWitness())
p2wsh_tx.wit.vtxinwit[0].scriptWitness.stack = [CScript([OP_TRUE])]
p2wsh_tx.rehash()
p2sh_p2wsh_tx = CTransaction()
p2sh_p2wsh_tx.vin = [CTxIn(COutPoint(txid, 1), CScript([script_pubkey]))]
p2sh_p2wsh_tx.vout = [CTxOut(value, CScript([OP_TRUE]))]
p2sh_p2wsh_tx.wit.vtxinwit.append(CTxInWitness())
p2sh_p2wsh_tx.wit.vtxinwit[0].scriptWitness.stack = [CScript([OP_TRUE])]
p2sh_p2wsh_tx.rehash()
for tx in [p2wsh_tx, p2sh_p2wsh_tx]:
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
# When the block is serialized with a witness, the block will be rejected because witness
# data isn't allowed in blocks that don't commit to witness data.
test_witness_block(self.nodes[0], self.test_node, block, accepted=False, with_witness=True, reason='unexpected-witness')
# When the block is serialized without witness, validation fails because the transaction is
# invalid (transactions are always validated with SCRIPT_VERIFY_WITNESS so a segwit v0 transaction
# without a witness is invalid).
# Note: The reject reason for this failure could be
# 'block-validation-failed' (if script check threads > 1) or
# 'non-mandatory-script-verify-flag (Witness program was passed an
# empty witness)' (otherwise).
# TODO: support multiple acceptable reject reasons.
test_witness_block(self.nodes[0], self.test_node, block, accepted=False, with_witness=False)
connect_nodes(self.nodes[0], 2)
self.utxo.pop(0)
self.utxo.append(UTXO(txid, 2, value))
@subtest # type: ignore
def test_getblocktemplate_before_lockin(self):
txid = int(self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1), 16)
for node in [self.nodes[0], self.nodes[2]]:
gbt_results = node.getblocktemplate({"rules": ["segwit"]})
if node == self.nodes[2]:
# If this is a non-segwit node, we should not get a witness
# commitment.
assert 'default_witness_commitment' not in gbt_results
else:
# For segwit-aware nodes, check the witness
# commitment is correct.
assert 'default_witness_commitment' in gbt_results
witness_commitment = gbt_results['default_witness_commitment']
# Check that default_witness_commitment is present.
witness_root = CBlock.get_merkle_root([ser_uint256(0),
ser_uint256(txid)])
script = get_witness_script(witness_root, 0)
assert_equal(witness_commitment, script.hex())
# Clear out the mempool
self.nodes[0].generate(1)
self.sync_blocks()
@subtest # type: ignore
def test_witness_tx_relay_before_segwit_activation(self):
# Generate a transaction that doesn't require a witness, but send it
# with a witness. Should be rejected for premature-witness, but should
        # not be added to the recently rejected list.
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE])))
tx.wit.vtxinwit.append(CTxInWitness())
tx.wit.vtxinwit[0].scriptWitness.stack = [b'a']
tx.rehash()
tx_hash = tx.sha256
tx_value = tx.vout[0].nValue
# Verify that if a peer doesn't set nServices to include NODE_WITNESS,
# the getdata is just for the non-witness portion.
self.old_node.announce_tx_and_wait_for_getdata(tx)
assert self.old_node.last_message["getdata"].inv[0].type == MSG_TX
# Since we haven't delivered the tx yet, inv'ing the same tx from
# a witness transaction ought not result in a getdata.
self.test_node.announce_tx_and_wait_for_getdata(tx, timeout=2, success=False)
# Delivering this transaction with witness should fail (no matter who
        # it's from)
assert_equal(len(self.nodes[0].getrawmempool()), 0)
assert_equal(len(self.nodes[1].getrawmempool()), 0)
test_transaction_acceptance(self.nodes[0], self.old_node, tx, with_witness=True, accepted=False)
test_transaction_acceptance(self.nodes[0], self.test_node, tx, with_witness=True, accepted=False)
# But eliminating the witness should fix it
test_transaction_acceptance(self.nodes[0], self.test_node, tx, with_witness=False, accepted=True)
# Cleanup: mine the first transaction and update utxo
self.nodes[0].generate(1)
assert_equal(len(self.nodes[0].getrawmempool()), 0)
self.utxo.pop(0)
self.utxo.append(UTXO(tx_hash, 0, tx_value))
@subtest # type: ignore
def test_standardness_v0(self):
"""Test V0 txout standardness.
V0 segwit outputs and inputs are always standard.
V0 segwit inputs may only be mined after activation, but not before."""
witness_program = CScript([OP_TRUE])
witness_hash = sha256(witness_program)
script_pubkey = CScript([OP_0, witness_hash])
p2sh_pubkey = hash160(witness_program)
p2sh_script_pubkey = CScript([OP_HASH160, p2sh_pubkey, OP_EQUAL])
# First prepare a p2sh output (so that spending it will pass standardness)
p2sh_tx = CTransaction()
p2sh_tx.vin = [CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")]
p2sh_tx.vout = [CTxOut(self.utxo[0].nValue - 1000, p2sh_script_pubkey)]
p2sh_tx.rehash()
# Mine it on test_node to create the confirmed output.
test_transaction_acceptance(self.nodes[0], self.test_node, p2sh_tx, with_witness=True, accepted=True)
self.nodes[0].generate(1)
self.sync_blocks()
# Now test standardness of v0 P2WSH outputs.
# Start by creating a transaction with two outputs.
tx = CTransaction()
tx.vin = [CTxIn(COutPoint(p2sh_tx.sha256, 0), CScript([witness_program]))]
tx.vout = [CTxOut(p2sh_tx.vout[0].nValue - 10000, script_pubkey)]
tx.vout.append(CTxOut(8000, script_pubkey)) # Might burn this later
tx.vin[0].nSequence = BIP125_SEQUENCE_NUMBER # Just to have the option to bump this tx from the mempool
tx.rehash()
# This is always accepted, since the mempool policy is to consider segwit as always active
# and thus allow segwit outputs
test_transaction_acceptance(self.nodes[1], self.std_node, tx, with_witness=True, accepted=True)
# Now create something that looks like a P2PKH output. This won't be spendable.
script_pubkey = CScript([OP_0, hash160(witness_hash)])
tx2 = CTransaction()
# tx was accepted, so we spend the second output.
tx2.vin = [CTxIn(COutPoint(tx.sha256, 1), b"")]
tx2.vout = [CTxOut(7000, script_pubkey)]
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
tx2.rehash()
test_transaction_acceptance(self.nodes[1], self.std_node, tx2, with_witness=True, accepted=True)
# Now update self.utxo for later tests.
tx3 = CTransaction()
# tx and tx2 were both accepted. Don't bother trying to reclaim the
# P2PKH output; just send tx's first output back to an anyone-can-spend.
self.sync_mempools([self.nodes[0], self.nodes[1]])
tx3.vin = [CTxIn(COutPoint(tx.sha256, 0), b"")]
tx3.vout = [CTxOut(tx.vout[0].nValue - 1000, CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE]))]
tx3.wit.vtxinwit.append(CTxInWitness())
tx3.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
tx3.rehash()
if not self.segwit_active:
# Just check mempool acceptance, but don't add the transaction to the mempool, since witness is disallowed
# in blocks and the tx is impossible to mine right now.
assert_equal(self.nodes[0].testmempoolaccept([tx3.serialize_with_witness().hex()]), [{'txid': tx3.hash, 'allowed': True}])
# Create the same output as tx3, but by replacing tx
tx3_out = tx3.vout[0]
tx3 = tx
tx3.vout = [tx3_out]
tx3.rehash()
assert_equal(self.nodes[0].testmempoolaccept([tx3.serialize_with_witness().hex()]), [{'txid': tx3.hash, 'allowed': True}])
test_transaction_acceptance(self.nodes[0], self.test_node, tx3, with_witness=True, accepted=True)
self.nodes[0].generate(1)
self.sync_blocks()
self.utxo.pop(0)
self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue))
assert_equal(len(self.nodes[1].getrawmempool()), 0)
@subtest # type: ignore
def advance_to_segwit_active(self):
"""Mine enough blocks to activate segwit."""
assert not softfork_active(self.nodes[0], 'segwit')
height = self.nodes[0].getblockcount()
self.nodes[0].generate(SEGWIT_HEIGHT - height - 2)
assert not softfork_active(self.nodes[0], 'segwit')
self.nodes[0].generate(1)
assert softfork_active(self.nodes[0], 'segwit')
self.segwit_active = True
@subtest # type: ignore
def test_p2sh_witness(self):
"""Test P2SH wrapped witness programs."""
# Prepare the p2sh-wrapped witness output
witness_program = CScript([OP_DROP, OP_TRUE])
witness_hash = sha256(witness_program)
p2wsh_pubkey = CScript([OP_0, witness_hash])
p2sh_witness_hash = hash160(p2wsh_pubkey)
script_pubkey = CScript([OP_HASH160, p2sh_witness_hash, OP_EQUAL])
script_sig = CScript([p2wsh_pubkey]) # a push of the redeem script
# Fund the P2SH output
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, script_pubkey))
tx.rehash()
# Verify mempool acceptance and block validity
test_transaction_acceptance(self.nodes[0], self.test_node, tx, with_witness=False, accepted=True)
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True, with_witness=True)
self.sync_blocks()
# Now test attempts to spend the output.
spend_tx = CTransaction()
spend_tx.vin.append(CTxIn(COutPoint(tx.sha256, 0), script_sig))
spend_tx.vout.append(CTxOut(tx.vout[0].nValue - 1000, CScript([OP_TRUE])))
spend_tx.rehash()
# This transaction should not be accepted into the mempool pre- or
# post-segwit. Mempool acceptance will use SCRIPT_VERIFY_WITNESS which
# will require a witness to spend a witness program regardless of
# segwit activation. Note that older bitcoind's that are not
# segwit-aware would also reject this for failing CLEANSTACK.
with self.nodes[0].assert_debug_log(
expected_msgs=(spend_tx.hash, 'was not accepted: non-mandatory-script-verify-flag (Witness program was passed an empty witness)')):
test_transaction_acceptance(self.nodes[0], self.test_node, spend_tx, with_witness=False, accepted=False)
# Try to put the witness script in the scriptSig, should also fail.
spend_tx.vin[0].scriptSig = CScript([p2wsh_pubkey, b'a'])
spend_tx.rehash()
with self.nodes[0].assert_debug_log(
expected_msgs=(spend_tx.hash, 'was not accepted: mandatory-script-verify-flag-failed (Script evaluated without error but finished with a false/empty top stack element)')):
test_transaction_acceptance(self.nodes[0], self.test_node, spend_tx, with_witness=False, accepted=False)
# Now put the witness script in the witness, should succeed after
# segwit activates.
spend_tx.vin[0].scriptSig = script_sig
spend_tx.rehash()
spend_tx.wit.vtxinwit.append(CTxInWitness())
spend_tx.wit.vtxinwit[0].scriptWitness.stack = [b'a', witness_program]
# Verify mempool acceptance
test_transaction_acceptance(self.nodes[0], self.test_node, spend_tx, with_witness=True, accepted=True)
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [spend_tx])
# If we're after activation, then sending this with witnesses should be valid.
# This no longer works before activation, because SCRIPT_VERIFY_WITNESS
# is always set.
# TODO: rewrite this test to make clear that it only works after activation.
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
# Update self.utxo
self.utxo.pop(0)
self.utxo.append(UTXO(spend_tx.sha256, 0, spend_tx.vout[0].nValue))
@subtest # type: ignore
def test_witness_commitments(self):
"""Test witness commitments.
This test can only be run after segwit has activated."""
# First try a correct witness commitment.
block = self.build_next_block()
add_witness_commitment(block)
block.solve()
# Test the test -- witness serialization should be different
assert msg_block(block).serialize() != msg_no_witness_block(block).serialize()
# This empty block should be valid.
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
# Try to tweak the nonce
block_2 = self.build_next_block()
add_witness_commitment(block_2, nonce=28)
block_2.solve()
# The commitment should have changed!
assert block_2.vtx[0].vout[-1] != block.vtx[0].vout[-1]
# This should also be valid.
test_witness_block(self.nodes[0], self.test_node, block_2, accepted=True)
# Now test commitments with actual transactions
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
# Let's construct a witness program
witness_program = CScript([OP_TRUE])
witness_hash = sha256(witness_program)
script_pubkey = CScript([OP_0, witness_hash])
tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, script_pubkey))
tx.rehash()
# tx2 will spend tx1, and send back to a regular anyone-can-spend address
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue - 1000, witness_program))
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
tx2.rehash()
block_3 = self.build_next_block()
self.update_witness_block_with_transactions(block_3, [tx, tx2], nonce=1)
# Add an extra OP_RETURN output that matches the witness commitment template,
# even though it has extra data after the incorrect commitment.
# This block should fail.
block_3.vtx[0].vout.append(CTxOut(0, CScript([OP_RETURN, WITNESS_COMMITMENT_HEADER + ser_uint256(2), 10])))
block_3.vtx[0].rehash()
block_3.hashMerkleRoot = block_3.calc_merkle_root()
block_3.rehash()
block_3.solve()
test_witness_block(self.nodes[0], self.test_node, block_3, accepted=False)
# Add a different commitment with different nonce, but in the
# right location, and with some funds burned(!).
# This should succeed (nValue shouldn't affect finding the
# witness commitment).
add_witness_commitment(block_3, nonce=0)
block_3.vtx[0].vout[0].nValue -= 1
block_3.vtx[0].vout[-1].nValue += 1
block_3.vtx[0].rehash()
block_3.hashMerkleRoot = block_3.calc_merkle_root()
block_3.rehash()
assert len(block_3.vtx[0].vout) == 4 # 3 OP_returns
block_3.solve()
test_witness_block(self.nodes[0], self.test_node, block_3, accepted=True)
# Finally test that a block with no witness transactions can
# omit the commitment.
block_4 = self.build_next_block()
tx3 = CTransaction()
tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), b""))
tx3.vout.append(CTxOut(tx.vout[0].nValue - 1000, witness_program))
tx3.rehash()
block_4.vtx.append(tx3)
block_4.hashMerkleRoot = block_4.calc_merkle_root()
block_4.solve()
test_witness_block(self.nodes[0], self.test_node, block_4, with_witness=False, accepted=True)
# Update available utxo's for use in later test.
self.utxo.pop(0)
self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue))
@subtest # type: ignore
def test_block_malleability(self):
# Make sure that a block that has too big a virtual size
# because of a too-large coinbase witness is not permanently
# marked bad.
block = self.build_next_block()
add_witness_commitment(block)
block.solve()
block.vtx[0].wit.vtxinwit[0].scriptWitness.stack.append(b'a' * 5000000)
assert get_virtual_size(block) > MAX_BLOCK_BASE_SIZE
# We can't send over the p2p network, because this is too big to relay
# TODO: repeat this test with a block that can be relayed
assert_equal('bad-witness-nonce-size', self.nodes[0].submitblock(block.serialize().hex()))
assert self.nodes[0].getbestblockhash() != block.hash
block.vtx[0].wit.vtxinwit[0].scriptWitness.stack.pop()
assert get_virtual_size(block) < MAX_BLOCK_BASE_SIZE
assert_equal(None, self.nodes[0].submitblock(block.serialize().hex()))
assert self.nodes[0].getbestblockhash() == block.hash
# Now make sure that malleating the witness reserved value doesn't
# result in a block permanently marked bad.
block = self.build_next_block()
add_witness_commitment(block)
block.solve()
# Change the nonce -- should not cause the block to be permanently
# failed
block.vtx[0].wit.vtxinwit[0].scriptWitness.stack = [ser_uint256(1)]
test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
# Changing the witness reserved value doesn't change the block hash
block.vtx[0].wit.vtxinwit[0].scriptWitness.stack = [ser_uint256(0)]
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
@subtest # type: ignore
def test_witness_block_size(self):
# TODO: Test that non-witness carrying blocks can't exceed 1MB
# Skipping this test for now; this is covered in p2p-fullblocktest.py
# Test that witness-bearing blocks are limited at ceil(base + wit/4) <= 1MB.
block = self.build_next_block()
assert len(self.utxo) > 0
# Create a P2WSH transaction.
# The witness program will be a bunch of OP_2DROP's, followed by OP_TRUE.
# This should give us plenty of room to tweak the spending tx's
# virtual size.
NUM_DROPS = 200 # 201 max ops per script!
NUM_OUTPUTS = 50
witness_program = CScript([OP_2DROP] * NUM_DROPS + [OP_TRUE])
witness_hash = uint256_from_str(sha256(witness_program))
script_pubkey = CScript([OP_0, ser_uint256(witness_hash)])
prevout = COutPoint(self.utxo[0].sha256, self.utxo[0].n)
value = self.utxo[0].nValue
parent_tx = CTransaction()
parent_tx.vin.append(CTxIn(prevout, b""))
child_value = int(value / NUM_OUTPUTS)
for i in range(NUM_OUTPUTS):
parent_tx.vout.append(CTxOut(child_value, script_pubkey))
parent_tx.vout[0].nValue -= 50000
assert parent_tx.vout[0].nValue > 0
parent_tx.rehash()
child_tx = CTransaction()
for i in range(NUM_OUTPUTS):
child_tx.vin.append(CTxIn(COutPoint(parent_tx.sha256, i), b""))
child_tx.vout = [CTxOut(value - 100000, CScript([OP_TRUE]))]
for i in range(NUM_OUTPUTS):
child_tx.wit.vtxinwit.append(CTxInWitness())
child_tx.wit.vtxinwit[-1].scriptWitness.stack = [b'a' * 195] * (2 * NUM_DROPS) + [witness_program]
child_tx.rehash()
self.update_witness_block_with_transactions(block, [parent_tx, child_tx])
vsize = get_virtual_size(block)
additional_bytes = (MAX_BLOCK_BASE_SIZE - vsize) * 4
i = 0
while additional_bytes > 0:
# Add some more bytes to each input until we hit MAX_BLOCK_BASE_SIZE+1
extra_bytes = min(additional_bytes + 1, 55)
block.vtx[-1].wit.vtxinwit[int(i / (2 * NUM_DROPS))].scriptWitness.stack[i % (2 * NUM_DROPS)] = b'a' * (195 + extra_bytes)
additional_bytes -= extra_bytes
i += 1
block.vtx[0].vout.pop() # Remove old commitment
add_witness_commitment(block)
block.solve()
vsize = get_virtual_size(block)
assert_equal(vsize, MAX_BLOCK_BASE_SIZE + 1)
# Make sure that our test case would exceed the old max-network-message
# limit
assert len(block.serialize()) > 2 * 1024 * 1024
test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
# Now resize the second transaction to make the block fit.
cur_length = len(block.vtx[-1].wit.vtxinwit[0].scriptWitness.stack[0])
block.vtx[-1].wit.vtxinwit[0].scriptWitness.stack[0] = b'a' * (cur_length - 1)
block.vtx[0].vout.pop()
add_witness_commitment(block)
block.solve()
assert get_virtual_size(block) == MAX_BLOCK_BASE_SIZE
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
# Update available utxo's
self.utxo.pop(0)
self.utxo.append(UTXO(block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue))
@subtest # type: ignore
def test_submit_block(self):
"""Test that submitblock adds the nonce automatically when possible."""
block = self.build_next_block()
# Try using a custom nonce and then don't supply it.
        # This should not work.
add_witness_commitment(block, nonce=1)
block.vtx[0].wit = CTxWitness() # drop the nonce
block.solve()
assert_equal('bad-witness-merkle-match', self.nodes[0].submitblock(block.serialize().hex()))
assert self.nodes[0].getbestblockhash() != block.hash
# Now redo commitment with the standard nonce, but let bitcoind fill it in.
add_witness_commitment(block, nonce=0)
block.vtx[0].wit = CTxWitness()
block.solve()
assert_equal(None, self.nodes[0].submitblock(block.serialize().hex()))
assert_equal(self.nodes[0].getbestblockhash(), block.hash)
# This time, add a tx with non-empty witness, but don't supply
# the commitment.
block_2 = self.build_next_block()
add_witness_commitment(block_2)
block_2.solve()
# Drop commitment and nonce -- submitblock should not fill in.
block_2.vtx[0].vout.pop()
block_2.vtx[0].wit = CTxWitness()
assert_equal('bad-txnmrklroot', self.nodes[0].submitblock(block_2.serialize().hex()))
# Tip should not advance!
assert self.nodes[0].getbestblockhash() != block_2.hash
@subtest # type: ignore
def test_extra_witness_data(self):
"""Test extra witness data in a transaction."""
block = self.build_next_block()
witness_program = CScript([OP_DROP, OP_TRUE])
witness_hash = sha256(witness_program)
script_pubkey = CScript([OP_0, witness_hash])
# First try extra witness data on a tx that doesn't require a witness
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue - 2000, script_pubkey))
tx.vout.append(CTxOut(1000, CScript([OP_TRUE]))) # non-witness output
tx.wit.vtxinwit.append(CTxInWitness())
tx.wit.vtxinwit[0].scriptWitness.stack = [CScript([])]
tx.rehash()
self.update_witness_block_with_transactions(block, [tx])
# Extra witness data should not be allowed.
test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
# Try extra signature data. Ok if we're not spending a witness output.
block.vtx[1].wit.vtxinwit = []
block.vtx[1].vin[0].scriptSig = CScript([OP_0])
block.vtx[1].rehash()
add_witness_commitment(block)
block.solve()
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
# Now try extra witness/signature data on an input that DOES require a
# witness
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b"")) # witness output
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 1), b"")) # non-witness
tx2.vout.append(CTxOut(tx.vout[0].nValue, CScript([OP_TRUE])))
tx2.wit.vtxinwit.extend([CTxInWitness(), CTxInWitness()])
tx2.wit.vtxinwit[0].scriptWitness.stack = [CScript([CScriptNum(1)]), CScript([CScriptNum(1)]), witness_program]
tx2.wit.vtxinwit[1].scriptWitness.stack = [CScript([OP_TRUE])]
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx2])
# This has extra witness data, so it should fail.
test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
# Now get rid of the extra witness, but add extra scriptSig data
tx2.vin[0].scriptSig = CScript([OP_TRUE])
tx2.vin[1].scriptSig = CScript([OP_TRUE])
tx2.wit.vtxinwit[0].scriptWitness.stack.pop(0)
tx2.wit.vtxinwit[1].scriptWitness.stack = []
tx2.rehash()
add_witness_commitment(block)
block.solve()
# This has extra signature data for a witness input, so it should fail.
test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
# Now get rid of the extra scriptsig on the witness input, and verify
# success (even with extra scriptsig data in the non-witness input)
tx2.vin[0].scriptSig = b""
tx2.rehash()
add_witness_commitment(block)
block.solve()
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
# Update utxo for later tests
self.utxo.pop(0)
self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
@subtest # type: ignore
def test_max_witness_push_length(self):
"""Test that witness stack can only allow up to 520 byte pushes."""
block = self.build_next_block()
witness_program = CScript([OP_DROP, OP_TRUE])
witness_hash = sha256(witness_program)
script_pubkey = CScript([OP_0, witness_hash])
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, script_pubkey))
tx.rehash()
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue - 1000, CScript([OP_TRUE])))
tx2.wit.vtxinwit.append(CTxInWitness())
# First try a 521-byte stack element
tx2.wit.vtxinwit[0].scriptWitness.stack = [b'a' * (MAX_SCRIPT_ELEMENT_SIZE + 1), witness_program]
tx2.rehash()
self.update_witness_block_with_transactions(block, [tx, tx2])
test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
# Now reduce the length of the stack element
tx2.wit.vtxinwit[0].scriptWitness.stack[0] = b'a' * (MAX_SCRIPT_ELEMENT_SIZE)
add_witness_commitment(block)
block.solve()
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
# Update the utxo for later tests
self.utxo.pop()
self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
@subtest # type: ignore
def test_max_witness_program_length(self):
"""Test that witness outputs greater than 10kB can't be spent."""
MAX_PROGRAM_LENGTH = 10000
# This program is 19 max pushes (9937 bytes), then 64 more opcode-bytes.
long_witness_program = CScript([b'a' * MAX_SCRIPT_ELEMENT_SIZE] * 19 + [OP_DROP] * 63 + [OP_TRUE])
assert len(long_witness_program) == MAX_PROGRAM_LENGTH + 1
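        # (Breakdown of the check above: each 520-byte element is pushed with
        # OP_PUSHDATA2, costing 3 bytes of overhead plus 520 bytes of data, so
        # 19 * 523 = 9937 bytes; the 63 OP_DROPs and one OP_TRUE add 64 more,
        # giving 9937 + 64 = 10001 = MAX_PROGRAM_LENGTH + 1.)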
long_witness_hash = sha256(long_witness_program)
long_script_pubkey = CScript([OP_0, long_witness_hash])
block = self.build_next_block()
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, long_script_pubkey))
tx.rehash()
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue - 1000, CScript([OP_TRUE])))
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[0].scriptWitness.stack = [b'a'] * 44 + [long_witness_program]
tx2.rehash()
self.update_witness_block_with_transactions(block, [tx, tx2])
test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
# Try again with one less byte in the witness program
witness_program = CScript([b'a' * MAX_SCRIPT_ELEMENT_SIZE] * 19 + [OP_DROP] * 62 + [OP_TRUE])
assert len(witness_program) == MAX_PROGRAM_LENGTH
witness_hash = sha256(witness_program)
script_pubkey = CScript([OP_0, witness_hash])
tx.vout[0] = CTxOut(tx.vout[0].nValue, script_pubkey)
tx.rehash()
tx2.vin[0].prevout.hash = tx.sha256
tx2.wit.vtxinwit[0].scriptWitness.stack = [b'a'] * 43 + [witness_program]
tx2.rehash()
block.vtx = [block.vtx[0]]
self.update_witness_block_with_transactions(block, [tx, tx2])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
self.utxo.pop()
self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
@subtest # type: ignore
def test_witness_input_length(self):
"""Test that vin length must match vtxinwit length."""
witness_program = CScript([OP_DROP, OP_TRUE])
witness_hash = sha256(witness_program)
script_pubkey = CScript([OP_0, witness_hash])
# Create a transaction that splits our utxo into many outputs
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
value = self.utxo[0].nValue
for i in range(10):
tx.vout.append(CTxOut(int(value / 10), script_pubkey))
tx.vout[0].nValue -= 1000
assert tx.vout[0].nValue >= 0
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
# Try various ways to spend tx that should all break.
# This "broken" transaction serializer will not normalize
# the length of vtxinwit.
class BrokenCTransaction(CTransaction):
def serialize_with_witness(self):
flags = 0
if not self.wit.is_null():
flags |= 1
r = b""
r += struct.pack("<i", self.nVersion)
if flags:
dummy = []
r += ser_vector(dummy)
r += struct.pack("<B", flags)
r += ser_vector(self.vin)
r += ser_vector(self.vout)
if flags & 1:
r += self.wit.serialize()
r += struct.pack("<I", self.nLockTime)
return r
tx2 = BrokenCTransaction()
for i in range(10):
tx2.vin.append(CTxIn(COutPoint(tx.sha256, i), b""))
tx2.vout.append(CTxOut(value - 3000, CScript([OP_TRUE])))
# First try using a too long vtxinwit
for i in range(11):
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[i].scriptWitness.stack = [b'a', witness_program]
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx2])
test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
# Now try using a too short vtxinwit
tx2.wit.vtxinwit.pop()
tx2.wit.vtxinwit.pop()
block.vtx = [block.vtx[0]]
self.update_witness_block_with_transactions(block, [tx2])
test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
# Now make one of the intermediate witnesses be incorrect
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[-1].scriptWitness.stack = [b'a', witness_program]
tx2.wit.vtxinwit[5].scriptWitness.stack = [witness_program]
block.vtx = [block.vtx[0]]
self.update_witness_block_with_transactions(block, [tx2])
test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
# Fix the broken witness and the block should be accepted.
tx2.wit.vtxinwit[5].scriptWitness.stack = [b'a', witness_program]
block.vtx = [block.vtx[0]]
self.update_witness_block_with_transactions(block, [tx2])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
self.utxo.pop()
self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
@subtest # type: ignore
def test_tx_relay_after_segwit_activation(self):
"""Test transaction relay after segwit activation.
After segwit activates, verify that mempool:
- rejects transactions with unnecessary/extra witnesses
- accepts transactions with valid witnesses
and that witness transactions are relayed to non-upgraded peers."""
# Generate a transaction that doesn't require a witness, but send it
# with a witness. Should be rejected because we can't use a witness
# when spending a non-witness output.
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE])))
tx.wit.vtxinwit.append(CTxInWitness())
tx.wit.vtxinwit[0].scriptWitness.stack = [b'a']
tx.rehash()
tx_hash = tx.sha256
# Verify that unnecessary witnesses are rejected.
self.test_node.announce_tx_and_wait_for_getdata(tx)
assert_equal(len(self.nodes[0].getrawmempool()), 0)
test_transaction_acceptance(self.nodes[0], self.test_node, tx, with_witness=True, accepted=False)
# Verify that removing the witness succeeds.
self.test_node.announce_tx_and_wait_for_getdata(tx)
test_transaction_acceptance(self.nodes[0], self.test_node, tx, with_witness=False, accepted=True)
# Now try to add extra witness data to a valid witness tx.
witness_program = CScript([OP_TRUE])
witness_hash = sha256(witness_program)
script_pubkey = CScript([OP_0, witness_hash])
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx_hash, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue - 1000, script_pubkey))
tx2.rehash()
tx3 = CTransaction()
tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), b""))
tx3.wit.vtxinwit.append(CTxInWitness())
        # Add a witness that is too large for IsStandard and check that it does not enter the reject filter
p2sh_program = CScript([OP_TRUE])
p2sh_pubkey = hash160(p2sh_program)
witness_program2 = CScript([b'a' * 400000])
tx3.vout.append(CTxOut(tx2.vout[0].nValue - 1000, CScript([OP_HASH160, p2sh_pubkey, OP_EQUAL])))
tx3.wit.vtxinwit[0].scriptWitness.stack = [witness_program2]
tx3.rehash()
# Node will not be blinded to the transaction
self.std_node.announce_tx_and_wait_for_getdata(tx3)
test_transaction_acceptance(self.nodes[1], self.std_node, tx3, True, False, 'tx-size')
self.std_node.announce_tx_and_wait_for_getdata(tx3)
test_transaction_acceptance(self.nodes[1], self.std_node, tx3, True, False, 'tx-size')
# Remove witness stuffing, instead add extra witness push on stack
tx3.vout[0] = CTxOut(tx2.vout[0].nValue - 1000, CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE]))
tx3.wit.vtxinwit[0].scriptWitness.stack = [CScript([CScriptNum(1)]), witness_program]
tx3.rehash()
test_transaction_acceptance(self.nodes[0], self.test_node, tx2, with_witness=True, accepted=True)
test_transaction_acceptance(self.nodes[0], self.test_node, tx3, with_witness=True, accepted=False)
# Get rid of the extra witness, and verify acceptance.
tx3.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
# Also check that old_node gets a tx announcement, even though this is
# a witness transaction.
self.old_node.wait_for_inv([CInv(MSG_TX, tx2.sha256)]) # wait until tx2 was inv'ed
test_transaction_acceptance(self.nodes[0], self.test_node, tx3, with_witness=True, accepted=True)
self.old_node.wait_for_inv([CInv(MSG_TX, tx3.sha256)])
# Test that getrawtransaction returns correct witness information
# hash, size, vsize
raw_tx = self.nodes[0].getrawtransaction(tx3.hash, 1)
assert_equal(int(raw_tx["hash"], 16), tx3.calc_sha256(True))
assert_equal(raw_tx["size"], len(tx3.serialize_with_witness()))
weight = len(tx3.serialize_with_witness()) + 3 * len(tx3.serialize_without_witness())
vsize = math.ceil(weight / 4)
assert_equal(raw_tx["vsize"], vsize)
assert_equal(raw_tx["weight"], weight)
assert_equal(len(raw_tx["vin"][0]["txinwitness"]), 1)
assert_equal(raw_tx["vin"][0]["txinwitness"][0], witness_program.hex())
assert vsize != raw_tx["size"]
# Cleanup: mine the transactions and update utxo for next test
self.nodes[0].generate(1)
assert_equal(len(self.nodes[0].getrawmempool()), 0)
self.utxo.pop(0)
self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue))
@subtest # type: ignore
def test_segwit_versions(self):
"""Test validity of future segwit version transactions.
Future segwit versions are non-standard to spend, but valid in blocks.
Sending to future segwit versions is always allowed.
Can run this before and after segwit activation."""
        NUM_SEGWIT_VERSIONS = 17 # will test OP_0, OP_1, ..., OP_16
if len(self.utxo) < NUM_SEGWIT_VERSIONS:
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
split_value = (self.utxo[0].nValue - 4000) // NUM_SEGWIT_VERSIONS
for i in range(NUM_SEGWIT_VERSIONS):
tx.vout.append(CTxOut(split_value, CScript([OP_TRUE])))
tx.rehash()
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
self.utxo.pop(0)
for i in range(NUM_SEGWIT_VERSIONS):
self.utxo.append(UTXO(tx.sha256, i, split_value))
self.sync_blocks()
temp_utxo = []
tx = CTransaction()
witness_program = CScript([OP_TRUE])
witness_hash = sha256(witness_program)
assert_equal(len(self.nodes[1].getrawmempool()), 0)
for version in list(range(OP_1, OP_16 + 1)) + [OP_0]:
# First try to spend to a future version segwit script_pubkey.
script_pubkey = CScript([CScriptOp(version), witness_hash])
tx.vin = [CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")]
tx.vout = [CTxOut(self.utxo[0].nValue - 1000, script_pubkey)]
tx.rehash()
test_transaction_acceptance(self.nodes[1], self.std_node, tx, with_witness=True, accepted=False)
test_transaction_acceptance(self.nodes[0], self.test_node, tx, with_witness=True, accepted=True)
self.utxo.pop(0)
temp_utxo.append(UTXO(tx.sha256, 0, tx.vout[0].nValue))
self.nodes[0].generate(1) # Mine all the transactions
self.sync_blocks()
assert len(self.nodes[0].getrawmempool()) == 0
# Finally, verify that version 0 -> version 1 transactions
# are standard
script_pubkey = CScript([CScriptOp(OP_1), witness_hash])
tx2 = CTransaction()
tx2.vin = [CTxIn(COutPoint(tx.sha256, 0), b"")]
tx2.vout = [CTxOut(tx.vout[0].nValue - 1000, script_pubkey)]
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
tx2.rehash()
# Gets accepted to both policy-enforcing nodes and others.
test_transaction_acceptance(self.nodes[0], self.test_node, tx2, with_witness=True, accepted=True)
test_transaction_acceptance(self.nodes[1], self.std_node, tx2, with_witness=True, accepted=True)
temp_utxo.pop() # last entry in temp_utxo was the output we just spent
temp_utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
# Spend everything in temp_utxo back to an OP_TRUE output.
tx3 = CTransaction()
total_value = 0
for i in temp_utxo:
tx3.vin.append(CTxIn(COutPoint(i.sha256, i.n), b""))
tx3.wit.vtxinwit.append(CTxInWitness())
total_value += i.nValue
tx3.wit.vtxinwit[-1].scriptWitness.stack = [witness_program]
tx3.vout.append(CTxOut(total_value - 1000, CScript([OP_TRUE])))
tx3.rehash()
# Spending a higher version witness output is not allowed by policy,
# even with fRequireStandard=false.
test_transaction_acceptance(self.nodes[0], self.test_node, tx3, with_witness=True, accepted=False, reason="reserved for soft-fork upgrades")
# Building a block with the transaction must be valid, however.
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx2, tx3])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
self.sync_blocks()
# Add utxo to our list
self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue))
@subtest # type: ignore
def test_premature_coinbase_witness_spend(self):
block = self.build_next_block()
# Change the output of the block to be a witness output.
witness_program = CScript([OP_TRUE])
witness_hash = sha256(witness_program)
script_pubkey = CScript([OP_0, witness_hash])
block.vtx[0].vout[0].scriptPubKey = script_pubkey
# This next line will rehash the coinbase and update the merkle
# root, and solve.
self.update_witness_block_with_transactions(block, [])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
spend_tx = CTransaction()
spend_tx.vin = [CTxIn(COutPoint(block.vtx[0].sha256, 0), b"")]
spend_tx.vout = [CTxOut(block.vtx[0].vout[0].nValue, witness_program)]
spend_tx.wit.vtxinwit.append(CTxInWitness())
spend_tx.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
spend_tx.rehash()
# Now test a premature spend.
self.nodes[0].generate(98)
self.sync_blocks()
block2 = self.build_next_block()
self.update_witness_block_with_transactions(block2, [spend_tx])
test_witness_block(self.nodes[0], self.test_node, block2, accepted=False)
# Advancing one more block should allow the spend.
self.nodes[0].generate(1)
block2 = self.build_next_block()
self.update_witness_block_with_transactions(block2, [spend_tx])
test_witness_block(self.nodes[0], self.test_node, block2, accepted=True)
self.sync_blocks()
@subtest # type: ignore
def test_uncompressed_pubkey(self):
"""Test uncompressed pubkey validity in segwit transactions.
Uncompressed pubkeys are no longer supported in default relay policy,
but (for now) are still valid in blocks."""
# Segwit transactions using uncompressed pubkeys are not accepted
# under default policy, but should still pass consensus.
key = ECKey()
key.generate(False)
pubkey = key.get_pubkey().get_bytes()
assert_equal(len(pubkey), 65) # This should be an uncompressed pubkey
utxo = self.utxo.pop(0)
# Test 1: P2WPKH
# First create a P2WPKH output that uses an uncompressed pubkey
pubkeyhash = hash160(pubkey)
script_pkh = CScript([OP_0, pubkeyhash])
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(utxo.sha256, utxo.n), b""))
tx.vout.append(CTxOut(utxo.nValue - 1000, script_pkh))
tx.rehash()
# Confirm it in a block.
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
# Now try to spend it. Send it to a P2WSH output, which we'll
# use in the next test.
witness_program = CScript([pubkey, CScriptOp(OP_CHECKSIG)])
witness_hash = sha256(witness_program)
script_wsh = CScript([OP_0, witness_hash])
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue - 1000, script_wsh))
script = get_p2pkh_script(pubkeyhash)
sig_hash = SegwitV0SignatureHash(script, tx2, 0, SIGHASH_ALL, tx.vout[0].nValue)
signature = key.sign_ecdsa(sig_hash) + b'\x01' # 0x1 is SIGHASH_ALL
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[0].scriptWitness.stack = [signature, pubkey]
tx2.rehash()
# Should fail policy test.
test_transaction_acceptance(self.nodes[0], self.test_node, tx2, True, False, 'non-mandatory-script-verify-flag (Using non-compressed keys in segwit)')
# But passes consensus.
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx2])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
# Test 2: P2WSH
# Try to spend the P2WSH output created in last test.
# Send it to a P2SH(P2WSH) output, which we'll use in the next test.
p2sh_witness_hash = hash160(script_wsh)
script_p2sh = CScript([OP_HASH160, p2sh_witness_hash, OP_EQUAL])
script_sig = CScript([script_wsh])
tx3 = CTransaction()
tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), b""))
tx3.vout.append(CTxOut(tx2.vout[0].nValue - 1000, script_p2sh))
tx3.wit.vtxinwit.append(CTxInWitness())
sign_p2pk_witness_input(witness_program, tx3, 0, SIGHASH_ALL, tx2.vout[0].nValue, key)
# Should fail policy test.
test_transaction_acceptance(self.nodes[0], self.test_node, tx3, True, False, 'non-mandatory-script-verify-flag (Using non-compressed keys in segwit)')
# But passes consensus.
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx3])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
# Test 3: P2SH(P2WSH)
# Try to spend the P2SH output created in the last test.
# Send it to a P2PKH output, which we'll use in the next test.
script_pubkey = get_p2pkh_script(pubkeyhash)
tx4 = CTransaction()
tx4.vin.append(CTxIn(COutPoint(tx3.sha256, 0), script_sig))
tx4.vout.append(CTxOut(tx3.vout[0].nValue - 1000, script_pubkey))
tx4.wit.vtxinwit.append(CTxInWitness())
sign_p2pk_witness_input(witness_program, tx4, 0, SIGHASH_ALL, tx3.vout[0].nValue, key)
# Should fail policy test.
test_transaction_acceptance(self.nodes[0], self.test_node, tx4, True, False, 'non-mandatory-script-verify-flag (Using non-compressed keys in segwit)')
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx4])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
# Test 4: Uncompressed pubkeys should still be valid in non-segwit
# transactions.
tx5 = CTransaction()
tx5.vin.append(CTxIn(COutPoint(tx4.sha256, 0), b""))
tx5.vout.append(CTxOut(tx4.vout[0].nValue - 1000, CScript([OP_TRUE])))
(sig_hash, err) = LegacySignatureHash(script_pubkey, tx5, 0, SIGHASH_ALL)
signature = key.sign_ecdsa(sig_hash) + b'\x01' # 0x1 is SIGHASH_ALL
tx5.vin[0].scriptSig = CScript([signature, pubkey])
tx5.rehash()
# Should pass policy and consensus.
test_transaction_acceptance(self.nodes[0], self.test_node, tx5, True, True)
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx5])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
self.utxo.append(UTXO(tx5.sha256, 0, tx5.vout[0].nValue))
@subtest # type: ignore
def test_signature_version_1(self):
key = ECKey()
key.generate()
pubkey = key.get_pubkey().get_bytes()
witness_program = CScript([pubkey, CScriptOp(OP_CHECKSIG)])
witness_hash = sha256(witness_program)
script_pubkey = CScript([OP_0, witness_hash])
# First create a witness output for use in the tests.
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, script_pubkey))
tx.rehash()
test_transaction_acceptance(self.nodes[0], self.test_node, tx, with_witness=True, accepted=True)
# Mine this transaction in preparation for following tests.
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
self.sync_blocks()
self.utxo.pop(0)
# Test each hashtype
prev_utxo = UTXO(tx.sha256, 0, tx.vout[0].nValue)
for sigflag in [0, SIGHASH_ANYONECANPAY]:
for hashtype in [SIGHASH_ALL, SIGHASH_NONE, SIGHASH_SINGLE]:
hashtype |= sigflag
block = self.build_next_block()
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(prev_utxo.sha256, prev_utxo.n), b""))
tx.vout.append(CTxOut(prev_utxo.nValue - 1000, script_pubkey))
tx.wit.vtxinwit.append(CTxInWitness())
# Too-large input value
sign_p2pk_witness_input(witness_program, tx, 0, hashtype, prev_utxo.nValue + 1, key)
self.update_witness_block_with_transactions(block, [tx])
test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
# Too-small input value
sign_p2pk_witness_input(witness_program, tx, 0, hashtype, prev_utxo.nValue - 1, key)
block.vtx.pop() # remove last tx
self.update_witness_block_with_transactions(block, [tx])
test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
# Now try correct value
sign_p2pk_witness_input(witness_program, tx, 0, hashtype, prev_utxo.nValue, key)
block.vtx.pop()
self.update_witness_block_with_transactions(block, [tx])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
prev_utxo = UTXO(tx.sha256, 0, tx.vout[0].nValue)
# Test combinations of signature hashes.
# Split the utxo into a lot of outputs.
# Randomly choose up to 10 to spend, sign with different hashtypes, and
# output to a random number of outputs. Repeat NUM_SIGHASH_TESTS times.
# Ensure that we've tested a situation where we use SIGHASH_SINGLE with
# an input index > number of outputs.
NUM_SIGHASH_TESTS = 500
temp_utxos = []
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(prev_utxo.sha256, prev_utxo.n), b""))
split_value = prev_utxo.nValue // NUM_SIGHASH_TESTS
for i in range(NUM_SIGHASH_TESTS):
tx.vout.append(CTxOut(split_value, script_pubkey))
tx.wit.vtxinwit.append(CTxInWitness())
sign_p2pk_witness_input(witness_program, tx, 0, SIGHASH_ALL, prev_utxo.nValue, key)
for i in range(NUM_SIGHASH_TESTS):
temp_utxos.append(UTXO(tx.sha256, i, split_value))
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
block = self.build_next_block()
used_sighash_single_out_of_bounds = False
for i in range(NUM_SIGHASH_TESTS):
# Ping regularly to keep the connection alive
if (not i % 100):
self.test_node.sync_with_ping()
# Choose random number of inputs to use.
num_inputs = random.randint(1, 10)
# Create a slight bias for producing more utxos
num_outputs = random.randint(1, 11)
random.shuffle(temp_utxos)
assert len(temp_utxos) > num_inputs
tx = CTransaction()
total_value = 0
for i in range(num_inputs):
tx.vin.append(CTxIn(COutPoint(temp_utxos[i].sha256, temp_utxos[i].n), b""))
tx.wit.vtxinwit.append(CTxInWitness())
total_value += temp_utxos[i].nValue
split_value = total_value // num_outputs
for i in range(num_outputs):
tx.vout.append(CTxOut(split_value, script_pubkey))
for i in range(num_inputs):
# Now try to sign each input, using a random hashtype.
anyonecanpay = 0
if random.randint(0, 1):
anyonecanpay = SIGHASH_ANYONECANPAY
hashtype = random.randint(1, 3) | anyonecanpay
sign_p2pk_witness_input(witness_program, tx, i, hashtype, temp_utxos[i].nValue, key)
if (hashtype == SIGHASH_SINGLE and i >= num_outputs):
used_sighash_single_out_of_bounds = True
tx.rehash()
for i in range(num_outputs):
temp_utxos.append(UTXO(tx.sha256, i, split_value))
temp_utxos = temp_utxos[num_inputs:]
block.vtx.append(tx)
# Test the block periodically, if we're close to maxblocksize
if (get_virtual_size(block) > MAX_BLOCK_BASE_SIZE - 1000):
self.update_witness_block_with_transactions(block, [])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
block = self.build_next_block()
if (not used_sighash_single_out_of_bounds):
self.log.info("WARNING: this test run didn't attempt SIGHASH_SINGLE with out-of-bounds index value")
# Test the transactions we've added to the block
if (len(block.vtx) > 1):
self.update_witness_block_with_transactions(block, [])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
# Now test witness version 0 P2PKH transactions
pubkeyhash = hash160(pubkey)
script_pkh = CScript([OP_0, pubkeyhash])
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(temp_utxos[0].sha256, temp_utxos[0].n), b""))
tx.vout.append(CTxOut(temp_utxos[0].nValue, script_pkh))
tx.wit.vtxinwit.append(CTxInWitness())
sign_p2pk_witness_input(witness_program, tx, 0, SIGHASH_ALL, temp_utxos[0].nValue, key)
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue, CScript([OP_TRUE])))
script = get_p2pkh_script(pubkeyhash)
sig_hash = SegwitV0SignatureHash(script, tx2, 0, SIGHASH_ALL, tx.vout[0].nValue)
signature = key.sign_ecdsa(sig_hash) + b'\x01' # 0x1 is SIGHASH_ALL
# Check that we can't have a scriptSig
tx2.vin[0].scriptSig = CScript([signature, pubkey])
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx, tx2])
test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
# Move the signature to the witness.
block.vtx.pop()
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[0].scriptWitness.stack = [signature, pubkey]
tx2.vin[0].scriptSig = b""
tx2.rehash()
self.update_witness_block_with_transactions(block, [tx2])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
temp_utxos.pop(0)
# Update self.utxos for later tests by creating two outputs
# that consolidate all the coins in temp_utxos.
output_value = sum(i.nValue for i in temp_utxos) // 2
tx = CTransaction()
index = 0
# Just spend to our usual anyone-can-spend output
tx.vout = [CTxOut(output_value, CScript([OP_TRUE]))] * 2
for i in temp_utxos:
# Use SIGHASH_ALL|SIGHASH_ANYONECANPAY so we can build up
# the signatures as we go.
tx.vin.append(CTxIn(COutPoint(i.sha256, i.n), b""))
tx.wit.vtxinwit.append(CTxInWitness())
sign_p2pk_witness_input(witness_program, tx, index, SIGHASH_ALL | SIGHASH_ANYONECANPAY, i.nValue, key)
index += 1
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
for i in range(len(tx.vout)):
self.utxo.append(UTXO(tx.sha256, i, tx.vout[i].nValue))
@subtest # type: ignore
def test_non_standard_witness_blinding(self):
"""Test behavior of unnecessary witnesses in transactions does not blind the node for the transaction"""
# Create a p2sh output -- this is so we can pass the standardness
# rules (an anyone-can-spend OP_TRUE would be rejected, if not wrapped
# in P2SH).
p2sh_program = CScript([OP_TRUE])
p2sh_pubkey = hash160(p2sh_program)
script_pubkey = CScript([OP_HASH160, p2sh_pubkey, OP_EQUAL])
        # Now check that unnecessary witnesses can't be used to blind a node
        # to a transaction, e.g. by violating standardness checks.
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, script_pubkey))
tx.rehash()
test_transaction_acceptance(self.nodes[0], self.test_node, tx, False, True)
self.nodes[0].generate(1)
self.sync_blocks()
# We'll add an unnecessary witness to this transaction that would cause
# it to be non-standard, to test that violating policy with a witness
# doesn't blind a node to a transaction. Transactions
# rejected for having a witness shouldn't be added
# to the rejection cache.
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), CScript([p2sh_program])))
tx2.vout.append(CTxOut(tx.vout[0].nValue - 1000, script_pubkey))
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[0].scriptWitness.stack = [b'a' * 400]
tx2.rehash()
# This will be rejected due to a policy check:
# No witness is allowed, since it is not a witness program but a p2sh program
test_transaction_acceptance(self.nodes[1], self.std_node, tx2, True, False, 'bad-witness-nonstandard')
# If we send without witness, it should be accepted.
test_transaction_acceptance(self.nodes[1], self.std_node, tx2, False, True)
# Now create a new anyone-can-spend utxo for the next test.
tx3 = CTransaction()
tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), CScript([p2sh_program])))
tx3.vout.append(CTxOut(tx2.vout[0].nValue - 1000, CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE])))
tx3.rehash()
test_transaction_acceptance(self.nodes[0], self.test_node, tx2, False, True)
test_transaction_acceptance(self.nodes[0], self.test_node, tx3, False, True)
self.nodes[0].generate(1)
self.sync_blocks()
# Update our utxo list; we spent the first entry.
self.utxo.pop(0)
self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue))
@subtest # type: ignore
def test_non_standard_witness(self):
"""Test detection of non-standard P2WSH witness"""
pad = chr(1).encode('latin-1')
# Create scripts for tests
scripts = []
scripts.append(CScript([OP_DROP] * 100))
scripts.append(CScript([OP_DROP] * 99))
scripts.append(CScript([pad * 59] * 59 + [OP_DROP] * 60))
scripts.append(CScript([pad * 59] * 59 + [OP_DROP] * 61))
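        # The last two scripts serialize to 3600 and 3601 bytes respectively
        # (each of the 59 pushes serializes to 60 bytes, plus 60 or 61 OP_DROPs),
        # straddling the witnessScript standardness limit exercised below.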
p2wsh_scripts = []
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
# For each script, generate a pair of P2WSH and P2SH-P2WSH output.
outputvalue = (self.utxo[0].nValue - 1000) // (len(scripts) * 2)
for i in scripts:
p2wsh = CScript([OP_0, sha256(i)])
p2sh = hash160(p2wsh)
p2wsh_scripts.append(p2wsh)
tx.vout.append(CTxOut(outputvalue, p2wsh))
tx.vout.append(CTxOut(outputvalue, CScript([OP_HASH160, p2sh, OP_EQUAL])))
tx.rehash()
txid = tx.sha256
test_transaction_acceptance(self.nodes[0], self.test_node, tx, with_witness=False, accepted=True)
self.nodes[0].generate(1)
self.sync_blocks()
# Creating transactions for tests
p2wsh_txs = []
p2sh_txs = []
for i in range(len(scripts)):
p2wsh_tx = CTransaction()
p2wsh_tx.vin.append(CTxIn(COutPoint(txid, i * 2)))
p2wsh_tx.vout.append(CTxOut(outputvalue - 5000, CScript([OP_0, hash160(hex_str_to_bytes(""))])))
p2wsh_tx.wit.vtxinwit.append(CTxInWitness())
p2wsh_tx.rehash()
p2wsh_txs.append(p2wsh_tx)
p2sh_tx = CTransaction()
p2sh_tx.vin.append(CTxIn(COutPoint(txid, i * 2 + 1), CScript([p2wsh_scripts[i]])))
p2sh_tx.vout.append(CTxOut(outputvalue - 5000, CScript([OP_0, hash160(hex_str_to_bytes(""))])))
p2sh_tx.wit.vtxinwit.append(CTxInWitness())
p2sh_tx.rehash()
p2sh_txs.append(p2sh_tx)
# Testing native P2WSH
# Witness stack size, excluding witnessScript, over 100 is non-standard
p2wsh_txs[0].wit.vtxinwit[0].scriptWitness.stack = [pad] * 101 + [scripts[0]]
test_transaction_acceptance(self.nodes[1], self.std_node, p2wsh_txs[0], True, False, 'bad-witness-nonstandard')
# Non-standard nodes should accept
test_transaction_acceptance(self.nodes[0], self.test_node, p2wsh_txs[0], True, True)
# Stack element size over 80 bytes is non-standard
p2wsh_txs[1].wit.vtxinwit[0].scriptWitness.stack = [pad * 81] * 100 + [scripts[1]]
test_transaction_acceptance(self.nodes[1], self.std_node, p2wsh_txs[1], True, False, 'bad-witness-nonstandard')
# Non-standard nodes should accept
test_transaction_acceptance(self.nodes[0], self.test_node, p2wsh_txs[1], True, True)
# Standard nodes should accept if element size is not over 80 bytes
p2wsh_txs[1].wit.vtxinwit[0].scriptWitness.stack = [pad * 80] * 100 + [scripts[1]]
test_transaction_acceptance(self.nodes[1], self.std_node, p2wsh_txs[1], True, True)
# witnessScript size at 3600 bytes is standard
p2wsh_txs[2].wit.vtxinwit[0].scriptWitness.stack = [pad, pad, scripts[2]]
test_transaction_acceptance(self.nodes[0], self.test_node, p2wsh_txs[2], True, True)
test_transaction_acceptance(self.nodes[1], self.std_node, p2wsh_txs[2], True, True)
# witnessScript size at 3601 bytes is non-standard
p2wsh_txs[3].wit.vtxinwit[0].scriptWitness.stack = [pad, pad, pad, scripts[3]]
test_transaction_acceptance(self.nodes[1], self.std_node, p2wsh_txs[3], True, False, 'bad-witness-nonstandard')
# Non-standard nodes should accept
test_transaction_acceptance(self.nodes[0], self.test_node, p2wsh_txs[3], True, True)
# Repeating the same tests with P2SH-P2WSH
p2sh_txs[0].wit.vtxinwit[0].scriptWitness.stack = [pad] * 101 + [scripts[0]]
test_transaction_acceptance(self.nodes[1], self.std_node, p2sh_txs[0], True, False, 'bad-witness-nonstandard')
test_transaction_acceptance(self.nodes[0], self.test_node, p2sh_txs[0], True, True)
p2sh_txs[1].wit.vtxinwit[0].scriptWitness.stack = [pad * 81] * 100 + [scripts[1]]
test_transaction_acceptance(self.nodes[1], self.std_node, p2sh_txs[1], True, False, 'bad-witness-nonstandard')
test_transaction_acceptance(self.nodes[0], self.test_node, p2sh_txs[1], True, True)
p2sh_txs[1].wit.vtxinwit[0].scriptWitness.stack = [pad * 80] * 100 + [scripts[1]]
test_transaction_acceptance(self.nodes[1], self.std_node, p2sh_txs[1], True, True)
p2sh_txs[2].wit.vtxinwit[0].scriptWitness.stack = [pad, pad, scripts[2]]
test_transaction_acceptance(self.nodes[0], self.test_node, p2sh_txs[2], True, True)
test_transaction_acceptance(self.nodes[1], self.std_node, p2sh_txs[2], True, True)
p2sh_txs[3].wit.vtxinwit[0].scriptWitness.stack = [pad, pad, pad, scripts[3]]
test_transaction_acceptance(self.nodes[1], self.std_node, p2sh_txs[3], True, False, 'bad-witness-nonstandard')
test_transaction_acceptance(self.nodes[0], self.test_node, p2sh_txs[3], True, True)
self.nodes[0].generate(1) # Mine and clean up the mempool of non-standard node
# Valid but non-standard transactions in a block should be accepted by standard node
self.sync_blocks()
assert_equal(len(self.nodes[0].getrawmempool()), 0)
assert_equal(len(self.nodes[1].getrawmempool()), 0)
self.utxo.pop(0)
@subtest # type: ignore
def test_upgrade_after_activation(self):
"""Test the behavior of starting up a segwit-aware node after the softfork has activated."""
self.restart_node(2, extra_args=["-segwitheight={}".format(SEGWIT_HEIGHT)])
connect_nodes(self.nodes[0], 2)
        # The reconnected node needs to sync more than 100 blocks, so give it plenty of time.
self.sync_blocks(timeout=240)
# Make sure that this peer thinks segwit has activated.
assert softfork_active(self.nodes[2], 'segwit')
# Make sure this peer's blocks match those of node0.
height = self.nodes[2].getblockcount()
while height >= 0:
block_hash = self.nodes[2].getblockhash(height)
assert_equal(block_hash, self.nodes[0].getblockhash(height))
assert_equal(self.nodes[0].getblock(block_hash), self.nodes[2].getblock(block_hash))
height -= 1
@subtest # type: ignore
def test_witness_sigops(self):
"""Test sigop counting is correct inside witnesses."""
# Keep this under MAX_OPS_PER_SCRIPT (201)
witness_program = CScript([OP_TRUE, OP_IF, OP_TRUE, OP_ELSE] + [OP_CHECKMULTISIG] * 5 + [OP_CHECKSIG] * 193 + [OP_ENDIF])
witness_hash = sha256(witness_program)
script_pubkey = CScript([OP_0, witness_hash])
sigops_per_script = 20 * 5 + 193 * 1
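        # (each bare OP_CHECKMULTISIG is counted as 20 sigops, each OP_CHECKSIG as 1)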
# We'll produce 2 extra outputs, one with a program that would take us
# over max sig ops, and one with a program that would exactly reach max
# sig ops
outputs = (MAX_SIGOP_COST // sigops_per_script) + 2
extra_sigops_available = MAX_SIGOP_COST % sigops_per_script
# We chose the number of checkmultisigs/checksigs to make this work:
assert extra_sigops_available < 100 # steer clear of MAX_OPS_PER_SCRIPT
# This script, when spent with the first
# N(=MAX_SIGOP_COST//sigops_per_script) outputs of our transaction,
# would push us just over the block sigop limit.
witness_program_toomany = CScript([OP_TRUE, OP_IF, OP_TRUE, OP_ELSE] + [OP_CHECKSIG] * (extra_sigops_available + 1) + [OP_ENDIF])
witness_hash_toomany = sha256(witness_program_toomany)
script_pubkey_toomany = CScript([OP_0, witness_hash_toomany])
# If we spend this script instead, we would exactly reach our sigop
# limit (for witness sigops).
witness_program_justright = CScript([OP_TRUE, OP_IF, OP_TRUE, OP_ELSE] + [OP_CHECKSIG] * (extra_sigops_available) + [OP_ENDIF])
witness_hash_justright = sha256(witness_program_justright)
script_pubkey_justright = CScript([OP_0, witness_hash_justright])
# First split our available utxo into a bunch of outputs
split_value = self.utxo[0].nValue // outputs
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
for i in range(outputs):
tx.vout.append(CTxOut(split_value, script_pubkey))
tx.vout[-2].scriptPubKey = script_pubkey_toomany
tx.vout[-1].scriptPubKey = script_pubkey_justright
tx.rehash()
block_1 = self.build_next_block()
self.update_witness_block_with_transactions(block_1, [tx])
test_witness_block(self.nodes[0], self.test_node, block_1, accepted=True)
tx2 = CTransaction()
# If we try to spend the first n-1 outputs from tx, that should be
# too many sigops.
total_value = 0
for i in range(outputs - 1):
tx2.vin.append(CTxIn(COutPoint(tx.sha256, i), b""))
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[-1].scriptWitness.stack = [witness_program]
total_value += tx.vout[i].nValue
tx2.wit.vtxinwit[-1].scriptWitness.stack = [witness_program_toomany]
tx2.vout.append(CTxOut(total_value, CScript([OP_TRUE])))
tx2.rehash()
block_2 = self.build_next_block()
self.update_witness_block_with_transactions(block_2, [tx2])
test_witness_block(self.nodes[0], self.test_node, block_2, accepted=False)
        # Try dropping the last input in tx2 and adding an output that has
        # too many sigops (contributing to the legacy sigop count).
checksig_count = (extra_sigops_available // 4) + 1
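        # Non-witness sigops are scaled up by the witness factor (4x) in the
        # sigop-cost accounting, hence the division by 4 to just exceed the
        # remaining sigop budget.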
script_pubkey_checksigs = CScript([OP_CHECKSIG] * checksig_count)
tx2.vout.append(CTxOut(0, script_pubkey_checksigs))
tx2.vin.pop()
tx2.wit.vtxinwit.pop()
tx2.vout[0].nValue -= tx.vout[-2].nValue
tx2.rehash()
block_3 = self.build_next_block()
self.update_witness_block_with_transactions(block_3, [tx2])
test_witness_block(self.nodes[0], self.test_node, block_3, accepted=False)
# If we drop the last checksig in this output, the tx should succeed.
block_4 = self.build_next_block()
tx2.vout[-1].scriptPubKey = CScript([OP_CHECKSIG] * (checksig_count - 1))
tx2.rehash()
self.update_witness_block_with_transactions(block_4, [tx2])
test_witness_block(self.nodes[0], self.test_node, block_4, accepted=True)
# Reset the tip back down for the next test
self.sync_blocks()
for x in self.nodes:
x.invalidateblock(block_4.hash)
        # Try replacing the last input of tx2 so that it spends the last
        # output of tx
block_5 = self.build_next_block()
tx2.vout.pop()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, outputs - 1), b""))
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[-1].scriptWitness.stack = [witness_program_justright]
tx2.rehash()
self.update_witness_block_with_transactions(block_5, [tx2])
test_witness_block(self.nodes[0], self.test_node, block_5, accepted=True)
# TODO: test p2sh sigop counting
def test_superfluous_witness(self):
# Serialization of tx that puts witness flag to 3 always
def serialize_with_bogus_witness(tx):
flags = 3
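            # The segwit marker/flags byte is normally 1; writing 3 leaves an extra
            # bit set, which should trigger the deserialization errors asserted below.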
r = b""
r += struct.pack("<i", tx.nVersion)
if flags:
dummy = []
r += ser_vector(dummy)
r += struct.pack("<B", flags)
r += ser_vector(tx.vin)
r += ser_vector(tx.vout)
if flags & 1:
if (len(tx.wit.vtxinwit) != len(tx.vin)):
# vtxinwit must have the same length as vin
tx.wit.vtxinwit = tx.wit.vtxinwit[:len(tx.vin)]
for i in range(len(tx.wit.vtxinwit), len(tx.vin)):
tx.wit.vtxinwit.append(CTxInWitness())
r += tx.wit.serialize()
r += struct.pack("<I", tx.nLockTime)
return r
class msg_bogus_tx(msg_tx):
def serialize(self):
return serialize_with_bogus_witness(self.tx)
self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(address_type='bech32'), 5)
self.nodes[0].generate(1)
unspent = next(u for u in self.nodes[0].listunspent() if u['spendable'] and u['address'].startswith('bcrt'))
raw = self.nodes[0].createrawtransaction([{"txid": unspent['txid'], "vout": unspent['vout']}], {self.nodes[0].getnewaddress(): 1})
tx = FromHex(CTransaction(), raw)
assert_raises_rpc_error(-22, "TX decode failed", self.nodes[0].decoderawtransaction, serialize_with_bogus_witness(tx).hex())
with self.nodes[0].assert_debug_log(['Superfluous witness record']):
self.nodes[0].p2p.send_and_ping(msg_bogus_tx(tx))
raw = self.nodes[0].signrawtransactionwithwallet(raw)
assert raw['complete']
raw = raw['hex']
tx = FromHex(CTransaction(), raw)
assert_raises_rpc_error(-22, "TX decode failed", self.nodes[0].decoderawtransaction, serialize_with_bogus_witness(tx).hex())
with self.nodes[0].assert_debug_log(['Unknown transaction optional data']):
self.nodes[0].p2p.send_and_ping(msg_bogus_tx(tx))
if __name__ == '__main__':
SegWitTest().main()
|
mit
|
DMOJ/judge
|
dmoj/tests/test_problem.py
|
1
|
4108
|
import os
import unittest
from unittest import mock
from dmoj.config import InvalidInitException
from dmoj.problem import Problem, ProblemDataManager
class ProblemTest(unittest.TestCase):
def setUp(self):
self.data_patch = mock.patch('dmoj.problem.ProblemDataManager')
data_mock = self.data_patch.start()
data_mock.side_effect = lambda problem: self.problem_data
def test_test_case_matching(self):
class MockProblem(Problem):
def _resolve_archive_files(self):
return None
def _problem_file_list(self):
# fmt: off
return [
's2.1-1.in', 's2.1-1.out',
's2.1.2.in', 's2.1.2.out',
's3.4.in', 's3.4.out',
'5.in', '5.OUT',
'6-1.in', '6-1.OUT',
'6.2.in', '6.2.OUT',
'foo/a.b.c.6.3.in', 'foo/a.b.c.6.3.OUT',
'bar.in.7', 'bar.out.7',
'INPUT8.txt', 'OUTPUT8.txt',
'.DS_Store',
]
# fmt: on
with mock.patch('dmoj.problem.get_problem_root') as gpr:
gpr.return_value = '/proc'
self.problem_data = ProblemDataManager(None)
self.problem_data.update({'init.yml': 'archive: foo.zip'})
problem = MockProblem('test', 2, 16384, {})
self.assertEqual(
problem.config.test_cases.unwrap(),
[
{
'batched': [{'in': 's2.1-1.in', 'out': 's2.1-1.out'}, {'in': 's2.1.2.in', 'out': 's2.1.2.out'}],
'points': 1,
},
{'in': 's3.4.in', 'out': 's3.4.out', 'points': 1},
{'in': '5.in', 'out': '5.OUT', 'points': 1},
{
'batched': [
{'in': '6-1.in', 'out': '6-1.OUT'},
{'in': '6.2.in', 'out': '6.2.OUT'},
{'in': 'foo/a.b.c.6.3.in', 'out': 'foo/a.b.c.6.3.OUT'},
],
'points': 1,
},
{'in': 'bar.in.7', 'out': 'bar.out.7', 'points': 1},
{'in': 'INPUT8.txt', 'out': 'OUTPUT8.txt', 'points': 1},
],
)
def test_no_init(self):
self.problem_data = {}
with mock.patch('dmoj.problem.get_problem_root') as gpr:
gpr.return_value = '/proc'
with self.assertRaises(InvalidInitException):
Problem('test', 2, 16384, {})
def test_empty_init(self):
self.problem_data = {'init.yml': ''}
with mock.patch('dmoj.problem.get_problem_root') as gpr:
gpr.return_value = '/proc'
with self.assertRaisesRegex(InvalidInitException, 'lack of content'):
Problem('test', 2, 16384, {})
def test_bad_init(self):
self.problem_data = {'init.yml': '"'}
with mock.patch('dmoj.problem.get_problem_root') as gpr:
gpr.return_value = '/proc'
with self.assertRaisesRegex(InvalidInitException, 'while scanning a quoted scalar'):
Problem('test', 2, 16384, {})
def test_blank_init(self):
self.problem_data = {'init.yml': 'archive: does_not_exist.txt'}
with mock.patch('dmoj.problem.get_problem_root') as gpr:
gpr.return_value = '/proc'
with self.assertRaisesRegex(InvalidInitException, 'archive file'):
Problem('test', 2, 16384, {})
@unittest.skipIf(os.devnull != '/dev/null', 'os.path.exists("nul") is False on Windows')
def test_bad_archive(self):
self.problem_data = {'init.yml': 'archive: %s' % (os.devnull,)}
with mock.patch('dmoj.problem.get_problem_root') as gpr:
gpr.return_value = '/'
with self.assertRaisesRegex(InvalidInitException, 'bad archive:'):
Problem('test', 2, 16384, {})
def tearDown(self):
self.data_patch.stop()
|
agpl-3.0
|
valtech-mooc/edx-platform
|
common/djangoapps/student/tests/test_roles.py
|
61
|
7708
|
"""
Tests of student.roles
"""
import ddt
from django.test import TestCase
from courseware.tests.factories import UserFactory, StaffFactory, InstructorFactory
from student.tests.factories import AnonymousUserFactory
from student.roles import (
GlobalStaff, CourseRole, CourseStaffRole, CourseInstructorRole,
OrgStaffRole, OrgInstructorRole, RoleCache, CourseBetaTesterRole
)
from opaque_keys.edx.locations import SlashSeparatedCourseKey
class RolesTestCase(TestCase):
"""
Tests of student.roles
"""
def setUp(self):
self.course_key = SlashSeparatedCourseKey('edX', 'toy', '2012_Fall')
self.course_loc = self.course_key.make_usage_key('course', '2012_Fall')
self.anonymous_user = AnonymousUserFactory()
self.student = UserFactory()
self.global_staff = UserFactory(is_staff=True)
self.course_staff = StaffFactory(course_key=self.course_key)
self.course_instructor = InstructorFactory(course_key=self.course_key)
def test_global_staff(self):
self.assertFalse(GlobalStaff().has_user(self.student))
self.assertFalse(GlobalStaff().has_user(self.course_staff))
self.assertFalse(GlobalStaff().has_user(self.course_instructor))
self.assertTrue(GlobalStaff().has_user(self.global_staff))
def test_group_name_case_sensitive(self):
uppercase_course_id = "ORG/COURSE/NAME"
lowercase_course_id = uppercase_course_id.lower()
uppercase_course_key = SlashSeparatedCourseKey.from_deprecated_string(uppercase_course_id)
lowercase_course_key = SlashSeparatedCourseKey.from_deprecated_string(lowercase_course_id)
role = "role"
lowercase_user = UserFactory()
CourseRole(role, lowercase_course_key).add_users(lowercase_user)
uppercase_user = UserFactory()
CourseRole(role, uppercase_course_key).add_users(uppercase_user)
self.assertTrue(CourseRole(role, lowercase_course_key).has_user(lowercase_user))
self.assertFalse(CourseRole(role, uppercase_course_key).has_user(lowercase_user))
self.assertFalse(CourseRole(role, lowercase_course_key).has_user(uppercase_user))
self.assertTrue(CourseRole(role, uppercase_course_key).has_user(uppercase_user))
def test_course_role(self):
"""
Test that giving a user a course role enables access appropriately
"""
self.assertFalse(
CourseStaffRole(self.course_key).has_user(self.student),
"Student has premature access to {}".format(self.course_key)
)
CourseStaffRole(self.course_key).add_users(self.student)
self.assertTrue(
CourseStaffRole(self.course_key).has_user(self.student),
"Student doesn't have access to {}".format(unicode(self.course_key))
)
# remove access and confirm
CourseStaffRole(self.course_key).remove_users(self.student)
self.assertFalse(
CourseStaffRole(self.course_key).has_user(self.student),
"Student still has access to {}".format(self.course_key)
)
def test_org_role(self):
"""
Test that giving a user an org role enables access appropriately
"""
self.assertFalse(
OrgStaffRole(self.course_key.org).has_user(self.student),
"Student has premature access to {}".format(self.course_key.org)
)
OrgStaffRole(self.course_key.org).add_users(self.student)
self.assertTrue(
OrgStaffRole(self.course_key.org).has_user(self.student),
"Student doesn't have access to {}".format(unicode(self.course_key.org))
)
# remove access and confirm
OrgStaffRole(self.course_key.org).remove_users(self.student)
if hasattr(self.student, '_roles'):
del self.student._roles
self.assertFalse(
OrgStaffRole(self.course_key.org).has_user(self.student),
"Student still has access to {}".format(self.course_key.org)
)
def test_org_and_course_roles(self):
"""
        Test that org roles don't interfere with course roles, and vice versa
"""
OrgInstructorRole(self.course_key.org).add_users(self.student)
CourseInstructorRole(self.course_key).add_users(self.student)
self.assertTrue(
OrgInstructorRole(self.course_key.org).has_user(self.student),
"Student doesn't have access to {}".format(unicode(self.course_key.org))
)
self.assertTrue(
CourseInstructorRole(self.course_key).has_user(self.student),
"Student doesn't have access to {}".format(unicode(self.course_key))
)
# remove access and confirm
OrgInstructorRole(self.course_key.org).remove_users(self.student)
self.assertFalse(
OrgInstructorRole(self.course_key.org).has_user(self.student),
"Student still has access to {}".format(self.course_key.org)
)
self.assertTrue(
CourseInstructorRole(self.course_key).has_user(self.student),
"Student doesn't have access to {}".format(unicode(self.course_key))
)
# ok now keep org role and get rid of course one
OrgInstructorRole(self.course_key.org).add_users(self.student)
CourseInstructorRole(self.course_key).remove_users(self.student)
self.assertTrue(
OrgInstructorRole(self.course_key.org).has_user(self.student),
"Student lost has access to {}".format(self.course_key.org)
)
self.assertFalse(
CourseInstructorRole(self.course_key).has_user(self.student),
"Student doesn't have access to {}".format(unicode(self.course_key))
)
def test_get_user_for_role(self):
"""
test users_for_role
"""
role = CourseStaffRole(self.course_key)
role.add_users(self.student)
self.assertGreater(len(role.users_with_role()), 0)
def test_add_users_doesnt_add_duplicate_entry(self):
"""
Tests that calling add_users multiple times before a single call
to remove_users does not result in the user remaining in the group.
"""
role = CourseStaffRole(self.course_key)
role.add_users(self.student)
self.assertTrue(role.has_user(self.student))
# Call add_users a second time, then remove just once.
role.add_users(self.student)
role.remove_users(self.student)
self.assertFalse(role.has_user(self.student))
@ddt.ddt
class RoleCacheTestCase(TestCase):
IN_KEY = SlashSeparatedCourseKey('edX', 'toy', '2012_Fall')
NOT_IN_KEY = SlashSeparatedCourseKey('edX', 'toy', '2013_Fall')
ROLES = (
(CourseStaffRole(IN_KEY), ('staff', IN_KEY, 'edX')),
(CourseInstructorRole(IN_KEY), ('instructor', IN_KEY, 'edX')),
(OrgStaffRole(IN_KEY.org), ('staff', None, 'edX')),
(OrgInstructorRole(IN_KEY.org), ('instructor', None, 'edX')),
(CourseBetaTesterRole(IN_KEY), ('beta_testers', IN_KEY, 'edX')),
)
def setUp(self):
self.user = UserFactory()
@ddt.data(*ROLES)
@ddt.unpack
def test_only_in_role(self, role, target):
role.add_users(self.user)
cache = RoleCache(self.user)
self.assertTrue(cache.has_role(*target))
for other_role, other_target in self.ROLES:
if other_role == role:
continue
self.assertFalse(cache.has_role(*other_target))
@ddt.data(*ROLES)
@ddt.unpack
def test_empty_cache(self, role, target):
cache = RoleCache(self.user)
self.assertFalse(cache.has_role(*target))
|
agpl-3.0
|
webmedic/booker
|
src/plugins/tags/__init__.py
|
1
|
4970
|
from PyQt4 import QtGui, QtCore
import sys, os
import models
from pluginmgr import ShelfView
# This plugin lists the books by tag
EBOOK_EXTENSIONS=['epub','mobi','pdf']
class Catalog(ShelfView):
title = "Books By Tag"
itemText = "Tags"
items = {}
def showList(self, search = None):
"""Get all books from the DB and show them"""
if not self.widget:
print("Call setWidget first")
return
self.operate = self.showList
self.items = {}
css = '''
::item {
padding: 0;
margin: 0;
height: 48;
}
'''
self.widget.title.setText(self.title)
# Setup widgetry
self.widget.stack.setCurrentIndex(0)
self.shelf = QtGui.QListWidget()
# Make it look right
self.shelf.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
self.shelf.setFrameShape(self.shelf.NoFrame)
self.shelf.setDragEnabled(False)
self.shelf.setSelectionMode(self.shelf.NoSelection)
self.shelf.setStyleSheet(css)
self.shelf.setIconSize(QtCore.QSize(48,48))
# Hook the shelf context menu
self.shelf.customContextMenuRequested.connect(self.shelfContextMenu)
# Hook book editor
self.shelf.itemActivated.connect(self.widget.on_books_itemActivated)
# Fill the shelf
if search:
tags = models.Tag.query.order_by("name").filter(models.Tag.name.like("%%%s%%"%search))
else:
tags = models.Tag.query.order_by("name").all()
for a in tags:
a_item = QtGui.QListWidgetItem(a.name, self.shelf)
for b in a.books:
icon = QtGui.QIcon(QtGui.QPixmap(b.cover()).scaledToHeight(128, QtCore.Qt.SmoothTransformation))
item = QtGui.QListWidgetItem(icon, b.title, self.shelf)
item.book = b
self.items[b.id] = item
self.widget.shelfStack.setWidget(self.shelf)
def showGrid(self, search = None):
"""Get all books from the DB and show them"""
if not self.widget:
print("Call setWidget first")
return
self.operate = self.showGrid
self.items = {}
self.widget.title.setText(self.title)
css = '''
::item {
padding: 0;
margin: 0;
width: 150px;
height: 150px;
}
'''
# Setup widgetry
self.widget.stack.setCurrentIndex(0)
self.shelves = QtGui.QWidget()
self.shelvesLayout = QtGui.QVBoxLayout()
self.shelves.setLayout(self.shelvesLayout)
if search:
tags = models.Tag.query.order_by("name").filter(models.Tag.name.like("%%%s%%"%search))
else:
tags = models.Tag.query.order_by("name").all()
for a in tags:
# Make a shelf
shelf_label = QtGui.QLabel(a.name)
shelf = QtGui.QListWidget()
self.shelvesLayout.addWidget(shelf_label)
self.shelvesLayout.addWidget(shelf)
# Make it look right
shelf.setStyleSheet(css)
shelf.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
shelf.setFrameShape(shelf.NoFrame)
shelf.setIconSize(QtCore.QSize(128,128))
shelf.setViewMode(shelf.IconMode)
shelf.setMinimumHeight(153)
shelf.setMaximumHeight(153)
shelf.setMinimumWidth(153*len(a.books))
shelf.setFlow(shelf.LeftToRight)
shelf.setWrapping(False)
shelf.setDragEnabled(False)
shelf.setSelectionMode(shelf.NoSelection)
# Hook the shelf context menu
shelf.customContextMenuRequested.connect(self.shelfContextMenu)
# Hook book editor
shelf.itemActivated.connect(self.widget.on_books_itemActivated)
# Fill the shelf
for b in a.books:
pixmap = QtGui.QPixmap(b.cover())
if pixmap.isNull():
pixmap = QtGui.QPixmap(b.default_cover())
icon = QtGui.QIcon(pixmap.scaledToHeight(128, QtCore.Qt.SmoothTransformation))
item = QtGui.QListWidgetItem(icon, b.title, shelf)
item.book = b
self.items[b.id] = item
self.shelvesLayout.addStretch(1)
self.widget.shelfStack.setWidget(self.shelves)
def updateBook(self, book):
# This may get called when no books
# have been loaded in this view, so make it cheap
if self.items and book.id in self.items:
item = self.items[book.id]
icon = QtGui.QIcon(QtGui.QPixmap(book.cover()).scaledToHeight(128, QtCore.Qt.SmoothTransformation))
item.setText(book.title)
item.setIcon(icon)
item.book = book
|
mit
|
duncanwp/iris
|
lib/iris/tests/unit/plot/test_points.py
|
11
|
3049
|
# (C) British Crown Copyright 2014 - 2016, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""Unit tests for the `iris.plot.points` function."""
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
# Import iris.tests first so that some things can be initialised before
# importing anything else.
import iris.tests as tests
import numpy as np
from iris.tests.stock import simple_2d
from iris.tests.unit.plot import TestGraphicStringCoord, MixinCoords
if tests.MPL_AVAILABLE:
import iris.plot as iplt
@tests.skip_plot
class TestStringCoordPlot(TestGraphicStringCoord):
def test_yaxis_labels(self):
iplt.points(self.cube, coords=('bar', 'str_coord'))
self.assertBoundsTickLabels('yaxis')
def test_xaxis_labels(self):
iplt.points(self.cube, coords=('str_coord', 'bar'))
self.assertBoundsTickLabels('xaxis')
def test_xaxis_labels_with_axes(self):
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_xlim(0, 3)
iplt.points(self.cube, coords=('str_coord', 'bar'), axes=ax)
plt.close(fig)
self.assertPointsTickLabels('xaxis', ax)
def test_yaxis_labels_with_axes(self):
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_ylim(0, 3)
iplt.points(self.cube, coords=('bar', 'str_coord'), axes=ax)
plt.close(fig)
self.assertPointsTickLabels('yaxis', ax)
def test_geoaxes_exception(self):
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(111)
self.assertRaises(TypeError, iplt.points,
self.lat_lon_cube, axes=ax)
plt.close(fig)
@tests.skip_plot
class TestCoords(tests.IrisTest, MixinCoords):
def setUp(self):
# We have a 2d cube with dimensionality (bar: 3; foo: 4)
self.cube = simple_2d(with_bounds=False)
self.foo = self.cube.coord('foo').points
self.foo_index = np.arange(self.foo.size)
self.bar = self.cube.coord('bar').points
self.bar_index = np.arange(self.bar.size)
self.data = None
self.dataT = None
self.mpl_patch = self.patch('matplotlib.pyplot.scatter')
self.draw_func = iplt.points
if __name__ == "__main__":
tests.main()
|
lgpl-3.0
|
zhangjunlei26/servo
|
python/mozlog/mozlog/structured/formatters/machformatter.py
|
45
|
12405
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import time
from collections import defaultdict
try:
import blessings
except ImportError:
blessings = None
import base
def format_seconds(total):
"""Format number of seconds to MM:SS.DD form."""
minutes, seconds = divmod(total, 60)
return '%2d:%05.2f' % (minutes, seconds)
class NullTerminal(object):
def __getattr__(self, name):
return self._id
def _id(self, value):
return value
class MachFormatter(base.BaseFormatter):
def __init__(self, start_time=None, write_interval=False, write_times=True,
terminal=None, disable_colors=False):
if disable_colors:
terminal = None
elif terminal is None and blessings is not None:
terminal = blessings.Terminal()
if start_time is None:
start_time = time.time()
start_time = int(start_time * 1000)
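        # Times are tracked in milliseconds to match the 'time' field of structured log entries.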
self.start_time = start_time
self.write_interval = write_interval
self.write_times = write_times
self.status_buffer = {}
self.has_unexpected = {}
self.last_time = None
self.terminal = terminal
self.verbose = False
self._known_pids = set()
self.summary_values = {"tests": 0,
"subtests": 0,
"expected": 0,
"unexpected": defaultdict(int),
"skipped": 0}
self.summary_unexpected = []
def __call__(self, data):
s = base.BaseFormatter.__call__(self, data)
if s is None:
return
time = format_seconds(self._time(data))
action = data["action"].upper()
thread = data["thread"]
# Not using the NullTerminal here is a small optimisation to cut the number of
# function calls
if self.terminal is not None:
test = self._get_test_id(data)
time = self.terminal.blue(time)
color = None
if data["action"] == "test_end":
if "expected" not in data and not self.has_unexpected[test]:
color = self.terminal.green
else:
color = self.terminal.red
elif data["action"] in ("suite_start", "suite_end",
"test_start", "test_status"):
color = self.terminal.yellow
elif data["action"] == "crash":
color = self.terminal.red
if color is not None:
action = color(action)
return "%s %s: %s %s\n" % (time, action, thread, s)
def _get_test_id(self, data):
test_id = data.get("test")
if isinstance(test_id, list):
test_id = tuple(test_id)
return test_id
def _get_file_name(self, test_id):
if isinstance(test_id, (str, unicode)):
return test_id
if isinstance(test_id, tuple):
return "".join(test_id)
assert False, "unexpected test_id"
def suite_start(self, data):
self.summary_values = {"tests": 0,
"subtests": 0,
"expected": 0,
"unexpected": defaultdict(int),
"skipped": 0}
self.summary_unexpected = []
return "%i" % len(data["tests"])
def suite_end(self, data):
term = self.terminal if self.terminal is not None else NullTerminal()
heading = "Summary"
rv = ["", heading, "=" * len(heading), ""]
has_subtests = self.summary_values["subtests"] > 0
if has_subtests:
rv.append("Ran %i tests (%i parents, %i subtests)" %
(self.summary_values["tests"] + self.summary_values["subtests"],
self.summary_values["tests"],
self.summary_values["subtests"]))
else:
rv.append("Ran %i tests" % self.summary_values["tests"])
rv.append("Expected results: %i" % self.summary_values["expected"])
unexpected_count = sum(self.summary_values["unexpected"].values())
if unexpected_count > 0:
unexpected_str = " (%s)" % ", ".join("%s: %i" % (key, value) for key, value in
sorted(self.summary_values["unexpected"].items()))
else:
unexpected_str = ""
rv.append("Unexpected results: %i%s" % (unexpected_count, unexpected_str))
if self.summary_values["skipped"] > 0:
rv.append("Skipped: %i" % self.summary_values["skipped"])
rv.append("")
if not self.summary_values["unexpected"]:
rv.append(term.green("OK"))
else:
heading = "Unexpected Results"
rv.extend([heading, "=" * len(heading), ""])
if has_subtests:
for test_id, results in self.summary_unexpected:
test = self._get_file_name(test_id)
rv.extend([test, "-" * len(test)])
for name, status, expected, message in results:
if name is None:
name = "[Parent]"
rv.append("%s %s" % (self.format_expected(status, expected), name))
else:
for test_id, results in self.summary_unexpected:
test = self._get_file_name(test_id)
assert len(results) == 1
                    name, status, expected, message = results[0]
assert name is None
rv.append("%s %s" % (self.format_expected(status, expected), test))
return "\n".join(rv)
def format_expected(self, status, expected):
term = self.terminal if self.terminal is not None else NullTerminal()
if status == "ERROR":
color = term.red
else:
color = term.yellow
if expected in ("PASS", "OK"):
return color(status)
return color("%s expected %s" % (status, expected))
def test_start(self, data):
self.summary_values["tests"] += 1
return "%s" % (self._get_test_id(data),)
def test_end(self, data):
subtests = self._get_subtest_data(data)
unexpected = subtests["unexpected"]
message = data.get("message", "")
if "stack" in data:
stack = data["stack"]
if stack and stack[-1] != "\n":
stack += "\n"
message = stack + message
if "expected" in data:
parent_unexpected = True
expected_str = ", expected %s" % data["expected"]
unexpected.append((None, data["status"], data["expected"],
message))
else:
parent_unexpected = False
expected_str = ""
test = self._get_test_id(data)
if unexpected:
self.summary_unexpected.append((test, unexpected))
self._update_summary(data)
        # Reset the counts to 0
self.status_buffer[test] = {"count": 0, "unexpected": [], "pass": 0}
self.has_unexpected[test] = bool(unexpected)
if subtests["count"] != 0:
rv = "Harness %s%s. Subtests passed %i/%i. Unexpected %s" % (
data["status"], expected_str, subtests["pass"], subtests["count"],
len(unexpected))
else:
rv = "%s%s" % (data["status"], expected_str)
if unexpected:
rv += "\n"
if len(unexpected) == 1 and parent_unexpected:
rv += "%s" % unexpected[0][-1]
else:
for name, status, expected, message in unexpected:
if name is None:
name = "[Parent]"
expected_str = "Expected %s, got %s" % (expected, status)
rv += "%s\n" % ("\n".join([name, "-" * len(name), expected_str, message]))
rv = rv[:-1]
return rv
def test_status(self, data):
self.summary_values["subtests"] += 1
test = self._get_test_id(data)
if test not in self.status_buffer:
self.status_buffer[test] = {"count": 0, "unexpected": [], "pass": 0}
self.status_buffer[test]["count"] += 1
message = data.get("message", "")
if "stack" in data:
if message:
message += "\n"
message += data["stack"]
if data["status"] == "PASS":
self.status_buffer[test]["pass"] += 1
self._update_summary(data)
rv = None
status, subtest = data["status"], data["subtest"]
unexpected = "expected" in data
if self.verbose:
if self.terminal is not None:
status = (self.terminal.red if unexpected else self.terminal.green)(status)
rv = " ".join([subtest, status, message])
elif unexpected:
# We only append an unexpected summary if it was not logged
# directly by verbose mode.
self.status_buffer[test]["unexpected"].append((subtest,
status,
data["expected"],
message))
return rv
def _update_summary(self, data):
if "expected" in data:
self.summary_values["unexpected"][data["status"]] += 1
elif data["status"] == "SKIP":
self.summary_values["skipped"] += 1
else:
self.summary_values["expected"] += 1
def process_output(self, data):
rv = []
if "command" in data and data["process"] not in self._known_pids:
self._known_pids.add(data["process"])
rv.append('(pid:%s) Full command: %s' % (data["process"], data["command"]))
rv.append('(pid:%s) "%s"' % (data["process"], data["data"]))
return "\n".join(rv)
def crash(self, data):
test = self._get_test_id(data)
if data.get("stackwalk_returncode", 0) != 0 and not data.get("stackwalk_stderr"):
success = True
else:
success = False
rv = ["pid:%s. Test:%s. Minidump anaylsed:%s. Signature:[%s]" %
(data.get("pid", None), test, success, data["signature"])]
if data.get("minidump_path"):
rv.append("Crash dump filename: %s" % data["minidump_path"])
if data.get("stackwalk_returncode", 0) != 0:
rv.append("minidump_stackwalk exited with return code %d" %
data["stackwalk_returncode"])
if data.get("stackwalk_stderr"):
rv.append("stderr from minidump_stackwalk:")
rv.append(data["stackwalk_stderr"])
elif data.get("stackwalk_stdout"):
rv.append(data["stackwalk_stdout"])
if data.get("stackwalk_errors"):
rv.extend(data.get("stackwalk_errors"))
rv = "\n".join(rv)
if not rv[-1] == "\n":
rv += "\n"
return rv
def log(self, data):
level = data.get("level").upper()
if self.terminal is not None:
if level in ("CRITICAL", "ERROR"):
level = self.terminal.red(level)
elif level == "WARNING":
level = self.terminal.yellow(level)
elif level == "INFO":
level = self.terminal.blue(level)
if data.get('component'):
rv = " ".join([data["component"], level, data["message"]])
else:
rv = "%s %s" % (level, data["message"])
if "stack" in data:
rv += "\n%s" % data["stack"]
return rv
def _get_subtest_data(self, data):
test = self._get_test_id(data)
return self.status_buffer.get(test, {"count": 0, "unexpected": [], "pass": 0})
def _time(self, data):
entry_time = data["time"]
if self.write_interval and self.last_time is not None:
t = entry_time - self.last_time
self.last_time = entry_time
else:
t = entry_time - self.start_time
return t / 1000.
|
mpl-2.0
|
harisibrahimkv/django
|
django/db/backends/oracle/compiler.py
|
25
|
2437
|
from django.db import NotSupportedError
from django.db.models.sql import compiler
class SQLCompiler(compiler.SQLCompiler):
def as_sql(self, with_limits=True, with_col_aliases=False):
"""
Create the SQL for this query. Return the SQL string and list
of parameters. This is overridden from the original Query class
to handle the additional SQL Oracle requires to emulate LIMIT
and OFFSET.
If 'with_limits' is False, any limit/offset information is not
included in the query.
"""
# The `do_offset` flag indicates whether we need to construct
# the SQL needed to use limit/offset with Oracle.
do_offset = with_limits and (self.query.high_mark is not None or self.query.low_mark)
if not do_offset:
sql, params = super().as_sql(with_limits=False, with_col_aliases=with_col_aliases)
elif not self.connection.features.supports_select_for_update_with_limit and self.query.select_for_update:
raise NotSupportedError(
'LIMIT/OFFSET is not supported with select_for_update on this '
'database backend.'
)
else:
sql, params = super().as_sql(with_limits=False, with_col_aliases=True)
# Wrap the base query in an outer SELECT * with boundaries on
# the "_RN" column. This is the canonical way to emulate LIMIT
# and OFFSET on Oracle.
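            # The emulated query has the shape:
            #   SELECT * FROM (SELECT "_SUB".*, ROWNUM AS "_RN" FROM (<base query>) "_SUB"
            #                  WHERE ROWNUM <= <high>) WHERE "_RN" > <low>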
high_where = ''
if self.query.high_mark is not None:
high_where = 'WHERE ROWNUM <= %d' % (self.query.high_mark,)
if self.query.low_mark:
sql = (
'SELECT * FROM (SELECT "_SUB".*, ROWNUM AS "_RN" FROM (%s) '
'"_SUB" %s) WHERE "_RN" > %d' % (sql, high_where, self.query.low_mark)
)
else:
# Simplify the query to support subqueries if there's no offset.
sql = (
'SELECT * FROM (SELECT "_SUB".* FROM (%s) "_SUB" %s)' % (sql, high_where)
)
return sql, params
class SQLInsertCompiler(compiler.SQLInsertCompiler, SQLCompiler):
pass
class SQLDeleteCompiler(compiler.SQLDeleteCompiler, SQLCompiler):
pass
class SQLUpdateCompiler(compiler.SQLUpdateCompiler, SQLCompiler):
pass
class SQLAggregateCompiler(compiler.SQLAggregateCompiler, SQLCompiler):
pass
|
bsd-3-clause
|
acrsteiner/three.js
|
utils/exporters/blender/addons/io_three/logger.py
|
176
|
1423
|
import os
import logging
import tempfile
from . import constants
LOG_FILE = None
LOGGER = None
LEVELS = {
constants.DEBUG: logging.DEBUG,
constants.INFO: logging.INFO,
constants.WARNING: logging.WARNING,
constants.ERROR: logging.ERROR,
constants.CRITICAL: logging.CRITICAL
}
def init(filename, level=constants.DEBUG):
"""Initialize the logger.
:param filename: base name of the log file
:param level: logging level (Default value = DEBUG)
"""
global LOG_FILE
LOG_FILE = os.path.join(tempfile.gettempdir(), filename)
with open(LOG_FILE, 'w'):
pass
global LOGGER
LOGGER = logging.getLogger('Three.Export')
LOGGER.setLevel(LEVELS[level])
if not LOGGER.handlers:
stream = logging.StreamHandler()
stream.setLevel(LEVELS[level])
format_ = '%(asctime)s - %(name)s - %(levelname)s: %(message)s'
formatter = logging.Formatter(format_)
stream.setFormatter(formatter)
file_handler = logging.FileHandler(LOG_FILE)
file_handler.setLevel(LEVELS[level])
file_handler.setFormatter(formatter)
LOGGER.addHandler(stream)
LOGGER.addHandler(file_handler)
def info(*args):
LOGGER.info(*args)
def debug(*args):
LOGGER.debug(*args)
def warning(*args):
LOGGER.warning(*args)
def error(*args):
LOGGER.error(*args)
def critical(*args):
LOGGER.critical(*args)
|
mit
|
timlinux/QGIS
|
tests/src/python/test_qgspointcloudattributebyramprenderer.py
|
1
|
17855
|
# -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsPointCloudAttributeByRampRenderer
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Nyall Dawson'
__date__ = '09/11/2020'
__copyright__ = 'Copyright 2020, The QGIS Project'
import qgis # NOQA
from qgis.core import (
QgsProviderRegistry,
QgsPointCloudLayer,
QgsPointCloudAttributeByRampRenderer,
QgsReadWriteContext,
QgsRenderContext,
QgsPointCloudRenderContext,
QgsVector3D,
QgsMultiRenderChecker,
QgsMapSettings,
QgsRectangle,
QgsUnitTypes,
QgsMapUnitScale,
QgsCoordinateReferenceSystem,
QgsDoubleRange,
QgsColorRampShader,
QgsStyle,
QgsLayerTreeLayer
)
from qgis.PyQt.QtCore import QDir, QSize, Qt
from qgis.PyQt.QtGui import QPainter
from qgis.PyQt.QtXml import QDomDocument
from qgis.testing import start_app, unittest
from utilities import unitTestDataPath
start_app()
class TestQgsPointCloudAttributeByRampRenderer(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.report = "<h1>Python QgsPointCloudAttributeByRampRenderer Tests</h1>\n"
@classmethod
def tearDownClass(cls):
report_file_path = "%s/qgistest.html" % QDir.tempPath()
with open(report_file_path, 'a') as report_file:
report_file.write(cls.report)
@unittest.skipIf('ept' not in QgsProviderRegistry.instance().providerList(), 'EPT provider not available')
def testSetLayer(self):
layer = QgsPointCloudLayer(unitTestDataPath() + '/point_clouds/ept/norgb/ept.json', 'test', 'ept')
self.assertTrue(layer.isValid())
# test that a point cloud with no RGB attributes is automatically assigned the ramp renderer
self.assertIsInstance(layer.renderer(), QgsPointCloudAttributeByRampRenderer)
# check default range
self.assertAlmostEqual(layer.renderer().minimum(), -1.98, 6)
self.assertAlmostEqual(layer.renderer().maximum(), -1.92, 6)
def testBasic(self):
renderer = QgsPointCloudAttributeByRampRenderer()
renderer.setAttribute('attr')
self.assertEqual(renderer.attribute(), 'attr')
renderer.setMinimum(5)
self.assertEqual(renderer.minimum(), 5)
renderer.setMaximum(15)
self.assertEqual(renderer.maximum(), 15)
ramp = QgsStyle.defaultStyle().colorRamp("Viridis")
shader = QgsColorRampShader(20, 30, ramp)
renderer.setColorRampShader(shader)
self.assertEqual(renderer.colorRampShader().minimumValue(), 20)
self.assertEqual(renderer.colorRampShader().maximumValue(), 30)
renderer.setMaximumScreenError(18)
renderer.setMaximumScreenErrorUnit(QgsUnitTypes.RenderInches)
renderer.setPointSize(13)
renderer.setPointSizeUnit(QgsUnitTypes.RenderPoints)
renderer.setPointSizeMapUnitScale(QgsMapUnitScale(1000, 2000))
rr = renderer.clone()
self.assertEqual(rr.maximumScreenError(), 18)
self.assertEqual(rr.maximumScreenErrorUnit(), QgsUnitTypes.RenderInches)
self.assertEqual(rr.pointSize(), 13)
self.assertEqual(rr.pointSizeUnit(), QgsUnitTypes.RenderPoints)
self.assertEqual(rr.pointSizeMapUnitScale().minScale, 1000)
self.assertEqual(rr.pointSizeMapUnitScale().maxScale, 2000)
self.assertEqual(rr.attribute(), 'attr')
self.assertEqual(rr.minimum(), 5)
self.assertEqual(rr.maximum(), 15)
self.assertEqual(rr.colorRampShader().minimumValue(), 20)
self.assertEqual(rr.colorRampShader().maximumValue(), 30)
self.assertEqual(rr.colorRampShader().sourceColorRamp().color1().name(),
renderer.colorRampShader().sourceColorRamp().color1().name())
self.assertEqual(rr.colorRampShader().sourceColorRamp().color2().name(),
renderer.colorRampShader().sourceColorRamp().color2().name())
doc = QDomDocument("testdoc")
elem = renderer.save(doc, QgsReadWriteContext())
r2 = QgsPointCloudAttributeByRampRenderer.create(elem, QgsReadWriteContext())
self.assertEqual(r2.maximumScreenError(), 18)
self.assertEqual(r2.maximumScreenErrorUnit(), QgsUnitTypes.RenderInches)
self.assertEqual(r2.pointSize(), 13)
self.assertEqual(r2.pointSizeUnit(), QgsUnitTypes.RenderPoints)
self.assertEqual(r2.pointSizeMapUnitScale().minScale, 1000)
self.assertEqual(r2.pointSizeMapUnitScale().maxScale, 2000)
self.assertEqual(r2.attribute(), 'attr')
self.assertEqual(r2.minimum(), 5)
self.assertEqual(r2.maximum(), 15)
self.assertEqual(r2.colorRampShader().minimumValue(), 20)
self.assertEqual(r2.colorRampShader().maximumValue(), 30)
self.assertEqual(r2.colorRampShader().sourceColorRamp().color1().name(),
renderer.colorRampShader().sourceColorRamp().color1().name())
self.assertEqual(r2.colorRampShader().sourceColorRamp().color2().name(),
renderer.colorRampShader().sourceColorRamp().color2().name())
def testUsedAttributes(self):
renderer = QgsPointCloudAttributeByRampRenderer()
renderer.setAttribute('attr')
rc = QgsRenderContext()
prc = QgsPointCloudRenderContext(rc, QgsVector3D(), QgsVector3D())
self.assertEqual(renderer.usedAttributes(prc), {'attr'})
def testLegend(self):
renderer = QgsPointCloudAttributeByRampRenderer()
renderer.setAttribute('Intensity')
renderer.setMinimum(200)
renderer.setMaximum(800)
ramp = QgsStyle.defaultStyle().colorRamp("Viridis")
shader = QgsColorRampShader(200, 800, ramp.clone())
shader.setClassificationMode(QgsColorRampShader.EqualInterval)
shader.classifyColorRamp(classes=4)
renderer.setColorRampShader(shader)
layer = QgsPointCloudLayer(unitTestDataPath() + '/point_clouds/ept/sunshine-coast/ept.json', 'test', 'ept')
layer_tree_layer = QgsLayerTreeLayer(layer)
nodes = renderer.createLegendNodes(layer_tree_layer)
self.assertEqual(len(nodes), 4)
self.assertEqual(nodes[0].data(Qt.DisplayRole), '200')
self.assertEqual(nodes[1].data(Qt.DisplayRole), '400')
self.assertEqual(nodes[2].data(Qt.DisplayRole), '600')
self.assertEqual(nodes[3].data(Qt.DisplayRole), '800')
shader = QgsColorRampShader(200, 600, ramp.clone())
shader.setClassificationMode(QgsColorRampShader.EqualInterval)
shader.classifyColorRamp(classes=2)
renderer.setColorRampShader(shader)
nodes = renderer.createLegendNodes(layer_tree_layer)
self.assertEqual(len(nodes), 2)
self.assertEqual(nodes[0].data(Qt.DisplayRole), '200')
self.assertEqual(nodes[1].data(Qt.DisplayRole), '600')
@unittest.skipIf('ept' not in QgsProviderRegistry.instance().providerList(), 'EPT provider not available')
def testRender(self):
layer = QgsPointCloudLayer(unitTestDataPath() + '/point_clouds/ept/sunshine-coast/ept.json', 'test', 'ept')
self.assertTrue(layer.isValid())
renderer = QgsPointCloudAttributeByRampRenderer()
renderer.setAttribute('Intensity')
renderer.setMinimum(200)
renderer.setMaximum(1000)
ramp = QgsStyle.defaultStyle().colorRamp("Viridis")
shader = QgsColorRampShader(200, 1000, ramp)
shader.classifyColorRamp()
renderer.setColorRampShader(shader)
layer.setRenderer(renderer)
layer.renderer().setPointSize(2)
layer.renderer().setPointSizeUnit(QgsUnitTypes.RenderMillimeters)
mapsettings = QgsMapSettings()
mapsettings.setOutputSize(QSize(400, 400))
mapsettings.setOutputDpi(96)
mapsettings.setDestinationCrs(layer.crs())
mapsettings.setExtent(QgsRectangle(498061, 7050991, 498069, 7050999))
mapsettings.setLayers([layer])
renderchecker = QgsMultiRenderChecker()
renderchecker.setMapSettings(mapsettings)
renderchecker.setControlPathPrefix('pointcloudrenderer')
renderchecker.setControlName('expected_ramp_render')
result = renderchecker.runTest('expected_ramp_render')
TestQgsPointCloudAttributeByRampRenderer.report += renderchecker.report()
self.assertTrue(result)
@unittest.skipIf('ept' not in QgsProviderRegistry.instance().providerList(), 'EPT provider not available')
def testRenderX(self):
layer = QgsPointCloudLayer(unitTestDataPath() + '/point_clouds/ept/sunshine-coast/ept.json', 'test', 'ept')
self.assertTrue(layer.isValid())
renderer = QgsPointCloudAttributeByRampRenderer()
renderer.setAttribute('X')
renderer.setMinimum(498062.00000)
renderer.setMaximum(498067.39000)
ramp = QgsStyle.defaultStyle().colorRamp("Viridis")
shader = QgsColorRampShader(498062.00000, 498067.39000, ramp)
shader.classifyColorRamp()
renderer.setColorRampShader(shader)
layer.setRenderer(renderer)
layer.renderer().setPointSize(2)
layer.renderer().setPointSizeUnit(QgsUnitTypes.RenderMillimeters)
mapsettings = QgsMapSettings()
mapsettings.setOutputSize(QSize(400, 400))
mapsettings.setOutputDpi(96)
mapsettings.setDestinationCrs(layer.crs())
mapsettings.setExtent(QgsRectangle(498061, 7050991, 498069, 7050999))
mapsettings.setLayers([layer])
renderchecker = QgsMultiRenderChecker()
renderchecker.setMapSettings(mapsettings)
renderchecker.setControlPathPrefix('pointcloudrenderer')
renderchecker.setControlName('expected_ramp_xrender')
result = renderchecker.runTest('expected_ramp_xrender')
TestQgsPointCloudAttributeByRampRenderer.report += renderchecker.report()
self.assertTrue(result)
@unittest.skipIf('ept' not in QgsProviderRegistry.instance().providerList(), 'EPT provider not available')
def testRenderY(self):
layer = QgsPointCloudLayer(unitTestDataPath() + '/point_clouds/ept/sunshine-coast/ept.json', 'test', 'ept')
self.assertTrue(layer.isValid())
renderer = QgsPointCloudAttributeByRampRenderer()
renderer.setAttribute('Y')
renderer.setMinimum(7050992.84000)
renderer.setMaximum(7050997.04000)
ramp = QgsStyle.defaultStyle().colorRamp("Viridis")
shader = QgsColorRampShader(7050992.84000, 7050997.04000, ramp)
shader.classifyColorRamp()
renderer.setColorRampShader(shader)
layer.setRenderer(renderer)
layer.renderer().setPointSize(2)
layer.renderer().setPointSizeUnit(QgsUnitTypes.RenderMillimeters)
mapsettings = QgsMapSettings()
mapsettings.setOutputSize(QSize(400, 400))
mapsettings.setOutputDpi(96)
mapsettings.setDestinationCrs(layer.crs())
mapsettings.setExtent(QgsRectangle(498061, 7050991, 498069, 7050999))
mapsettings.setLayers([layer])
renderchecker = QgsMultiRenderChecker()
renderchecker.setMapSettings(mapsettings)
renderchecker.setControlPathPrefix('pointcloudrenderer')
renderchecker.setControlName('expected_ramp_yrender')
result = renderchecker.runTest('expected_ramp_yrender')
TestQgsPointCloudAttributeByRampRenderer.report += renderchecker.report()
self.assertTrue(result)
@unittest.skipIf('ept' not in QgsProviderRegistry.instance().providerList(), 'EPT provider not available')
def testRenderZ(self):
layer = QgsPointCloudLayer(unitTestDataPath() + '/point_clouds/ept/sunshine-coast/ept.json', 'test', 'ept')
self.assertTrue(layer.isValid())
renderer = QgsPointCloudAttributeByRampRenderer()
renderer.setAttribute('Z')
renderer.setMinimum(74.34000)
renderer.setMaximum(75)
ramp = QgsStyle.defaultStyle().colorRamp("Viridis")
shader = QgsColorRampShader(74.34000, 75, ramp)
shader.classifyColorRamp()
renderer.setColorRampShader(shader)
layer.setRenderer(renderer)
layer.renderer().setPointSize(2)
layer.renderer().setPointSizeUnit(QgsUnitTypes.RenderMillimeters)
mapsettings = QgsMapSettings()
mapsettings.setOutputSize(QSize(400, 400))
mapsettings.setOutputDpi(96)
mapsettings.setDestinationCrs(layer.crs())
mapsettings.setExtent(QgsRectangle(498061, 7050991, 498069, 7050999))
mapsettings.setLayers([layer])
renderchecker = QgsMultiRenderChecker()
renderchecker.setMapSettings(mapsettings)
renderchecker.setControlPathPrefix('pointcloudrenderer')
renderchecker.setControlName('expected_ramp_zrender')
result = renderchecker.runTest('expected_ramp_zrender')
TestQgsPointCloudAttributeByRampRenderer.report += renderchecker.report()
self.assertTrue(result)
@unittest.skipIf('ept' not in QgsProviderRegistry.instance().providerList(), 'EPT provider not available')
def testRenderCrsTransform(self):
layer = QgsPointCloudLayer(unitTestDataPath() + '/point_clouds/ept/sunshine-coast/ept.json', 'test', 'ept')
self.assertTrue(layer.isValid())
renderer = QgsPointCloudAttributeByRampRenderer()
renderer.setAttribute('Intensity')
renderer.setMinimum(200)
renderer.setMaximum(1000)
ramp = QgsStyle.defaultStyle().colorRamp("Viridis")
shader = QgsColorRampShader(200, 1000, ramp)
shader.classifyColorRamp()
renderer.setColorRampShader(shader)
layer.setRenderer(renderer)
layer.renderer().setPointSize(2)
layer.renderer().setPointSizeUnit(QgsUnitTypes.RenderMillimeters)
mapsettings = QgsMapSettings()
mapsettings.setOutputSize(QSize(400, 400))
mapsettings.setOutputDpi(96)
mapsettings.setDestinationCrs(QgsCoordinateReferenceSystem('EPSG:4326'))
mapsettings.setExtent(QgsRectangle(152.980508492, -26.662023491, 152.980586020, -26.662071137))
mapsettings.setLayers([layer])
renderchecker = QgsMultiRenderChecker()
renderchecker.setMapSettings(mapsettings)
renderchecker.setControlPathPrefix('pointcloudrenderer')
renderchecker.setControlName('expected_ramp_render_crs_transform')
result = renderchecker.runTest('expected_ramp_render_crs_transform')
TestQgsPointCloudAttributeByRampRenderer.report += renderchecker.report()
self.assertTrue(result)
@unittest.skipIf('ept' not in QgsProviderRegistry.instance().providerList(), 'EPT provider not available')
def testRenderPointSize(self):
layer = QgsPointCloudLayer(unitTestDataPath() + '/point_clouds/ept/sunshine-coast/ept.json', 'test', 'ept')
self.assertTrue(layer.isValid())
renderer = QgsPointCloudAttributeByRampRenderer()
renderer.setAttribute('Intensity')
renderer.setMinimum(200)
renderer.setMaximum(1000)
ramp = QgsStyle.defaultStyle().colorRamp("Viridis")
shader = QgsColorRampShader(200, 1000, ramp)
shader.classifyColorRamp()
renderer.setColorRampShader(shader)
layer.setRenderer(renderer)
layer.renderer().setPointSize(.15)
layer.renderer().setPointSizeUnit(QgsUnitTypes.RenderMapUnits)
mapsettings = QgsMapSettings()
mapsettings.setOutputSize(QSize(400, 400))
mapsettings.setOutputDpi(96)
mapsettings.setDestinationCrs(layer.crs())
mapsettings.setExtent(QgsRectangle(498061, 7050991, 498069, 7050999))
mapsettings.setLayers([layer])
renderchecker = QgsMultiRenderChecker()
renderchecker.setMapSettings(mapsettings)
renderchecker.setControlPathPrefix('pointcloudrenderer')
renderchecker.setControlName('expected_ramp_pointsize')
result = renderchecker.runTest('expected_ramp_pointsize')
TestQgsPointCloudAttributeByRampRenderer.report += renderchecker.report()
self.assertTrue(result)
@unittest.skipIf('ept' not in QgsProviderRegistry.instance().providerList(), 'EPT provider not available')
def testRenderZRange(self):
layer = QgsPointCloudLayer(unitTestDataPath() + '/point_clouds/ept/sunshine-coast/ept.json', 'test', 'ept')
self.assertTrue(layer.isValid())
renderer = QgsPointCloudAttributeByRampRenderer()
renderer.setAttribute('Intensity')
renderer.setMinimum(200)
renderer.setMaximum(1000)
ramp = QgsStyle.defaultStyle().colorRamp("Viridis")
shader = QgsColorRampShader(200, 1000, ramp)
shader.classifyColorRamp()
renderer.setColorRampShader(shader)
layer.setRenderer(renderer)
layer.renderer().setPointSize(2)
layer.renderer().setPointSizeUnit(QgsUnitTypes.RenderMillimeters)
mapsettings = QgsMapSettings()
mapsettings.setOutputSize(QSize(400, 400))
mapsettings.setOutputDpi(96)
mapsettings.setDestinationCrs(layer.crs())
mapsettings.setExtent(QgsRectangle(498061, 7050991, 498069, 7050999))
mapsettings.setLayers([layer])
mapsettings.setZRange(QgsDoubleRange(74.7, 75))
renderchecker = QgsMultiRenderChecker()
renderchecker.setMapSettings(mapsettings)
renderchecker.setControlPathPrefix('pointcloudrenderer')
renderchecker.setControlName('expected_ramp_zfilter')
result = renderchecker.runTest('expected_ramp_zfilter')
TestQgsPointCloudAttributeByRampRenderer.report += renderchecker.report()
self.assertTrue(result)
if __name__ == '__main__':
unittest.main()
|
gpl-2.0
|
maartenq/ansible
|
lib/ansible/modules/network/eos/eos_vlan.py
|
25
|
11418
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Ansible by Red Hat, inc
#
# This file is part of Ansible by Red Hat
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = """
---
module: eos_vlan
version_added: "2.4"
author: "Ricardo Carrillo Cruz (@rcarrillocruz)"
short_description: Manage VLANs on Arista EOS network devices
description:
- This module provides declarative management of VLANs
on Arista EOS network devices.
notes:
- Tested against EOS 4.15
options:
name:
description:
- Name of the VLAN.
vlan_id:
description:
- ID of the VLAN.
required: true
interfaces:
description:
      - List of interfaces that should be associated with the VLAN. The name of the interface is
        case sensitive and should be in expanded format, not abbreviated.
associated_interfaces:
description:
      - This is an intent option and checks the operational state of the given vlan C(name)
        for the associated interfaces. The name of the interface is case sensitive and should be
        in expanded format, not abbreviated. If the value in C(associated_interfaces) does not
        match the operational state of the vlan interfaces on the device, the module will fail.
version_added: "2.5"
delay:
description:
      - Time in seconds the play should wait before checking the declarative intent parameter values.
default: 10
aggregate:
description: List of VLANs definitions.
purge:
description:
- Purge VLANs not defined in the I(aggregate) parameter.
default: no
state:
description:
- State of the VLAN configuration.
default: present
choices: ['present', 'absent', 'active', 'suspend']
extends_documentation_fragment: eos
"""
EXAMPLES = """
- name: Create vlan
eos_vlan:
vlan_id: 4000
name: vlan-4000
state: present
- name: Add interfaces to vlan
eos_vlan:
vlan_id: 4000
state: present
interfaces:
- Ethernet1
- Ethernet2
- name: Check if interfaces is assigned to vlan
eos_vlan:
vlan_id: 4000
associated_interfaces:
- Ethernet1
- Ethernet2
- name: Suspend vlan
eos_vlan:
vlan_id: 4000
state: suspend
- name: Unsuspend vlan
eos_vlan:
vlan_id: 4000
state: active
- name: Create aggregate of vlans
eos_vlan:
aggregate:
- vlan_id: 4000
- {vlan_id: 4001, name: vlan-4001}
"""
RETURN = """
commands:
description: The list of configuration mode commands to send to the device
returned: always
type: list
sample:
- vlan 20
- name test-vlan
"""
import re
import time
from copy import deepcopy
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.common.utils import remove_default_spec
from ansible.module_utils.network.eos.eos import load_config, run_commands
from ansible.module_utils.network.eos.eos import eos_argument_spec, check_args
def search_obj_in_list(vlan_id, lst):
for o in lst:
if o['vlan_id'] == vlan_id:
return o
def map_obj_to_commands(updates, module):
commands = list()
want, have = updates
purge = module.params['purge']
for w in want:
vlan_id = w['vlan_id']
name = w['name']
state = w['state']
interfaces = w['interfaces']
obj_in_have = search_obj_in_list(vlan_id, have)
if state == 'absent':
if obj_in_have:
commands.append('no vlan %s' % w['vlan_id'])
elif state == 'present':
if not obj_in_have:
commands.append('vlan %s' % w['vlan_id'])
if w['name']:
commands.append('name %s' % w['name'])
if w['interfaces']:
for i in w['interfaces']:
commands.append('interface %s' % i)
commands.append('switchport access vlan %s' % w['vlan_id'])
else:
if w['name'] and w['name'] != obj_in_have['name']:
commands.append('vlan %s' % w['vlan_id'])
commands.append('name %s' % w['name'])
if w['interfaces']:
if not obj_in_have['interfaces']:
for i in w['interfaces']:
commands.append('vlan %s' % w['vlan_id'])
commands.append('interface %s' % i)
commands.append('switchport access vlan %s' % w['vlan_id'])
elif set(w['interfaces']) != obj_in_have['interfaces']:
missing_interfaces = list(set(w['interfaces']) - set(obj_in_have['interfaces']))
for i in missing_interfaces:
commands.append('vlan %s' % w['vlan_id'])
commands.append('interface %s' % i)
commands.append('switchport access vlan %s' % w['vlan_id'])
superfluous_interfaces = list(set(obj_in_have['interfaces']) - set(w['interfaces']))
for i in superfluous_interfaces:
commands.append('vlan %s' % w['vlan_id'])
commands.append('interface %s' % i)
commands.append('no switchport access vlan %s' % w['vlan_id'])
else:
if not obj_in_have:
commands.append('vlan %s' % w['vlan_id'])
if w['name']:
commands.append('name %s' % w['name'])
commands.append('state %s' % w['state'])
elif (w['name'] and obj_in_have['name'] != w['name']) or obj_in_have['state'] != w['state']:
commands.append('vlan %s' % w['vlan_id'])
if w['name']:
if obj_in_have['name'] != w['name']:
commands.append('name %s' % w['name'])
if obj_in_have['state'] != w['state']:
commands.append('state %s' % w['state'])
if purge:
for h in have:
obj_in_want = search_obj_in_list(h['vlan_id'], want)
if not obj_in_want and h['vlan_id'] != '1':
commands.append('no vlan %s' % h['vlan_id'])
return commands
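# Worked example (illustration only, not taken from the original module): with
#   want = [{'vlan_id': '4000', 'name': 'vlan-4000', 'state': 'present',
#            'interfaces': ['Ethernet1']}]
#   have = []                      # vlan not yet present on the device
# and purge disabled, map_obj_to_commands((want, have), module) returns
#   ['vlan 4000', 'name vlan-4000',
#    'interface Ethernet1', 'switchport access vlan 4000']
# i.e. the declarative 'want' state is translated into the minimal set of EOS
# configuration commands needed to reach it.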
def map_config_to_obj(module):
objs = []
vlans = run_commands(module, ['show vlan configured-ports | json'])
for vlan in vlans[0]['vlans']:
obj = {}
obj['vlan_id'] = vlan
obj['name'] = vlans[0]['vlans'][vlan]['name']
obj['state'] = vlans[0]['vlans'][vlan]['status']
obj['interfaces'] = []
interfaces = vlans[0]['vlans'][vlan]
for interface in interfaces['interfaces']:
obj['interfaces'].append(interface)
if obj['state'] == 'suspended':
obj['state'] = 'suspend'
objs.append(obj)
return objs
def map_params_to_obj(module):
obj = []
aggregate = module.params.get('aggregate')
if aggregate:
for item in aggregate:
for key in item:
if item.get(key) is None:
item[key] = module.params[key]
if item.get('interfaces'):
item['interfaces'] = [intf.replace(" ", "") for intf in item.get('interfaces') if intf]
if item.get('associated_interfaces'):
item['associated_interfaces'] = [intf.replace(" ", "") for intf in item.get('associated_interfaces') if intf]
d = item.copy()
d['vlan_id'] = str(d['vlan_id'])
obj.append(d)
else:
obj.append({
'vlan_id': str(module.params['vlan_id']),
'name': module.params['name'],
'state': module.params['state'],
'interfaces': [intf.replace(" ", "") for intf in module.params['interfaces']] if module.params['interfaces'] else [],
'associated_interfaces': [intf.replace(" ", "") for intf in
module.params['associated_interfaces']] if module.params['associated_interfaces'] else []
})
return obj
def check_declarative_intent_params(want, module, result):
have = None
is_delay = False
for w in want:
if w.get('associated_interfaces') is None:
continue
if result['changed'] and not is_delay:
time.sleep(module.params['delay'])
is_delay = True
if have is None:
have = map_config_to_obj(module)
for i in w['associated_interfaces']:
obj_in_have = search_obj_in_list(w['vlan_id'], have)
if obj_in_have and 'interfaces' in obj_in_have and i not in obj_in_have['interfaces']:
module.fail_json(msg="Interface %s not configured on vlan %s" % (i, w['vlan_id']))
def main():
""" main entry point for module execution
"""
element_spec = dict(
vlan_id=dict(type='int'),
name=dict(),
interfaces=dict(type='list'),
associated_interfaces=dict(type='list'),
delay=dict(default=10, type='int'),
state=dict(default='present',
choices=['present', 'absent', 'active', 'suspend'])
)
aggregate_spec = deepcopy(element_spec)
aggregate_spec['vlan_id'] = dict(required=True)
# remove default in aggregate spec, to handle common arguments
remove_default_spec(aggregate_spec)
argument_spec = dict(
aggregate=dict(type='list', elements='dict', options=aggregate_spec),
purge=dict(default=False, type='bool')
)
argument_spec.update(element_spec)
argument_spec.update(eos_argument_spec)
required_one_of = [['vlan_id', 'aggregate']]
mutually_exclusive = [['vlan_id', 'aggregate']]
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True,
required_one_of=required_one_of,
mutually_exclusive=mutually_exclusive)
warnings = list()
check_args(module, warnings)
result = {'changed': False}
if warnings:
result['warnings'] = warnings
want = map_params_to_obj(module)
have = map_config_to_obj(module)
commands = map_obj_to_commands((want, have), module)
result['commands'] = commands
if commands:
commit = not module.check_mode
response = load_config(module, commands, commit=commit)
if response.get('diff') and module._diff:
result['diff'] = {'prepared': response.get('diff')}
result['session_name'] = response.get('session')
result['changed'] = True
check_declarative_intent_params(want, module, result)
module.exit_json(**result)
if __name__ == '__main__':
main()
|
gpl-3.0
|
mattcongy/itshop
|
docker-images/taigav2/taiga-back/taiga/mdrender/templatetags/functions.py
|
2
|
1172
|
# -*- coding: utf-8 -*-
# Copyright (C) 2014-2016 Andrey Antukh <[email protected]>
# Copyright (C) 2014-2016 Jesús Espino <[email protected]>
# Copyright (C) 2014-2016 David Barragán <[email protected]>
# Copyright (C) 2014-2016 Alejandro Alonso <[email protected]>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django_jinja import library
from jinja2 import Markup
from taiga.mdrender.service import render
@library.global_function
def mdrender(project, text) -> str:
if text:
return Markup(render(project, text))
return ""
|
mit
|
bop/hybrid
|
lib/python2.6/site-packages/django/contrib/admin/util.py
|
101
|
15254
|
from __future__ import unicode_literals
import datetime
import decimal
from django.db import models
from django.db.models.constants import LOOKUP_SEP
from django.db.models.deletion import Collector
from django.db.models.related import RelatedObject
from django.forms.forms import pretty_name
from django.utils import formats
from django.utils.html import format_html
from django.utils.text import capfirst
from django.utils import timezone
from django.utils.encoding import force_str, force_text, smart_text
from django.utils import six
from django.utils.translation import ungettext
from django.core.urlresolvers import reverse
def lookup_needs_distinct(opts, lookup_path):
"""
Returns True if 'distinct()' should be used to query the given lookup path.
"""
field_name = lookup_path.split('__', 1)[0]
field = opts.get_field_by_name(field_name)[0]
if ((hasattr(field, 'rel') and
isinstance(field.rel, models.ManyToManyRel)) or
(isinstance(field, models.related.RelatedObject) and
not field.field.unique)):
return True
return False
def prepare_lookup_value(key, value):
"""
Returns a lookup value prepared to be used in queryset filtering.
"""
# if key ends with __in, split parameter into separate values
if key.endswith('__in'):
value = value.split(',')
# if key ends with __isnull, special case '' and false
if key.endswith('__isnull'):
if value.lower() in ('', 'false'):
value = False
else:
value = True
return value
def quote(s):
"""
Ensure that primary key values do not confuse the admin URLs by escaping
any '/', '_' and ':' and similarly problematic characters.
Similar to urllib.quote, except that the quoting is slightly different so
that it doesn't get automatically unquoted by the Web browser.
"""
if not isinstance(s, six.string_types):
return s
res = list(s)
for i in range(len(res)):
c = res[i]
if c in """:/_#?;@&=+$,"<>%\\""":
res[i] = '_%02X' % ord(c)
return ''.join(res)
def unquote(s):
"""
Undo the effects of quote(). Based heavily on urllib.unquote().
"""
mychr = chr
myatoi = int
list = s.split('_')
res = [list[0]]
myappend = res.append
del list[0]
for item in list:
if item[1:2]:
try:
myappend(mychr(myatoi(item[:2], 16)) + item[2:])
except ValueError:
myappend('_' + item)
else:
myappend('_' + item)
return "".join(res)
def flatten_fieldsets(fieldsets):
"""Returns a list of field names from an admin fieldsets structure."""
field_names = []
for name, opts in fieldsets:
for field in opts['fields']:
# type checking feels dirty, but it seems like the best way here
if type(field) == tuple:
field_names.extend(field)
else:
field_names.append(field)
return field_names
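# Example (illustration only): a typical ModelAdmin fieldsets declaration such as
#   fieldsets = [
#       (None,       {'fields': ('name', ('first_name', 'last_name'))}),
#       ('Advanced', {'fields': ('slug',)}),
#   ]
# flattens to ['name', 'first_name', 'last_name', 'slug'] -- inner tuples
# (used for side-by-side layout) are expanded, all other entries are appended.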
def get_deleted_objects(objs, opts, user, admin_site, using):
"""
Find all objects related to ``objs`` that should also be deleted. ``objs``
    must be a homogeneous iterable of objects (e.g. a QuerySet).
Returns a nested list of strings suitable for display in the
template with the ``unordered_list`` filter.
"""
collector = NestedObjects(using=using)
collector.collect(objs)
perms_needed = set()
def format_callback(obj):
has_admin = obj.__class__ in admin_site._registry
opts = obj._meta
if has_admin:
admin_url = reverse('%s:%s_%s_change'
% (admin_site.name,
opts.app_label,
opts.object_name.lower()),
None, (quote(obj._get_pk_val()),))
p = '%s.%s' % (opts.app_label,
opts.get_delete_permission())
if not user.has_perm(p):
perms_needed.add(opts.verbose_name)
# Display a link to the admin page.
return format_html('{0}: <a href="{1}">{2}</a>',
capfirst(opts.verbose_name),
admin_url,
obj)
else:
# Don't display link to edit, because it either has no
# admin or is edited inline.
return '%s: %s' % (capfirst(opts.verbose_name),
force_text(obj))
to_delete = collector.nested(format_callback)
protected = [format_callback(obj) for obj in collector.protected]
return to_delete, perms_needed, protected
class NestedObjects(Collector):
def __init__(self, *args, **kwargs):
super(NestedObjects, self).__init__(*args, **kwargs)
self.edges = {} # {from_instance: [to_instances]}
self.protected = set()
def add_edge(self, source, target):
self.edges.setdefault(source, []).append(target)
def collect(self, objs, source_attr=None, **kwargs):
for obj in objs:
if source_attr:
self.add_edge(getattr(obj, source_attr), obj)
else:
self.add_edge(None, obj)
try:
return super(NestedObjects, self).collect(objs, source_attr=source_attr, **kwargs)
except models.ProtectedError as e:
self.protected.update(e.protected_objects)
def related_objects(self, related, objs):
qs = super(NestedObjects, self).related_objects(related, objs)
return qs.select_related(related.field.name)
def _nested(self, obj, seen, format_callback):
if obj in seen:
return []
seen.add(obj)
children = []
for child in self.edges.get(obj, ()):
children.extend(self._nested(child, seen, format_callback))
if format_callback:
ret = [format_callback(obj)]
else:
ret = [obj]
if children:
ret.append(children)
return ret
def nested(self, format_callback=None):
"""
Return the graph as a nested list.
"""
seen = set()
roots = []
for root in self.edges.get(None, ()):
roots.extend(self._nested(root, seen, format_callback))
return roots
def can_fast_delete(self, *args, **kwargs):
"""
We always want to load the objects into memory so that we can display
        them to the user on the confirmation page.
"""
return False
def model_format_dict(obj):
"""
Return a `dict` with keys 'verbose_name' and 'verbose_name_plural',
typically for use with string formatting.
`obj` may be a `Model` instance, `Model` subclass, or `QuerySet` instance.
"""
if isinstance(obj, (models.Model, models.base.ModelBase)):
opts = obj._meta
elif isinstance(obj, models.query.QuerySet):
opts = obj.model._meta
else:
opts = obj
return {
'verbose_name': force_text(opts.verbose_name),
'verbose_name_plural': force_text(opts.verbose_name_plural)
}
def model_ngettext(obj, n=None):
"""
Return the appropriate `verbose_name` or `verbose_name_plural` value for
`obj` depending on the count `n`.
`obj` may be a `Model` instance, `Model` subclass, or `QuerySet` instance.
If `obj` is a `QuerySet` instance, `n` is optional and the length of the
`QuerySet` is used.
"""
if isinstance(obj, models.query.QuerySet):
if n is None:
n = obj.count()
obj = obj.model
d = model_format_dict(obj)
singular, plural = d["verbose_name"], d["verbose_name_plural"]
return ungettext(singular, plural, n or 0)
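# Example (illustration only, assuming a hypothetical Article model whose
# verbose names are 'article'/'articles'):
#   model_ngettext(Article.objects.all())  -> 'articles' when the queryset holds
#                                             several objects (n from qs.count())
#   model_ngettext(Article, 1)             -> 'article'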
def lookup_field(name, obj, model_admin=None):
opts = obj._meta
try:
f = opts.get_field(name)
except models.FieldDoesNotExist:
# For non-field values, the value is either a method, property or
# returned via a callable.
if callable(name):
attr = name
value = attr(obj)
elif (model_admin is not None and hasattr(model_admin, name) and
not name == '__str__' and not name == '__unicode__'):
attr = getattr(model_admin, name)
value = attr(obj)
else:
attr = getattr(obj, name)
if callable(attr):
value = attr()
else:
value = attr
f = None
else:
attr = None
value = getattr(obj, name)
return f, attr, value
def label_for_field(name, model, model_admin=None, return_attr=False):
"""
Returns a sensible label for a field name. The name can be a callable or the
    name of an object attribute, as well as a genuine field. If return_attr is
True, the resolved attribute (which could be a callable) is also returned.
This will be None if (and only if) the name refers to a field.
"""
attr = None
try:
field = model._meta.get_field_by_name(name)[0]
if isinstance(field, RelatedObject):
label = field.opts.verbose_name
else:
label = field.verbose_name
except models.FieldDoesNotExist:
if name == "__unicode__":
label = force_text(model._meta.verbose_name)
attr = six.text_type
elif name == "__str__":
label = force_str(model._meta.verbose_name)
attr = bytes
else:
if callable(name):
attr = name
elif model_admin is not None and hasattr(model_admin, name):
attr = getattr(model_admin, name)
elif hasattr(model, name):
attr = getattr(model, name)
else:
message = "Unable to lookup '%s' on %s" % (name, model._meta.object_name)
if model_admin:
message += " or %s" % (model_admin.__class__.__name__,)
raise AttributeError(message)
if hasattr(attr, "short_description"):
label = attr.short_description
elif callable(attr):
if attr.__name__ == "<lambda>":
label = "--"
else:
label = pretty_name(attr.__name__)
else:
label = pretty_name(name)
if return_attr:
return (label, attr)
else:
return label
def help_text_for_field(name, model):
try:
help_text = model._meta.get_field_by_name(name)[0].help_text
except models.FieldDoesNotExist:
help_text = ""
return smart_text(help_text)
def display_for_field(value, field):
from django.contrib.admin.templatetags.admin_list import _boolean_icon
from django.contrib.admin.views.main import EMPTY_CHANGELIST_VALUE
if field.flatchoices:
return dict(field.flatchoices).get(value, EMPTY_CHANGELIST_VALUE)
# NullBooleanField needs special-case null-handling, so it comes
# before the general null test.
elif isinstance(field, models.BooleanField) or isinstance(field, models.NullBooleanField):
return _boolean_icon(value)
elif value is None:
return EMPTY_CHANGELIST_VALUE
elif isinstance(field, models.DateTimeField):
return formats.localize(timezone.template_localtime(value))
elif isinstance(field, (models.DateField, models.TimeField)):
return formats.localize(value)
elif isinstance(field, models.DecimalField):
return formats.number_format(value, field.decimal_places)
elif isinstance(field, models.FloatField):
return formats.number_format(value)
else:
return smart_text(value)
def display_for_value(value, boolean=False):
from django.contrib.admin.templatetags.admin_list import _boolean_icon
from django.contrib.admin.views.main import EMPTY_CHANGELIST_VALUE
if boolean:
return _boolean_icon(value)
elif value is None:
return EMPTY_CHANGELIST_VALUE
elif isinstance(value, datetime.datetime):
return formats.localize(timezone.template_localtime(value))
elif isinstance(value, (datetime.date, datetime.time)):
return formats.localize(value)
elif isinstance(value, six.integer_types + (decimal.Decimal, float)):
return formats.number_format(value)
else:
return smart_text(value)
class NotRelationField(Exception):
pass
def get_model_from_relation(field):
if isinstance(field, models.related.RelatedObject):
return field.model
elif getattr(field, 'rel'): # or isinstance?
return field.rel.to
else:
raise NotRelationField
def reverse_field_path(model, path):
""" Create a reversed field path.
E.g. Given (Order, "user__groups"),
return (Group, "user__order").
Final field must be a related model, not a data field.
"""
reversed_path = []
parent = model
pieces = path.split(LOOKUP_SEP)
for piece in pieces:
field, model, direct, m2m = parent._meta.get_field_by_name(piece)
# skip trailing data field if extant:
if len(reversed_path) == len(pieces)-1: # final iteration
try:
get_model_from_relation(field)
except NotRelationField:
break
if direct:
related_name = field.related_query_name()
parent = field.rel.to
else:
related_name = field.field.name
parent = field.model
reversed_path.insert(0, related_name)
return (parent, LOOKUP_SEP.join(reversed_path))
def get_fields_from_path(model, path):
""" Return list of Fields given path relative to model.
e.g. (ModelX, "user__groups__name") -> [
<django.db.models.fields.related.ForeignKey object at 0x...>,
<django.db.models.fields.related.ManyToManyField object at 0x...>,
<django.db.models.fields.CharField object at 0x...>,
]
"""
pieces = path.split(LOOKUP_SEP)
fields = []
for piece in pieces:
if fields:
parent = get_model_from_relation(fields[-1])
else:
parent = model
fields.append(parent._meta.get_field_by_name(piece)[0])
return fields
def remove_trailing_data_field(fields):
""" Discard trailing non-relation field if extant. """
try:
get_model_from_relation(fields[-1])
except NotRelationField:
fields = fields[:-1]
return fields
def get_limit_choices_to_from_path(model, path):
""" Return Q object for limiting choices if applicable.
If final model in path is linked via a ForeignKey or ManyToManyField which
has a `limit_choices_to` attribute, return it as a Q object.
"""
fields = get_fields_from_path(model, path)
fields = remove_trailing_data_field(fields)
limit_choices_to = (
fields and hasattr(fields[-1], 'rel') and
getattr(fields[-1].rel, 'limit_choices_to', None))
if not limit_choices_to:
return models.Q() # empty Q
elif isinstance(limit_choices_to, models.Q):
return limit_choices_to # already a Q
else:
return models.Q(**limit_choices_to) # convert dict to Q
|
gpl-2.0
|
gechr/ansible-modules-extras
|
network/dnsimple.py
|
49
|
11750
|
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: dnsimple
version_added: "1.6"
short_description: Interface with dnsimple.com (a DNS hosting service).
description:
- "Manages domains and records via the DNSimple API, see the docs: U(http://developer.dnsimple.com/)"
options:
account_email:
description:
- "Account email. If omitted, the env variables DNSIMPLE_EMAIL and DNSIMPLE_API_TOKEN will be looked for. If those aren't found, a C(.dnsimple) file will be looked for, see: U(https://github.com/mikemaccana/dnsimple-python#getting-started)"
required: false
default: null
account_api_token:
description:
- Account API token. See I(account_email) for info.
required: false
default: null
domain:
description:
- Domain to work with. Can be the domain name (e.g. "mydomain.com") or the numeric ID of the domain in DNSimple. If omitted, a list of domains will be returned.
- If domain is present but the domain doesn't exist, it will be created.
required: false
default: null
record:
description:
      - Record to add. If blank, a record for the domain will be created. Supports the wildcard (*).
required: false
default: null
record_ids:
description:
      - List of record IDs to ensure either all exist or are all absent
required: false
default: null
type:
description:
- The type of DNS record to create
required: false
choices: [ 'A', 'ALIAS', 'CNAME', 'MX', 'SPF', 'URL', 'TXT', 'NS', 'SRV', 'NAPTR', 'PTR', 'AAAA', 'SSHFP', 'HINFO', 'POOL' ]
default: null
ttl:
description:
- The TTL to give the new record
required: false
default: 3600 (one hour)
value:
description:
- Record value
- "Must be specified when trying to ensure a record exists"
required: false
default: null
priority:
description:
- Record priority
required: false
default: null
state:
description:
- whether the record should exist or not
required: false
choices: [ 'present', 'absent' ]
default: null
solo:
description:
- Whether the record should be the only one for that record type and record name. Only use with state=present on a record
required: false
default: null
requirements: [ dnsimple ]
author: "Alex Coomans (@drcapulet)"
'''
EXAMPLES = '''
# authenticate using email and API token
- local_action: dnsimple [email protected] account_api_token=dummyapitoken
# fetch all domains
- local_action: dnsimple
register: domains
# fetch my.com domain records
- local_action: dnsimple domain=my.com state=present
register: records
# delete a domain
- local_action: dnsimple domain=my.com state=absent
# create a test.my.com A record to point to 127.0.0.1
- local_action: dnsimple domain=my.com record=test type=A value=127.0.0.1
register: record
# and then delete it
- local_action: dnsimple domain=my.com record_ids={{ record['id'] }}
# create a my.com CNAME record to example.com
- local_action: dnsimple domain=my.com record= type=CNAME value=example.com state=present
# change its TTL
- local_action: dnsimple domain=my.com record= type=CNAME value=example.com ttl=600 state=present
# and delete the record
- local_action: dnsimple domain=my.com record= type=CNAME value=example.com state=absent
'''
import os
try:
from dnsimple import DNSimple
from dnsimple.dnsimple import DNSimpleException
HAS_DNSIMPLE = True
except ImportError:
HAS_DNSIMPLE = False
def main():
module = AnsibleModule(
argument_spec = dict(
account_email = dict(required=False),
account_api_token = dict(required=False, no_log=True),
domain = dict(required=False),
record = dict(required=False),
record_ids = dict(required=False, type='list'),
type = dict(required=False, choices=['A', 'ALIAS', 'CNAME', 'MX', 'SPF', 'URL', 'TXT', 'NS', 'SRV', 'NAPTR', 'PTR', 'AAAA', 'SSHFP', 'HINFO', 'POOL']),
ttl = dict(required=False, default=3600, type='int'),
value = dict(required=False),
priority = dict(required=False, type='int'),
state = dict(required=False, choices=['present', 'absent']),
solo = dict(required=False, type='bool'),
),
required_together = (
['record', 'value']
),
supports_check_mode = True,
)
if not HAS_DNSIMPLE:
module.fail_json(msg="dnsimple required for this module")
account_email = module.params.get('account_email')
account_api_token = module.params.get('account_api_token')
domain = module.params.get('domain')
record = module.params.get('record')
record_ids = module.params.get('record_ids')
record_type = module.params.get('type')
ttl = module.params.get('ttl')
value = module.params.get('value')
priority = module.params.get('priority')
state = module.params.get('state')
is_solo = module.params.get('solo')
if account_email and account_api_token:
client = DNSimple(email=account_email, api_token=account_api_token)
elif os.environ.get('DNSIMPLE_EMAIL') and os.environ.get('DNSIMPLE_API_TOKEN'):
client = DNSimple(email=os.environ.get('DNSIMPLE_EMAIL'), api_token=os.environ.get('DNSIMPLE_API_TOKEN'))
else:
client = DNSimple()
try:
# Let's figure out what operation we want to do
# No domain, return a list
if not domain:
domains = client.domains()
module.exit_json(changed=False, result=[d['domain'] for d in domains])
# Domain & No record
if domain and record is None and not record_ids:
domains = [d['domain'] for d in client.domains()]
if domain.isdigit():
dr = next((d for d in domains if d['id'] == int(domain)), None)
else:
dr = next((d for d in domains if d['name'] == domain), None)
if state == 'present':
if dr:
module.exit_json(changed=False, result=dr)
else:
if module.check_mode:
module.exit_json(changed=True)
else:
module.exit_json(changed=True, result=client.add_domain(domain)['domain'])
elif state == 'absent':
if dr:
if not module.check_mode:
client.delete(domain)
module.exit_json(changed=True)
else:
module.exit_json(changed=False)
else:
module.fail_json(msg="'%s' is an unknown value for the state argument" % state)
# need the not none check since record could be an empty string
if domain and record is not None:
records = [r['record'] for r in client.records(str(domain))]
if not record_type:
module.fail_json(msg="Missing the record type")
if not value:
module.fail_json(msg="Missing the record value")
rr = next((r for r in records if r['name'] == record and r['record_type'] == record_type and r['content'] == value), None)
if state == 'present':
changed = False
if is_solo:
# delete any records that have the same name and record type
same_type = [r['id'] for r in records if r['name'] == record and r['record_type'] == record_type]
if rr:
same_type = [rid for rid in same_type if rid != rr['id']]
if same_type:
if not module.check_mode:
for rid in same_type:
client.delete_record(str(domain), rid)
changed = True
if rr:
# check if we need to update
if rr['ttl'] != ttl or rr['prio'] != priority:
data = {}
if ttl: data['ttl'] = ttl
if priority: data['prio'] = priority
if module.check_mode:
module.exit_json(changed=True)
else:
module.exit_json(changed=True, result=client.update_record(str(domain), str(rr['id']), data)['record'])
else:
module.exit_json(changed=changed, result=rr)
else:
# create it
data = {
'name': record,
'record_type': record_type,
'content': value,
}
if ttl: data['ttl'] = ttl
if priority: data['prio'] = priority
if module.check_mode:
module.exit_json(changed=True)
else:
module.exit_json(changed=True, result=client.add_record(str(domain), data)['record'])
elif state == 'absent':
if rr:
if not module.check_mode:
client.delete_record(str(domain), rr['id'])
module.exit_json(changed=True)
else:
module.exit_json(changed=False)
else:
module.fail_json(msg="'%s' is an unknown value for the state argument" % state)
# Make sure these record_ids either all exist or none
if domain and record_ids:
current_records = [str(r['record']['id']) for r in client.records(str(domain))]
wanted_records = [str(r) for r in record_ids]
if state == 'present':
difference = list(set(wanted_records) - set(current_records))
if difference:
module.fail_json(msg="Missing the following records: %s" % difference)
else:
module.exit_json(changed=False)
elif state == 'absent':
difference = list(set(wanted_records) & set(current_records))
if difference:
if not module.check_mode:
for rid in difference:
client.delete_record(str(domain), rid)
module.exit_json(changed=True)
else:
module.exit_json(changed=False)
else:
module.fail_json(msg="'%s' is an unknown value for the state argument" % state)
except DNSimpleException, e:
module.fail_json(msg="Unable to contact DNSimple: %s" % e.message)
module.fail_json(msg="Unknown what you wanted me to do")
# import module snippets
from ansible.module_utils.basic import *
main()
|
gpl-3.0
|
rotofly/odoo
|
addons/crm_helpdesk/__init__.py
|
442
|
1081
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import crm_helpdesk
import report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
fernandezcuesta/ansible
|
lib/ansible/modules/cloud/cloudstack/cs_region.py
|
18
|
5397
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2016, René Moser <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cs_region
short_description: Manages regions on Apache CloudStack based clouds.
description:
- Add, update and remove regions.
version_added: "2.3"
author: "Renรฉ Moser (@resmo)"
options:
id:
description:
- ID of the region.
      - Must be a number (int).
required: true
name:
description:
- Name of the region.
- Required if C(state=present)
required: false
default: null
endpoint:
description:
- Endpoint URL of the region.
- Required if C(state=present)
required: false
default: null
state:
description:
- State of the region.
required: false
default: 'present'
choices: [ 'present', 'absent' ]
extends_documentation_fragment: cloudstack
'''
EXAMPLES = '''
# create a region
local_action:
module: cs_region
id: 2
name: geneva
endpoint: https://cloud.gva.example.com
# remove a region with ID 2
local_action:
module: cs_region
id: 2
state: absent
'''
RETURN = '''
---
id:
description: ID of the region.
returned: success
type: int
sample: 1
name:
description: Name of the region.
returned: success
type: string
sample: local
endpoint:
description: Endpoint of the region.
returned: success
type: string
sample: http://cloud.example.com
gslb_service_enabled:
description: Whether the GSLB service is enabled or not.
returned: success
type: bool
sample: true
portable_ip_service_enabled:
description: Whether the portable IP service is enabled or not.
returned: success
type: bool
sample: true
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.cloudstack import (
AnsibleCloudStack,
cs_argument_spec,
cs_required_together
)
class AnsibleCloudStackRegion(AnsibleCloudStack):
def __init__(self, module):
super(AnsibleCloudStackRegion, self).__init__(module)
self.returns = {
'endpoint': 'endpoint',
'gslbserviceenabled': 'gslb_service_enabled',
'portableipserviceenabled': 'portable_ip_service_enabled',
}
def get_region(self):
id = self.module.params.get('id')
regions = self.query_api('listRegions', id=id)
if regions:
return regions['region'][0]
return None
def present_region(self):
region = self.get_region()
if not region:
region = self._create_region(region=region)
else:
region = self._update_region(region=region)
return region
def _create_region(self, region):
self.result['changed'] = True
args = {
'id': self.module.params.get('id'),
'name': self.module.params.get('name'),
'endpoint': self.module.params.get('endpoint')
}
if not self.module.check_mode:
res = self.query_api('addRegion', **args)
region = res['region']
return region
def _update_region(self, region):
args = {
'id': self.module.params.get('id'),
'name': self.module.params.get('name'),
'endpoint': self.module.params.get('endpoint')
}
if self.has_changed(args, region):
self.result['changed'] = True
if not self.module.check_mode:
res = self.query_api('updateRegion', **args)
region = res['region']
return region
def absent_region(self):
region = self.get_region()
if region:
self.result['changed'] = True
if not self.module.check_mode:
self.query_api('removeRegion', id=region['id'])
return region
def main():
argument_spec = cs_argument_spec()
argument_spec.update(dict(
id=dict(required=True, type='int'),
name=dict(),
endpoint=dict(),
state=dict(choices=['present', 'absent'], default='present'),
))
module = AnsibleModule(
argument_spec=argument_spec,
required_together=cs_required_together(),
required_if=[
('state', 'present', ['name', 'endpoint']),
],
supports_check_mode=True
)
acs_region = AnsibleCloudStackRegion(module)
state = module.params.get('state')
if state == 'absent':
region = acs_region.absent_region()
else:
region = acs_region.present_region()
result = acs_region.get_result(region)
module.exit_json(**result)
if __name__ == '__main__':
main()
|
gpl-3.0
|
edum1978/eduengage
|
boilerplate/external/pytz/tzfile.py
|
118
|
4340
|
#!/usr/bin/env python
'''
$Id: tzfile.py,v 1.8 2004/06/03 00:15:24 zenzen Exp $
'''
from cStringIO import StringIO
from datetime import datetime, timedelta
from struct import unpack, calcsize
from pytz.tzinfo import StaticTzInfo, DstTzInfo, memorized_ttinfo
from pytz.tzinfo import memorized_datetime, memorized_timedelta
def build_tzinfo(zone, fp):
head_fmt = '>4s c 15x 6l'
head_size = calcsize(head_fmt)
(magic, format, ttisgmtcnt, ttisstdcnt,leapcnt, timecnt,
typecnt, charcnt) = unpack(head_fmt, fp.read(head_size))
# Make sure it is a tzfile(5) file
assert magic == 'TZif'
# Read out the transition times, localtime indices and ttinfo structures.
data_fmt = '>%(timecnt)dl %(timecnt)dB %(ttinfo)s %(charcnt)ds' % dict(
timecnt=timecnt, ttinfo='lBB'*typecnt, charcnt=charcnt)
data_size = calcsize(data_fmt)
data = unpack(data_fmt, fp.read(data_size))
# make sure we unpacked the right number of values
assert len(data) == 2 * timecnt + 3 * typecnt + 1
transitions = [memorized_datetime(trans)
for trans in data[:timecnt]]
lindexes = list(data[timecnt:2 * timecnt])
ttinfo_raw = data[2 * timecnt:-1]
tznames_raw = data[-1]
del data
# Process ttinfo into separate structs
ttinfo = []
tznames = {}
i = 0
while i < len(ttinfo_raw):
# have we looked up this timezone name yet?
tzname_offset = ttinfo_raw[i+2]
if tzname_offset not in tznames:
nul = tznames_raw.find('\0', tzname_offset)
if nul < 0:
nul = len(tznames_raw)
tznames[tzname_offset] = tznames_raw[tzname_offset:nul]
ttinfo.append((ttinfo_raw[i],
bool(ttinfo_raw[i+1]),
tznames[tzname_offset]))
i += 3
# Now build the timezone object
if len(transitions) == 0:
ttinfo[0][0], ttinfo[0][2]
cls = type(zone, (StaticTzInfo,), dict(
zone=zone,
_utcoffset=memorized_timedelta(ttinfo[0][0]),
_tzname=ttinfo[0][2]))
else:
# Early dates use the first standard time ttinfo
i = 0
while ttinfo[i][1]:
i += 1
if ttinfo[i] == ttinfo[lindexes[0]]:
transitions[0] = datetime.min
else:
transitions.insert(0, datetime.min)
lindexes.insert(0, i)
# calculate transition info
transition_info = []
for i in range(len(transitions)):
inf = ttinfo[lindexes[i]]
utcoffset = inf[0]
if not inf[1]:
dst = 0
else:
for j in range(i-1, -1, -1):
prev_inf = ttinfo[lindexes[j]]
if not prev_inf[1]:
break
dst = inf[0] - prev_inf[0] # dst offset
if dst <= 0: # Bad dst? Look further.
for j in range(i+1, len(transitions)):
stdinf = ttinfo[lindexes[j]]
if not stdinf[1]:
dst = inf[0] - stdinf[0]
if dst > 0:
break # Found a useful std time.
tzname = inf[2]
# Round utcoffset and dst to the nearest minute or the
# datetime library will complain. Conversions to these timezones
# might be up to plus or minus 30 seconds out, but it is
# the best we can do.
utcoffset = int((utcoffset + 30) / 60) * 60
dst = int((dst + 30) / 60) * 60
transition_info.append(memorized_ttinfo(utcoffset, dst, tzname))
cls = type(zone, (DstTzInfo,), dict(
zone=zone,
_utc_transition_times=transitions,
_transition_info=transition_info))
return cls()
if __name__ == '__main__':
import os.path
from pprint import pprint
base = os.path.join(os.path.dirname(__file__), 'zoneinfo')
tz = build_tzinfo('Australia/Melbourne',
open(os.path.join(base,'Australia','Melbourne'), 'rb'))
tz = build_tzinfo('US/Eastern',
open(os.path.join(base,'US','Eastern'), 'rb'))
pprint(tz._utc_transition_times)
#print tz.asPython(4)
#print tz.transitions_mapping
|
lgpl-3.0
|
Technorip/Myntra
|
Django Backend/myntra/env/lib/python2.7/site-packages/django/core/checks/compatibility/django_1_8_0.py
|
286
|
1052
|
from __future__ import unicode_literals
from django.conf import global_settings, settings
from .. import Tags, Warning, register
@register(Tags.compatibility)
def check_duplicate_template_settings(app_configs, **kwargs):
if settings.TEMPLATES:
values = [
'TEMPLATE_DIRS',
'ALLOWED_INCLUDE_ROOTS',
'TEMPLATE_CONTEXT_PROCESSORS',
'TEMPLATE_DEBUG',
'TEMPLATE_LOADERS',
'TEMPLATE_STRING_IF_INVALID',
]
duplicates = [
value for value in values
if getattr(settings, value) != getattr(global_settings, value)
]
if duplicates:
return [Warning(
"The standalone TEMPLATE_* settings were deprecated in Django "
"1.8 and the TEMPLATES dictionary takes precedence. You must "
"put the values of the following settings into your default "
"TEMPLATES dict: %s." % ", ".join(duplicates),
id='1_8.W001',
)]
return []
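# Illustrative trigger (not part of the original check): with settings such as
#   TEMPLATES = [{'BACKEND': 'django.template.backends.django.DjangoTemplates',
#                 'DIRS': ['templates']}]
#   TEMPLATE_DEBUG = True   # legacy setting left over, differs from the default
# the check returns a single 1_8.W001 warning naming TEMPLATE_DEBUG, because a
# non-default legacy TEMPLATE_* value coexists with the TEMPLATES dict.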
|
gpl-2.0
|
CouchPotato/CouchPotatoServer
|
libs/chardet/utf8prober.py
|
2919
|
2652
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from . import constants
from .charsetprober import CharSetProber
from .codingstatemachine import CodingStateMachine
from .mbcssm import UTF8SMModel
ONE_CHAR_PROB = 0.5
class UTF8Prober(CharSetProber):
def __init__(self):
CharSetProber.__init__(self)
self._mCodingSM = CodingStateMachine(UTF8SMModel)
self.reset()
def reset(self):
CharSetProber.reset(self)
self._mCodingSM.reset()
self._mNumOfMBChar = 0
def get_charset_name(self):
return "utf-8"
def feed(self, aBuf):
for c in aBuf:
codingState = self._mCodingSM.next_state(c)
if codingState == constants.eError:
self._mState = constants.eNotMe
break
elif codingState == constants.eItsMe:
self._mState = constants.eFoundIt
break
elif codingState == constants.eStart:
if self._mCodingSM.get_current_charlen() >= 2:
self._mNumOfMBChar += 1
if self.get_state() == constants.eDetecting:
if self.get_confidence() > constants.SHORTCUT_THRESHOLD:
self._mState = constants.eFoundIt
return self.get_state()
def get_confidence(self):
unlike = 0.99
if self._mNumOfMBChar < 6:
for i in range(0, self._mNumOfMBChar):
unlike = unlike * ONE_CHAR_PROB
return 1.0 - unlike
else:
return unlike
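# --- Editor's usage sketch (not part of the original module; assumes the
# package is importable as `chardet`, per the path above) ---
#
#     from chardet.utf8prober import UTF8Prober
#     prober = UTF8Prober()
#     prober.feed(u'caf\u00e9 \u2013 na\u00efve'.encode('utf-8'))
#     prober.get_charset_name()       # 'utf-8'
#     # three multibyte sequences seen, so 1 - 0.99 * ONE_CHAR_PROB ** 3
#     prober.get_confidence()         # 0.87625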
|
gpl-3.0
|
gigglesninja/senior-design
|
MissionPlanner/Lib/email/charset.py
|
180
|
16043
|
# Copyright (C) 2001-2006 Python Software Foundation
# Author: Ben Gertzfield, Barry Warsaw
# Contact: [email protected]
__all__ = [
'Charset',
'add_alias',
'add_charset',
'add_codec',
]
import codecs
import email.base64mime
import email.quoprimime
from email import errors
from email.encoders import encode_7or8bit
# Flags for types of header encodings
QP = 1 # Quoted-Printable
BASE64 = 2 # Base64
SHORTEST = 3 # the shorter of QP and base64, but only for headers
# In "=?charset?q?hello_world?=", the =?, ?q?, and ?= add up to 7
MISC_LEN = 7
DEFAULT_CHARSET = 'us-ascii'
# Defaults
CHARSETS = {
# input header enc body enc output conv
'iso-8859-1': (QP, QP, None),
'iso-8859-2': (QP, QP, None),
'iso-8859-3': (QP, QP, None),
'iso-8859-4': (QP, QP, None),
# iso-8859-5 is Cyrillic, and not especially used
# iso-8859-6 is Arabic, also not particularly used
# iso-8859-7 is Greek, QP will not make it readable
# iso-8859-8 is Hebrew, QP will not make it readable
'iso-8859-9': (QP, QP, None),
'iso-8859-10': (QP, QP, None),
# iso-8859-11 is Thai, QP will not make it readable
'iso-8859-13': (QP, QP, None),
'iso-8859-14': (QP, QP, None),
'iso-8859-15': (QP, QP, None),
'iso-8859-16': (QP, QP, None),
'windows-1252':(QP, QP, None),
'viscii': (QP, QP, None),
'us-ascii': (None, None, None),
'big5': (BASE64, BASE64, None),
'gb2312': (BASE64, BASE64, None),
'euc-jp': (BASE64, None, 'iso-2022-jp'),
'shift_jis': (BASE64, None, 'iso-2022-jp'),
'iso-2022-jp': (BASE64, None, None),
'koi8-r': (BASE64, BASE64, None),
'utf-8': (SHORTEST, BASE64, 'utf-8'),
# We're making this one up to represent raw unencoded 8-bit
'8bit': (None, BASE64, 'utf-8'),
}
# Aliases for other commonly-used names for character sets. Map
# them to the real ones used in email.
ALIASES = {
'latin_1': 'iso-8859-1',
'latin-1': 'iso-8859-1',
'latin_2': 'iso-8859-2',
'latin-2': 'iso-8859-2',
'latin_3': 'iso-8859-3',
'latin-3': 'iso-8859-3',
'latin_4': 'iso-8859-4',
'latin-4': 'iso-8859-4',
'latin_5': 'iso-8859-9',
'latin-5': 'iso-8859-9',
'latin_6': 'iso-8859-10',
'latin-6': 'iso-8859-10',
'latin_7': 'iso-8859-13',
'latin-7': 'iso-8859-13',
'latin_8': 'iso-8859-14',
'latin-8': 'iso-8859-14',
'latin_9': 'iso-8859-15',
'latin-9': 'iso-8859-15',
'latin_10':'iso-8859-16',
'latin-10':'iso-8859-16',
'cp949': 'ks_c_5601-1987',
'euc_jp': 'euc-jp',
'euc_kr': 'euc-kr',
'ascii': 'us-ascii',
}
# Map charsets to their Unicode codec strings.
CODEC_MAP = {
'gb2312': 'eucgb2312_cn',
'big5': 'big5_tw',
# Hack: We don't want *any* conversion for stuff marked us-ascii, as all
# sorts of garbage might be sent to us in the guise of 7-bit us-ascii.
# Let that stuff pass through without conversion to/from Unicode.
'us-ascii': None,
}
# Convenience functions for extending the above mappings
def add_charset(charset, header_enc=None, body_enc=None, output_charset=None):
"""Add character set properties to the global registry.
charset is the input character set, and must be the canonical name of a
character set.
Optional header_enc and body_enc are either Charset.QP for
quoted-printable, Charset.BASE64 for base64 encoding, Charset.SHORTEST for
the shortest of qp or base64 encoding, or None for no encoding. SHORTEST
is only valid for header_enc. It describes how message headers and
message bodies in the input charset are to be encoded. Default is no
encoding.
Optional output_charset is the character set that the output should be
in. Conversions will proceed from input charset, to Unicode, to the
output charset when the method Charset.convert() is called. The default
is to output in the same character set as the input.
Both input_charset and output_charset must have Unicode codec entries in
the module's charset-to-codec mapping; use add_codec(charset, codecname)
to add codecs the module does not know about. See the codecs module's
documentation for more information.
"""
if body_enc == SHORTEST:
raise ValueError('SHORTEST not allowed for body_enc')
CHARSETS[charset] = (header_enc, body_enc, output_charset)
def add_alias(alias, canonical):
"""Add a character set alias.
alias is the alias name, e.g. latin-1
canonical is the character set's canonical name, e.g. iso-8859-1
"""
ALIASES[alias] = canonical
def add_codec(charset, codecname):
"""Add a codec that map characters in the given charset to/from Unicode.
charset is the canonical name of a character set. codecname is the name
of a Python codec, as appropriate for the second argument to the unicode()
built-in, or to the encode() method of a Unicode string.
"""
CODEC_MAP[charset] = codecname
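# --- Editor's illustration (not part of the stdlib module; the charset and
# alias names below are made up for the example) ---
# Registering a charset the module does not know about might look like:
#
#     add_codec('x-mac-roman', 'mac_roman')
#     add_alias('macroman', 'x-mac-roman')
#     add_charset('x-mac-roman', header_enc=QP, body_enc=BASE64,
#                 output_charset='utf-8')
#
# After this, Charset('macroman') resolves to x-mac-roman and converts
# message bodies to utf-8 via the mac_roman codec.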
class Charset:
"""Map character sets to their email properties.
This class provides information about the requirements imposed on email
for a specific character set. It also provides convenience routines for
converting between character sets, given the availability of the
applicable codecs. Given a character set, it will do its best to provide
information on how to use that character set in an email in an
RFC-compliant way.
Certain character sets must be encoded with quoted-printable or base64
when used in email headers or bodies. Certain character sets must be
converted outright, and are not allowed in email. Instances of this
class expose the following information about a character set:
input_charset: The initial character set specified. Common aliases
are converted to their `official' email names (e.g. latin_1
is converted to iso-8859-1). Defaults to 7-bit us-ascii.
header_encoding: If the character set must be encoded before it can be
used in an email header, this attribute will be set to
Charset.QP (for quoted-printable), Charset.BASE64 (for
base64 encoding), or Charset.SHORTEST for the shortest of
QP or BASE64 encoding. Otherwise, it will be None.
body_encoding: Same as header_encoding, but describes the encoding for the
mail message's body, which indeed may be different than the
header encoding. Charset.SHORTEST is not allowed for
body_encoding.
output_charset: Some character sets must be converted before they can be
used in email headers or bodies. If the input_charset is
one of them, this attribute will contain the name of the
charset output will be converted to. Otherwise, it will
be None.
input_codec: The name of the Python codec used to convert the
input_charset to Unicode. If no conversion codec is
necessary, this attribute will be None.
output_codec: The name of the Python codec used to convert Unicode
to the output_charset. If no conversion codec is necessary,
this attribute will have the same value as the input_codec.
"""
def __init__(self, input_charset=DEFAULT_CHARSET):
# RFC 2046, $4.1.2 says charsets are not case sensitive. We coerce to
# unicode because its .lower() is locale insensitive. If the argument
# is already a unicode, we leave it at that, but ensure that the
# charset is ASCII, as the standard (RFC XXX) requires.
try:
if isinstance(input_charset, unicode):
input_charset.encode('ascii')
else:
input_charset = unicode(input_charset, 'ascii')
except UnicodeError:
raise errors.CharsetError(input_charset)
input_charset = input_charset.lower().encode('ascii')
# Set the input charset after filtering through the aliases and/or codecs
if not (input_charset in ALIASES or input_charset in CHARSETS):
try:
input_charset = codecs.lookup(input_charset).name
except LookupError:
pass
self.input_charset = ALIASES.get(input_charset, input_charset)
# We can try to guess which encoding and conversion to use by the
# charset_map dictionary. Try that first, but let the user override
# it.
henc, benc, conv = CHARSETS.get(self.input_charset,
(SHORTEST, BASE64, None))
if not conv:
conv = self.input_charset
# Set the attributes, allowing the arguments to override the default.
self.header_encoding = henc
self.body_encoding = benc
self.output_charset = ALIASES.get(conv, conv)
# Now set the codecs. If one isn't defined for input_charset,
# guess and try a Unicode codec with the same name as input_codec.
self.input_codec = CODEC_MAP.get(self.input_charset,
self.input_charset)
self.output_codec = CODEC_MAP.get(self.output_charset,
self.output_charset)
def __str__(self):
return self.input_charset.lower()
__repr__ = __str__
def __eq__(self, other):
return str(self) == str(other).lower()
def __ne__(self, other):
return not self.__eq__(other)
def get_body_encoding(self):
"""Return the content-transfer-encoding used for body encoding.
This is either the string `quoted-printable' or `base64' depending on
the encoding used, or it is a function in which case you should call
the function with a single argument, the Message object being
encoded. The function should then set the Content-Transfer-Encoding
header itself to whatever is appropriate.
Returns "quoted-printable" if self.body_encoding is QP.
Returns "base64" if self.body_encoding is BASE64.
Returns "7bit" otherwise.
"""
assert self.body_encoding != SHORTEST
if self.body_encoding == QP:
return 'quoted-printable'
elif self.body_encoding == BASE64:
return 'base64'
else:
return encode_7or8bit
def convert(self, s):
"""Convert a string from the input_codec to the output_codec."""
if self.input_codec != self.output_codec:
return unicode(s, self.input_codec).encode(self.output_codec)
else:
return s
def to_splittable(self, s):
"""Convert a possibly multibyte string to a safely splittable format.
Uses the input_codec to try and convert the string to Unicode, so it
can be safely split on character boundaries (even for multibyte
characters).
Returns the string as-is if it isn't known how to convert it to
Unicode with the input_charset.
Characters that could not be converted to Unicode will be replaced
with the Unicode replacement character U+FFFD.
"""
if isinstance(s, unicode) or self.input_codec is None:
return s
try:
return unicode(s, self.input_codec, 'replace')
except LookupError:
# Input codec not installed on system, so return the original
# string unchanged.
return s
def from_splittable(self, ustr, to_output=True):
"""Convert a splittable string back into an encoded string.
Uses the proper codec to try and convert the string from Unicode back
into an encoded format. Return the string as-is if it is not Unicode,
or if it could not be converted from Unicode.
Characters that could not be converted from Unicode will be replaced
with an appropriate character (usually '?').
If to_output is True (the default), uses output_codec to convert to an
encoded format. If to_output is False, uses input_codec.
"""
if to_output:
codec = self.output_codec
else:
codec = self.input_codec
if not isinstance(ustr, unicode) or codec is None:
return ustr
try:
return ustr.encode(codec, 'replace')
except LookupError:
# Output codec not installed
return ustr
def get_output_charset(self):
"""Return the output character set.
This is self.output_charset if that is not None, otherwise it is
self.input_charset.
"""
return self.output_charset or self.input_charset
def encoded_header_len(self, s):
"""Return the length of the encoded header string."""
cset = self.get_output_charset()
# The len(s) of a 7bit encoding is len(s)
if self.header_encoding == BASE64:
return email.base64mime.base64_len(s) + len(cset) + MISC_LEN
elif self.header_encoding == QP:
return email.quoprimime.header_quopri_len(s) + len(cset) + MISC_LEN
elif self.header_encoding == SHORTEST:
lenb64 = email.base64mime.base64_len(s)
lenqp = email.quoprimime.header_quopri_len(s)
return min(lenb64, lenqp) + len(cset) + MISC_LEN
else:
return len(s)
def header_encode(self, s, convert=False):
"""Header-encode a string, optionally converting it to output_charset.
If convert is True, the string will be converted from the input
charset to the output charset automatically. This is not useful for
multibyte character sets, which have line length issues (multibyte
characters must be split on a character, not a byte boundary); use the
high-level Header class to deal with these issues. convert defaults
to False.
The type of encoding (base64 or quoted-printable) will be based on
self.header_encoding.
"""
cset = self.get_output_charset()
if convert:
s = self.convert(s)
# 7bit/8bit encodings return the string unchanged (modulo conversions)
if self.header_encoding == BASE64:
return email.base64mime.header_encode(s, cset)
elif self.header_encoding == QP:
return email.quoprimime.header_encode(s, cset, maxlinelen=None)
elif self.header_encoding == SHORTEST:
lenb64 = email.base64mime.base64_len(s)
lenqp = email.quoprimime.header_quopri_len(s)
if lenb64 < lenqp:
return email.base64mime.header_encode(s, cset)
else:
return email.quoprimime.header_encode(s, cset, maxlinelen=None)
else:
return s
def body_encode(self, s, convert=True):
"""Body-encode a string and convert it to output_charset.
If convert is True (the default), the string will be converted from
the input charset to output charset automatically. Unlike
header_encode(), there are no issues with byte boundaries and
multibyte charsets in email bodies, so this is usually pretty safe.
The type of encoding (base64 or quoted-printable) will be based on
self.body_encoding.
"""
if convert:
s = self.convert(s)
# 7bit/8bit encodings return the string unchanged (modulo conversions)
if self.body_encoding is BASE64:
return email.base64mime.body_encode(s)
elif self.body_encoding is QP:
return email.quoprimime.body_encode(s)
else:
return s
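# --- Editor's usage sketch (not part of the stdlib module) ---
if __name__ == '__main__':
    c = Charset('latin_1')                  # alias resolves to iso-8859-1
    print(c.get_output_charset())           # iso-8859-1
    print(c.get_body_encoding())            # quoted-printable
    print(c.header_encode('hello world'))   # =?iso-8859-1?q?hello_world?=
    # utf-8 uses SHORTEST for headers; QP wins here because the text is ASCII
    print(Charset('utf-8').header_encode('hello'))   # =?utf-8?q?hello?=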
|
gpl-2.0
|
erkanay/django
|
django/http/request.py
|
11
|
21075
|
from __future__ import unicode_literals
import copy
import os
import re
import sys
from io import BytesIO
from itertools import chain
from pprint import pformat
from django.conf import settings
from django.core import signing
from django.core.exceptions import DisallowedHost, ImproperlyConfigured
from django.core.files import uploadhandler
from django.http.multipartparser import MultiPartParser, MultiPartParserError
from django.utils import six
from django.utils.datastructures import MultiValueDict, ImmutableList
from django.utils.encoding import force_bytes, force_text, force_str, iri_to_uri
from django.utils.six.moves.urllib.parse import parse_qsl, urlencode, quote, urljoin, urlsplit
RAISE_ERROR = object()
absolute_http_url_re = re.compile(r"^https?://", re.I)
host_validation_re = re.compile(r"^([a-z0-9.-]+|\[[a-f0-9]*:[a-f0-9:]+\])(:\d+)?$")
class UnreadablePostError(IOError):
pass
class RawPostDataException(Exception):
"""
You cannot access raw_post_data from a request that has
multipart/* POST data if it has been accessed via POST,
FILES, etc.
"""
pass
class HttpRequest(object):
"""A basic HTTP request."""
# The encoding used in GET/POST dicts. None means use default setting.
_encoding = None
_upload_handlers = []
def __init__(self):
# WARNING: The `WSGIRequest` subclass doesn't call `super`.
# Any variable assignment made here should also happen in
# `WSGIRequest.__init__()`.
self.GET = QueryDict(mutable=True)
self.POST = QueryDict(mutable=True)
self.COOKIES = {}
self.META = {}
self.FILES = MultiValueDict()
self.path = ''
self.path_info = ''
self.method = None
self.resolver_match = None
self._post_parse_error = False
def __repr__(self):
return build_request_repr(self)
def get_host(self):
"""Returns the HTTP host using the environment or request headers."""
# We try three options, in order of decreasing preference.
if settings.USE_X_FORWARDED_HOST and (
'HTTP_X_FORWARDED_HOST' in self.META):
host = self.META['HTTP_X_FORWARDED_HOST']
elif 'HTTP_HOST' in self.META:
host = self.META['HTTP_HOST']
else:
# Reconstruct the host using the algorithm from PEP 333.
host = self.META['SERVER_NAME']
server_port = str(self.META['SERVER_PORT'])
if server_port != ('443' if self.is_secure() else '80'):
host = '%s:%s' % (host, server_port)
# There is no hostname validation when DEBUG=True
if settings.DEBUG:
return host
domain, port = split_domain_port(host)
if domain and validate_host(domain, settings.ALLOWED_HOSTS):
return host
else:
msg = "Invalid HTTP_HOST header: %r." % host
if domain:
msg += " You may need to add %r to ALLOWED_HOSTS." % domain
else:
msg += " The domain name provided is not valid according to RFC 1034/1035."
raise DisallowedHost(msg)
def get_full_path(self):
# RFC 3986 requires query string arguments to be in the ASCII range.
# Rather than crash if this doesn't happen, we encode defensively.
return '%s%s' % (
self.path,
('?' + iri_to_uri(self.META.get('QUERY_STRING', ''))) if self.META.get('QUERY_STRING', '') else ''
)
def get_signed_cookie(self, key, default=RAISE_ERROR, salt='', max_age=None):
"""
Attempts to return a signed cookie. If the signature fails or the
cookie has expired, raises an exception... unless you provide the
default argument in which case that value will be returned instead.
"""
try:
cookie_value = self.COOKIES[key]
except KeyError:
if default is not RAISE_ERROR:
return default
else:
raise
try:
value = signing.get_cookie_signer(salt=key + salt).unsign(
cookie_value, max_age=max_age)
except signing.BadSignature:
if default is not RAISE_ERROR:
return default
else:
raise
return value
def build_absolute_uri(self, location=None):
"""
Builds an absolute URI from the location and the variables available in
this request. If no ``location`` is specified, the absolute URI is
built on ``request.get_full_path()``. If the location is absolute, it is
simply converted to an RFC 3987 compliant URI and returned; if the location
is relative or scheme-relative (i.e., ``//example.com/``), it is urljoined to
a base URL constructed from the request variables.
"""
if location is None:
# Make it an absolute url (but schemeless and domainless) for the
# edge case that the path starts with '//'.
location = '//%s' % self.get_full_path()
bits = urlsplit(location)
if not (bits.scheme and bits.netloc):
current_uri = '{scheme}://{host}{path}'.format(scheme=self.scheme,
host=self.get_host(),
path=self.path)
# Join the constructed URL with the provided location, which will
# allow the provided ``location`` to apply query strings to the
# base path as well as override the host, if it begins with //
location = urljoin(current_uri, location)
return iri_to_uri(location)
def _get_scheme(self):
return 'https' if os.environ.get("HTTPS") == "on" else 'http'
@property
def scheme(self):
# First, check the SECURE_PROXY_SSL_HEADER setting.
if settings.SECURE_PROXY_SSL_HEADER:
try:
header, value = settings.SECURE_PROXY_SSL_HEADER
except ValueError:
raise ImproperlyConfigured(
'The SECURE_PROXY_SSL_HEADER setting must be a tuple containing two values.'
)
if self.META.get(header, None) == value:
return 'https'
# Failing that, fall back to _get_scheme(), which is a hook for
# subclasses to implement.
return self._get_scheme()
def is_secure(self):
return self.scheme == 'https'
def is_ajax(self):
return self.META.get('HTTP_X_REQUESTED_WITH') == 'XMLHttpRequest'
@property
def encoding(self):
return self._encoding
@encoding.setter
def encoding(self, val):
"""
Sets the encoding used for GET/POST accesses. If the GET or POST
dictionary has already been created, it is removed and recreated on the
next access (so that it is decoded correctly).
"""
self._encoding = val
if hasattr(self, '_get'):
del self._get
if hasattr(self, '_post'):
del self._post
def _initialize_handlers(self):
self._upload_handlers = [uploadhandler.load_handler(handler, self)
for handler in settings.FILE_UPLOAD_HANDLERS]
@property
def upload_handlers(self):
if not self._upload_handlers:
# If there are no upload handlers defined, initialize them from settings.
self._initialize_handlers()
return self._upload_handlers
@upload_handlers.setter
def upload_handlers(self, upload_handlers):
if hasattr(self, '_files'):
raise AttributeError("You cannot set the upload handlers after the upload has been processed.")
self._upload_handlers = upload_handlers
def parse_file_upload(self, META, post_data):
"""Returns a tuple of (POST QueryDict, FILES MultiValueDict)."""
self.upload_handlers = ImmutableList(
self.upload_handlers,
warning="You cannot alter upload handlers after the upload has been processed."
)
parser = MultiPartParser(META, post_data, self.upload_handlers, self.encoding)
return parser.parse()
@property
def body(self):
if not hasattr(self, '_body'):
if self._read_started:
raise RawPostDataException("You cannot access body after reading from request's data stream")
try:
self._body = self.read()
except IOError as e:
six.reraise(UnreadablePostError, UnreadablePostError(*e.args), sys.exc_info()[2])
self._stream = BytesIO(self._body)
return self._body
def _mark_post_parse_error(self):
self._post = QueryDict('')
self._files = MultiValueDict()
self._post_parse_error = True
def _load_post_and_files(self):
"""Populate self._post and self._files if the content-type is a form type"""
if self.method != 'POST':
self._post, self._files = QueryDict('', encoding=self._encoding), MultiValueDict()
return
if self._read_started and not hasattr(self, '_body'):
self._mark_post_parse_error()
return
if self.META.get('CONTENT_TYPE', '').startswith('multipart/form-data'):
if hasattr(self, '_body'):
# Use already read data
data = BytesIO(self._body)
else:
data = self
try:
self._post, self._files = self.parse_file_upload(self.META, data)
except MultiPartParserError:
# An error occurred while parsing POST data. Since when
# formatting the error the request handler might access
# self.POST, set self._post and self._files to prevent
# attempts to parse POST data again.
# Mark that an error occurred. This allows self.__repr__ to
# be explicit about it instead of simply representing an
# empty POST
self._mark_post_parse_error()
raise
elif self.META.get('CONTENT_TYPE', '').startswith('application/x-www-form-urlencoded'):
self._post, self._files = QueryDict(self.body, encoding=self._encoding), MultiValueDict()
else:
self._post, self._files = QueryDict('', encoding=self._encoding), MultiValueDict()
def close(self):
if hasattr(self, '_files'):
for f in chain.from_iterable(l[1] for l in self._files.lists()):
f.close()
# File-like and iterator interface.
#
# Expects self._stream to be set to an appropriate source of bytes by
# a corresponding request subclass (e.g. WSGIRequest).
# Also when request data has already been read by request.POST or
# request.body, self._stream points to a BytesIO instance
# containing that data.
def read(self, *args, **kwargs):
self._read_started = True
try:
return self._stream.read(*args, **kwargs)
except IOError as e:
six.reraise(UnreadablePostError, UnreadablePostError(*e.args), sys.exc_info()[2])
def readline(self, *args, **kwargs):
self._read_started = True
try:
return self._stream.readline(*args, **kwargs)
except IOError as e:
six.reraise(UnreadablePostError, UnreadablePostError(*e.args), sys.exc_info()[2])
def xreadlines(self):
while True:
buf = self.readline()
if not buf:
break
yield buf
__iter__ = xreadlines
def readlines(self):
return list(iter(self))
class QueryDict(MultiValueDict):
"""
A specialized MultiValueDict which represents a query string.
A QueryDict can be used to represent GET or POST data. It subclasses
MultiValueDict since keys in such data can be repeated, for instance
in the data from a form with a <select multiple> field.
By default QueryDicts are immutable, though the copy() method
will always return a mutable copy.
Both keys and values set on this class are converted from the given encoding
(DEFAULT_CHARSET by default) to unicode.
"""
# These are both reset in __init__, but are specified here at the class
# level so that unpickling will have valid values
_mutable = True
_encoding = None
def __init__(self, query_string=None, mutable=False, encoding=None):
super(QueryDict, self).__init__()
if not encoding:
encoding = settings.DEFAULT_CHARSET
self.encoding = encoding
if six.PY3:
if isinstance(query_string, bytes):
# query_string normally contains URL-encoded data, a subset of ASCII.
try:
query_string = query_string.decode(encoding)
except UnicodeDecodeError:
# ... but some user agents are misbehaving :-(
query_string = query_string.decode('iso-8859-1')
for key, value in parse_qsl(query_string or '',
keep_blank_values=True,
encoding=encoding):
self.appendlist(key, value)
else:
for key, value in parse_qsl(query_string or '',
keep_blank_values=True):
try:
value = value.decode(encoding)
except UnicodeDecodeError:
value = value.decode('iso-8859-1')
self.appendlist(force_text(key, encoding, errors='replace'),
value)
self._mutable = mutable
@property
def encoding(self):
if self._encoding is None:
self._encoding = settings.DEFAULT_CHARSET
return self._encoding
@encoding.setter
def encoding(self, value):
self._encoding = value
def _assert_mutable(self):
if not self._mutable:
raise AttributeError("This QueryDict instance is immutable")
def __setitem__(self, key, value):
self._assert_mutable()
key = bytes_to_text(key, self.encoding)
value = bytes_to_text(value, self.encoding)
super(QueryDict, self).__setitem__(key, value)
def __delitem__(self, key):
self._assert_mutable()
super(QueryDict, self).__delitem__(key)
def __copy__(self):
result = self.__class__('', mutable=True, encoding=self.encoding)
for key, value in six.iterlists(self):
result.setlist(key, value)
return result
def __deepcopy__(self, memo):
result = self.__class__('', mutable=True, encoding=self.encoding)
memo[id(self)] = result
for key, value in six.iterlists(self):
result.setlist(copy.deepcopy(key, memo), copy.deepcopy(value, memo))
return result
def setlist(self, key, list_):
self._assert_mutable()
key = bytes_to_text(key, self.encoding)
list_ = [bytes_to_text(elt, self.encoding) for elt in list_]
super(QueryDict, self).setlist(key, list_)
def setlistdefault(self, key, default_list=None):
self._assert_mutable()
return super(QueryDict, self).setlistdefault(key, default_list)
def appendlist(self, key, value):
self._assert_mutable()
key = bytes_to_text(key, self.encoding)
value = bytes_to_text(value, self.encoding)
super(QueryDict, self).appendlist(key, value)
def pop(self, key, *args):
self._assert_mutable()
return super(QueryDict, self).pop(key, *args)
def popitem(self):
self._assert_mutable()
return super(QueryDict, self).popitem()
def clear(self):
self._assert_mutable()
super(QueryDict, self).clear()
def setdefault(self, key, default=None):
self._assert_mutable()
key = bytes_to_text(key, self.encoding)
default = bytes_to_text(default, self.encoding)
return super(QueryDict, self).setdefault(key, default)
def copy(self):
"""Returns a mutable copy of this object."""
return self.__deepcopy__({})
def urlencode(self, safe=None):
"""
Returns an encoded string of all query string arguments.
:arg safe: Used to specify characters which do not require quoting, for
example::
>>> q = QueryDict('', mutable=True)
>>> q['next'] = '/a&b/'
>>> q.urlencode()
'next=%2Fa%26b%2F'
>>> q.urlencode(safe='/')
'next=/a%26b/'
"""
output = []
if safe:
safe = force_bytes(safe, self.encoding)
encode = lambda k, v: '%s=%s' % ((quote(k, safe), quote(v, safe)))
else:
encode = lambda k, v: urlencode({k: v})
for k, list_ in self.lists():
k = force_bytes(k, self.encoding)
output.extend([encode(k, force_bytes(v, self.encoding))
for v in list_])
return '&'.join(output)
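# --- Editor's usage sketch (not part of Django) ---
# Passing an explicit encoding avoids touching settings.DEFAULT_CHARSET:
#
#     q = QueryDict('a=1&a=2', encoding='utf-8')
#     q.getlist('a')          # ['1', '2']  (repeated keys are preserved)
#     q['a']                  # '2'         (item access returns the last value)
#     q['b'] = '3'            # raises AttributeError: instance is immutable
#     q.copy()['b'] = '3'     # fine: copy() always returns a mutable QueryDict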
def build_request_repr(request, path_override=None, GET_override=None,
POST_override=None, COOKIES_override=None,
META_override=None):
"""
Builds and returns the request's representation string. The request's
attributes may be overridden by pre-processed values.
"""
# Since this is called as part of error handling, we need to be very
# robust against potentially malformed input.
try:
get = (pformat(GET_override)
if GET_override is not None
else pformat(request.GET))
except Exception:
get = '<could not parse>'
if request._post_parse_error:
post = '<could not parse>'
else:
try:
post = (pformat(POST_override)
if POST_override is not None
else pformat(request.POST))
except Exception:
post = '<could not parse>'
try:
cookies = (pformat(COOKIES_override)
if COOKIES_override is not None
else pformat(request.COOKIES))
except Exception:
cookies = '<could not parse>'
try:
meta = (pformat(META_override)
if META_override is not None
else pformat(request.META))
except Exception:
meta = '<could not parse>'
path = path_override if path_override is not None else request.path
return force_str('<%s\npath:%s,\nGET:%s,\nPOST:%s,\nCOOKIES:%s,\nMETA:%s>' %
(request.__class__.__name__,
path,
six.text_type(get),
six.text_type(post),
six.text_type(cookies),
six.text_type(meta)))
# It's neither necessary nor appropriate to use
# django.utils.encoding.smart_text for parsing URLs and form inputs. Thus,
# this slightly more restricted function, used by QueryDict.
def bytes_to_text(s, encoding):
"""
Converts basestring objects to unicode, using the given encoding. Illegally
encoded input characters are replaced with Unicode "unknown" codepoint
(\ufffd).
Returns any non-basestring objects without change.
"""
if isinstance(s, bytes):
return six.text_type(s, encoding, 'replace')
else:
return s
def split_domain_port(host):
"""
Return a (domain, port) tuple from a given host.
Returned domain is lower-cased. If the host is invalid, the domain will be
empty.
"""
host = host.lower()
if not host_validation_re.match(host):
return '', ''
if host[-1] == ']':
# It's an IPv6 address without a port.
return host, ''
bits = host.rsplit(':', 1)
if len(bits) == 2:
return tuple(bits)
return bits[0], ''
def validate_host(host, allowed_hosts):
"""
Validate the given host for this site.
Check that the host looks valid and matches a host or host pattern in the
given list of ``allowed_hosts``. Any pattern beginning with a period
matches a domain and all its subdomains (e.g. ``.example.com`` matches
``example.com`` and any subdomain), ``*`` matches anything, and anything
else must match exactly.
Note: This function assumes that the given host is lower-cased and has
already had the port, if any, stripped off.
Return ``True`` for a valid host, ``False`` otherwise.
"""
host = host[:-1] if host.endswith('.') else host
for pattern in allowed_hosts:
pattern = pattern.lower()
match = (
pattern == '*' or
pattern.startswith('.') and (
host.endswith(pattern) or host == pattern[1:]
) or
pattern == host
)
if match:
return True
return False
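# --- Editor's usage sketch (not part of Django; assumes Django is importable
# so the imports at the top of this module succeed) ---
if __name__ == '__main__':
    # split_domain_port() lower-cases the host and separates an optional port.
    print(split_domain_port('Example.COM:8000'))   # ('example.com', '8000')
    print(split_domain_port('[::1]'))              # ('[::1]', '')
    # A pattern starting with '.' matches the domain and all its subdomains.
    print(validate_host('www.example.com', ['.example.com']))   # True
    print(validate_host('attacker.com', ['.example.com']))      # False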
|
bsd-3-clause
|
freakboy3742/django
|
tests/forms_tests/widget_tests/test_splitdatetimewidget.py
|
84
|
2669
|
from datetime import date, datetime, time
from django.forms import SplitDateTimeWidget
from .base import WidgetTest
class SplitDateTimeWidgetTest(WidgetTest):
widget = SplitDateTimeWidget()
def test_render_empty(self):
self.check_html(self.widget, 'date', '', html=(
'<input type="text" name="date_0"><input type="text" name="date_1">'
))
def test_render_none(self):
self.check_html(self.widget, 'date', None, html=(
'<input type="text" name="date_0"><input type="text" name="date_1">'
))
def test_render_datetime(self):
self.check_html(self.widget, 'date', datetime(2006, 1, 10, 7, 30), html=(
'<input type="text" name="date_0" value="2006-01-10">'
'<input type="text" name="date_1" value="07:30:00">'
))
def test_render_date_and_time(self):
self.check_html(self.widget, 'date', [date(2006, 1, 10), time(7, 30)], html=(
'<input type="text" name="date_0" value="2006-01-10">'
'<input type="text" name="date_1" value="07:30:00">'
))
def test_constructor_attrs(self):
widget = SplitDateTimeWidget(attrs={'class': 'pretty'})
self.check_html(widget, 'date', datetime(2006, 1, 10, 7, 30), html=(
'<input type="text" class="pretty" value="2006-01-10" name="date_0">'
'<input type="text" class="pretty" value="07:30:00" name="date_1">'
))
def test_constructor_different_attrs(self):
html = (
'<input type="text" class="foo" value="2006-01-10" name="date_0">'
'<input type="text" class="bar" value="07:30:00" name="date_1">'
)
widget = SplitDateTimeWidget(date_attrs={'class': 'foo'}, time_attrs={'class': 'bar'})
self.check_html(widget, 'date', datetime(2006, 1, 10, 7, 30), html=html)
widget = SplitDateTimeWidget(date_attrs={'class': 'foo'}, attrs={'class': 'bar'})
self.check_html(widget, 'date', datetime(2006, 1, 10, 7, 30), html=html)
widget = SplitDateTimeWidget(time_attrs={'class': 'bar'}, attrs={'class': 'foo'})
self.check_html(widget, 'date', datetime(2006, 1, 10, 7, 30), html=html)
def test_formatting(self):
"""
Use 'date_format' and 'time_format' to change the way a value is
displayed.
"""
widget = SplitDateTimeWidget(
date_format='%d/%m/%Y', time_format='%H:%M',
)
self.check_html(widget, 'date', datetime(2006, 1, 10, 7, 30), html=(
'<input type="text" name="date_0" value="10/01/2006">'
'<input type="text" name="date_1" value="07:30">'
))
|
bsd-3-clause
|
ulif/pulp
|
server/pulp/server/db/migrations/0027_importer_schema_change.py
|
1
|
1091
|
import datetime
import isodate
from pulp.server.db.connection import get_collection
def migrate(*args, **kwargs):
"""
Add last_updated and last_override_config to the importer collection.
"""
updated_key = 'last_updated'
config_key = 'last_override_config'
collection = get_collection('repo_importers')
for importer in collection.find():
modified = False
if config_key not in importer:
importer[config_key] = {}
modified = True
# If the key doesn't exist, or does exist but has no value, set it based on the
# last sync time, if possible. Otherwise, set it to now.
if not importer.get(updated_key, None):
try:
importer[updated_key] = isodate.parse_datetime(importer['last_sync'])
# The attribute doesn't exist, or parsing failed. It's safe to set a newer timestamp.
except Exception:
importer[updated_key] = datetime.datetime.now(tz=isodate.UTC)
modified = True
if modified:
collection.save(importer)
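# --- Editor's illustration (not part of the original migration) ---
# For an importer document such as
#     {'repo_id': 'repo-a', 'last_sync': '2014-01-01T00:00:00Z'}
# the migration adds
#     'last_override_config': {}
# and sets 'last_updated' to the parsed last_sync datetime; if last_sync is
# missing or unparseable, 'last_updated' falls back to the current UTC time.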
|
gpl-2.0
|
jinnykoo/wuyisj
|
tests/integration/offer/manager_tests.py
|
53
|
1741
|
import datetime
from django.test import TestCase
from django.utils import timezone
from oscar.test import factories
from oscar.apps.offer import models
class TestActiveOfferManager(TestCase):
def test_includes_offers_in_date_range(self):
# Create an offer that is currently available
now = timezone.now()
start = now - datetime.timedelta(days=1)
end = now + datetime.timedelta(days=1)
factories.create_offer(start=start, end=end)
filtered_offers = models.ConditionalOffer.active.all()
self.assertEqual(1, len(filtered_offers))
def test_filters_out_expired_offers(self):
# Create an offer that has already expired
now = timezone.now()
start = now - datetime.timedelta(days=3)
end = now - datetime.timedelta(days=1)
factories.create_offer(start=start, end=end)
filtered_offers = models.ConditionalOffer.active.all()
self.assertEqual(0, len(filtered_offers))
def test_filters_out_offers_yet_to_start(self):
# Create an offer that hasn't started yet
now = timezone.now()
start = now + datetime.timedelta(days=1)
end = now + datetime.timedelta(days=3)
factories.create_offer(start=start, end=end)
filtered_offers = models.ConditionalOffer.active.all()
self.assertEqual(0, len(filtered_offers))
def test_filters_out_suspended_offers(self):
# Create offer that is available but with the wrong status
factories.create_offer(
status=models.ConditionalOffer.SUSPENDED)
filtered_offers = models.ConditionalOffer.active.all()
self.assertEqual(0, len(filtered_offers))
|
bsd-3-clause
|
bitifirefly/edx-platform
|
lms/djangoapps/verify_student/views.py
|
5
|
58018
|
"""
Views for the verification flow
"""
import datetime
import decimal
import json
import logging
import urllib
from pytz import UTC
from ipware.ip import get_ip
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.core.mail import send_mail
from django.core.urlresolvers import reverse
from django.http import HttpResponse, HttpResponseBadRequest, Http404
from django.contrib.auth.models import User
from django.shortcuts import redirect
from django.utils import timezone
from django.utils.decorators import method_decorator
from django.utils.translation import ugettext as _, ugettext_lazy
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.http import require_POST
from django.views.generic.base import View, RedirectView
import analytics
from eventtracking import tracker
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey, UsageKey
from commerce import ecommerce_api_client
from commerce.utils import audit_log
from course_modes.models import CourseMode
from courseware.url_helpers import get_redirect_url
from ecommerce_api_client.exceptions import SlumberBaseException
from edxmako.shortcuts import render_to_response, render_to_string
from embargo import api as embargo_api
from microsite_configuration import microsite
from openedx.core.djangoapps.user_api.accounts import NAME_MIN_LENGTH
from openedx.core.djangoapps.user_api.accounts.api import update_account_settings
from openedx.core.djangoapps.user_api.errors import UserNotFound, AccountValidationError
from openedx.core.djangoapps.credit.api import set_credit_requirement_status
from student.models import CourseEnrollment
from shoppingcart.models import Order, CertificateItem
from shoppingcart.processors import (
get_signed_purchase_params, get_purchase_endpoint
)
from verify_student.ssencrypt import has_valid_signature
from verify_student.models import (
VerificationDeadline,
SoftwareSecurePhotoVerification,
VerificationCheckpoint,
VerificationStatus,
)
from verify_student.image import decode_image_data, InvalidImageData
from util.json_request import JsonResponse
from util.date_utils import get_default_time_display
from xmodule.modulestore.django import modulestore
from staticfiles.storage import staticfiles_storage
log = logging.getLogger(__name__)
class PayAndVerifyView(View):
"""
View for the "verify and pay" flow.
This view is somewhat complicated, because the user
can enter it from a number of different places:
* From the "choose your track" page.
* After completing payment.
* From the dashboard in order to complete verification.
* From the dashboard in order to upgrade to a verified track.
The page will display different steps and requirements
depending on:
* Whether the user has submitted a photo verification recently.
* Whether the user has paid for the course.
* How the user reached the page (mostly affects messaging)
We are also super-paranoid about how users reach this page.
If they somehow aren't enrolled, or the course doesn't exist,
or they've unenrolled, or they've already paid/verified,
... then we try to redirect them to the page with the
most appropriate messaging (including the dashboard).
Note that this page does NOT handle re-verification
(photo verification that was denied or had an error);
that is handled by the "reverify" view.
"""
# Step definitions
#
# These represent the numbered steps a user sees in
# the verify / payment flow.
#
# Steps can either be:
# - displayed or hidden
# - complete or incomplete
#
# For example, when a user enters the verification/payment
# flow for the first time, the user will see steps
# for both payment and verification. As the user
# completes these steps (for example, submitting a photo)
# the steps will be marked "complete".
#
# If a user has already verified for another course,
# then the verification steps will be hidden,
# since the user has already completed them.
#
# If a user re-enters the flow from another application
# (for example, after completing payment through
# a third-party payment processor), then the user
# will resume the flow at an intermediate step.
#
INTRO_STEP = 'intro-step'
MAKE_PAYMENT_STEP = 'make-payment-step'
PAYMENT_CONFIRMATION_STEP = 'payment-confirmation-step'
FACE_PHOTO_STEP = 'face-photo-step'
ID_PHOTO_STEP = 'id-photo-step'
REVIEW_PHOTOS_STEP = 'review-photos-step'
ENROLLMENT_CONFIRMATION_STEP = 'enrollment-confirmation-step'
ALL_STEPS = [
INTRO_STEP,
MAKE_PAYMENT_STEP,
PAYMENT_CONFIRMATION_STEP,
FACE_PHOTO_STEP,
ID_PHOTO_STEP,
REVIEW_PHOTOS_STEP,
ENROLLMENT_CONFIRMATION_STEP
]
PAYMENT_STEPS = [
MAKE_PAYMENT_STEP,
PAYMENT_CONFIRMATION_STEP
]
VERIFICATION_STEPS = [
FACE_PHOTO_STEP,
ID_PHOTO_STEP,
REVIEW_PHOTOS_STEP,
ENROLLMENT_CONFIRMATION_STEP
]
# These steps can be skipped using the ?skip-first-step GET param
SKIP_STEPS = [
INTRO_STEP,
]
STEP_TITLES = {
INTRO_STEP: ugettext_lazy("Intro"),
MAKE_PAYMENT_STEP: ugettext_lazy("Make payment"),
PAYMENT_CONFIRMATION_STEP: ugettext_lazy("Payment confirmation"),
FACE_PHOTO_STEP: ugettext_lazy("Take photo"),
ID_PHOTO_STEP: ugettext_lazy("Take a photo of your ID"),
REVIEW_PHOTOS_STEP: ugettext_lazy("Review your info"),
ENROLLMENT_CONFIRMATION_STEP: ugettext_lazy("Enrollment confirmation"),
}
# Messages
#
# Depending on how the user reached the page,
# we will display different text messaging.
# For example, we show users who are upgrading
# slightly different copy than users who are verifying
# for the first time.
#
FIRST_TIME_VERIFY_MSG = 'first-time-verify'
VERIFY_NOW_MSG = 'verify-now'
VERIFY_LATER_MSG = 'verify-later'
UPGRADE_MSG = 'upgrade'
PAYMENT_CONFIRMATION_MSG = 'payment-confirmation'
# Requirements
#
# These explain to the user what he or she
# will need to successfully pay and/or verify.
#
# These are determined by the steps displayed
# to the user; for example, if the user does not
# need to complete the verification steps,
# then the photo ID and webcam requirements are hidden.
#
ACCOUNT_ACTIVATION_REQ = "account-activation-required"
PHOTO_ID_REQ = "photo-id-required"
WEBCAM_REQ = "webcam-required"
STEP_REQUIREMENTS = {
ID_PHOTO_STEP: [PHOTO_ID_REQ, WEBCAM_REQ],
FACE_PHOTO_STEP: [WEBCAM_REQ],
}
# Deadline types
VERIFICATION_DEADLINE = "verification"
UPGRADE_DEADLINE = "upgrade"
@method_decorator(login_required)
def get(
self, request, course_id,
always_show_payment=False,
current_step=None,
message=FIRST_TIME_VERIFY_MSG
):
"""
Render the payment and verification flow.
Arguments:
request (HttpRequest): The request object.
course_id (unicode): The ID of the course the user is trying
to enroll in.
Keyword Arguments:
always_show_payment (bool): If True, show the payment steps
even if the user has already paid. This is useful
for users returning to the flow after paying.
current_step (string): The current step in the flow.
message (string): The messaging to display.
Returns:
HttpResponse
Raises:
Http404: The course does not exist or does not
have a verified mode.
"""
# Parse the course key
# The URL regex should guarantee that the key format is valid.
course_key = CourseKey.from_string(course_id)
course = modulestore().get_course(course_key)
# Verify that the course exists
if course is None:
log.warn(u"Could not find course with ID %s.", course_id)
raise Http404
# Check whether the user has access to this course
# based on country access rules.
redirect_url = embargo_api.redirect_if_blocked(
course_key,
user=request.user,
ip_address=get_ip(request),
url=request.path
)
if redirect_url:
return redirect(redirect_url)
# If the verification deadline has passed
# then show the user a message that he/she can't verify.
#
# We're making the assumptions (enforced in Django admin) that:
#
# 1) Only verified modes have verification deadlines.
#
# 2) If set, verification deadlines are always AFTER upgrade deadlines, because why would you
# let someone upgrade into a verified track if they can't complete verification?
#
verification_deadline = VerificationDeadline.deadline_for_course(course.id)
response = self._response_if_deadline_passed(course, self.VERIFICATION_DEADLINE, verification_deadline)
if response is not None:
log.info(u"Verification deadline for '%s' has passed.", course.id)
return response
# Retrieve the relevant course mode for the payment/verification flow.
#
# WARNING: this is technical debt! A much better way to do this would be to
# separate out the payment flow and use the product SKU to figure out what
# the user is trying to purchase.
#
# Nonetheless, for the time being we continue to make the really ugly assumption
# that at some point there was a paid course mode we can query for the price.
relevant_course_mode = self._get_paid_mode(course_key)
# If we can find a relevant course mode, then log that we're entering the flow
# Otherwise, this course does not support payment/verification, so respond with a 404.
if relevant_course_mode is not None:
if CourseMode.is_verified_mode(relevant_course_mode):
log.info(
u"Entering payment and verification flow for user '%s', course '%s', with current step '%s'.",
request.user.id, course_id, current_step
)
else:
log.info(
u"Entering payment flow for user '%s', course '%s', with current step '%s'",
request.user.id, course_id, current_step
)
else:
# Otherwise, there has never been a verified/paid mode,
# so return a page not found response.
log.warn(
u"No paid/verified course mode found for course '%s' for verification/payment flow request",
course_id
)
raise Http404
# If the user is trying to *pay* and the upgrade deadline has passed,
# then they shouldn't be able to enter the flow.
#
# NOTE: This should match the availability dates used by the E-Commerce service
# to determine whether a user can purchase a product. The idea is that if the service
# won't fulfill the order, we shouldn't even let the user get into the payment flow.
#
user_is_trying_to_pay = message in [self.FIRST_TIME_VERIFY_MSG, self.UPGRADE_MSG]
if user_is_trying_to_pay:
upgrade_deadline = relevant_course_mode.expiration_datetime
response = self._response_if_deadline_passed(course, self.UPGRADE_DEADLINE, upgrade_deadline)
if response is not None:
log.info(u"Upgrade deadline for '%s' has passed.", course.id)
return response
# Check whether the user has verified, paid, and enrolled.
# A user is considered "paid" if he or she has an enrollment
# with a paid course mode (such as "verified").
# For this reason, every paid user is enrolled, but not
# every enrolled user is paid.
# If the course mode is not verified (i.e. only paid), then already_verified is always True
already_verified = (
self._check_already_verified(request.user)
if CourseMode.is_verified_mode(relevant_course_mode)
else True
)
already_paid, is_enrolled = self._check_enrollment(request.user, course_key)
# Redirect the user to a more appropriate page if the
# messaging won't make sense based on the user's
# enrollment / payment / verification status.
redirect_response = self._redirect_if_necessary(
message,
already_verified,
already_paid,
is_enrolled,
course_key
)
if redirect_response is not None:
return redirect_response
display_steps = self._display_steps(
always_show_payment,
already_verified,
already_paid,
relevant_course_mode
)
requirements = self._requirements(display_steps, request.user.is_active)
if current_step is None:
current_step = display_steps[0]['name']
# Allow the caller to skip the first page
# This is useful if we want the user to be able to
# use the "back" button to return to the previous step.
# This parameter should only work for known skip-able steps
if request.GET.get('skip-first-step') and current_step in self.SKIP_STEPS:
display_step_names = [step['name'] for step in display_steps]
current_step_idx = display_step_names.index(current_step)
if (current_step_idx + 1) < len(display_steps):
current_step = display_steps[current_step_idx + 1]['name']
courseware_url = ""
if not course.start or course.start < datetime.datetime.today().replace(tzinfo=UTC):
courseware_url = reverse(
'course_root',
kwargs={'course_id': unicode(course_key)}
)
full_name = (
request.user.profile.name
if request.user.profile.name
else ""
)
# If the user set a contribution amount on another page,
# use that amount to pre-fill the price selection form.
contribution_amount = request.session.get(
'donation_for_course', {}
).get(unicode(course_key), '')
# Remember whether the user is upgrading
# so we can fire an analytics event upon payment.
request.session['attempting_upgrade'] = (message == self.UPGRADE_MSG)
# Determine the photo verification status
verification_good_until = self._verification_valid_until(request.user)
# get available payment processors
if relevant_course_mode.sku:
# transaction will be conducted via ecommerce service
processors = ecommerce_api_client(request.user).payment.processors.get()
else:
# transaction will be conducted using legacy shopping cart
processors = [settings.CC_PROCESSOR_NAME]
# Render the top-level page
context = {
'contribution_amount': contribution_amount,
'course': course,
'course_key': unicode(course_key),
'checkpoint_location': request.GET.get('checkpoint'),
'course_mode': relevant_course_mode,
'courseware_url': courseware_url,
'current_step': current_step,
'disable_courseware_js': True,
'display_steps': display_steps,
'is_active': json.dumps(request.user.is_active),
'message_key': message,
'platform_name': settings.PLATFORM_NAME,
'processors': processors,
'requirements': requirements,
'user_full_name': full_name,
'verification_deadline': (
get_default_time_display(verification_deadline)
if verification_deadline else ""
),
'already_verified': already_verified,
'verification_good_until': verification_good_until,
'capture_sound': staticfiles_storage.url("audio/camera_capture.wav"),
'nav_hidden': True,
}
return render_to_response("verify_student/pay_and_verify.html", context)
def _redirect_if_necessary(
self,
message,
already_verified,
already_paid,
is_enrolled,
course_key
):
"""Redirect the user to a more appropriate page if necessary.
In some cases, a user may visit this page with
verification / enrollment / payment state that
we don't anticipate. For example, a user may unenroll
from the course after paying for it, then visit the
"verify now" page to complete verification.
When this happens, we try to redirect the user to
the most appropriate page.
Arguments:
message (string): The messaging of the page. Should be a key
in `MESSAGES`.
already_verified (bool): Whether the user has submitted
a verification request recently.
already_paid (bool): Whether the user is enrolled in a paid
course mode.
is_enrolled (bool): Whether the user has an active enrollment
in the course.
course_key (CourseKey): The key for the course.
Returns:
HttpResponse or None
"""
url = None
course_kwargs = {'course_id': unicode(course_key)}
if already_verified and already_paid:
# If they've already paid and verified, there's nothing else to do,
# so redirect them to the dashboard.
if message != self.PAYMENT_CONFIRMATION_MSG:
url = reverse('dashboard')
elif message in [self.VERIFY_NOW_MSG, self.VERIFY_LATER_MSG, self.PAYMENT_CONFIRMATION_MSG]:
if is_enrolled:
# If the user is already enrolled but hasn't yet paid,
# then the "upgrade" messaging is more appropriate.
if not already_paid:
url = reverse('verify_student_upgrade_and_verify', kwargs=course_kwargs)
else:
# If the user is NOT enrolled, then send him/her
# to the first time verification page.
url = reverse('verify_student_start_flow', kwargs=course_kwargs)
elif message == self.UPGRADE_MSG:
if is_enrolled:
if already_paid:
# If the student has paid, but not verified, redirect to the verification flow.
url = reverse('verify_student_verify_now', kwargs=course_kwargs)
else:
url = reverse('verify_student_start_flow', kwargs=course_kwargs)
# Redirect if necessary, otherwise implicitly return None
if url is not None:
return redirect(url)
def _get_paid_mode(self, course_key):
"""
Retrieve the paid course mode for a course.
The returned course mode may or may not be expired.
Unexpired modes are preferred to expired modes.
Arguments:
course_key (CourseKey): The location of the course.
Returns:
CourseMode tuple
"""
# Retrieve all the modes at once to reduce the number of database queries
all_modes, unexpired_modes = CourseMode.all_and_unexpired_modes_for_courses([course_key])
# Retrieve the first unexpired, paid mode, if there is one
for mode in unexpired_modes[course_key]:
if mode.min_price > 0:
return mode
# Otherwise, find the first expired mode
for mode in all_modes[course_key]:
if mode.min_price > 0:
return mode
# Otherwise, return None so the view knows to respond with a 404.
return None
def _display_steps(self, always_show_payment, already_verified, already_paid, course_mode):
"""Determine which steps to display to the user.
Includes all steps by default, but removes steps
if the user has already completed them.
Arguments:
always_show_payment (bool): If True, display the payment steps
even if the user has already paid.
already_verified (bool): Whether the user has submitted
a verification request recently.
already_paid (bool): Whether the user is enrolled in a paid
course mode.
Returns:
list
"""
display_steps = self.ALL_STEPS
remove_steps = set()
if already_verified or not CourseMode.is_verified_mode(course_mode):
remove_steps |= set(self.VERIFICATION_STEPS)
if already_paid and not always_show_payment:
remove_steps |= set(self.PAYMENT_STEPS)
else:
# The "make payment" step doubles as an intro step,
# so if we're showing the payment step, hide the intro step.
remove_steps |= set([self.INTRO_STEP])
return [
{
'name': step,
'title': unicode(self.STEP_TITLES[step]),
}
for step in display_steps
if step not in remove_steps
]
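# --- Editor's illustration (not part of the original view) ---
# For a user who has already verified but not yet paid (verified course
# mode, always_show_payment=False), the verification steps and the intro
# step are removed, so _display_steps() returns:
#     [{'name': 'make-payment-step', 'title': u'Make payment'},
#      {'name': 'payment-confirmation-step', 'title': u'Payment confirmation'}]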
def _requirements(self, display_steps, is_active):
"""Determine which requirements to show the user.
For example, if the user needs to submit a photo
verification, tell the user that she will need
a photo ID and a webcam.
Arguments:
display_steps (list): The steps to display to the user.
is_active (bool): If False, adds a requirement to activate the user account.
Returns:
dict: Keys are requirement names, values are booleans
indicating whether to show the requirement.
"""
all_requirements = {
self.ACCOUNT_ACTIVATION_REQ: not is_active,
self.PHOTO_ID_REQ: False,
self.WEBCAM_REQ: False,
}
display_steps = set(step['name'] for step in display_steps)
for step, step_requirements in self.STEP_REQUIREMENTS.iteritems():
if step in display_steps:
for requirement in step_requirements:
all_requirements[requirement] = True
return all_requirements
def _verification_valid_until(self, user, date_format="%m/%d/%Y"):
"""
Check whether the user has a valid or pending verification.
Arguments:
user:
date_format: optional parameter for formatting datetime
object to string in response
Returns:
datetime object in string format
"""
photo_verifications = SoftwareSecurePhotoVerification.verification_valid_or_pending(user)
# return 'expiration_datetime' of latest photo verification if found,
# otherwise implicitly return ''
if photo_verifications:
return photo_verifications[0].expiration_datetime.strftime(date_format)
return ''
def _check_already_verified(self, user):
"""Check whether the user has a valid or pending verification.
Note that this includes cases in which the user's verification
has not been accepted (either because it hasn't been processed,
or there was an error).
This should return True if the user has done their part:
submitted photos within the expiration period.
"""
return SoftwareSecurePhotoVerification.user_has_valid_or_pending(user)
def _check_enrollment(self, user, course_key):
"""Check whether the user has an active enrollment and has paid.
If a user is enrolled in a paid course mode, we assume
that the user has paid.
Arguments:
user (User): The user to check.
course_key (CourseKey): The key of the course to check.
Returns:
Tuple `(has_paid, is_active)` indicating whether the user
has paid and whether the user has an active account.
"""
enrollment_mode, is_active = CourseEnrollment.enrollment_mode_for_user(user, course_key)
has_paid = False
if enrollment_mode is not None and is_active:
all_modes = CourseMode.modes_for_course_dict(course_key, include_expired=True)
course_mode = all_modes.get(enrollment_mode)
has_paid = (course_mode and course_mode.min_price > 0)
return (has_paid, bool(is_active))
def _response_if_deadline_passed(self, course, deadline_name, deadline_datetime):
"""
Respond with some error messaging if the deadline has passed.
Arguments:
course (Course): The course the user is trying to enroll in.
deadline_name (str): One of the deadline constants.
deadline_datetime (datetime): The deadline.
Returns: HttpResponse or None
"""
if deadline_name not in [self.VERIFICATION_DEADLINE, self.UPGRADE_DEADLINE]:
log.error("Invalid deadline name %s. Skipping check for whether the deadline passed.", deadline_name)
return None
deadline_passed = (
deadline_datetime is not None and
deadline_datetime < datetime.datetime.now(UTC)
)
if deadline_passed:
context = {
'course': course,
'deadline_name': deadline_name,
'deadline': (
get_default_time_display(deadline_datetime)
if deadline_datetime else ""
)
}
return render_to_response("verify_student/missed_deadline.html", context)
def checkout_with_ecommerce_service(user, course_key, course_mode, processor): # pylint: disable=invalid-name
""" Create a new basket and trigger immediate checkout, using the E-Commerce API. """
course_id = unicode(course_key)
try:
api = ecommerce_api_client(user)
# Make an API call to create the order and retrieve the results
result = api.baskets.post({
'products': [{'sku': course_mode.sku}],
'checkout': True,
'payment_processor_name': processor
})
# Pass the payment parameters directly from the API response.
return result.get('payment_data')
except SlumberBaseException:
params = {'username': user.username, 'mode': course_mode.slug, 'course_id': course_id}
log.exception('Failed to create order for %(username)s %(mode)s mode of %(course_id)s', params)
raise
finally:
audit_log(
'checkout_requested',
course_id=course_id,
mode=course_mode.slug,
processor_name=processor,
user_id=user.id
)
def checkout_with_shoppingcart(request, user, course_key, course_mode, amount):
""" Create an order and trigger checkout using shoppingcart."""
cart = Order.get_cart_for_user(user)
cart.clear()
enrollment_mode = course_mode.slug
CertificateItem.add_to_order(cart, course_key, amount, enrollment_mode)
# Change the order's status so that we don't accidentally modify it later.
# We need to do this to ensure that the parameters we send to the payment system
# match what we store in the database.
# (Ordinarily we would do this client-side when the user submits the form, but since
# the JavaScript on this page does that immediately, we make the change here instead.
# This avoids a second AJAX call and some additional complication of the JavaScript.)
# If a user later re-enters the verification / payment flow, she will create a new order.
cart.start_purchase()
callback_url = request.build_absolute_uri(
reverse("shoppingcart.views.postpay_callback")
)
payment_data = {
'payment_processor_name': settings.CC_PROCESSOR_NAME,
'payment_page_url': get_purchase_endpoint(),
'payment_form_data': get_signed_purchase_params(
cart,
callback_url=callback_url,
extra_data=[unicode(course_key), course_mode.slug]
),
}
return payment_data
@require_POST
@login_required
def create_order(request):
"""
This endpoint is named 'create_order' for backward compatibility, but its
actual use is to add a single product to the user's cart and request
immediate checkout.
"""
course_id = request.POST['course_id']
course_id = CourseKey.from_string(course_id)
donation_for_course = request.session.get('donation_for_course', {})
contribution = request.POST.get("contribution", donation_for_course.get(unicode(course_id), 0))
try:
amount = decimal.Decimal(contribution).quantize(decimal.Decimal('.01'), rounding=decimal.ROUND_DOWN)
except decimal.InvalidOperation:
return HttpResponseBadRequest(_("Selected price is not valid number."))
current_mode = None
paid_modes = CourseMode.paid_modes_for_course(course_id)
    # If the course has more than one paid mode (a mode with min_price > 0,
    # e.g. verified / professional / no-id-professional), choose the first one.
if paid_modes:
if len(paid_modes) > 1:
log.warn(u"Multiple paid course modes found for course '%s' for create order request", course_id)
current_mode = paid_modes[0]
# Make sure this course has a paid mode
if not current_mode:
log.warn(u"Create order requested for course '%s' without a paid mode.", course_id)
return HttpResponseBadRequest(_("This course doesn't support paid certificates"))
if CourseMode.is_professional_mode(current_mode):
amount = current_mode.min_price
if amount < current_mode.min_price:
return HttpResponseBadRequest(_("No selected price or selected price is below minimum."))
if current_mode.sku:
# if request.POST doesn't contain 'processor' then the service's default payment processor will be used.
payment_data = checkout_with_ecommerce_service(
request.user,
course_id,
current_mode,
request.POST.get('processor')
)
else:
payment_data = checkout_with_shoppingcart(request, request.user, course_id, current_mode, amount)
if 'processor' not in request.POST:
# (XCOM-214) To be removed after release.
# the absence of this key in the POST payload indicates that the request was initiated from
# a stale js client, which expects a response containing only the 'payment_form_data' part of
# the payment data result.
payment_data = payment_data['payment_form_data']
return HttpResponse(json.dumps(payment_data), content_type="application/json")
class SubmitPhotosView(View):
"""
End-point for submitting photos for verification.
"""
@method_decorator(login_required)
def post(self, request):
"""
Submit photos for verification.
This end-point is used for the following cases:
* Initial verification through the pay-and-verify flow.
* Initial verification initiated from a checkpoint within a course.
* Re-verification initiated from a checkpoint within a course.
POST Parameters:
face_image (str): base64-encoded image data of the user's face.
photo_id_image (str): base64-encoded image data of the user's photo ID.
full_name (str): The user's full name, if the user is requesting a name change as well.
course_key (str): Identifier for the course, if initiated from a checkpoint.
checkpoint (str): Location of the checkpoint in the course.
"""
# If the user already has an initial verification attempt, we can re-use the photo ID
# the user submitted with the initial attempt. This is useful for the in-course reverification
# case in which users submit only the face photo and have it matched against their ID photos
# submitted with the initial verification.
initial_verification = SoftwareSecurePhotoVerification.get_initial_verification(request.user)
# Validate the POST parameters
params, response = self._validate_parameters(request, bool(initial_verification))
if response is not None:
return response
# If necessary, update the user's full name
if "full_name" in params:
response = self._update_full_name(request.user, params["full_name"])
if response is not None:
return response
# Retrieve the image data
# Validation ensures that we'll have a face image, but we may not have
# a photo ID image if this is a reverification.
face_image, photo_id_image, response = self._decode_image_data(
params["face_image"], params.get("photo_id_image")
)
if response is not None:
return response
# Submit the attempt
attempt = self._submit_attempt(request.user, face_image, photo_id_image, initial_verification)
# If this attempt was submitted at a checkpoint, then associate
# the attempt with the checkpoint.
submitted_at_checkpoint = "checkpoint" in params and "course_key" in params
if submitted_at_checkpoint:
checkpoint = self._associate_attempt_with_checkpoint(
request.user, attempt,
params["course_key"],
params["checkpoint"]
)
# If the submission came from an in-course checkpoint
if initial_verification is not None and submitted_at_checkpoint:
self._fire_event(request.user, "edx.bi.reverify.submitted", {
"category": "verification",
"label": unicode(params["course_key"]),
"checkpoint": checkpoint.checkpoint_name,
})
# Send a URL that the client can redirect to in order
# to return to the checkpoint in the courseware.
redirect_url = get_redirect_url(params["course_key"], params["checkpoint"])
return JsonResponse({"url": redirect_url})
# Otherwise, the submission came from an initial verification flow.
else:
self._fire_event(request.user, "edx.bi.verify.submitted", {"category": "verification"})
self._send_confirmation_email(request.user)
redirect_url = None
return JsonResponse({})
def _validate_parameters(self, request, has_initial_verification):
"""
Check that the POST parameters are valid.
Arguments:
request (HttpRequest): The request object.
has_initial_verification (bool): Whether the user has an initial verification attempt.
Returns:
HttpResponse or None
"""
# Pull out the parameters we care about.
params = {
param_name: request.POST[param_name]
for param_name in [
"face_image",
"photo_id_image",
"course_key",
"checkpoint",
"full_name"
]
if param_name in request.POST
}
# If the user already has an initial verification attempt, then we don't
# require the user to submit a photo ID image, since we can re-use the photo ID
# image from the initial attempt.
# If we don't have an initial verification OR a photo ID image, something has gone
# terribly wrong in the JavaScript. Log this as an error so we can track it down.
if "photo_id_image" not in params and not has_initial_verification:
log.error(
(
"User %s does not have an initial verification attempt "
"and no photo ID image data was provided. "
"This most likely means that the JavaScript client is not "
"correctly constructing the request to submit photos."
), request.user.id
)
return None, HttpResponseBadRequest(
_("Photo ID image is required if the user does not have an initial verification attempt.")
)
# The face image is always required.
if "face_image" not in params:
msg = _("Missing required parameter face_image")
return None, HttpResponseBadRequest(msg)
# If provided, parse the course key and checkpoint location
if "course_key" in params:
try:
params["course_key"] = CourseKey.from_string(params["course_key"])
except InvalidKeyError:
return None, HttpResponseBadRequest(_("Invalid course key"))
if "checkpoint" in params:
try:
params["checkpoint"] = UsageKey.from_string(params["checkpoint"]).replace(
course_key=params["course_key"]
)
except InvalidKeyError:
return None, HttpResponseBadRequest(_("Invalid checkpoint location"))
return params, None
def _update_full_name(self, user, full_name):
"""
Update the user's full name.
Arguments:
user (User): The user to update.
full_name (unicode): The user's updated full name.
Returns:
HttpResponse or None
"""
try:
update_account_settings(user, {"name": full_name})
except UserNotFound:
return HttpResponseBadRequest(_("No profile found for user"))
except AccountValidationError:
msg = _(
"Name must be at least {min_length} characters long."
).format(min_length=NAME_MIN_LENGTH)
return HttpResponseBadRequest(msg)
def _decode_image_data(self, face_data, photo_id_data=None):
"""
Decode image data sent with the request.
Arguments:
face_data (str): base64-encoded face image data.
Keyword Arguments:
photo_id_data (str): base64-encoded photo ID image data.
Returns:
tuple of (str, str, HttpResponse)
"""
try:
# Decode face image data (used for both an initial and re-verification)
face_image = decode_image_data(face_data)
# Decode the photo ID image data if it's provided
photo_id_image = (
decode_image_data(photo_id_data)
if photo_id_data is not None else None
)
return face_image, photo_id_image, None
except InvalidImageData:
msg = _("Image data is not valid.")
return None, None, HttpResponseBadRequest(msg)
def _submit_attempt(self, user, face_image, photo_id_image=None, initial_verification=None):
"""
Submit a verification attempt.
Arguments:
user (User): The user making the attempt.
face_image (str): Decoded face image data.
Keyword Arguments:
photo_id_image (str or None): Decoded photo ID image data.
initial_verification (SoftwareSecurePhotoVerification): The initial verification attempt.
"""
attempt = SoftwareSecurePhotoVerification(user=user)
# We will always have face image data, so upload the face image
attempt.upload_face_image(face_image)
# If an ID photo wasn't submitted, re-use the ID photo from the initial attempt.
# Earlier validation rules ensure that at least one of these is available.
if photo_id_image is not None:
attempt.upload_photo_id_image(photo_id_image)
elif initial_verification is None:
# Earlier validation should ensure that we never get here.
log.error(
"Neither a photo ID image or initial verification attempt provided. "
"Parameter validation in the view should prevent this from happening!"
)
# Submit the attempt
attempt.mark_ready()
attempt.submit(copy_id_photo_from=initial_verification)
return attempt
def _associate_attempt_with_checkpoint(self, user, attempt, course_key, usage_id):
"""
Associate the verification attempt with a checkpoint within a course.
Arguments:
user (User): The user making the attempt.
attempt (SoftwareSecurePhotoVerification): The verification attempt.
course_key (CourseKey): The identifier for the course.
usage_key (UsageKey): The location of the checkpoint within the course.
Returns:
VerificationCheckpoint
"""
checkpoint = VerificationCheckpoint.get_or_create_verification_checkpoint(course_key, usage_id)
checkpoint.add_verification_attempt(attempt)
VerificationStatus.add_verification_status(checkpoint, user, "submitted")
return checkpoint
def _send_confirmation_email(self, user):
"""
Send an email confirming that the user submitted photos
for initial verification.
"""
context = {
'full_name': user.profile.name,
'platform_name': microsite.get_value("PLATFORM_NAME", settings.PLATFORM_NAME)
}
subject = _("Verification photos received")
message = render_to_string('emails/photo_submission_confirmation.txt', context)
from_address = microsite.get_value('default_from_email', settings.DEFAULT_FROM_EMAIL)
to_address = user.email
try:
send_mail(subject, message, from_address, [to_address], fail_silently=False)
except: # pylint: disable=bare-except
# We catch all exceptions and log them.
# It would be much, much worse to roll back the transaction due to an uncaught
# exception than to skip sending the notification email.
log.exception("Could not send notification email for initial verification for user %s", user.id)
def _fire_event(self, user, event_name, parameters):
"""
Fire an analytics event.
Arguments:
user (User): The user who submitted photos.
event_name (str): Name of the analytics event.
parameters (dict): Event parameters.
Returns: None
"""
if settings.FEATURES.get('SEGMENT_IO_LMS') and hasattr(settings, 'SEGMENT_IO_LMS_KEY'):
tracking_context = tracker.get_tracker().resolve_context()
context = {
'Google Analytics': {
'clientId': tracking_context.get('client_id')
}
}
analytics.track(user.id, event_name, parameters, context=context)
def _compose_message_reverification_email(
course_key, user_id, related_assessment_location, status, request
): # pylint: disable=invalid-name
"""
Compose subject and message for photo reverification email.
    Args:
        course_key(CourseKey): CourseKey object
        user_id(str): User Id
        related_assessment_location(str): Location of the reverification XBlock
        status(str): Approval status
        request(HttpRequest): Request object, used to build absolute links
    Returns:
        None if any error occurred, else a tuple of subject and message strings
"""
try:
usage_key = UsageKey.from_string(related_assessment_location)
reverification_block = modulestore().get_item(usage_key)
course = modulestore().get_course(course_key)
redirect_url = get_redirect_url(course_key, usage_key.replace(course_key=course_key))
subject = "Re-verification Status"
context = {
"status": status,
"course_name": course.display_name_with_default,
"assessment": reverification_block.related_assessment
}
# Allowed attempts is 1 if not set on verification block
allowed_attempts = reverification_block.attempts + 1
used_attempts = VerificationStatus.get_user_attempts(user_id, course_key, related_assessment_location)
left_attempts = allowed_attempts - used_attempts
is_attempt_allowed = left_attempts > 0
verification_open = True
if reverification_block.due:
verification_open = timezone.now() <= reverification_block.due
context["left_attempts"] = left_attempts
context["is_attempt_allowed"] = is_attempt_allowed
context["verification_open"] = verification_open
context["due_date"] = get_default_time_display(reverification_block.due)
context['platform_name'] = settings.PLATFORM_NAME
context["used_attempts"] = used_attempts
context["allowed_attempts"] = allowed_attempts
context["support_link"] = microsite.get_value('email_from_address', settings.CONTACT_EMAIL)
re_verification_link = reverse(
'verify_student_incourse_reverify',
args=(
unicode(course_key),
related_assessment_location
)
)
context["course_link"] = request.build_absolute_uri(redirect_url)
context["reverify_link"] = request.build_absolute_uri(re_verification_link)
message = render_to_string('emails/reverification_processed.txt', context)
log.info(
"Sending email to User_Id=%s. Attempts left for this user are %s. "
"Allowed attempts %s. "
"Due Date %s",
str(user_id), left_attempts, allowed_attempts, str(reverification_block.due)
)
return subject, message
# Catch all exception to avoid raising back to view
except: # pylint: disable=bare-except
log.exception("The email for re-verification sending failed for user_id %s", user_id)
def _send_email(user_id, subject, message):
""" Send email to given user
Args:
user_id(str): User Id
subject(str): Subject lines of emails
message(str): Email message body
Returns:
None
"""
from_address = microsite.get_value(
'email_from_address',
settings.DEFAULT_FROM_EMAIL
)
user = User.objects.get(id=user_id)
user.email_user(subject, message, from_address)
def _set_user_requirement_status(attempt, namespace, status, reason=None):
"""Sets the status of a credit requirement for the user,
based on a verification checkpoint.
"""
checkpoint = None
try:
checkpoint = VerificationCheckpoint.objects.get(photo_verification=attempt)
except VerificationCheckpoint.DoesNotExist:
log.error("Unable to find checkpoint for user with id %d", attempt.user.id)
if checkpoint is not None:
try:
set_credit_requirement_status(
attempt.user.username,
checkpoint.course_id,
namespace,
checkpoint.checkpoint_location,
status=status,
reason=reason,
)
except Exception: # pylint: disable=broad-except
# Catch exception if unable to add credit requirement
# status for user
log.error("Unable to add Credit requirement status for user with id %d", attempt.user.id)
@require_POST
@csrf_exempt # SS does its own message signing, and their API won't have a cookie value
def results_callback(request):
"""
Software Secure will call this callback to tell us whether a user is
verified to be who they said they are.
"""
body = request.body
try:
body_dict = json.loads(body)
except ValueError:
log.exception("Invalid JSON received from Software Secure:\n\n{}\n".format(body))
return HttpResponseBadRequest("Invalid JSON. Received:\n\n{}".format(body))
if not isinstance(body_dict, dict):
log.error("Reply from Software Secure is not a dict:\n\n{}\n".format(body))
return HttpResponseBadRequest("JSON should be dict. Received:\n\n{}".format(body))
headers = {
"Authorization": request.META.get("HTTP_AUTHORIZATION", ""),
"Date": request.META.get("HTTP_DATE", "")
}
has_valid_signature(
"POST",
headers,
body_dict,
settings.VERIFY_STUDENT["SOFTWARE_SECURE"]["API_ACCESS_KEY"],
settings.VERIFY_STUDENT["SOFTWARE_SECURE"]["API_SECRET_KEY"]
)
_response, access_key_and_sig = headers["Authorization"].split(" ")
access_key = access_key_and_sig.split(":")[0]
# This is what we should be doing...
#if not sig_valid:
# return HttpResponseBadRequest("Signature is invalid")
# This is what we're doing until we can figure out why we disagree on sigs
if access_key != settings.VERIFY_STUDENT["SOFTWARE_SECURE"]["API_ACCESS_KEY"]:
return HttpResponseBadRequest("Access key invalid")
receipt_id = body_dict.get("EdX-ID")
result = body_dict.get("Result")
reason = body_dict.get("Reason", "")
error_code = body_dict.get("MessageType", "")
try:
attempt = SoftwareSecurePhotoVerification.objects.get(receipt_id=receipt_id)
except SoftwareSecurePhotoVerification.DoesNotExist:
log.error("Software Secure posted back for receipt_id %s, but not found", receipt_id)
return HttpResponseBadRequest("edX ID {} not found".format(receipt_id))
if result == "PASS":
log.debug("Approving verification for %s", receipt_id)
attempt.approve()
status = "approved"
_set_user_requirement_status(attempt, 'reverification', 'satisfied')
elif result == "FAIL":
log.debug("Denying verification for %s", receipt_id)
attempt.deny(json.dumps(reason), error_code=error_code)
status = "denied"
_set_user_requirement_status(
attempt, 'reverification', 'failed', json.dumps(reason)
)
elif result == "SYSTEM FAIL":
log.debug("System failure for %s -- resetting to must_retry", receipt_id)
attempt.system_error(json.dumps(reason), error_code=error_code)
status = "error"
log.error("Software Secure callback attempt for %s failed: %s", receipt_id, reason)
else:
log.error("Software Secure returned unknown result %s", result)
return HttpResponseBadRequest(
"Result {} not understood. Known results: PASS, FAIL, SYSTEM FAIL".format(result)
)
checkpoints = VerificationCheckpoint.objects.filter(photo_verification=attempt).all()
VerificationStatus.add_status_from_checkpoints(checkpoints=checkpoints, user=attempt.user, status=status)
# If this is re-verification then send the update email
if checkpoints:
user_id = attempt.user.id
course_key = checkpoints[0].course_id
related_assessment_location = checkpoints[0].checkpoint_location
subject, message = _compose_message_reverification_email(
course_key, user_id, related_assessment_location, status, request
)
_send_email(user_id, subject, message)
return HttpResponse("OK!")
class ReverifyView(View):
"""
Reverification occurs when a user's initial verification is denied
or expires. When this happens, users can re-submit photos through
the re-verification flow.
Unlike in-course reverification, this flow requires users to submit
*both* face and ID photos. In contrast, during in-course reverification,
students submit only face photos, which are matched against the ID photo
the user submitted during initial verification.
"""
@method_decorator(login_required)
def get(self, request):
"""
Render the reverification flow.
Most of the work is done client-side by composing the same
Backbone views used in the initial verification flow.
"""
status, _ = SoftwareSecurePhotoVerification.user_status(request.user)
if status in ["must_reverify", "expired"]:
context = {
"user_full_name": request.user.profile.name,
"platform_name": settings.PLATFORM_NAME,
"capture_sound": staticfiles_storage.url("audio/camera_capture.wav"),
}
return render_to_response("verify_student/reverify.html", context)
else:
context = {
"status": status
}
return render_to_response("verify_student/reverify_not_allowed.html", context)
class InCourseReverifyView(View):
"""
The in-course reverification view.
In-course reverification occurs while a student is taking a course.
At points in the course, students are prompted to submit face photos,
which are matched against the ID photos the user submitted during their
initial verification.
Students are prompted to enter this flow from an "In Course Reverification"
XBlock (courseware component) that course authors add to the course.
See https://github.com/edx/edx-reverification-block for more details.
"""
@method_decorator(login_required)
def get(self, request, course_id, usage_id):
"""Display the view for face photo submission.
Args:
request(HttpRequest): HttpRequest object
course_id(str): A string of course id
usage_id(str): Location of Reverification XBlock in courseware
Returns:
HttpResponse
"""
user = request.user
course_key = CourseKey.from_string(course_id)
course = modulestore().get_course(course_key)
if course is None:
log.error(u"Could not find course '%s' for in-course reverification.", course_key)
raise Http404
try:
checkpoint = VerificationCheckpoint.objects.get(course_id=course_key, checkpoint_location=usage_id)
except VerificationCheckpoint.DoesNotExist:
log.error(
u"No verification checkpoint exists for the "
u"course '%s' and checkpoint location '%s'.",
course_key, usage_id
)
raise Http404
initial_verification = SoftwareSecurePhotoVerification.get_initial_verification(user)
if not initial_verification:
return self._redirect_to_initial_verification(user, course_key, usage_id)
# emit the reverification event
self._track_reverification_events('edx.bi.reverify.started', user.id, course_id, checkpoint.checkpoint_name)
context = {
'course_key': unicode(course_key),
'course_name': course.display_name_with_default,
'checkpoint_name': checkpoint.checkpoint_name,
'platform_name': settings.PLATFORM_NAME,
'usage_id': usage_id,
'capture_sound': staticfiles_storage.url("audio/camera_capture.wav"),
}
return render_to_response("verify_student/incourse_reverify.html", context)
def _track_reverification_events(self, event_name, user_id, course_id, checkpoint): # pylint: disable=invalid-name
"""Track re-verification events for a user against a reverification
checkpoint of a course.
Arguments:
event_name (str): Name of event being tracked
user_id (str): The ID of the user
course_id (unicode): ID associated with the course
checkpoint (str): Checkpoint name
Returns:
None
"""
log.info(
u"In-course reverification: event %s occurred for user '%s' in course '%s' at checkpoint '%s'",
event_name, user_id, course_id, checkpoint
)
if settings.FEATURES.get('SEGMENT_IO_LMS') and hasattr(settings, 'SEGMENT_IO_LMS_KEY'):
tracking_context = tracker.get_tracker().resolve_context()
analytics.track(
user_id,
event_name,
{
'category': "verification",
'label': unicode(course_id),
'checkpoint': checkpoint
},
context={
'Google Analytics': {
'clientId': tracking_context.get('client_id')
}
}
)
def _redirect_to_initial_verification(self, user, course_key, checkpoint):
"""
Redirect because the user does not have an initial verification.
We will redirect the user to the initial verification flow,
passing the identifier for this checkpoint. When the user
submits a verification attempt, it will count for *both*
the initial and checkpoint verification.
Arguments:
user (User): The user who made the request.
course_key (CourseKey): The identifier for the course for which
the user is attempting to re-verify.
checkpoint (string): Location of the checkpoint in the courseware.
Returns:
HttpResponse
"""
log.info(
u"User %s does not have an initial verification, so "
u"he/she will be redirected to the \"verify later\" flow "
u"for the course %s.",
user.id, course_key
)
base_url = reverse('verify_student_verify_now', kwargs={'course_id': unicode(course_key)})
params = urllib.urlencode({"checkpoint": checkpoint})
full_url = u"{base}?{params}".format(base=base_url, params=params)
return redirect(full_url)
|
agpl-3.0
|
cobalys/django
|
django/contrib/webdesign/lorem_ipsum.py
|
230
|
4908
|
"""
Utility functions for generating "lorem ipsum" Latin text.
"""
from __future__ import unicode_literals
import random
COMMON_P = 'Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.'
WORDS = ('exercitationem', 'perferendis', 'perspiciatis', 'laborum', 'eveniet',
'sunt', 'iure', 'nam', 'nobis', 'eum', 'cum', 'officiis', 'excepturi',
'odio', 'consectetur', 'quasi', 'aut', 'quisquam', 'vel', 'eligendi',
'itaque', 'non', 'odit', 'tempore', 'quaerat', 'dignissimos',
'facilis', 'neque', 'nihil', 'expedita', 'vitae', 'vero', 'ipsum',
'nisi', 'animi', 'cumque', 'pariatur', 'velit', 'modi', 'natus',
'iusto', 'eaque', 'sequi', 'illo', 'sed', 'ex', 'et', 'voluptatibus',
'tempora', 'veritatis', 'ratione', 'assumenda', 'incidunt', 'nostrum',
'placeat', 'aliquid', 'fuga', 'provident', 'praesentium', 'rem',
'necessitatibus', 'suscipit', 'adipisci', 'quidem', 'possimus',
'voluptas', 'debitis', 'sint', 'accusantium', 'unde', 'sapiente',
'voluptate', 'qui', 'aspernatur', 'laudantium', 'soluta', 'amet',
'quo', 'aliquam', 'saepe', 'culpa', 'libero', 'ipsa', 'dicta',
'reiciendis', 'nesciunt', 'doloribus', 'autem', 'impedit', 'minima',
'maiores', 'repudiandae', 'ipsam', 'obcaecati', 'ullam', 'enim',
'totam', 'delectus', 'ducimus', 'quis', 'voluptates', 'dolores',
'molestiae', 'harum', 'dolorem', 'quia', 'voluptatem', 'molestias',
'magni', 'distinctio', 'omnis', 'illum', 'dolorum', 'voluptatum', 'ea',
'quas', 'quam', 'corporis', 'quae', 'blanditiis', 'atque', 'deserunt',
'laboriosam', 'earum', 'consequuntur', 'hic', 'cupiditate',
'quibusdam', 'accusamus', 'ut', 'rerum', 'error', 'minus', 'eius',
'ab', 'ad', 'nemo', 'fugit', 'officia', 'at', 'in', 'id', 'quos',
'reprehenderit', 'numquam', 'iste', 'fugiat', 'sit', 'inventore',
'beatae', 'repellendus', 'magnam', 'recusandae', 'quod', 'explicabo',
'doloremque', 'aperiam', 'consequatur', 'asperiores', 'commodi',
'optio', 'dolor', 'labore', 'temporibus', 'repellat', 'veniam',
'architecto', 'est', 'esse', 'mollitia', 'nulla', 'a', 'similique',
'eos', 'alias', 'dolore', 'tenetur', 'deleniti', 'porro', 'facere',
'maxime', 'corrupti')
COMMON_WORDS = ('lorem', 'ipsum', 'dolor', 'sit', 'amet', 'consectetur',
'adipisicing', 'elit', 'sed', 'do', 'eiusmod', 'tempor', 'incididunt',
'ut', 'labore', 'et', 'dolore', 'magna', 'aliqua')
def sentence():
"""
Returns a randomly generated sentence of lorem ipsum text.
The first word is capitalized, and the sentence ends in either a period or
question mark. Commas are added at random.
"""
# Determine the number of comma-separated sections and number of words in
# each section for this sentence.
sections = [' '.join(random.sample(WORDS, random.randint(3, 12))) for i in range(random.randint(1, 5))]
s = ', '.join(sections)
# Convert to sentence case and add end punctuation.
return '%s%s%s' % (s[0].upper(), s[1:], random.choice('?.'))
def paragraph():
"""
Returns a randomly generated paragraph of lorem ipsum text.
The paragraph consists of between 1 and 4 sentences, inclusive.
"""
return ' '.join([sentence() for i in range(random.randint(1, 4))])
def paragraphs(count, common=True):
"""
Returns a list of paragraphs as returned by paragraph().
If `common` is True, then the first paragraph will be the standard
'lorem ipsum' paragraph. Otherwise, the first paragraph will be random
Latin text. Either way, subsequent paragraphs will be random Latin text.
"""
paras = []
for i in range(count):
if common and i == 0:
paras.append(COMMON_P)
else:
paras.append(paragraph())
return paras
def words(count, common=True):
"""
Returns a string of `count` lorem ipsum words separated by a single space.
If `common` is True, then the first 19 words will be the standard
'lorem ipsum' words. Otherwise, all words will be selected randomly.
"""
if common:
word_list = list(COMMON_WORDS)
else:
word_list = []
c = len(word_list)
if count > c:
count -= c
while count > 0:
c = min(count, len(WORDS))
count -= c
word_list += random.sample(WORDS, c)
else:
word_list = word_list[:count]
return ' '.join(word_list)
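# Illustrative usage (added for clarity, not part of the original module). With
# common=True and a count of at most 19, words() is deterministic because it
# simply slices COMMON_WORDS; the other helpers are randomized:
#
#     >>> words(5)
#     'lorem ipsum dolor sit amet'
#     >>> len(paragraphs(3))   # first paragraph is COMMON_P, the rest are random Latin
#     3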
|
bsd-3-clause
|
phalax4/CarnotKE
|
jyhton/lib-python/2.7/tarfile.py
|
8
|
89024
|
#!/usr/bin/env python
# -*- coding: iso-8859-1 -*-
#-------------------------------------------------------------------
# tarfile.py
#-------------------------------------------------------------------
# Copyright (C) 2002 Lars Gustäbel <[email protected]>
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
"""Read from and write to tar format archives.
"""
__version__ = "$Revision: 85213 $"
# $Source$
version = "0.9.0"
__author__ = "Lars Gustรคbel ([email protected])"
__date__ = "$Date$"
__cvsid__ = "$Id$"
__credits__ = "Gustavo Niemeyer, Niels Gustรคbel, Richard Townsend."
#---------
# Imports
#---------
import sys
import os
import shutil
import stat
import errno
import time
import struct
import copy
import re
import operator
try:
import grp, pwd
except ImportError:
grp = pwd = None
# from tarfile import *
__all__ = ["TarFile", "TarInfo", "is_tarfile", "TarError"]
#---------------------------------------------------------
# tar constants
#---------------------------------------------------------
NUL = "\0" # the null character
BLOCKSIZE = 512 # length of processing blocks
RECORDSIZE = BLOCKSIZE * 20 # length of records
GNU_MAGIC = "ustar \0" # magic gnu tar string
POSIX_MAGIC = "ustar\x0000" # magic posix tar string
LENGTH_NAME = 100 # maximum length of a filename
LENGTH_LINK = 100 # maximum length of a linkname
LENGTH_PREFIX = 155 # maximum length of the prefix field
REGTYPE = "0" # regular file
AREGTYPE = "\0" # regular file
LNKTYPE = "1" # link (inside tarfile)
SYMTYPE = "2" # symbolic link
CHRTYPE = "3" # character special device
BLKTYPE = "4" # block special device
DIRTYPE = "5" # directory
FIFOTYPE = "6" # fifo special device
CONTTYPE = "7" # contiguous file
GNUTYPE_LONGNAME = "L" # GNU tar longname
GNUTYPE_LONGLINK = "K" # GNU tar longlink
GNUTYPE_SPARSE = "S" # GNU tar sparse file
XHDTYPE = "x" # POSIX.1-2001 extended header
XGLTYPE = "g" # POSIX.1-2001 global header
SOLARIS_XHDTYPE = "X" # Solaris extended header
USTAR_FORMAT = 0 # POSIX.1-1988 (ustar) format
GNU_FORMAT = 1 # GNU tar format
PAX_FORMAT = 2 # POSIX.1-2001 (pax) format
DEFAULT_FORMAT = GNU_FORMAT
#---------------------------------------------------------
# tarfile constants
#---------------------------------------------------------
# File types that tarfile supports:
SUPPORTED_TYPES = (REGTYPE, AREGTYPE, LNKTYPE,
SYMTYPE, DIRTYPE, FIFOTYPE,
CONTTYPE, CHRTYPE, BLKTYPE,
GNUTYPE_LONGNAME, GNUTYPE_LONGLINK,
GNUTYPE_SPARSE)
# File types that will be treated as a regular file.
REGULAR_TYPES = (REGTYPE, AREGTYPE,
CONTTYPE, GNUTYPE_SPARSE)
# File types that are part of the GNU tar format.
GNU_TYPES = (GNUTYPE_LONGNAME, GNUTYPE_LONGLINK,
GNUTYPE_SPARSE)
# Fields from a pax header that override a TarInfo attribute.
PAX_FIELDS = ("path", "linkpath", "size", "mtime",
"uid", "gid", "uname", "gname")
# Fields in a pax header that are numbers, all other fields
# are treated as strings.
PAX_NUMBER_FIELDS = {
"atime": float,
"ctime": float,
"mtime": float,
"uid": int,
"gid": int,
"size": int
}
#---------------------------------------------------------
# Bits used in the mode field, values in octal.
#---------------------------------------------------------
S_IFLNK = 0120000 # symbolic link
S_IFREG = 0100000 # regular file
S_IFBLK = 0060000 # block device
S_IFDIR = 0040000 # directory
S_IFCHR = 0020000 # character device
S_IFIFO = 0010000 # fifo
TSUID = 04000 # set UID on execution
TSGID = 02000 # set GID on execution
TSVTX = 01000 # reserved
TUREAD = 0400 # read by owner
TUWRITE = 0200 # write by owner
TUEXEC = 0100 # execute/search by owner
TGREAD = 0040 # read by group
TGWRITE = 0020 # write by group
TGEXEC = 0010 # execute/search by group
TOREAD = 0004 # read by other
TOWRITE = 0002 # write by other
TOEXEC = 0001 # execute/search by other
#---------------------------------------------------------
# initialization
#---------------------------------------------------------
ENCODING = sys.getfilesystemencoding()
if ENCODING is None:
ENCODING = sys.getdefaultencoding()
#---------------------------------------------------------
# Some useful functions
#---------------------------------------------------------
def stn(s, length):
"""Convert a python string to a null-terminated string buffer.
"""
return s[:length] + (length - len(s)) * NUL
def nts(s):
"""Convert a null-terminated string field to a python string.
"""
# Use the string up to the first null char.
p = s.find("\0")
if p == -1:
return s
return s[:p]
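# Worked example (added for clarity, not part of the original source): stn() and
# nts() are near-inverses -- one pads to the field length with NULs, the other
# returns the text up to the first NUL:
#
#     stn("usr", 8)        ->  "usr\0\0\0\0\0"
#     nts("usr\0\0\0\0\0") ->  "usr"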
def nti(s):
"""Convert a number field to a python number.
"""
# There are two possible encodings for a number field, see
# itn() below.
if s[0] != chr(0200):
try:
n = int(nts(s) or "0", 8)
except ValueError:
raise InvalidHeaderError("invalid header")
else:
n = 0L
for i in xrange(len(s) - 1):
n <<= 8
n += ord(s[i + 1])
return n
def itn(n, digits=8, format=DEFAULT_FORMAT):
"""Convert a python number to a number field.
"""
# POSIX 1003.1-1988 requires numbers to be encoded as a string of
# octal digits followed by a null-byte, this allows values up to
# (8**(digits-1))-1. GNU tar allows storing numbers greater than
# that if necessary. A leading 0200 byte indicates this particular
# encoding, the following digits-1 bytes are a big-endian
# representation. This allows values up to (256**(digits-1))-1.
if 0 <= n < 8 ** (digits - 1):
s = "%0*o" % (digits - 1, n) + NUL
else:
if format != GNU_FORMAT or n >= 256 ** (digits - 1):
raise ValueError("overflow in number field")
if n < 0:
# XXX We mimic GNU tar's behaviour with negative numbers,
# this could raise OverflowError.
n = struct.unpack("L", struct.pack("l", n))[0]
s = ""
for i in xrange(digits - 1):
s = chr(n & 0377) + s
n >>= 8
s = chr(0200) + s
return s
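# Worked example (added for clarity, not part of the original source) for an
# 8-byte number field with the default GNU format:
#
#     itn(493)     ->  "0000755\0"   # 493 == 0755, fits in seven octal digits plus NUL
#     itn(8 ** 7)  ->  "\x80\x00\x00\x00\x00\x20\x00\x00"
#                                    # too large for octal: a leading 0200 marker byte
#                                    # followed by a 7-byte big-endian value
#     nti(itn(8 ** 7)) == 8 ** 7     # nti() understands either encoding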
def uts(s, encoding, errors):
"""Convert a unicode object to a string.
"""
if errors == "utf-8":
# An extra error handler similar to the -o invalid=UTF-8 option
# in POSIX.1-2001. Replace untranslatable characters with their
# UTF-8 representation.
try:
return s.encode(encoding, "strict")
except UnicodeEncodeError:
x = []
for c in s:
try:
x.append(c.encode(encoding, "strict"))
except UnicodeEncodeError:
x.append(c.encode("utf8"))
return "".join(x)
else:
return s.encode(encoding, errors)
def calc_chksums(buf):
"""Calculate the checksum for a member's header by summing up all
characters except for the chksum field which is treated as if
it was filled with spaces. According to the GNU tar sources,
some tars (Sun and NeXT) calculate chksum with signed char,
which will be different if there are chars in the buffer with
the high bit set. So we calculate two checksums, unsigned and
signed.
"""
unsigned_chksum = 256 + sum(struct.unpack("148B", buf[:148]) + struct.unpack("356B", buf[156:512]))
signed_chksum = 256 + sum(struct.unpack("148b", buf[:148]) + struct.unpack("356b", buf[156:512]))
return unsigned_chksum, signed_chksum
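# Worked example (added for clarity, not part of the original source): for a header
# block consisting entirely of spaces, both checksums are 256 + 504 * 32 == 16384;
# the constant 256 stands for the eight-byte chksum field itself counted as spaces:
#
#     calc_chksums(" " * BLOCKSIZE)   ->  (16384, 16384)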
def copyfileobj(src, dst, length=None):
"""Copy length bytes from fileobj src to fileobj dst.
If length is None, copy the entire content.
"""
if length == 0:
return
if length is None:
shutil.copyfileobj(src, dst)
return
BUFSIZE = 16 * 1024
blocks, remainder = divmod(length, BUFSIZE)
for b in xrange(blocks):
buf = src.read(BUFSIZE)
if len(buf) < BUFSIZE:
raise IOError("end of file reached")
dst.write(buf)
if remainder != 0:
buf = src.read(remainder)
if len(buf) < remainder:
raise IOError("end of file reached")
dst.write(buf)
return
filemode_table = (
((S_IFLNK, "l"),
(S_IFREG, "-"),
(S_IFBLK, "b"),
(S_IFDIR, "d"),
(S_IFCHR, "c"),
(S_IFIFO, "p")),
((TUREAD, "r"),),
((TUWRITE, "w"),),
((TUEXEC|TSUID, "s"),
(TSUID, "S"),
(TUEXEC, "x")),
((TGREAD, "r"),),
((TGWRITE, "w"),),
((TGEXEC|TSGID, "s"),
(TSGID, "S"),
(TGEXEC, "x")),
((TOREAD, "r"),),
((TOWRITE, "w"),),
((TOEXEC|TSVTX, "t"),
(TSVTX, "T"),
(TOEXEC, "x"))
)
def filemode(mode):
"""Convert a file's mode to a string of the form
-rwxrwxrwx.
Used by TarFile.list()
"""
perm = []
for table in filemode_table:
for bit, char in table:
if mode & bit == bit:
perm.append(char)
break
else:
perm.append("-")
return "".join(perm)
class TarError(Exception):
"""Base exception."""
pass
class ExtractError(TarError):
"""General exception for extract errors."""
pass
class ReadError(TarError):
"""Exception for unreadble tar archives."""
pass
class CompressionError(TarError):
"""Exception for unavailable compression methods."""
pass
class StreamError(TarError):
"""Exception for unsupported operations on stream-like TarFiles."""
pass
class HeaderError(TarError):
"""Base exception for header errors."""
pass
class EmptyHeaderError(HeaderError):
"""Exception for empty headers."""
pass
class TruncatedHeaderError(HeaderError):
"""Exception for truncated headers."""
pass
class EOFHeaderError(HeaderError):
"""Exception for end of file headers."""
pass
class InvalidHeaderError(HeaderError):
"""Exception for invalid headers."""
pass
class SubsequentHeaderError(HeaderError):
"""Exception for missing and invalid extended headers."""
pass
#---------------------------
# internal stream interface
#---------------------------
class _LowLevelFile:
"""Low-level file object. Supports reading and writing.
It is used instead of a regular file object for streaming
access.
"""
def __init__(self, name, mode):
mode = {
"r": os.O_RDONLY,
"w": os.O_WRONLY | os.O_CREAT | os.O_TRUNC,
}[mode]
if hasattr(os, "O_BINARY"):
mode |= os.O_BINARY
self.fd = os.open(name, mode, 0666)
def close(self):
os.close(self.fd)
def read(self, size):
return os.read(self.fd, size)
def write(self, s):
os.write(self.fd, s)
class _Stream:
"""Class that serves as an adapter between TarFile and
a stream-like object. The stream-like object only
needs to have a read() or write() method and is accessed
blockwise. Use of gzip or bzip2 compression is possible.
A stream-like object could be for example: sys.stdin,
sys.stdout, a socket, a tape device etc.
_Stream is intended to be used only internally.
"""
def __init__(self, name, mode, comptype, fileobj, bufsize):
"""Construct a _Stream object.
"""
self._extfileobj = True
if fileobj is None:
fileobj = _LowLevelFile(name, mode)
self._extfileobj = False
if comptype == '*':
# Enable transparent compression detection for the
# stream interface
fileobj = _StreamProxy(fileobj)
comptype = fileobj.getcomptype()
self.name = name or ""
self.mode = mode
self.comptype = comptype
self.fileobj = fileobj
self.bufsize = bufsize
self.buf = ""
self.pos = 0L
self.closed = False
if comptype == "gz":
try:
import zlib
except ImportError:
raise CompressionError("zlib module is not available")
self.zlib = zlib
self.crc = zlib.crc32("") & 0xffffffffL
if mode == "r":
self._init_read_gz()
else:
self._init_write_gz()
if comptype == "bz2":
try:
import bz2
except ImportError:
raise CompressionError("bz2 module is not available")
if mode == "r":
self.dbuf = ""
self.cmp = bz2.BZ2Decompressor()
else:
self.cmp = bz2.BZ2Compressor()
def __del__(self):
if hasattr(self, "closed") and not self.closed:
self.close()
def _init_write_gz(self):
"""Initialize for writing with gzip compression.
"""
self.cmp = self.zlib.compressobj(9, self.zlib.DEFLATED,
-self.zlib.MAX_WBITS,
self.zlib.DEF_MEM_LEVEL,
0)
timestamp = struct.pack("<L", long(time.time()))
self.__write("\037\213\010\010%s\002\377" % timestamp)
if type(self.name) is unicode:
self.name = self.name.encode("iso-8859-1", "replace")
if self.name.endswith(".gz"):
self.name = self.name[:-3]
self.__write(self.name + NUL)
def write(self, s):
"""Write string s to the stream.
"""
if self.comptype == "gz":
self.crc = self.zlib.crc32(s, self.crc) & 0xffffffffL
self.pos += len(s)
if self.comptype != "tar":
s = self.cmp.compress(s)
self.__write(s)
def __write(self, s):
"""Write string s to the stream if a whole new block
is ready to be written.
"""
self.buf += s
while len(self.buf) > self.bufsize:
self.fileobj.write(self.buf[:self.bufsize])
self.buf = self.buf[self.bufsize:]
def close(self):
"""Close the _Stream object. No operation should be
done on it afterwards.
"""
if self.closed:
return
if self.mode == "w" and self.comptype != "tar":
self.buf += self.cmp.flush()
if self.mode == "w" and self.buf:
self.fileobj.write(self.buf)
self.buf = ""
if self.comptype == "gz":
# The native zlib crc is an unsigned 32-bit integer, but
# the Python wrapper implicitly casts that to a signed C
# long. So, on a 32-bit box self.crc may "look negative",
# while the same crc on a 64-bit box may "look positive".
# To avoid irksome warnings from the `struct` module, force
# it to look positive on all boxes.
self.fileobj.write(struct.pack("<L", self.crc & 0xffffffffL))
self.fileobj.write(struct.pack("<L", self.pos & 0xffffFFFFL))
if not self._extfileobj:
self.fileobj.close()
self.closed = True
def _init_read_gz(self):
"""Initialize for reading a gzip compressed fileobj.
"""
self.cmp = self.zlib.decompressobj(-self.zlib.MAX_WBITS)
self.dbuf = ""
# taken from gzip.GzipFile with some alterations
if self.__read(2) != "\037\213":
raise ReadError("not a gzip file")
if self.__read(1) != "\010":
raise CompressionError("unsupported compression method")
flag = ord(self.__read(1))
self.__read(6)
if flag & 4:
xlen = ord(self.__read(1)) + 256 * ord(self.__read(1))
self.read(xlen)
if flag & 8:
while True:
s = self.__read(1)
if not s or s == NUL:
break
if flag & 16:
while True:
s = self.__read(1)
if not s or s == NUL:
break
if flag & 2:
self.__read(2)
def tell(self):
"""Return the stream's file pointer position.
"""
return self.pos
def seek(self, pos=0):
"""Set the stream's file pointer to pos. Negative seeking
is forbidden.
"""
if pos - self.pos >= 0:
blocks, remainder = divmod(pos - self.pos, self.bufsize)
for i in xrange(blocks):
self.read(self.bufsize)
self.read(remainder)
else:
raise StreamError("seeking backwards is not allowed")
return self.pos
def read(self, size=None):
"""Return the next size number of bytes from the stream.
If size is not defined, return all bytes of the stream
up to EOF.
"""
if size is None:
t = []
while True:
buf = self._read(self.bufsize)
if not buf:
break
t.append(buf)
buf = "".join(t)
else:
buf = self._read(size)
self.pos += len(buf)
return buf
def _read(self, size):
"""Return size bytes from the stream.
"""
if self.comptype == "tar":
return self.__read(size)
c = len(self.dbuf)
t = [self.dbuf]
while c < size:
buf = self.__read(self.bufsize)
if not buf:
break
try:
buf = self.cmp.decompress(buf)
except IOError:
raise ReadError("invalid compressed data")
t.append(buf)
c += len(buf)
t = "".join(t)
self.dbuf = t[size:]
return t[:size]
def __read(self, size):
"""Return size bytes from stream. If internal buffer is empty,
read another block from the stream.
"""
c = len(self.buf)
t = [self.buf]
while c < size:
buf = self.fileobj.read(self.bufsize)
if not buf:
break
t.append(buf)
c += len(buf)
t = "".join(t)
self.buf = t[size:]
return t[:size]
# class _Stream
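# Illustrative stream usage (added for clarity, not part of the original source).
# The pipe modes of TarFile.open() -- "r|", "r|gz", "w|bz2", "r|*" and friends --
# wrap the file object in a _Stream, which is why only forward, block-wise access
# (and no backwards seeking) is possible in those modes:
#
#     import sys
#     tar = TarFile.open(fileobj=sys.stdin, mode="r|*")   # read a tar stream from stdin
#     for tarinfo in tar:
#         print tarinfo.name
#     tar.close()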
class _StreamProxy(object):
"""Small proxy class that enables transparent compression
detection for the Stream interface (mode 'r|*').
"""
def __init__(self, fileobj):
self.fileobj = fileobj
self.buf = self.fileobj.read(BLOCKSIZE)
def read(self, size):
self.read = self.fileobj.read
return self.buf
def getcomptype(self):
if self.buf.startswith("\037\213\010"):
return "gz"
if self.buf[0:3] == "BZh" and self.buf[4:10] == "1AY&SY":
return "bz2"
return "tar"
def close(self):
self.fileobj.close()
# class StreamProxy
class _BZ2Proxy(object):
"""Small proxy class that enables external file object
support for "r:bz2" and "w:bz2" modes. This is actually
a workaround for a limitation in bz2 module's BZ2File
class which (unlike gzip.GzipFile) has no support for
a file object argument.
"""
blocksize = 16 * 1024
def __init__(self, fileobj, mode):
self.fileobj = fileobj
self.mode = mode
self.name = getattr(self.fileobj, "name", None)
self.init()
def init(self):
import bz2
self.pos = 0
if self.mode == "r":
self.bz2obj = bz2.BZ2Decompressor()
self.fileobj.seek(0)
self.buf = ""
else:
self.bz2obj = bz2.BZ2Compressor()
def read(self, size):
b = [self.buf]
x = len(self.buf)
while x < size:
raw = self.fileobj.read(self.blocksize)
if not raw:
break
data = self.bz2obj.decompress(raw)
b.append(data)
x += len(data)
self.buf = "".join(b)
buf = self.buf[:size]
self.buf = self.buf[size:]
self.pos += len(buf)
return buf
def seek(self, pos):
if pos < self.pos:
self.init()
self.read(pos - self.pos)
def tell(self):
return self.pos
def write(self, data):
self.pos += len(data)
raw = self.bz2obj.compress(data)
self.fileobj.write(raw)
def close(self):
if self.mode == "w":
raw = self.bz2obj.flush()
self.fileobj.write(raw)
# class _BZ2Proxy
#------------------------
# Extraction file object
#------------------------
class _FileInFile(object):
"""A thin wrapper around an existing file object that
provides a part of its data as an individual file
object.
"""
def __init__(self, fileobj, offset, size, sparse=None):
self.fileobj = fileobj
self.offset = offset
self.size = size
self.sparse = sparse
self.position = 0
def tell(self):
"""Return the current file position.
"""
return self.position
def seek(self, position):
"""Seek to a position in the file.
"""
self.position = position
def read(self, size=None):
"""Read data from the file.
"""
if size is None:
size = self.size - self.position
else:
size = min(size, self.size - self.position)
if self.sparse is None:
return self.readnormal(size)
else:
return self.readsparse(size)
def readnormal(self, size):
"""Read operation for regular files.
"""
self.fileobj.seek(self.offset + self.position)
self.position += size
return self.fileobj.read(size)
def readsparse(self, size):
"""Read operation for sparse files.
"""
data = []
while size > 0:
buf = self.readsparsesection(size)
if not buf:
break
size -= len(buf)
data.append(buf)
return "".join(data)
def readsparsesection(self, size):
"""Read a single section of a sparse file.
"""
section = self.sparse.find(self.position)
if section is None:
return ""
size = min(size, section.offset + section.size - self.position)
if isinstance(section, _data):
realpos = section.realpos + self.position - section.offset
self.fileobj.seek(self.offset + realpos)
self.position += size
return self.fileobj.read(size)
else:
self.position += size
return NUL * size
#class _FileInFile
class ExFileObject(object):
"""File-like object for reading an archive member.
Is returned by TarFile.extractfile().
"""
blocksize = 1024
def __init__(self, tarfile, tarinfo):
self.fileobj = _FileInFile(tarfile.fileobj,
tarinfo.offset_data,
tarinfo.size,
getattr(tarinfo, "sparse", None))
self.name = tarinfo.name
self.mode = "r"
self.closed = False
self.size = tarinfo.size
self.position = 0
self.buffer = ""
def read(self, size=None):
"""Read at most size bytes from the file. If size is not
present or None, read all data until EOF is reached.
"""
if self.closed:
raise ValueError("I/O operation on closed file")
buf = ""
if self.buffer:
if size is None:
buf = self.buffer
self.buffer = ""
else:
buf = self.buffer[:size]
self.buffer = self.buffer[size:]
if size is None:
buf += self.fileobj.read()
else:
buf += self.fileobj.read(size - len(buf))
self.position += len(buf)
return buf
def readline(self, size=-1):
"""Read one entire line from the file. If size is present
and non-negative, return a string with at most that
size, which may be an incomplete line.
"""
if self.closed:
raise ValueError("I/O operation on closed file")
if "\n" in self.buffer:
pos = self.buffer.find("\n") + 1
else:
buffers = [self.buffer]
while True:
buf = self.fileobj.read(self.blocksize)
buffers.append(buf)
if not buf or "\n" in buf:
self.buffer = "".join(buffers)
pos = self.buffer.find("\n") + 1
if pos == 0:
# no newline found.
pos = len(self.buffer)
break
if size != -1:
pos = min(size, pos)
buf = self.buffer[:pos]
self.buffer = self.buffer[pos:]
self.position += len(buf)
return buf
def readlines(self):
"""Return a list with all remaining lines.
"""
result = []
while True:
line = self.readline()
if not line: break
result.append(line)
return result
def tell(self):
"""Return the current file position.
"""
if self.closed:
raise ValueError("I/O operation on closed file")
return self.position
def seek(self, pos, whence=os.SEEK_SET):
"""Seek to a position in the file.
"""
if self.closed:
raise ValueError("I/O operation on closed file")
if whence == os.SEEK_SET:
self.position = min(max(pos, 0), self.size)
elif whence == os.SEEK_CUR:
if pos < 0:
self.position = max(self.position + pos, 0)
else:
self.position = min(self.position + pos, self.size)
elif whence == os.SEEK_END:
self.position = max(min(self.size + pos, self.size), 0)
else:
raise ValueError("Invalid argument")
self.buffer = ""
self.fileobj.seek(self.position)
def close(self):
"""Close the file object.
"""
self.closed = True
def __iter__(self):
"""Get an iterator over the file's lines.
"""
while True:
line = self.readline()
if not line:
break
yield line
#class ExFileObject
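# Illustrative use of ExFileObject (added for clarity, not part of the original
# source). TarFile.extractfile() returns one of these for regular members, so a
# member can be read without extracting it to disk (names below are hypothetical):
#
#     tar = TarFile.open("example.tar.gz", "r:gz")
#     fileobj = tar.extractfile("docs/readme.txt")   # an ExFileObject
#     first_line = fileobj.readline()
#     tar.close()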
#------------------
# Exported Classes
#------------------
class TarInfo(object):
"""Informational class which holds the details about an
archive member given by a tar header block.
TarInfo objects are returned by TarFile.getmember(),
TarFile.getmembers() and TarFile.gettarinfo() and are
usually created internally.
"""
def __init__(self, name=""):
"""Construct a TarInfo object. name is the optional name
of the member.
"""
self.name = name # member name
self.mode = 0644 # file permissions
self.uid = 0 # user id
self.gid = 0 # group id
self.size = 0 # file size
self.mtime = 0 # modification time
self.chksum = 0 # header checksum
self.type = REGTYPE # member type
self.linkname = "" # link name
self.uname = "" # user name
self.gname = "" # group name
self.devmajor = 0 # device major number
self.devminor = 0 # device minor number
self.offset = 0 # the tar header starts here
self.offset_data = 0 # the file's data starts here
self.pax_headers = {} # pax header information
# In pax headers the "name" and "linkname" field are called
# "path" and "linkpath".
def _getpath(self):
return self.name
def _setpath(self, name):
self.name = name
path = property(_getpath, _setpath)
def _getlinkpath(self):
return self.linkname
def _setlinkpath(self, linkname):
self.linkname = linkname
linkpath = property(_getlinkpath, _setlinkpath)
def __repr__(self):
return "<%s %r at %#x>" % (self.__class__.__name__,self.name,id(self))
def get_info(self, encoding, errors):
"""Return the TarInfo's attributes as a dictionary.
"""
info = {
"name": self.name,
"mode": self.mode & 07777,
"uid": self.uid,
"gid": self.gid,
"size": self.size,
"mtime": self.mtime,
"chksum": self.chksum,
"type": self.type,
"linkname": self.linkname,
"uname": self.uname,
"gname": self.gname,
"devmajor": self.devmajor,
"devminor": self.devminor
}
if info["type"] == DIRTYPE and not info["name"].endswith("/"):
info["name"] += "/"
for key in ("name", "linkname", "uname", "gname"):
if type(info[key]) is unicode:
info[key] = info[key].encode(encoding, errors)
return info
def tobuf(self, format=DEFAULT_FORMAT, encoding=ENCODING, errors="strict"):
"""Return a tar header as a string of 512 byte blocks.
"""
info = self.get_info(encoding, errors)
if format == USTAR_FORMAT:
return self.create_ustar_header(info)
elif format == GNU_FORMAT:
return self.create_gnu_header(info)
elif format == PAX_FORMAT:
return self.create_pax_header(info, encoding, errors)
else:
raise ValueError("invalid format")
def create_ustar_header(self, info):
"""Return the object as a ustar header block.
"""
info["magic"] = POSIX_MAGIC
if len(info["linkname"]) > LENGTH_LINK:
raise ValueError("linkname is too long")
if len(info["name"]) > LENGTH_NAME:
info["prefix"], info["name"] = self._posix_split_name(info["name"])
return self._create_header(info, USTAR_FORMAT)
def create_gnu_header(self, info):
"""Return the object as a GNU header block sequence.
"""
info["magic"] = GNU_MAGIC
buf = ""
if len(info["linkname"]) > LENGTH_LINK:
buf += self._create_gnu_long_header(info["linkname"], GNUTYPE_LONGLINK)
if len(info["name"]) > LENGTH_NAME:
buf += self._create_gnu_long_header(info["name"], GNUTYPE_LONGNAME)
return buf + self._create_header(info, GNU_FORMAT)
def create_pax_header(self, info, encoding, errors):
"""Return the object as a ustar header block. If it cannot be
represented this way, prepend a pax extended header sequence
with supplement information.
"""
info["magic"] = POSIX_MAGIC
pax_headers = self.pax_headers.copy()
# Test string fields for values that exceed the field length or cannot
# be represented in ASCII encoding.
for name, hname, length in (
("name", "path", LENGTH_NAME), ("linkname", "linkpath", LENGTH_LINK),
("uname", "uname", 32), ("gname", "gname", 32)):
if hname in pax_headers:
# The pax header has priority.
continue
val = info[name].decode(encoding, errors)
# Try to encode the string as ASCII.
try:
val.encode("ascii")
except UnicodeEncodeError:
pax_headers[hname] = val
continue
if len(info[name]) > length:
pax_headers[hname] = val
# Test number fields for values that exceed the field limit or values
# that like to be stored as float.
for name, digits in (("uid", 8), ("gid", 8), ("size", 12), ("mtime", 12)):
if name in pax_headers:
# The pax header has priority. Avoid overflow.
info[name] = 0
continue
val = info[name]
if not 0 <= val < 8 ** (digits - 1) or isinstance(val, float):
pax_headers[name] = unicode(val)
info[name] = 0
# Create a pax extended header if necessary.
if pax_headers:
buf = self._create_pax_generic_header(pax_headers)
else:
buf = ""
return buf + self._create_header(info, USTAR_FORMAT)
@classmethod
def create_pax_global_header(cls, pax_headers):
"""Return the object as a pax global header block sequence.
"""
return cls._create_pax_generic_header(pax_headers, type=XGLTYPE)
def _posix_split_name(self, name):
"""Split a name longer than 100 chars into a prefix
and a name part.
"""
prefix = name[:LENGTH_PREFIX + 1]
while prefix and prefix[-1] != "/":
prefix = prefix[:-1]
name = name[len(prefix):]
prefix = prefix[:-1]
if not prefix or len(name) > LENGTH_NAME:
raise ValueError("name is too long")
return prefix, name
@staticmethod
def _create_header(info, format):
"""Return a header block. info is a dictionary with file
information, format must be one of the *_FORMAT constants.
"""
parts = [
stn(info.get("name", ""), 100),
itn(info.get("mode", 0) & 07777, 8, format),
itn(info.get("uid", 0), 8, format),
itn(info.get("gid", 0), 8, format),
itn(info.get("size", 0), 12, format),
itn(info.get("mtime", 0), 12, format),
" ", # checksum field
info.get("type", REGTYPE),
stn(info.get("linkname", ""), 100),
stn(info.get("magic", POSIX_MAGIC), 8),
stn(info.get("uname", ""), 32),
stn(info.get("gname", ""), 32),
itn(info.get("devmajor", 0), 8, format),
itn(info.get("devminor", 0), 8, format),
stn(info.get("prefix", ""), 155)
]
buf = struct.pack("%ds" % BLOCKSIZE, "".join(parts))
chksum = calc_chksums(buf[-BLOCKSIZE:])[0]
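# Assumed layout note: the checksum field occupies bytes 148-155 of the
# 512-byte header (512 - 364 = 148, 512 - 357 = 155); six octal digits plus
# a NUL are written there and the final placeholder byte is kept.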
buf = buf[:-364] + "%06o\0" % chksum + buf[-357:]
return buf
@staticmethod
def _create_payload(payload):
"""Return the string payload filled with zero bytes
up to the next 512 byte border.
"""
blocks, remainder = divmod(len(payload), BLOCKSIZE)
if remainder > 0:
payload += (BLOCKSIZE - remainder) * NUL
return payload
@classmethod
def _create_gnu_long_header(cls, name, type):
"""Return a GNUTYPE_LONGNAME or GNUTYPE_LONGLINK sequence
for name.
"""
name += NUL
info = {}
info["name"] = "././@LongLink"
info["type"] = type
info["size"] = len(name)
info["magic"] = GNU_MAGIC
# create extended header + name blocks.
return cls._create_header(info, USTAR_FORMAT) + \
cls._create_payload(name)
@classmethod
def _create_pax_generic_header(cls, pax_headers, type=XHDTYPE):
"""Return a POSIX.1-2001 extended or global header sequence
that contains a list of keyword, value pairs. The values
must be unicode objects.
"""
records = []
for keyword, value in pax_headers.iteritems():
keyword = keyword.encode("utf8")
value = value.encode("utf8")
l = len(keyword) + len(value) + 3 # ' ' + '=' + '\n'
n = p = 0
while True:
n = l + len(str(p))
if n == p:
break
p = n
records.append("%d %s=%s\n" % (p, keyword, value))
records = "".join(records)
# We use a hardcoded "././@PaxHeader" name like star does
# instead of the one that POSIX recommends.
info = {}
info["name"] = "././@PaxHeader"
info["type"] = type
info["size"] = len(records)
info["magic"] = POSIX_MAGIC
# Create pax header + record blocks.
return cls._create_header(info, USTAR_FORMAT) + \
cls._create_payload(records)
@classmethod
def frombuf(cls, buf):
"""Construct a TarInfo object from a 512 byte string buffer.
"""
if len(buf) == 0:
raise EmptyHeaderError("empty header")
if len(buf) != BLOCKSIZE:
raise TruncatedHeaderError("truncated header")
if buf.count(NUL) == BLOCKSIZE:
raise EOFHeaderError("end of file header")
chksum = nti(buf[148:156])
if chksum not in calc_chksums(buf):
raise InvalidHeaderError("bad checksum")
obj = cls()
obj.buf = buf
obj.name = nts(buf[0:100])
obj.mode = nti(buf[100:108])
obj.uid = nti(buf[108:116])
obj.gid = nti(buf[116:124])
obj.size = nti(buf[124:136])
obj.mtime = nti(buf[136:148])
obj.chksum = chksum
obj.type = buf[156:157]
obj.linkname = nts(buf[157:257])
obj.uname = nts(buf[265:297])
obj.gname = nts(buf[297:329])
obj.devmajor = nti(buf[329:337])
obj.devminor = nti(buf[337:345])
prefix = nts(buf[345:500])
# Old V7 tar format represents a directory as a regular
# file with a trailing slash.
if obj.type == AREGTYPE and obj.name.endswith("/"):
obj.type = DIRTYPE
# Remove redundant slashes from directories.
if obj.isdir():
obj.name = obj.name.rstrip("/")
# Reconstruct a ustar longname.
if prefix and obj.type not in GNU_TYPES:
obj.name = prefix + "/" + obj.name
return obj
@classmethod
def fromtarfile(cls, tarfile):
"""Return the next TarInfo object from TarFile object
tarfile.
"""
buf = tarfile.fileobj.read(BLOCKSIZE)
obj = cls.frombuf(buf)
obj.offset = tarfile.fileobj.tell() - BLOCKSIZE
return obj._proc_member(tarfile)
#--------------------------------------------------------------------------
# The following are methods that are called depending on the type of a
# member. The entry point is _proc_member() which can be overridden in a
# subclass to add custom _proc_*() methods. A _proc_*() method MUST
# implement the following operations:
# 1. Set self.offset_data to the position where the data blocks begin,
# if there is data that follows.
# 2. Set tarfile.offset to the position where the next member's header will
# begin.
# 3. Return self or another valid TarInfo object.
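#
# As a purely illustrative sketch (MY_CUSTOM_TYPE and _proc_custom are
# made-up names, not part of this module), a subclass honoring this
# contract could look like:
#
#     class MyTarInfo(TarInfo):
#         def _proc_member(self, tarfile):
#             if self.type == MY_CUSTOM_TYPE:
#                 return self._proc_custom(tarfile)
#             return TarInfo._proc_member(self, tarfile)
#
#         def _proc_custom(self, tarfile):
#             # 1. Data blocks begin right after this header.
#             self.offset_data = tarfile.fileobj.tell()
#             # 2. The next header starts after the zero-padded data.
#             tarfile.offset = self.offset_data + self._block(self.size)
#             # 3. Return a valid TarInfo object.
#             return self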
def _proc_member(self, tarfile):
"""Choose the right processing method depending on
the type and call it.
"""
if self.type in (GNUTYPE_LONGNAME, GNUTYPE_LONGLINK):
return self._proc_gnulong(tarfile)
elif self.type == GNUTYPE_SPARSE:
return self._proc_sparse(tarfile)
elif self.type in (XHDTYPE, XGLTYPE, SOLARIS_XHDTYPE):
return self._proc_pax(tarfile)
else:
return self._proc_builtin(tarfile)
def _proc_builtin(self, tarfile):
"""Process a builtin type or an unknown type which
will be treated as a regular file.
"""
self.offset_data = tarfile.fileobj.tell()
offset = self.offset_data
if self.isreg() or self.type not in SUPPORTED_TYPES:
# Skip the following data blocks.
offset += self._block(self.size)
tarfile.offset = offset
# Patch the TarInfo object with saved global
# header information.
self._apply_pax_info(tarfile.pax_headers, tarfile.encoding, tarfile.errors)
return self
def _proc_gnulong(self, tarfile):
"""Process the blocks that hold a GNU longname
or longlink member.
"""
buf = tarfile.fileobj.read(self._block(self.size))
# Fetch the next header and process it.
try:
next = self.fromtarfile(tarfile)
except HeaderError:
raise SubsequentHeaderError("missing or bad subsequent header")
# Patch the TarInfo object from the next header with
# the longname information.
next.offset = self.offset
if self.type == GNUTYPE_LONGNAME:
next.name = nts(buf)
elif self.type == GNUTYPE_LONGLINK:
next.linkname = nts(buf)
return next
def _proc_sparse(self, tarfile):
"""Process a GNU sparse header plus extra headers.
"""
buf = self.buf
sp = _ringbuffer()
pos = 386
lastpos = 0L
realpos = 0L
# There are 4 possible sparse structs in the
# first header.
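# (Each struct is a 12-byte offset followed by a 12-byte byte count; the
# structs start at byte 386 of the header, the isextended flag sits at
# byte 482 and the real file size at bytes 483-494, as read below.)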
for i in xrange(4):
try:
offset = nti(buf[pos:pos + 12])
numbytes = nti(buf[pos + 12:pos + 24])
except ValueError:
break
if offset > lastpos:
sp.append(_hole(lastpos, offset - lastpos))
sp.append(_data(offset, numbytes, realpos))
realpos += numbytes
lastpos = offset + numbytes
pos += 24
isextended = ord(buf[482])
origsize = nti(buf[483:495])
# If the isextended flag is given,
# there are extra headers to process.
while isextended == 1:
buf = tarfile.fileobj.read(BLOCKSIZE)
pos = 0
for i in xrange(21):
try:
offset = nti(buf[pos:pos + 12])
numbytes = nti(buf[pos + 12:pos + 24])
except ValueError:
break
if offset > lastpos:
sp.append(_hole(lastpos, offset - lastpos))
sp.append(_data(offset, numbytes, realpos))
realpos += numbytes
lastpos = offset + numbytes
pos += 24
isextended = ord(buf[504])
if lastpos < origsize:
sp.append(_hole(lastpos, origsize - lastpos))
self.sparse = sp
self.offset_data = tarfile.fileobj.tell()
tarfile.offset = self.offset_data + self._block(self.size)
self.size = origsize
return self
def _proc_pax(self, tarfile):
"""Process an extended or global header as described in
POSIX.1-2001.
"""
# Read the header information.
buf = tarfile.fileobj.read(self._block(self.size))
# A pax header stores supplemental information for either
# the following file (extended) or all following files
# (global).
if self.type == XGLTYPE:
pax_headers = tarfile.pax_headers
else:
pax_headers = tarfile.pax_headers.copy()
# Parse pax header information. A record looks like this:
# "%d %s=%s\n" % (length, keyword, value). length is the size
# of the complete record including the length field itself and
# the newline. keyword and value are both UTF-8 encoded strings.
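# For example, the record "25 ctime=1084839148.1212\n" carries a length
# field of 25 because the "25 " prefix and the trailing newline are
# themselves counted.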
regex = re.compile(r"(\d+) ([^=]+)=", re.U)
pos = 0
while True:
match = regex.match(buf, pos)
if not match:
break
length, keyword = match.groups()
length = int(length)
value = buf[match.end(2) + 1:match.start(1) + length - 1]
keyword = keyword.decode("utf8")
value = value.decode("utf8")
pax_headers[keyword] = value
pos += length
# Fetch the next header.
try:
next = self.fromtarfile(tarfile)
except HeaderError:
raise SubsequentHeaderError("missing or bad subsequent header")
if self.type in (XHDTYPE, SOLARIS_XHDTYPE):
# Patch the TarInfo object with the extended header info.
next._apply_pax_info(pax_headers, tarfile.encoding, tarfile.errors)
next.offset = self.offset
if "size" in pax_headers:
# If the extended header replaces the size field,
# we need to recalculate the offset where the next
# header starts.
offset = next.offset_data
if next.isreg() or next.type not in SUPPORTED_TYPES:
offset += next._block(next.size)
tarfile.offset = offset
return next
def _apply_pax_info(self, pax_headers, encoding, errors):
"""Replace fields with supplemental information from a previous
pax extended or global header.
"""
for keyword, value in pax_headers.iteritems():
if keyword not in PAX_FIELDS:
continue
if keyword == "path":
value = value.rstrip("/")
if keyword in PAX_NUMBER_FIELDS:
try:
value = PAX_NUMBER_FIELDS[keyword](value)
except ValueError:
value = 0
else:
value = uts(value, encoding, errors)
setattr(self, keyword, value)
self.pax_headers = pax_headers.copy()
def _block(self, count):
"""Round up a byte count by BLOCKSIZE and return it,
e.g. _block(834) => 1024.
"""
blocks, remainder = divmod(count, BLOCKSIZE)
if remainder:
blocks += 1
return blocks * BLOCKSIZE
def isreg(self):
return self.type in REGULAR_TYPES
def isfile(self):
return self.isreg()
def isdir(self):
return self.type == DIRTYPE
def issym(self):
return self.type == SYMTYPE
def islnk(self):
return self.type == LNKTYPE
def ischr(self):
return self.type == CHRTYPE
def isblk(self):
return self.type == BLKTYPE
def isfifo(self):
return self.type == FIFOTYPE
def issparse(self):
return self.type == GNUTYPE_SPARSE
def isdev(self):
return self.type in (CHRTYPE, BLKTYPE, FIFOTYPE)
# class TarInfo
class TarFile(object):
"""The TarFile Class provides an interface to tar archives.
"""
debug = 0 # May be set from 0 (no msgs) to 3 (all msgs)
dereference = False # If true, add content of linked file to the
# tar file, else the link.
ignore_zeros = False # If true, skips empty or invalid blocks and
# continues processing.
errorlevel = 1 # If 0, fatal errors only appear in debug
# messages (if debug >= 0). If > 0, errors
# are passed to the caller as exceptions.
format = DEFAULT_FORMAT # The format to use when creating an archive.
encoding = ENCODING # Encoding for 8-bit character strings.
errors = None # Error handler for unicode conversion.
tarinfo = TarInfo # The default TarInfo class to use.
fileobject = ExFileObject # The default ExFileObject class to use.
def __init__(self, name=None, mode="r", fileobj=None, format=None,
tarinfo=None, dereference=None, ignore_zeros=None, encoding=None,
errors=None, pax_headers=None, debug=None, errorlevel=None):
"""Open an (uncompressed) tar archive `name'. `mode' is either 'r' to
read from an existing archive, 'a' to append data to an existing
file or 'w' to create a new file overwriting an existing one. `mode'
defaults to 'r'.
If `fileobj' is given, it is used for reading or writing data. If it
can be determined, `mode' is overridden by `fileobj's mode.
`fileobj' is not closed when TarFile is closed.
"""
if len(mode) > 1 or mode not in "raw":
raise ValueError("mode must be 'r', 'a' or 'w'")
self.mode = mode
self._mode = {"r": "rb", "a": "r+b", "w": "wb"}[mode]
if not fileobj:
if self.mode == "a" and not os.path.exists(name):
# Create nonexistent files in append mode.
self.mode = "w"
self._mode = "wb"
fileobj = bltn_open(name, self._mode)
self._extfileobj = False
else:
if name is None and hasattr(fileobj, "name"):
name = fileobj.name
if hasattr(fileobj, "mode"):
self._mode = fileobj.mode
self._extfileobj = True
self.name = os.path.abspath(name) if name else None
self.fileobj = fileobj
# Init attributes.
if format is not None:
self.format = format
if tarinfo is not None:
self.tarinfo = tarinfo
if dereference is not None:
self.dereference = dereference
if ignore_zeros is not None:
self.ignore_zeros = ignore_zeros
if encoding is not None:
self.encoding = encoding
if errors is not None:
self.errors = errors
elif mode == "r":
self.errors = "utf-8"
else:
self.errors = "strict"
if pax_headers is not None and self.format == PAX_FORMAT:
self.pax_headers = pax_headers
else:
self.pax_headers = {}
if debug is not None:
self.debug = debug
if errorlevel is not None:
self.errorlevel = errorlevel
# Init datastructures.
self.closed = False
self.members = [] # list of members as TarInfo objects
self._loaded = False # flag if all members have been read
self.offset = self.fileobj.tell()
# current position in the archive file
self.inodes = {} # dictionary caching the inodes of
# archive members already added
try:
if self.mode == "r":
self.firstmember = None
self.firstmember = self.next()
if self.mode == "a":
# Move to the end of the archive,
# before the first empty block.
while True:
self.fileobj.seek(self.offset)
try:
tarinfo = self.tarinfo.fromtarfile(self)
self.members.append(tarinfo)
except EOFHeaderError:
self.fileobj.seek(self.offset)
break
except HeaderError, e:
raise ReadError(str(e))
if self.mode in "aw":
self._loaded = True
if self.pax_headers:
buf = self.tarinfo.create_pax_global_header(self.pax_headers.copy())
self.fileobj.write(buf)
self.offset += len(buf)
except:
if not self._extfileobj:
self.fileobj.close()
self.closed = True
raise
def _getposix(self):
return self.format == USTAR_FORMAT
def _setposix(self, value):
import warnings
warnings.warn("use the format attribute instead", DeprecationWarning,
2)
if value:
self.format = USTAR_FORMAT
else:
self.format = GNU_FORMAT
posix = property(_getposix, _setposix)
#--------------------------------------------------------------------------
# Below are the classmethods which act as alternate constructors to the
# TarFile class. The open() method is the only one that is needed for
# public use; it is the "super"-constructor and is able to select an
# adequate "sub"-constructor for a particular compression using the mapping
# from OPEN_METH.
#
# This concept allows one to subclass TarFile without losing the comfort of
# the super-constructor. A sub-constructor is registered and made available
# by adding it to the mapping in OPEN_METH.
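#
# A hypothetical sketch of such a registration (the "xz" key and xzopen()
# method are made-up examples, not provided by this module):
#
#     class XZTarFile(TarFile):
#         @classmethod
#         def xzopen(cls, name, mode="r", fileobj=None, **kwargs):
#             ...  # open an lzma-compressed file object, then call cls.taropen()
#         OPEN_METH = dict(TarFile.OPEN_METH, xz="xzopen")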
@classmethod
def open(cls, name=None, mode="r", fileobj=None, bufsize=RECORDSIZE, **kwargs):
"""Open a tar archive for reading, writing or appending. Return
an appropriate TarFile class.
mode:
'r' or 'r:*' open for reading with transparent compression
'r:' open for reading exclusively uncompressed
'r:gz' open for reading with gzip compression
'r:bz2' open for reading with bzip2 compression
'a' or 'a:' open for appending, creating the file if necessary
'w' or 'w:' open for writing without compression
'w:gz' open for writing with gzip compression
'w:bz2' open for writing with bzip2 compression
'r|*' open a stream of tar blocks with transparent compression
'r|' open an uncompressed stream of tar blocks for reading
'r|gz' open a gzip compressed stream of tar blocks
'r|bz2' open a bzip2 compressed stream of tar blocks
'w|' open an uncompressed stream for writing
'w|gz' open a gzip compressed stream for writing
'w|bz2' open a bzip2 compressed stream for writing
"""
if not name and not fileobj:
raise ValueError("nothing to open")
if mode in ("r", "r:*"):
# Find out which *open() is appropriate for opening the file.
for comptype in cls.OPEN_METH:
func = getattr(cls, cls.OPEN_METH[comptype])
if fileobj is not None:
saved_pos = fileobj.tell()
try:
return func(name, "r", fileobj, **kwargs)
except (ReadError, CompressionError), e:
if fileobj is not None:
fileobj.seek(saved_pos)
continue
raise ReadError("file could not be opened successfully")
elif ":" in mode:
filemode, comptype = mode.split(":", 1)
filemode = filemode or "r"
comptype = comptype or "tar"
# Select the *open() function according to
# given compression.
if comptype in cls.OPEN_METH:
func = getattr(cls, cls.OPEN_METH[comptype])
else:
raise CompressionError("unknown compression type %r" % comptype)
return func(name, filemode, fileobj, **kwargs)
elif "|" in mode:
filemode, comptype = mode.split("|", 1)
filemode = filemode or "r"
comptype = comptype or "tar"
if filemode not in "rw":
raise ValueError("mode must be 'r' or 'w'")
t = cls(name, filemode,
_Stream(name, filemode, comptype, fileobj, bufsize),
**kwargs)
t._extfileobj = False
return t
elif mode in "aw":
return cls.taropen(name, mode, fileobj, **kwargs)
raise ValueError("undiscernible mode")
@classmethod
def taropen(cls, name, mode="r", fileobj=None, **kwargs):
"""Open uncompressed tar archive name for reading or writing.
"""
if len(mode) > 1 or mode not in "raw":
raise ValueError("mode must be 'r', 'a' or 'w'")
return cls(name, mode, fileobj, **kwargs)
@classmethod
def gzopen(cls, name, mode="r", fileobj=None, compresslevel=9, **kwargs):
"""Open gzip compressed tar archive name for reading or writing.
Appending is not allowed.
"""
if len(mode) > 1 or mode not in "rw":
raise ValueError("mode must be 'r' or 'w'")
try:
import gzip
gzip.GzipFile
except (ImportError, AttributeError):
raise CompressionError("gzip module is not available")
if fileobj is None:
fileobj = bltn_open(name, mode + "b")
try:
t = cls.taropen(name, mode,
gzip.GzipFile(name, mode, compresslevel, fileobj),
**kwargs)
except IOError:
raise ReadError("not a gzip file")
t._extfileobj = False
return t
@classmethod
def bz2open(cls, name, mode="r", fileobj=None, compresslevel=9, **kwargs):
"""Open bzip2 compressed tar archive name for reading or writing.
Appending is not allowed.
"""
if len(mode) > 1 or mode not in "rw":
raise ValueError("mode must be 'r' or 'w'.")
try:
import bz2
except ImportError:
raise CompressionError("bz2 module is not available")
if fileobj is not None:
fileobj = _BZ2Proxy(fileobj, mode)
else:
fileobj = bz2.BZ2File(name, mode, compresslevel=compresslevel)
try:
t = cls.taropen(name, mode, fileobj, **kwargs)
except (IOError, EOFError):
raise ReadError("not a bzip2 file")
t._extfileobj = False
return t
# All *open() methods are registered here.
OPEN_METH = {
"tar": "taropen", # uncompressed tar
"gz": "gzopen", # gzip compressed tar
"bz2": "bz2open" # bzip2 compressed tar
}
#--------------------------------------------------------------------------
# The public methods which TarFile provides:
def close(self):
"""Close the TarFile. In write-mode, two finishing zero blocks are
appended to the archive.
"""
if self.closed:
return
if self.mode in "aw":
self.fileobj.write(NUL * (BLOCKSIZE * 2))
self.offset += (BLOCKSIZE * 2)
# fill up the end with zero-blocks
# (like option -b20 for tar does)
blocks, remainder = divmod(self.offset, RECORDSIZE)
if remainder > 0:
self.fileobj.write(NUL * (RECORDSIZE - remainder))
if not self._extfileobj:
self.fileobj.close()
self.closed = True
def getmember(self, name):
"""Return a TarInfo object for member `name'. If `name' can not be
found in the archive, KeyError is raised. If a member occurs more
than once in the archive, its last occurrence is assumed to be the
most up-to-date version.
"""
tarinfo = self._getmember(name)
if tarinfo is None:
raise KeyError("filename %r not found" % name)
return tarinfo
def getmembers(self):
"""Return the members of the archive as a list of TarInfo objects. The
list has the same order as the members in the archive.
"""
self._check()
if not self._loaded: # if we want to obtain a list of
self._load() # all members, we first have to
# scan the whole archive.
return self.members
def getnames(self):
"""Return the members of the archive as a list of their names. It has
the same order as the list returned by getmembers().
"""
return [tarinfo.name for tarinfo in self.getmembers()]
def gettarinfo(self, name=None, arcname=None, fileobj=None):
"""Create a TarInfo object for either the file `name' or the file
object `fileobj' (using os.fstat on its file descriptor). You can
modify some of the TarInfo's attributes before you add it using
addfile(). If given, `arcname' specifies an alternative name for the
file in the archive.
"""
self._check("aw")
# When fileobj is given, replace name by
# fileobj's real name.
if fileobj is not None:
name = fileobj.name
# Building the name of the member in the archive.
# Backward slashes are converted to forward slashes, and
# absolute paths are turned into relative paths.
if arcname is None:
arcname = name
drv, arcname = os.path.splitdrive(arcname)
arcname = arcname.replace(os.sep, "/")
arcname = arcname.lstrip("/")
# Now, fill the TarInfo object with
# information specific for the file.
tarinfo = self.tarinfo()
tarinfo.tarfile = self
# Use os.stat or os.lstat, depending on platform
# and if symlinks shall be resolved.
if fileobj is None:
if hasattr(os, "lstat") and not self.dereference:
statres = os.lstat(name)
else:
statres = os.stat(name)
else:
statres = os.fstat(fileobj.fileno())
linkname = ""
stmd = statres.st_mode
if stat.S_ISREG(stmd):
inode = (statres.st_ino, statres.st_dev)
if not self.dereference and statres.st_nlink > 1 and \
inode in self.inodes and arcname != self.inodes[inode]:
# Is it a hardlink to an already
# archived file?
type = LNKTYPE
linkname = self.inodes[inode]
else:
# The inode is added only if it is valid.
# For win32 it is always 0.
type = REGTYPE
if inode[0]:
self.inodes[inode] = arcname
elif stat.S_ISDIR(stmd):
type = DIRTYPE
elif stat.S_ISFIFO(stmd):
type = FIFOTYPE
elif stat.S_ISLNK(stmd):
type = SYMTYPE
linkname = os.readlink(name)
elif stat.S_ISCHR(stmd):
type = CHRTYPE
elif stat.S_ISBLK(stmd):
type = BLKTYPE
else:
return None
# Fill the TarInfo object with all
# information we can get.
tarinfo.name = arcname
tarinfo.mode = stmd
tarinfo.uid = statres.st_uid
tarinfo.gid = statres.st_gid
if type == REGTYPE:
tarinfo.size = statres.st_size
else:
tarinfo.size = 0L
tarinfo.mtime = statres.st_mtime
tarinfo.type = type
tarinfo.linkname = linkname
if pwd:
try:
tarinfo.uname = pwd.getpwuid(tarinfo.uid)[0]
except KeyError:
pass
if grp:
try:
tarinfo.gname = grp.getgrgid(tarinfo.gid)[0]
except KeyError:
pass
if type in (CHRTYPE, BLKTYPE):
if hasattr(os, "major") and hasattr(os, "minor"):
tarinfo.devmajor = os.major(statres.st_rdev)
tarinfo.devminor = os.minor(statres.st_rdev)
return tarinfo
def list(self, verbose=True):
"""Print a table of contents to sys.stdout. If `verbose' is False, only
the names of the members are printed. If it is True, an `ls -l'-like
output is produced.
"""
self._check()
for tarinfo in self:
if verbose:
print filemode(tarinfo.mode),
print "%s/%s" % (tarinfo.uname or tarinfo.uid,
tarinfo.gname or tarinfo.gid),
if tarinfo.ischr() or tarinfo.isblk():
print "%10s" % ("%d,%d" \
% (tarinfo.devmajor, tarinfo.devminor)),
else:
print "%10d" % tarinfo.size,
print "%d-%02d-%02d %02d:%02d:%02d" \
% time.localtime(tarinfo.mtime)[:6],
print tarinfo.name + ("/" if tarinfo.isdir() else ""),
if verbose:
if tarinfo.issym():
print "->", tarinfo.linkname,
if tarinfo.islnk():
print "link to", tarinfo.linkname,
print
def add(self, name, arcname=None, recursive=True, exclude=None, filter=None):
"""Add the file `name' to the archive. `name' may be any type of file
(directory, fifo, symbolic link, etc.). If given, `arcname'
specifies an alternative name for the file in the archive.
Directories are added recursively by default. This can be avoided by
setting `recursive' to False. `exclude' is a function that should
return True for each filename to be excluded. `filter' is a function
that expects a TarInfo object argument and returns the changed
TarInfo object; if it returns None, the TarInfo object will be
excluded from the archive.
"""
self._check("aw")
if arcname is None:
arcname = name
# Exclude pathnames.
if exclude is not None:
import warnings
warnings.warn("use the filter argument instead",
DeprecationWarning, 2)
if exclude(name):
self._dbg(2, "tarfile: Excluded %r" % name)
return
# Skip if somebody tries to archive the archive...
if self.name is not None and os.path.abspath(name) == self.name:
self._dbg(2, "tarfile: Skipped %r" % name)
return
self._dbg(1, name)
# Create a TarInfo object from the file.
tarinfo = self.gettarinfo(name, arcname)
if tarinfo is None:
self._dbg(1, "tarfile: Unsupported type %r" % name)
return
# Change or exclude the TarInfo object.
if filter is not None:
tarinfo = filter(tarinfo)
if tarinfo is None:
self._dbg(2, "tarfile: Excluded %r" % name)
return
# Append the tar header and data to the archive.
if tarinfo.isreg():
with bltn_open(name, "rb") as f:
self.addfile(tarinfo, f)
elif tarinfo.isdir():
self.addfile(tarinfo)
if recursive:
for f in os.listdir(name):
self.add(os.path.join(name, f), os.path.join(arcname, f),
recursive, exclude, filter)
else:
self.addfile(tarinfo)
def addfile(self, tarinfo, fileobj=None):
"""Add the TarInfo object `tarinfo' to the archive. If `fileobj' is
given, tarinfo.size bytes are read from it and added to the archive.
You can create TarInfo objects using gettarinfo().
On Windows platforms, `fileobj' should always be opened with mode
'rb' to avoid irritation about the file size.
"""
self._check("aw")
tarinfo = copy.copy(tarinfo)
buf = tarinfo.tobuf(self.format, self.encoding, self.errors)
self.fileobj.write(buf)
self.offset += len(buf)
# If there's data to follow, append it.
if fileobj is not None:
copyfileobj(fileobj, self.fileobj, tarinfo.size)
blocks, remainder = divmod(tarinfo.size, BLOCKSIZE)
if remainder > 0:
self.fileobj.write(NUL * (BLOCKSIZE - remainder))
blocks += 1
self.offset += blocks * BLOCKSIZE
self.members.append(tarinfo)
def extractall(self, path=".", members=None):
"""Extract all members from the archive to the current working
directory and set owner, modification time and permissions on
directories afterwards. `path' specifies a different directory
to extract to. `members' is optional and must be a subset of the
list returned by getmembers().
"""
directories = []
if members is None:
members = self
for tarinfo in members:
if tarinfo.isdir():
# Extract directories with a safe mode.
directories.append(tarinfo)
tarinfo = copy.copy(tarinfo)
tarinfo.mode = 0700
self.extract(tarinfo, path)
# Reverse sort directories.
directories.sort(key=operator.attrgetter('name'))
directories.reverse()
# Set correct owner, mtime and filemode on directories.
for tarinfo in directories:
dirpath = os.path.join(path, tarinfo.name)
try:
self.chown(tarinfo, dirpath)
self.utime(tarinfo, dirpath)
self.chmod(tarinfo, dirpath)
except ExtractError, e:
if self.errorlevel > 1:
raise
else:
self._dbg(1, "tarfile: %s" % e)
def extract(self, member, path=""):
"""Extract a member from the archive to the current working directory,
using its full name. Its file information is extracted as accurately
as possible. `member' may be a filename or a TarInfo object. You can
specify a different directory using `path'.
"""
self._check("r")
if isinstance(member, basestring):
tarinfo = self.getmember(member)
else:
tarinfo = member
# Prepare the link target for makelink().
if tarinfo.islnk():
tarinfo._link_target = os.path.join(path, tarinfo.linkname)
try:
self._extract_member(tarinfo, os.path.join(path, tarinfo.name))
except EnvironmentError, e:
if self.errorlevel > 0:
raise
else:
if e.filename is None:
self._dbg(1, "tarfile: %s" % e.strerror)
else:
self._dbg(1, "tarfile: %s %r" % (e.strerror, e.filename))
except ExtractError, e:
if self.errorlevel > 1:
raise
else:
self._dbg(1, "tarfile: %s" % e)
def extractfile(self, member):
"""Extract a member from the archive as a file object. `member' may be
a filename or a TarInfo object. If `member' is a regular file, a
file-like object is returned. If `member' is a link, a file-like
object is constructed from the link's target. If `member' is none of
the above, None is returned.
The file-like object is read-only and provides the following
methods: read(), readline(), readlines(), seek() and tell()
"""
self._check("r")
if isinstance(member, basestring):
tarinfo = self.getmember(member)
else:
tarinfo = member
if tarinfo.isreg():
return self.fileobject(self, tarinfo)
elif tarinfo.type not in SUPPORTED_TYPES:
# If a member's type is unknown, it is treated as a
# regular file.
return self.fileobject(self, tarinfo)
elif tarinfo.islnk() or tarinfo.issym():
if isinstance(self.fileobj, _Stream):
# A small but ugly workaround for the case that someone tries
# to extract a (sym)link as a file-object from a non-seekable
# stream of tar blocks.
raise StreamError("cannot extract (sym)link as file object")
else:
# A (sym)link's file object is its target's file object.
return self.extractfile(self._find_link_target(tarinfo))
else:
# If there's no data associated with the member (directory, chrdev,
# blkdev, etc.), return None instead of a file object.
return None
def _extract_member(self, tarinfo, targetpath):
"""Extract the TarInfo object tarinfo to a physical
file called targetpath.
"""
# Fetch the TarInfo object for the given name
# and build the destination pathname, replacing
# forward slashes with platform-specific separators.
targetpath = targetpath.rstrip("/")
targetpath = targetpath.replace("/", os.sep)
# Create all upper directories.
upperdirs = os.path.dirname(targetpath)
if upperdirs and not os.path.exists(upperdirs):
# Create directories that are not part of the archive with
# default permissions.
os.makedirs(upperdirs)
if tarinfo.islnk() or tarinfo.issym():
self._dbg(1, "%s -> %s" % (tarinfo.name, tarinfo.linkname))
else:
self._dbg(1, tarinfo.name)
if tarinfo.isreg():
self.makefile(tarinfo, targetpath)
elif tarinfo.isdir():
self.makedir(tarinfo, targetpath)
elif tarinfo.isfifo():
self.makefifo(tarinfo, targetpath)
elif tarinfo.ischr() or tarinfo.isblk():
self.makedev(tarinfo, targetpath)
elif tarinfo.islnk() or tarinfo.issym():
self.makelink(tarinfo, targetpath)
elif tarinfo.type not in SUPPORTED_TYPES:
self.makeunknown(tarinfo, targetpath)
else:
self.makefile(tarinfo, targetpath)
self.chown(tarinfo, targetpath)
if not tarinfo.issym():
self.chmod(tarinfo, targetpath)
self.utime(tarinfo, targetpath)
#--------------------------------------------------------------------------
# Below are the different file methods. They are called via
# _extract_member() when extract() is called. They can be replaced in a
# subclass to implement other functionality.
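#
# For instance (hypothetical sketch, not part of this module), a subclass
# that tolerates unsupported device nodes could override makedev():
#
#     class LenientTarFile(TarFile):
#         def makedev(self, tarinfo, targetpath):
#             self._dbg(1, "tarfile: skipped device node %r" % tarinfo.name)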
def makedir(self, tarinfo, targetpath):
"""Make a directory called targetpath.
"""
try:
# Use a safe mode for the directory, the real mode is set
# later in _extract_member().
os.mkdir(targetpath, 0700)
except EnvironmentError, e:
if e.errno != errno.EEXIST:
raise
def makefile(self, tarinfo, targetpath):
"""Make a file called targetpath.
"""
source = self.extractfile(tarinfo)
try:
with bltn_open(targetpath, "wb") as target:
copyfileobj(source, target)
finally:
source.close()
def makeunknown(self, tarinfo, targetpath):
"""Make a file from a TarInfo object with an unknown type
at targetpath.
"""
self.makefile(tarinfo, targetpath)
self._dbg(1, "tarfile: Unknown file type %r, " \
"extracted as regular file." % tarinfo.type)
def makefifo(self, tarinfo, targetpath):
"""Make a fifo called targetpath.
"""
if hasattr(os, "mkfifo"):
os.mkfifo(targetpath)
else:
raise ExtractError("fifo not supported by system")
def makedev(self, tarinfo, targetpath):
"""Make a character or block device called targetpath.
"""
if not hasattr(os, "mknod") or not hasattr(os, "makedev"):
raise ExtractError("special devices not supported by system")
mode = tarinfo.mode
if tarinfo.isblk():
mode |= stat.S_IFBLK
else:
mode |= stat.S_IFCHR
os.mknod(targetpath, mode,
os.makedev(tarinfo.devmajor, tarinfo.devminor))
def makelink(self, tarinfo, targetpath):
"""Make a (symbolic) link called targetpath. If it cannot be created
(platform limitation), we try to make a copy of the referenced file
instead of a link.
"""
if hasattr(os, "symlink") and hasattr(os, "link"):
# For systems that support symbolic and hard links.
if tarinfo.issym():
if os.path.lexists(targetpath):
os.unlink(targetpath)
os.symlink(tarinfo.linkname, targetpath)
else:
# See extract().
if os.path.exists(tarinfo._link_target):
if os.path.lexists(targetpath):
os.unlink(targetpath)
os.link(tarinfo._link_target, targetpath)
else:
self._extract_member(self._find_link_target(tarinfo), targetpath)
else:
try:
self._extract_member(self._find_link_target(tarinfo), targetpath)
except KeyError:
raise ExtractError("unable to resolve link inside archive")
def chown(self, tarinfo, targetpath):
"""Set owner of targetpath according to tarinfo.
"""
if pwd and hasattr(os, "geteuid") and os.geteuid() == 0:
# We have to be root to do so.
try:
g = grp.getgrnam(tarinfo.gname)[2]
except KeyError:
g = tarinfo.gid
try:
u = pwd.getpwnam(tarinfo.uname)[2]
except KeyError:
u = tarinfo.uid
try:
if tarinfo.issym() and hasattr(os, "lchown"):
os.lchown(targetpath, u, g)
else:
if sys.platform != "os2emx":
os.chown(targetpath, u, g)
except EnvironmentError, e:
raise ExtractError("could not change owner")
def chmod(self, tarinfo, targetpath):
"""Set file permissions of targetpath according to tarinfo.
"""
if hasattr(os, 'chmod'):
try:
os.chmod(targetpath, tarinfo.mode)
except EnvironmentError, e:
raise ExtractError("could not change mode")
def utime(self, tarinfo, targetpath):
"""Set modification time of targetpath according to tarinfo.
"""
if not hasattr(os, 'utime'):
return
try:
os.utime(targetpath, (tarinfo.mtime, tarinfo.mtime))
except EnvironmentError, e:
raise ExtractError("could not change modification time")
#--------------------------------------------------------------------------
def next(self):
"""Return the next member of the archive as a TarInfo object, when
TarFile is opened for reading. Return None if there are no more
members available.
"""
self._check("ra")
if self.firstmember is not None:
m = self.firstmember
self.firstmember = None
return m
# Read the next block.
self.fileobj.seek(self.offset)
tarinfo = None
while True:
try:
tarinfo = self.tarinfo.fromtarfile(self)
except EOFHeaderError, e:
if self.ignore_zeros:
self._dbg(2, "0x%X: %s" % (self.offset, e))
self.offset += BLOCKSIZE
continue
except InvalidHeaderError, e:
if self.ignore_zeros:
self._dbg(2, "0x%X: %s" % (self.offset, e))
self.offset += BLOCKSIZE
continue
elif self.offset == 0:
raise ReadError(str(e))
except EmptyHeaderError:
if self.offset == 0:
raise ReadError("empty file")
except TruncatedHeaderError, e:
if self.offset == 0:
raise ReadError(str(e))
except SubsequentHeaderError, e:
raise ReadError(str(e))
break
if tarinfo is not None:
self.members.append(tarinfo)
else:
self._loaded = True
return tarinfo
#--------------------------------------------------------------------------
# Little helper methods:
def _getmember(self, name, tarinfo=None, normalize=False):
"""Find an archive member by name from bottom to top.
If tarinfo is given, it is used as the starting point.
"""
# Ensure that all members have been loaded.
members = self.getmembers()
# Limit the member search list up to tarinfo.
if tarinfo is not None:
members = members[:members.index(tarinfo)]
if normalize:
name = os.path.normpath(name)
for member in reversed(members):
if normalize:
member_name = os.path.normpath(member.name)
else:
member_name = member.name
if name == member_name:
return member
def _load(self):
"""Read through the entire archive file and look for readable
members.
"""
while True:
tarinfo = self.next()
if tarinfo is None:
break
self._loaded = True
def _check(self, mode=None):
"""Check if TarFile is still open, and if the operation's mode
corresponds to TarFile's mode.
"""
if self.closed:
raise IOError("%s is closed" % self.__class__.__name__)
if mode is not None and self.mode not in mode:
raise IOError("bad operation for mode %r" % self.mode)
def _find_link_target(self, tarinfo):
"""Find the target member of a symlink or hardlink member in the
archive.
"""
if tarinfo.issym():
# Always search the entire archive.
linkname = "/".join(filter(None, (os.path.dirname(tarinfo.name), tarinfo.linkname)))
limit = None
else:
# Search the archive before the link, because a hard link is
# just a reference to an already archived file.
linkname = tarinfo.linkname
limit = tarinfo
member = self._getmember(linkname, tarinfo=limit, normalize=True)
if member is None:
raise KeyError("linkname %r not found" % linkname)
return member
def __iter__(self):
"""Provide an iterator object.
"""
if self._loaded:
return iter(self.members)
else:
return TarIter(self)
def _dbg(self, level, msg):
"""Write debugging output to sys.stderr.
"""
if level <= self.debug:
print >> sys.stderr, msg
def __enter__(self):
self._check()
return self
def __exit__(self, type, value, traceback):
if type is None:
self.close()
else:
# An exception occurred. We must not call close() because
# it would try to write end-of-archive blocks and padding.
if not self._extfileobj:
self.fileobj.close()
self.closed = True
# class TarFile
class TarIter:
"""Iterator Class.
for tarinfo in TarFile(...):
suite...
"""
def __init__(self, tarfile):
"""Construct a TarIter object.
"""
self.tarfile = tarfile
self.index = 0
def __iter__(self):
"""Return iterator object.
"""
return self
def next(self):
"""Return the next item using TarFile's next() method.
When all members have been read, set TarFile as _loaded.
"""
# Fix for SF #1100429: Under rare circumstances it can
# happen that getmembers() is called during iteration,
# which will cause TarIter to stop prematurely.
if not self.tarfile._loaded:
tarinfo = self.tarfile.next()
if not tarinfo:
self.tarfile._loaded = True
raise StopIteration
else:
try:
tarinfo = self.tarfile.members[self.index]
except IndexError:
raise StopIteration
self.index += 1
return tarinfo
# Helper classes for sparse file support
class _section:
"""Base class for _data and _hole.
"""
def __init__(self, offset, size):
self.offset = offset
self.size = size
def __contains__(self, offset):
return self.offset <= offset < self.offset + self.size
class _data(_section):
"""Represent a data section in a sparse file.
"""
def __init__(self, offset, size, realpos):
_section.__init__(self, offset, size)
self.realpos = realpos
class _hole(_section):
"""Represent a hole section in a sparse file.
"""
pass
class _ringbuffer(list):
"""Ringbuffer class which increases performance
over a regular list.
"""
def __init__(self):
self.idx = 0
def find(self, offset):
idx = self.idx
while True:
item = self[idx]
if offset in item:
break
idx += 1
if idx == len(self):
idx = 0
if idx == self.idx:
# End of File
return None
self.idx = idx
return item
#---------------------------------------------
# zipfile compatible TarFile class
#---------------------------------------------
TAR_PLAIN = 0 # zipfile.ZIP_STORED
TAR_GZIPPED = 8 # zipfile.ZIP_DEFLATED
class TarFileCompat:
"""TarFile class compatible with standard module zipfile's
ZipFile class.
"""
def __init__(self, file, mode="r", compression=TAR_PLAIN):
from warnings import warnpy3k
warnpy3k("the TarFileCompat class has been removed in Python 3.0",
stacklevel=2)
if compression == TAR_PLAIN:
self.tarfile = TarFile.taropen(file, mode)
elif compression == TAR_GZIPPED:
self.tarfile = TarFile.gzopen(file, mode)
else:
raise ValueError("unknown compression constant")
if mode[0:1] == "r":
members = self.tarfile.getmembers()
for m in members:
m.filename = m.name
m.file_size = m.size
m.date_time = time.gmtime(m.mtime)[:6]
def namelist(self):
return map(lambda m: m.name, self.infolist())
def infolist(self):
return filter(lambda m: m.type in REGULAR_TYPES,
self.tarfile.getmembers())
def printdir(self):
self.tarfile.list()
def testzip(self):
return
def getinfo(self, name):
return self.tarfile.getmember(name)
def read(self, name):
return self.tarfile.extractfile(self.tarfile.getmember(name)).read()
def write(self, filename, arcname=None, compress_type=None):
self.tarfile.add(filename, arcname)
def writestr(self, zinfo, bytes):
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
import calendar
tinfo = TarInfo(zinfo.filename)
tinfo.size = len(bytes)
tinfo.mtime = calendar.timegm(zinfo.date_time)
self.tarfile.addfile(tinfo, StringIO(bytes))
def close(self):
self.tarfile.close()
#class TarFileCompat
#--------------------
# exported functions
#--------------------
def is_tarfile(name):
"""Return True if name points to a tar archive that we
are able to handle, else return False.
"""
try:
t = open(name)
t.close()
return True
except TarError:
return False
bltn_open = open
open = TarFile.open
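# A minimal usage sketch (kept as comments so importing this module stays
# side-effect free; the archive and directory names are made up):
#
#     with TarFile.open("example.tar.gz", "w:gz") as tar:
#         tar.add("some_directory")
#
#     with TarFile.open("example.tar.gz", "r:*") as tar:
#         tar.extractall(path="restored")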
|
apache-2.0
|
akkana/scripts
|
astro/marsoppy.py
|
1
|
14537
|
#!/usr/bin/env python3
# Plot all the planets' orbits, as viewed from a point that
# floats above the Earth's north ecliptic pole and moves with
# the Earth, to demonstrate phenomena like epicycles and the
# Venus pentagram. Idea from Galen Gisler's planetarium show.
#
# Copyright 2020 by Akkana Peck: Share and enjoy under the GPLv2 or later.
# Weird GLib bug: GLib.timeout_add takes an integer number
# of milliseconds, but if you pass it a float, it sets the timeout
# but then doesn't call draw() automatically after configure(),
# resulting in a transparent window since the black background
# doesn't get drawn. I guess it's some mysterious GTK/Cairo bug.
# Of course I could require an integer timeout when parsing arguments,
# but it's such an amusingly weird bug that I've left it as a float.
import ephem
from tkinter import Tk, Canvas, PhotoImage, mainloop, LEFT
import math
import argparse
import sys, os
from datetime import datetime, timezone
ICONDIR = os.path.expanduser("~/Docs/Preso/mars/pix/")
earth = { "name": "Earth", "obj": ephem.Sun(), "color": "#08f",
"path": [], "xypath": [],
"line": None, "disk": None,
"imgname": os.path.join(ICONDIR, "tinyearth.png") }
mars = { "name": "Mars", "obj": ephem.Mars(), "color": "#f80",
"path": [], "xypath": [], "oppositions": [],
"line": None, "disk": None,
"imgname": os.path.join(ICONDIR, "tinymars.png") }
# oppositions will include date, earth hlon, earth dist, mars hlon, mars dist
oppositions = []
table_header = "%-20s %10s %10s" % ("Date", "Distance", "Size")
table_format = "%-20s %10.3f %10.2f"
def find_next_opposition(start_time):
"""Find oppsition and time of closest approach for the given time range.
Input is the start time, either in ephem.Date or float julian.
Output is two ephem.Dates: opposition, closest approach
"""
t = start_time
timedelta = ephem.hour * 6
mars = ephem.Mars()
sun = ephem.Sun()
min_dist = 20
oppy_date = None
closest_date = None
last_dlon = None
# Loop til we've found opposition, plus 15 days.
# Opposition is when dlon changes sign and is very small.
while not oppy_date or t - oppy_date < 15:
mars.compute(t)
sun.compute(t)
dlon = mars.hlon - sun.hlon
# Does dlon have the opposite sign from last_dlon?
if last_dlon and abs(dlon) < .1 and \
(dlon == 0 or (dlon < 0) != (last_dlon < 0)):
oppy_date = t
if mars.earth_distance < min_dist:
closest_date = t
min_dist = mars.earth_distance
if oppy_date and closest_date:
return ephem.Date(oppy_date), ephem.Date(closest_date)
last_dlon = dlon
t += timedelta
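# Illustrative call (the start date is arbitrary):
#   opp_date, closest_date = find_next_opposition(ephem.Date("2020/9/1"))
# gives the opposition date and closest-approach date following that start date.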
class OrbitViewWindow():
def __init__(self, auscale, timestep, time_increment=1,
start_time=None, stopped=False):
"""time_increment is in days.
start_time is anything that can be turned into an ephem.Date object.
"""
self.auscale = auscale
self.timestep = timestep
if stopped:
self.stepping = False
else:
self.stepping = True
if start_time:
self.time = ephem.Date(start_time)
else:
# Default to starting 30 days before present
self.time = ephem.Date(datetime.now(tz=timezone.utc)) \
- ephem.hour * 24 * 30
print("Start time:", ephem.Date(self.time))
self.opp_date, self.closest_date = find_next_opposition(self.time)
# print("Next opposition:", self.opp_date)
# print("Next closest:", self.closest_date)
self.time_increment = ephem.hour * time_increment * 24
self.linewidth = 3
self.width = 1024
self.height = 768
self.halfwidth = self.width/2.
self.halfheight = self.height/2.
self.dist_scale = self.halfheight / self.auscale
tkmaster = Tk()
tkmaster.title("Mars Oppositions")
self.canvas = Canvas(tkmaster, bg="black",
width=self.width, height=self.height)
# Start with just the Sun
try:
self.sunimg = PhotoImage(file=os.path.join(ICONDIR, "tinysun.png"))
self.canvas.create_image(self.width/2, self.height/2,
image=self.sunimg)
except:
sunrad = 20
self.canvas.create_oval(self.width/2 - sunrad,
self.height/2 - sunrad,
self.width/2 + sunrad,
self.height/2 + sunrad,
fill="yellow")
self.canvas.pack()
# Canvas requires that the app save a reference to PhotoImages:
# the canvas doesn't keep the references.
try:
earth["tinyimg"] = PhotoImage(file=earth["imgname"])
except:
earth["tinyimg"] = None
try:
mars["tinyimg"] = PhotoImage(file=mars["imgname"])
except:
mars["tinyimg"] = None
tkmaster.bind("<KeyPress-q>", sys.exit)
tkmaster.bind("<KeyPress-space>", self.toggle_stepping)
print(table_header)
# Schedule the first draw
self.step_draw()
def toggle_stepping(self, key):
self.stepping = not self.stepping
def step_draw(self):
"""Calculate and draw the next position of each planet.
"""
# If we don't call step_draw at all, we'll never get further key events
# that could restart the animation. So just step at a much slower pace.
if not self.stepping:
self.canvas.after(500, self.step_draw)
return
# Adding a float to ephem.Date turns it into a float.
# You can get back an ephem.Date with: ephem.Date(self.time).
self.time += self.time_increment
for p in (earth, mars):
p["obj"].compute(self.time)
# ephem treats Earth specially, what a hassle!
# There is no ephem.Earth body; ephem.Sun gives the Earth's
# hlon as hlon, but I guess we need to use earth_distance.
oppy = False
if p["name"] == "Earth":
hlon = p["obj"].hlon
sundist = p["obj"].earth_distance
earthdist = 0
size = 0
else:
hlon = p["obj"].hlon
sundist = p["obj"].sun_distance
earthdist = p["obj"].earth_distance
size = p["obj"].size
if abs(self.time - self.opp_date) <= .5:
oppy = True
if self.opp_date < self.closest_date:
print(table_format % (self.opp_date, earthdist, size),
"Opposition")
print(table_format % (self.closest_date,
earthdist, size),
"Closest approach")
else:
print(table_format % (self.closest_date,
earthdist, size),
"Closest approach")
print(table_format % (self.opp_date, earthdist, size),
"Opposition")
xn, yn = self.planet_x_y(hlon, sundist)
radius = 10
if oppy:
# Create outline circles for Mars and Earth at opposition.
# xn, yn should be Mars since Earth was done first.
# Create the open circle at the bottom of the stacking order.
# There may be a way to do this by passing in tags,
# but I can't find any documentation on tags.
self.canvas.tag_lower(
self.canvas.create_oval(xn-radius, yn-radius,
xn+radius, yn+radius,
outline=p["color"], width=3)
)
earthx = earth["xypath"][-2]
earthy = earth["xypath"][-1]
self.canvas.tag_lower(
self.canvas.create_oval(earthx-radius, earthy-radius,
earthx+radius, earthy+radius,
outline=earth["color"], width=3)
)
localtz = datetime.now().astimezone().tzinfo
oppdate = ephem.to_timezone(self.opp_date, localtz)
opp_str = oppdate.strftime("%Y-%m-%d") + \
'\n%.3f AU\n%.1f"' % (earthdist, size)
if xn < self.width/2:
if yn < self.height / 2:
anchor = "se"
else:
anchor = "ne"
xtxt = xn - radius
else:
if yn < self.height / 2:
anchor = "sw"
else:
anchor = "nw"
xtxt = xn + radius
ytxt = yn
txtobj = self.canvas.create_text(xtxt, ytxt,
fill="white", justify=LEFT,
font=('sans', 14, 'bold'),
anchor=anchor,
text=opp_str)
# Make sure it's not offscreen
xt1, yt1, xt2, yt2 = self.canvas.bbox(txtobj)
if xt1 < 0:
xtxt -= xt1
elif xt2 > self.width:
xtxt -= (xt2 - self.width)
if yt1 < 0:
ytxt -= yt1
elif yt2 > self.height:
ytxt -= yt2 - self.height
self.canvas.coords(txtobj, xtxt, ytxt)
# Done with this opposition: find the next one.
self.opp_date, self.closest_date \
= find_next_opposition(self.time + 500)
p["xypath"].append(int(xn))
p["xypath"].append(int(yn))
if p["line"]:
self.canvas.coords(p["line"], p["xypath"])
if p["tinyimg"]:
self.canvas.coords(p["disk"], xn, yn)
else:
self.canvas.coords(p["disk"], xn-radius, yn-radius,
xn+radius, yn+radius)
else:
p["line"] = self.canvas.create_line(xn, yn, xn, yn,
width=self.linewidth,
fill=p["color"])
# Use images if there are any, else circles
if p["tinyimg"]:
p["disk"] = self.canvas.create_image(xn-radius, yn-radius,
image=p["tinyimg"])
else:
p["disk"] = self.canvas.create_oval(xn-radius, yn-radius,
xn+radius, yn+radius,
fill=p["color"])
p["path"].append((hlon, sundist, earthdist, size))
if self.stepping:
self.canvas.after(self.timestep, self.step_draw)
def planet_x_y(self, hlon, dist):
return (dist * self.dist_scale * math.cos(hlon) + self.halfwidth,
dist * self.dist_scale * math.sin(hlon) + self.halfheight)
def print_table():
"""Super quickie hack to print out a table for the current opposition.
Unrelated to any of the other code in this script.
Ought to be generalized to take start and stop times, etc.
"""
start_date = ephem.Date(datetime.now()) - ephem.hour*24*10
end_date = start_date + ephem.hour*24*40
opp_date, closest_date = find_next_opposition(start_date)
print("Opposition:", opp_date)
print("Closest approach:", closest_date)
# Define "opposition season"
d = opp_date - ephem.hour * 24 * 15
end_date = opp_date + ephem.hour * 24 * 20
mars = ephem.Mars()
print(table_header)
while d < start_date + 60:
mars.compute(d)
d += ephem.hour * 24
if abs(d - opp_date) <= .5:
print(table_format % (ephem.Date(d), mars.earth_distance,
mars.size), "** OPPOSITION")
elif abs(d - closest_date) <= .5:
print(table_format % (ephem.Date(d), mars.earth_distance,
mars.size), "** CLOSEST APPROACH")
else:
print(table_format % (ephem.Date(d), mars.earth_distance,
mars.size))
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description="""Draw planet orbits from the north ecliptic pole.
Key bindings:
space Start/stop animation
q quit""",
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('-a', "--au", dest="auscale", type=float, default=1.7,
action="store",
help="""Scale of the window in astronomical units.""")
parser.add_argument("-s", "--start", dest="start", default=None,
help="Start date, YYYY-MM-DD, "
"default: 30 days beforetoday")
parser.add_argument('-t', "--timestep", dest="timestep",
type=int, default=30,
help="""Time step in milliseconds (default 30).
Controls how fast the orbits are drawn.""")
parser.add_argument('-S', "--stopped", dest="stopped", action="store_true",
help="Bring up the window but don't immediately "
"start animating: wait for the spacebar.")
parser.add_argument('-T', "--table", dest="table", action="store_true",
help="Forget all that graphic stuff and just "
"print a table of sizes around opposition")
args = parser.parse_args(sys.argv[1:])
if args.start:
args.start = datetime.strptime(args.start, "%Y-%m-%d")
if args.table:
print_table()
sys.exit(0)
win = OrbitViewWindow(auscale=args.auscale, start_time=args.start,
timestep=args.timestep, stopped=args.stopped)
mainloop()
|
gpl-2.0
|
bunnyinc/django-oidc-provider
|
oidc_provider/tests/test_utils.py
|
1
|
2459
|
import time
from datetime import datetime
from django.test import TestCase
from django.utils import timezone
from oidc_provider.lib.utils.common import get_issuer
from oidc_provider.lib.utils.token import create_id_token
from oidc_provider.tests.app.utils import create_fake_user
class Request(object):
"""
Mock request object.
"""
scheme = 'http'
def get_host(self):
return 'host-from-request:8888'
class CommonTest(TestCase):
"""
Test cases for common utils.
"""
def test_get_issuer(self):
request = Request()
# from default settings
self.assertEqual(get_issuer(),
'http://localhost:8000/openid')
# from custom settings
with self.settings(SITE_URL='http://otherhost:8000'):
self.assertEqual(get_issuer(),
'http://otherhost:8000/openid')
# `SITE_URL` not set, from `request`
with self.settings(SITE_URL=''):
self.assertEqual(get_issuer(request=request),
'http://host-from-request:8888/openid')
# use settings first if both are provided
self.assertEqual(get_issuer(request=request),
'http://localhost:8000/openid')
# `site_url` can even be overridden manually
self.assertEqual(get_issuer(site_url='http://127.0.0.1:9000',
request=request),
'http://127.0.0.1:9000/openid')
def timestamp_to_datetime(timestamp):
tz = timezone.get_current_timezone()
return datetime.fromtimestamp(timestamp, tz=tz)
class TokenTest(TestCase):
def setUp(self):
self.user = create_fake_user()
def test_create_id_token(self):
start_time = int(time.time())
login_timestamp = start_time - 1234
self.user.last_login = timestamp_to_datetime(login_timestamp)
id_token_data = create_id_token(self.user, aud='test-aud')
iat = id_token_data['iat']
self.assertEqual(type(iat), int)
self.assertGreaterEqual(iat, start_time)
self.assertLessEqual(iat - start_time, 5) # Can't take more than 5 s
self.assertEqual(id_token_data, {
'aud': 'test-aud',
'auth_time': login_timestamp,
'exp': iat + 600,
'iat': iat,
'iss': 'http://localhost:8000/openid',
'sub': str(self.user.id),
})
|
mit
|
mrry/tensorflow
|
tensorflow/contrib/learn/python/learn/basic_session_run_hooks.py
|
3
|
14433
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Some common SessionRunHook classes.
@@
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import time
import numpy as np
import six
from tensorflow.contrib.framework.python.ops import variables as contrib_variables
from tensorflow.contrib.learn.python.learn import session_run_hook
from tensorflow.contrib.learn.python.learn.session_run_hook import SessionRunArgs
from tensorflow.contrib.learn.python.learn.summary_writer_cache import SummaryWriterCache
from tensorflow.core.framework.summary_pb2 import Summary
from tensorflow.core.util.event_pb2 import SessionLog
from tensorflow.python.framework import ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import training_util
class LoggingTensorHook(session_run_hook.SessionRunHook):
"""Prints given tensors every N iteration.
The tensors will be printed to the log, with `INFO` severity.
"""
def __init__(self, tensors, every_n_iter=100):
"""Initializes a LoggingHook monitor.
Args:
tensors: `dict` of tag to tensors/names or
`iterable` of tensors/names.
every_n_iter: `int`, print every N iterations.
Raises:
ValueError: if `every_n_iter` is non-positive.
"""
if every_n_iter <= 0:
raise ValueError("Invalid every_n_iter=%s." % every_n_iter)
if not isinstance(tensors, dict):
tensors = {item: item for item in tensors}
self._tensors = tensors
self._every_n_iter = every_n_iter
def begin(self):
self._iter_count = 0
# Convert names to tensors if given
self._current_tensors = {tag: _as_graph_element(tensor)
for (tag, tensor) in self._tensors.items()}
def before_run(self, run_context): # pylint: disable=unused-argument
if self._iter_count % self._every_n_iter == 0:
return SessionRunArgs(self._current_tensors)
else:
return None
def after_run(self, run_context, run_values):
_ = run_context
if self._iter_count % self._every_n_iter == 0:
stats = []
for tag in sorted(self._current_tensors.keys()):
stats.append("%s = %s" % (tag, run_values.results[tag]))
logging.info("%s", ", ".join(stats))
self._iter_count += 1
class StopAtStepHook(session_run_hook.SessionRunHook):
"""Monitor to request stop at a specified step."""
def __init__(self, num_steps=None, last_step=None):
"""Create a StopAtStep Hook.
This hook requests stop after either a number of steps have been
executed or a last step has been reached. Only one of the two options can be
specified.
If `num_steps` is specified, it indicates the number of steps to execute
after `begin()` is called. If instead `last_step` is specified, it
indicates the last step we want to execute, as passed to the `after_run()`
call.
Args:
num_steps: Number of steps to execute.
last_step: Step after which to stop.
Raises:
ValueError: If one of the arguments is invalid.
"""
if num_steps is None and last_step is None:
raise ValueError("One of num_steps or last_step must be specified.")
if num_steps is not None and last_step is not None:
raise ValueError("Only one of num_steps or last_step can be specified.")
self._num_steps = num_steps
self._last_step = last_step
def begin(self):
self._global_step_tensor = contrib_variables.get_global_step()
if self._global_step_tensor is None:
raise RuntimeError("Global step should be created to use StopAtStepHook.")
def before_run(self, run_context): # pylint: disable=unused-argument
return SessionRunArgs(self._global_step_tensor)
def after_run(self, run_context, run_values):
global_step = run_values.results
if self._last_step is None:
self._last_step = global_step + self._num_steps - 1
if global_step >= self._last_step:
run_context.request_stop()
class CheckpointSaverHook(session_run_hook.SessionRunHook):
"""Saves checkpoints every N steps or seconds."""
def __init__(self,
checkpoint_dir,
save_secs=None,
save_steps=None,
saver=None,
checkpoint_basename="model.ckpt",
scaffold=None):
"""Initialize CheckpointSaverHook monitor.
Args:
checkpoint_dir: `str`, base directory for the checkpoint files.
save_secs: `int`, save every N secs.
save_steps: `int`, save every N steps.
saver: `Saver` object, used for saving.
checkpoint_basename: `str`, base name for the checkpoint files.
      scaffold: `Scaffold`, used to get the saver object.
Raises:
      ValueError: Exactly one of `save_steps` or `save_secs` should be set.
"""
logging.info("Create CheckpointSaverHook.")
self._saver = saver
self._checkpoint_dir = checkpoint_dir
self._summary_writer = SummaryWriterCache.get(checkpoint_dir)
self._save_path = os.path.join(checkpoint_dir, checkpoint_basename)
self._scaffold = scaffold
self._save_secs = save_secs
self._save_steps = save_steps
self._last_saved_time = None
self._last_saved_step = None
if save_steps is None and save_secs is None:
raise ValueError("Either save_steps or save_secs should be provided")
if (save_steps is not None) and (save_secs is not None):
raise ValueError("Can not provide both save_steps and save_secs.")
def begin(self):
self._last_saved_time = None
self._last_saved_step = None
self._global_step_tensor = contrib_variables.get_global_step()
if self._global_step_tensor is None:
raise RuntimeError(
"Global step should be created to use CheckpointSaverHook.")
def before_run(self, run_context): # pylint: disable=unused-argument
if self._last_saved_time is None:
# Write graph in the first call.
training_util.write_graph(
ops.get_default_graph().as_graph_def(add_shapes=True),
self._checkpoint_dir,
"graph.pbtxt")
self._summary_writer.add_graph(ops.get_default_graph())
return SessionRunArgs(self._global_step_tensor)
def after_run(self, run_context, run_values):
global_step = run_values.results
if self._last_saved_time is None:
self._save(global_step, run_context.session)
if self._save_steps is not None:
if global_step >= self._last_saved_step + self._save_steps:
self._save(global_step, run_context.session)
if self._save_secs is not None:
if time.time() >= self._last_saved_time + self._save_secs:
self._save(global_step, run_context.session)
def end(self, session):
last_step = session.run(contrib_variables.get_global_step())
self._save(last_step, session)
def _save(self, step, session):
"""Saves the latest checkpoint."""
if step == self._last_saved_step:
return
logging.info("Saving checkpoints for %d into %s.", step, self._save_path)
self._last_saved_time = time.time()
self._last_saved_step = step
if self._saver is None:
self._scaffold.saver.save(session, self._save_path, global_step=step)
else:
self._saver.save(session, self._save_path, global_step=step)
self._summary_writer.add_session_log(
SessionLog(
status=SessionLog.CHECKPOINT, checkpoint_path=self._save_path),
step)
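# Illustrative note (added): the hook is constructed with exactly one of `save_steps` or
# `save_secs`, e.g. CheckpointSaverHook(checkpoint_dir="/tmp/train_dir", save_steps=1000)
# (the directory here is hypothetical).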
class StepCounterHook(session_run_hook.SessionRunHook):
"""Steps per second monitor."""
def __init__(self, every_n_steps=100, output_dir=None, summary_writer=None):
self._summary_tag = "global_step/sec"
self._every_n_steps = every_n_steps
self._summary_writer = summary_writer
if summary_writer is None and output_dir:
self._summary_writer = SummaryWriterCache.get(output_dir)
def begin(self):
self._last_reported_time = None
self._last_reported_step = None
self._global_step_tensor = contrib_variables.get_global_step()
if self._global_step_tensor is None:
raise RuntimeError(
"Global step should be created to use StepCounterHook.")
def before_run(self, run_context): # pylint: disable=unused-argument
return SessionRunArgs(self._global_step_tensor)
def after_run(self, run_context, run_values):
_ = run_context
if not self._summary_writer:
return
global_step = run_values.results
current_time = time.time()
if self._last_reported_time is None:
self._last_reported_step = global_step
self._last_reported_time = current_time
else:
if global_step >= self._every_n_steps + self._last_reported_step:
added_steps = global_step - self._last_reported_step
elapsed_time = current_time - self._last_reported_time
steps_per_sec = added_steps / elapsed_time
summary = Summary(value=[Summary.Value(
tag=self._summary_tag, simple_value=steps_per_sec)])
self._summary_writer.add_summary(summary, global_step)
self._last_reported_step = global_step
self._last_reported_time = current_time
class NanLossDuringTrainingError(RuntimeError):
def __str__(self):
return "NaN loss during training."
class NanTensorHook(session_run_hook.SessionRunHook):
"""NaN Loss monitor.
Monitors loss and stops training if loss is NaN.
Can either fail with exception or just stop training.
"""
def __init__(self, loss_tensor, fail_on_nan_loss=True):
"""Initializes NanLoss monitor.
Args:
loss_tensor: `Tensor`, the loss tensor.
fail_on_nan_loss: `bool`, whether to raise exception when loss is NaN.
"""
self._loss_tensor = loss_tensor
self._fail_on_nan_loss = fail_on_nan_loss
def before_run(self, run_context): # pylint: disable=unused-argument
return SessionRunArgs(self._loss_tensor)
def after_run(self, run_context, run_values):
if np.isnan(run_values.results):
failure_message = "Model diverged with loss = NaN."
if self._fail_on_nan_loss:
logging.error(failure_message)
raise NanLossDuringTrainingError
else:
logging.warning(failure_message)
# We don't raise an error but we request stop without an exception.
run_context.request_stop()
class SummarySaverHook(session_run_hook.SessionRunHook):
"""Saves summaries every N steps."""
def __init__(self,
save_steps=100,
output_dir=None,
summary_writer=None,
scaffold=None,
summary_op=None):
"""Initializes a `SummarySaver` monitor.
Args:
save_steps: `int`, save summaries every N steps. See `EveryN`.
output_dir: `string`, the directory to save the summaries to. Only used
if no `summary_writer` is supplied.
summary_writer: `SummaryWriter`. If `None` and an `output_dir` was passed,
one will be created accordingly.
scaffold: `Scaffold` to get summary_op if it's not provided.
summary_op: `Tensor` of type `string`. A serialized `Summary` protocol
buffer, as output by TF summary methods like `scalar_summary` or
`merge_all_summaries`.
"""
# TODO(ipolosukhin): Implement every N seconds.
self._summary_op = summary_op
self._summary_writer = summary_writer
if summary_writer is None and output_dir:
self._summary_writer = SummaryWriterCache.get(output_dir)
self._scaffold = scaffold
self._save_steps = save_steps
# TODO(mdan): Throw an error if output_dir and summary_writer are None.
def begin(self):
self._last_saved_step = None
self._request_summary = True
self._global_step_tensor = contrib_variables.get_global_step()
if self._global_step_tensor is None:
raise RuntimeError(
"Global step should be created to use SummarySaverHook.")
def before_run(self, run_context): # pylint: disable=unused-argument
requests = {"global_step": self._global_step_tensor}
if self._request_summary:
if self._summary_op is not None:
requests["summary"] = self._summary_op
elif self._scaffold.summary_op is not None:
requests["summary"] = self._scaffold.summary_op
return SessionRunArgs(requests)
def after_run(self, run_context, run_values):
_ = run_context
if not self._summary_writer:
return
global_step = run_values.results["global_step"]
if self._last_saved_step is None:
self._summary_writer.add_session_log(
SessionLog(status=SessionLog.START), global_step)
if self._request_summary:
self._last_saved_step = global_step
if "summary" in run_values.results:
self._summary_writer.add_summary(run_values.results["summary"],
global_step)
self._request_summary = (
global_step >= self._last_saved_step + self._save_steps - 1)
def end(self, session=None):
if self._summary_writer:
self._summary_writer.flush()
def _as_graph_element(obj):
"""Retrieves Graph element."""
graph = ops.get_default_graph()
if not isinstance(obj, six.string_types):
if not hasattr(obj, "graph") or obj.graph != graph:
raise ValueError("Passed %s should have graph attribute that is equal "
"to current graph %s." % (obj, graph))
return obj
if ":" in obj:
element = graph.as_graph_element(obj)
else:
element = graph.as_graph_element(obj + ":0")
# Check that there is no :1 (e.g. it's single output).
try:
graph.as_graph_element(obj + ":1")
except (KeyError, ValueError):
pass
else:
raise ValueError("Name %s is ambiguous, "
"as this `Operation` has multiple outputs "
"(at least 2)." % obj)
return element
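# Note (added): _as_graph_element resolves a bare name such as "loss" to the tensor
# "loss:0" in the default graph and rejects names whose operation has more than one
# output; such tensors must be referenced with an explicit output index (e.g. "loss:1").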
|
apache-2.0
|
coderbone/SickRage
|
lib/tvdb_api/tvdb_api.py
|
12
|
35715
|
#!/usr/bin/env python2
# encoding:utf-8
# author:dbr/Ben
#project:tvdb_api
#repository:http://github.com/dbr/tvdb_api
#license:unlicense (http://unlicense.org/)
from functools import wraps
import traceback
__author__ = "dbr/Ben"
__version__ = "1.9"
import os
import re
import time
import getpass
import StringIO
import tempfile
import warnings
import logging
import zipfile
import datetime as dt
import requests
import xmltodict
try:
import xml.etree.cElementTree as ElementTree
except ImportError:
import xml.etree.ElementTree as ElementTree
try:
import gzip
except ImportError:
gzip = None
from dateutil.parser import parse
from cachecontrol import CacheControl, caches
from tvdb_ui import BaseUI, ConsoleUI
from tvdb_exceptions import (tvdb_error, tvdb_userabort, tvdb_shownotfound, tvdb_showincomplete,
tvdb_seasonnotfound, tvdb_episodenotfound, tvdb_attributenotfound)
def log():
return logging.getLogger("tvdb_api")
def retry(ExceptionToCheck, tries=4, delay=3, backoff=2, logger=None):
"""Retry calling the decorated function using an exponential backoff.
http://www.saltycrane.com/blog/2009/11/trying-out-retry-decorator-python/
original from: http://wiki.python.org/moin/PythonDecoratorLibrary#Retry
:param ExceptionToCheck: the exception to check. may be a tuple of
exceptions to check
:type ExceptionToCheck: Exception or tuple
:param tries: number of times to try (not retry) before giving up
:type tries: int
:param delay: initial delay between retries in seconds
:type delay: int
:param backoff: backoff multiplier e.g. value of 2 will double the delay
each retry
:type backoff: int
:param logger: logger to use. If None, print
:type logger: logging.Logger instance
"""
def deco_retry(f):
@wraps(f)
def f_retry(*args, **kwargs):
mtries, mdelay = tries, delay
while mtries > 1:
try:
return f(*args, **kwargs)
except ExceptionToCheck, e:
msg = "%s, Retrying in %d seconds..." % (str(e), mdelay)
if logger:
logger.warning(msg)
else:
print msg
time.sleep(mdelay)
mtries -= 1
mdelay *= backoff
return f(*args, **kwargs)
return f_retry # true decorator
return deco_retry
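# Illustrative sketch (added; the wrapped function below is hypothetical). With the
# defaults tries=4, delay=3 and backoff=2 a persistently failing call is attempted four
# times, sleeping 3s, 6s and 12s between attempts; the last attempt re-raises as usual.
@retry(tvdb_error, tries=4, delay=3, backoff=2)
def _example_flaky_request(session, url):
    """Illustrative only: any callable that may raise tvdb_error can be wrapped this way."""
    return session.get(url)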
class ShowContainer(dict):
"""Simple dict that holds a series of Show instances
"""
def __init__(self):
self._stack = []
self._lastgc = time.time()
def __setitem__(self, key, value):
self._stack.append(key)
        # keep only the latest 100 results
if time.time() - self._lastgc > 20:
for o in self._stack[:-100]:
del self[o]
self._stack = self._stack[-100:]
self._lastgc = time.time()
super(ShowContainer, self).__setitem__(key, value)
class Show(dict):
"""Holds a dict of seasons, and show data.
"""
def __init__(self):
dict.__init__(self)
self.data = {}
def __repr__(self):
return "<Show %s (containing %s seasons)>" % (
self.data.get(u'seriesname', 'instance'),
len(self)
)
def __getattr__(self, key):
if key in self:
# Key is an episode, return it
return self[key]
if key in self.data:
# Non-numeric request is for show-data
return self.data[key]
raise AttributeError
def __getitem__(self, key):
if key in self:
# Key is an episode, return it
return dict.__getitem__(self, key)
if key in self.data:
# Non-numeric request is for show-data
return dict.__getitem__(self.data, key)
# Data wasn't found, raise appropriate error
if isinstance(key, int) or key.isdigit():
# Episode number x was not found
raise tvdb_seasonnotfound("Could not find season %s" % (repr(key)))
else:
# If it's not numeric, it must be an attribute name, which
# doesn't exist, so attribute error.
raise tvdb_attributenotfound("Cannot find attribute %s" % (repr(key)))
def airedOn(self, date):
ret = self.search(str(date), 'firstaired')
if len(ret) == 0:
raise tvdb_episodenotfound("Could not find any episodes that aired on %s" % date)
return ret
def search(self, term=None, key=None):
"""
Search all episodes in show. Can search all data, or a specific key (for
example, episodename)
Always returns an array (can be empty). First index contains the first
match, and so on.
Each array index is an Episode() instance, so doing
search_results[0]['episodename'] will retrieve the episode name of the
first match.
Search terms are converted to lower case (unicode) strings.
# Examples
These examples assume t is an instance of Tvdb():
>>> t = Tvdb()
>>>
To search for all episodes of Scrubs with a bit of data
containing "my first day":
>>> t['Scrubs'].search("my first day")
[<Episode 01x01 - My First Day>]
>>>
Search for "My Name Is Earl" episode named "Faked His Own Death":
>>> t['My Name Is Earl'].search('Faked His Own Death', key = 'episodename')
[<Episode 01x04 - Faked His Own Death>]
>>>
To search Scrubs for all episodes with "mentor" in the episode name:
>>> t['scrubs'].search('mentor', key = 'episodename')
[<Episode 01x02 - My Mentor>, <Episode 03x15 - My Tormented Mentor>]
>>>
# Using search results
>>> results = t['Scrubs'].search("my first")
>>> print results[0]['episodename']
My First Day
>>> for x in results: print x['episodename']
My First Day
My First Step
My First Kill
>>>
"""
results = []
for cur_season in self.values():
searchresult = cur_season.search(term=term, key=key)
if len(searchresult) != 0:
results.extend(searchresult)
return results
class Season(dict):
def __init__(self, show=None):
"""The show attribute points to the parent show
"""
self.show = show
def __repr__(self):
return "<Season instance (containing %s episodes)>" % (
len(self.keys())
)
def __getattr__(self, episode_number):
if episode_number in self:
return self[episode_number]
raise AttributeError
def __getitem__(self, episode_number):
if episode_number not in self:
raise tvdb_episodenotfound("Could not find episode %s" % (repr(episode_number)))
else:
return dict.__getitem__(self, episode_number)
def search(self, term=None, key=None):
"""Search all episodes in season, returns a list of matching Episode
instances.
>>> t = Tvdb()
>>> t['scrubs'][1].search('first day')
[<Episode 01x01 - My First Day>]
>>>
See Show.search documentation for further information on search
"""
results = []
for ep in self.values():
searchresult = ep.search(term=term, key=key)
if searchresult is not None:
results.append(
searchresult
)
return results
class Episode(dict):
def __init__(self, season=None):
"""The season attribute points to the parent season
"""
self.season = season
def __repr__(self):
seasno = int(self.get(u'seasonnumber', 0))
epno = int(self.get(u'episodenumber', 0))
epname = self.get(u'episodename')
if epname is not None:
return "<Episode %02dx%02d - %s>" % (seasno, epno, epname)
else:
return "<Episode %02dx%02d>" % (seasno, epno)
def __getattr__(self, key):
if key in self:
return self[key]
raise AttributeError
def __getitem__(self, key):
try:
return dict.__getitem__(self, key)
except KeyError:
raise tvdb_attributenotfound("Cannot find attribute %s" % (repr(key)))
def search(self, term=None, key=None):
"""Search episode data for term, if it matches, return the Episode (self).
The key parameter can be used to limit the search to a specific element,
for example, episodename.
        This is primarily for use by Show.search and Season.search. See
Show.search for further information on search
Simple example:
>>> e = Episode()
>>> e['episodename'] = "An Example"
>>> e.search("examp")
<Episode 00x00 - An Example>
>>>
Limiting by key:
>>> e.search("examp", key = "episodename")
<Episode 00x00 - An Example>
>>>
"""
        if term is None:
raise TypeError("must supply string to search for (contents)")
term = unicode(term).lower()
for cur_key, cur_value in self.items():
cur_key, cur_value = unicode(cur_key).lower(), unicode(cur_value).lower()
if key is not None and cur_key != key:
# Do not search this key
continue
if cur_value.find(unicode(term).lower()) > -1:
return self
class Actors(list):
"""Holds all Actor instances for a show
"""
pass
class Actor(dict):
"""Represents a single actor. Should contain..
id,
image,
name,
role,
sortorder
"""
def __repr__(self):
return "<Actor \"%s\">" % (self.get("name"))
class Tvdb:
"""Create easy-to-use interface to name of season/episode name
>>> t = Tvdb()
>>> t['Scrubs'][1][24]['episodename']
u'My Last Day'
"""
def __init__(self,
interactive=False,
select_first=False,
debug=False,
cache=True,
banners=False,
actors=False,
custom_ui=None,
language=None,
search_all_languages=False,
apikey=None,
forceConnect=False,
useZip=False,
dvdorder=False,
proxy=None):
"""interactive (True/False):
            When True, the built-in console UI is used to select the correct show.
When False, the first search result is used.
select_first (True/False):
Automatically selects the first series search result (rather
than showing the user a list of more than one series).
Is overridden by interactive = False, or specifying a custom_ui
debug (True/False) DEPRECATED:
Replaced with proper use of logging module. To show debug messages:
>>> import logging
>>> logging.basicConfig(level = logging.DEBUG)
cache (True/False/str/unicode/urllib2 opener):
            Retrieved XML is persisted to disk. If True, it is stored in a
            tvdb_api folder under your system's TEMP_DIR; if set to a
str/unicode instance it will use this as the cache
location. If False, disables caching. Can also be passed
an arbitrary Python object, which is used as a urllib2
opener, which should be created by urllib2.build_opener
banners (True/False):
Retrieves the banners for a show. These are accessed
via the _banners key of a Show(), for example:
>>> Tvdb(banners=True)['scrubs']['_banners'].keys()
['fanart', 'poster', 'series', 'season']
actors (True/False):
Retrieves a list of the actors for a show. These are accessed
via the _actors key of a Show(), for example:
>>> t = Tvdb(actors=True)
>>> t['scrubs']['_actors'][0]['name']
u'Zach Braff'
custom_ui (tvdb_ui.BaseUI subclass):
A callable subclass of tvdb_ui.BaseUI (overrides interactive option)
language (2 character language abbreviation):
The language of the returned data. Is also the language search
uses. Default is "en" (English). For full list, run..
>>> Tvdb().config['valid_languages'] #doctest: +ELLIPSIS
['da', 'fi', 'nl', ...]
search_all_languages (True/False):
By default, Tvdb will only search in the language specified using
the language option. When this is True, it will search for the
            show in any language
apikey (str/unicode):
Override the default thetvdb.com API key. By default it will use
tvdb_api's own key (fine for small scripts), but you can use your
own key if desired - this is recommended if you are embedding
            tvdb_api in a larger application.
See http://thetvdb.com/?tab=apiregister to get your own key
forceConnect (bool):
If true it will always try to connect to theTVDB.com even if we
recently timed out. By default it will wait one minute before
trying again, and any requests within that one minute window will
return an exception immediately.
useZip (bool):
            Download the zip archive where possible, instead of the xml.
            This is only used when all episodes are pulled.
            Only the main language xml is used; the actor and banner xml are lost.
"""
self.shows = ShowContainer() # Holds all Show classes
self.corrections = {} # Holds show-name to show_id mapping
self.config = {}
if apikey is not None:
self.config['apikey'] = apikey
else:
self.config['apikey'] = "0629B785CE550C8D" # tvdb_api's API key
self.config['debug_enabled'] = debug # show debugging messages
self.config['custom_ui'] = custom_ui
self.config['interactive'] = interactive # prompt for correct series?
self.config['select_first'] = select_first
self.config['search_all_languages'] = search_all_languages
self.config['useZip'] = useZip
self.config['dvdorder'] = dvdorder
self.config['proxy'] = proxy
if cache is True:
self.config['cache_enabled'] = True
self.config['cache_location'] = self._getTempDir()
elif cache is False:
self.config['cache_enabled'] = False
elif isinstance(cache, basestring):
self.config['cache_enabled'] = True
self.config['cache_location'] = cache
else:
raise ValueError("Invalid value for Cache %r (type was %s)" % (cache, type(cache)))
self.config['session'] = requests.Session()
self.config['banners_enabled'] = banners
self.config['actors_enabled'] = actors
if self.config['debug_enabled']:
warnings.warn("The debug argument to tvdb_api.__init__ will be removed in the next version. "
"To enable debug messages, use the following code before importing: "
"import logging; logging.basicConfig(level=logging.DEBUG)")
logging.basicConfig(level=logging.DEBUG)
        # List of languages from http://thetvdb.com/api/0629B785CE550C8D/languages.xml
        # Hard-coded here as it is relatively static, and saves another HTTP request, as
# recommended on http://thetvdb.com/wiki/index.php/API:languages.xml
self.config['valid_languages'] = [
"da", "fi", "nl", "de", "it", "es", "fr", "pl", "hu", "el", "tr",
"ru", "he", "ja", "pt", "zh", "cs", "sl", "hr", "ko", "en", "sv", "no"
]
# thetvdb.com should be based around numeric language codes,
# but to link to a series like http://thetvdb.com/?tab=series&id=79349&lid=16
# requires the language ID, thus this mapping is required (mainly
# for usage in tvdb_ui - internally tvdb_api will use the language abbreviations)
self.config['langabbv_to_id'] = {'el': 20, 'en': 7, 'zh': 27,
'it': 15, 'cs': 28, 'es': 16, 'ru': 22, 'nl': 13, 'pt': 26, 'no': 9,
'tr': 21, 'pl': 18, 'fr': 17, 'hr': 31, 'de': 14, 'da': 10, 'fi': 11,
'hu': 19, 'ja': 25, 'he': 24, 'ko': 32, 'sv': 8, 'sl': 30}
if language is None:
self.config['language'] = 'en'
else:
if language not in self.config['valid_languages']:
raise ValueError("Invalid language %s, options are: %s" % (
language, self.config['valid_languages']
))
else:
self.config['language'] = language
# The following url_ configs are based of the
# http://thetvdb.com/wiki/index.php/Programmers_API
self.config['base_url'] = "http://thetvdb.com"
if self.config['search_all_languages']:
self.config['url_getSeries'] = u"%(base_url)s/api/GetSeries.php" % self.config
self.config['params_getSeries'] = {"seriesname": "", "language": "all"}
else:
self.config['url_getSeries'] = u"%(base_url)s/api/GetSeries.php" % self.config
self.config['params_getSeries'] = {"seriesname": "", "language": self.config['language']}
self.config['url_epInfo'] = u"%(base_url)s/api/%(apikey)s/series/%%s/all/%%s.xml" % self.config
self.config['url_epInfo_zip'] = u"%(base_url)s/api/%(apikey)s/series/%%s/all/%%s.zip" % self.config
self.config['url_seriesInfo'] = u"%(base_url)s/api/%(apikey)s/series/%%s/%%s.xml" % self.config
self.config['url_actorsInfo'] = u"%(base_url)s/api/%(apikey)s/series/%%s/actors.xml" % self.config
self.config['url_seriesBanner'] = u"%(base_url)s/api/%(apikey)s/series/%%s/banners.xml" % self.config
self.config['url_artworkPrefix'] = u"%(base_url)s/banners/%%s" % self.config
self.config['url_updates_all'] = u"%(base_url)s/api/%(apikey)s/updates_all.zip" % self.config
self.config['url_updates_month'] = u"%(base_url)s/api/%(apikey)s/updates_month.zip" % self.config
self.config['url_updates_week'] = u"%(base_url)s/api/%(apikey)s/updates_week.zip" % self.config
self.config['url_updates_day'] = u"%(base_url)s/api/%(apikey)s/updates_day.zip" % self.config
def _getTempDir(self):
"""Returns the [system temp dir]/tvdb_api-u501 (or
tvdb_api-myuser)
"""
if hasattr(os, 'getuid'):
uid = "u%d" % (os.getuid())
else:
# For Windows
try:
uid = getpass.getuser()
except ImportError:
return os.path.join(tempfile.gettempdir(), "tvdb_api")
return os.path.join(tempfile.gettempdir(), "tvdb_api-%s" % (uid))
@retry(tvdb_error)
def _loadUrl(self, url, params=None, language=None):
try:
log().debug("Retrieving URL %s" % url)
# get response from TVDB
if self.config['cache_enabled']:
session = CacheControl(sess=self.config['session'], cache=caches.FileCache(self.config['cache_location'], use_dir_lock=True), cache_etags=False)
if self.config['proxy']:
log().debug("Using proxy for URL: %s" % url)
session.proxies = {
"http": self.config['proxy'],
"https": self.config['proxy'],
}
resp = session.get(url.strip(), params=params)
else:
resp = requests.get(url.strip(), params=params)
resp.raise_for_status()
except requests.exceptions.HTTPError, e:
raise tvdb_error("HTTP error " + str(e.errno) + " while loading URL " + str(url))
except requests.exceptions.ConnectionError, e:
raise tvdb_error("Connection error " + str(e.message) + " while loading URL " + str(url))
except requests.exceptions.Timeout, e:
raise tvdb_error("Connection timed out " + str(e.message) + " while loading URL " + str(url))
except Exception as e:
raise tvdb_error("Unknown exception while loading URL " + url + ": " + repr(e))
def process(path, key, value):
key = key.lower()
# clean up value and do type changes
if value:
try:
if key == 'firstaired' and value in "0000-00-00":
new_value = str(dt.date.fromordinal(1))
new_value = re.sub("([-]0{2}){1,}", "", new_value)
fixDate = parse(new_value, fuzzy=True).date()
value = fixDate.strftime("%Y-%m-%d")
elif key == 'firstaired':
value = parse(value, fuzzy=True).date()
value = value.strftime("%Y-%m-%d")
#if key == 'airs_time':
# value = parse(value).time()
# value = value.strftime("%I:%M %p")
except:
pass
return key, value
if 'application/zip' in resp.headers.get("Content-Type", ''):
try:
log().debug("We recived a zip file unpacking now ...")
zipdata = StringIO.StringIO()
zipdata.write(resp.content)
myzipfile = zipfile.ZipFile(zipdata)
return xmltodict.parse(myzipfile.read('%s.xml' % language), postprocessor=process)
except zipfile.BadZipfile:
raise tvdb_error("Bad zip file received from thetvdb.com, could not read it")
else:
try:
return xmltodict.parse(resp.content.decode('utf-8'), postprocessor=process)
except:
return dict([(u'data', None)])
def _getetsrc(self, url, params=None, language=None):
"""Loads a URL using caching, returns an ElementTree of the source
"""
try:
return self._loadUrl(url, params=params, language=language).values()[0]
except Exception, e:
raise tvdb_error(e)
def _setItem(self, sid, seas, ep, attrib, value):
"""Creates a new episode, creating Show(), Season() and
Episode()s as required. Called by _getShowData to populate show
        Since the nice-to-use tvdb[1][24]['name'] interface
        makes it impossible to do tvdb[1][24]['name'] = "name"
and still be capable of checking if an episode exists
so we can raise tvdb_shownotfound, we have a slightly
less pretty method of setting items.. but since the API
is supposed to be read-only, this is the best way to
do it!
The problem is that calling tvdb[1][24]['episodename'] = "name"
calls __getitem__ on tvdb[1], there is no way to check if
tvdb.__dict__ should have a key "1" before we auto-create it
"""
if sid not in self.shows:
self.shows[sid] = Show()
if seas not in self.shows[sid]:
self.shows[sid][seas] = Season(show=self.shows[sid])
if ep not in self.shows[sid][seas]:
self.shows[sid][seas][ep] = Episode(season=self.shows[sid][seas])
self.shows[sid][seas][ep][attrib] = value
def _setShowData(self, sid, key, value):
"""Sets self.shows[sid] to a new Show instance, or sets the data
"""
if sid not in self.shows:
self.shows[sid] = Show()
self.shows[sid].data[key] = value
def _cleanData(self, data):
"""Cleans up strings returned by TheTVDB.com
Issues corrected:
        - Replaces &amp; with &
- Trailing whitespace
"""
        data = unicode(data).replace(u"&amp;", u"&")
data = data.strip()
return data
def search(self, series):
"""This searches TheTVDB.com for the series name
and returns the result list
"""
series = series.encode("utf-8")
log().debug("Searching for show %s" % series)
self.config['params_getSeries']['seriesname'] = series
results = self._getetsrc(self.config['url_getSeries'], self.config['params_getSeries'])
if not results:
return
return results.values()[0]
def _getSeries(self, series):
"""This searches TheTVDB.com for the series name,
If a custom_ui UI is configured, it uses this to select the correct
series. If not, and interactive == True, ConsoleUI is used, if not
BaseUI is used to select the first result.
"""
allSeries = self.search(series)
if not allSeries:
log().debug('Series result returned zero')
raise tvdb_shownotfound("Show search returned zero results (cannot find show on TVDB)")
if not isinstance(allSeries, list):
allSeries = [allSeries]
if self.config['custom_ui'] is not None:
log().debug("Using custom UI %s" % (repr(self.config['custom_ui'])))
CustomUI = self.config['custom_ui']
ui = CustomUI(config=self.config)
else:
if not self.config['interactive']:
log().debug('Auto-selecting first search result using BaseUI')
ui = BaseUI(config=self.config)
else:
log().debug('Interactively selecting show using ConsoleUI')
ui = ConsoleUI(config=self.config)
return ui.selectSeries(allSeries)
def _parseBanners(self, sid):
"""Parses banners XML, from
http://thetvdb.com/api/[APIKEY]/series/[SERIES ID]/banners.xml
        Banners are retrieved using t['show name']['_banners'], for example:
>>> t = Tvdb(banners = True)
>>> t['scrubs']['_banners'].keys()
['fanart', 'poster', 'series', 'season']
>>> t['scrubs']['_banners']['poster']['680x1000']['35308']['_bannerpath']
u'http://thetvdb.com/banners/posters/76156-2.jpg'
>>>
Any key starting with an underscore has been processed (not the raw
data from the XML)
This interface will be improved in future versions.
"""
log().debug('Getting season banners for %s' % (sid))
bannersEt = self._getetsrc(self.config['url_seriesBanner'] % (sid))
if not bannersEt:
log().debug('Banners result returned zero')
return
banners = {}
for cur_banner in bannersEt['banner'] if isinstance(bannersEt['banner'], list) else [bannersEt['banner']]:
bid = cur_banner['id']
btype = cur_banner['bannertype']
btype2 = cur_banner['bannertype2']
if btype is None or btype2 is None:
continue
            if btype not in banners:
                banners[btype] = {}
            if btype2 not in banners[btype]:
                banners[btype][btype2] = {}
            if bid not in banners[btype][btype2]:
banners[btype][btype2][bid] = {}
for k, v in cur_banner.items():
if k is None or v is None:
continue
k, v = k.lower(), v.lower()
banners[btype][btype2][bid][k] = v
for k, v in banners[btype][btype2][bid].items():
if k.endswith("path"):
new_key = "_%s" % (k)
log().debug("Transforming %s to %s" % (k, new_key))
new_url = self.config['url_artworkPrefix'] % (v)
banners[btype][btype2][bid][new_key] = new_url
self._setShowData(sid, "_banners", banners)
def _parseActors(self, sid):
"""Parsers actors XML, from
http://thetvdb.com/api/[APIKEY]/series/[SERIES ID]/actors.xml
        Actors are retrieved using t['show name']['_actors'], for example:
>>> t = Tvdb(actors = True)
>>> actors = t['scrubs']['_actors']
>>> type(actors)
<class 'tvdb_api.Actors'>
>>> type(actors[0])
<class 'tvdb_api.Actor'>
>>> actors[0]
<Actor "Zach Braff">
>>> sorted(actors[0].keys())
['id', 'image', 'name', 'role', 'sortorder']
>>> actors[0]['name']
u'Zach Braff'
>>> actors[0]['image']
u'http://thetvdb.com/banners/actors/43640.jpg'
Any key starting with an underscore has been processed (not the raw
data from the XML)
"""
log().debug("Getting actors for %s" % (sid))
actorsEt = self._getetsrc(self.config['url_actorsInfo'] % (sid))
if not actorsEt:
log().debug('Actors result returned zero')
return
cur_actors = Actors()
for cur_actor in actorsEt['actor'] if isinstance(actorsEt['actor'], list) else [actorsEt['actor']]:
curActor = Actor()
for k, v in cur_actor.items():
if k is None or v is None:
continue
k = k.lower()
if k == "image":
v = self.config['url_artworkPrefix'] % (v)
else:
v = self._cleanData(v)
curActor[k] = v
cur_actors.append(curActor)
self._setShowData(sid, '_actors', cur_actors)
def _getShowData(self, sid, language, getEpInfo=False):
"""Takes a series ID, gets the epInfo URL and parses the TVDB
XML file into the shows dict in layout:
shows[series_id][season_number][episode_number]
"""
if self.config['language'] is None:
log().debug('Config language is none, using show language')
if language is None:
raise tvdb_error("config['language'] was None, this should not happen")
getShowInLanguage = language
else:
log().debug(
                'Configured language %s overrides show language of %s' % (
self.config['language'],
language
)
)
getShowInLanguage = self.config['language']
# Parse show information
log().debug('Getting all series data for %s' % (sid))
seriesInfoEt = self._getetsrc(
self.config['url_seriesInfo'] % (sid, getShowInLanguage)
)
if not seriesInfoEt:
log().debug('Series result returned zero')
raise tvdb_error("Series result returned zero")
# get series data
for k, v in seriesInfoEt['series'].items():
if v is not None:
if k in ['banner', 'fanart', 'poster']:
v = self.config['url_artworkPrefix'] % (v)
else:
v = self._cleanData(v)
self._setShowData(sid, k, v)
# get episode data
if getEpInfo:
# Parse banners
if self.config['banners_enabled']:
self._parseBanners(sid)
# Parse actors
if self.config['actors_enabled']:
self._parseActors(sid)
# Parse episode data
log().debug('Getting all episodes of %s' % (sid))
if self.config['useZip']:
url = self.config['url_epInfo_zip'] % (sid, language)
else:
url = self.config['url_epInfo'] % (sid, language)
epsEt = self._getetsrc(url, language=language)
if not epsEt:
log().debug('Series results incomplete')
raise tvdb_showincomplete("Show search returned incomplete results (cannot find complete show on TVDB)")
if 'episode' not in epsEt:
return False
episodes = epsEt["episode"]
if not isinstance(episodes, list):
episodes = [episodes]
for cur_ep in episodes:
if self.config['dvdorder']:
log().debug('Using DVD ordering.')
                use_dvd = cur_ep['dvd_season'] is not None and cur_ep['dvd_episodenumber'] is not None
else:
use_dvd = False
if use_dvd:
seasnum, epno = cur_ep['dvd_season'], cur_ep['dvd_episodenumber']
else:
seasnum, epno = cur_ep['seasonnumber'], cur_ep['episodenumber']
if seasnum is None or epno is None:
log().warning("An episode has incomplete season/episode number (season: %r, episode: %r)" % (
seasnum, epno))
continue # Skip to next episode
# float() is because https://github.com/dbr/tvnamer/issues/95 - should probably be fixed in TVDB data
seas_no = int(float(seasnum))
ep_no = int(float(epno))
for k, v in cur_ep.items():
k = k.lower()
if v is not None:
if k == 'filename':
v = self.config['url_artworkPrefix'] % (v)
else:
v = self._cleanData(v)
self._setItem(sid, seas_no, ep_no, k, v)
return True
def _nameToSid(self, name):
"""Takes show name, returns the correct series ID (if the show has
already been grabbed), or grabs all episodes and returns
the correct SID.
"""
if name in self.corrections:
log().debug('Correcting %s to %s' % (name, self.corrections[name]))
return self.corrections[name]
else:
log().debug('Getting show %s' % (name))
selected_series = self._getSeries(name)
if isinstance(selected_series, dict):
selected_series = [selected_series]
sids = list(int(x['id']) for x in selected_series if
self._getShowData(int(x['id']), self.config['language']))
self.corrections.update(dict((x['seriesname'], int(x['id'])) for x in selected_series))
return sids
def __getitem__(self, key):
"""Handles tvdb_instance['seriesname'] calls.
The dict index should be the show id
"""
if isinstance(key, (int, long)):
# Item is integer, treat as show id
if key not in self.shows:
self._getShowData(key, self.config['language'], True)
return self.shows[key]
key = str(key).lower()
self.config['searchterm'] = key
selected_series = self._getSeries(key)
if isinstance(selected_series, dict):
selected_series = [selected_series]
[[self._setShowData(show['id'], k, v) for k, v in show.items()] for show in selected_series]
return selected_series
def __repr__(self):
return str(self.shows)
def main():
"""Simple example of using tvdb_api - it just
grabs an episode name interactively.
"""
import logging
logging.basicConfig(level=logging.DEBUG)
tvdb_instance = Tvdb(interactive=True, cache=False)
print tvdb_instance['Lost']['seriesname']
print tvdb_instance['Lost'][1][4]['episodename']
if __name__ == '__main__':
main()
|
gpl-3.0
|
wldtyp/flask
|
tests/test_helpers.py
|
1
|
27016
|
# -*- coding: utf-8 -*-
"""
tests.helpers
~~~~~~~~~~~~~~~~~~~~~~~
Various helpers.
:copyright: (c) 2015 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import pytest
import os
import datetime
import flask
from logging import StreamHandler
from werkzeug.exceptions import BadRequest
from werkzeug.http import parse_cache_control_header, parse_options_header
from werkzeug.http import http_date
from flask._compat import StringIO, text_type
def has_encoding(name):
try:
import codecs
codecs.lookup(name)
return True
except LookupError:
return False
class TestJSON(object):
def test_post_empty_json_adds_exception_to_response_content_in_debug(self):
app = flask.Flask(__name__)
app.config['DEBUG'] = True
@app.route('/json', methods=['POST'])
def post_json():
flask.request.get_json()
return None
c = app.test_client()
rv = c.post('/json', data=None, content_type='application/json')
assert rv.status_code == 400
assert b'Failed to decode JSON object' in rv.data
def test_post_empty_json_wont_add_exception_to_response_if_no_debug(self):
app = flask.Flask(__name__)
app.config['DEBUG'] = False
@app.route('/json', methods=['POST'])
def post_json():
flask.request.get_json()
return None
c = app.test_client()
rv = c.post('/json', data=None, content_type='application/json')
assert rv.status_code == 400
assert b'Failed to decode JSON object' not in rv.data
def test_json_bad_requests(self):
app = flask.Flask(__name__)
@app.route('/json', methods=['POST'])
def return_json():
return flask.jsonify(foo=text_type(flask.request.get_json()))
c = app.test_client()
rv = c.post('/json', data='malformed', content_type='application/json')
assert rv.status_code == 400
def test_json_custom_mimetypes(self):
app = flask.Flask(__name__)
@app.route('/json', methods=['POST'])
def return_json():
return flask.request.get_json()
c = app.test_client()
rv = c.post('/json', data='"foo"', content_type='application/x+json')
assert rv.data == b'foo'
def test_json_body_encoding(self):
app = flask.Flask(__name__)
app.testing = True
@app.route('/')
def index():
return flask.request.get_json()
c = app.test_client()
        resp = c.get('/', data=u'"Hällo Wörld"'.encode('iso-8859-15'),
                     content_type='application/json; charset=iso-8859-15')
        assert resp.data == u'Hällo Wörld'.encode('utf-8')
def test_json_as_unicode(self):
app = flask.Flask(__name__)
app.config['JSON_AS_ASCII'] = True
with app.app_context():
rv = flask.json.dumps(u'\N{SNOWMAN}')
assert rv == '"\\u2603"'
app.config['JSON_AS_ASCII'] = False
with app.app_context():
rv = flask.json.dumps(u'\N{SNOWMAN}')
assert rv == u'"\u2603"'
def test_jsonify_basic_types(self):
"""Test jsonify with basic types."""
# Should be able to use pytest parametrize on this, but I couldn't
# figure out the correct syntax
# https://pytest.org/latest/parametrize.html#pytest-mark-parametrize-parametrizing-test-functions
test_data = (0, 1, 23, 3.14, 's', "longer string", True, False,)
app = flask.Flask(__name__)
c = app.test_client()
for i, d in enumerate(test_data):
url = '/jsonify_basic_types{0}'.format(i)
app.add_url_rule(url, str(i), lambda x=d: flask.jsonify(x))
rv = c.get(url)
assert rv.mimetype == 'application/json'
assert flask.json.loads(rv.data) == d
def test_jsonify_dicts(self):
"""Test jsonify with dicts and kwargs unpacking."""
d = dict(
a=0, b=23, c=3.14, d='t', e='Hi', f=True, g=False,
h=['test list', 10, False],
i={'test':'dict'}
)
app = flask.Flask(__name__)
@app.route('/kw')
def return_kwargs():
return flask.jsonify(**d)
@app.route('/dict')
def return_dict():
return flask.jsonify(d)
c = app.test_client()
for url in '/kw', '/dict':
rv = c.get(url)
assert rv.mimetype == 'application/json'
assert flask.json.loads(rv.data) == d
def test_jsonify_arrays(self):
"""Test jsonify of lists and args unpacking."""
l = [
0, 42, 3.14, 't', 'hello', True, False,
['test list', 2, False],
{'test':'dict'}
]
app = flask.Flask(__name__)
@app.route('/args_unpack')
def return_args_unpack():
return flask.jsonify(*l)
@app.route('/array')
def return_array():
return flask.jsonify(l)
c = app.test_client()
for url in '/args_unpack', '/array':
rv = c.get(url)
assert rv.mimetype == 'application/json'
assert flask.json.loads(rv.data) == l
def test_jsonify_date_types(self):
"""Test jsonify with datetime.date and datetime.datetime types."""
test_dates = (
datetime.datetime(1973, 3, 11, 6, 30, 45),
datetime.date(1975, 1, 5)
)
app = flask.Flask(__name__)
c = app.test_client()
for i, d in enumerate(test_dates):
url = '/datetest{0}'.format(i)
app.add_url_rule(url, str(i), lambda val=d: flask.jsonify(x=val))
rv = c.get(url)
assert rv.mimetype == 'application/json'
assert flask.json.loads(rv.data)['x'] == http_date(d.timetuple())
def test_json_attr(self):
app = flask.Flask(__name__)
@app.route('/add', methods=['POST'])
def add():
json = flask.request.get_json()
return text_type(json['a'] + json['b'])
c = app.test_client()
rv = c.post('/add', data=flask.json.dumps({'a': 1, 'b': 2}),
content_type='application/json')
assert rv.data == b'3'
def test_template_escaping(self):
app = flask.Flask(__name__)
render = flask.render_template_string
with app.test_request_context():
rv = flask.json.htmlsafe_dumps('</script>')
assert rv == u'"\\u003c/script\\u003e"'
assert type(rv) == text_type
rv = render('{{ "</script>"|tojson }}')
assert rv == '"\\u003c/script\\u003e"'
rv = render('{{ "<\0/script>"|tojson }}')
assert rv == '"\\u003c\\u0000/script\\u003e"'
rv = render('{{ "<!--<script>"|tojson }}')
assert rv == '"\\u003c!--\\u003cscript\\u003e"'
rv = render('{{ "&"|tojson }}')
assert rv == '"\\u0026"'
rv = render('{{ "\'"|tojson }}')
assert rv == '"\\u0027"'
rv = render("<a ng-data='{{ data|tojson }}'></a>",
data={'x': ["foo", "bar", "baz'"]})
assert rv == '<a ng-data=\'{"x": ["foo", "bar", "baz\\u0027"]}\'></a>'
def test_json_customization(self):
class X(object):
def __init__(self, val):
self.val = val
class MyEncoder(flask.json.JSONEncoder):
def default(self, o):
if isinstance(o, X):
return '<%d>' % o.val
return flask.json.JSONEncoder.default(self, o)
class MyDecoder(flask.json.JSONDecoder):
def __init__(self, *args, **kwargs):
kwargs.setdefault('object_hook', self.object_hook)
flask.json.JSONDecoder.__init__(self, *args, **kwargs)
def object_hook(self, obj):
if len(obj) == 1 and '_foo' in obj:
return X(obj['_foo'])
return obj
app = flask.Flask(__name__)
app.testing = True
app.json_encoder = MyEncoder
app.json_decoder = MyDecoder
@app.route('/', methods=['POST'])
def index():
return flask.json.dumps(flask.request.get_json()['x'])
c = app.test_client()
rv = c.post('/', data=flask.json.dumps({
'x': {'_foo': 42}
}), content_type='application/json')
assert rv.data == b'"<42>"'
def test_modified_url_encoding(self):
class ModifiedRequest(flask.Request):
url_charset = 'euc-kr'
app = flask.Flask(__name__)
app.testing = True
app.request_class = ModifiedRequest
app.url_map.charset = 'euc-kr'
@app.route('/')
def index():
return flask.request.args['foo']
        rv = app.test_client().get(u'/?foo=정상처리'.encode('euc-kr'))
        assert rv.status_code == 200
        assert rv.data == u'정상처리'.encode('utf-8')
if not has_encoding('euc-kr'):
test_modified_url_encoding = None
def test_json_key_sorting(self):
app = flask.Flask(__name__)
app.testing = True
assert app.config['JSON_SORT_KEYS'] == True
d = dict.fromkeys(range(20), 'foo')
@app.route('/')
def index():
return flask.jsonify(values=d)
c = app.test_client()
rv = c.get('/')
lines = [x.strip() for x in rv.data.strip().decode('utf-8').splitlines()]
sorted_by_str = [
'{',
'"values": {',
'"0": "foo",',
'"1": "foo",',
'"10": "foo",',
'"11": "foo",',
'"12": "foo",',
'"13": "foo",',
'"14": "foo",',
'"15": "foo",',
'"16": "foo",',
'"17": "foo",',
'"18": "foo",',
'"19": "foo",',
'"2": "foo",',
'"3": "foo",',
'"4": "foo",',
'"5": "foo",',
'"6": "foo",',
'"7": "foo",',
'"8": "foo",',
'"9": "foo"',
'}',
'}'
]
sorted_by_int = [
'{',
'"values": {',
'"0": "foo",',
'"1": "foo",',
'"2": "foo",',
'"3": "foo",',
'"4": "foo",',
'"5": "foo",',
'"6": "foo",',
'"7": "foo",',
'"8": "foo",',
'"9": "foo",',
'"10": "foo",',
'"11": "foo",',
'"12": "foo",',
'"13": "foo",',
'"14": "foo",',
'"15": "foo",',
'"16": "foo",',
'"17": "foo",',
'"18": "foo",',
'"19": "foo"',
'}',
'}'
]
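        # Either ordering below is accepted on purpose: with JSON_SORT_KEYS enabled the
        # integer keys may be sorted numerically or by their string form, depending on
        # the underlying JSON implementation, so both serializations are valid here.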
try:
assert lines == sorted_by_int
except AssertionError:
assert lines == sorted_by_str
class TestSendfile(object):
def test_send_file_regular(self):
app = flask.Flask(__name__)
with app.test_request_context():
rv = flask.send_file('static/index.html')
assert rv.direct_passthrough
assert rv.mimetype == 'text/html'
with app.open_resource('static/index.html') as f:
rv.direct_passthrough = False
assert rv.data == f.read()
rv.close()
def test_send_file_xsendfile(self):
app = flask.Flask(__name__)
app.use_x_sendfile = True
with app.test_request_context():
rv = flask.send_file('static/index.html')
assert rv.direct_passthrough
assert 'x-sendfile' in rv.headers
assert rv.headers['x-sendfile'] == \
os.path.join(app.root_path, 'static/index.html')
assert rv.mimetype == 'text/html'
rv.close()
def test_send_file_object(self, catch_deprecation_warnings):
app = flask.Flask(__name__)
with catch_deprecation_warnings() as captured:
with app.test_request_context():
f = open(os.path.join(app.root_path, 'static/index.html'), mode='rb')
rv = flask.send_file(f)
rv.direct_passthrough = False
with app.open_resource('static/index.html') as f:
assert rv.data == f.read()
assert rv.mimetype == 'text/html'
rv.close()
# mimetypes + etag
assert len(captured) == 2
app.use_x_sendfile = True
with catch_deprecation_warnings() as captured:
with app.test_request_context():
f = open(os.path.join(app.root_path, 'static/index.html'))
rv = flask.send_file(f)
assert rv.mimetype == 'text/html'
assert 'x-sendfile' in rv.headers
assert rv.headers['x-sendfile'] == \
os.path.join(app.root_path, 'static/index.html')
rv.close()
# mimetypes + etag
assert len(captured) == 2
app.use_x_sendfile = False
with app.test_request_context():
with catch_deprecation_warnings() as captured:
f = StringIO('Test')
rv = flask.send_file(f)
rv.direct_passthrough = False
assert rv.data == b'Test'
assert rv.mimetype == 'application/octet-stream'
rv.close()
# etags
assert len(captured) == 1
with catch_deprecation_warnings() as captured:
class PyStringIO(object):
def __init__(self, *args, **kwargs):
self._io = StringIO(*args, **kwargs)
def __getattr__(self, name):
return getattr(self._io, name)
f = PyStringIO('Test')
f.name = 'test.txt'
rv = flask.send_file(f)
rv.direct_passthrough = False
assert rv.data == b'Test'
assert rv.mimetype == 'text/plain'
rv.close()
# attachment_filename and etags
assert len(captured) == 3
with catch_deprecation_warnings() as captured:
f = StringIO('Test')
rv = flask.send_file(f, mimetype='text/plain')
rv.direct_passthrough = False
assert rv.data == b'Test'
assert rv.mimetype == 'text/plain'
rv.close()
# etags
assert len(captured) == 1
app.use_x_sendfile = True
with catch_deprecation_warnings() as captured:
with app.test_request_context():
f = StringIO('Test')
rv = flask.send_file(f)
assert 'x-sendfile' not in rv.headers
rv.close()
# etags
assert len(captured) == 1
def test_attachment(self, catch_deprecation_warnings):
app = flask.Flask(__name__)
with catch_deprecation_warnings() as captured:
with app.test_request_context():
f = open(os.path.join(app.root_path, 'static/index.html'))
rv = flask.send_file(f, as_attachment=True)
value, options = parse_options_header(rv.headers['Content-Disposition'])
assert value == 'attachment'
rv.close()
# mimetypes + etag
assert len(captured) == 2
with app.test_request_context():
assert options['filename'] == 'index.html'
rv = flask.send_file('static/index.html', as_attachment=True)
value, options = parse_options_header(rv.headers['Content-Disposition'])
assert value == 'attachment'
assert options['filename'] == 'index.html'
rv.close()
with app.test_request_context():
rv = flask.send_file(StringIO('Test'), as_attachment=True,
attachment_filename='index.txt',
add_etags=False)
assert rv.mimetype == 'text/plain'
value, options = parse_options_header(rv.headers['Content-Disposition'])
assert value == 'attachment'
assert options['filename'] == 'index.txt'
rv.close()
def test_static_file(self):
app = flask.Flask(__name__)
# default cache timeout is 12 hours
with app.test_request_context():
# Test with static file handler.
rv = app.send_static_file('index.html')
cc = parse_cache_control_header(rv.headers['Cache-Control'])
assert cc.max_age == 12 * 60 * 60
rv.close()
# Test again with direct use of send_file utility.
rv = flask.send_file('static/index.html')
cc = parse_cache_control_header(rv.headers['Cache-Control'])
assert cc.max_age == 12 * 60 * 60
rv.close()
app.config['SEND_FILE_MAX_AGE_DEFAULT'] = 3600
with app.test_request_context():
# Test with static file handler.
rv = app.send_static_file('index.html')
cc = parse_cache_control_header(rv.headers['Cache-Control'])
assert cc.max_age == 3600
rv.close()
# Test again with direct use of send_file utility.
rv = flask.send_file('static/index.html')
cc = parse_cache_control_header(rv.headers['Cache-Control'])
assert cc.max_age == 3600
rv.close()
class StaticFileApp(flask.Flask):
def get_send_file_max_age(self, filename):
return 10
app = StaticFileApp(__name__)
with app.test_request_context():
# Test with static file handler.
rv = app.send_static_file('index.html')
cc = parse_cache_control_header(rv.headers['Cache-Control'])
assert cc.max_age == 10
rv.close()
# Test again with direct use of send_file utility.
rv = flask.send_file('static/index.html')
cc = parse_cache_control_header(rv.headers['Cache-Control'])
assert cc.max_age == 10
rv.close()
def test_send_from_directory(self):
app = flask.Flask(__name__)
app.testing = True
app.root_path = os.path.join(os.path.dirname(__file__),
'test_apps', 'subdomaintestmodule')
with app.test_request_context():
rv = flask.send_from_directory('static', 'hello.txt')
rv.direct_passthrough = False
assert rv.data.strip() == b'Hello Subdomain'
rv.close()
def test_send_from_directory_bad_request(self):
app = flask.Flask(__name__)
app.testing = True
app.root_path = os.path.join(os.path.dirname(__file__),
'test_apps', 'subdomaintestmodule')
with app.test_request_context():
with pytest.raises(BadRequest):
flask.send_from_directory('static', 'bad\x00')
class TestLogging(object):
def test_logger_cache(self):
app = flask.Flask(__name__)
logger1 = app.logger
assert app.logger is logger1
assert logger1.name == __name__
app.logger_name = __name__ + '/test_logger_cache'
assert app.logger is not logger1
def test_debug_log(self, capsys):
app = flask.Flask(__name__)
app.debug = True
@app.route('/')
def index():
app.logger.warning('the standard library is dead')
app.logger.debug('this is a debug statement')
return ''
@app.route('/exc')
def exc():
1 // 0
with app.test_client() as c:
c.get('/')
out, err = capsys.readouterr()
assert 'WARNING in test_helpers [' in err
assert os.path.basename(__file__.rsplit('.', 1)[0] + '.py') in err
assert 'the standard library is dead' in err
assert 'this is a debug statement' in err
with pytest.raises(ZeroDivisionError):
c.get('/exc')
def test_debug_log_override(self):
app = flask.Flask(__name__)
app.debug = True
app.logger_name = 'flask_tests/test_debug_log_override'
app.logger.level = 10
assert app.logger.level == 10
def test_exception_logging(self):
out = StringIO()
app = flask.Flask(__name__)
app.config['LOGGER_HANDLER_POLICY'] = 'never'
app.logger_name = 'flask_tests/test_exception_logging'
app.logger.addHandler(StreamHandler(out))
@app.route('/')
def index():
1 // 0
rv = app.test_client().get('/')
assert rv.status_code == 500
assert b'Internal Server Error' in rv.data
err = out.getvalue()
assert 'Exception on / [GET]' in err
assert 'Traceback (most recent call last):' in err
assert '1 // 0' in err
assert 'ZeroDivisionError:' in err
def test_processor_exceptions(self):
app = flask.Flask(__name__)
app.config['LOGGER_HANDLER_POLICY'] = 'never'
@app.before_request
def before_request():
if trigger == 'before':
1 // 0
@app.after_request
def after_request(response):
if trigger == 'after':
1 // 0
return response
@app.route('/')
def index():
return 'Foo'
@app.errorhandler(500)
def internal_server_error(e):
return 'Hello Server Error', 500
for trigger in 'before', 'after':
rv = app.test_client().get('/')
assert rv.status_code == 500
assert rv.data == b'Hello Server Error'
def test_url_for_with_anchor(self):
app = flask.Flask(__name__)
@app.route('/')
def index():
return '42'
with app.test_request_context():
assert flask.url_for('index', _anchor='x y') == '/#x%20y'
def test_url_for_with_scheme(self):
app = flask.Flask(__name__)
@app.route('/')
def index():
return '42'
with app.test_request_context():
assert flask.url_for('index', _external=True, _scheme='https') == 'https://localhost/'
def test_url_for_with_scheme_not_external(self):
app = flask.Flask(__name__)
@app.route('/')
def index():
return '42'
with app.test_request_context():
pytest.raises(ValueError,
flask.url_for,
'index',
_scheme='https')
def test_url_for_with_alternating_schemes(self):
app = flask.Flask(__name__)
@app.route('/')
def index():
return '42'
with app.test_request_context():
assert flask.url_for('index', _external=True) == 'http://localhost/'
assert flask.url_for('index', _external=True, _scheme='https') == 'https://localhost/'
assert flask.url_for('index', _external=True) == 'http://localhost/'
def test_url_with_method(self):
from flask.views import MethodView
app = flask.Flask(__name__)
class MyView(MethodView):
def get(self, id=None):
if id is None:
return 'List'
return 'Get %d' % id
def post(self):
return 'Create'
myview = MyView.as_view('myview')
app.add_url_rule('/myview/', methods=['GET'],
view_func=myview)
app.add_url_rule('/myview/<int:id>', methods=['GET'],
view_func=myview)
app.add_url_rule('/myview/create', methods=['POST'],
view_func=myview)
with app.test_request_context():
assert flask.url_for('myview', _method='GET') == '/myview/'
assert flask.url_for('myview', id=42, _method='GET') == '/myview/42'
assert flask.url_for('myview', _method='POST') == '/myview/create'
class TestNoImports(object):
"""Test Flasks are created without import.
Avoiding ``__import__`` helps create Flask instances where there are errors
at import time. Those runtime errors will be apparent to the user soon
enough, but tools which build Flask instances meta-programmatically benefit
from a Flask which does not ``__import__``. Instead of importing to
retrieve file paths or metadata on a module or package, use the pkgutil and
imp modules in the Python standard library.
"""
def test_name_with_import_error(self, modules_tmpdir):
modules_tmpdir.join('importerror.py').write('raise NotImplementedError()')
try:
flask.Flask('importerror')
except NotImplementedError:
assert False, 'Flask(import_name) is importing import_name.'
class TestStreaming(object):
def test_streaming_with_context(self):
app = flask.Flask(__name__)
app.testing = True
@app.route('/')
def index():
def generate():
yield 'Hello '
yield flask.request.args['name']
yield '!'
return flask.Response(flask.stream_with_context(generate()))
c = app.test_client()
rv = c.get('/?name=World')
assert rv.data == b'Hello World!'
def test_streaming_with_context_as_decorator(self):
app = flask.Flask(__name__)
app.testing = True
@app.route('/')
def index():
@flask.stream_with_context
def generate(hello):
yield hello
yield flask.request.args['name']
yield '!'
return flask.Response(generate('Hello '))
c = app.test_client()
rv = c.get('/?name=World')
assert rv.data == b'Hello World!'
def test_streaming_with_context_and_custom_close(self):
app = flask.Flask(__name__)
app.testing = True
called = []
class Wrapper(object):
def __init__(self, gen):
self._gen = gen
def __iter__(self):
return self
def close(self):
called.append(42)
def __next__(self):
return next(self._gen)
next = __next__
@app.route('/')
def index():
def generate():
yield 'Hello '
yield flask.request.args['name']
yield '!'
return flask.Response(flask.stream_with_context(
Wrapper(generate())))
c = app.test_client()
rv = c.get('/?name=World')
assert rv.data == b'Hello World!'
assert called == [42]
|
bsd-3-clause
|
zstackio/zstack-woodpecker
|
zstackwoodpecker/zstackwoodpecker/zstack_test/vcenter_checker/zstack_vcenter_snapshot_checker.py
|
2
|
9805
|
import zstackwoodpecker.header.checker as checker_header
import zstackwoodpecker.operations.volume_operations as vol_ops
import zstackwoodpecker.test_lib as test_lib
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.zstack_test.zstack_test_snapshot as zstack_sp_header
import zstackwoodpecker.header.volume as vl_header
import zstackwoodpecker.header.vm as vm_header
import zstackwoodpecker.header.snapshot as sp_header
import zstackwoodpecker.operations.resource_operations as res_ops
import zstackwoodpecker.operations.config_operations as cfg_ops
import apibinding.inventory as inventory
class zstack_vcenter_backuped_snapshot_checker(checker_header.TestChecker):
def check(self):
'''
will check whether the backed-up snapshot exists.
'''
super(zstack_vcenter_backuped_snapshot_checker, self).check()
backuped_snapshots = self.test_obj.get_backuped_snapshots()
for sp in backuped_snapshots:
sp_bs_refs = sp.get_snapshot().backupStorageRefs
for sp_bs_ref in sp_bs_refs:
bs_uuid = sp_bs_ref.backupStorageUuid
sp_path = sp_bs_ref.installPath
sp_uuid = sp_bs_ref.volumeSnapshotUuid
bs_host = test_lib.lib_get_backup_storage_host(bs_uuid)
bs = test_lib.lib_get_backup_storage_by_uuid(sp_bs_ref.backupStorageUuid)
if hasattr(inventory, 'IMAGE_STORE_BACKUP_STORAGE_TYPE') and bs.type == inventory.IMAGE_STORE_BACKUP_STORAGE_TYPE:
sp_info = sp_path.split('://')[1].split('/')
sp_path = '%s/registry/v1/repos/public/%s/manifests/revisions/%s' % (bs.url, sp_info[0], sp_info[1])
if test_lib.lib_check_file_exist(bs_host, sp_path):
test_util.test_logger('Checker result: backuped snapshot:%s is found in backup storage:%s in path: %s' % (sp_uuid, bs_uuid, sp_path))
if self.exp_result == False:
return self.judge(True)
else:
test_util.test_logger('Checker result: backuped snapshot:%s is NOT found in backup storage:%s in path: %s' % (sp_uuid, bs_uuid, sp_path))
if self.exp_result == True:
return self.judge(False)
test_util.test_logger('Checker result: Finish backuped snapshot checking')
return self.judge(self.exp_result)
class zstack_vcenter_snapshot_checker(checker_header.TestChecker):
def check(self):
'''
Will use snapshot:createDataVolumeFromSnapshot function to do checking.
'''
super(zstack_vcenter_snapshot_checker, self).check()
target_volume = self.test_obj.get_target_volume()
if target_volume.get_volume().type == 'Root':
test_util.test_logger('Checking Result: skip snapshot checking, since target volume: %s is Root volume' % target_volume.get_volume().uuid)
return self.judge(self.exp_result)
#snapshots = self.test_obj.get_snapshot_list()
sp = self.test_obj.get_current_snapshot()
if not sp:
test_util.test_logger('Checker result: no available current snapshot to be checked')
return self.judge(self.exp_result)
utility_vm = self.test_obj.get_utility_vm()
vm_inv = utility_vm.get_vm()
result = True
#only need to test the latest current snapshot, since previous snapshot
#operations should have been checked already and are assumed not to change.
#If this is not true, change the following 2 lines to the next line:
#for sp in snapshots.get_snapshot_list():
if sp.get_state() == sp_header.DELETED:
#continue
test_util.test_logger('Checking Result: snapshot status is Deleted, it should not be tested')
return self.judge(self.exp_result)
#calculate checking point
checking_points_list = self.test_obj.get_checking_points(sp)
volume_obj = sp.create_data_volume()
volume_obj.attach(utility_vm)
import tempfile
with tempfile.NamedTemporaryFile() as script:
script.write('''
device=/dev/`ls -ltr --file-type /dev | grep disk | awk '{print $NF}' | grep -v '[[:digit:]]' | tail -1`1
mkdir -p %s >/dev/null
mount $device %s >/dev/null
mkdir -p %s >/dev/null
checking_result=''
ls %s
umount %s >/dev/null
''' % (test_lib.WOODPECKER_MOUNT_POINT, \
test_lib.WOODPECKER_MOUNT_POINT, \
zstack_sp_header.checking_point_folder, \
zstack_sp_header.checking_point_folder, \
test_lib.WOODPECKER_MOUNT_POINT))
script.flush()
rsp = test_lib.lib_execute_shell_script_in_vm(vm_inv, \
script.name)
volume_obj.detach()
volume_obj.delete()
if rsp:
result_list = rsp.result.split()
temp_checking_list = list(result_list)
temp_exp_list = list(checking_points_list)
for item in result_list:
if item in checking_points_list:
temp_checking_list.remove(item)
temp_exp_list.remove(item)
if len(temp_exp_list) == 0:
if len(temp_checking_list) == 0:
test_util.test_logger('Checker result: snapshot: %s integrity checking pass' % sp.get_snapshot().uuid)
else:
test_util.test_logger('Checker result: snapshot: %s integrity checking fail, there are something more than expected : %s' % (sp.get_snapshot().uuid, temp_checking_list))
zstack_sp_header.print_snapshot_chain_checking_point(zstack_sp_header.get_all_ancestry(sp))
result = False
else:
if len(temp_checking_list) == 0:
test_util.test_logger('Checker result: snapshot: %s integrity checking fail, there are something less than expected: %s' % (sp.get_snapshot().uuid, temp_exp_list))
zstack_sp_header.print_snapshot_chain_checking_point(zstack_sp_header.get_all_ancestry(sp))
result = False
else:
test_util.test_logger('Checker result: snapshot: %s integrity checking fail, there are something more than expected : %s and there are something less than expected: %s ' % (sp.get_snapshot().uuid, temp_checking_list, temp_exp_list))
zstack_sp_header.print_snapshot_chain_checking_point(zstack_sp_header.get_all_ancestry(sp))
result = False
else:
test_util.test_logger('Checker result: check snapshot: %s failed with checking script.' % sp.get_snapshot().uuid)
zstack_sp_header.print_snapshot_chain_checking_point(zstack_sp_header.get_all_ancestry(sp))
result = False
return self.judge(result)
class zstack_vcenter_snapshot_tree_checker(checker_header.TestChecker):
def check(self):
'''
Will check snapshot tree correctness
Note: changing the allowed tree depth affects snapshots that have already
been created, so if snapshots were created before incrementalSnapshot.maxNum
was changed, the checker results will be unreliable.
'''
import json
import zstacklib.utils.jsonobject as jsonobject
super(zstack_vcenter_snapshot_tree_checker, self).check()
snapshots = self.test_obj.get_snapshot_list()
if not self.test_obj.get_snapshot_head():
test_util.test_logger('Snapshot is not created, skipped checking')
return self.judge(self.exp_result)
utility_vm = self.test_obj.get_utility_vm()
vm_inv = utility_vm.get_vm()
volume_obj = self.test_obj.get_target_volume()
volume_uuid = volume_obj.get_volume().uuid
if volume_obj.get_state() == vl_header.DELETED or \
(volume_obj.get_volume().type == 'Root' and \
volume_obj.get_target_vm().get_state() == vm_header.DESTROYED):
test_util.test_logger('Checker result: target volume is deleted, cannot get and check snapshot tree status')
return self.judge(self.exp_result)
vol_trees = test_lib.lib_get_volume_snapshot_tree(volume_uuid)
tree_allowed_depth = cfg_ops.get_global_config_value('volumeSnapshot', \
'incrementalSnapshot.maxNum')
for vol_tree in vol_trees:
tree = json.loads(jsonobject.dumps(vol_tree))['tree']
tree_max_depth = find_tree_max_depth(tree)
if tree_max_depth > (int(tree_allowed_depth) + 1):
test_util.test_logger(\
'Checker result: volume: %s snapshot tree: %s depth checking failure. The max \
allowed depth is : %s. But we get: %s' % (volume_uuid, tree['inventory'].uuid, \
tree_allowed_depth, str(tree_max_depth - 1)))
return self.judge(False)
test_util.test_logger(\
'Checker result: volume: %s snapshot tree depth checking pass. The max allowed \
depth is : %s. The real snapshot max depth is: %s' % \
(volume_uuid, tree_allowed_depth, str(tree_max_depth - 1)))
return self.judge(True)
def find_tree_max_depth(tree):
'''
tree is a dictionary. Its children are stored under the 'children' key.
'''
if not tree:
return 0
child_depth = 0
if not tree.has_key('children'):
test_util.test_fail('Snapshot tree has invalid format: it does not have a key for children.')
if tree['children']:
child_depth = 1
for child in tree['children']:
current_child_depth = find_tree_max_depth(child)
child_depth = max(child_depth, current_child_depth)
return child_depth + 1
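# --- Illustrative sketch (not from the original checker) --------------------
# find_tree_max_depth() expects the snapshot tree as nested dicts whose
# children live under the 'children' key.  The hypothetical tree below has a
# maximum depth of 3 (root -> child -> grandchild):
def _demo_find_tree_max_depth():
    demo_tree = {'children': [
        {'children': []},
        {'children': [{'children': []}]},
    ]}
    assert find_tree_max_depth(demo_tree) == 3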
|
apache-2.0
|
tuxnani/open-telugu
|
tests/tamil_regexp.py
|
3
|
4571
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# (C) 2013-2015 Muthiah Annamalai
#
# This file is part of 'open-tamil' package tests
#
# setup the paths
import codecs
import re
from opentamiltests import *
from tamil.utils.santhirules import joinWords
from tamil.regexp import make_pattern
class SantheeRules(unittest.TestCase):
def test_filebased_conjugation_tests( self ):
# write to file
with codecs.open('out.txt','r','utf-8') as FP:
data = FP.readlines()
for no,combo in enumerate(data):
print("testing #",no)
word = combo.split('=')
parts = word[0].split('+')
joind = word[1].strip()
a = parts[0].strip()
b = parts[1].strip()
jword = joinWords(a,b)
if ( LINUX ):
print(a + u' + ' + b + u' = ' + jword+u'\n')
print(jword,u'|',joind)
self.assertEqual( joind, jword )
return
def test_grammar_conjugation( self ):
a = u'เฎเฎฉเฏเฎฉ'
b = u'เฎเฎฉเฏเฎฉ'
w = joinWords(a, b).encode('utf8')
print( w )
self.assertTrue( w.decode('utf-8') == u'เฎเฎฉเฏเฎฉเฏเฎฉเฏเฎฉ')
class TamilRegex(unittest.TestCase):
def test_basic_A2Z( self ):
pattern = u"[เฎ-เฎ]+"
expected = u"[เฎ,เฎ,เฎ,เฎ,เฎ,เฎ,เฎ,เฎ,เฎ,เฎ,เฎ,เฎ]+"
[cpattern,opattern] = make_pattern( pattern )
self.assertEqual( opattern, expected )
def test_basic_no2_A2Z( self ):
pattern = u"^[เฎ-เฎ]+"
expected = u"^[เฎ,เฎ,เฎ,เฎ,เฎ]+"
[cpattern,opattern] = make_pattern( pattern )
self.assertEqual( opattern, expected )
def test_basic_no3_A2Z( self ):
pattern = u"^[เฎ-เฎ][0-9]+"
expected = u"^[เฎ,เฎ,เฎ,เฎ,เฎ,เฎ,เฎ,เฎ,เฎ,เฎ,เฎ,เฎ][0-9]+"
[cpattern,opattern] = make_pattern( pattern )
self.assertEqual( opattern, expected )
def test_basic_no4_A2Z( self ):
pattern = u"^[เฎ-เฎ][0-9]+"
expected = u"^[เฎ,เฎ,เฎ,เฎ,เฎ,เฎ,เฎ,เฎ,เฎ,เฎ,เฎ,เฎ][0-9]+"
[cpattern,opattern] = make_pattern( pattern )
self.assertEqual( opattern, expected )
def test_basic_no5_A2Z( self ):
pattern = u"^[เฎเฏ-เฎฎเฏ]+"
expected = u"^[เฎเฏ,เฎเฏ,เฎเฏ,เฎคเฏ,เฎชเฏ,เฎฑเฏ,เฎเฏ,เฎเฏ,เฎฃเฏ,เฎจเฏ,เฎฎเฏ]+"
[cpattern,opattern] = make_pattern( pattern )
self.assertEqual( opattern, expected )
def test_uyirmei_no6_A2Z( self ):
pattern = u"[เฎช-เฎชเฏ]+"
expected = u"[เฎช,เฎชเฎพ,เฎชเฎฟ,เฎชเฏ,เฎชเฏ,เฎชเฏ,เฎชเฏ,เฎชเฏ,เฎชเฏ,เฎชเฏ,เฎชเฏ,เฎชเฏ]+"
[cpattern,opattern] = make_pattern( pattern )
self.assertEqual( opattern, expected )
class GrepTests(unittest.TestCase):
def setUp(self):
self.data = codecs.open('data/richmond.txt','r','utf-8').readlines()
print("\ndata size = %d L"%len(self.data))
def search_test(self,pattern,expected):
return self.match_test(pattern,expected,fcn=re.search)
def match_test(self,pattern,expected,data=None,fcn=re.match):
[repatt,ymp] = make_pattern( pattern )
word_matches = []
if not data:
data = self.data
for idx,line in enumerate(data):
q = fcn(repatt,line.strip())
if q:
print("matched @ %d"%idx)
word_matches.append( idx )
self.assertEqual( word_matches, expected )
return
def test_exprs(self):
pattern = u"^เฎฐเฎฟเฎเฏ.*[เฎเฏ-เฎดเฏ]$"
expected = [0,1,2,3,7,8,10]
self.match_test( pattern, expected )
def test_match_letterend_exprs(self):
pattern = u"เฎเฏ$"
expected = [5,6]
self.search_test(pattern,expected)
return
def test_match_exprs(self):
pattern = u".*[^เฎเฏ-เฎดเฏ]$"
expected = [4,5,6,9]
self.match_test(pattern,expected)
return
def test_demo_regex(self):
pattern = u"^[เฎ-เฎณ].+[เฎเฏ-เฎณเฏ]$"
data = [u"เฎเฎจเฏเฎค",u"เฎคเฎฎเฎฟเฎดเฏ",u"เฎฐเฏเฎเฏเฎธเฏเฎชเฏเฎฒเฏ",u"\"^[เฎ-เฎณ].+[เฎเฏ-เฎณเฏ]$\"",u"เฎเฎคเฏเฎคเฏเฎเฎฐเฎฟเฎฒเฏ", u"เฎเฎคเฏ", u"เฎชเฏเฎฐเฏเฎจเฏเฎคเฏเฎฎเฏ"]
expected = [1,2,6] # i.e.เฎคเฎฎเฎฟเฎดเฏ
self.match_test(pattern,expected,data)
return
if __name__ == '__main__':
unittest.main()
|
mit
|
orcasgit/python-healthvault
|
healthvaultlib/status_codes.py
|
1
|
10202
|
class HealthVaultStatus(object):
"""Status codes that HealthVault can return.
`See also <http://msdn.microsoft.com/en-us/library/hh567902.aspx>`_
"""
OK = 0 # The request was successful.
FAILED = 1 # Generic failure due to unknown causes or internal error.
BAD_HTTP = 2 # Http protocol problem.
INVALID_XML = 3 # Request xml cannot be parsed or nonconformant.
BAD_SIG = 4 # Signature validation failed
BAD_METHOD = 5 # No such method.
INVALID_APP = 6 # App does not exist app is invalid app is not active or calling IP is invalid.
CREDENTIAL_TOKEN_EXPIRED = 7 # Credential token has expired need a new one.
INVALID_TOKEN = 8 # Auth token malformed or otherwise busted.
INVALID_PERSON = 9 # Person does not exist or is not active.
INVALID_RECORD = 10 # Given record id does not exist.
ACCESS_DENIED = 11 # Person or app does not have sufficient rights.
NYI = 12 # The functionality being accessed is not yet implemented.
INVALID_THING = 13 # invalid thing identifier.
CANT_CONVERT_UNITS = 14 # Data table already exists with incompatible units.
INVALID_FILTER = 15 # Missing or invalid GetThingsFilter.
INVALID_FORMAT = 16 # Missing or invalid GetThings format specifier.
MISSING_SHARED_SECRET = 17 # A credential was supplied without a shared secret.
INVALID_APPAUTH = 18 # authorized_applications entry missing.
INVALID_THING_TYPE = 19 # Thing type doesn't exist.
THING_TYPE_IMMUTABLE = 20 # Can't update things of this type.
THING_TYPE_UNCREATABLE = 21 # Can't create things of this type.
DUPLICATE_CREDENTIAL_FOUND = 22 # Duplicate Credential found.
INVALID_RECORD_NAME = 23 # Invalid Record name.
DRUG_NOT_FOUND = 24 # Cannot find the drug specified.
INVALID_PERSON_STATE = 25 # Invalid person state.
INVALID_CODESET = 26 # Requested code set was not found.
INVALID_VALIDATION_TOKEN = 28 # Invalid validation token for contact email validation.
INVALID_CONTACT_EMAIL = 30 # Invalid contact email
INVALID_LOGIN_NAME = 31 # Invalid login name.
INVALID_PASSWORD = 32 # Invalid password.
INVALID_OPENQUERY = 33 # Open query id not found.
INVALID_TRANSFORM = 34 # Transform cannot be loaded.
INVALID_RELATIONSHIP_TYPE = 35 # Invalid relationship type.
INVALID_CREDENTIAL_TYPE = 36 # Invalid credential type.
INVALID_RECORD_STATE = 37 # Invalid record state.
APP_AUTH_NOT_REQUIRED = 38 # Application authorization is not required for this app.
REQUEST_TOO_LONG = 39 # The request provided has exceeded maximum allowed request length.
DUPLICATE_AUTHORIZED_RECORD_FOUND = 40 # Duplicate authorized record found.
EMAIL_NOT_VALIDATED = 41 # Person email must be validated but it's not
MAIL_ADDRESS_MALFORMED = 45 # The email address specified to SendInsecureMessage is malformed.
PASSWORD_NOT_STRONG = 46 # The password does not meet the complexity requirements.
CANNOT_REMOVE_LAST_CUSTODIAN = 47 # The last custodian for a record cannot be removed.
INVALID_EMAIL_ADDRESS = 48 # The email address is invalid.
REQUEST_TIMED_OUT = 49 # The request sent to HealthVault reached its time to live and is now too old to be processed.
INVALID_SPONSOR_EMAIL = 50 # The sponsor email address is invalid.
INVALID_PROMOTION_TOKEN = 51 # Promotion token is invalid.
INVALID_RECORD_AUTHORIZATION_TOKEN = 52 # Record authorization token is invalid.
TOO_MANY_GROUPS_IN_QUERY = 53 # GetThings Query has too many request groups.
GRANT_AUTHZ_EXCEEDS_DEFAULT = 54 # The permissions to be granted exceed the default permissions available to be granted. e.g.attempt to grant all access when only read access is available.
INVALID_VOCABULARY = 55 # Requested vocabulary was not found
DUPLICATE_APPLICATION_FOUND = 56 # An application with the same ID already exists
RECORD_AUTHORIZATION_TOKEN_EXPIRED = 57 # Record authorization token has expired.
RECORD_AUTHORIZATION_DOES_NOT_EXIST = 58 # Record authorization does not exist.
THING_TYPE_UNDELETABLE = 59 # Can't delete things of this type.
VERSION_STAMP_MISSING = 60 # Version stamp is missing.
VERSION_STAMP_MISMATCH = 61 # Version stamp mismatch.
EXPIRED_OPENQUERY = 62 # Requested open query has expired.
INVALID_PUBLIC_KEY = 63 # Public key is invalid.
DOMAIN_NAME_NOT_SET = 64 # The application's domain name hasn't been set.
AUTHENTICATED_SESSION_TOKEN_EXPIRED = 65 # Authenticated session token has expired need a new one.
INVALID_CREDENTIAL_KEY = 66 # The credential key was not found.
INVALID_PERSON_ID = 67 # Pseudo id for person not valid
RECORD_QUOTA_EXCEEDED = 68 # The size occupied by the things in the put things request will cause the record to exceed the size quota alloted to it.
INVALID_DATETIME = 69 # The DateTime supplied is invalid (exceeds the bounds for the DateTime)
BAD_CERT = 70 # Certificate validation failed.
RESPONSE_TOO_LONG = 71 # The response has exceeded maximum allowed size.
INVALID_VERIFICATION_QUESTION = 72 # Verification question for connect request invalid.
INVALID_VERIFICATION_ANSWER = 73 # The verification answer for the connect request is invalid.
INVALID_IDENTITY_CODE = 74 # There is no connect request corresponding to the given code.
RETRY_LIMIT_EXCEEDED = 75 # Maximum number of retries has been exceeded.
CULTURE_NOT_SUPPORTED = 76 # Request header culture not supported.
INVALID_FILE_EXTENSION = 77 # The file extension is not supported.
INVALID_VOCABULARY_ITEM = 78 # The vocabulary item does not exist.
DUPLICATE_CONNECT_REQUEST_FOUND = 79 # Duplicate connect request found.
INVALID_SPECIAL_ACCOUNT_TYPE = 80 # The account type specified is invalid.
DUPLICATE_TYPE_FOUND = 81 # A type with the specified identifier already exists.
CREDENTIAL_NOT_FOUND = 82 # Credential not found
CANNOT_REMOVE_LAST_CREDENTIAL = 83 # Attempt to delete last credential associated with an account
CONNECT_REQUEST_ALREADY_AUTHORIZED = 84 # The connect request has been previously authorized.
INVALID_THING_TYPE_VERSION = 85 # The type specified to update an instance of a thing is an older version of the type than the existing instance.
CREDENTIALS_LIMIT_EXCEEDED = 86 # The maximum number of allowed credentials has been exceeded.
INVALID_METHOD = 87 # One or more invalid methods were specified in the method mask.
INVALID_BLOB_REF_URL = 88 # The blob reference url supplied for the blob streaming API is invalid.
CANNOT_GET_STREAMED_OTHER_DATA = 89 # Other data put in to Healthvault via the streaming API cannot be requested as an other data string.
UPDATE_THING_TYPE_VERSION_NO_DATA_XML = 90 # The type version of the thing cannot be changed without a data xml supplied for validation.
UNSUPPORTED_CONTENT_ENCODING = 91 # The content encoding specified for the blob is not supported.
CONTENT_ENCODING_DATA_MISMATCH = 92 # The content encoding specified for the blob does not match the blob data.
APPLICATION_LIMIT_EXCEEDED = 93 # The user exceeded the maximum number of applications allowed.
INVALID_BINARY_CONTENT_ID = 94 # The specified binary content identifier was not found.
CONNECT_REQUEST_INCOMPLETE = 95 # The connect request was found but does not yet have any contents.
CONNECT_PACKAGE_EXISTS = 96 # The connect package has already been fully created and populated.
INVALID_FILE_NAME = 97 # The file name is not supported.
INVALID_SIGNUP_CODE = 98 # The signup code is invalid.
BLOB_SIZE_TOO_LARGE_FOR_INLINE = 99 # The blob is too large and cannot be returned inline.
DUPLICATE_BLOB = 100 # A blob of this name is already present in the request.
BLOB_TOKEN_COMMITTED = 101 # The blob token corresponds to a blob that is already committed.
BLOB_TOKEN_NOT_COMPLETED = 102 # The blob token corresponds to a blob that was not marked completed through the streaming interface.
THING_POTENTIALLY_INCOMPLETE = 104 # The thing being updated has data items that cannot be seen in this version, e.g. signatures with new signature methods or multiple blobs.
INVALID_SIGNATURE_ALGORITHM = 105 # The signature algorithm is not valid.
INVALID_BLOB_HASH_ALGORITHM = 106 # The blob hash algorithm is invalid or not supported.
UNSUPPORTED_BLOB_HASH_BLOCK_SIZE = 107 # The blob hash block size is unsupported.
BLOB_HASH_ALGORITHM_MISMATCH = 108 # The specified blob hash algorithm does not match the blob's hash algorithm.
BLOB_HASH_BLOCK_SIZE_MISMATCH = 109 # The specified blob hash block size does not match the blob's hash block size.
UNSUPPORTED_SIGNATURE_METHOD = 110 # The signature method is not supported in the context it is being used.
INVALID_BLOB_HASH = 111 # The specified blob hash is invalid.
PACKAGE_BLOB_NOT_COMMITTED = 112 # The blob is associated with a connect package that is not yet created.
APPLICATION_STATE_TRANSITION_NOT_SUPPORTED = 113 # Changing the application state from deleted is not supported.
INVALID_PACKAGE_CONTENTS = 120 # The contents of the connect package are not valid xml.
INVALID_CONTENT_TYPE = 121 # The content type is not supported.
CONNECT_PACKAGE_VALIDATION_REQUIRED = 122 # The contents of the connect package must be validated before they are put into a health record.
INVALID_THING_STATE = 123 # Invalid thing state.
TOO_MANY_THINGS_SPECIFIED = 124 # The maximum number of things specified has been exceeded.
INVALID_DIRECTORY_ITEM = 126 # The directory item passed in is invalid.
INVALID_VOCABULARY_AUTHORIZATION = 129 # The vocabulary authorization is invalid.
VOCABULARY_ACCESS_DENIED = 130 # Access to the requested vocabulary is denied.
UNSUPPORTED_PERSONAL_FLAG = 131 # The personal flag is not supported with this type.
SUBSCRIPTION_NOT_FOUND = 132 # The requested subscription was not found.
SUBSCRIPTION_LIMIT_EXCEEDED = 133 # The number of subscriptions for the application was exceeded.
SUBSCRIPTION_INVALID = 134 # The subscription contains invalid data.
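# --- Illustrative sketch (not part of the original module) ------------------
# One way to map a numeric status returned by HealthVault back to its symbolic
# name above, e.g. for logging: status_name(11) -> 'ACCESS_DENIED'.
def status_name(code):
    """Return the attribute name for ``code``, or None if it is unknown."""
    for name, value in vars(HealthVaultStatus).items():
        if not name.startswith('_') and value == code:
            return name
    return None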
|
mit
|
kkanahin/django-avatar
|
avatar/views.py
|
3
|
8108
|
from django.http import Http404
from django.shortcuts import render, redirect
from django.utils import six
from django.utils.translation import ugettext as _
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from avatar.forms import PrimaryAvatarForm, DeleteAvatarForm, UploadAvatarForm
from avatar.models import Avatar
from avatar.settings import AVATAR_MAX_AVATARS_PER_USER, AVATAR_DEFAULT_SIZE
from avatar.signals import avatar_updated
from avatar.util import (get_primary_avatar, get_default_avatar_url,
get_user_model, get_user)
def _get_next(request):
"""
The part that's the least straightforward about views in this module is how they
determine their redirects after they have finished computation.
In short, they will try and determine the next place to go in the following order:
1. If there is a variable named ``next`` in the *POST* parameters, the view will
redirect to that variable's value.
2. If there is a variable named ``next`` in the *GET* parameters, the view will
redirect to that variable's value.
3. If Django can determine the previous page from the HTTP headers, the view will
redirect to that previous page.
"""
next = request.POST.get('next', request.GET.get('next',
request.META.get('HTTP_REFERER', None)))
if not next:
next = request.path
return next
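# --- Illustrative sketch (not part of the original module) ------------------
# The redirect order described in _get_next() means a caller can steer where
# these views go next by posting a ``next`` value (URL and form data are
# hypothetical, and a logged-in client with a valid form is assumed):
#
#   client.post('/avatar/change/', {'choice': 3, 'next': '/profile/'})
#   # -> 302 redirect to '/profile/'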
def _get_avatars(user):
# Default set. Needs to be sliced, but that's it. Keep the natural order.
avatars = user.avatar_set.all()
# Current avatar
primary_avatar = avatars.order_by('-primary')[:1]
if primary_avatar:
avatar = primary_avatar[0]
else:
avatar = None
if AVATAR_MAX_AVATARS_PER_USER == 1:
avatars = primary_avatar
else:
# Slice the default set now that we used the queryset for the primary avatar
avatars = avatars[:AVATAR_MAX_AVATARS_PER_USER]
return (avatar, avatars)
@login_required
def add(request, extra_context=None, next_override=None,
upload_form=UploadAvatarForm, *args, **kwargs):
if extra_context is None:
extra_context = {}
avatar, avatars = _get_avatars(request.user)
upload_avatar_form = upload_form(request.POST or None,
request.FILES or None, user=request.user)
if request.method == "POST" and 'avatar' in request.FILES:
if upload_avatar_form.is_valid():
avatar = Avatar(user=request.user, primary=True)
image_file = request.FILES['avatar']
avatar.avatar.save(image_file.name, image_file)
avatar.save()
messages.success(request, _("Successfully uploaded a new avatar."))
avatar_updated.send(sender=Avatar, user=request.user, avatar=avatar)
return redirect(next_override or _get_next(request))
context = {
'avatar': avatar,
'avatars': avatars,
'upload_avatar_form': upload_avatar_form,
'next': next_override or _get_next(request),
}
context.update(extra_context)
return render(request, 'avatar/add.html', context)
@login_required
def change(request, extra_context=None, next_override=None,
upload_form=UploadAvatarForm, primary_form=PrimaryAvatarForm,
*args, **kwargs):
if extra_context is None:
extra_context = {}
avatar, avatars = _get_avatars(request.user)
if avatar:
kwargs = {'initial': {'choice': avatar.id}}
else:
kwargs = {}
upload_avatar_form = upload_form(user=request.user, **kwargs)
primary_avatar_form = primary_form(request.POST or None,
user=request.user, avatars=avatars, **kwargs)
if request.method == "POST":
updated = False
if 'choice' in request.POST and primary_avatar_form.is_valid():
avatar = Avatar.objects.get(
id=primary_avatar_form.cleaned_data['choice'])
avatar.primary = True
avatar.save()
updated = True
messages.success(request, _("Successfully updated your avatar."))
if updated:
avatar_updated.send(sender=Avatar, user=request.user, avatar=avatar)
return redirect(next_override or _get_next(request))
context = {
'avatar': avatar,
'avatars': avatars,
'upload_avatar_form': upload_avatar_form,
'primary_avatar_form': primary_avatar_form,
'next': next_override or _get_next(request)
}
context.update(extra_context)
return render(request, 'avatar/change.html', context)
@login_required
def delete(request, extra_context=None, next_override=None, *args, **kwargs):
if extra_context is None:
extra_context = {}
avatar, avatars = _get_avatars(request.user)
delete_avatar_form = DeleteAvatarForm(request.POST or None,
user=request.user, avatars=avatars)
if request.method == 'POST':
if delete_avatar_form.is_valid():
ids = delete_avatar_form.cleaned_data['choices']
if six.text_type(avatar.id) in ids and avatars.count() > len(ids):
# Find the next best avatar, and set it as the new primary
for a in avatars:
if six.text_type(a.id) not in ids:
a.primary = True
a.save()
avatar_updated.send(sender=Avatar, user=request.user, avatar=avatar)
break
Avatar.objects.filter(id__in=ids).delete()
messages.success(request, _("Successfully deleted the requested avatars."))
return redirect(next_override or _get_next(request))
context = {
'avatar': avatar,
'avatars': avatars,
'delete_avatar_form': delete_avatar_form,
'next': next_override or _get_next(request),
}
context.update(extra_context)
return render(request, 'avatar/confirm_delete.html', context)
def avatar_gallery(request, username, template_name="avatar/gallery.html"):
try:
user = get_user(username)
except get_user_model().DoesNotExist:
raise Http404
return render(request, template_name, {
"other_user": user,
"avatars": user.avatar_set.all(),
})
def avatar(request, username, id, template_name="avatar/avatar.html"):
try:
user = get_user(username)
except get_user_model().DoesNotExist:
raise Http404
avatars = user.avatar_set.order_by("-date_uploaded")
index = None
avatar = None
if avatars:
avatar = avatars.get(pk=id)
if not avatar:
raise Http404
index = avatars.filter(date_uploaded__gt=avatar.date_uploaded).count()
count = avatars.count()
if index == 0:
prev = avatars.reverse()[0]
if count <= 1:
next = avatars[0]
else:
next = avatars[1]
else:
prev = avatars[index - 1]
if (index + 1) >= count:
next = avatars[0]
prev_index = index - 1
if prev_index < 0:
prev_index = 0
prev = avatars[prev_index]
else:
next = avatars[index + 1]
return render(request, template_name, {
"other_user": user,
"avatar": avatar,
"index": index + 1,
"avatars": avatars,
"next": next,
"prev": prev,
"count": count,
})
def render_primary(request, extra_context={}, user=None, size=AVATAR_DEFAULT_SIZE, *args, **kwargs):
size = int(size)
avatar = get_primary_avatar(user, size=size)
if avatar:
# FIXME: later, add an option to render the resized avatar dynamically
# instead of redirecting to an already created static file. This could
# be useful in certain situations, particulary if there is a CDN and
# we want to minimize the storage usage on our static server, letting
# the CDN store those files instead
return redirect(avatar.avatar_url(size))
else:
return redirect(get_default_avatar_url())
|
bsd-3-clause
|
wooga/airflow
|
tests/www/api/experimental/test_endpoints.py
|
1
|
21328
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import json
import os
import unittest
from datetime import timedelta
from unittest import mock
from urllib.parse import quote_plus
from parameterized import parameterized_class
from airflow import settings
from airflow.api.common.experimental.trigger_dag import trigger_dag
from airflow.models import DagBag, DagRun, Pool, TaskInstance
from airflow.models.serialized_dag import SerializedDagModel
from airflow.settings import Session
from airflow.utils.timezone import datetime, parse as parse_datetime, utcnow
from airflow.version import version
from airflow.www import app as application
from tests.test_utils.config import conf_vars
from tests.test_utils.db import clear_db_pools
ROOT_FOLDER = os.path.realpath(
os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir, os.pardir, os.pardir, os.pardir)
)
class TestBase(unittest.TestCase):
def setUp(self):
self.app = application.create_app(testing=True)
self.appbuilder = self.app.appbuilder # pylint: disable=no-member
self.app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///'
self.app.config['SECRET_KEY'] = 'secret_key'
self.app.config['CSRF_ENABLED'] = False
self.app.config['WTF_CSRF_ENABLED'] = False
self.client = self.app.test_client()
settings.configure_orm()
self.session = Session
@parameterized_class([
{"dag_serialization": "False"},
{"dag_serialization": "True"},
])
class TestApiExperimental(TestBase):
dag_serialization = "False"
@classmethod
def setUpClass(cls):
super().setUpClass()
session = Session()
session.query(DagRun).delete()
session.query(TaskInstance).delete()
session.commit()
session.close()
dagbag = DagBag(include_examples=True)
for dag in dagbag.dags.values():
dag.sync_to_db()
SerializedDagModel.write_dag(dag)
def tearDown(self):
session = Session()
session.query(DagRun).delete()
session.query(TaskInstance).delete()
session.commit()
session.close()
super().tearDown()
def test_info(self):
url = '/api/experimental/info'
resp_raw = self.client.get(url)
resp = json.loads(resp_raw.data.decode('utf-8'))
self.assertEqual(version, resp['version'])
def test_task_info(self):
with conf_vars(
{("core", "store_serialized_dags"): self.dag_serialization}
):
url_template = '/api/experimental/dags/{}/tasks/{}'
response = self.client.get(
url_template.format('example_bash_operator', 'runme_0')
)
self.assertIn('"email"', response.data.decode('utf-8'))
self.assertNotIn('error', response.data.decode('utf-8'))
self.assertEqual(200, response.status_code)
response = self.client.get(
url_template.format('example_bash_operator', 'DNE')
)
self.assertIn('error', response.data.decode('utf-8'))
self.assertEqual(404, response.status_code)
response = self.client.get(
url_template.format('DNE', 'DNE')
)
self.assertIn('error', response.data.decode('utf-8'))
self.assertEqual(404, response.status_code)
def test_get_dag_code(self):
with conf_vars(
{("core", "store_serialized_dags"): self.dag_serialization}
):
url_template = '/api/experimental/dags/{}/code'
response = self.client.get(
url_template.format('example_bash_operator')
)
self.assertIn('BashOperator(', response.data.decode('utf-8'))
self.assertEqual(200, response.status_code)
response = self.client.get(
url_template.format('xyz')
)
self.assertEqual(404, response.status_code)
def test_dag_paused(self):
with conf_vars(
{("core", "store_serialized_dags"): self.dag_serialization}
):
pause_url_template = '/api/experimental/dags/{}/paused/{}'
paused_url_template = '/api/experimental/dags/{}/paused'
paused_url = paused_url_template.format('example_bash_operator')
response = self.client.get(
pause_url_template.format('example_bash_operator', 'true')
)
self.assertIn('ok', response.data.decode('utf-8'))
self.assertEqual(200, response.status_code)
paused_response = self.client.get(paused_url)
self.assertEqual(200, paused_response.status_code)
self.assertEqual({"is_paused": True}, paused_response.json)
response = self.client.get(
pause_url_template.format('example_bash_operator', 'false')
)
self.assertIn('ok', response.data.decode('utf-8'))
self.assertEqual(200, response.status_code)
paused_response = self.client.get(paused_url)
self.assertEqual(200, paused_response.status_code)
self.assertEqual({"is_paused": False}, paused_response.json)
def test_trigger_dag(self):
with conf_vars(
{("core", "store_serialized_dags"): self.dag_serialization}
):
url_template = '/api/experimental/dags/{}/dag_runs'
run_id = 'my_run' + utcnow().isoformat()
response = self.client.post(
url_template.format('example_bash_operator'),
data=json.dumps({'run_id': run_id}),
content_type="application/json"
)
self.assertEqual(200, response.status_code)
response_execution_date = parse_datetime(
json.loads(response.data.decode('utf-8'))['execution_date'])
self.assertEqual(0, response_execution_date.microsecond)
# Check execution_date is correct
response = json.loads(response.data.decode('utf-8'))
dagbag = DagBag()
dag = dagbag.get_dag('example_bash_operator')
dag_run = dag.get_dagrun(response_execution_date)
dag_run_id = dag_run.run_id
self.assertEqual(run_id, dag_run_id)
self.assertEqual(dag_run_id, response['run_id'])
# Test error for nonexistent dag
response = self.client.post(
url_template.format('does_not_exist_dag'),
data=json.dumps({}),
content_type="application/json"
)
self.assertEqual(404, response.status_code)
def test_trigger_dag_for_date(self):
with conf_vars(
{("core", "store_serialized_dags"): self.dag_serialization}
):
url_template = '/api/experimental/dags/{}/dag_runs'
dag_id = 'example_bash_operator'
execution_date = utcnow() + timedelta(hours=1)
datetime_string = execution_date.isoformat()
# Test correct execution with execution date
response = self.client.post(
url_template.format(dag_id),
data=json.dumps({'execution_date': datetime_string}),
content_type="application/json"
)
self.assertEqual(200, response.status_code)
self.assertEqual(datetime_string, json.loads(response.data.decode('utf-8'))['execution_date'])
dagbag = DagBag()
dag = dagbag.get_dag(dag_id)
dag_run = dag.get_dagrun(execution_date)
self.assertTrue(dag_run,
'Dag Run not found for execution date {}'
.format(execution_date))
# Test correct execution with execution date and microseconds replaced
response = self.client.post(
url_template.format(dag_id),
data=json.dumps({'execution_date': datetime_string, 'replace_microseconds': 'true'}),
content_type="application/json"
)
self.assertEqual(200, response.status_code)
response_execution_date = parse_datetime(
json.loads(response.data.decode('utf-8'))['execution_date'])
self.assertEqual(0, response_execution_date.microsecond)
dagbag = DagBag()
dag = dagbag.get_dag(dag_id)
dag_run = dag.get_dagrun(response_execution_date)
self.assertTrue(dag_run,
'Dag Run not found for execution date {}'
.format(execution_date))
# Test error for nonexistent dag
response = self.client.post(
url_template.format('does_not_exist_dag'),
data=json.dumps({'execution_date': datetime_string}),
content_type="application/json"
)
self.assertEqual(404, response.status_code)
# Test error for bad datetime format
response = self.client.post(
url_template.format(dag_id),
data=json.dumps({'execution_date': 'not_a_datetime'}),
content_type="application/json"
)
self.assertEqual(400, response.status_code)
def test_task_instance_info(self):
with conf_vars(
{("core", "store_serialized_dags"): self.dag_serialization}
):
url_template = '/api/experimental/dags/{}/dag_runs/{}/tasks/{}'
dag_id = 'example_bash_operator'
task_id = 'also_run_this'
execution_date = utcnow().replace(microsecond=0)
datetime_string = quote_plus(execution_date.isoformat())
wrong_datetime_string = quote_plus(
datetime(1990, 1, 1, 1, 1, 1).isoformat()
)
# Create DagRun
trigger_dag(dag_id=dag_id,
run_id='test_task_instance_info_run',
execution_date=execution_date)
# Test Correct execution
response = self.client.get(
url_template.format(dag_id, datetime_string, task_id)
)
self.assertEqual(200, response.status_code)
self.assertIn('state', response.data.decode('utf-8'))
self.assertNotIn('error', response.data.decode('utf-8'))
# Test error for nonexistent dag
response = self.client.get(
url_template.format('does_not_exist_dag', datetime_string,
task_id),
)
self.assertEqual(404, response.status_code)
self.assertIn('error', response.data.decode('utf-8'))
# Test error for nonexistent task
response = self.client.get(
url_template.format(dag_id, datetime_string, 'does_not_exist_task')
)
self.assertEqual(404, response.status_code)
self.assertIn('error', response.data.decode('utf-8'))
# Test error for nonexistent dag run (wrong execution_date)
response = self.client.get(
url_template.format(dag_id, wrong_datetime_string, task_id)
)
self.assertEqual(404, response.status_code)
self.assertIn('error', response.data.decode('utf-8'))
# Test error for bad datetime format
response = self.client.get(
url_template.format(dag_id, 'not_a_datetime', task_id)
)
self.assertEqual(400, response.status_code)
self.assertIn('error', response.data.decode('utf-8'))
def test_dagrun_status(self):
with conf_vars(
{("core", "store_serialized_dags"): self.dag_serialization}
):
url_template = '/api/experimental/dags/{}/dag_runs/{}'
dag_id = 'example_bash_operator'
execution_date = utcnow().replace(microsecond=0)
datetime_string = quote_plus(execution_date.isoformat())
wrong_datetime_string = quote_plus(
datetime(1990, 1, 1, 1, 1, 1).isoformat()
)
# Create DagRun
trigger_dag(dag_id=dag_id,
run_id='test_task_instance_info_run',
execution_date=execution_date)
# Test Correct execution
response = self.client.get(
url_template.format(dag_id, datetime_string)
)
self.assertEqual(200, response.status_code)
self.assertIn('state', response.data.decode('utf-8'))
self.assertNotIn('error', response.data.decode('utf-8'))
# Test error for nonexistent dag
response = self.client.get(
url_template.format('does_not_exist_dag', datetime_string),
)
self.assertEqual(404, response.status_code)
self.assertIn('error', response.data.decode('utf-8'))
# Test error for nonexistent dag run (wrong execution_date)
response = self.client.get(
url_template.format(dag_id, wrong_datetime_string)
)
self.assertEqual(404, response.status_code)
self.assertIn('error', response.data.decode('utf-8'))
# Test error for bad datetime format
response = self.client.get(
url_template.format(dag_id, 'not_a_datetime')
)
self.assertEqual(400, response.status_code)
self.assertIn('error', response.data.decode('utf-8'))
@parameterized_class([
{"dag_serialization": "False"},
{"dag_serialization": "True"},
])
class TestLineageApiExperimental(TestBase):
PAPERMILL_EXAMPLE_DAGS = os.path.join(ROOT_FOLDER, "airflow", "providers", "papermill", "example_dags")
dag_serialization = "False"
@classmethod
def setUpClass(cls):
super().setUpClass()
session = Session()
session.query(DagRun).delete()
session.query(TaskInstance).delete()
session.commit()
session.close()
dagbag = DagBag(include_examples=False, dag_folder=cls.PAPERMILL_EXAMPLE_DAGS)
for dag in dagbag.dags.values():
dag.sync_to_db()
SerializedDagModel.write_dag(dag)
@mock.patch("airflow.settings.DAGS_FOLDER", PAPERMILL_EXAMPLE_DAGS)
def test_lineage_info(self):
with conf_vars(
{("core", "store_serialized_dags"): self.dag_serialization}
):
url_template = '/api/experimental/lineage/{}/{}'
dag_id = 'example_papermill_operator'
execution_date = utcnow().replace(microsecond=0)
datetime_string = quote_plus(execution_date.isoformat())
wrong_datetime_string = quote_plus(
datetime(1990, 1, 1, 1, 1, 1).isoformat()
)
# create DagRun
trigger_dag(dag_id=dag_id,
run_id='test_lineage_info_run',
execution_date=execution_date)
# test correct execution
response = self.client.get(
url_template.format(dag_id, datetime_string)
)
self.assertEqual(200, response.status_code)
self.assertIn('task_ids', response.data.decode('utf-8'))
self.assertNotIn('error', response.data.decode('utf-8'))
# Test error for nonexistent dag
response = self.client.get(
url_template.format('does_not_exist_dag', datetime_string),
)
self.assertEqual(404, response.status_code)
self.assertIn('error', response.data.decode('utf-8'))
# Test error for nonexistent dag run (wrong execution_date)
response = self.client.get(
url_template.format(dag_id, wrong_datetime_string)
)
self.assertEqual(404, response.status_code)
self.assertIn('error', response.data.decode('utf-8'))
# Test error for bad datetime format
response = self.client.get(
url_template.format(dag_id, 'not_a_datetime')
)
self.assertEqual(400, response.status_code)
self.assertIn('error', response.data.decode('utf-8'))
class TestPoolApiExperimental(TestBase):
USER_POOL_COUNT = 2
TOTAL_POOL_COUNT = USER_POOL_COUNT + 1 # including default_pool
@classmethod
def setUpClass(cls):
super().setUpClass()
def setUp(self):
super().setUp()
clear_db_pools()
self.pools = [Pool.get_default_pool()]
for i in range(self.USER_POOL_COUNT):
name = 'experimental_%s' % (i + 1)
pool = Pool(
pool=name,
slots=i,
description=name,
)
self.session.add(pool)
self.pools.append(pool)
self.session.commit()
self.pool = self.pools[-1]
def _get_pool_count(self):
response = self.client.get('/api/experimental/pools')
self.assertEqual(response.status_code, 200)
return len(json.loads(response.data.decode('utf-8')))
def test_get_pool(self):
response = self.client.get(
'/api/experimental/pools/{}'.format(self.pool.pool),
)
self.assertEqual(response.status_code, 200)
self.assertEqual(json.loads(response.data.decode('utf-8')),
self.pool.to_json())
def test_get_pool_non_existing(self):
response = self.client.get('/api/experimental/pools/foo')
self.assertEqual(response.status_code, 404)
self.assertEqual(json.loads(response.data.decode('utf-8'))['error'],
"Pool 'foo' doesn't exist")
def test_get_pools(self):
response = self.client.get('/api/experimental/pools')
self.assertEqual(response.status_code, 200)
pools = json.loads(response.data.decode('utf-8'))
self.assertEqual(len(pools), self.TOTAL_POOL_COUNT)
for i, pool in enumerate(sorted(pools, key=lambda p: p['pool'])):
self.assertDictEqual(pool, self.pools[i].to_json())
def test_create_pool(self):
response = self.client.post(
'/api/experimental/pools',
data=json.dumps({
'name': 'foo',
'slots': 1,
'description': '',
}),
content_type='application/json',
)
self.assertEqual(response.status_code, 200)
pool = json.loads(response.data.decode('utf-8'))
self.assertEqual(pool['pool'], 'foo')
self.assertEqual(pool['slots'], 1)
self.assertEqual(pool['description'], '')
self.assertEqual(self._get_pool_count(), self.TOTAL_POOL_COUNT + 1)
def test_create_pool_with_bad_name(self):
for name in ('', ' '):
response = self.client.post(
'/api/experimental/pools',
data=json.dumps({
'name': name,
'slots': 1,
'description': '',
}),
content_type='application/json',
)
self.assertEqual(response.status_code, 400)
self.assertEqual(
json.loads(response.data.decode('utf-8'))['error'],
"Pool name shouldn't be empty",
)
self.assertEqual(self._get_pool_count(), self.TOTAL_POOL_COUNT)
def test_delete_pool(self):
response = self.client.delete(
'/api/experimental/pools/{}'.format(self.pool.pool),
)
self.assertEqual(response.status_code, 200)
self.assertEqual(json.loads(response.data.decode('utf-8')),
self.pool.to_json())
self.assertEqual(self._get_pool_count(), self.TOTAL_POOL_COUNT - 1)
def test_delete_pool_non_existing(self):
response = self.client.delete(
'/api/experimental/pools/foo',
)
self.assertEqual(response.status_code, 404)
self.assertEqual(json.loads(response.data.decode('utf-8'))['error'],
"Pool 'foo' doesn't exist")
def test_delete_default_pool(self):
clear_db_pools()
response = self.client.delete(
'/api/experimental/pools/default_pool',
)
self.assertEqual(response.status_code, 400)
self.assertEqual(json.loads(response.data.decode('utf-8'))['error'],
"default_pool cannot be deleted")
|
apache-2.0
|
vismantic-ohtuprojekti/image-filtering-suite
|
qualipy/utils/tesseract.py
|
2
|
1421
|
import subprocess
import tempfile
import os
def img_to_str(tesseract_path, image):
"""Reads text in an image with tesseract-ocr
:param tesseract_path: path to the tesseract executable
:type tesseract_path: str
:param image: path to the input image
:type image: str
"""
if not os.path.isfile(image):
raise OSError("image not found: %s" % image)
with tempfile.NamedTemporaryFile(prefix="tess_") as outfile:
try:
status, err_str = __run_tesseract(tesseract_path,
image, outfile.name)
if status:
raise OSError(err_str)
with open(outfile.name + ".txt") as out:
return out.read().strip()
finally:
__remove_file(outfile.name + ".txt")
def __run_tesseract(tesseract_path, infile, outfile):
"""Run tesseract process for given input file
:param tesseract_path: path to the tesseract executable
:type tesseracth_path: str
:param infile: path to the input image
:type infile: str
:param outfile: path to the output file
:type outfile: str
"""
command = [tesseract_path, infile, outfile]
tesseract = subprocess.Popen(command, stderr=subprocess.PIPE)
return tesseract.wait(), tesseract.stderr.read()
def __remove_file(filename):
try:
os.remove(filename)
except OSError:
pass
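# --- Illustrative usage (not part of the original module) -------------------
# Requires a local tesseract install; the paths below are hypothetical.
if __name__ == '__main__':
    print(img_to_str('/usr/bin/tesseract', '/tmp/scanned_page.png'))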
|
mit
|
runt18/nupic
|
src/nupic/data/aggregator.py
|
1
|
28990
|
#! /usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
from collections import defaultdict
import datetime
import os
from pkg_resources import resource_filename
import time
from nupic.data import SENTINEL_VALUE_FOR_MISSING_DATA
from nupic.data.fieldmeta import FieldMetaSpecial
from nupic.data.file_record_stream import FileRecordStream
"""The aggregator aggregates PF datasets
It supports aggregation of multiple records based on time.
Common use cases:
- Aggregate records by month
- Aggregate records every 3 months starting April 15th
- Aggregate records in 2.5 seconds intervals
Assumption: aggregated slices fit in memory. All the records that are aggregated
per period are stored in memory until the next slice starts and are only
aggregated then. If this assumption is too strong, the script will need to
write slices to temporary storage or use incremental aggregation techniques.
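Example (illustrative, not from the original source): the "2.5 seconds
intervals" use case corresponds to an aggregationInfo dict like

    {'seconds': 2, 'milliseconds': 500,
     'fields': [('timestamp', 'first'), ('consumption', 'mean')]}

where the field names are hypothetical; see the Aggregator class below for
the full list of accepted keys.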
"""
def initFilter(input, filterInfo = None):
""" Initializes internal filter variables for further processing.
Returns a tuple (function to call,parameters for the filter call)
The filterInfo is a dict. Here is an example structure:
{fieldName: {'min': x,
'max': y,
'type': 'category', # or 'number'
'acceptValues': ['foo', 'bar'],
}
}
This returns the following:
(filterFunc, ((fieldIdx, fieldFilterFunc, filterDict),
...)
Where fieldIdx is the index of the field within each record
fieldFilterFunc returns True if the value is "OK" (within min, max or
part of acceptValues)
fieldDict is a dict containing 'type', 'min', max', 'acceptValues'
"""
if filterInfo is None:
return None
# Build an array of index/func to call on record[index]
filterList = []
for i, fieldName in enumerate(input.getFieldNames()):
fieldFilter = filterInfo.get(fieldName, None)
if fieldFilter is None:
continue
var = dict()
var['acceptValues'] = None
min = fieldFilter.get('min', None)
max = fieldFilter.get('max', None)
var['min'] = min
var['max'] = max
if fieldFilter['type'] == 'category':
var['acceptValues'] = fieldFilter['acceptValues']
fp = lambda x: (x['value'] != SENTINEL_VALUE_FOR_MISSING_DATA and \
x['value'] in x['acceptValues'])
elif fieldFilter['type'] == 'number':
if min is not None and max is not None:
fp = lambda x: (x['value'] != SENTINEL_VALUE_FOR_MISSING_DATA and \
x['value'] >= x['min'] and x['value'] <= x['max'])
elif min is not None:
fp = lambda x: (x['value'] != SENTINEL_VALUE_FOR_MISSING_DATA and \
x['value'] >= x['min'])
else:
fp = lambda x: (x['value'] != SENTINEL_VALUE_FOR_MISSING_DATA and \
x['value'] <= x['max'])
filterList.append((i, fp, var))
return (_filterRecord, filterList)
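# --- Illustrative sketch (not from the original source) ---------------------
# A filterInfo dict combining both filter types described in the initFilter()
# docstring (field names and bounds are hypothetical):
#
#   filterInfo = {
#       'consumption': {'type': 'number', 'min': 0, 'max': 100},
#       'gym': {'type': 'category', 'acceptValues': ['foo', 'bar']},
#   }
#
# initFilter(reader, filterInfo) returns (_filterRecord, filterList), which is
# then applied to every incoming record.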
def _filterRecord(filterList, record):
""" Takes a record and returns true if record meets filter criteria,
false otherwise
"""
for (fieldIdx, fp, params) in filterList:
x = dict()
x['value'] = record[fieldIdx]
x['acceptValues'] = params['acceptValues']
x['min'] = params['min']
x['max'] = params['max']
if not fp(x):
return False
# None of the field filters triggered, accept the record as a good one
return True
def _aggr_first(inList):
""" Returns first non-None element in the list, or None if all are None
"""
for elem in inList:
if elem != SENTINEL_VALUE_FOR_MISSING_DATA:
return elem
return None
def _aggr_last(inList):
""" Returns last non-None element in the list, or None if all are None
"""
for elem in reversed(inList):
if elem != SENTINEL_VALUE_FOR_MISSING_DATA:
return elem
return None
def _aggr_sum(inList):
""" Returns sum of the elements in the list. Missing items are replaced with
the mean value
"""
aggrMean = _aggr_mean(inList)
if aggrMean is None:
return None
aggrSum = 0
for elem in inList:
if elem != SENTINEL_VALUE_FOR_MISSING_DATA:
aggrSum += elem
else:
aggrSum += aggrMean
return aggrSum
def _aggr_mean(inList):
""" Returns mean of non-None elements of the list
"""
aggrSum = 0
nonNone = 0
for elem in inList:
if elem != SENTINEL_VALUE_FOR_MISSING_DATA:
aggrSum += elem
nonNone += 1
if nonNone != 0:
return aggrSum / nonNone
else:
return None
def _aggr_mode(inList):
""" Returns most common value seen in the non-None elements of the list
"""
valueCounts = dict()
nonNone = 0
for elem in inList:
if elem == SENTINEL_VALUE_FOR_MISSING_DATA:
continue
nonNone += 1
if elem in valueCounts:
valueCounts[elem] += 1
else:
valueCounts[elem] = 1
# Get the most common one
if nonNone == 0:
return None
# Sort by counts
sortedCounts = valueCounts.items()
sortedCounts.sort(cmp=lambda x,y: x[1] - y[1], reverse=True)
return sortedCounts[0][0]
def _aggr_weighted_mean(inList, params):
""" Weighted mean uses params (must be the same size as inList) and
makes weighed mean of inList"""
assert(len(inList) == len(params))
# If all weights are 0, then the value is not defined, return None (missing)
weightsSum = sum(params)
if weightsSum == 0:
return None
weightedMean = 0
for i, elem in enumerate(inList):
weightedMean += elem * params[i]
return weightedMean / weightsSum
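# Worked example (not from the original source):
#   _aggr_weighted_mean([10.0, 20.0, 30.0], [1, 1, 2])
#     == (10.0*1 + 20.0*1 + 30.0*2) / (1 + 1 + 2) == 90.0 / 4 == 22.5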
class Aggregator(object):
"""
This class provides context and methods for aggregating records. The caller
should construct an instance of Aggregator and then call the next() method
repeatedly to get each aggregated record.
This is an example aggregationInfo dict:
{
'hours': 1,
'minutes': 15,
'fields': [
('timestamp', 'first'),
('gym', 'first'),
('consumption', 'sum')
],
}
"""
def __init__(self, aggregationInfo, inputFields, timeFieldName=None,
sequenceIdFieldName=None, resetFieldName=None, filterInfo=None):
""" Construct an aggregator instance
Params:
- aggregationInfo: a dictionary that contains the following entries
- fields: a list of pairs. Each pair is a field name and an
aggregation function (e.g. sum). The function will be used to aggregate
multiple values during the aggregation period.
- aggregation period: 0 or more of unit=value fields; allowed units are:
[years months] | [weeks days hours minutes seconds milliseconds
microseconds]
NOTE: years and months are mutually-exclusive with the other units. See
getEndTime() and _aggregate() for more details.
Example1: years=1, months=6,
Example2: hours=1, minutes=30,
If none of the period fields are specified or if all that are specified
have values of 0, then aggregation will be suppressed, and the given
inputFile parameter value will be returned.
- inputFields: The fields from the data source. This is a sequence of
`nupic.data.fieldmeta.FieldMetaInfo` instances.
- timeFieldName: name of the field to use as the time field. If None,
then the time field will be queried from the reader.
- sequenceIdFieldName: name of the field to use as the sequenceId. If None,
then the sequence id field will be queried from the reader.
- resetFieldName: name of the field to use as the reset field. If None,
then the reset field will be queried from the reader.
- filterInfo: a structure with rules for filtering records out
If the input file contains a time field, sequence id field or reset field
that were not specified in aggregationInfo fields, those fields will be
added automatically with the following rules:
1. The order will be R, S, T, rest of the fields
2. The aggregation function for these will be to pick the first:
lambda x: x[0]
"""
# -----------------------------------------------------------------------
# Save member variables.
# The same aggregationInfo dict may be used by the caller for generating
# more datasets (with slight changes), so it is safer to copy it here and
# all changes made here will not affect the input aggregationInfo
self._filterInfo = filterInfo
self._nullAggregation = False
self._inputFields = inputFields
# See if this is a null aggregation
self._nullAggregation = False
if aggregationInfo is None:
self._nullAggregation = True
else:
aggDef = defaultdict(lambda: 0, aggregationInfo)
if (aggDef['years'] == aggDef['months'] == aggDef['weeks'] ==
aggDef['days'] == aggDef['hours'] == aggDef['minutes'] ==
aggDef['seconds'] == aggDef['milliseconds'] ==
aggDef['microseconds'] == 0):
self._nullAggregation = True
# Prepare the field filtering info. The filter allows us to ignore records
# based on specified min or max values for each field.
self._filter = initFilter(self._inputFields, self._filterInfo)
# ----------------------------------------------------------------------
# Fill in defaults
self._fields = None
self._resetFieldIdx = None
self._timeFieldIdx = None
self._sequenceIdFieldIdx = None
self._aggTimeDelta = datetime.timedelta()
self._aggYears = 0
self._aggMonths = 0
# Init state variables used within next()
self._aggrInputBookmark = None
self._startTime = None
self._endTime = None
self._sequenceId = None
self._firstSequenceStartTime = None
self._inIdx = -1
self._slice = defaultdict(list)
# ========================================================================
# Get aggregation params
# self._fields will be a list of tuples: (fieldIdx, funcPtr, funcParam)
if not self._nullAggregation:
# ---------------------------------------------------------------------
# Verify that all aggregation field names exist in the input
fieldNames = [f[0] for f in aggregationInfo['fields']]
readerFieldNames = [f[0] for f in self._inputFields]
for name in fieldNames:
if not name in readerFieldNames:
raise Exception('No such input field: {0!s}'.format((name)))
# ---------------------------------------------------------------------
# Get the indices of the special fields, if given to our constructor
if timeFieldName is not None:
self._timeFieldIdx = readerFieldNames.index(timeFieldName)
if resetFieldName is not None:
self._resetFieldIdx = readerFieldNames.index(resetFieldName)
if sequenceIdFieldName is not None:
self._sequenceIdFieldIdx = readerFieldNames.index(sequenceIdFieldName)
# ---------------------------------------------------------------------
# Re-order the fields to match the order in the reader and add in any
# fields from the reader that were not already in the aggregationInfo
# fields list.
self._fields = []
fieldIdx = -1
for (name, type, special) in self._inputFields:
fieldIdx += 1
# See if it exists in the aggregationInfo
found = False
for field in aggregationInfo['fields']:
if field[0] == name:
aggFunctionName = field[1]
found = True
break
if not found:
aggFunctionName = 'first'
# Convert to a function pointer and optional params
(funcPtr, params) = self._getFuncPtrAndParams(aggFunctionName)
# Add it
self._fields.append((fieldIdx, funcPtr, params))
# Is it a special field that we are still looking for?
if special == FieldMetaSpecial.reset and self._resetFieldIdx is None:
self._resetFieldIdx = fieldIdx
if special == FieldMetaSpecial.timestamp and self._timeFieldIdx is None:
self._timeFieldIdx = fieldIdx
if (special == FieldMetaSpecial.sequence and
self._sequenceIdFieldIdx is None):
self._sequenceIdFieldIdx = fieldIdx
assert self._timeFieldIdx is not None, "No time field was found"
# Build the aggregation period as a timedelta; years and months are tracked separately
self._aggTimeDelta = datetime.timedelta(days=aggDef['days'],
hours=aggDef['hours'],
minutes=aggDef['minutes'],
seconds=aggDef['seconds'],
milliseconds=aggDef['milliseconds'],
microseconds=aggDef['microseconds'],
weeks=aggDef['weeks'])
self._aggYears = aggDef['years']
self._aggMonths = aggDef['months']
if self._aggTimeDelta:
assert self._aggYears == 0
assert self._aggMonths == 0
def _getEndTime(self, t):
"""Add the aggregation period to the input time t and return a datetime object
Years and months are handled as a special case because of leap years
and months with different numbers of days. They can't be converted
to a strict timedelta, since a period of e.g. 3 months has a varying
duration. The solution is to add the years and months fields directly
to the current time.
Other periods are converted to a timedelta and added to the current time.
"""
assert isinstance(t, datetime.datetime)
if self._aggTimeDelta:
return t + self._aggTimeDelta
else:
year = t.year + self._aggYears + (t.month - 1 + self._aggMonths) / 12
month = (t.month - 1 + self._aggMonths) % 12 + 1
return t.replace(year=year, month=month)
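# Worked example (added for illustration, not in the original source): with
# self._aggMonths == 3 and t == datetime.datetime(2014, 11, 15), the sums above
# give month - 1 + 3 == 13, so year becomes 2015 and month becomes
# 13 % 12 + 1 == 2, i.e. the returned end time is 2015-02-15.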
def _getFuncPtrAndParams(self, funcName):
""" Given the name of an aggregation function, returns the function pointer
and param.
Parameters:
------------------------------------------------------------------------
funcName: a string (name of function) or funcPtr
retval: (funcPtr, param)
"""
params = None
if isinstance(funcName, basestring):
if funcName == 'sum':
fp = _aggr_sum
elif funcName == 'first':
fp = _aggr_first
elif funcName == 'last':
fp = _aggr_last
elif funcName == 'mean':
fp = _aggr_mean
elif funcName == 'max':
fp = max
elif funcName == 'min':
fp = min
elif funcName == 'mode':
fp = _aggr_mode
elif funcName.startswith('wmean:'):
fp = _aggr_weighted_mean
paramsName = funcName[6:]
params = [f[0] for f in self._inputFields].index(paramsName)
else:
fp = funcName
return (fp, params)
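# Illustrative note (added; 'gym' is a hypothetical field name): a spec such as
# ('consumption', 'wmean:gym') resolves to (_aggr_weighted_mean, idx), where idx
# is the position of the 'gym' field in the input, while plain names such as
# 'sum' or 'mean' resolve to (funcPtr, None).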
def _createAggregateRecord(self):
""" Generate the aggregated output record
Parameters:
------------------------------------------------------------------------
retval: outputRecord
"""
record = []
for i, (fieldIdx, aggFP, paramIdx) in enumerate(self._fields):
if aggFP is None: # this field is not supposed to be aggregated.
continue
values = self._slice[i]
refIndex = None
if paramIdx is not None:
record.append(aggFP(values, self._slice[paramIdx]))
else:
record.append(aggFP(values))
return record
def isNullAggregation(self):
""" Return True if no aggregation will be performed, either because the
aggregationInfo was None or all aggregation params within it were 0.
"""
return self._nullAggregation
def next(self, record, curInputBookmark):
""" Return the next aggregated record, if any
Parameters:
------------------------------------------------------------------------
record: The input record (values only) from the input source, or
None if the input has reached EOF (this will cause this
method to force completion of and return any partially
aggregated time period)
curInputBookmark: The bookmark to the next input record
retval:
(outputRecord, inputBookmark)
outputRecord: the aggregated record
inputBookmark: a bookmark to the last position from the input that
contributed to this aggregated record.
If we don't have any aggregated records yet, returns (None, None)
The caller should generally do a loop like this:
while True:
inRecord = reader.getNextRecord()
bookmark = reader.getBookmark()
(aggRecord, aggBookmark) = aggregator.next(inRecord, bookmark)
# reached EOF?
if inRecord is None and aggRecord is None:
break
if aggRecord is not None:
processRecord(aggRecord, aggBookmark)
This method makes use of the self._slice member variable to build up
the values we need to aggregate. This is a dict of lists. The keys are
the field indices and the elements of each list are the values for that
field. For example:
self._slice = { 0: [42, 53], 1: [4.0, 5.1] }
"""
# This will hold the aggregated record we return
outRecord = None
# This will hold the bookmark of the last input used within the
# aggregated record we return.
retInputBookmark = None
if record is not None:
# Increment input count
self._inIdx += 1
#print self._inIdx, record
# Apply the filter, ignore the record if any field is unacceptable
if self._filter is not None and not self._filter[0](self._filter[1], record):
return (None, None)
# If no aggregation info just return as-is
if self._nullAggregation:
return (record, curInputBookmark)
# ----------------------------------------------------------------------
# Do aggregation
#
# Remember the very first record time stamp - it will be used as
# the timestamp for all first records in all sequences to align
# times for the aggregation/join of sequences.
#
# For a set of aggregated records, it will use the beginning of the time
# window as a timestamp for the set
#
t = record[self._timeFieldIdx]
if self._firstSequenceStartTime is None:
self._firstSequenceStartTime = t
# Create initial startTime and endTime if needed
if self._startTime is None:
self._startTime = t
if self._endTime is None:
self._endTime = self._getEndTime(t)
assert self._endTime > t
#print 'Processing line:', i, t, endTime
#from dbgp.client import brk; brk(port=9011)
# ----------------------------------------------------------------------
# Does this record have a reset signal or sequence Id associated with it?
# If so, see if we've reached a sequence boundary
if self._resetFieldIdx is not None:
resetSignal = record[self._resetFieldIdx]
else:
resetSignal = None
if self._sequenceIdFieldIdx is not None:
currSequenceId = record[self._sequenceIdFieldIdx]
else:
currSequenceId = None
newSequence = (resetSignal == 1 and self._inIdx > 0) \
or self._sequenceId != currSequenceId \
or self._inIdx == 0
if newSequence:
self._sequenceId = currSequenceId
# --------------------------------------------------------------------
# We end the aggregation chunk if we go past the end time
# -OR- we get an out of order record (t < startTime)
sliceEnded = (t >= self._endTime or t < self._startTime)
# -------------------------------------------------------------------
# Time to generate a new output record?
if (newSequence or sliceEnded) and len(self._slice) > 0:
# Create aggregated record
# print 'Creating aggregate record...'
# Set the first record's timestamp to the beginning of the time period,
# in case the first record didn't fall exactly on the period boundary
for j, f in enumerate(self._fields):
index = f[0]
if index == self._timeFieldIdx:
self._slice[j][0] = self._startTime
break
# Generate the aggregated record
outRecord = self._createAggregateRecord()
retInputBookmark = self._aggrInputBookmark
# Reset the slice
self._slice = defaultdict(list)
# --------------------------------------------------------------------
# Add the current record to the slice (note: slices are kept in memory). Each
# field in the slice is a list of field values from all the sliced
# records
for j, f in enumerate(self._fields):
index = f[0]
# append the parsed field value to the proper aggregated slice field.
self._slice[j].append(record[index])
self._aggrInputBookmark = curInputBookmark
# --------------------------------------------------------------------
# If we've encountered a new sequence, start aggregation over again
if newSequence:
# TODO: May use self._firstSequenceStartTime as a start for the new
# sequence (to align all sequences)
self._startTime = t
self._endTime = self._getEndTime(t)
# --------------------------------------------------------------------
# If a slice just ended, re-compute the start and end time for the
# next aggregated record
if sliceEnded:
# Did we receive an out of order record? If so, go back and iterate
# till we get to the next end time boundary.
if t < self._startTime:
self._endTime = self._firstSequenceStartTime
while t >= self._endTime:
self._startTime = self._endTime
self._endTime = self._getEndTime(self._endTime)
# If we have a record to return, do it now
if outRecord is not None:
return (outRecord, retInputBookmark)
# ---------------------------------------------------------------------
# Input reached EOF
# Aggregate one last time in the end if necessary
elif self._slice:
# Set the first record's timestamp to the beginning of the time period,
# in case the first record didn't fall exactly on the period boundary
for j, f in enumerate(self._fields):
index = f[0]
if index == self._timeFieldIdx:
self._slice[j][0] = self._startTime
break
outRecord = self._createAggregateRecord()
retInputBookmark = self._aggrInputBookmark
self._slice = defaultdict(list)
# Return aggregated record
return (outRecord, retInputBookmark)
def generateDataset(aggregationInfo, inputFilename, outputFilename=None):
"""Generate a dataset of aggregated values
Parameters:
----------------------------------------------------------------------------
aggregationInfo: a dictionary that contains the following entries
- fields: a list of pairs. Each pair is a field name and an
aggregation function (e.g. sum). The function will be used to aggregate
multiple values during the aggregation period.
aggregation period: 0 or more of unit=value fields; allowed units are:
[years months] |
[weeks days hours minutes seconds milliseconds microseconds]
NOTE: years and months are mutually-exclusive with the other units.
See getEndTime() and _aggregate() for more details.
Example1: years=1, months=6,
Example2: hours=1, minutes=30,
If none of the period fields are specified or if all that are specified
have values of 0, then aggregation will be suppressed, and the given
inputFile parameter value will be returned.
inputFilename: filename of the input dataset within examples/prediction/data
outputFilename: name for the output file. If not given, a name will be
generated based on the input filename and the aggregation params
retval: Name of the generated output file. This will be the same as the input
file name if no aggregation needed to be performed
If the input file contained a time field, sequence id field or reset field
that were not specified in aggregationInfo fields, those fields will be
added automatically with the following rules:
1. The order will be R, S, T, rest of the fields
2. The aggregation function for all will be to pick the first: lambda x: x[0]
Returns: the path of the aggregated data file if aggregation was performed
(in the same directory as the given input file); if aggregation did not
need to be performed, then the given inputFile argument value is returned.
"""
# Create the input stream
inputFullPath = resource_filename("nupic.datafiles", inputFilename)
inputObj = FileRecordStream(inputFullPath)
# Instantiate the aggregator
aggregator = Aggregator(aggregationInfo=aggregationInfo,
inputFields=inputObj.getFields())
# Is it a null aggregation? If so, just return the input file unmodified
if aggregator.isNullAggregation():
return inputFullPath
# ------------------------------------------------------------------------
# If we were not given an output filename, create one based on the
# aggregation settings
if outputFilename is None:
outputFilename = 'agg_{0!s}'.format( \
os.path.splitext(os.path.basename(inputFullPath))[0])
timePeriods = 'years months weeks days '\
'hours minutes seconds milliseconds microseconds'
for k in timePeriods.split():
if aggregationInfo.get(k, 0) > 0:
outputFilename += '_{0!s}_{1:d}'.format(k, aggregationInfo[k])
outputFilename += '.csv'
outputFilename = os.path.join(os.path.dirname(inputFullPath), outputFilename)
# ------------------------------------------------------------------------
# If some other process already started creating this file, simply
# wait for it to finish and return without doing anything
lockFilePath = outputFilename + '.please_wait'
if os.path.isfile(outputFilename) or \
os.path.isfile(lockFilePath):
while os.path.isfile(lockFilePath):
print 'Waiting for {0!s} to be fully written by another process'.format( \
lockFilePath)
time.sleep(1)
return outputFilename
# Create the lock file
lockFD = open(lockFilePath, 'w')
# -------------------------------------------------------------------------
# Create the output stream
outputObj = FileRecordStream(streamID=outputFilename, write=True,
fields=inputObj.getFields())
# -------------------------------------------------------------------------
# Write all aggregated records to the output
while True:
inRecord = inputObj.getNextRecord()
(aggRecord, aggBookmark) = aggregator.next(inRecord, None)
if aggRecord is None and inRecord is None:
break
if aggRecord is not None:
outputObj.appendRecord(aggRecord)
return outputFilename
def getFilename(aggregationInfo, inputFile):
"""Generate the filename for aggregated dataset
The filename is based on the input filename and the
aggregation period.
Returns the inputFile if no aggregation required (aggregation
info has all 0's)
"""
# Find the actual file, with an absolute path
inputFile = resource_filename("nupic.datafiles", inputFile)
a = defaultdict(lambda: 0, aggregationInfo)
outputDir = os.path.dirname(inputFile)
outputFile = 'agg_{0!s}'.format(os.path.splitext(os.path.basename(inputFile))[0])
noAggregation = True
timePeriods = 'years months weeks days '\
'hours minutes seconds milliseconds microseconds'
for k in timePeriods.split():
if a[k] > 0:
noAggregation = False
outputFile += '_{0!s}_{1:d}'.format(k, a[k])
if noAggregation:
return inputFile
outputFile += '.csv'
outputFile = os.path.join(outputDir, outputFile)
return outputFile
|
agpl-3.0
|
dje42/gdb
|
gdb/contrib/cleanup_check.py
|
20
|
13262
|
# Copyright 2013 Free Software Foundation, Inc.
#
# This is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see
# <http://www.gnu.org/licenses/>.
import gcc
import gccutils
import sys
want_raii_info = False
logging = False
show_cfg = False
def log(msg, indent=0):
global logging
if logging:
sys.stderr.write('%s%s\n' % (' ' * indent, msg))
sys.stderr.flush()
def is_cleanup_type(return_type):
if not isinstance(return_type, gcc.PointerType):
return False
if not isinstance(return_type.dereference, gcc.RecordType):
return False
if str(return_type.dereference.name) == 'cleanup':
return True
return False
def is_constructor(decl):
"Return True if the function DECL is a cleanup constructor; False otherwise"
return is_cleanup_type(decl.type.type) and (not decl.name or str(decl.name) != 'make_final_cleanup')
destructor_names = set(['do_cleanups', 'discard_cleanups'])
def is_destructor(decl):
return decl.name in destructor_names
# This list is just much too long... we should probably have an
# attribute instead.
special_names = set(['do_final_cleanups', 'discard_final_cleanups',
'save_cleanups', 'save_final_cleanups',
'restore_cleanups', 'restore_final_cleanups',
'exceptions_state_mc_init',
'make_my_cleanup2', 'make_final_cleanup', 'all_cleanups',
'save_my_cleanups', 'quit_target'])
def needs_special_treatment(decl):
return decl.name in special_names
# Sometimes we need a new placeholder object that isn't the same as
# anything else.
class Dummy(object):
def __init__(self, location):
self.location = location
# A wrapper for a cleanup which has been assigned to a variable.
# This holds the variable and the location.
class Cleanup(object):
def __init__(self, var, location):
self.var = var
self.location = location
# A class representing a master cleanup. This holds a stack of
# cleanup objects and supports a merging operation.
class MasterCleanup(object):
# Create a new MasterCleanup object. OTHER, if given, is a
# MasterCleanup object to copy.
def __init__(self, other = None):
# 'cleanups' is a list of cleanups. Each element is either a
# Dummy, for an anonymous cleanup, or a Cleanup, for a cleanup
# which was assigned to a variable.
if other is None:
self.cleanups = []
self.aliases = {}
else:
self.cleanups = other.cleanups[:]
self.aliases = dict(other.aliases)
def compare_vars(self, definition, argument):
if definition == argument:
return True
if argument in self.aliases:
argument = self.aliases[argument]
if definition in self.aliases:
definition = self.aliases[definition]
return definition == argument
def note_assignment(self, lhs, rhs):
log('noting assignment %s = %s' % (lhs, rhs), 4)
self.aliases[lhs] = rhs
# Merge with another MasterCleanup.
# Returns True if this resulted in a change to our state.
def merge(self, other):
# We do explicit iteration like this so we can easily
# update the list after the loop.
counter = -1
found_named = False
for counter in range(len(self.cleanups) - 1, -1, -1):
var = self.cleanups[counter]
log('merge checking %s' % var, 4)
# Only interested in named cleanups.
if isinstance(var, Dummy):
log('=> merge dummy', 5)
continue
# Now see if VAR is found in OTHER.
if other._find_var(var.var) >= 0:
log ('=> merge found', 5)
break
log('=> merge not found', 5)
found_named = True
if found_named and counter < len(self.cleanups) - 1:
log ('merging to %d' % counter, 4)
if counter < 0:
self.cleanups = []
else:
self.cleanups = self.cleanups[0:counter]
return True
# If SELF is empty but OTHER has some cleanups, then consider
# that a change as well.
if len(self.cleanups) == 0 and len(other.cleanups) > 0:
log('merging non-empty other', 4)
self.cleanups = other.cleanups[:]
return True
return False
# Push a new constructor onto our stack. LHS is the
# left-hand-side of the GimpleCall statement. It may be None,
# meaning that this constructor's value wasn't used.
def push(self, location, lhs):
if lhs is None:
obj = Dummy(location)
else:
obj = Cleanup(lhs, location)
log('pushing %s' % lhs, 4)
idx = self._find_var(lhs)
if idx >= 0:
gcc.permerror(location, 'reassigning to known cleanup')
gcc.inform(self.cleanups[idx].location,
'previous assignment is here')
self.cleanups.append(obj)
# A helper for merge and pop that finds BACK_TO in self.cleanups,
# and returns the index, or -1 if not found.
def _find_var(self, back_to):
for i in range(len(self.cleanups) - 1, -1, -1):
if isinstance(self.cleanups[i], Dummy):
continue
if self.compare_vars(self.cleanups[i].var, back_to):
return i
return -1
# Pop constructors until we find one matching BACK_TO.
# This is invoked when we see a do_cleanups call.
def pop(self, location, back_to):
log('pop:', 4)
i = self._find_var(back_to)
if i >= 0:
self.cleanups = self.cleanups[0:i]
else:
gcc.permerror(location, 'destructor call with unknown argument')
# Check whether ARG is the current master cleanup. Return True if
# all is well.
def verify(self, location, arg):
log('verify %s' % arg, 4)
return (len(self.cleanups) > 0
and not isinstance(self.cleanups[0], Dummy)
and self.compare_vars(self.cleanups[0].var, arg))
# Check whether SELF is empty.
def isempty(self):
log('isempty: len = %d' % len(self.cleanups), 4)
return len(self.cleanups) == 0
# Emit informational warnings about the cleanup stack.
def inform(self):
for item in reversed(self.cleanups):
gcc.inform(item.location, 'leaked cleanup')
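# Illustrative note (added): the stack above mirrors the GDB C idiom being
# checked, in which every cleanup constructor pushes onto a master chain and
# do_cleanups() pops back to a previously saved point, e.g.:
#
#   struct cleanup *old = make_cleanup (xfree, buf);  /* push */
#   ...
#   do_cleanups (old);                                /* pop back to OLD */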
class CleanupChecker:
def __init__(self, fun):
self.fun = fun
self.seen_edges = set()
self.bad_returns = set()
# This maps BB indices to a list of master cleanups for the
# BB.
self.master_cleanups = {}
# Pick a reasonable location for the basic block BB.
def guess_bb_location(self, bb):
if isinstance(bb.gimple, list):
for stmt in bb.gimple:
if stmt.loc:
return stmt.loc
return self.fun.end
# Compute the master cleanup list for BB.
# Modifies MASTER_CLEANUP in place.
def compute_master(self, bb, bb_from, master_cleanup):
if not isinstance(bb.gimple, list):
return
curloc = self.fun.end
for stmt in bb.gimple:
if stmt.loc:
curloc = stmt.loc
if isinstance(stmt, gcc.GimpleCall) and stmt.fndecl:
if is_constructor(stmt.fndecl):
log('saw constructor %s in bb=%d' % (str(stmt.fndecl), bb.index), 2)
self.cleanup_aware = True
master_cleanup.push(curloc, stmt.lhs)
elif is_destructor(stmt.fndecl):
if str(stmt.fndecl.name) != 'do_cleanups':
self.only_do_cleanups_seen = False
log('saw destructor %s in bb=%d, bb_from=%d, argument=%s'
% (str(stmt.fndecl.name), bb.index, bb_from, str(stmt.args[0])),
2)
master_cleanup.pop(curloc, stmt.args[0])
elif needs_special_treatment(stmt.fndecl):
pass
# gcc.permerror(curloc, 'function needs special treatment')
elif isinstance(stmt, gcc.GimpleAssign):
if isinstance(stmt.lhs, gcc.VarDecl) and isinstance(stmt.rhs[0], gcc.VarDecl):
master_cleanup.note_assignment(stmt.lhs, stmt.rhs[0])
elif isinstance(stmt, gcc.GimpleReturn):
if self.is_constructor:
if not master_cleanup.verify(curloc, stmt.retval):
gcc.permerror(curloc,
'constructor does not return master cleanup')
elif not self.is_special_constructor:
if not master_cleanup.isempty():
if curloc not in self.bad_returns:
gcc.permerror(curloc, 'cleanup stack is not empty at return')
self.bad_returns.add(curloc)
master_cleanup.inform()
# Traverse a basic block, updating the master cleanup information
# and propagating to other blocks.
def traverse_bbs(self, edge, bb, bb_from, entry_master):
log('traverse_bbs %d from %d' % (bb.index, bb_from), 1)
# Propagate the entry MasterCleanup though this block.
master_cleanup = MasterCleanup(entry_master)
self.compute_master(bb, bb_from, master_cleanup)
modified = False
if bb.index in self.master_cleanups:
# Merge the newly-computed MasterCleanup into the one we
# have already computed. If this resulted in a
# significant change, then we need to re-propagate.
modified = self.master_cleanups[bb.index].merge(master_cleanup)
else:
self.master_cleanups[bb.index] = master_cleanup
modified = True
# EDGE is None for the entry BB.
if edge is not None:
# If merging cleanups caused a change, check to see if we
# have a bad loop.
if edge in self.seen_edges:
# This error doesn't really help.
# if modified:
# gcc.permerror(self.guess_bb_location(bb),
# 'invalid cleanup use in loop')
return
self.seen_edges.add(edge)
if not modified:
return
# Now propagate to successor nodes.
for edge in bb.succs:
self.traverse_bbs(edge, edge.dest, bb.index, master_cleanup)
def check_cleanups(self):
if not self.fun.cfg or not self.fun.decl:
return 'ignored'
if is_destructor(self.fun.decl):
return 'destructor'
if needs_special_treatment(self.fun.decl):
return 'special'
self.is_constructor = is_constructor(self.fun.decl)
self.is_special_constructor = not self.is_constructor and str(self.fun.decl.name).find('with_cleanup') > -1
# Yuck.
if str(self.fun.decl.name) == 'gdb_xml_create_parser_and_cleanup_1':
self.is_special_constructor = True
if self.is_special_constructor:
gcc.inform(self.fun.start, 'function %s is a special constructor' % (self.fun.decl.name))
# If we only see do_cleanups calls, and this function is not
# itself a constructor, then we can convert it easily to RAII.
self.only_do_cleanups_seen = not self.is_constructor
# If we ever call a constructor, then we are "cleanup-aware".
self.cleanup_aware = False
entry_bb = self.fun.cfg.entry
master_cleanup = MasterCleanup()
self.traverse_bbs(None, entry_bb, -1, master_cleanup)
if want_raii_info and self.only_do_cleanups_seen and self.cleanup_aware:
gcc.inform(self.fun.decl.location,
'function %s could be converted to RAII' % (self.fun.decl.name))
if self.is_constructor:
return 'constructor'
return 'OK'
class CheckerPass(gcc.GimplePass):
def execute(self, fun):
if fun.decl:
log("Starting " + fun.decl.name)
if show_cfg:
dot = gccutils.cfg_to_dot(fun.cfg, fun.decl.name)
gccutils.invoke_dot(dot, name=fun.decl.name)
checker = CleanupChecker(fun)
what = checker.check_cleanups()
if fun.decl:
log(fun.decl.name + ': ' + what, 2)
ps = CheckerPass(name = 'check-cleanups')
# We need the cfg, but we want a relatively high-level Gimple.
ps.register_after('cfg')
|
gpl-2.0
|
Godiyos/python-for-android
|
python-build/python-libs/gdata/build/lib/gdata/sample_util.py
|
133
|
7858
|
#!/usr/bin/env python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Provides utility functions used with command line samples."""
# This module is used for version 2 of the Google Data APIs.
import sys
import getpass
import urllib
import gdata.gauth
__author__ = '[email protected] (Jeff Scudder)'
CLIENT_LOGIN = 1
AUTHSUB = 2
OAUTH = 3
HMAC = 1
RSA = 2
def get_param(name, prompt='', secret=False, ask=True):
# First, check for a command line parameter.
for i in xrange(len(sys.argv)):
if sys.argv[i].startswith('--%s=' % name):
return sys.argv[i].split('=')[1]
elif sys.argv[i] == '--%s' % name:
return sys.argv[i + 1]
if ask:
# If it was not on the command line, ask the user to input the value.
prompt = '%s: ' % prompt
if secret:
return getpass.getpass(prompt)
else:
return raw_input(prompt)
else:
return None
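# Illustrative usage (added): a sample invoked as
#   python sample.py --email=user@example.com
# makes get_param('email') return 'user@example.com' without prompting, while a
# missing flag with ask=True falls back to interactive input (getpass is used
# when secret=True).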
def authorize_client(client, auth_type=None, service=None, source=None,
scopes=None, oauth_type=None, consumer_key=None,
consumer_secret=None):
"""Uses command line arguments, or prompts user for token values."""
if auth_type is None:
auth_type = int(get_param(
'auth_type', 'Please choose the authorization mechanism you want'
' to use.\n'
'1. to use your email address and password (ClientLogin)\n'
'2. to use a web browser to visit an auth web page (AuthSub)\n'
'3. if you have registered to use OAuth\n'))
# Get the scopes for the services we want to access.
if auth_type == AUTHSUB or auth_type == OAUTH:
if scopes is None:
scopes = get_param(
'scopes', 'Enter the URL prefixes (scopes) for the resources you '
'would like to access.\nFor multiple scope URLs, place a comma '
'between each URL.\n'
'Example: http://www.google.com/calendar/feeds/,'
'http://www.google.com/m8/feeds/\n').split(',')
elif isinstance(scopes, (str, unicode)):
scopes = scopes.split(',')
if auth_type == CLIENT_LOGIN:
email = get_param('email', 'Please enter your username')
password = get_param('password', 'Password', True)
if service is None:
service = get_param(
'service', 'What is the name of the service you wish to access?'
'\n(See list:'
' http://code.google.com/apis/gdata/faq.html#clientlogin)')
if source is None:
source = get_param('source', ask=False)
client.client_login(email, password, source=source, service=service)
elif auth_type == AUTHSUB:
auth_sub_token = get_param('auth_sub_token', ask=False)
session_token = get_param('session_token', ask=False)
private_key = None
auth_url = None
single_use_token = None
rsa_private_key = get_param(
'rsa_private_key',
'If you want to use secure mode AuthSub, please provide the\n'
' location of your RSA private key which corresponds to the\n'
' certificate you have uploaded for your domain. If you do not\n'
' have an RSA key, simply press enter')
if rsa_private_key:
try:
private_key_file = open(rsa_private_key, 'rb')
private_key = private_key_file.read()
private_key_file.close()
except IOError:
print 'Unable to read private key from file'
if private_key is not None:
if client.auth_token is None:
if session_token:
client.auth_token = gdata.gauth.SecureAuthSubToken(
session_token, private_key, scopes)
return
elif auth_sub_token:
client.auth_token = gdata.gauth.SecureAuthSubToken(
auth_sub_token, private_key, scopes)
client.upgrade_token()
return
auth_url = gdata.gauth.generate_auth_sub_url(
'http://gauthmachine.appspot.com/authsub', scopes, True)
print 'with a private key, get ready for this URL', auth_url
else:
if client.auth_token is None:
if session_token:
client.auth_token = gdata.gauth.AuthSubToken(session_token, scopes)
return
elif auth_sub_token:
client.auth_token = gdata.gauth.AuthSubToken(auth_sub_token, scopes)
client.upgrade_token()
return
auth_url = gdata.gauth.generate_auth_sub_url(
'http://gauthmachine.appspot.com/authsub', scopes)
print 'Visit the following URL in your browser to authorize this app:'
print str(auth_url)
print 'After agreeing to authorize the app, copy the token value from the'
print ' URL. Example: "www.google.com/?token=ab12" token value is ab12'
token_value = raw_input('Please enter the token value: ')
if private_key is not None:
single_use_token = gdata.gauth.SecureAuthSubToken(
token_value, private_key, scopes)
else:
single_use_token = gdata.gauth.AuthSubToken(token_value, scopes)
client.auth_token = single_use_token
client.upgrade_token()
elif auth_type == OAUTH:
if oauth_type is None:
oauth_type = int(get_param(
'oauth_type', 'Please choose the authorization mechanism you want'
' to use.\n'
'1. use an HMAC signature using your consumer key and secret\n'
'2. use RSA with your private key to sign requests\n'))
consumer_key = get_param(
'consumer_key', 'Please enter your OAuth consumer key '
'which identifies your app')
if oauth_type == HMAC:
consumer_secret = get_param(
'consumer_secret', 'Please enter your OAuth consumer secret '
'which you share with the OAuth provider', True)
# Swap out this code once the client supports requesting an oauth token.
# Get a request token.
request_token = client.get_oauth_token(
scopes, 'http://gauthmachine.appspot.com/oauth', consumer_key,
consumer_secret=consumer_secret)
elif oauth_type == RSA:
rsa_private_key = get_param(
'rsa_private_key',
'Please provide the location of your RSA private key which\n'
' corresponds to the certificate you have uploaded for your domain.')
try:
private_key_file = open(rsa_private_key, 'rb')
private_key = private_key_file.read()
private_key_file.close()
except IOError:
print 'Unable to read private key from file'
request_token = client.get_oauth_token(
scopes, 'http://gauthmachine.appspot.com/oauth', consumer_key,
rsa_private_key=private_key)
else:
print 'Invalid OAuth signature type'
return None
# Authorize the request token in the browser.
print 'Visit the following URL in your browser to authorize this app:'
print str(request_token.generate_authorization_url())
print 'After agreeing to authorize the app, copy URL from the browser\'s'
print ' address bar.'
url = raw_input('Please enter the url: ')
gdata.gauth.authorize_request_token(request_token, url)
# Exchange for an access token.
client.auth_token = client.get_access_token(request_token)
else:
print 'Invalid authorization type.'
return None
def print_options():
"""Displays usage information, available command line params."""
# TODO: fill in the usage description for authorizing the client.
print ''
|
apache-2.0
|
Godiyos/python-for-android
|
python-build/python-libs/gdata/src/gdata/Crypto/PublicKey/qNEW.py
|
228
|
5545
|
#
# qNEW.py : The q-NEW signature algorithm.
#
# Part of the Python Cryptography Toolkit
#
# Distribute and use freely; there are no restrictions on further
# dissemination and usage except those imposed by the laws of your
# country of residence. This software is provided "as is" without
# warranty of fitness for use or suitability for any purpose, express
# or implied. Use at your own risk or not at all.
#
__revision__ = "$Id: qNEW.py,v 1.8 2003/04/04 15:13:35 akuchling Exp $"
from Crypto.PublicKey import pubkey
from Crypto.Util.number import *
from Crypto.Hash import SHA
class error (Exception):
pass
HASHBITS = 160 # Size of SHA digests
def generate(bits, randfunc, progress_func=None):
"""generate(bits:int, randfunc:callable, progress_func:callable)
Generate a qNEW key of length 'bits', using 'randfunc' to get
random data and 'progress_func', if present, to display
the progress of the key generation.
"""
obj=qNEWobj()
# Generate prime numbers p and q. q is a 160-bit prime
# number. p is another prime number (the modulus) whose bit
# size is chosen by the caller, and is generated so that p-1
# is a multiple of q.
#
# Note that only a single seed is used to
# generate p and q; if someone generates a key for you, you can
# use the seed to duplicate the key generation. This can
# protect you from someone generating values of p,q that have
# some special form that's easy to break.
if progress_func:
progress_func('p,q\n')
while (1):
obj.q = getPrime(160, randfunc)
# assert pow(2, 159L)<obj.q<pow(2, 160L)
obj.seed = S = long_to_bytes(obj.q)
C, N, V = 0, 2, {}
# Compute b and n such that bits-1 = b + n*HASHBITS
n= (bits-1) / HASHBITS
b= (bits-1) % HASHBITS ; powb=2L << b
powL1=pow(long(2), bits-1)
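# Worked example (added, illustrative): for bits=512, n = 511 / 160 = 3 and
# b = 511 % 160 = 31, so each candidate p is assembled from the four digests
# V[0]..V[3] before the top bit is forced and p-1 is made a multiple of q.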
while C<4096:
# The V array will contain (bits-1) bits of random
# data, that are assembled to produce a candidate
# value for p.
for k in range(0, n+1):
V[k]=bytes_to_long(SHA.new(S+str(N)+str(k)).digest())
p = V[n] % powb
for k in range(n-1, -1, -1):
p= (p << long(HASHBITS) )+V[k]
p = p+powL1 # Ensure the high bit is set
# Ensure that p-1 is a multiple of q
p = p - (p % (2*obj.q)-1)
# If p is still the right size, and it's prime, we're done!
if powL1<=p and isPrime(p):
break
# Otherwise, increment the counter and try again
C, N = C+1, N+n+1
if C<4096:
break # Ended early, so exit the while loop
if progress_func:
progress_func('4096 values of p tried\n')
obj.p = p
power=(p-1)/obj.q
# Next parameter: g = h**((p-1)/q) mod p, such that h is any
# number <p-1, and g>1. g is kept; h can be discarded.
if progress_func:
progress_func('h,g\n')
while (1):
h=bytes_to_long(randfunc(bits)) % (p-1)
g=pow(h, power, p)
if 1<h<p-1 and g>1:
break
obj.g=g
# x is the private key information, and is
# just a random number between 0 and q.
# y=g**x mod p, and is part of the public information.
if progress_func:
progress_func('x,y\n')
while (1):
x=bytes_to_long(randfunc(20))
if 0 < x < obj.q:
break
obj.x, obj.y=x, pow(g, x, p)
return obj
# Construct a qNEW object
def construct(tuple):
"""construct(tuple:(long,long,long,long)|(long,long,long,long,long)
Construct a qNEW object from a 4- or 5-tuple of numbers.
"""
obj=qNEWobj()
if len(tuple) not in [4,5]:
raise error, 'argument for construct() wrong length'
for i in range(len(tuple)):
field = obj.keydata[i]
setattr(obj, field, tuple[i])
return obj
class qNEWobj(pubkey.pubkey):
keydata=['p', 'q', 'g', 'y', 'x']
def _sign(self, M, K=''):
if (self.q<=K):
raise error, 'K is greater than q'
if M<0:
raise error, 'Illegal value of M (<0)'
if M>=pow(2,161L):
raise error, 'Illegal value of M (too large)'
r=pow(self.g, K, self.p) % self.q
s=(K- (r*M*self.x % self.q)) % self.q
return (r,s)
def _verify(self, M, sig):
r, s = sig
if r<=0 or r>=self.q or s<=0 or s>=self.q:
return 0
if M<0:
raise error, 'Illegal value of M (<0)'
if M<=0 or M>=pow(2,161L):
return 0
v1 = pow(self.g, s, self.p)
v2 = pow(self.y, M*r, self.p)
v = ((v1*v2) % self.p)
v = v % self.q
if v==r:
return 1
return 0
def size(self):
"Return the maximum number of bits that can be handled by this key."
return 160
def has_private(self):
"""Return a Boolean denoting whether the object contains
private components."""
return hasattr(self, 'x')
def can_sign(self):
"""Return a Boolean value recording whether this algorithm can generate signatures."""
return 1
def can_encrypt(self):
"""Return a Boolean value recording whether this algorithm can encrypt data."""
return 0
def publickey(self):
"""Return a new key object containing only the public information."""
return construct((self.p, self.q, self.g, self.y))
object = qNEWobj
|
apache-2.0
|
FNCS/ns-3.26
|
src/olsr/bindings/callbacks_list.py
|
240
|
1159
|
callback_classes = [
['void', 'ns3::Ptr<ns3::Socket>', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::Ptr<ns3::Socket>', 'unsigned int', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::Ptr<ns3::Socket>', 'ns3::Address const&', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['bool', 'ns3::Ptr<ns3::Socket>', 'ns3::Address const&', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::Ptr<ns3::NetDevice>', 'ns3::Ptr<ns3::Packet const>', 'unsigned short', 'ns3::Address const&', 'ns3::Address const&', 'ns3::NetDevice::PacketType', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['unsigned char', 'ns3::Ptr<ns3::QueueItem>', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
]
|
gpl-2.0
|
ikjn/qemu-fi-monitor
|
tests/qemu-iotests/qed.py
|
248
|
7194
|
#!/usr/bin/env python
#
# Tool to manipulate QED image files
#
# Copyright (C) 2010 IBM, Corp.
#
# Authors:
# Stefan Hajnoczi <[email protected]>
#
# This work is licensed under the terms of the GNU GPL, version 2 or later.
# See the COPYING file in the top-level directory.
import sys
import struct
import random
import optparse
# This can be used as a module
__all__ = ['QED_F_NEED_CHECK', 'QED']
QED_F_NEED_CHECK = 0x02
header_fmt = '<IIIIQQQQQII'
header_size = struct.calcsize(header_fmt)
field_names = ['magic', 'cluster_size', 'table_size',
'header_size', 'features', 'compat_features',
'autoclear_features', 'l1_table_offset', 'image_size',
'backing_filename_offset', 'backing_filename_size']
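# Note (added for clarity): the '<IIIIQQQQQII' little-endian layout above packs
# exactly the 11 values named in field_names, in order: four 32-bit fields
# (magic..header_size), five 64-bit fields (features..image_size), and two
# 32-bit backing-filename fields.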
table_elem_fmt = '<Q'
table_elem_size = struct.calcsize(table_elem_fmt)
def err(msg):
sys.stderr.write(msg + '\n')
sys.exit(1)
def unpack_header(s):
fields = struct.unpack(header_fmt, s)
return dict((field_names[idx], val) for idx, val in enumerate(fields))
def pack_header(header):
fields = tuple(header[x] for x in field_names)
return struct.pack(header_fmt, *fields)
def unpack_table_elem(s):
return struct.unpack(table_elem_fmt, s)[0]
def pack_table_elem(elem):
return struct.pack(table_elem_fmt, elem)
class QED(object):
def __init__(self, f):
self.f = f
self.f.seek(0, 2)
self.filesize = f.tell()
self.load_header()
self.load_l1_table()
def raw_pread(self, offset, size):
self.f.seek(offset)
return self.f.read(size)
def raw_pwrite(self, offset, data):
self.f.seek(offset)
return self.f.write(data)
def load_header(self):
self.header = unpack_header(self.raw_pread(0, header_size))
def store_header(self):
self.raw_pwrite(0, pack_header(self.header))
def read_table(self, offset):
size = self.header['table_size'] * self.header['cluster_size']
s = self.raw_pread(offset, size)
table = [unpack_table_elem(s[i:i + table_elem_size]) for i in xrange(0, size, table_elem_size)]
return table
def load_l1_table(self):
self.l1_table = self.read_table(self.header['l1_table_offset'])
self.table_nelems = self.header['table_size'] * self.header['cluster_size'] / table_elem_size
def write_table(self, offset, table):
s = ''.join(pack_table_elem(x) for x in table)
self.raw_pwrite(offset, s)
def random_table_item(table):
vals = [(index, offset) for index, offset in enumerate(table) if offset != 0]
if not vals:
err('cannot pick random item because table is empty')
return random.choice(vals)
def corrupt_table_duplicate(table):
'''Corrupt a table by introducing a duplicate offset'''
victim_idx, victim_val = random_table_item(table)
unique_vals = set(table)
if len(unique_vals) == 1:
err('no duplication corruption possible in table')
dup_val = random.choice(list(unique_vals.difference([victim_val])))
table[victim_idx] = dup_val
def corrupt_table_invalidate(qed, table):
'''Corrupt a table by introducing an invalid offset'''
index, _ = random_table_item(table)
table[index] = qed.filesize + random.randint(0, 100 * 1024 * 1024 * 1024 * 1024)
def cmd_show(qed, *args):
'''show [header|l1|l2 <offset>] - Show header or l1/l2 tables'''
if not args or args[0] == 'header':
print qed.header
elif args[0] == 'l1':
print qed.l1_table
elif len(args) == 2 and args[0] == 'l2':
offset = int(args[1])
print qed.read_table(offset)
else:
err('unrecognized sub-command')
def cmd_duplicate(qed, table_level):
'''duplicate l1|l2 - Duplicate a random table element'''
if table_level == 'l1':
offset = qed.header['l1_table_offset']
table = qed.l1_table
elif table_level == 'l2':
_, offset = random_table_item(qed.l1_table)
table = qed.read_table(offset)
else:
err('unrecognized sub-command')
corrupt_table_duplicate(table)
qed.write_table(offset, table)
def cmd_invalidate(qed, table_level):
'''invalidate l1|l2 - Plant an invalid table element at random'''
if table_level == 'l1':
offset = qed.header['l1_table_offset']
table = qed.l1_table
elif table_level == 'l2':
_, offset = random_table_item(qed.l1_table)
table = qed.read_table(offset)
else:
err('unrecognized sub-command')
corrupt_table_invalidate(qed, table)
qed.write_table(offset, table)
def cmd_need_check(qed, *args):
'''need-check [on|off] - Test, set, or clear the QED_F_NEED_CHECK header bit'''
if not args:
print bool(qed.header['features'] & QED_F_NEED_CHECK)
return
if args[0] == 'on':
qed.header['features'] |= QED_F_NEED_CHECK
elif args[0] == 'off':
qed.header['features'] &= ~QED_F_NEED_CHECK
else:
err('unrecognized sub-command')
qed.store_header()
def cmd_zero_cluster(qed, pos, *args):
'''zero-cluster <pos> [<n>] - Zero data clusters'''
pos, n = int(pos), 1
if args:
if len(args) != 1:
err('expected one argument')
n = int(args[0])
for i in xrange(n):
l1_index = pos / qed.header['cluster_size'] / len(qed.l1_table)
if qed.l1_table[l1_index] == 0:
err('no l2 table allocated')
l2_offset = qed.l1_table[l1_index]
l2_table = qed.read_table(l2_offset)
l2_index = (pos / qed.header['cluster_size']) % len(qed.l1_table)
l2_table[l2_index] = 1 # zero the data cluster
qed.write_table(l2_offset, l2_table)
pos += qed.header['cluster_size']
def cmd_copy_metadata(qed, outfile):
'''copy-metadata <outfile> - Copy metadata only (for scrubbing corrupted images)'''
out = open(outfile, 'wb')
# Match file size
out.seek(qed.filesize - 1)
out.write('\0')
# Copy header clusters
out.seek(0)
header_size_bytes = qed.header['header_size'] * qed.header['cluster_size']
out.write(qed.raw_pread(0, header_size_bytes))
# Copy L1 table
out.seek(qed.header['l1_table_offset'])
s = ''.join(pack_table_elem(x) for x in qed.l1_table)
out.write(s)
# Copy L2 tables
for l2_offset in qed.l1_table:
if l2_offset == 0:
continue
l2_table = qed.read_table(l2_offset)
out.seek(l2_offset)
s = ''.join(pack_table_elem(x) for x in l2_table)
out.write(s)
out.close()
def usage():
print 'Usage: %s <file> <cmd> [<arg>, ...]' % sys.argv[0]
print
print 'Supported commands:'
for cmd in sorted(x for x in globals() if x.startswith('cmd_')):
print globals()[cmd].__doc__
sys.exit(1)
def main():
if len(sys.argv) < 3:
usage()
filename, cmd = sys.argv[1:3]
cmd = 'cmd_' + cmd.replace('-', '_')
if cmd not in globals():
usage()
qed = QED(open(filename, 'r+b'))
try:
globals()[cmd](qed, *sys.argv[3:])
except TypeError, e:
sys.stderr.write(globals()[cmd].__doc__ + '\n')
sys.exit(1)
if __name__ == '__main__':
main()
|
gpl-2.0
|
pramasoul/micropython
|
tests/extmod/vfs_fat_finaliser.py
|
14
|
1906
|
# Test VfsFat class and its finaliser
try:
import uerrno, uos
uos.VfsFat
except (ImportError, AttributeError):
print("SKIP")
raise SystemExit
class RAMBlockDevice:
def __init__(self, blocks, sec_size=512):
self.sec_size = sec_size
self.data = bytearray(blocks * self.sec_size)
def readblocks(self, n, buf):
for i in range(len(buf)):
buf[i] = self.data[n * self.sec_size + i]
def writeblocks(self, n, buf):
for i in range(len(buf)):
self.data[n * self.sec_size + i] = buf[i]
def ioctl(self, op, arg):
if op == 4: # MP_BLOCKDEV_IOCTL_BLOCK_COUNT
return len(self.data) // self.sec_size
if op == 5: # MP_BLOCKDEV_IOCTL_BLOCK_SIZE
return self.sec_size
# Create block device, and skip test if not enough RAM
try:
bdev = RAMBlockDevice(50)
except MemoryError:
print("SKIP")
raise SystemExit
# Format block device and create VFS object
uos.VfsFat.mkfs(bdev)
vfs = uos.VfsFat(bdev)
# Here we test that opening a file with the heap locked fails correctly. This
# is a special case because file objects use a finaliser and allocating with a
# finaliser is a different path to normal allocation. It would be better to
# test this in the core tests but there are no core objects that use finaliser.
import micropython
micropython.heap_lock()
try:
vfs.open("x", "r")
except MemoryError:
print("MemoryError")
micropython.heap_unlock()
# Here we test that the finaliser is actually called during a garbage collection.
import gc
N = 4
for i in range(N):
n = "x%d" % i
f = vfs.open(n, "w")
f.write(n)
f = None # release f without closing
[0, 1, 2, 3] # use up Python stack so f is really gone
gc.collect() # should finalise all N files by closing them
for i in range(N):
with vfs.open("x%d" % i, "r") as f:
print(f.read())
|
mit
|
fgclaramonte/Odoo-addons
|
inter_company_rules/models/inter_company_invoice.py
|
21
|
5617
|
from openerp import models, fields, api, _
from openerp.exceptions import Warning
class account_invoice(models.Model):
_inherit = 'account.invoice'
auto_generated = fields.Boolean(string='Auto Generated Document', copy=False)
auto_invoice_id = fields.Many2one('account.invoice', string='Source Invoice',
readonly=True, copy=False)
@api.multi
def invoice_validate(self):
"""
When an invoice is validated, generate the corresponding cross-company invoice based on the company rules.
"""
for invoice in self:
#do not consider invoices that have already been auto-generated, nor the invoices that were already validated in the past
company = self.env['res.company']._find_company_from_partner(invoice.partner_id.id)
if company and company.auto_generate_invoices and not invoice.auto_generated:
if invoice.type == 'out_invoice':
invoice.action_create_invoice(company, 'in_invoice', 'purchase')
elif invoice.type == 'in_invoice':
invoice.action_create_invoice(company, 'out_invoice', 'sale')
elif invoice.type == 'out_refund':
invoice.action_create_invoice(company, 'in_refund', 'purchase_refund')
elif invoice.type == 'in_refund':
invoice.action_create_invoice(company, 'out_refund', 'sale_refund')
return super(account_invoice, self).invoice_validate()
@api.one
def action_create_invoice(self, company, inv_type, journal_type):
#Find user for creating the invoice from company
intercompany_uid = company.intercompany_user_id and company.intercompany_user_id.id or False
if not intercompany_uid:
raise Warning(_('Provide one user for intercompany relation for %s ') % company.name)
ctx = self._context.copy()
ctx['force_company'] = company.id
this_company_partner = self.company_id.partner_id
inv_lines = []
for line in self.invoice_line:
#Use the product onchange to compute the line defaults, since that method already exists in core.
product_uom = line.product_id.uom_id and line.product_id.uom_id.id or False
line_data = line.with_context(ctx).sudo(intercompany_uid).product_id_change(line.product_id.id,
product_uom, qty=line.quantity, name='', type=inv_type,
partner_id=this_company_partner.id,
fposition_id=this_company_partner.property_account_position.id, company_id=company.id)
inv_line_data = self._prepare_inv_line(line_data, line)
inv_line_id = line.with_context(ctx).sudo(intercompany_uid).create(inv_line_data)
inv_lines.append(inv_line_id.id)
#create invoice
invoice_vals = self.with_context(ctx).sudo(intercompany_uid)._prepare_inv(inv_lines, inv_type, journal_type, company)[0]
return self.with_context(ctx).sudo(intercompany_uid).create(invoice_vals)
@api.model
def _prepare_inv_line(self, line_data, line):
""" Generate invoice line dictionary"""
vals = {
'name': line.name,
'price_unit': line.price_unit,
'quantity': line.quantity,
'discount': line.discount,
'product_id': line.product_id.id or False,
'uos_id': line.uos_id.id or False,
'sequence': line.sequence,
'invoice_line_tax_id': [(6, 0, line_data['value'].get('invoice_line_tax_id', []))],
'account_analytic_id': line.account_analytic_id.id or False,
}
if line_data['value'].get('account_id', False):
vals['account_id'] = line_data['value']['account_id']
return vals
@api.one
def _prepare_inv(self, inv_lines, inv_type, jrnl_type, company):
""" Generate invoice dictionary """
#To find journal.
journal = self.env['account.journal'].search([('type', '=', jrnl_type), ('company_id', '=', company.id)], limit=1)
if not journal:
raise Warning(_('Please define %s journal for this company: "%s" (id:%d).') % (jrnl_type, company.name, company.id))
#To find periods of supplier company.
ctx = self._context.copy()
ctx['company_id'] = company.id
period_ids = self.env['account.period'].with_context(ctx).find(self.date_invoice)
#To find account,payment term,fiscal position,bank.
partner_data = self.onchange_partner_id(inv_type, self.company_id.partner_id.id, company_id=company.id)
return {
'name': self.name,
'origin': self.company_id.name + _(' Invoice: ') + str(self.number),
'type': inv_type,
'date_invoice': self.date_invoice,
'reference': self.reference,
'account_id': partner_data['value'].get('account_id', False),
'partner_id': self.company_id.partner_id.id,
'journal_id': journal.id,
'invoice_line': [(6, 0, inv_lines)],
'currency_id': self.currency_id and self.currency_id.id,
'fiscal_position': partner_data['value'].get('fiscal_position', False),
'payment_term': partner_data['value'].get('payment_term', False),
'company_id': company.id,
'period_id': period_ids and period_ids[0].id or False,
'partner_bank_id': partner_data['value'].get('partner_bank_id', False),
'auto_generated': True,
'auto_invoice_id': self.id,
}
|
gpl-3.0
|
pmghalvorsen/gramps_branch
|
gramps/gen/filters/rules/family/_hastag.py
|
2
|
1730
|
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2010 Nick Hall
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
Rule that checks for a family with a particular tag.
"""
#-------------------------------------------------------------------------
#
# Standard Python modules
#
#-------------------------------------------------------------------------
from ....const import GRAMPS_LOCALE as glocale
_ = glocale.translation.gettext
#-------------------------------------------------------------------------
#
# GRAMPS modules
#
#-------------------------------------------------------------------------
from .._hastagbase import HasTagBase
#-------------------------------------------------------------------------
#
# HasTag
#
#-------------------------------------------------------------------------
class HasTag(HasTagBase):
"""
Rule that checks for a family with a particular tag.
"""
labels = [ _('Tag:') ]
name = _('Families with the <tag>')
description = _("Matches families with the particular tag")
|
gpl-2.0
|
camptocamp/odoo
|
addons/l10n_in_hr_payroll/__init__.py
|
430
|
1117
|
#-*- coding:utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2011 OpenERP SA (<http://openerp.com>). All Rights Reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import l10n_in_hr_payroll
import report
import wizard
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
linkhub-sdk/popbill.taxinvoice.example.py
|
cancelIssue.py
|
1
|
1516
|
# -*- coding: utf-8 -*-
# Handles console encoding differences; safe to ignore.
import sys
import imp
imp.reload(sys)
try:
sys.setdefaultencoding('UTF8')
except Exception as E:
pass
import testValue
from popbill import TaxinvoiceService, PopbillException
taxinvoiceService = TaxinvoiceService(testValue.LinkID, testValue.SecretKey)
taxinvoiceService.IsTest = testValue.IsTest
taxinvoiceService.IPRestrictOnOff = testValue.IPRestrictOnOff
taxinvoiceService.UseStaticIP = testValue.UseStaticIP
taxinvoiceService.UseLocalTimeYN = testValue.UseLocalTimeYN
'''
Cancels the issuance of a tax invoice that is in the [Issued] state.
- Issue cancellation is only possible before transmission to the National Tax Service (NTS).
- A tax invoice whose issuance has been cancelled is not transmitted to the NTS.
- https://docs.popbill.com/taxinvoice/python/api#CancelIssue
'''
try:
print("=" * 15 + " ์ธ๊ธ๊ณ์ฐ์ ๋ฐํ ์ทจ์ " + "=" * 15)
# ํ๋นํ์ ์ฌ์
์๋ฒํธ
CorpNum = testValue.testCorpNum
# Tax invoice issue type - SELL: sales, BUY: purchase, TRUSTEE: trustee
MgtKeyType = "SELL"
# Document number (management key)
MgtKey = "20210429-001"
# Memo
Memo = "Issue cancellation memo"
# Popbill member user ID
UserID = testValue.testUserID
result = taxinvoiceService.cancelIssue(CorpNum, MgtKeyType, MgtKey, Memo, UserID)
print("์ฒ๋ฆฌ๊ฒฐ๊ณผ : [%d] %s" % (result.code, result.message))
except PopbillException as PE:
print("Popbill Exception : [%d] %s" % (PE.code, PE.message))
|
mit
|
dineshappavoo/virtdc
|
virtdc_init/virtdc_init.py
|
2
|
1747
|
#!/usr/bin/env python
import subprocess, sys
#API - virtdc command line init tool
#==============================================================================
# Variables
#==============================================================================
# Some descriptive variables
#name = "virtdc"
#version = "0.1.0"
#long_description = """vmplacementandscaling is a set of API's/tools written to create virtual machines for cloud users efficiently."""
#url = "https://github.com/dineshappavoo/virtdc"
#license = ""
#==============================================================================
def virtdc_init():
#start domain termination process
cmd = "/usr/bin/python /var/lib/virtdc/framework/VM_terminationHandler.py &"
termination_process = subprocess.Popen(cmd, shell=True, stderr=subprocess.PIPE)
#initiate the vmonere domain socket listener
vmonere_domain_socket_listener_cmd = "/usr/bin/python /var/lib/virtdc/vmonere/sockets/vmonere_listener.socket.py &"
vmonere_domain_socket_listener = subprocess.Popen(vmonere_domain_socket_listener_cmd, shell=True, stderr=subprocess.PIPE)
#initiate the vmonere host socket listener
vmonere_host_socket_listener_cmd = "/usr/bin/python /var/lib/virtdc/vmonere/sockets/vmonere_host_listener.socket &"
vmonere_host_socket_listener = subprocess.Popen(vmonere_host_socket_listener_cmd, shell=True, stderr=subprocess.PIPE)
delete_log_files_cmd = "/usr/bin/python /var/lib/virtdc/vmonere/host/vmonere_exec_log_deleter.py"
delete_log_files_process = subprocess.Popen(delete_log_files_cmd, shell=True, stderr=subprocess.PIPE)
if __name__ == "__main__":
# stuff only to run when not called via 'import' here
virtdc_init()
|
mit
|
srvelivela/ansibledoc
|
lib/ansible/plugins/lookup/consul_kv.py
|
89
|
4541
|
# (c) 2015, Steve Gargan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
'''
Lookup plugin to grab metadata from a consul key value store.
============================================================
Plugin will lookup metadata for a playbook from the key value store in a
consul cluster. Values can be easily set in the kv store with simple rest
commands e.g.
curl -X PUT -d 'some-value' http://localhost:8500/v1/kv/ansible/somedata
this can then be looked up in a playbook as follows
- debug: msg='key contains {{item}}'
with_consul_kv:
- 'key/to/retrieve'
Parameters can be provided after the key to be more specific about what to retrieve e.g.
- debug: msg='key contains {{item}}'
with_consul_kv:
- 'key/to recurse=true token=E6C060A9-26FB-407A-B83E-12DDAFCB4D98')}}'
recurse: if true, will retrieve all the values that have the given key as prefix
index: if the key has a value with the specified index then this is returned
allowing access to historical values.
token: acl token to allow access to restricted values.
By default this will lookup keys via the consul agent running on http://localhost:8500
this can be changed by setting the env variable 'ANSIBLE_CONSUL_URL' to point to the url
of the kv store you'd like to use.
'''
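# Illustrative sketch, not part of the original plugin: the agent URL mentioned
# above is only an environment override, so a run against a non-default agent
# could look like this (host name and playbook are hypothetical):
#   ANSIBLE_CONSUL_URL=http://consul.internal:8500 ansible-playbook site.yml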
######################################################################
import os
import sys
from urlparse import urlparse
from ansible.errors import AnsibleError
from ansible.plugins.lookup import LookupBase
try:
import json
except ImportError:
import simplejson as json
try:
import consul
HAS_CONSUL = True
except ImportError as e:
HAS_CONSUL = False
class LookupModule(LookupBase):
def __init__(self, loader=None, templar=None, **kwargs):
super(LookupModule, self).__init__(loader, templar, **kwargs)
self.agent_url = 'http://localhost:8500'
if os.getenv('ANSIBLE_CONSUL_URL') is not None:
self.agent_url = os.environ['ANSIBLE_CONSUL_URL']
def run(self, terms, variables=None, **kwargs):
if not HAS_CONSUL:
raise AnsibleError('python-consul is required for consul_kv lookup. see http://python-consul.readthedocs.org/en/latest/#installation')
u = urlparse(self.agent_url)
consul_api = consul.Consul(host=u.hostname, port=u.port)
values = []
try:
for term in terms:
params = self.parse_params(term)
results = consul_api.kv.get(params['key'],
token=params['token'],
index=params['index'],
recurse=params['recurse'])
if results[1]:
# responds with a single or list of result maps
if isinstance(results[1], list):
for r in results[1]:
values.append(r['Value'])
else:
values.append(results[1]['Value'])
except Exception as e:
raise AnsibleError(
"Error locating '%s' in kv store. Error was %s" % (term, e))
return values
def parse_params(self, term):
params = term.split(' ')
paramvals = {
'key': params[0],
'token': None,
'recurse': False,
'index': None
}
# parameters specified?
try:
for param in params[1:]:
if param and len(param) > 0:
name, value = param.split('=')
assert name in paramvals, "%s is not a valid consul lookup parameter" % name
paramvals[name] = value
except (ValueError, AssertionError) as e:
raise AnsibleError(e)
return paramvals
|
gpl-3.0
|
SVRobots/Minecraft
|
modules/world.py
|
1
|
2839
|
from os import listdir, mkdir
import cPickle
from imp import load_source
api = load_source('api', 'modules\\api.py')
from api import *
class World(object):
#initialize
def __init__(self):
super(World, self).__init__()
self.world="Default"
self.dimension="DIM1"
self.playername="Default"
self.world_blocks={}
self.shown_blocks={}
#load world
def load(self):
#set world folder location
self.savedir = 'saves\\' + self.world + '\\'
#init saves folder
if FileExists('', 'saves') == False:
mkdir('saves')
#init world folder
if FileExists('saves', self.world) == False:
mkdir('saves\\' + self.world)
#init dimension folder
if FileExists('saves\\' + self.world, self.dimension) == False:
mkdir('saves\\' + self.world + '\\' + self.dimension)
#init players folder
if FileExists(self.savedir, 'players') == False:
mkdir(self.savedir + 'players')
#init player
if FileExists(self.savedir + 'players\\', self.playername) == False:
self.player = Player()
else:
self.loadPlayer()
#get world gravity
self.GetGravity()
#make some blocks
self.GenerateWorld(-10,-10,10,10)
self.ParseVisible()
#Generate World
def GenerateWorld(self, lx, lz, ux, uz):
# walk mods in step with their dimension lists so the matching mod is used
for i, m in enumerate(self.c.dimensions):
if self.dimension in m:
tmp = self.c.mods[i].GenerateDimension(self.dimension, lx, lz, ux, uz)
for b in tmp:
self.world_blocks[b] = tmp[b]
#find visible blocks to render
def ParseVisible(self):
for b in self.world_blocks:
x,y,z = b
if ((x,y+1,z) not in self.world_blocks) or ((x,y-1,z) not in self.world_blocks) or ((x+1,y,z) not in self.world_blocks) or ((x-1,y,z) not in self.world_blocks) or ((x,y,z+1) not in self.world_blocks) or ((x,y,z-1) not in self.world_blocks):
self.shown_blocks[b]=self.world_blocks[b]
#Get world gravity
def GetGravity(self):
for i, m in enumerate(self.c.dimensions):
if self.dimension in m:
self.a = self.c.mods[i].dimension_gravity[self.dimension]
#save player
def savePlayer(self):
save(self.savedir + 'players\\' + self.player.name, self.player, True)
#load player
def loadPlayer(self):
self.player = load(self.savedir + 'players\\' + self.playername)
def quit(self):
self.savePlayer()
class Player(object):
def __init__(self):
self.name='Default'
self.flying=True
self.gameMode=1
self.x=0
self.y=10
self.z=0
self.r=0
self.p=0
self.onGround=False
self.vy=0
self.fallDistance=0
def InitializeWorld(world, dimension, c):
w=World()
w.world = world
w.dimension = dimension
w.c = c
w.load()
return w
def save(f,o,r):
if r == True:
a = open(f,'wb')
else:
a = open(f,'r+b')
print f
cPickle.dump(o, a, -1)
a.close()
def load(f):
a = open(f, 'rb')
o = cPickle.load(a)
a.close()
return o
def FileExists(direct, f):
for p in listdir(direct):
if p == f:
return True
return False
|
gpl-2.0
|
dati91/servo
|
tests/wpt/web-platform-tests/resource-timing/resources/multi_redirect.py
|
17
|
1767
|
def main(request, response):
"""Handler that causes multiple redirections.
The request has two mandatory and one optional query parameters:
page_origin - The page origin, used for redirection and to set TAO. This is a mandatory parameter.
cross_origin - The cross origin used to make this a cross-origin redirect. This is a mandatory parameter.
timing_allow - Whether TAO should be set or not in the redirect chain. This is an optional parameter. Default: not set.
Note that |step| is a parameter used internally for the multi-redirect. It's the step we're at in the redirect chain.
"""
step = 1
if "step" in request.GET:
try:
step = int(request.GET.first("step"))
except ValueError:
pass
page_origin = request.GET.first("page_origin")
cross_origin = request.GET.first("cross_origin")
timing_allow = "0"
if "timing_allow" in request.GET:
timing_allow = request.GET.first("timing_allow")
redirect_url = "/resource-timing/resources/multi_redirect.py?"
redirect_url += "page_origin=" + page_origin
redirect_url += "&cross_origin=" + cross_origin
redirect_url += "&timing_allow=" + timing_allow
redirect_url += "&step="
if step == 1:
redirect_url = cross_origin + redirect_url + "2"
if timing_allow != "0":
response.headers.set("timing-allow-origin", page_origin)
elif step == 2:
redirect_url = page_origin + redirect_url + "3"
if timing_allow != "0":
response.headers.set("timing-allow-origin", page_origin)
else:
redirect_url = page_origin + "/resource-timing/resources/blank_page_green.htm"
response.status = 302
response.headers.set("Location", redirect_url)
|
mpl-2.0
|
acsone/partner-contact
|
base_location_nuts/models/res_partner_nuts.py
|
5
|
1197
|
# -*- coding: utf-8 -*-
# © 2015 Antiun Ingeniería S.L. - Antonio Espinosa
# © 2015 Antiun Ingeniería S.L. - Jairo Llopis
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from openerp import models, fields
class ResPartnerNuts(models.Model):
_name = 'res.partner.nuts'
_order = "parent_left"
_parent_order = "name"
_parent_store = True
_description = "NUTS Item"
# NUTS fields
level = fields.Integer(required=True)
code = fields.Char(required=True)
name = fields.Char(required=True, translate=True)
country_id = fields.Many2one(comodel_name='res.country', string="Country",
required=True)
state_id = fields.Many2one(comodel_name='res.country.state',
string='State')
# Parent hierarchy
parent_id = fields.Many2one(comodel_name='res.partner.nuts',
ondelete='restrict')
child_ids = fields.One2many(
'res.partner.nuts',
'parent_id',
"Children",
oldname="children")
parent_left = fields.Integer('Parent Left', select=True)
parent_right = fields.Integer('Parent Right', select=True)
|
agpl-3.0
|
rwl/puddle
|
puddle/python_editor/python_workbench_editor.py
|
1
|
6267
|
#------------------------------------------------------------------------------
# Copyright (C) 2009 Richard W. Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#------------------------------------------------------------------------------
""" A Python code editor.
"""
#------------------------------------------------------------------------------
# Imports:
#------------------------------------------------------------------------------
from os.path import exists, basename
from enthought.pyface.workbench.api import TraitsUIEditor
from enthought.traits.api import Code, Instance, Str
from enthought.traits.ui.api import View, Item, Group, CodeEditor
from enthought.pyface.api import PythonEditor, FileDialog, CANCEL
#------------------------------------------------------------------------------
# "PythonWorkbenchEditor" class:
#------------------------------------------------------------------------------
class PythonWorkbenchEditor(TraitsUIEditor):
""" A text editor.
"""
#--------------------------------------------------------------------------
# "PythonEditor" interface:
#--------------------------------------------------------------------------
# The text being edited:
text = Code
# The default traits view.
traits_view = View(
Group(
Item(
name="text", editor=CodeEditor(show_line_numbers=False)
),
show_labels=False
),
id="puddle.python_editor.python_workbench_editor",
kind="live", resizable=True,
)
#--------------------------------------------------------------------------
# "TraitsUIEditor" interface.
#--------------------------------------------------------------------------
def _name_default(self):
""" Trait initialiser.
"""
# self.obj.on_trait_change(self.on_path, "path")
if self.obj.path == "":
return "Unsaved Script"
else:
return basename(self.obj.path)
# def on_path(self, new):
# """ Handle the file path changing """
#
# self.name = basename(new)
def create_control(self, parent):
""" Creates the toolkit-specific control that represents the
editor. 'parent' is the toolkit-specific control that is
the editor's parent.
"""
ed = PythonEditor(parent, show_line_numbers=False)
# FIXME: Implement toolkit specific Python editor subclass
import wx
styles = [
wx.stc.STC_STYLE_DEFAULT,
wx.stc.STC_STYLE_CONTROLCHAR,
wx.stc.STC_STYLE_BRACELIGHT,
wx.stc.STC_STYLE_BRACEBAD,
wx.stc.STC_P_DEFAULT,
wx.stc.STC_P_COMMENTLINE,
wx.stc.STC_P_NUMBER,
wx.stc.STC_P_STRING,
wx.stc.STC_P_CHARACTER,
wx.stc.STC_P_WORD,
wx.stc.STC_P_TRIPLE,
wx.stc.STC_P_TRIPLEDOUBLE,
wx.stc.STC_P_CLASSNAME,
wx.stc.STC_P_DEFNAME,
wx.stc.STC_P_OPERATOR,
wx.stc.STC_P_IDENTIFIER,
wx.stc.STC_P_COMMENTBLOCK,
wx.stc.STC_P_STRINGEOL
]
for style in styles:
ed.control.StyleSetFaceName(style, "monospace")
ed.control.StyleSetSize(style, 10)
path = self.obj.path
if exists(path):
ed.path = path
ed.load()
return ed.control
#--------------------------------------------------------------------------
# "PythonWorkbenchEditor" interface.
#--------------------------------------------------------------------------
def save(self):
""" Saves the text to disk.
"""
# If the file has not yet been saved then prompt for the file name.
if len(self.obj.path) == 0:
self.save_as()
else:
f = file(self.obj.path, 'w')
f.write(self.text)
f.close()
# We have just saved the file so we ain't dirty no more!
self.dirty = False
return
def save_as(self):
""" Saves the text to disk after prompting for the file name.
"""
dialog = FileDialog(
parent = self.window.control,
action = 'save as',
default_filename = self.name,
wildcard = FileDialog.WILDCARD_PY
)
if dialog.open() != CANCEL:
# Update the editor.
self.id = dialog.path
self.name = basename(dialog.path)
# Update the resource.
self.obj.path = dialog.path
# Save it!
self.save()
return
def _text_changed(self, trait_name, old, new):
""" Static trait change handler.
"""
if self.traits_inited():
self.dirty = True
return
def _dirty_changed(self, dirty):
""" Static trait change handler.
"""
if len(self.obj.path) > 0:
if dirty:
self.name = basename(self.obj.path) + '*'
else:
self.name = basename(self.obj.path)
return
# EOF -------------------------------------------------------------------------
|
mit
|
dischinator/pyload
|
module/lib/simplejson/encoder.py
|
45
|
20060
|
"""Implementation of JSONEncoder
"""
import re
from decimal import Decimal
def _import_speedups():
try:
from simplejson import _speedups
return _speedups.encode_basestring_ascii, _speedups.make_encoder
except ImportError:
return None, None
c_encode_basestring_ascii, c_make_encoder = _import_speedups()
from simplejson.decoder import PosInf
ESCAPE = re.compile(ur'[\x00-\x1f\\"\b\f\n\r\t\u2028\u2029]')
ESCAPE_ASCII = re.compile(r'([\\"]|[^\ -~])')
HAS_UTF8 = re.compile(r'[\x80-\xff]')
ESCAPE_DCT = {
'\\': '\\\\',
'"': '\\"',
'\b': '\\b',
'\f': '\\f',
'\n': '\\n',
'\r': '\\r',
'\t': '\\t',
u'\u2028': '\\u2028',
u'\u2029': '\\u2029',
}
for i in range(0x20):
#ESCAPE_DCT.setdefault(chr(i), '\\u{0:04x}'.format(i))
ESCAPE_DCT.setdefault(chr(i), '\\u%04x' % (i,))
FLOAT_REPR = repr
def encode_basestring(s):
"""Return a JSON representation of a Python string
"""
if isinstance(s, str) and HAS_UTF8.search(s) is not None:
s = s.decode('utf-8')
def replace(match):
return ESCAPE_DCT[match.group(0)]
return u'"' + ESCAPE.sub(replace, s) + u'"'
def py_encode_basestring_ascii(s):
"""Return an ASCII-only JSON representation of a Python string
"""
if isinstance(s, str) and HAS_UTF8.search(s) is not None:
s = s.decode('utf-8')
def replace(match):
s = match.group(0)
try:
return ESCAPE_DCT[s]
except KeyError:
n = ord(s)
if n < 0x10000:
#return '\\u{0:04x}'.format(n)
return '\\u%04x' % (n,)
else:
# surrogate pair
n -= 0x10000
s1 = 0xd800 | ((n >> 10) & 0x3ff)
s2 = 0xdc00 | (n & 0x3ff)
#return '\\u{0:04x}\\u{1:04x}'.format(s1, s2)
return '\\u%04x\\u%04x' % (s1, s2)
return '"' + str(ESCAPE_ASCII.sub(replace, s)) + '"'
encode_basestring_ascii = (
c_encode_basestring_ascii or py_encode_basestring_ascii)
class JSONEncoder(object):
"""Extensible JSON <http://json.org> encoder for Python data structures.
Supports the following objects and types by default:
+-------------------+---------------+
| Python | JSON |
+===================+===============+
| dict, namedtuple | object |
+-------------------+---------------+
| list, tuple | array |
+-------------------+---------------+
| str, unicode | string |
+-------------------+---------------+
| int, long, float | number |
+-------------------+---------------+
| True | true |
+-------------------+---------------+
| False | false |
+-------------------+---------------+
| None | null |
+-------------------+---------------+
To extend this to recognize other objects, subclass and implement a
``.default()`` method with another method that returns a serializable
object for ``o`` if possible, otherwise it should call the superclass
implementation (to raise ``TypeError``).
"""
item_separator = ', '
key_separator = ': '
def __init__(self, skipkeys=False, ensure_ascii=True,
check_circular=True, allow_nan=True, sort_keys=False,
indent=None, separators=None, encoding='utf-8', default=None,
use_decimal=True, namedtuple_as_object=True,
tuple_as_array=True):
"""Constructor for JSONEncoder, with sensible defaults.
If skipkeys is false, then it is a TypeError to attempt
encoding of keys that are not str, int, long, float or None. If
skipkeys is True, such items are simply skipped.
If ensure_ascii is true, the output is guaranteed to be str
objects with all incoming unicode characters escaped. If
ensure_ascii is false, the output will be a unicode object.
If check_circular is true, then lists, dicts, and custom encoded
objects will be checked for circular references during encoding to
prevent an infinite recursion (which would cause an OverflowError).
Otherwise, no such check takes place.
If allow_nan is true, then NaN, Infinity, and -Infinity will be
encoded as such. This behavior is not JSON specification compliant,
but is consistent with most JavaScript based encoders and decoders.
Otherwise, it will be a ValueError to encode such floats.
If sort_keys is true, then the output of dictionaries will be
sorted by key; this is useful for regression tests to ensure
that JSON serializations can be compared on a day-to-day basis.
If indent is a string, then JSON array elements and object members
will be pretty-printed with a newline followed by that string repeated
for each level of nesting. ``None`` (the default) selects the most compact
representation without any newlines. For backwards compatibility with
versions of simplejson earlier than 2.1.0, an integer is also accepted
and is converted to a string with that many spaces.
If specified, separators should be a (item_separator, key_separator)
tuple. The default is (', ', ': '). To get the most compact JSON
representation you should specify (',', ':') to eliminate whitespace.
If specified, default is a function that gets called for objects
that can't otherwise be serialized. It should return a JSON encodable
version of the object or raise a ``TypeError``.
If encoding is not None, then all input strings will be
transformed into unicode using that encoding prior to JSON-encoding.
The default is UTF-8.
If use_decimal is true (the default), ``decimal.Decimal`` will
be supported directly by the encoder. For the inverse, decode JSON
with ``parse_float=decimal.Decimal``.
If namedtuple_as_object is true (the default), tuple subclasses with
``_asdict()`` methods will be encoded as JSON objects.
If tuple_as_array is true (the default), tuple (and subclasses) will
be encoded as JSON arrays.
"""
self.skipkeys = skipkeys
self.ensure_ascii = ensure_ascii
self.check_circular = check_circular
self.allow_nan = allow_nan
self.sort_keys = sort_keys
self.use_decimal = use_decimal
self.namedtuple_as_object = namedtuple_as_object
self.tuple_as_array = tuple_as_array
if isinstance(indent, (int, long)):
indent = ' ' * indent
self.indent = indent
if separators is not None:
self.item_separator, self.key_separator = separators
elif indent is not None:
self.item_separator = ','
if default is not None:
self.default = default
self.encoding = encoding
def default(self, o):
"""Implement this method in a subclass such that it returns
a serializable object for ``o``, or calls the base implementation
(to raise a ``TypeError``).
For example, to support arbitrary iterators, you could
implement default like this::
def default(self, o):
try:
iterable = iter(o)
except TypeError:
pass
else:
return list(iterable)
return JSONEncoder.default(self, o)
"""
raise TypeError(repr(o) + " is not JSON serializable")
def encode(self, o):
"""Return a JSON string representation of a Python data structure.
>>> from simplejson import JSONEncoder
>>> JSONEncoder().encode({"foo": ["bar", "baz"]})
'{"foo": ["bar", "baz"]}'
"""
# This is for extremely simple cases and benchmarks.
if isinstance(o, basestring):
if isinstance(o, str):
_encoding = self.encoding
if (_encoding is not None
and not (_encoding == 'utf-8')):
o = o.decode(_encoding)
if self.ensure_ascii:
return encode_basestring_ascii(o)
else:
return encode_basestring(o)
# This doesn't pass the iterator directly to ''.join() because the
# exceptions aren't as detailed. The list call should be roughly
# equivalent to the PySequence_Fast that ''.join() would do.
chunks = self.iterencode(o, _one_shot=True)
if not isinstance(chunks, (list, tuple)):
chunks = list(chunks)
if self.ensure_ascii:
return ''.join(chunks)
else:
return u''.join(chunks)
def iterencode(self, o, _one_shot=False):
"""Encode the given object and yield each string
representation as available.
For example::
for chunk in JSONEncoder().iterencode(bigobject):
mysocket.write(chunk)
"""
if self.check_circular:
markers = {}
else:
markers = None
if self.ensure_ascii:
_encoder = encode_basestring_ascii
else:
_encoder = encode_basestring
if self.encoding != 'utf-8':
def _encoder(o, _orig_encoder=_encoder, _encoding=self.encoding):
if isinstance(o, str):
o = o.decode(_encoding)
return _orig_encoder(o)
def floatstr(o, allow_nan=self.allow_nan,
_repr=FLOAT_REPR, _inf=PosInf, _neginf=-PosInf):
# Check for specials. Note that this type of test is processor
# and/or platform-specific, so do tests which don't depend on
# the internals.
if o != o:
text = 'NaN'
elif o == _inf:
text = 'Infinity'
elif o == _neginf:
text = '-Infinity'
else:
return _repr(o)
if not allow_nan:
raise ValueError(
"Out of range float values are not JSON compliant: " +
repr(o))
return text
key_memo = {}
if (_one_shot and c_make_encoder is not None
and self.indent is None):
_iterencode = c_make_encoder(
markers, self.default, _encoder, self.indent,
self.key_separator, self.item_separator, self.sort_keys,
self.skipkeys, self.allow_nan, key_memo, self.use_decimal,
self.namedtuple_as_object, self.tuple_as_array)
else:
_iterencode = _make_iterencode(
markers, self.default, _encoder, self.indent, floatstr,
self.key_separator, self.item_separator, self.sort_keys,
self.skipkeys, _one_shot, self.use_decimal,
self.namedtuple_as_object, self.tuple_as_array)
try:
return _iterencode(o, 0)
finally:
key_memo.clear()
class JSONEncoderForHTML(JSONEncoder):
"""An encoder that produces JSON safe to embed in HTML.
To embed JSON content in, say, a script tag on a web page, the
characters &, < and > should be escaped. They cannot be escaped
with the usual entities (e.g. &amp;) because they are not expanded
within <script> tags.
"""
def encode(self, o):
# Override JSONEncoder.encode because it has hacks for
# performance that make things more complicated.
chunks = self.iterencode(o, True)
if self.ensure_ascii:
return ''.join(chunks)
else:
return u''.join(chunks)
def iterencode(self, o, _one_shot=False):
chunks = super(JSONEncoderForHTML, self).iterencode(o, _one_shot)
for chunk in chunks:
chunk = chunk.replace('&', '\\u0026')
chunk = chunk.replace('<', '\\u003c')
chunk = chunk.replace('>', '\\u003e')
yield chunk
def _make_iterencode(markers, _default, _encoder, _indent, _floatstr,
_key_separator, _item_separator, _sort_keys, _skipkeys, _one_shot,
_use_decimal, _namedtuple_as_object, _tuple_as_array,
## HACK: hand-optimized bytecode; turn globals into locals
False=False,
True=True,
ValueError=ValueError,
basestring=basestring,
Decimal=Decimal,
dict=dict,
float=float,
id=id,
int=int,
isinstance=isinstance,
list=list,
long=long,
str=str,
tuple=tuple,
):
def _iterencode_list(lst, _current_indent_level):
if not lst:
yield '[]'
return
if markers is not None:
markerid = id(lst)
if markerid in markers:
raise ValueError("Circular reference detected")
markers[markerid] = lst
buf = '['
if _indent is not None:
_current_indent_level += 1
newline_indent = '\n' + (_indent * _current_indent_level)
separator = _item_separator + newline_indent
buf += newline_indent
else:
newline_indent = None
separator = _item_separator
first = True
for value in lst:
if first:
first = False
else:
buf = separator
if isinstance(value, basestring):
yield buf + _encoder(value)
elif value is None:
yield buf + 'null'
elif value is True:
yield buf + 'true'
elif value is False:
yield buf + 'false'
elif isinstance(value, (int, long)):
yield buf + str(value)
elif isinstance(value, float):
yield buf + _floatstr(value)
elif _use_decimal and isinstance(value, Decimal):
yield buf + str(value)
else:
yield buf
if isinstance(value, list):
chunks = _iterencode_list(value, _current_indent_level)
elif (_namedtuple_as_object and isinstance(value, tuple) and
hasattr(value, '_asdict')):
chunks = _iterencode_dict(value._asdict(),
_current_indent_level)
elif _tuple_as_array and isinstance(value, tuple):
chunks = _iterencode_list(value, _current_indent_level)
elif isinstance(value, dict):
chunks = _iterencode_dict(value, _current_indent_level)
else:
chunks = _iterencode(value, _current_indent_level)
for chunk in chunks:
yield chunk
if newline_indent is not None:
_current_indent_level -= 1
yield '\n' + (_indent * _current_indent_level)
yield ']'
if markers is not None:
del markers[markerid]
def _iterencode_dict(dct, _current_indent_level):
if not dct:
yield '{}'
return
if markers is not None:
markerid = id(dct)
if markerid in markers:
raise ValueError("Circular reference detected")
markers[markerid] = dct
yield '{'
if _indent is not None:
_current_indent_level += 1
newline_indent = '\n' + (_indent * _current_indent_level)
item_separator = _item_separator + newline_indent
yield newline_indent
else:
newline_indent = None
item_separator = _item_separator
first = True
if _sort_keys:
items = dct.items()
items.sort(key=lambda kv: kv[0])
else:
items = dct.iteritems()
for key, value in items:
if isinstance(key, basestring):
pass
# JavaScript is weakly typed for these, so it makes sense to
# also allow them. Many encoders seem to do something like this.
elif isinstance(key, float):
key = _floatstr(key)
elif key is True:
key = 'true'
elif key is False:
key = 'false'
elif key is None:
key = 'null'
elif isinstance(key, (int, long)):
key = str(key)
elif _skipkeys:
continue
else:
raise TypeError("key " + repr(key) + " is not a string")
if first:
first = False
else:
yield item_separator
yield _encoder(key)
yield _key_separator
if isinstance(value, basestring):
yield _encoder(value)
elif value is None:
yield 'null'
elif value is True:
yield 'true'
elif value is False:
yield 'false'
elif isinstance(value, (int, long)):
yield str(value)
elif isinstance(value, float):
yield _floatstr(value)
elif _use_decimal and isinstance(value, Decimal):
yield str(value)
else:
if isinstance(value, list):
chunks = _iterencode_list(value, _current_indent_level)
elif (_namedtuple_as_object and isinstance(value, tuple) and
hasattr(value, '_asdict')):
chunks = _iterencode_dict(value._asdict(),
_current_indent_level)
elif _tuple_as_array and isinstance(value, tuple):
chunks = _iterencode_list(value, _current_indent_level)
elif isinstance(value, dict):
chunks = _iterencode_dict(value, _current_indent_level)
else:
chunks = _iterencode(value, _current_indent_level)
for chunk in chunks:
yield chunk
if newline_indent is not None:
_current_indent_level -= 1
yield '\n' + (_indent * _current_indent_level)
yield '}'
if markers is not None:
del markers[markerid]
def _iterencode(o, _current_indent_level):
if isinstance(o, basestring):
yield _encoder(o)
elif o is None:
yield 'null'
elif o is True:
yield 'true'
elif o is False:
yield 'false'
elif isinstance(o, (int, long)):
yield str(o)
elif isinstance(o, float):
yield _floatstr(o)
elif isinstance(o, list):
for chunk in _iterencode_list(o, _current_indent_level):
yield chunk
elif (_namedtuple_as_object and isinstance(o, tuple) and
hasattr(o, '_asdict')):
for chunk in _iterencode_dict(o._asdict(), _current_indent_level):
yield chunk
elif (_tuple_as_array and isinstance(o, tuple)):
for chunk in _iterencode_list(o, _current_indent_level):
yield chunk
elif isinstance(o, dict):
for chunk in _iterencode_dict(o, _current_indent_level):
yield chunk
elif _use_decimal and isinstance(o, Decimal):
yield str(o)
else:
if markers is not None:
markerid = id(o)
if markerid in markers:
raise ValueError("Circular reference detected")
markers[markerid] = o
o = _default(o)
for chunk in _iterencode(o, _current_indent_level):
yield chunk
if markers is not None:
del markers[markerid]
return _iterencode
|
gpl-3.0