repo_name (string, 6-100 chars) | path (string, 4-294 chars) | copies (string, 1-5 chars) | size (string, 4-6 chars) | content (string, 606-896k chars) | license (string, 15 classes)
---|---|---|---|---|---
SwagColoredKitteh/servo
|
python/servo/bootstrap.py
|
15
|
7857
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from __future__ import absolute_import, print_function
from distutils.spawn import find_executable
import json
import os
import platform
import shutil
import subprocess
import servo.packages as packages
from servo.util import extract, download_file, host_triple
def salt(context, force=False):
# Ensure Salt is installed in the virtualenv
# It's not installed globally because it's a large, non-required dependency,
# and the installation fails on Windows
print("Checking Salt installation...", end='')
reqs_path = os.path.join(context.topdir, 'python', 'requirements-salt.txt')
process = subprocess.Popen(
["pip", "install", "-q", "-I", "-r", reqs_path],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
process.wait()
if process.returncode:
out, err = process.communicate()
print('failed to install Salt via pip:')
print('Output: {}\nError: {}'.format(out, err))
return 1
print("done")
salt_root = os.path.join(context.sharedir, 'salt')
config_dir = os.path.join(salt_root, 'etc', 'salt')
pillar_dir = os.path.join(config_dir, 'pillars')
# In order to allow `mach bootstrap` to work from any CWD,
# the `root_dir` must be an absolute path.
# We place it under `context.sharedir` because
# Salt caches data (e.g. gitfs files) in its `var` subdirectory.
# Hence, dynamically generate the config with an appropriate `root_dir`
# and serialize it as JSON (which is valid YAML).
config = {
'fileserver_backend': ['git'],
'gitfs_env_whitelist': 'base',
'gitfs_provider': 'gitpython',
'gitfs_remotes': [
'https://github.com/servo/saltfs.git',
],
'hash_type': 'sha384',
'master': 'localhost',
'root_dir': salt_root,
'state_output': 'changes',
'state_tabular': True,
}
if not os.path.exists(config_dir):
os.makedirs(config_dir, mode=0o700)
with open(os.path.join(config_dir, 'minion'), 'w') as config_file:
config_file.write(json.dumps(config) + '\n')
# Similarly, the pillar data is created dynamically
# and temporarily serialized to disk.
# This dynamism is not yet used, but will be in the future
# to enable Android bootstrapping by using
# context.sharedir as a location for Android packages.
pillar = {
'top.sls': {
'base': {
'*': ['bootstrap'],
},
},
'bootstrap.sls': {
'fully_managed': False,
},
}
if os.path.exists(pillar_dir):
shutil.rmtree(pillar_dir)
os.makedirs(pillar_dir, mode=0o700)
for filename in pillar:
with open(os.path.join(pillar_dir, filename), 'w') as pillar_file:
pillar_file.write(json.dumps(pillar[filename]) + '\n')
cmd = [
'sudo',
# sudo escapes from the venv, need to use full path
find_executable('salt-call'),
'--local',
'--config-dir={}'.format(config_dir),
'--pillar-root={}'.format(pillar_dir),
'state.apply',
'servo-build-dependencies',
]
if not force:
print('Running bootstrap in dry-run mode to show changes')
# Because `test=True` mode runs each state individually without
# considering how required/previous states affect the system,
# it will often report states with requisites as failing due
# to the requisites not actually being run,
# even though these are spurious and will succeed during
# the actual highstate.
# Hence `--retcode-passthrough` is not helpful in dry-run mode,
# so only detect failures of the actual salt-call binary itself.
retcode = subprocess.call(cmd + ['test=True'])
if retcode != 0:
print('Something went wrong while bootstrapping')
return retcode
proceed = raw_input(
'Proposed changes are above, proceed with bootstrap? [y/N]: '
)
if proceed.lower() not in ['y', 'yes']:
return 0
print('')
print('Running Salt bootstrap')
retcode = subprocess.call(cmd + ['--retcode-passthrough'])
if retcode == 0:
print('Salt bootstrapping complete')
else:
print('Salt bootstrapping encountered errors')
return retcode
def windows_gnu(context, force=False):
'''Bootstrapper for msys2 based environments for building in Windows.'''
if not find_executable('pacman'):
print(
'The Windows GNU bootstrapper only works with msys2 with pacman. '
'Get msys2 at http://msys2.github.io/'
)
return 1
# Ensure repositories are up to date
command = ['pacman', '--sync', '--refresh']
subprocess.check_call(command)
# Install packages
command = ['pacman', '--sync', '--needed']
if force:
command.append('--noconfirm')
subprocess.check_call(command + list(packages.WINDOWS_GNU))
# Downgrade GCC to 5.4.0-1
gcc_pkgs = ["gcc", "gcc-ada", "gcc-fortran", "gcc-libgfortran", "gcc-libs", "gcc-objc"]
gcc_version = "5.4.0-1"
mingw_url = "http://repo.msys2.org/mingw/x86_64/mingw-w64-x86_64-{}-{}-any.pkg.tar.xz"
gcc_list = [mingw_url.format(gcc, gcc_version) for gcc in gcc_pkgs]
# Note: `--upgrade` also does downgrades
downgrade_command = ['pacman', '--upgrade']
if force:
downgrade_command.append('--noconfirm')
subprocess.check_call(downgrade_command + gcc_list)
def windows_msvc(context, force=False):
'''Bootstrapper for MSVC building on Windows.'''
deps_dir = os.path.join(context.sharedir, "msvc-dependencies")
deps_url = "https://servo-rust.s3.amazonaws.com/msvc-deps/"
def version(package):
return packages.WINDOWS_MSVC[package]
def package_dir(package):
return os.path.join(deps_dir, package, version(package))
to_install = {}
for package in packages.WINDOWS_MSVC:
# Don't install CMake if it already exists in PATH
if package == "cmake" and find_executable(package):
continue
if not os.path.isdir(package_dir(package)):
to_install[package] = version(package)
if not to_install:
return 0
print("Installing missing MSVC dependencies...")
for package in to_install:
full_spec = '{}-{}'.format(package, version(package))
parent_dir = os.path.dirname(package_dir(package))
if not os.path.isdir(parent_dir):
os.makedirs(parent_dir)
zip_path = package_dir(package) + ".zip"
if not os.path.isfile(zip_path):
zip_url = "{}{}.zip".format(deps_url, full_spec)
download_file(full_spec, zip_url, zip_path)
print("Extracting {}...".format(full_spec), end='')
extract(zip_path, deps_dir)
print("done")
extracted_path = os.path.join(deps_dir, full_spec)
os.rename(extracted_path, package_dir(package))
return 0
def bootstrap(context, force=False):
'''Dispatches to the right bootstrapping function for the OS.'''
bootstrapper = None
if "windows-gnu" in host_triple():
bootstrapper = windows_gnu
elif "windows-msvc" in host_triple():
bootstrapper = windows_msvc
elif "linux-gnu" in host_triple():
distro, version, _ = platform.linux_distribution()
if distro == 'Ubuntu' and version == '14.04':
bootstrapper = salt
if bootstrapper is None:
print('Bootstrap support is not yet available for your OS.')
return 1
return bootstrapper(context, force=force)
|
mpl-2.0
|
robnee/picsim
|
insdata.py
|
1
|
4659
|
'''
This is the instruction data grabbed from the Instruction Set Summary table
in the Pic data sheet for an enhanced midrange (14bit) processor such as the
16f1826 or 12f1822. This is used to initialize the decoder.
fields:
mnemonic
arguments
description
cycles
opcode format
status bits affected :
notes
'''
ENHMID = [
('NOP', '-', 'No Operation', '1', '00 0000 0000 0000', '', ''),
('RESET', '-', 'Software device Reset', '1', '00 0000 0000 0001', '', ''),
('RETURN', '-', 'Return from Subroutine', '2', '00 0000 0000 1000', '', ''),
('RETFIE', '-', 'Return from interrupt', '2', '00 0000 0000 1001', '', ''),
('CALLW', '-', 'Call Subroutine with W', '2', '00 0000 0000 1010', '', ''),
('BRW', '-', 'Relative Branch with W', '2', '00 0000 0000 1011', '', ''),
('MOVLB', 'k', 'Move literal to BSR', '1', '00 0000 001k kkkk', '', ''),
('OPTION', '-', 'Load OPTION_REG register with W', '1', '00 0000 0110 0010', '', ''),
('SLEEP', '-', 'Go into Standby mode', '1', '00 0000 0110 0011', 'TO,PD', ''),
('CLRWDT', '-', 'Clear Watchdog Timer', '1', '00 0000 0110 0100', 'TO,PD', ''),
('TRIS', 'f', 'Load TRIS register with W', '1', '00 0000 0110 0fff', '', ''),
('MOVWF', 'f', 'Move W to f', '1', '00 0000 1fff ffff', '', '2'),
('CLRW', '-', 'Clear W', '1', '00 0001 0000 00xx', 'Z', '2'),
('CLRF', 'f', 'Clear f', '1', '00 0001 1fff ffff', 'Z', ''),
('SUBWF', 'f,d', 'Subtract W from f', '1', '00 0010 dfff ffff', 'C,DC,Z', '2'),
('DECF', 'f,d', 'Decrement f', '1', '00 0011 dfff ffff', 'Z', '2'),
('IORWF', 'f,d', 'Inclusive OR W with f', '1', '00 0100 dfff ffff', 'Z', '2'),
('ANDWF', 'f,d', 'AND W with f', '1', '00 0101 dfff ffff', 'Z', '2'),
('XORWF', 'f,d', 'Exclusive OR W with f', '1', '00 0110 dfff ffff', 'Z', '2'),
('ADDWF', 'f,d', 'Add W and f', '1', '00 0111 dfff ffff', 'C,DC,Z', '2'),
('MOVF', 'f,d', 'Move f', '1', '00 1000 dfff ffff', 'Z', '2'),
('COMF', 'f,d', 'Complement f', '1', '00 1001 dfff ffff', 'Z', '2'),
('INCF', 'f,d', 'Increment f', '1', '00 1010 dfff ffff', 'Z', '2'),
('DECFSZ', 'f,d', 'Decrement f, Skip if 0', '1,2', '00 1011 dfff ffff', '', '1,2'),
('RRF', 'f,d', 'Rotate Right f through Carry', '1', '00 1100 dfff ffff', 'C', '2'),
('RLF', 'f,d', 'Rotate Left f through Carry', '1', '00 1101 dfff ffff', 'C', '2'),
('SWAPF', 'f,d', 'Swap nibbles in f', '1', '00 1110 dfff ffff', '', '2'),
('INCFSZ', 'f,d', 'Increment f, Skip if 0', '1,2', '00 1111 dfff ffff', '', '1,2'),
('BCF', 'f,b', 'Bit Clear f', '1', '01 00bb bfff ffff', '', '2'),
('BSF', 'f,b', 'Bit Set f', '1', '01 01bb bfff ffff', '', '2'),
('BTFSC', 'f,b', 'Bit Test f, Skip if Clear', '1,2', '01 10bb bfff ffff', '', '1,2'),
('BTFSS', 'f,b', 'Bit Test f, Skip if Set', '1,2', '01 11bb bfff ffff', '', '1,2'),
('CALL', 'k', 'Call Subroutine', '2', '10 0kkk kkkk kkkk', '', ''),
('GOTO', 'k', 'Goto address', '2', '10 1kkk kkkk kkkk', '', ''),
('MOVLW', 'k', 'Move literal to W', '1', '11 0000 kkkk kkkk', '', ''),
('ADDFSR', 'n,k', 'Add Literal k to FSRn', '1', '11 0001 0nkk kkkk', '', ''),
('MOVLP', 'k', 'Move literal to PCLATH', '1', '11 0001 1kkk kkkk', '', ''),
('BRA', 'k', 'Relative Branch', '2', '11 001k kkkk kkkk', '', ''),
('RETLW', 'k', 'Return with literal in W', '2', '11 0100 kkkk kkkk', '', ''),
('LSLF', 'f,d', 'Logical Left Shift', '1', '11 0101 dfff ffff', 'C,Z', '2'),
('LSRF', 'f,d', 'Logical Right Shift', '1', '11 0110 dfff ffff', 'C,Z', '2'),
('ASRF', 'f,d', 'Arithmetic Right Shift', '1', '11 0111 dfff ffff', 'C,Z', '2'),
('IORLW', 'k', 'Inclusive OR literal with W', '1', '11 1000 kkkk kkkk', 'Z', ''),
('ANDLW', 'k', 'AND literal with W', '1', '11 1001 kkkk kkkk', 'Z', ''),
('XORLW', 'k', 'Exclusive OR literal with W', '1', '11 1010 kkkk kkkk', 'Z', ''),
('SUBWFB', 'f,d', 'Subtract with Borrow W from f', '1', '11 1011 dfff ffff', 'C,DC,Z', '2'),
('SUBLW', 'k', 'Subtract W from literal', '1', '11 1100 kkkk kkkk', 'C,DC,Z', ''),
('ADDWFC', 'f,d', 'Add with Carry W and f', '1', '11 1101 dfff ffff', 'C,DC,Z', '2'),
('ADDLW', 'k', 'Add literal and W', '1', '11 1110 kkkk kkkk', 'C,DC,Z', ''),
('MOVIW', 'n mm', 'Move Indirect FSRn to W with pre/post inc/dec', '1', '00 0000 0001 0nmm', 'Z', '2,3'),
('MOVIW', 'k[n]', 'Move INDFn to W, Indexed Indirect', '1', '11 1111 0nkk kkkk', 'Z', '2,3'),
('MOVWI', 'n mm', 'Move W to Indirect FSRn with pre/post inc/dec', '1', '00 0000 0001 1nmm', '', '2'),
('MOVWI', 'k[n]', 'Move W to INDFn, Indexed Indirect', '1', '11 1111 1nkk kkkk', '', '2'),
]
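# Illustrative sketch (an assumption, not necessarily how this project's decoder
# actually works): one plausible way to use the opcode format strings above is to
# turn each pattern into a (mask, value) pair, where literal 0/1 bits must match
# exactly and operand letters (d, f, k, b, n, m, x) are left free.
def _format_to_mask_value(fmt):
    """Return (mask, value) for a 14-bit opcode format such as '00 0111 dfff ffff'."""
    mask = value = 0
    for ch in fmt.replace(' ', ''):
        mask <<= 1
        value <<= 1
        if ch in '01':        # literal bit: must match exactly
            mask |= 1
            value |= int(ch)
    return mask, value

# Example: ADDWF is '00 0111 dfff ffff', so any 14-bit word whose top six bits
# are 000111 matches it regardless of the d and f operand bits.
# mask, value = _format_to_mask_value('00 0111 dfff ffff')
# assert (0b00011101010101 & mask) == value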
|
gpl-3.0
|
praveenkumar/ansible
|
test/units/template/test_safe_eval.py
|
14
|
1768
|
# (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from collections import defaultdict
from ansible.compat.tests import unittest
from ansible.compat.tests.mock import patch, MagicMock
from ansible.template.safe_eval import safe_eval
class TestSafeEval(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_safe_eval_usage(self):
# test safe eval calls with different possible types for the
# locals dictionary, to ensure we don't run into problems like
# ansible/ansible/issues/12206 again
for locals_vars in (dict(), defaultdict(dict)):
self.assertEqual(safe_eval('True', locals=locals_vars), True)
self.assertEqual(safe_eval('False', locals=locals_vars), False)
self.assertEqual(safe_eval('0', locals=locals_vars), 0)
self.assertEqual(safe_eval('[]', locals=locals_vars), [])
self.assertEqual(safe_eval('{}', locals=locals_vars), {})
|
gpl-3.0
|
jonathonwalz/ansible
|
test/units/plugins/cache/test_cache.py
|
91
|
4078
|
# (c) 2012-2015, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.compat.tests import unittest, mock
from ansible.errors import AnsibleError
from ansible.plugins.cache import FactCache
from ansible.plugins.cache.base import BaseCacheModule
from ansible.plugins.cache.memory import CacheModule as MemoryCache
HAVE_MEMCACHED = True
try:
import memcache
except ImportError:
HAVE_MEMCACHED = False
else:
# Use an else so that the only reason we skip this is for lack of
# memcached, not errors importing the plugin
from ansible.plugins.cache.memcached import CacheModule as MemcachedCache
HAVE_REDIS = True
try:
import redis
except ImportError:
HAVE_REDIS = False
else:
from ansible.plugins.cache.redis import CacheModule as RedisCache
class TestFactCache(unittest.TestCase):
def setUp(self):
with mock.patch('ansible.constants.CACHE_PLUGIN', 'memory'):
self.cache = FactCache()
def test_copy(self):
self.cache['avocado'] = 'fruit'
self.cache['daisy'] = 'flower'
a_copy = self.cache.copy()
self.assertEqual(type(a_copy), dict)
self.assertEqual(a_copy, dict(avocado='fruit', daisy='flower'))
def test_plugin_load_failure(self):
# See https://github.com/ansible/ansible/issues/18751
# Note no fact_connection config set, so this will fail
with mock.patch('ansible.constants.CACHE_PLUGIN', 'json'):
self.assertRaisesRegexp(AnsibleError,
"Unable to load the facts cache plugin.*json.*",
FactCache)
class TestAbstractClass(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_subclass_error(self):
class CacheModule1(BaseCacheModule):
pass
with self.assertRaises(TypeError):
CacheModule1()
class CacheModule2(BaseCacheModule):
def get(self, key):
super(CacheModule2, self).get(key)
with self.assertRaises(TypeError):
CacheModule2()
def test_subclass_success(self):
class CacheModule3(BaseCacheModule):
def get(self, key):
super(CacheModule3, self).get(key)
def set(self, key, value):
super(CacheModule3, self).set(key, value)
def keys(self):
super(CacheModule3, self).keys()
def contains(self, key):
super(CacheModule3, self).contains(key)
def delete(self, key):
super(CacheModule3, self).delete(key)
def flush(self):
super(CacheModule3, self).flush()
def copy(self):
super(CacheModule3, self).copy()
self.assertIsInstance(CacheModule3(), CacheModule3)
@unittest.skipUnless(HAVE_MEMCACHED, 'python-memcached module not installed')
def test_memcached_cachemodule(self):
self.assertIsInstance(MemcachedCache(), MemcachedCache)
def test_memory_cachemodule(self):
self.assertIsInstance(MemoryCache(), MemoryCache)
@unittest.skipUnless(HAVE_REDIS, 'Redis python module not installed')
def test_redis_cachemodule(self):
self.assertIsInstance(RedisCache(), RedisCache)
|
gpl-3.0
|
gaddman/ansible
|
lib/ansible/modules/network/avi/avi_pool.py
|
29
|
20387
|
#!/usr/bin/python
#
# @author: Gaurav Rastogi ([email protected])
# Eric Anderson ([email protected])
# module_check: supported
# Avi Version: 17.1.1
#
# Copyright: (c) 2017 Gaurav Rastogi, <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: avi_pool
author: Gaurav Rastogi (@grastogi23) <[email protected]>
short_description: Module for setup of Pool Avi RESTful Object
description:
- This module is used to configure Pool object
- more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.3"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent", "present"]
avi_api_update_method:
description:
- Default method for object update is HTTP PUT.
- Setting to patch will override that behavior to use HTTP PATCH.
version_added: "2.5"
default: put
choices: ["put", "patch"]
avi_api_patch_op:
description:
- Patch operation to use when using avi_api_update_method as patch.
version_added: "2.5"
choices: ["add", "replace", "delete"]
a_pool:
description:
- Name of container cloud application that constitutes a pool in an a-b pool configuration, if different from vs app.
ab_pool:
description:
- A/b pool configuration.
ab_priority:
description:
- Priority of this pool in an a-b pool pair.
- Internally used.
apic_epg_name:
description:
- Synchronize cisco apic epg members with pool servers.
application_persistence_profile_ref:
description:
- Persistence will ensure the same user sticks to the same server for a desired duration of time.
- It is a reference to an object of type applicationpersistenceprofile.
autoscale_launch_config_ref:
description:
- If configured then avi will trigger orchestration of pool server creation and deletion.
- It is only supported for container clouds like mesos, openshift, kubernetes, docker etc.
- It is a reference to an object of type autoscalelaunchconfig.
autoscale_networks:
description:
- Network ids for the launch configuration.
autoscale_policy_ref:
description:
- Reference to server autoscale policy.
- It is a reference to an object of type serverautoscalepolicy.
capacity_estimation:
description:
- Inline estimation of capacity of servers.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
type: bool
capacity_estimation_ttfb_thresh:
description:
- The maximum time-to-first-byte of a server.
- Allowed values are 1-5000.
- Special values are 0 - 'automatic'.
- Default value when not specified in API or module is interpreted by Avi Controller as 0.
- Units(MILLISECONDS).
cloud_config_cksum:
description:
- Checksum of cloud configuration for pool.
- Internally set by cloud connector.
cloud_ref:
description:
- It is a reference to an object of type cloud.
connection_ramp_duration:
description:
- Duration for which new connections will be gradually ramped up to a server recently brought online.
- Useful for lb algorithms that are least connection based.
- Allowed values are 1-300.
- Special values are 0 - 'immediate'.
- Default value when not specified in API or module is interpreted by Avi Controller as 10.
- Units(MIN).
created_by:
description:
- Creator name.
default_server_port:
description:
- Traffic sent to servers will use this destination server port unless overridden by the server's specific port attribute.
- The ssl checkbox enables avi to server encryption.
- Allowed values are 1-65535.
- Default value when not specified in API or module is interpreted by Avi Controller as 80.
description:
description:
- A description of the pool.
domain_name:
description:
- Comma separated list of domain names which will be used to verify the common names or subject alternative names presented by server certificates.
- It is performed only when common name check host_check_enabled is enabled.
east_west:
description:
- Inherited config from virtualservice.
type: bool
enabled:
description:
- Enable or disable the pool.
- Disabling will terminate all open connections and pause health monitors.
- Default value when not specified in API or module is interpreted by Avi Controller as True.
type: bool
external_autoscale_groups:
description:
- Names of external auto-scale groups for pool servers.
- Currently available only for aws and azure.
- Field introduced in 17.1.2.
fail_action:
description:
- Enable an action - close connection, http redirect or local http response - when a pool failure happens.
- By default, a connection will be closed, in case the pool experiences a failure.
fewest_tasks_feedback_delay:
description:
- Periodicity of feedback for fewest tasks server selection algorithm.
- Allowed values are 1-300.
- Default value when not specified in API or module is interpreted by Avi Controller as 10.
- Units(SEC).
graceful_disable_timeout:
description:
- Used to gracefully disable a server.
- Virtual service waits for the specified time before terminating the existing connections to the servers that are disabled.
- Allowed values are 1-7200.
- Special values are 0 - 'immediate', -1 - 'infinite'.
- Default value when not specified in API or module is interpreted by Avi Controller as 1.
- Units(MIN).
gslb_sp_enabled:
description:
- Indicates if the pool is a site-persistence pool.
- Field introduced in 17.2.1.
version_added: "2.5"
type: bool
health_monitor_refs:
description:
- Verify server health by applying one or more health monitors.
- Active monitors generate synthetic traffic from each service engine and mark a server up or down based on the response.
- The passive monitor listens only to client to server communication.
- It raises or lowers the ratio of traffic destined to a server based on successful responses.
- It is a reference to an object of type healthmonitor.
host_check_enabled:
description:
- Enable common name check for server certificate.
- If enabled and no explicit domain name is specified, avi will use the incoming host header to do the match.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
type: bool
inline_health_monitor:
description:
- The passive monitor will monitor client to server connections and requests and adjust traffic load to servers based on successful responses.
- This may alter the expected behavior of the lb method, such as round robin.
- Default value when not specified in API or module is interpreted by Avi Controller as True.
type: bool
ipaddrgroup_ref:
description:
- Use list of servers from ip address group.
- It is a reference to an object of type ipaddrgroup.
lb_algorithm:
description:
- The load balancing algorithm will pick a server within the pool's list of available servers.
- Enum options - LB_ALGORITHM_LEAST_CONNECTIONS, LB_ALGORITHM_ROUND_ROBIN, LB_ALGORITHM_FASTEST_RESPONSE, LB_ALGORITHM_CONSISTENT_HASH,
- LB_ALGORITHM_LEAST_LOAD, LB_ALGORITHM_FEWEST_SERVERS, LB_ALGORITHM_RANDOM, LB_ALGORITHM_FEWEST_TASKS, LB_ALGORITHM_NEAREST_SERVER,
- LB_ALGORITHM_CORE_AFFINITY.
- Default value when not specified in API or module is interpreted by Avi Controller as LB_ALGORITHM_LEAST_CONNECTIONS.
lb_algorithm_consistent_hash_hdr:
description:
- Http header name to be used for the hash key.
lb_algorithm_core_nonaffinity:
description:
- Degree of non-affinity for core affinity based server selection.
- Allowed values are 1-65535.
- Field introduced in 17.1.3.
- Default value when not specified in API or module is interpreted by Avi Controller as 2.
version_added: "2.4"
lb_algorithm_hash:
description:
- Criteria used as a key for determining the hash between the client and server.
- Enum options - LB_ALGORITHM_CONSISTENT_HASH_SOURCE_IP_ADDRESS, LB_ALGORITHM_CONSISTENT_HASH_SOURCE_IP_ADDRESS_AND_PORT,
- LB_ALGORITHM_CONSISTENT_HASH_URI, LB_ALGORITHM_CONSISTENT_HASH_CUSTOM_HEADER, LB_ALGORITHM_CONSISTENT_HASH_CUSTOM_STRING.
- Default value when not specified in API or module is interpreted by Avi Controller as LB_ALGORITHM_CONSISTENT_HASH_SOURCE_IP_ADDRESS.
lookup_server_by_name:
description:
- Allow server lookup by name.
- Field introduced in 17.1.11,17.2.4.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
version_added: "2.5"
type: bool
max_concurrent_connections_per_server:
description:
- The maximum number of concurrent connections allowed to each server within the pool.
- Note applied value will be no less than the number of service engines that the pool is placed on.
- If set to 0, no limit is applied.
- Default value when not specified in API or module is interpreted by Avi Controller as 0.
max_conn_rate_per_server:
description:
- Rate limit connections to each server.
name:
description:
- The name of the pool.
required: true
networks:
description:
- (internal-use) networks designated as containing servers for this pool.
- The servers may be further narrowed down by a filter.
- This field is used internally by avi, not editable by the user.
nsx_securitygroup:
description:
- A list of nsx service groups where the servers for the pool are created.
- Field introduced in 17.1.1.
pki_profile_ref:
description:
- Avi will validate the ssl certificate present by a server against the selected pki profile.
- It is a reference to an object of type pkiprofile.
placement_networks:
description:
- Manually select the networks and subnets used to provide reachability to the pool's servers.
- Specify the subnet using the following syntax 10-1-1-0/24.
- Use static routes in vrf configuration when pool servers are not directly connected but routable from the service engine.
prst_hdr_name:
description:
- Header name for custom header persistence.
request_queue_depth:
description:
- Minimum number of requests to be queued when pool is full.
- Default value when not specified in API or module is interpreted by Avi Controller as 128.
request_queue_enabled:
description:
- Enable request queue when pool is full.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
type: bool
rewrite_host_header_to_server_name:
description:
- Rewrite incoming host header to server name of the server to which the request is proxied.
- Enabling this feature rewrites host header for requests to all servers in the pool.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
type: bool
rewrite_host_header_to_sni:
description:
- If sni server name is specified, rewrite incoming host header to the sni server name.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
type: bool
server_auto_scale:
description:
- Server autoscale.
- Not used anymore.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
type: bool
server_count:
description:
- Number of server_count.
- Default value when not specified in API or module is interpreted by Avi Controller as 0.
server_name:
description:
- Fully qualified dns hostname which will be used in the tls sni extension in server connections if sni is enabled.
- If no value is specified, avi will use the incoming host header instead.
server_reselect:
description:
- Server reselect configuration for http requests.
servers:
description:
- The pool directs load balanced traffic to this list of destination servers.
- The servers can be configured by ip address, name, network or via ip address group.
sni_enabled:
description:
- Enable tls sni for server connections.
- If disabled, avi will not send the sni extension as part of the handshake.
- Default value when not specified in API or module is interpreted by Avi Controller as True.
type: bool
ssl_key_and_certificate_ref:
description:
- Service engines will present a client ssl certificate to the server.
- It is a reference to an object of type sslkeyandcertificate.
ssl_profile_ref:
description:
- When enabled, avi re-encrypts traffic to the backend servers.
- The specific ssl profile defines which ciphers and ssl versions will be supported.
- It is a reference to an object of type sslprofile.
tenant_ref:
description:
- It is a reference to an object of type tenant.
url:
description:
- Avi controller URL of the object.
use_service_port:
description:
- Do not translate the client's destination port when sending the connection to the server.
- The pool or servers specified service port will still be used for health monitoring.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
type: bool
uuid:
description:
- Uuid of the pool.
vrf_ref:
description:
- Virtual routing context that the pool is bound to.
- This is used to provide the isolation of the set of networks the pool is attached to.
- The pool inherits the virtual routing context of the virtual service, and this field is used only internally, and is set by pb-transform.
- It is a reference to an object of type vrfcontext.
extends_documentation_fragment:
- avi
'''
EXAMPLES = """
- name: Create a Pool with two servers and HTTP monitor
avi_pool:
controller: 10.10.1.20
username: avi_user
password: avi_password
name: testpool1
description: testpool1
state: present
health_monitor_refs:
- '/api/healthmonitor?name=System-HTTP'
servers:
- ip:
addr: 10.10.2.20
type: V4
- ip:
addr: 10.10.2.21
type: V4
- name: Patch pool with a single server using patch op and avi_credentials
avi_pool:
avi_api_update_method: patch
avi_api_patch_op: delete
avi_credentials: "{{avi_credentials}}"
name: test-pool
servers:
- ip:
addr: 10.90.64.13
type: 'V4'
register: pool
when:
- state | default("present") == "present"
"""
RETURN = '''
obj:
description: Pool (api/pool) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
from ansible.module_utils.network.avi.avi import (
avi_common_argument_spec, HAS_AVI, avi_ansible_api)
except ImportError:
HAS_AVI = False
def main():
argument_specs = dict(
state=dict(default='present',
choices=['absent', 'present']),
avi_api_update_method=dict(default='put',
choices=['put', 'patch']),
avi_api_patch_op=dict(choices=['add', 'replace', 'delete']),
a_pool=dict(type='str',),
ab_pool=dict(type='dict',),
ab_priority=dict(type='int',),
apic_epg_name=dict(type='str',),
application_persistence_profile_ref=dict(type='str',),
autoscale_launch_config_ref=dict(type='str',),
autoscale_networks=dict(type='list',),
autoscale_policy_ref=dict(type='str',),
capacity_estimation=dict(type='bool',),
capacity_estimation_ttfb_thresh=dict(type='int',),
cloud_config_cksum=dict(type='str',),
cloud_ref=dict(type='str',),
connection_ramp_duration=dict(type='int',),
created_by=dict(type='str',),
default_server_port=dict(type='int',),
description=dict(type='str',),
domain_name=dict(type='list',),
east_west=dict(type='bool',),
enabled=dict(type='bool',),
external_autoscale_groups=dict(type='list',),
fail_action=dict(type='dict',),
fewest_tasks_feedback_delay=dict(type='int',),
graceful_disable_timeout=dict(type='int',),
gslb_sp_enabled=dict(type='bool',),
health_monitor_refs=dict(type='list',),
host_check_enabled=dict(type='bool',),
inline_health_monitor=dict(type='bool',),
ipaddrgroup_ref=dict(type='str',),
lb_algorithm=dict(type='str',),
lb_algorithm_consistent_hash_hdr=dict(type='str',),
lb_algorithm_core_nonaffinity=dict(type='int',),
lb_algorithm_hash=dict(type='str',),
lookup_server_by_name=dict(type='bool',),
max_concurrent_connections_per_server=dict(type='int',),
max_conn_rate_per_server=dict(type='dict',),
name=dict(type='str', required=True),
networks=dict(type='list',),
nsx_securitygroup=dict(type='list',),
pki_profile_ref=dict(type='str',),
placement_networks=dict(type='list',),
prst_hdr_name=dict(type='str',),
request_queue_depth=dict(type='int',),
request_queue_enabled=dict(type='bool',),
rewrite_host_header_to_server_name=dict(type='bool',),
rewrite_host_header_to_sni=dict(type='bool',),
server_auto_scale=dict(type='bool',),
server_count=dict(type='int',),
server_name=dict(type='str',),
server_reselect=dict(type='dict',),
servers=dict(type='list',),
sni_enabled=dict(type='bool',),
ssl_key_and_certificate_ref=dict(type='str',),
ssl_profile_ref=dict(type='str',),
tenant_ref=dict(type='str',),
url=dict(type='str',),
use_service_port=dict(type='bool',),
uuid=dict(type='str',),
vrf_ref=dict(type='str',),
)
argument_specs.update(avi_common_argument_spec())
module = AnsibleModule(
argument_spec=argument_specs, supports_check_mode=True)
if not HAS_AVI:
return module.fail_json(msg=(
'Avi python API SDK (avisdk>=17.1) is not installed. '
'For more details visit https://github.com/avinetworks/sdk.'))
return avi_ansible_api(module, 'pool',
set([]))
if __name__ == '__main__':
main()
|
gpl-3.0
|
SCOAP3/invenio
|
invenio/legacy/bibdocfile/plugins/bom_textdoc.py
|
13
|
6894
|
# This file is part of Invenio.
# Copyright (C) 2007, 2008, 2009, 2010, 2011, 2014 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""BibObject Module providing BibObject prividing features for documents containing text (not necessarily as the main part of the content)"""
import os
import re
from datetime import datetime
from invenio.config import CFG_BIBINDEX_PERFORM_OCR_ON_DOCNAMES
from invenio.legacy.bibdocfile.api import BibDoc, InvenioBibDocFileError
from invenio.legacy.dbquery import run_sql
from invenio.ext.logging import register_exception
_RE_PERFORM_OCR = re.compile(CFG_BIBINDEX_PERFORM_OCR_ON_DOCNAMES)
class BibTextDoc(BibDoc):
def get_text(self, version=None):
"""
@param version: the requested version. If not set, the latest version
will be used.
@type version: integer
@return: the textual content corresponding to the specified version
of the document.
@rtype: string
"""
if version is None:
version = self.get_latest_version()
if self.has_text(version):
return open(os.path.join(self.basedir, '.text;%i' % version)).read()
else:
return ""
def is_ocr_required(self):
"""
Return True if this document requires OCR in order to extract text from it.
"""
for bibrec_link in self.bibrec_links:
if _RE_PERFORM_OCR.match(bibrec_link['docname']):
return True
return False
def get_text_path(self, version=None):
"""
@param version: the requested version. If not set, the latest version
will be used.
@type version: int
@return: the full path to the textual content corresponding to the specified version
of the document.
@rtype: string
"""
if version is None:
version = self.get_latest_version()
if self.has_text(version):
return os.path.join(self.basedir, '.text;%i' % version)
else:
return ""
def extract_text(self, version=None, perform_ocr=False, ln='en'):
"""
Try what is necessary to extract the textual information of a document.
@param version: the version of the document for which text is required.
If not specified the text will be retrieved from the last version.
@type version: integer
@param perform_ocr: whether to perform OCR.
@type perform_ocr: bool
@param ln: a two letter language code to give as a hint to the OCR
procedure.
@type ln: string
@raise InvenioBibDocFileError: in case of error.
@note: the text is extracted and cached for later use. Use L{get_text}
to retrieve it.
"""
from invenio.legacy.websubmit.file_converter import get_best_format_to_extract_text_from, convert_file, InvenioWebSubmitFileConverterError
if version is None:
version = self.get_latest_version()
docfiles = self.list_version_files(version)
## We try to extract text only from original or OCRed documents.
filenames = [docfile.get_full_path() for docfile in docfiles if 'CONVERTED' not in docfile.flags or 'OCRED' in docfile.flags]
try:
filename = get_best_format_to_extract_text_from(filenames)
except InvenioWebSubmitFileConverterError:
## We fall back on considering all the documents
filenames = [docfile.get_full_path() for docfile in docfiles]
try:
filename = get_best_format_to_extract_text_from(filenames)
except InvenioWebSubmitFileConverterError:
open(os.path.join(self.basedir, '.text;%i' % version), 'w').write('')
return
try:
convert_file(filename, os.path.join(self.basedir, '.text;%i' % version), '.txt', perform_ocr=perform_ocr, ln=ln)
if version == self.get_latest_version():
run_sql("UPDATE bibdoc SET text_extraction_date=NOW() WHERE id=%s", (self.id, ))
except InvenioWebSubmitFileConverterError as e:
register_exception(alert_admin=True, prefix="Error in extracting text from bibdoc %i, version %i" % (self.id, version))
raise InvenioBibDocFileError, str(e)
def pdf_a_p(self):
"""
@return: True if this document contains a PDF in PDF/A format.
@rtype: bool"""
return self.has_flag('PDF/A', 'pdf')
def has_text(self, require_up_to_date=False, version=None):
"""
Return True if the text of this document has already been extracted.
@param require_up_to_date: if True check the text was actually
extracted after the most recent format of the given version.
@type require_up_to_date: bool
@param version: a version for which the text should have been
extracted. If not specified the latest version is considered.
@type version: integer
@return: True if the text has already been extracted.
@rtype: bool
"""
if version is None:
version = self.get_latest_version()
if os.path.exists(os.path.join(self.basedir, '.text;%i' % version)):
if not require_up_to_date:
return True
else:
docfiles = self.list_version_files(version)
text_md = datetime.fromtimestamp(os.path.getmtime(os.path.join(self.basedir, '.text;%i' % version)))
for docfile in docfiles:
if text_md <= docfile.md:
return False
return True
return False
def __repr__(self):
return 'BibTextDoc(%s, %s, %s)' % (repr(self.id), repr(self.doctype), repr(self.human_readable))
def supports(doctype, extensions):
return doctype == "Fulltext" or reduce(lambda x, y: x or y.startswith(".pdf") or y.startswith(".ps"), extensions, False)
def create_instance(docid=None, doctype='Main', human_readable=False, # pylint: disable=W0613
initial_data = None):
return BibTextDoc(docid=docid, human_readable=human_readable,
initial_data = initial_data)
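# Illustrative usage sketch (hypothetical; the docid and call sequence are assumptions,
# not how Invenio itself drives this plugin):
#
#   doc = create_instance(docid=123)
#   if not doc.has_text():
#       doc.extract_text(perform_ocr=doc.is_ocr_required())
#   print(doc.get_text())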
|
gpl-2.0
|
dannyperry571/theapprentice
|
script.module.youtube.dl/lib/youtube_dl/extractor/thisav.py
|
32
|
2527
|
# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import remove_end
class ThisAVIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?thisav\.com/video/(?P<id>[0-9]+)/.*'
_TESTS = [{
# jwplayer
'url': 'http://www.thisav.com/video/47734/%98%26sup1%3B%83%9E%83%82---just-fit.html',
'md5': '0480f1ef3932d901f0e0e719f188f19b',
'info_dict': {
'id': '47734',
'ext': 'flv',
'title': '高樹マリア - Just fit',
'uploader': 'dj7970',
'uploader_id': 'dj7970'
}
}, {
# html5 media
'url': 'http://www.thisav.com/video/242352/nerdy-18yo-big-ass-tattoos-and-glasses.html',
'md5': 'ba90c076bd0f80203679e5b60bf523ee',
'info_dict': {
'id': '242352',
'ext': 'mp4',
'title': 'Nerdy 18yo Big Ass Tattoos and Glasses',
'uploader': 'cybersluts',
'uploader_id': 'cybersluts',
},
}]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
webpage = self._download_webpage(url, video_id)
title = remove_end(self._html_search_regex(
r'<title>([^<]+)</title>', webpage, 'title'),
' - 視頻 - ThisAV.com-世界第一中文成人娛樂網站')
video_url = self._html_search_regex(
r"addVariable\('file','([^']+)'\);", webpage, 'video url', default=None)
if video_url:
info_dict = {
'formats': [{
'url': video_url,
}],
}
else:
entries = self._parse_html5_media_entries(url, webpage, video_id)
if entries:
info_dict = entries[0]
else:
info_dict = self._extract_jwplayer_data(
webpage, video_id, require_title=False)
uploader = self._html_search_regex(
r': <a href="http://www.thisav.com/user/[0-9]+/(?:[^"]+)">([^<]+)</a>',
webpage, 'uploader name', fatal=False)
uploader_id = self._html_search_regex(
r': <a href="http://www.thisav.com/user/[0-9]+/([^"]+)">(?:[^<]+)</a>',
webpage, 'uploader id', fatal=False)
info_dict.update({
'id': video_id,
'uploader': uploader,
'uploader_id': uploader_id,
'title': title,
})
return info_dict
|
gpl-2.0
|
indashnet/InDashNet.Open.UN2000
|
android/external/chromium_org/third_party/WebKit/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/memorizingfile.py
|
680
|
3709
|
#!/usr/bin/env python
#
# Copyright 2011, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Memorizing file.
A memorizing file wraps a file and memorizes lines read by readline.
"""
import sys
class MemorizingFile(object):
"""MemorizingFile wraps a file and memorizes lines read by readline.
Note that data read by other methods are not memorized. This behavior
is good enough for memorizing lines SimpleHTTPServer reads before
the control reaches WebSocketRequestHandler.
"""
def __init__(self, file_, max_memorized_lines=sys.maxint):
"""Construct an instance.
Args:
file_: the file object to wrap.
max_memorized_lines: the maximum number of lines to memorize.
Only the first max_memorized_lines are memorized.
Default: sys.maxint.
"""
self._file = file_
self._memorized_lines = []
self._max_memorized_lines = max_memorized_lines
self._buffered = False
self._buffered_line = None
def __getattribute__(self, name):
if name in ('_file', '_memorized_lines', '_max_memorized_lines',
'_buffered', '_buffered_line', 'readline',
'get_memorized_lines'):
return object.__getattribute__(self, name)
return self._file.__getattribute__(name)
def readline(self, size=-1):
"""Override file.readline and memorize the line read.
Note that even if size is specified and smaller than actual size,
the whole line will be read out from the underlying file object by
subsequent readline calls.
"""
if self._buffered:
line = self._buffered_line
self._buffered = False
else:
line = self._file.readline()
if line and len(self._memorized_lines) < self._max_memorized_lines:
self._memorized_lines.append(line)
if size >= 0 and size < len(line):
self._buffered = True
self._buffered_line = line[size:]
return line[:size]
return line
def get_memorized_lines(self):
"""Get lines memorized so far."""
return self._memorized_lines
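# Illustrative demo (hypothetical; mod_pywebsocket itself wraps the request stream,
# not a StringIO, and this block only runs when the module is executed directly):
if __name__ == '__main__':
    from StringIO import StringIO  # Python 2, consistent with sys.maxint above
    demo = MemorizingFile(StringIO('GET / HTTP/1.1\r\nHost: example.com\r\n\r\n'))
    demo.readline()                    # returns 'GET / HTTP/1.1\r\n'
    demo.readline()                    # returns 'Host: example.com\r\n'
    print(demo.get_memorized_lines())  # both lines were memorized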
# vi:sts=4 sw=4 et
|
apache-2.0
|
JinXinDeep/tensorflow
|
tensorflow/python/ops/numerics.py
|
11
|
2734
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Connects all float and double tensors to CheckNumericsOp."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
def verify_tensor_all_finite(t, msg, name=None):
"""Assert that the tensor does not contain any NaN's or Inf's.
Args:
t: Tensor to check.
msg: Message to log on failure.
name: A name for this operation (optional).
Returns:
Same tensor as `t`.
"""
with ops.op_scope([t], name, "VerifyFinite") as name:
t = ops.convert_to_tensor(t, name="t")
with ops.device(t.device or t.graph.get_default_device()):
verify_input = array_ops.check_numerics(t, message=msg)
out = control_flow_ops.with_dependencies([verify_input], t)
return out
def add_check_numerics_ops():
"""Connect a `check_numerics` to every floating point tensor.
`check_numerics` operations themselves are added for each `float` or `double`
tensor in the graph. For all ops in the graph, the `check_numerics` op for
all of its (`float` or `double`) inputs is guaranteed to run before the
`check_numerics` op on any of its outputs.
Returns:
A `group` op depending on all `check_numerics` ops added.
"""
check_op = []
# This code relies on the ordering of ops in get_operations().
# The consumer of a tensor always comes before that tensor's producer in
# this list. This is true because get_operations() returns ops in the order
# added, and ops can only be added once its inputs are added.
for op in ops.get_default_graph().get_operations():
for output in op.outputs:
if output.dtype in [dtypes.float32, dtypes.float64]:
message = op.name + ":" + str(output.value_index)
with ops.control_dependencies(check_op):
check_op = [array_ops.check_numerics(output, message=message)]
return control_flow_ops.group(*check_op)
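# Illustrative sketch (hypothetical; `loss`, `train_op` and the session usage are
# placeholders showing how pre-1.0 TensorFlow code might combine the two helpers above):
#
#   loss = verify_tensor_all_finite(loss, msg="loss contains NaN or Inf")
#   check_op = add_check_numerics_ops()
#   with tf.Session() as sess:
#       sess.run([train_op, check_op])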
|
apache-2.0
|
Laharah/deluge-FileBotTool
|
filebottool/gtkui/config_ui_gtk3.py
|
1
|
9943
|
from __future__ import absolute_import
__author__ = 'laharah'
from gi.repository import Gtk
import os
import time
import webbrowser
from twisted.internet import defer
from deluge.ui.client import client
import deluge.component as component
from filebottool.common import get_resource
from filebottool.common import LOG
from filebottool.gtkui.common_gtk3 import EditableList
from filebottool.gtkui.handler_editor_gtk3 import HandlerEditor
import filebottool.auto_sort
from . import user_messenger_gtk3 as user_messenger
SORT_OPERATORS = list(filebottool.auto_sort.OPERATOR_MAP.keys())
VALID_FIELDS = filebottool.auto_sort.VALID_FIELDS
FilterRule = filebottool.auto_sort.FilterRule
log = LOG
class ConfigUI(object):
"""handles the UI portion of getting and setting preferences"""
def __init__(self, settings=None):
self.builder = Gtk.Builder()
self.builder.add_from_file(get_resource("config.ui"))
self.config_page = self.builder.get_object("prefs_box")
self.pref_dialog = component.get("Preferences").pref_dialog
fb_icon = self.builder.get_object("fb_icon")
image = get_resource("fb_icon16.png")
fb_icon.set_from_file(image)
model = Gtk.ListStore(str)
view = self.builder.get_object('saved_handlers_listview')
renderer = Gtk.CellRendererText()
column = Gtk.TreeViewColumn("Profile Name", renderer, text=0)
view.append_column(column)
model.set_sort_column_id(0, Gtk.SortType.ASCENDING)
self.handlers_list = EditableList(view, model)
model = Gtk.ListStore(str, str, str, str)
view = self.builder.get_object('rule_listview')
options = [
("Field:", VALID_FIELDS),
("Comparison Operator:", SORT_OPERATORS),
]
for col_index, tup in enumerate(options):
name, items = tup
combo_model = Gtk.ListStore(str)
for item in items:
combo_model.append([item])
cb = build_combo_renderer_cb(model, col_index, items)
renderer = build_combo_cellrenderer(combo_model, cb)
column = Gtk.TreeViewColumn(name, renderer, text=col_index)
view.append_column(column)
renderer = Gtk.CellRendererText()
renderer.set_property("editable", True)
def text_edited(widget, path, text):
model[path][2] = text
renderer.connect("edited", text_edited)
column = Gtk.TreeViewColumn("Pattern to Match:", renderer, text=2)
view.append_column(column)
self.rules_list = EditableList(view, model)
self.builder.connect_signals({
"on_add_handler": self.on_add_handler,
"on_remove_handler": self.handlers_list.remove,
"on_edit_handler": self.on_edit_handler,
"on_move_rule_up": self.rules_list.move_up,
"on_move_rule_down": self.rules_list.move_down,
"on_remove_rule": self.rules_list.remove,
"on_add_rule": self.on_add_rule,
"on_auto_sort_help_clicked": self.on_auto_sort_help_clicked,
"on_debug_button_clicked": self.on_debug_button_clicked,
"on_license_button_clicked": self.on_license_button_clicked,
})
self.gather_time = None
if settings:
self.populate_settings(settings)
def populate_settings(self, settings):
"""populates the UI widgets with the given settings"""
# workaround for new settings being overwritten by previous settings
if self.gather_time:
if time.time() - self.gather_time < 1:
return
self.config = settings
self.saved_handlers = settings["saved_handlers"]
self.handlers_list.clear()
for name in self.saved_handlers:
self.handlers_list.add([name])
rules = settings["auto_sort_rules"]
if len(self.rules_list.view.get_columns()) == 4: # force refresh
self.rules_list.view.remove_column(self.rules_list.view.get_column(3))
self.rule_handler_combo = build_combo_cellrenderer(
self.handlers_list.model, self.on_rule_handler_combo_changed)
column_name = "Profile to Use:"
column = Gtk.TreeViewColumn(column_name, self.rule_handler_combo, text=3)
self.rules_list.view.append_column(column)
self.rules_list.clear()
for rule in rules:
self.rules_list.add(rule[1:])
for column in self.rules_list.view.get_columns():
column.set_sizing(Gtk.TreeViewColumnSizing.AUTOSIZE)
column.set_resizable(True)
if not rules:
for column in self.rules_list.view.get_columns():
column.set_expand(True)
def gather_settings(self):
"""
Updates the given config dictionary and updates the appropriate
settings.
"""
self.gather_time = time.time()
handlers = {}
for row in self.handlers_list.get_data():
handlers[row[0]] = self.saved_handlers[row[0]]
self.saved_handlers = handlers
self.config["saved_handlers"] = self.saved_handlers
rules = []
log.debug(self.rules_list.get_data())
for index, row in enumerate(self.rules_list.get_data()):
field, op, pat, handler = row
rules.append([index, field, op, pat, handler])
self.config['auto_sort_rules'] = rules
return self.config
#########
# Section: signal handlers
#########
def on_add_handler(self, widget):
def new_handler_cb(id, handlers):
self.handlers_list.add([id])
self.saved_handlers = handlers
log.debug(self.saved_handlers)
HandlerEditor(handlers=self.saved_handlers, cb=new_handler_cb,
parent=self.pref_dialog)
def on_edit_handler(self, widget):
handler_name = self.handlers_list.get_row()[0]
def edited_cb(id, handlers):
self.saved_handlers = handlers
if id != handler_name:
del self.saved_handlers[handler_name]
self.handlers_list.clear()
for name in self.saved_handlers:
self.handlers_list.add([name])
HandlerEditor(handlers=self.saved_handlers, initial=handler_name,
cb=edited_cb, parent=self.pref_dialog)
def on_add_rule(self, *args):
self.rules_list.add(['', "is exactly", '', ''])
path = self.rules_list.model.get_string_from_iter(self.rules_list.model[-1].iter)
self.rules_list.view.set_cursor(path)
def on_rule_handler_combo_changed(self, widget, path, text):
self.rules_list.model[path][3] = text
def on_auto_sort_help_clicked(self, *args):
webbrowser.open('https://github.com/Laharah/deluge-FileBotTool/wiki/Auto-Sorting',
new=2)
@defer.inlineCallbacks
def on_debug_button_clicked(self, button):
log.debug("Sending request for FileBot debug info...")
button.set_sensitive(False)
info = yield client.filebottool.get_filebot_debug()
log.debug("Displaying debug info")
dialog = user_messenger.UserMessenger()
dialog.display_text("Filebot Debug Info", info)
button.set_sensitive(True)
@defer.inlineCallbacks
def on_license_button_clicked(self, button):
log.debug("License button clicked.")
chooser = Gtk.FileChooserDialog(_("Choose your FileBot license file"),
None,
Gtk.FileChooserAction.OPEN,
buttons=(Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL, Gtk.STOCK_OPEN,
Gtk.ResponseType.OK))
chooser.set_transient_for(self.pref_dialog)
chooser.set_property("skip-taskbar-hint", True)
chooser.set_local_only(False)
file_filter = Gtk.FileFilter()
file_filter.set_name(_("FileBot license files"))
file_filter.add_pattern("*." + "psm")
chooser.add_filter(file_filter)
file_filter = Gtk.FileFilter()
file_filter.set_name(_("All files"))
file_filter.add_pattern("*")
chooser.add_filter(file_filter)
# Run the dialog
response = chooser.run()
if response == Gtk.ResponseType.OK:
license = chooser.get_filenames()[0]
else:
chooser.destroy()
return
chooser.destroy()
# License file should definitely be under 10K
size = os.stat(license).st_size
if size > 10*1000:
e = user_messenger.InfoDialog("Error", "License file is too big.")
e.resize(220, 125)
e.run_async()
defer.returnValue(None)
with open(license, 'rb') as l:
license_data = l.read()
log.debug("Sending license data to server.")
result = yield client.filebottool.activate_filebot_license(license_data)
log.debug("Recieved reply from server: %s", result)
if result.startswith("FilebotLicenseError: "):
title = "Error with License File"
msg = result[21:]
else:
title = "Success!"
msg = result
dialog = user_messenger.InfoDialog(title, msg)
dialog.resize(220, 125)
dialog.run_async()
#########
# Section: Utilities
#########
def build_combo_renderer_cb(list_store, column_number, allowed=None):
def cb(widget, path, text):
if allowed:
if text not in allowed:
return
log.debug('{0} {1} {2}'.format(widget, path, text))
list_store[path][column_number] = text
return cb
def build_combo_cellrenderer(model, cb):
renderer = Gtk.CellRendererCombo()
if model:
renderer.set_property("model", model)
renderer.set_property("editable", True)
renderer.set_property("text-column", 0)
renderer.connect("edited", cb)
return renderer
|
gpl-3.0
|
jank3/django
|
django/core/files/images.py
|
429
|
2428
|
"""
Utility functions for handling images.
Requires Pillow as you might imagine.
"""
import struct
import zlib
from django.core.files import File
class ImageFile(File):
"""
A mixin for use alongside django.core.files.base.File, which provides
additional features for dealing with images.
"""
def _get_width(self):
return self._get_image_dimensions()[0]
width = property(_get_width)
def _get_height(self):
return self._get_image_dimensions()[1]
height = property(_get_height)
def _get_image_dimensions(self):
if not hasattr(self, '_dimensions_cache'):
close = self.closed
self.open()
self._dimensions_cache = get_image_dimensions(self, close=close)
return self._dimensions_cache
def get_image_dimensions(file_or_path, close=False):
"""
Returns the (width, height) of an image, given an open file or a path. Set
'close' to True to close the file at the end if it is initially in an open
state.
"""
from PIL import ImageFile as PillowImageFile
p = PillowImageFile.Parser()
if hasattr(file_or_path, 'read'):
file = file_or_path
file_pos = file.tell()
file.seek(0)
else:
file = open(file_or_path, 'rb')
close = True
try:
# Most of the time Pillow only needs a small chunk to parse the image
# and get the dimensions, but with some TIFF files Pillow needs to
# parse the whole file.
chunk_size = 1024
while 1:
data = file.read(chunk_size)
if not data:
break
try:
p.feed(data)
except zlib.error as e:
# ignore zlib complaining on truncated stream, just feed more
# data to parser (ticket #19457).
if e.args[0].startswith("Error -5"):
pass
else:
raise
except struct.error:
# Ignore PIL failing on a too short buffer when reads return
                # fewer bytes than expected. Skip and feed more data to the
# parser (ticket #24544).
pass
if p.image:
return p.image.size
chunk_size *= 2
return (None, None)
finally:
if close:
file.close()
else:
file.seek(file_pos)
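# Illustrative usage sketch (not part of Django itself); assumes Pillow is
# installed and that "photo.jpg" is an existing image file:
#
#     from django.core.files.images import ImageFile, get_image_dimensions
#
#     width, height = get_image_dimensions("photo.jpg")
#
#     with open("photo.jpg", "rb") as f:
#         image = ImageFile(f)
#         print(image.width, image.height)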
|
bsd-3-clause
|
rmfitzpatrick/ansible
|
test/units/modules/packaging/os/test_apk.py
|
137
|
1157
|
from ansible.compat.tests import mock
from ansible.compat.tests import unittest
from ansible.modules.packaging.os import apk
class TestApkQueryLatest(unittest.TestCase):
def setUp(self):
self.module_names = [
'bash',
'g++',
]
@mock.patch('ansible.modules.packaging.os.apk.AnsibleModule')
def test_not_latest(self, mock_module):
apk.APK_PATH = ""
for module_name in self.module_names:
command_output = module_name + '-2.0.0-r1 < 3.0.0-r2 '
mock_module.run_command.return_value = (0, command_output, None)
command_result = apk.query_latest(mock_module, module_name)
self.assertFalse(command_result)
@mock.patch('ansible.modules.packaging.os.apk.AnsibleModule')
def test_latest(self, mock_module):
apk.APK_PATH = ""
for module_name in self.module_names:
command_output = module_name + '-2.0.0-r1 = 2.0.0-r1 '
mock_module.run_command.return_value = (0, command_output, None)
command_result = apk.query_latest(mock_module, module_name)
self.assertTrue(command_result)
|
gpl-3.0
|
kaedroho/django
|
django/db/migrations/operations/utils.py
|
2
|
3791
|
from collections import namedtuple
from django.db.models.fields.related import RECURSIVE_RELATIONSHIP_CONSTANT
def resolve_relation(model, app_label=None, model_name=None):
"""
    Turn a model class or model reference string into a model tuple.
    app_label and model_name are used to resolve the scope of recursive and
    unscoped model relationships.
"""
if isinstance(model, str):
if model == RECURSIVE_RELATIONSHIP_CONSTANT:
if app_label is None or model_name is None:
raise TypeError(
'app_label and model_name must be provided to resolve '
'recursive relationships.'
)
return app_label, model_name
if '.' in model:
return tuple(model.lower().split('.', 1))
if app_label is None:
raise TypeError(
'app_label must be provided to resolve unscoped model '
'relationships.'
)
return app_label, model.lower()
return model._meta.app_label, model._meta.model_name
FieldReference = namedtuple('FieldReference', 'to through')
def field_references(
model_tuple,
field,
reference_model_tuple,
reference_field_name=None,
reference_field=None,
):
"""
Return either False or a FieldReference if `field` references provided
context.
False positives can be returned if `reference_field_name` is provided
without `reference_field` because of the introspection limitation it
incurs. This should not be an issue when this function is used to determine
whether or not an optimization can take place.
"""
remote_field = field.remote_field
if not remote_field:
return False
references_to = None
references_through = None
if resolve_relation(remote_field.model, *model_tuple) == reference_model_tuple:
to_fields = getattr(field, 'to_fields', None)
if (
reference_field_name is None or
# Unspecified to_field(s).
to_fields is None or
# Reference to primary key.
(None in to_fields and (reference_field is None or reference_field.primary_key)) or
# Reference to field.
reference_field_name in to_fields
):
references_to = (remote_field, to_fields)
through = getattr(remote_field, 'through', None)
if through and resolve_relation(through, *model_tuple) == reference_model_tuple:
through_fields = remote_field.through_fields
if (
reference_field_name is None or
# Unspecified through_fields.
through_fields is None or
# Reference to field.
reference_field_name in through_fields
):
references_through = (remote_field, through_fields)
if not (references_to or references_through):
return False
return FieldReference(references_to, references_through)
def get_references(state, model_tuple, field_tuple=()):
"""
Generator of (model_state, index, name, field, reference) referencing
provided context.
If field_tuple is provided only references to this particular field of
model_tuple will be generated.
"""
for state_model_tuple, model_state in state.models.items():
for index, (name, field) in enumerate(model_state.fields):
reference = field_references(state_model_tuple, field, model_tuple, *field_tuple)
if reference:
yield model_state, index, name, field, reference
def field_is_referenced(state, model_tuple, field_tuple):
"""Return whether `field_tuple` is referenced by any state models."""
return next(get_references(state, model_tuple, field_tuple), None) is not None
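# Illustrative sketch (not part of Django): resolve_relation() normalizes the
# different ways a relation target can be spelled into an
# (app_label, model_name) tuple. The 'polls.Question' names are hypothetical.
#
#     resolve_relation('polls.Question')               # -> ('polls', 'question')
#     resolve_relation('Question', app_label='polls')  # -> ('polls', 'question')
#     resolve_relation('self', app_label='polls',
#                      model_name='question')          # -> ('polls', 'question')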
|
bsd-3-clause
|
wdwvt1/qiime
|
scripts/merge_otu_tables.py
|
15
|
2246
|
#!/usr/bin/env python
# File created on 30 Aug 2010
from __future__ import division
__author__ = "Greg Caporaso"
__copyright__ = "Copyright 2011, The QIIME Project"
__credits__ = ["Greg Caporaso", "Daniel McDonald"]
__license__ = "GPL"
__version__ = "1.9.1-dev"
__maintainer__ = "Greg Caporaso"
__email__ = "[email protected]"
from biom import load_table
from qiime.util import (parse_command_line_parameters, make_option,
write_biom_table, get_options_lookup)
options_lookup = get_options_lookup()
script_info = {}
script_info['brief_description'] = ("Merge two or more OTU tables into a "
"single OTU table.")
script_info['script_description'] = """This script merges two or more OTU tables into a single OTU table. This is useful, for example, when you've created several reference-based OTU tables for different analyses and need to combine them for a larger analysis.
Requirements: It is also very important that your OTUs are consistent across the different OTU tables. For example, you cannot safely merge OTU tables from two independent de novo OTU picking runs. Finally, either all or none of the OTU tables can contain taxonomic information: you can't merge some OTU tables with taxonomic data and some without taxonomic data."""
script_info['script_usage'] = [
("",
"Merge two OTU tables into a single OTU table",
"%prog -i otu_table1.biom,otu_table2.biom -o merged_otu_table.biom")]
script_info['output_description'] = ""
script_info['required_options'] = [
# Example required option
make_option('-i', '--input_fps', type='existing_filepaths',
help='the otu tables in biom format (comma-separated)'),
make_option('-o', '--output_fp', type='new_filepath',
help='the output otu table filepath'),
]
script_info['optional_options'] = []
script_info['version'] = __version__
def main():
option_parser, opts, args = parse_command_line_parameters(**script_info)
input_fps = opts.input_fps
master = load_table(input_fps[0])
for input_fp in input_fps[1:]:
master = master.merge(load_table(input_fp))
write_biom_table(master, opts.output_fp)
if __name__ == "__main__":
main()
|
gpl-2.0
|
tst-eclamar/earthenterprise
|
earth_enterprise/src/server/wsgi/search/plugin/example_search_handler.py
|
5
|
14884
|
#!/usr/bin/env python2.7
#
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for implementing the Example search."""
import cgi
import os
from string import Template
import psycopg2
from psycopg2.pool import ThreadedConnectionPool
from search.common import exceptions
from search.common import geconstants
from search.common import utils
class ExampleSearch(object):
"""Class for performing the Example search.
Example search is the neighborhood search that demonstrates
  how to construct and query a spatial database based on the URL
  search string, extract geometries from the result, associate
  various styles with them, and return the response back to the client.
Valid Inputs are:
q=pacific heights
neighborhood=pacific heights
"""
def __init__(self):
"""Inits ExampleSearch.
Initializes the logger "ge_search".
    Initializes the KML, JSON, and placemark templates
    used for the KML/JSONP output.
Initializes parameters for establishing a connection to the database.
"""
self.utils = utils.SearchUtils()
constants = geconstants.Constants()
configs = self.utils.GetConfigs(
os.path.join(geconstants.SEARCH_CONFIGS_DIR, "ExampleSearch.conf"))
style_template = self.utils.style_template
self._jsonp_call = self.utils.jsonp_functioncall
self._geom = """
<name>%s</name>
<styleUrl>%s</styleUrl>
<Snippet>%s</Snippet>
<description>%s</description>
%s\
"""
self._json_geom = """
{
"name": "%s",
"Snippet": "%s",
"description": "%s",
%s
}\
"""
self._placemark_template = self.utils.placemark_template
self._kml_template = self.utils.kml_template
self._json_template = self.utils.json_template
self._json_placemark_template = self.utils.json_placemark_template
self._example_query_template = (
Template(constants.example_query))
self.logger = self.utils.logger
self._user = configs.get("user")
self._hostname = configs.get("host")
self._port = configs.get("port")
self._database = configs.get("databasename")
if not self._database:
self._database = constants.defaults.get("example.database")
self._pool = ThreadedConnectionPool(
int(configs.get("minimumconnectionpoolsize")),
int(configs.get("maximumconnectionpoolsize")),
database=self._database,
user=self._user,
host=self._hostname,
port=int(self._port))
self._style = style_template.substitute(
balloonBgColor=configs.get("balloonstyle.bgcolor"),
balloonTextColor=configs.get("balloonstyle.textcolor"),
balloonText=configs.get("balloonstyle.text"),
iconStyleScale=configs.get("iconstyle.scale"),
iconStyleHref=configs.get("iconstyle.href"),
lineStyleColor=configs.get("linestyle.color"),
lineStyleWidth=configs.get("linestyle.width"),
polyStyleColor=configs.get("polystyle.color"),
polyStyleColorMode=configs.get("polystyle.colormode"),
polyStyleFill=configs.get("polystyle.fill"),
polyStyleOutline=configs.get("polystyle.outline"),
listStyleHref=configs.get("iconstyle.href"))
def RunPGSQLQuery(self, query, params):
"""Submits the query to the database and returns tuples.
Note: variables placeholder must always be %s in query.
    Warning: NEVER use Python string concatenation (+) or string parameter
    interpolation (%) to pass variables to a SQL query string.
e.g.
SELECT vs_url FROM vs_table WHERE vs_name = 'default_ge';
query = "SELECT vs_url FROM vs_table WHERE vs_name = %s"
parameters = ["default_ge"]
Args:
query: SQL SELECT statement.
params: sequence of parameters to populate into placeholders.
Returns:
Results as list of tuples (rows of fields).
Raises:
psycopg2.Error/Warning in case of error.
"""
con = None
cursor = None
query_results = []
query_status = False
self.logger.debug("Querying the database %s, at port %s, as user %s on"
"hostname %s" % (self._database, self._port, self._user,
self._hostname))
try:
con = self._pool.getconn()
if con:
cursor = con.cursor()
cursor.execute(query, params)
for row in cursor:
if len(row) == 1:
query_results.append(row[0])
else:
query_results.append(row)
query_status = True
except psycopg2.pool.PoolError as e:
self.logger.error("Exception while querying the database %s, %s",
self._database, e)
raise exceptions.PoolConnectionException(
"Pool Error - Unable to get a connection from the pool.")
except psycopg2.Error as e:
self.logger.error("Exception while querying the database %s, %s",
self._database, e)
finally:
if con:
self._pool.putconn(con)
return query_status, query_results
def RunExampleSearch(self, search_query, response_type):
"""Performs a query search on the 'san_francisco_neighborhoods' table.
Args:
      search_query: the query to be searched, in lowercase.
      response_type: Response type can be KML or JSONP, depending on the client.
Returns:
tuple containing
total_example_results: Total number of rows returned from
querying the database.
example_results: Query results as a list
"""
example_results = []
params = ["%" + entry + "%" for entry in search_query.split(",")]
accum_func = self.utils.GetAccumFunc(response_type)
example_query = self._example_query_template.substitute(FUNC=accum_func)
query_status, query_results = self.RunPGSQLQuery(example_query, params)
total_example_results = len(query_results)
if query_status:
for entry in xrange(total_example_results):
results = {}
name = query_results[entry][4]
snippet = query_results[entry][3]
styleurl = "#placemark_label"
description = ("The total area in decimal degrees of " +
query_results[entry][4] + " is: " +
str(query_results[entry][1]) + "<![CDATA[<br/>]]>")
description += ("The total perimeter in decimal degrees of " +
query_results[entry][4] + " is: " +
str(query_results[entry][2]))
geom = str(query_results[entry][0])
results["name"] = name
results["snippet"] = snippet
results["styleurl"] = styleurl
results["description"] = description
results["geom"] = geom
results["geom_type"] = str(query_results[entry][5])
example_results.append(results)
return total_example_results, example_results
def ConstructKMLResponse(self, search_results, original_query):
"""Prepares KML response.
KML response has the below format:
<kml>
<Folder>
<name/>
<StyleURL>
---
</StyleURL>
<Point>
<coordinates/>
</Point>
</Folder>
</kml>
Args:
search_results: Query results from the searchexample database
original_query: Search query as entered by the user
Returns:
kml_response: KML formatted response
"""
search_placemarks = ""
kml_response = ""
lookat_info = ""
set_first_element_lookat = True
    # The folder name should include the query parameter (q) only if
    # 'displayKeys' is present in the URL.
if self.display_keys_string:
folder_name = ("Grouped results:<![CDATA[<br/>]]>%s (%s)"
% (original_query, str(len(search_results))))
else:
folder_name = ("Grouped results:<![CDATA[<br/>]]> (%s)"
% (str(len(search_results))))
fly_to_first_element = str(self.fly_to_first_element).lower() == "true"
for result in search_results:
geom = ""
placemark = ""
geom = self._geom % (
result["name"],
result["styleurl"],
result["snippet"],
result["description"],
result["geom"])
# Add <LookAt> for POINT geometric types only.
# TODO: Check if <LookAt> can be added for
# LINESTRING and POLYGON types.
if result["geom_type"] != "POINT":
set_first_element_lookat = False
if fly_to_first_element and set_first_element_lookat:
lookat_info = self.utils.GetLookAtInfo(result["geom"])
set_first_element_lookat = False
placemark = self._placemark_template.substitute(geom=geom)
search_placemarks += placemark
kml_response = self._kml_template.substitute(
foldername=folder_name,
style=self._style,
lookat=lookat_info,
placemark=search_placemarks)
self.logger.info("KML response successfully formatted")
return kml_response
def ConstructJSONPResponse(self, search_results, original_query):
"""Prepares JSONP response.
{
"Folder": {
"name": "Latitude X Longitude Y",
"Placemark": {
"Point": {
"coordinates": "X,Y" } }
}
}
Args:
search_results: Query results from the searchexample table
original_query: Search query as entered by the user
Returns:
jsonp_response: JSONP formatted response
"""
search_placemarks = ""
search_geoms = ""
geoms = ""
json_response = ""
jsonp_response = ""
folder_name = ("Grouped results:<![CDATA[<br/>]]>%s (%s)"
% (original_query, str(len(search_results))))
for count, result in enumerate(search_results):
geom = ""
geom = self._json_geom % (
result["name"],
result["snippet"],
result["description"],
result["geom"][1:-1])
      if count < (len(search_results) - 1):
geom += ","
geoms += geom
if len(search_results) == 1:
search_geoms = geoms
else:
search_geoms = "["+ geoms +"]"
search_placemarks = self._json_placemark_template.substitute(
geom=search_geoms)
json_response = self._json_template.substitute(
foldername=folder_name, json_placemark=search_placemarks)
# Escape single quotes from json_response.
json_response = json_response.replace("'", "\\'")
jsonp_response = self._jsonp_call % (self.f_callback, json_response)
self.logger.info("JSONP response successfully formatted")
return jsonp_response
def HandleSearchRequest(self, environ):
"""Fetches the search tokens from form and performs the example search.
Args:
environ: A list of environment variables as supplied by the
WSGI interface to the example search application interface.
Returns:
search_results: A KML/JSONP formatted string which contains search results.
"""
search_results = ""
search_status = False
# Fetch all the attributes provided by the user.
parameters = self.utils.GetParameters(environ)
response_type = self.utils.GetResponseType(environ)
# Retrieve the function call back name for JSONP response.
self.f_callback = self.utils.GetCallback(parameters)
original_query = self.utils.GetValue(parameters, "q")
# Fetch additional query parameters 'flyToFirstElement' and
# 'displayKeys' from URL.
self.fly_to_first_element = self.utils.GetValue(
parameters, "flyToFirstElement")
self.display_keys_string = self.utils.GetValue(
parameters, "displayKeys")
if not original_query:
# Extract 'neighborhood' parameter from URL
try:
form = cgi.FieldStorage(fp=environ["wsgi.input"], environ=environ)
original_query = form.getvalue("neighborhood")
except AttributeError as e:
self.logger.debug("Error in neighborhood query %s" % e)
if original_query:
(search_status, search_results) = self.DoSearch(
original_query, response_type)
else:
self.logger.debug("Empty or incorrect search query received")
if not search_status:
folder_name = "No results were returned."
search_results = self.utils.NoSearchResults(
folder_name, self._style, response_type, self.f_callback)
return (search_results, response_type)
def DoSearch(self, original_query, response_type):
"""Performs the example search and returns the results.
Args:
original_query: A string containing the search query as
entered by the user.
response_type: Response type can be KML or JSONP, depending on the client.
Returns:
tuple containing
search_status: Whether search could be performed.
search_results: A KML/JSONP formatted string which contains search results.
"""
search_status = False
search_results = ""
query_results = ""
total_results = 0
search_query = original_query.strip().lower()
if len(search_query.split(",")) > 2:
self.logger.warning("Extra search parameters ignored:%s"
% (",".join(search_query.split(",")[2:])))
search_query = ",".join(search_query.split(",")[:2])
original_query = ",".join(original_query.split(",")[:2])
total_results, query_results = self.RunExampleSearch(
search_query, response_type)
self.logger.info("example search returned %s results"
% total_results)
if total_results > 0:
if response_type == "KML":
search_results = self.ConstructKMLResponse(
query_results, original_query)
search_status = True
elif response_type == "JSONP":
search_results = self.ConstructJSONPResponse(
query_results, original_query)
search_status = True
else:
# This condition may not occur,
# as response_type is either KML or JSONP
self.logger.debug("Invalid response type %s" % response_type)
return search_status, search_results
def __del__(self):
"""Closes the connection pool created in __init__.
"""
self._pool.closeall()
def main():
expobj = ExampleSearch()
expobj.DoSearch("pacific heights", "KML")
if __name__ == "__main__":
main()
|
apache-2.0
|
catapult-project/catapult
|
tracing/tracing/extras/symbolizer/symbolize_trace.py
|
4
|
63468
|
#!/usr/bin/env python
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# pylint: disable=too-many-lines
"""
This script processes trace files and symbolizes stack frames generated by
Chrome's native heap profiler. This script assumes that the Chrome binary
referenced in the trace contains symbols, and is the same binary used to emit
the trace.
=== Overview ===
Trace file is essentially a giant JSON array of dictionaries (events).
Events have some predefined keys (e.g. 'pid'), but otherwise are free to
have anything inside. Trace file contains events from all Chrome processes
that were sampled during tracing period.
This script cares only about memory dump events generated with memory-infra
category enabled.
When Chrome native heap profiling is enabled, some memory dump events
include the following extra information:
* (Per allocator) Information about live allocations at the moment of the
memory dump (the information includes backtraces, types / categories,
sizes, and counts of allocations). There are several allocators in
Chrome: e.g. malloc, blink_gc, partition_alloc.
* (Per process) Stack frame tree of all functions that called allocators
above.
This script does the following:
1. Parses the given trace file (loads JSON).
2. Finds memory dump events and parses stack frame tree for each process.
3. Finds stack frames that have PC addresses instead of function names.
4. Symbolizes PCs and modifies loaded JSON.
5. Writes modified JSON back to the file.
The script supports trace files from the following platforms:
* Android (the script itself must be run on Linux)
* Linux
* macOS
* Windows
Important note - the script doesn't check that it symbolizes the same binaries
that were used at the time the trace was taken. I.e. if you take a trace, then
change and rebuild the Chrome binaries, the script will blindly use the new
binaries.
=== Details ===
There are two formats of heap profiler information: legacy and modern. The
main differences relevant to this script are:
* In the modern format the stack frame tree, type name mapping, and string
mapping nodes are dumped incrementally. These nodes are dumped in each
memory dump event and carry updates that occurred since the last event.
For example, let's say that when the first memory dump event is generated
we only know about a function foo() (called from main()) allocating objects
of type "int":
{
"args": {
"dumps": {
"heaps_v2": {
"maps": {
"nodes": [
{ "id": 1, "name_sid": 1 },
{ "id": 2, "parent": 1, "name_sid": 3 },
],
"types": [
{ "id": 1, "name_sid": 2 },
],
"strings": [
{ "id": 1, "string": "main()" },
{ "id": 2, "string": "int" },
{ "id": 3, "string": "foo()" },
]
},
"allocators": { ...live allocations per allocator... },
...
},
...
}
},
...
}
Here:
* 'nodes' node encodes stack frame tree
* 'types' node encodes type name mappings
* 'strings' node encodes string mapping (explained below)
Then, by the time the second memory dump event is generated, we learn about
bar() (called from main()), which also allocated "int" objects. Only the
new information is dumped, i.e. the bar() stack frame:
{
"args": {
"dumps": {
"heaps_v2": {
"maps": {
"nodes": [
{ "id": 2, "parent": 1, "name_sid": 4 },
],
"types": [],
"strings": [
{ "id": 4, "string": "bar()" },
]
},
"allocators": { ...live allocations per allocator... },
...
},
...
}
},
...
}
Note that the 'types' node is empty, since there were no updates. All three
nodes ('nodes', 'types', and 'strings') can be empty if there were no updates
to them.
For simplicity, when the script updates incremental nodes, it puts updated
content in the first node, and clears all others. I.e. the following stack
frame nodes:
'nodes': [
{ "id": 1, "name_sid": 1 },
{ "id": 2, "parent": 1, "name_sid": 2 },
]
'nodes': [
{ "id": 3, "parent": 2, "name_sid": 3 },
]
'nodes': [
{ "id": 4, "parent": 3, "name_sid": 4 },
{ "id": 5, "parent": 1, "name_sid": 5 },
]
After symbolization they are written as:
'nodes': [
{ "id": 1, "name_sid": 1 },
{ "id": 2, "parent": 1, "name_sid": 2 },
{ "id": 3, "parent": 2, "name_sid": 3 },
{ "id": 4, "parent": 3, "name_sid": 4 },
{ "id": 5, "parent": 1, "name_sid": 5 },
]
'nodes': []
'nodes': []
* In contrast, in the legacy format stack frame tree and type mappings are
dumped separately from memory dump events, once per process.
   Here is how a trace file with two memory dump events looks in the
   legacy format:
{
"args": {
"dumps": {
"heaps": { ...live allocations per allocator... },
...
}
},
...
}
{
"args": {
"dumps": {
"heaps": { ...live allocations per allocator... },
...
}
},
...
}
{
"args": {
"typeNames": {
1: "int",
}
},
"cat": "__metadata",
"name": "typeNames",
...
}
{
"args": {
"stackFrames": {
1: { "name": "main" },
2: { "name": "foo", "parent": 1 },
3: { "name": "bar", "parent": 1 },
}
},
"cat": "__metadata",
"name": "stackFrames",
...
}
* Another change in the modern format is 'strings' node, which was added
to deduplicate stack frame names (mainly for trace file size reduction).
For consistency 'types' node also uses string mappings.
See crbug.com/708930 for more information about the modern format.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import bisect
import collections
import gzip
import json
import ntpath
import os
import re
import shutil
import subprocess
import sys
import tarfile
import tempfile
import zipfile
import py_utils.cloud_storage as cloud_storage
import six
from six.moves import range # pylint: disable=redefined-builtin
import symbols.elf_symbolizer as elf_symbolizer
from tracing.extras.symbolizer import symbolize_trace_atos_regex
from tracing.extras.symbolizer import symbolize_trace_macho_reader
_UNNAMED_FILE = 'unnamed'
class NodeWrapper(object):
"""Wraps an event data node(s).
A node is a reference into a trace event JSON. Wrappers parse nodes to
provide convenient APIs and update nodes when asked to propagate changes
back (see ApplyModifications() below).
Here is an example of legacy metadata event that contains stack frame tree:
{
"args": {
"stackFrames": { ... }
},
"cat": "__metadata",
"name": "stackFrames",
"ph": "M",
...
}
When this event is encountered, a reference to the "stackFrames" dictionary
is obtained and passed down to a specific wrapped class, which knows how to
parse / update the dictionary.
There are two parsing patterns depending on whether node is serialized
incrementally:
* If node is not incremental, then parsing is done by __init__(),
see MemoryMap for an example.
* If node is incremental, then __init__() does nothing, and instead
ParseNext() method is called when next node (from a next event) is
encountered.
Some wrappers can also modify nodes they parsed. In such cases they have
additional APIs:
* 'modified' flag, which indicates whether the wrapper was changed.
* 'ApplyModifications' method, which propagates changes made to the wrapper
back to nodes. Successful invocation of ApplyModifications() resets
'modified' flag.
"""
pass
class MemoryMap(NodeWrapper):
"""Wraps 'process_mmaps' node.
'process_mmaps' node contains information about file mappings.
"process_mmaps": {
"vm_regions": [
{
"mf": "<file_path>",
"sa": "<start_address>",
"sz": "<size>",
...
},
...
]
}
"""
class Region(object):
def __init__(self, start_address, size, file_path, file_offset):
self._start_address = start_address
self._size = size
self._file_path = file_path if file_path else _UNNAMED_FILE
self._file_offset = file_offset
self._code_id = None
@property
def start_address(self):
return self._start_address
@property
def end_address(self):
return self._start_address + self._size
@property
def size(self):
return self._size
@property
def code_id(self):
return self._code_id
@property
def file_path(self):
return self._file_path
@property
def file_offset(self):
return self._file_offset
@file_offset.setter
def file_offset(self, value):
self._file_offset = value
def __cmp__(self, other):
if isinstance(other, type(self)):
other_start_address = other._start_address
elif isinstance(other, six.integer_types):
other_start_address = other
else:
raise Exception('Cannot compare with %s' % type(other))
if self._start_address < other_start_address:
return -1
elif self._start_address > other_start_address:
return 1
else:
return 0
def __repr__(self):
return 'Region(0x{:X} - 0x{:X}, {})'.format(
self.start_address, self.end_address, self.file_path)
def __init__(self, process_mmaps_node, process):
regions = []
for region_node in process_mmaps_node['vm_regions']:
file_offset = int(region_node['fo'], 16) if 'fo' in region_node else 0
file_path = region_node['mf'].replace(" (deleted)", "")
region = self.Region(
int(region_node['sa'], 16), int(region_node['sz'], 16), file_path,
file_offset)
# Keep track of code-identifier when present.
if 'ts' in region_node and 'sz' in region_node:
region._code_id = '%08X%X' % (int(region_node['ts'], 16), region.size)
regions.append(region)
regions.sort()
# Iterate through the regions in order. If two regions border each other,
# and have the same file_path [or at least one of them is unnamed], but the
# latter region has file_offset == 0, then set the file_offset of the latter
# region to be former_region.file_offset + former_region.size.
#
# Rationale: Semantically, we want file_offset to be the distance between
# the base address of the region and the base address of the module [which
# breakpad symbols use as a relative-zero]. Technically, this is called
# slide (macOS) and load bias (ELF). See
# https://chromium-review.googlesource.com/c/chromium/src/+/568413#message-01cf829007882eea8c9d3403871814c4f336d16d
# for more details.
# Chrome does not emit slide or load bias. This usually doesn't make a
# difference because the TEXT segment usually has a slide or load bias of 0.
# In the rare cases that it doesn't [observed on Chrome Linux official
# builds], this heuristic correctly computes it.
#
# This hack relies on the assumption that all regions with the same name are
# mapped from the same file. Each region's file_offset should be computed
# based on the first region's base address.
last_region_with_file_path = {}
for region in regions:
# If the file was mapped from apk, the start address of first mapping
# cannot be used since other files than library can be mapped from apk.
# Use the start address provided by metadata in these cases. See
# https://crbug.com/927357.
if 'base.apk' in region.file_path and process.library_start_address > 0:
region.file_offset = (
region.start_address - process.library_start_address)
elif (region.file_path in last_region_with_file_path and
region.file_offset == 0):
region.file_offset = (
region.start_address -
last_region_with_file_path[region.file_path].start_address)
if (region.file_path and
region.file_path not in last_region_with_file_path):
last_region_with_file_path[region.file_path] = region
# Copy regions without duplicates and check for overlaps.
self._regions = []
previous_region = None
for region in regions:
if previous_region is not None:
if region == previous_region:
continue
if region.start_address < previous_region.end_address:
print('Regions {} and {} overlap.'.format(previous_region, region))
previous_region = region
self._regions.append(region)
@property
def regions(self):
return self._regions
def FindRegion(self, address):
"""Finds region containing |address|. Returns None if none found."""
region_index = bisect.bisect_right(self._regions, address) - 1
if region_index >= 0:
region = self._regions[region_index]
if address >= region.start_address and address < region.end_address:
return region
return None
class UnsupportedHeapDumpVersionError(Exception):
"""Helper exception class to signal unsupported heap dump version."""
def __init__(self, version):
message = 'Unsupported heap dump version: {}'.format(version)
super(UnsupportedHeapDumpVersionError, self).__init__(message)
class StringMap(NodeWrapper):
"""Wraps all 'strings' nodes for a process.
'strings' node contains incremental mappings between integer ids and strings.
"strings": [
{
"id": <string_id>,
"string": <string>
},
...
]
"""
def __init__(self):
self._modified = False
self._strings_nodes = []
self._string_by_id = {}
self._id_by_string = {}
self._max_string_id = 0
@property
def modified(self):
"""Returns True if the wrapper was modified (see NodeWrapper)."""
return self._modified
@property
def string_by_id(self):
return self._string_by_id
def ParseNext(self, heap_dump_version, strings_node):
"""Parses and interns next node (see NodeWrapper)."""
if heap_dump_version != Trace.HEAP_DUMP_VERSION_1:
raise UnsupportedHeapDumpVersionError(heap_dump_version)
self._strings_nodes.append(strings_node)
for string_node in strings_node:
self._Insert(string_node['id'], string_node['string'])
def Clear(self):
"""Clears all string mappings."""
if self._string_by_id:
self._modified = True
self._string_by_id = {}
self._id_by_string = {}
self._max_string_id = 0
def AddString(self, string):
"""Adds a string (if it doesn't exist) and returns its integer id."""
string_id = self._id_by_string.get(string)
if string_id is None:
string_id = self._max_string_id + 1
self._Insert(string_id, string)
self._modified = True
return string_id
def ApplyModifications(self):
"""Propagates modifications back to nodes (see NodeWrapper)."""
if not self.modified:
return
assert self._strings_nodes, 'no nodes'
# Serialize into the first node, and clear all others.
for strings_node in self._strings_nodes:
del strings_node[:]
strings_node = self._strings_nodes[0]
for string_id, string in self._string_by_id.items():
strings_node.append({'id': string_id, 'string': string})
self._modified = False
def _Insert(self, string_id, string):
self._id_by_string[string] = string_id
self._string_by_id[string_id] = string
self._max_string_id = max(self._max_string_id, string_id)
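# Illustrative sketch (not part of the original script) of how StringMap is
# driven: ParseNext() interns incremental 'strings' nodes, AddString() hands
# out ids for new names, and ApplyModifications() serializes the merged
# mapping back into the first parsed node.
#
#     strings_node = [{'id': 1, 'string': 'main()'}]
#     string_map = StringMap()
#     string_map.ParseNext(Trace.HEAP_DUMP_VERSION_1, strings_node)
#     new_id = string_map.AddString('foo()')  # -> 2
#     string_map.ApplyModifications()         # strings_node now holds both entries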
class TypeNameMap(NodeWrapper):
"""Wraps all 'types' nodes for a process.
'types' nodes encode mappings between integer type ids and integer
string ids (from 'strings' nodes).
"types": [
{
"id": <type_id>,
"name_sid": <name_string_id>
}
...
]
For simplicity string ids are translated into strings during parsing,
and then translated back to ids in ApplyModifications().
"""
class Type(object):
"""Holds information used to populate the "object type" field.
We store type id and type name. If "--frame-as-object-type" is enabled,
we use PCs as type names, which can be resolved later.
"""
def __init__(self, type_id, name):
self._modified = False
self._id = type_id
self._pc = self._ParsePC(name)
self._name = name
@property
def modified(self):
"""Returns True if the type was modified."""
return self._modified
@property
def id(self):
"""Type id (integer)."""
return self._id
@property
def name(self):
"""Name of the type."""
return self._name
@name.setter
def name(self, value):
"""Changes the name."""
self._modified = True
self._name = value
@property
def pc(self):
"""Parsed (integer) PC of the type node if frame_as_object_type is
enabled, or None."""
return self._pc
_PC_TAG = 'pc:'
def _ParsePC(self, name):
if not name.startswith(self._PC_TAG):
return None
return int(name[len(self._PC_TAG):], 16)
def _ClearModified(self):
self._modified = False
def __init__(self):
self._modified = False
self._type_name_nodes = []
self._type_by_id = {}
self._id_by_name = {}
self._max_type_id = 0
@property
def modified(self):
"""Returns True if the wrapper was modified (see NodeWrapper) or if the
object type was overwritten."""
return (self._modified or
any(t.modified for t in self._type_by_id.values()))
@property
def type_by_id(self):
"""Returns {id -> Type} dict (must not be changed directly)."""
return self._type_by_id
def ParseNext(self, heap_dump_version, type_name_node, string_map):
"""Parses and interns next node (see NodeWrapper).
|string_map| - A StringMap object to use to translate string ids
to strings.
"""
if heap_dump_version != Trace.HEAP_DUMP_VERSION_1:
raise UnsupportedHeapDumpVersionError(heap_dump_version)
self._type_name_nodes.append(type_name_node)
for type_node in type_name_node:
self._Insert(type_node['id'],
string_map.string_by_id[type_node['name_sid']])
def ParseNextWithPCAsTypeName(self, heap_dump_version, type_name_node,
frame_by_id, alloc_types, alloc_nodes):
"""Set the corresponding PCs from frame as object type.
    This should be used when "--frame-as-object-type" is enabled. Currently
    this is only supported when is_cast is set.
Args:
alloc_types: A list of type_id which is one-to-one corresponding
to alloc_nodes.
alloc_nodes: A list of frame_id which is the last frame of backtrace.
"""
if heap_dump_version != Trace.HEAP_DUMP_VERSION_1:
raise UnsupportedHeapDumpVersionError(heap_dump_version)
self._type_name_nodes.append(type_name_node)
if not alloc_types:
return
self._modified = True
type_size = len(alloc_types)
get_frame_parent = (
lambda frame_id: frame_id if frame_by_id[frame_id].parent_id is None
else frame_by_id[frame_id].parent_id
)
for count in range(type_size):
# Note that according to the AllocatorShim layer, there are two frames
# which are used to record the allocations in Linux.
parent = get_frame_parent(get_frame_parent(alloc_nodes[count]))
type_id = self.AddType(frame_by_id[parent].name)
alloc_types[count] = type_id
def AddType(self, type_name):
"""Adds a type name (if it doesn't exist) and returns its id."""
type_id = self._id_by_name.get(type_name)
if type_id is None:
type_id = self._max_type_id + 1
self._Insert(type_id, type_name)
self._modified = True
return type_id
def ApplyModifications(self, string_map, force=False):
"""Propagates modifications back to nodes.
|string_map| - A StringMap object to use to translate strings to ids.
|force| - Whether to propagate changes regardless of 'modified' flag.
"""
if not self.modified and not force:
return
assert self._type_name_nodes, 'no nodes'
# Serialize into the first node, and clear all others.
for types_node in self._type_name_nodes:
del types_node[:]
types_node = self._type_name_nodes[0]
for type_node in self._type_by_id.values():
types_node.append({
'id': type_node.id,
'name_sid': string_map.AddString(type_node.name)
})
type_node._ClearModified()
self._modified = False
def _Insert(self, type_id, type_name):
type_node = self.Type(type_id, type_name)
self._id_by_name[type_name] = type_id
self._type_by_id[type_id] = type_node
self._max_type_id = max(self._max_type_id, type_id)
class StackFrameMap(NodeWrapper):
""" Wraps stack frame tree nodes for a process.
For the legacy format this wrapper expects a single 'stackFrames' node
(which comes from metadata event):
"stackFrames": {
"<frame_id>": {
"name": "<frame_name>"
"parent": "<parent_frame_id>"
},
...
}
For the modern format this wrapper expects several 'nodes' nodes:
"nodes": [
{
"id": <frame_id>,
"parent": <parent_frame_id>,
"name_sid": <name_string_id>
},
...
]
In both formats frame name is a string. Native heap profiler generates
specially formatted frame names (e.g. "pc:10eb78dba") for function
addresses (PCs). Inner Frame class below parses name and extracts PC,
if it's there.
"""
class Frame(object):
def __init__(self, frame_id, name, parent_frame_id):
self._modified = False
self._id = frame_id
self._name = name
self._pc = self._ParsePC(name)
self._parent_id = parent_frame_id
self._ext = None
@property
def modified(self):
"""Returns True if the frame was modified.
For example changing frame's name sets this flag (since the change
needs to be propagated back to nodes).
"""
return self._modified
@property
def id(self):
"""Frame id (integer)."""
return self._id
@property
def pc(self):
"""Parsed (integer) PC of the frame, or None."""
return self._pc
@property
def name(self):
"""Name of the frame (see above)."""
return self._name
@name.setter
def name(self, value):
"""Changes the name. Doesn't affect value of |pc|."""
self._modified = True
self._name = value
@property
def parent_id(self):
"""Parent frame id (integer)."""
return self._parent_id
_PC_TAG = 'pc:'
def _ParsePC(self, name):
if not name.startswith(self._PC_TAG):
return None
return int(name[len(self._PC_TAG):], 16)
def _ClearModified(self):
self._modified = False
def __init__(self):
self._modified = False
self._heap_dump_version = None
self._stack_frames_nodes = []
self._frame_by_id = {}
@property
def modified(self):
"""Returns True if the wrapper or any of its frames were modified."""
return (self._modified or
any(f.modified for f in self._frame_by_id.values()))
@property
def frame_by_id(self):
"""Returns {id -> frame} dict (must not be modified directly)."""
return self._frame_by_id
def ParseNext(self, heap_dump_version, stack_frames_node, string_map):
"""Parses the next stack frames node (see NodeWrapper).
For the modern format |string_map| is used to translate string ids
to strings.
"""
frame_by_id = {}
if heap_dump_version == Trace.HEAP_DUMP_VERSION_LEGACY:
if self._stack_frames_nodes:
raise Exception('Legacy stack frames node is expected only once.')
for frame_id, frame_node in stack_frames_node.items():
frame = self.Frame(frame_id,
frame_node['name'],
frame_node.get('parent'))
frame_by_id[frame.id] = frame
else:
if heap_dump_version != Trace.HEAP_DUMP_VERSION_1:
raise UnsupportedHeapDumpVersionError(heap_dump_version)
for frame_node in stack_frames_node:
frame = self.Frame(frame_node['id'],
string_map.string_by_id[frame_node['name_sid']],
frame_node.get('parent'))
frame_by_id[frame.id] = frame
self._heap_dump_version = heap_dump_version
self._stack_frames_nodes.append(stack_frames_node)
self._frame_by_id.update(frame_by_id)
def ApplyModifications(self, string_map, force=False):
"""Applies modifications back to nodes (see NodeWrapper)."""
if not self.modified and not force:
return
assert self._stack_frames_nodes, 'no nodes'
if self._heap_dump_version == Trace.HEAP_DUMP_VERSION_LEGACY:
assert string_map is None, \
'string_map should not be used with the legacy format'
# Serialize frames into the first node, clear all others.
for frames_node in self._stack_frames_nodes:
if self._heap_dump_version == Trace.HEAP_DUMP_VERSION_LEGACY:
frames_node.clear()
else:
del frames_node[:]
frames_node = self._stack_frames_nodes[0]
for frame in self._frame_by_id.values():
if self._heap_dump_version == Trace.HEAP_DUMP_VERSION_LEGACY:
frame_node = {'name': frame.name}
frames_node[frame.id] = frame_node
else:
frame_node = {
'id': frame.id,
'name_sid': string_map.AddString(frame.name)
}
frames_node.append(frame_node)
if frame.parent_id is not None:
frame_node['parent'] = frame.parent_id
frame._ClearModified()
self._modified = False
class Trace(NodeWrapper):
"""Wrapper for the root trace node (i.e. the trace JSON itself).
This wrapper parses select nodes from memory-infra events and groups
parsed data per-process (see inner Process class below).
"""
# Indicates legacy heap dump format.
HEAP_DUMP_VERSION_LEGACY = 'Legacy'
# Indicates variation of a modern heap dump format.
HEAP_DUMP_VERSION_1 = 1
class Process(object):
"""Collection of per-process data and wrappers."""
def __init__(self, pid):
self._pid = pid
self._name = None
self._memory_map = None
self._stack_frame_map = StackFrameMap()
self._type_name_map = TypeNameMap()
self._string_map = StringMap()
self._heap_dump_version = None
self._library_start_address = 0
@property
def modified(self):
return self._stack_frame_map.modified or self._type_name_map.modified
@property
def pid(self):
return self._pid
@property
def name(self):
return self._name
@property
def unique_name(self):
"""Returns string that includes both process name and its pid."""
name = self._name if self._name else 'UnnamedProcess'
return '{}({})'.format(name, self._pid)
@property
def memory_map(self):
return self._memory_map
@property
def stack_frame_map(self):
return self._stack_frame_map
@property
def type_name_map(self):
return self._type_name_map
@property
def library_start_address(self):
return self._library_start_address
def ApplyModifications(self):
"""Calls ApplyModifications() on contained wrappers."""
if self._heap_dump_version == Trace.HEAP_DUMP_VERSION_LEGACY:
self._stack_frame_map.ApplyModifications(None)
else:
if self._stack_frame_map.modified or self._type_name_map.modified:
self._string_map.Clear()
self._stack_frame_map.ApplyModifications(self._string_map, force=True)
self._type_name_map.ApplyModifications(self._string_map, force=True)
self._string_map.ApplyModifications()
def __init__(self, trace_node, frame_as_object_type=False):
self._trace_node = trace_node
self._processes = []
self._heap_dump_version = None
self._os = None
self._version = None
self._is_chromium = True
self._is_64bit = False
self._is_win = False
self._is_mac = False
self._is_linux = False
self._is_cros = False
self._is_android = False
self._is_cast = False
self._frame_as_object_type = frame_as_object_type
# Misc per-process information needed only during parsing.
class ProcessExt(object):
def __init__(self, pid):
self.process = Trace.Process(pid)
self.mapped_entry_names = set()
self.process_mmaps_node = None
self.seen_strings_node = False
process_ext_by_pid = {}
if isinstance(trace_node, dict):
metadata = trace_node['metadata']
product_version = metadata['product-version']
# product-version has the form "Chrome/60.0.3103.0"
self._version = product_version.split('/', 1)[-1]
self._os = metadata['os-name']
self._is_win = re.search('windows', metadata['os-name'], re.IGNORECASE)
self._is_mac = re.search('mac', metadata['os-name'], re.IGNORECASE)
self._is_linux = re.search('linux', metadata['os-name'], re.IGNORECASE)
self._is_cros = re.search('cros', metadata['os-name'], re.IGNORECASE)
self._is_android = re.search(
'android', metadata['os-name'], re.IGNORECASE)
self._is_64bit = (
re.search('x86_64', metadata['os-arch'], re.IGNORECASE) and
not re.search('WOW64', metadata['user-agent'], re.IGNORECASE))
# Android traces produced via 'chrome://inspect/?tracing#devices' are
    # just a list of events.
events = trace_node if isinstance(trace_node, list) \
else trace_node['traceEvents']
for event in events: # pylint: disable=too-many-nested-blocks
name = event.get('name')
if not name:
continue
pid = event['pid']
process_ext = process_ext_by_pid.get(pid)
if process_ext is None:
process_ext = ProcessExt(pid)
process_ext_by_pid[pid] = process_ext
process = process_ext.process
phase = event['ph']
if phase == self._EVENT_PHASE_METADATA:
if name == 'process_name':
process._name = event['args']['name']
elif name == 'stackFrames':
process._stack_frame_map.ParseNext(
self._UseHeapDumpVersion(self.HEAP_DUMP_VERSION_LEGACY),
event['args']['stackFrames'],
process._string_map)
elif name == 'chrome_library_address':
process._library_start_address = (
int(event['args']['start_address'], 16))
elif phase == self._EVENT_PHASE_MEMORY_DUMP:
dumps = event['args']['dumps']
process_mmaps = dumps.get('process_mmaps')
if process_mmaps:
# We want the most recent memory map, so parsing happens later
# once we finished reading all events.
process_ext.process_mmaps_node = process_mmaps
heaps = dumps.get('heaps_v2')
if heaps:
version = self._UseHeapDumpVersion(heaps['version'])
maps = heaps.get('maps')
allocators = heaps.get('allocators')
if maps and allocators:
process_ext.mapped_entry_names.update(list(maps.keys()))
types = maps.get('types')
stack_frames = maps.get('nodes')
strings = maps.get('strings')
if (strings is None and (types or stack_frames)
and not process_ext.seen_strings_node):
# ApplyModifications() for TypeNameMap and StackFrameMap puts
# everything into the first node and depends on StringMap. So
# we need to make sure that 'strings' node is there if any of
# other two nodes present.
strings = []
maps['strings'] = strings
if strings is not None:
process_ext.seen_strings_node = True
process._string_map.ParseNext(version, strings)
if stack_frames:
process._stack_frame_map.ParseNext(
version, stack_frames, process._string_map)
if types:
if self._frame_as_object_type:
for alloc in allocators.values():
process._type_name_map.ParseNextWithPCAsTypeName(
version, types, process._stack_frame_map._frame_by_id,
alloc['types'], alloc['nodes'])
else:
process._type_name_map.ParseNext(version, types,
process._string_map)
self._processes = []
for pe in process_ext_by_pid.values():
pe.process._heap_dump_version = self._heap_dump_version
if pe.process_mmaps_node:
# Now parse the most recent memory map.
pe.process._memory_map = MemoryMap(pe.process_mmaps_node,
pe.process)
self._processes.append(pe.process)
@property
def node(self):
"""Root node (that was passed to the __init__)."""
return self._trace_node
@property
def modified(self):
"""Returns True if trace file needs to be updated.
Before writing trace JSON back to a file ApplyModifications() needs
to be called.
"""
return any(p.modified for p in self._processes)
@property
def processes(self):
return self._processes
@property
def heap_dump_version(self):
return self._heap_dump_version
@property
def version(self):
return self._version
@property
def os(self):
return self._os
@property
def is_chromium(self):
return self._is_chromium
@is_chromium.setter
def is_chromium(self, new_value):
self._is_chromium = new_value
@property
def is_mac(self):
return self._is_mac
@property
def is_win(self):
return self._is_win
@property
def is_linux(self):
return self._is_linux
@property
def is_android(self):
return self._is_android
@property
def is_cast(self):
return self._is_cast
@is_cast.setter
def is_cast(self, new_value):
self._is_cast = new_value
@property
def frame_as_object_type(self):
return self._frame_as_object_type
@property
def is_64bit(self):
return self._is_64bit
@property
def library_name(self):
return self._trace_node['metadata'].get('chrome-library-name')
def ApplyModifications(self):
"""Propagates modifications back to the trace JSON."""
for process in self._processes:
process.ApplyModifications()
assert not self.modified, 'still modified'
# Relevant trace event phases from Chromium's
# src/base/trace_event/common/trace_event_common.h.
_EVENT_PHASE_METADATA = 'M'
_EVENT_PHASE_MEMORY_DUMP = 'v'
def _UseHeapDumpVersion(self, version):
if self._heap_dump_version is None:
self._heap_dump_version = version
return version
elif self._heap_dump_version != version:
raise Exception(
("Inconsistent trace file: first saw '{}' heap dump version, "
"then '{}'.").format(self._heap_dump_version, version))
else:
return version
class SymbolizableFile(object):
"""Holds file path, addresses to symbolize and stack frames to update.
This class is a link between ELFSymbolizer and a trace file: it specifies
what to symbolize (addresses) and what to update with the symbolization
result (frames).
"""
def __init__(self, file_path, code_id, trace_from_win):
self.path = file_path
if trace_from_win:
self.module_name = ntpath.basename(file_path)
else:
self.module_name = os.path.basename(file_path)
self.symbolizable_path = file_path # path to use for symbolization
self.code_id = code_id
self.frames_by_address = collections.defaultdict(list)
self.skip_symbolization = False
self.has_breakpad_symbols = False
def ResolveSymbolizableFilesByNodes(symfile_by_path, memory_map, nodes,
trace_from_win):
"""Resolves and groups PCs into list of SymbolizableFiles.
  As part of the grouping process, this function resolves the PC of each node
  (stack frame or object type) to the corresponding memory map region. Nodes
  that fail to resolve are symbolized with '<unresolved>'.
"""
for node in nodes:
if node.pc is None:
continue
region = memory_map.FindRegion(node.pc)
if region is None:
node.name = '<unresolved>'
continue
symfile = symfile_by_path.get(region.file_path)
if symfile is None:
file_path = region.file_path
symfile = SymbolizableFile(file_path, region.code_id, trace_from_win)
symfile_by_path[symfile.path] = symfile
relative_pc = node.pc - region.start_address + region.file_offset
symfile.frames_by_address[relative_pc].append(node)
def ResolveSymbolizableFiles(processes, trace_from_win, frame_as_object_type):
"""Resolves and groups PCs from frame or type into list of SymbolizableFiles.
  In the default mode, PCs are always resolved from stack frames. If "--frame-as-
  object-type" is enabled, the type names are also PCs, so they need to be
  resolved and symbolized as well.
"""
symfile_by_path = {}
for process in processes:
if not process.memory_map:
continue
ResolveSymbolizableFilesByNodes(
symfile_by_path, process.memory_map,
process.stack_frame_map.frame_by_id.values(), trace_from_win)
if frame_as_object_type:
ResolveSymbolizableFilesByNodes(symfile_by_path, process.memory_map,
process.type_name_map.type_by_id.values(),
trace_from_win)
return list(symfile_by_path.values())
def FindInSystemPath(binary_name):
paths = os.environ['PATH'].split(os.pathsep)
for path in paths:
binary_path = os.path.join(path, binary_name)
if os.path.isfile(binary_path):
return binary_path
return None
class BreakpadSymbolsModule(object):
"""Encapsulates Breakpad logic for symbols of a specific module."""
def __init__(self, filename):
super(BreakpadSymbolsModule, self).__init__()
self.filename = filename
self.files = []
self.symbols = {}
self.arch = None
self.debug_id = None
self.code_id = None
self.binary = None
def Parse(self):
# see: https://chromium.googlesource.com/breakpad/breakpad/+/master/docs/symbol_files.md
with open(self.filename) as fp:
for line in fp:
fragments = line.rstrip().split()
if fragments[0] == 'MODULE':
# MODULE mac x86_64 A7001116478B33F18FF9BEDE9F615F190 t
self.arch = fragments[2]
self.debug_id = fragments[3]
self.binary = ' '.join(fragments[4:])
elif fragments[0] == 'INFO' and fragments[1] == 'CODE_ID':
# INFO CODE_ID 595D00BD31F0000 chrome.dll
self.code_id = fragments[2]
elif fragments[0] == 'FILE':
# FILE 0 /b/c/b/mac64/src/out/Release/../../base/at_exit.cc
self.files.append(' '.join(fragments[2:]))
elif fragments[0] == 'PUBLIC':
# PUBLIC db60 0 base::mac::CallWithEHFrame(void () block_pointer)
self.symbols[int(fragments[1], 16)] = ' '.join(fragments[3:])
elif fragments[0] == 'FUNC':
# FUNC 567e0 264 0 Cr_z_fill_window_sse
self.symbols[int(fragments[1], 16)] = ' '.join(fragments[4:])
class Symbolizer(object):
"""Encapsulates platform-specific symbolization logic."""
def __init__(self, addr2line_executable):
self.is_mac = sys.platform == 'darwin'
self.is_win = sys.platform == 'win32'
if self.is_mac:
self.binary = 'atos'
self._matcher = symbolize_trace_atos_regex.AtosRegexMatcher()
elif self.is_win:
self.binary = 'addr2line-pdb.exe'
else:
self.binary = 'addr2line'
if addr2line_executable and os.path.isfile(addr2line_executable):
self.symbolizer_path = addr2line_executable
else:
self.symbolizer_path = FindInSystemPath(self.binary)
self.breakpad_modules = {}
def _SymbolizeLinuxAndAndroid(self, symfile):
def _SymbolizerCallback(sym_info, frames):
# Unwind inline chain to the top.
while sym_info.inlined_by:
sym_info = sym_info.inlined_by
symbolized_name = (sym_info.name if sym_info.name else
'<{}>'.format(symfile.path))
for frame in frames:
frame.name = symbolized_name
symbolizer = elf_symbolizer.ELFSymbolizer(symfile.symbolizable_path,
self.symbolizer_path,
_SymbolizerCallback,
inlines=True)
for address, frames in symfile.frames_by_address.items():
# SymbolizeAsync() asserts that the type of address is int. We operate
# on longs (since they are raw pointers possibly from 64-bit processes).
# It's OK to cast here because we're passing relative PC, which should
# always fit into int.
symbolizer.SymbolizeAsync(int(address), frames)
symbolizer.Join()
def _SymbolizeMac(self, symfile):
load_address = (symbolize_trace_macho_reader.
ReadMachOTextLoadAddress(symfile.symbolizable_path))
assert load_address is not None
address_os_file, address_file_path = tempfile.mkstemp()
try:
with os.fdopen(address_os_file, 'w') as address_file:
for address in symfile.frames_by_address.keys():
address_file.write('{:x} '.format(address + load_address))
cmd = [self.symbolizer_path, '-arch', 'x86_64', '-l',
'0x%x' % load_address, '-o', symfile.symbolizable_path,
'-f', address_file_path]
output_array = subprocess.check_output(cmd).split('\n')
for i, frames in enumerate(symfile.frames_by_address.values()):
symbolized_name = self._matcher.Match(output_array[i])
for frame in frames:
frame.name = symbolized_name
finally:
os.remove(address_file_path)
def _SymbolizeWin(self, symfile):
"""Invoke symbolizer binary on windows and write all input in one go.
Unlike linux, on windows, symbolization talks through a shared system
service that handles communication with the NT symbol servers. This
    creates an explicit serialization (and therefore lock contention) of
    any process using the symbol API for files that do not have a local PDB.
    Thus, even though the windows symbolizer binary can be made command-line
compatible with the POSIX addr2line interface, parallelizing the
symbolization does not yield the same performance effects. Running
just one symbolizer seems good enough for now. Can optimize later
if this becomes a bottleneck.
"""
cmd = [self.symbolizer_path, '--functions', '--demangle', '--exe',
symfile.symbolizable_path]
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stdin=subprocess.PIPE,
stderr=None)
addrs = ["%x" % relative_pc for relative_pc in
symfile.frames_by_address.keys()]
(stdout_data, _) = proc.communicate('\n'.join(addrs))
# On windows, lines may contain '\r' character: e.g. "RtlUserThreadStart\r".
stdout_data = stdout_data.replace('\r', '')
stdout_data = stdout_data.split('\n')
# The output lines are known to be in the same order as the input addresses.
for i, addr in enumerate(addrs):
for frame in symfile.frames_by_address[int(addr, 16)]:
# Output of addr2line with --functions is always 2 outputs per
# symbol, function name followed by source line number. Only grab
# the function name as line info is not always available.
frame.name = stdout_data[i * 2]
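# Illustrative note (a sketch, not output from the actual tool): an
# addr2line-style symbolizer run with --functions emits two lines per input
# address, e.g. for a hypothetical address written to stdin:
#   stdin:  1a2b3c
#   stdout: content::SomeClass::SomeMethod
#           some_file.cc:123
# which is why the loop above indexes stdout_data[i * 2] for the i-th address.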
def _SymbolizeBreakpad(self, symfile):
module_filename = symfile.symbolizable_path
module = BreakpadSymbolsModule(module_filename)
module.Parse()
if module.code_id and symfile.code_id and module.code_id != symfile.code_id:
print("Warning: Code identifiers do not match for %s" % symfile.path)
print(" from trace file: %s" % symfile.code_id)
print(" from debug file: %s" % module.code_id)
return
addresses = list(symfile.frames_by_address.keys())
addresses.sort()
symbols_addresses = list(module.symbols.keys())
symbols_addresses.sort()
symbols_addresses.append(float('inf'))
offset = 0
skipped_addresses = 0
for symbol_offset in range(1, len(symbols_addresses)):
symbol_address_start = symbols_addresses[symbol_offset - 1]
symbol_address_end = symbols_addresses[symbol_offset]
resolved_symbol = module.symbols[symbol_address_start]
while (offset < len(addresses) and
addresses[offset] < symbol_address_end):
if addresses[offset] >= symbol_address_start:
for frame in symfile.frames_by_address[addresses[offset]]:
frame.name = resolved_symbol
else:
skipped_addresses = skipped_addresses + 1
offset = offset + 1
if skipped_addresses:
print("warning: %d addresses could not be symbolized!" % skipped_addresses)
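# Illustrative walk-through (hypothetical values): with sorted symbol start
# addresses [0x1000, 0x2000, inf] and frame addresses [0x0800, 0x1004, 0x1ff0],
# the sweep above maps 0x1004 and 0x1ff0 to the symbol starting at 0x1000,
# while 0x0800 falls below the first symbol and is counted as skipped.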
def SymbolizeSymfile(self, symfile):
if symfile.skip_symbolization:
for address, frames in symfile.frames_by_address.items():
unsymbolized_name = ('<' + symfile.module_name + '>')
# Only append the address if there's a library.
if symfile.symbolizable_path != _UNNAMED_FILE:
unsymbolized_name += ' + ' + str(hex(address))
for frame in frames:
frame.name = unsymbolized_name
return
if symfile.has_breakpad_symbols:
self._SymbolizeBreakpad(symfile)
elif self.is_mac:
self._SymbolizeMac(symfile)
elif self.is_win:
self._SymbolizeWin(symfile)
else:
self._SymbolizeLinuxAndAndroid(symfile)
def IsSymbolizableFile(self, file_path):
if self.is_win:
extension = os.path.splitext(file_path)[1].lower()
return extension in ['.dll', '.exe']
else:
result = subprocess.check_output(['file', '-0', file_path])
type_string = result[result.find('\0') + 1:]
return bool(re.match(r'.*(ELF|Mach-O) (32|64)-bit\b.*',
type_string, re.DOTALL))
def SymbolizeFiles(symfiles, symbolizer):
"""Symbolizes each file in the given list of SymbolizableFiles
and updates stack frames with symbolization results."""
if not symfiles:
print('Nothing to symbolize.')
return
print('Symbolizing...')
def _SubPrintf(message, *args):
print((' ' + message).format(*args))
for symfile in symfiles:
problem = None
if symfile.skip_symbolization:
pass
elif (symfile.has_breakpad_symbols and
os.path.isabs(symfile.symbolizable_path) and
os.path.isfile(symfile.symbolizable_path)):
pass
elif not os.path.isabs(symfile.symbolizable_path):
problem = 'not a file'
elif not os.path.isfile(symfile.symbolizable_path):
problem = "file doesn't exist"
elif not symbolizer.IsSymbolizableFile(symfile.symbolizable_path):
problem = 'file is not symbolizable'
if problem:
_SubPrintf("Problem with '{}': {}.",
symfile.symbolizable_path,
problem)
symfile.skip_symbolization = True
_SubPrintf('Symbolizing {} PCs from {}...',
len(symfile.frames_by_address),
symfile.symbolizable_path)
symbolizer.SymbolizeSymfile(symfile)
# Subpath of output path where unstripped libraries are stored.
ANDROID_UNSTRIPPED_SUBPATH = 'lib.unstripped'
def RemapAndroidFiles(symfiles, output_path, chrome_soname):
for symfile in symfiles:
filename = symfile.module_name
if os.path.splitext(filename)[1] == '.so':
symfile.symbolizable_path = os.path.join(
output_path, ANDROID_UNSTRIPPED_SUBPATH, filename)
elif os.path.splitext(filename)[1] == '.apk' and chrome_soname:
# If any PC falls inside an .apk memory mapping, assume it comes from
# chrome.so, since we memory-map libraries directly from the apk. This does
# not work for component builds.
symfile.symbolizable_path = os.path.join(
output_path, ANDROID_UNSTRIPPED_SUBPATH, chrome_soname)
else:
# Clobber the file path to trigger the "not a file" problem in
# SymbolizeFiles(). Without this, such files would be reported as
# "file doesn't exist", which is not accurate.
symfile.symbolizable_path = 'android://{}'.format(symfile.path)
def RemapCastFiles(symfiles, output_path):
for symfile in symfiles:
# On Chromecast, symfile.path is an absolute path, which means
# os.path.join(output_path, symfile.path) would discard output_path. For
# example, for the unstripped libassistant library located at
# out/target/product/[device]/symbols/system/chrome/lib/libassistant.so,
# symfile.path is /system/chrome/lib/libassistant.so.
#
# See also: https://docs.python.org/3/library/os.path.html#os.path.join
symfile.symbolizable_path = output_path + symfile.path
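# Worked example of the pitfall described above (hypothetical paths):
# os.path.join('/out/symbols', '/system/chrome/lib/libassistant.so') returns
# '/system/chrome/lib/libassistant.so' because the second argument is absolute,
# while the concatenation used here yields
# '/out/symbols/system/chrome/lib/libassistant.so', which is what we want.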
def RemapMacFiles(symfiles, symbol_base_directory, version,
only_symbolize_chrome_symbols):
suffix = ("Google Chrome Framework.dSYM/Contents/Resources/DWARF/"
"Google Chrome Framework")
symbol_sub_dir = os.path.join(symbol_base_directory, version)
symbolizable_path = os.path.join(symbol_sub_dir, suffix)
for symfile in symfiles:
if symfile.path.endswith("Google Chrome Framework"):
symfile.symbolizable_path = symbolizable_path
elif only_symbolize_chrome_symbols:
symfile.skip_symbolization = True
def RemapWinFiles(symfiles, symbol_base_directory, version, is64bit,
only_symbolize_chrome_symbols):
folder = "win64" if is64bit else "win"
symbol_sub_dir = os.path.join(symbol_base_directory,
"chrome-" + folder + "-" + version)
for symfile in symfiles:
image = os.path.join(symbol_sub_dir, symfile.module_name)
symbols = image + ".pdb"
if os.path.isfile(image) and os.path.isfile(symbols):
symfile.symbolizable_path = image
elif only_symbolize_chrome_symbols:
symfile.skip_symbolization = True
def RemapBreakpadModules(symfiles, symbolizer, only_symbolize_chrome_symbols,
trace):
for symfile in symfiles:
image = symfile.module_name.lower()
if (trace.is_android and os.path.splitext(image)[1] == '.apk'
and trace.library_name):
image = trace.library_name
# Check whether the image has Breakpad symbols. Breakpad symbols are
# generated for Chrome modules of official builds.
if image in symbolizer.breakpad_modules:
symfile.symbolizable_path = symbolizer.breakpad_modules[image]
symfile.has_breakpad_symbols = True
elif only_symbolize_chrome_symbols:
symfile.skip_symbolization = True
def SymbolizeTrace(options, trace, symbolizer):
symfiles = ResolveSymbolizableFiles(trace.processes, trace.is_win,
trace.frame_as_object_type)
if options.use_breakpad_symbols:
RemapBreakpadModules(symfiles, symbolizer,
options.only_symbolize_chrome_symbols,
trace)
else:
if trace.is_android:
if not options.output_directory:
sys.exit('The trace file appears to be from Android. Please '
'specify output directory to properly symbolize it.')
RemapAndroidFiles(symfiles, os.path.abspath(options.output_directory),
trace.library_name)
if trace.is_cast:
RemapCastFiles(symfiles, os.path.abspath(options.output_directory))
if not trace.is_chromium:
if symbolizer.is_mac:
RemapMacFiles(symfiles, options.symbol_base_directory, trace.version,
options.only_symbolize_chrome_symbols)
if symbolizer.is_win:
RemapWinFiles(symfiles, options.symbol_base_directory, trace.version,
trace.is_64bit, options.only_symbolize_chrome_symbols)
SymbolizeFiles(symfiles, symbolizer)
def FetchAndExtractBreakpadSymbols(symbol_base_directory,
breakpad_info_folder,
trace,
symbolizer,
cloud_storage_bucket):
if breakpad_info_folder:
# Using local symbols from |breakpad_info_folder|.
symbol_sub_dir = breakpad_info_folder
else:
# Fetching the symbols from GCS (OS dependent).
if trace.is_win:
folder = 'win64-pgo' if trace.is_64bit else 'win-pgo'
elif trace.is_mac:
folder = 'mac64'
elif trace.is_linux:
folder = 'linux64'
else:
raise Exception('OS not supported for Breakpad symbolization (%s/%s)' %
(trace.os, trace.version))
gcs_folder = 'desktop-*/' + trace.version + '/' + folder
gcs_file = gcs_folder + '/breakpad-info'
symbol_sub_dir = os.path.join(symbol_base_directory,
'breakpad-info_' + trace.version +
'_' + folder)
zip_path = symbol_sub_dir + '/breakpad-info.zip'
# Check whether symbols are already downloaded and extracted.
if not os.path.isdir(symbol_sub_dir):
if cloud_storage.Exists(cloud_storage_bucket, gcs_file + '.zip'):
# Some versions, such as mac, don't have the .zip extension.
gcs_file = gcs_file + '.zip'
elif not cloud_storage.Exists(cloud_storage_bucket, gcs_file):
print("Can't find symbols on GCS " + gcs_file + ".")
return False
print('Downloading symbols files from GCS, please wait.')
cloud_storage.Get(cloud_storage_bucket, gcs_file, zip_path)
with zipfile.ZipFile(zip_path, 'r') as zip_file:
zip_file.extractall(symbol_sub_dir)
os.remove(zip_path)
# Parse breakpad module header (first line) and register known modules.
for root, _, filenames in os.walk(symbol_sub_dir):
for filename in filenames:
full_filename = os.path.abspath(os.path.join(root, filename))
with open(full_filename, 'r') as file_handle:
first_line = file_handle.readline()
fragments = first_line.rstrip().split()
if fragments[0] == 'MODULE':
binary = ' '.join(fragments[4:]).lower()
module_name, extension = os.path.splitext(binary)
if extension == ".pdb":
binary = module_name
symbolizer.breakpad_modules[binary] = full_filename
return True
def OpenTraceFile(file_path, mode):
if file_path.endswith('.gz'):
return gzip.open(file_path, mode + 'b')
else:
return open(file_path, mode + 't')
def FetchAndExtractSymbolsMac(symbol_base_directory, version,
cloud_storage_bucket):
def GetLocalPath(base_dir, version):
return os.path.join(base_dir, version + ".tar.bz2")
def GetSymbolsPath(version):
return "desktop-*/" + version + "/mac64/Google Chrome.dSYM.tar.bz2"
def ExtractSymbolTarFile(symbol_sub_dir, symbol_tar_file):
os.makedirs(symbol_sub_dir)
with tarfile.open(os.path.expanduser(symbol_tar_file), "r:bz2") as tar:
tar.extractall(symbol_sub_dir)
symbol_sub_dir = os.path.join(symbol_base_directory, version)
if os.path.isdir(symbol_sub_dir):
return True
bzip_path = GetLocalPath(symbol_base_directory, version)
if not os.path.isfile(bzip_path):
if not cloud_storage.Exists(cloud_storage_bucket, GetSymbolsPath(version)):
print("Can't find symbols on GCS '%s'." % version)
return False
print("Downloading symbols files from GCS, please wait.")
cloud_storage.Get(cloud_storage_bucket, GetSymbolsPath(version), bzip_path)
ExtractSymbolTarFile(symbol_sub_dir, bzip_path)
os.remove(bzip_path)
return True
def FetchAndExtractSymbolsWin(symbol_base_directory, version, is64bit,
cloud_storage_bucket):
def DownloadAndExtractZipFile(zip_path, source, destination):
if not os.path.isfile(zip_path):
if not cloud_storage.Exists(cloud_storage_bucket, source):
print("Can't find symbols on GCS '%s'." % version)
return False
print("Downloading symbols files from GCS, please wait.")
cloud_storage.Get(cloud_storage_bucket, source, zip_path)
if not os.path.isfile(zip_path):
print("Can't download symbols on GCS.")
return False
with zipfile.ZipFile(zip_path, "r") as zip_file:
for member in zip_file.namelist():
filename = os.path.basename(member)
# Skip directories.
if not filename:
continue
# Extract archived files.
source = zip_file.open(member)
target = open(os.path.join(destination, filename), 'wb')
with source, target:
shutil.copyfileobj(source, target)
folder = "win64" if is64bit else "win"
# Clang build (M61+)
folder_suffix = "-clang"
gcs_folder = "desktop-*/" + version + "/" + folder + folder_suffix + "/"
if not cloud_storage.Exists(cloud_storage_bucket, gcs_folder):
# MSVC build (before M61)
folder_suffix = "-pgo"
gcs_folder = "desktop-*/" + version + "/" + folder + folder_suffix + "/"
symbol_sub_dir = os.path.join(symbol_base_directory,
"chrome-" + folder + "-" + version)
if os.path.isdir(symbol_sub_dir):
return True
os.makedirs(symbol_sub_dir)
DownloadAndExtractZipFile(
os.path.join(symbol_base_directory,
"chrome-" + folder + "-" + version + "-syms.zip"),
gcs_folder + "chrome-win32-syms.zip",
symbol_sub_dir)
DownloadAndExtractZipFile(
os.path.join(symbol_base_directory,
"chrome-" + folder + "-" + version + ".zip"),
gcs_folder + "chrome-" + folder + folder_suffix + ".zip",
symbol_sub_dir)
return True
# Suffix used for backup files.
BACKUP_FILE_TAG = '.BACKUP'
def main(args):
parser = argparse.ArgumentParser()
parser.add_argument(
'file',
help='Trace file to symbolize (.json or .json.gz)')
parser.add_argument(
'--no-backup', dest='backup', action='store_false',
help="Don't create {} files".format(BACKUP_FILE_TAG))
parser.add_argument(
'--is-local-build', action='store_true',
help="Indicate that the memlog trace is from a local build of Chromium.")
parser.add_argument(
'--is-cast', action='store_true',
help="Indicate that the memlog trace is from cast devices.")
parser.add_argument(
'--frame-as-object-type',
action='store_true',
help="Indicate that the output of object types should use the frame name. "
"This option is specifically used for cast devices running Linux.")
parser.add_argument(
'--output-directory',
help='The path to the build output directory, such as out/Debug.')
parser.add_argument(
'--only-symbolize-chrome-symbols',
action='store_true',
help='Prevents symbolization of non-Chrome [system] symbols.')
parser.add_argument(
'--cloud-storage-bucket', default='chrome-unsigned',
help="Bucket that holds symbols for official Chrome builds. "
"Used by tests, which don't have access to the default bucket.")
parser.add_argument(
'--addr2line-executable', default=None,
help="The path to the executable used to convert an address to a line. "
"By default, the executable found in the PATH environment variable is used. "
"Used by tests, which don't have the executable.")
parser.add_argument(
'--use-breakpad-symbols',
action='store_true',
help='Use breakpad symbol files for symbolization.')
parser.add_argument(
'--breakpad-symbols-directory', default=None,
help='A path to a directory containing breakpad symbols.')
home_dir = os.path.expanduser('~')
default_dir = os.path.join(home_dir, "symbols")
parser.add_argument(
'--symbol-base-directory',
default=default_dir,
help='Directory where symbols are downloaded and cached.')
options = parser.parse_args(args)
if options.frame_as_object_type and not options.is_cast:
sys.exit("Frame-as-object-type is only supported for cast.")
symbolizer = Symbolizer(options.addr2line_executable)
if (symbolizer.symbolizer_path is None and
not options.use_breakpad_symbols):
sys.exit("Can't symbolize - no %s in PATH." % symbolizer.binary)
trace_file_path = options.file
print('Reading trace file...')
with OpenTraceFile(trace_file_path, 'r') as trace_file:
trace = Trace(json.load(trace_file), options.frame_as_object_type)
print('Trace loaded for %s/%s' % (trace.os, trace.version))
trace.is_chromium = options.is_local_build
trace.is_cast = options.is_cast
# Perform some sanity checks.
if (trace.is_win and sys.platform != 'win32' and
not options.use_breakpad_symbols):
print("Cannot symbolize a Windows trace on a non-Windows platform!")
return False
# If the trace is from Chromium, assume that symbols are already present.
# Otherwise the trace is from Google Chrome. Assume that this is not a local
# build of Google Chrome with symbols, and that we need to fetch symbols
# from gcs.
if trace.is_chromium or options.output_directory:
if options.use_breakpad_symbols and options.breakpad_symbols_directory:
# Local build with local symbols.
FetchAndExtractBreakpadSymbols(
options.symbol_base_directory,
options.breakpad_symbols_directory,
trace, symbolizer,
options.cloud_storage_bucket)
else:
has_symbols = False
if options.use_breakpad_symbols:
# Official build, using Breakpad symbolization.
has_symbols = FetchAndExtractBreakpadSymbols(
options.symbol_base_directory,
options.breakpad_symbols_directory,
trace, symbolizer,
options.cloud_storage_bucket)
else:
# Official build, using native symbolization.
if symbolizer.is_mac:
has_symbols = FetchAndExtractSymbolsMac(options.symbol_base_directory,
trace.version,
options.cloud_storage_bucket)
elif symbolizer.is_win:
has_symbols = FetchAndExtractSymbolsWin(options.symbol_base_directory,
trace.version, trace.is_64bit,
options.cloud_storage_bucket)
else:
raise Exception('OS not supported for native symbolization (%s/%s)' %
(trace.os, trace.version))
if not has_symbols:
print('Cannot fetch symbols from GCS')
return False
SymbolizeTrace(options, trace, symbolizer)
if trace.modified:
trace.ApplyModifications()
if options.backup:
backup_file_path = trace_file_path + BACKUP_FILE_TAG
print('Backing up trace file to {}'.format(backup_file_path))
os.rename(trace_file_path, backup_file_path)
print('Updating the trace file...')
with OpenTraceFile(trace_file_path, 'w') as trace_file:
trace_file.write(json.dumps(trace.node))
else:
print('No modifications were made - not updating the trace file.')
return True
if __name__ == '__main__':
main(sys.argv[1:])
|
bsd-3-clause
|
petercable/mi-dataset
|
mi/dataset/parser/adcpt_acfgm_dcl_pd0.py
|
3
|
5943
|
#!/usr/bin/env python
"""
@package mi.dataset.parser.adcpt_acfgm_dcl_pd0
@file marine-integrations/mi/dataset/parser/adcpt_acfgm_dcl_pd0.py
@author Jeff Roy
@brief Particle and Parser classes for the adcpt_acfgm_dcl_pd0 drivers
The particles are parsed by the common PD0 Parser and
Abstract particle class in file adcp_pd0.py
"""
import binascii
import re
import ntplib
import mi.dataset.parser.adcp_pd0 as adcp_pd0
from mi.core.common import BaseEnum
from mi.core.exceptions import RecoverableSampleException
from mi.core.instrument.data_particle import DataParticleKey
from mi.core.log import get_logger
from mi.dataset.dataset_parser import Parser
from mi.dataset.parser import utilities
from mi.dataset.parser.pd0_parser import AdcpPd0Record, PD0ParsingException, InsufficientDataException
__author__ = 'Jeff Roy'
__license__ = 'Apache 2.0'
log = get_logger()
DATA_RE = re.compile(
r'(\d{4}/\d{2}/\d{2} \d{2}:\d{2}:\d{2}\.\d{3}) (\[adcpt:DLOGP4\]:)?([0-9A-F]+?)((?=\d{4}/)|(?=\r)|(?=\n))')
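# Illustrative (hypothetical) DCL log line that DATA_RE is intended to match:
#   2014/08/17 00:15:09.100 [adcpt:DLOGP4]:7F7FF00201004186...
# Group 1 captures the DCL controller timestamp, group 2 the optional
# instrument tag, and group 3 the hex-encoded PD0 payload, which ends at the
# next timestamp, carriage return or newline.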
class DclKey(BaseEnum):
# Enumerations for the additional DCL parameters in the adcpt_acfgm_pd0_dcl streams.
# The remainder of the streams are identical to the adcps_jln streams and
# are handled by the base AdcpPd0DataParticle class.
# This enumeration is also used for the dcl_data_dict of the particle class
# constructor, so it includes the additional enumeration 'PD0_DATA'.
DCL_CONTROLLER_TIMESTAMP = 'dcl_controller_timestamp'
DCL_CONTROLLER_STARTING_TIMESTAMP = 'dcl_controller_starting_timestamp'
PD0_DATA = 'pd0_data'
PD0_START_STRING = '\x7f\x7f'
class AdcptAcfgmDclPd0Parser(Parser):
def __init__(self,
config,
stream_handle,
exception_callback, # shouldn't be optional anymore
state_callback=None, # No longer used
publish_callback=None): # No longer used
self._file_parsed = False
self._record_buffer = []
self._last_values = {}
super(AdcptAcfgmDclPd0Parser, self).__init__(config,
stream_handle,
None, # State no longer used
None, # Sieve function no longer used
state_callback,
publish_callback,
exception_callback)
def _changed(self, particle):
particle_dict = particle.generate_dict()
stream = particle_dict.get('stream_name')
values = particle_dict.get('values')
last_values = self._last_values.get(stream)
if values == last_values:
return False
self._last_values[stream] = values
return True
def _parse_file(self):
pd0_buffer = ''
ts = None
count = 0
# Go through each line in the file
for line in self._stream_handle:
log.trace('line: %r', line)
records = DATA_RE.findall(line)
if records:
if ts is None:
ts = records[0][0]
data = ''.join([r[2] for r in records])
pd0_buffer += binascii.unhexlify(data)
# Look for start of a PD0 ensemble. If we have a particle queued up, ship it
# then reset our state.
if pd0_buffer.startswith(PD0_START_STRING):
try:
pd0 = AdcpPd0Record(pd0_buffer)
count += 1
self._create_particles(pd0, ts)
ts = None
pd0_buffer = ''
except InsufficientDataException:
continue
except PD0ParsingException as e:
self._exception_callback(RecoverableSampleException('Unable to parse PD0: %s' % e))
# provide an indication that the file was parsed
self._file_parsed = True
log.debug('PARSE_FILE create %s particles', len(self._record_buffer))
def _create_particles(self, pd0, ts):
utc_time = utilities.dcl_controller_timestamp_to_utc_time(ts)
utc_time = ntplib.system_to_ntp_time(utc_time)
velocity = adcp_pd0.VelocityEarth(pd0, port_timestamp=utc_time,
preferred_timestamp=DataParticleKey.PORT_TIMESTAMP)
self._record_buffer.append(velocity)
config = adcp_pd0.AdcpsConfig(pd0, port_timestamp=utc_time,
preferred_timestamp=DataParticleKey.PORT_TIMESTAMP)
engineering = adcp_pd0.AdcpsEngineering(pd0, port_timestamp=utc_time,
preferred_timestamp=DataParticleKey.PORT_TIMESTAMP)
for particle in [config, engineering]:
if self._changed(particle):
self._record_buffer.append(particle)
def get_records(self, num_records_requested=1):
"""
Returns a list of num_records_requested particles when at least that many
particles are available, or a shorter list when fewer than
num_records_requested particles are available.
"""
particles_to_return = []
if num_records_requested > 0:
# If the file was not read, let's parse it
if self._file_parsed is False:
self._parse_file()
# Pop particles off the front of the record buffer until we have
# num_records_requested particles or the buffer is empty
while len(particles_to_return) < num_records_requested and len(self._record_buffer) > 0:
particles_to_return.append(self._record_buffer.pop(0))
return particles_to_return
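# Minimal usage sketch (hypothetical file name; the particle/driver config is
# omitted here and would normally be supplied by the dataset driver):
#
#     def report(exception):
#         log.warn('parser exception: %s', exception)
#
#     with open('20140424.adcpt.log') as stream:
#         parser = AdcptAcfgmDclPd0Parser({}, stream, report)
#         particles = parser.get_records(100)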
|
bsd-2-clause
|
RayRuizhiLiao/ITK_4D
|
Utilities/Doxygen/mcdoc.py
|
1
|
5816
|
#!/usr/bin/env python
import sys, os, re, glob
try:
import io
except ImportError:
import cStringIO as io
def usage():
sys.stdout.write( """usage: mcdoc.py set group file [files...]
Add the tag "\\ingroup group" to all the doxygen comments with a \\class
tag in them.
usage: mcdoc.py check group file [files...]
Check that the tag "\\ingroup group" is in all the doxygen comments with a \\class
tag in them. If the tag is not there, a warning is displayed with the file name, the
line number and the class name. The return value is 0 when all the doxygen comments
have the tag, and 1 when at least one doxygen comment doesn't have it.
usage: mcdoc.py massive-set [ITK-source]
Add the tag "\\ingroup module" to all the headers in ITK, where 'module' is the
module name of the header.
usage: mcdoc.py massive-check [ITK-source]
Check that all the headers in ITK have their module name in their \\ingroup tag.
As for 'check', a warning is displayed if the tag is missing and 1 is returned.
\n""")
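# Example invocations (hypothetical module name and paths):
#   python mcdoc.py set ITKCommon Modules/Core/Common/include/itkObject.h
#   python mcdoc.py check ITKCommon Modules/Core/Common/include
#   python mcdoc.py massive-check /path/to/ITK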
def setGroup( fname, group ):
# sys.stderr.write("Processing "+ fname +"\n")
f = open( fname, "r" )
out = io.StringIO()
# load everything in memory
fcontent = f.read()
f.close()
# now parse all the doxygen fields
last = 0
for m in re.finditer(r"/\*\*(.*?)\*/", fcontent, re.DOTALL):
# write what is before the doxygen field to the output
out.write(fcontent[last:m.start(1)])
last = m.end(1)
dcontent = m.group(1)
# we don't care about doxygen fields not about a class
if r"\class" in dcontent and dcontent != " \class classname ":
# do we have a line with the expected content?
if re.search(r"\\ingroup .*"+group+r"(\s|$)", dcontent, re.MULTILINE):
# yes - just keep the content unchanged
out.write(dcontent)
else:
# add the expected group
if "\n" in dcontent:
# this is a multiline content. Find the indent
indent = re.search("( *)(\*|$)", dcontent).group(1)
lastLine = dcontent.splitlines()[-1]
if re.match(r'^ *$', lastLine):
out.write(dcontent+"* \\ingroup "+group+"\n"+indent)
else:
out.write(dcontent.rstrip()+"\n"+indent+"* \\ingroup "+group+"\n"+indent)
else:
out.write(dcontent+" \\ingroup "+group+" ")
else:
out.write(dcontent)
out.write(fcontent[last:])
# we can save the content to the original file
f = open( fname, "w" )
f.write( out.getvalue() )
f.close()
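# Illustrative before/after (hypothetical class and group) of what setGroup
# produces for a multi-line doxygen comment:
#   /** \class MyFilter
#    * \brief Does something.
#    */
# becomes
#   /** \class MyFilter
#    * \brief Does something.
#    * \ingroup ITKCommon
#    */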
def checkGroup( fname, group ):
# sys.stderr.write("Checking"+ fname + "\n")
f = open( fname, "r" )
# load everything in memory
fcontent = f.read()
f.close()
# now parse all the doxygen fields
ret = 0
for m in re.finditer(r"/\*\*(.*?)\*/", fcontent, re.DOTALL):
dcontent = m.group(1)
# we don't care about doxygen fields not about a class
if r"\class" in dcontent and dcontent != " \class classname ":
# do we have a line with the expected content?
if not re.search(r"\\ingroup .*"+group+r"(\s|$)", dcontent, re.MULTILINE):
# get class name and the line for debug output
cname = re.search(r"\\class +([^ ]*)", dcontent).group(1).strip()
line = len(fcontent[:m.start(1)].splitlines())
sys.stderr.write(r'%s:%s: error: "\ingroup %s" not set in class %s.' % (fname, line, group, cname) +"\n")
ret = 1
return ret
def main():
# first arg is the command
command = sys.argv[1]
if command == "set":
if len(sys.argv) < 4:
usage()
return 1
# second arg is the module name, and the rest are the files to process
module = sys.argv[2]
files = sys.argv[3:]
for fname in files:
setGroup(fname, module)
return 0
elif command == "massive-set":
if len(sys.argv) < 2:
usage()
return 1
if len(sys.argv) >= 3:
d = sys.argv[2]
else:
d = sys.path[0]+"/../.."
cmm = os.path.abspath(d+"/*/*/*/itk-module.cmake")
for fname in glob.glob(cmm):
f = open(fname, "r")
mcontent = f.read()
f.close()
module = re.search(r"itk_module\(([^ )]+)", mcontent).group(1)
dname = os.path.dirname(fname)
for fname2 in glob.glob(dname+"/include/*.h"):
setGroup(fname2, module)
return 0
elif command == "check":
if len(sys.argv) < 4:
usage()
return 1
# second arg is the module name, and the rest are the files to process
module = sys.argv[2]
files = sys.argv[3:]
ret = 0
count = 0
for fname in files:
if os.path.isdir(fname):
for fname2 in glob.glob(fname+"/*.h"):
count += 1
ret = max( ret, checkGroup(fname2, module) )
else:
count += 1
ret = max( ret, checkGroup(fname, module) )
sys.stderr.write(str(count)+" headers checked."+"\n")
return ret
elif command == "massive-check":
if len(sys.argv) < 2:
usage()
return 1
if len(sys.argv) >= 3:
d = sys.argv[2]
else:
d = sys.path[0]+"/../.."
cmm = os.path.abspath(d+"/*/*/*/itk-module.cmake")
ret = 0
count = 0
for fname in glob.glob(cmm):
f = open(fname, "r")
mcontent = f.read()
f.close()
module = re.search(r"itk_module\(([^ )]+)", mcontent).group(1)
dname = os.path.dirname(fname)
for fname2 in glob.glob(dname+"/include/*.h"):
count += 1
ret = max( ret, checkGroup(fname2, module) )
sys.stderr.write(str(count) + " headers checked."+"\n")
return ret
else:
sys.stderr.write("Unknown command " + command + "\n")
usage()
return 1
if __name__ == "__main__":
ret = main()
sys.exit(ret)
|
apache-2.0
|
RohitDas/cubeproject
|
lib/django/contrib/gis/db/backends/spatialite/schema.py
|
518
|
6882
|
from django.db.backends.sqlite3.schema import DatabaseSchemaEditor
from django.db.utils import DatabaseError
class SpatialiteSchemaEditor(DatabaseSchemaEditor):
sql_add_geometry_column = (
"SELECT AddGeometryColumn(%(table)s, %(column)s, %(srid)s, "
"%(geom_type)s, %(dim)s, %(null)s)"
)
sql_add_spatial_index = "SELECT CreateSpatialIndex(%(table)s, %(column)s)"
sql_drop_spatial_index = "DROP TABLE idx_%(table)s_%(column)s"
sql_remove_geometry_metadata = "SELECT DiscardGeometryColumn(%(table)s, %(column)s)"
sql_discard_geometry_columns = "DELETE FROM %(geom_table)s WHERE f_table_name = %(table)s"
sql_update_geometry_columns = (
"UPDATE %(geom_table)s SET f_table_name = %(new_table)s "
"WHERE f_table_name = %(old_table)s"
)
geometry_tables = [
"geometry_columns",
"geometry_columns_auth",
"geometry_columns_time",
"geometry_columns_statistics",
]
def __init__(self, *args, **kwargs):
super(SpatialiteSchemaEditor, self).__init__(*args, **kwargs)
self.geometry_sql = []
def geo_quote_name(self, name):
return self.connection.ops.geo_quote_name(name)
def column_sql(self, model, field, include_default=False):
from django.contrib.gis.db.models.fields import GeometryField
if not isinstance(field, GeometryField):
return super(SpatialiteSchemaEditor, self).column_sql(model, field, include_default)
# Geometry columns are created by the `AddGeometryColumn` function
self.geometry_sql.append(
self.sql_add_geometry_column % {
"table": self.geo_quote_name(model._meta.db_table),
"column": self.geo_quote_name(field.column),
"srid": field.srid,
"geom_type": self.geo_quote_name(field.geom_type),
"dim": field.dim,
"null": int(not field.null),
}
)
if field.spatial_index:
self.geometry_sql.append(
self.sql_add_spatial_index % {
"table": self.quote_name(model._meta.db_table),
"column": self.quote_name(field.column),
}
)
return None, None
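# Illustrative example (hypothetical model and field): for a model whose
# db_table is "geo_city" with a PointField named "location" (srid=4326,
# dim=2, null=False), the deferred statement appended above renders roughly as
#   SELECT AddGeometryColumn('geo_city', 'location', 4326, 'POINT', 2, 1)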
def remove_geometry_metadata(self, model, field):
self.execute(
self.sql_remove_geometry_metadata % {
"table": self.quote_name(model._meta.db_table),
"column": self.quote_name(field.column),
}
)
self.execute(
self.sql_drop_spatial_index % {
"table": model._meta.db_table,
"column": field.column,
}
)
def create_model(self, model):
super(SpatialiteSchemaEditor, self).create_model(model)
# Create geometry columns
for sql in self.geometry_sql:
self.execute(sql)
self.geometry_sql = []
def delete_model(self, model, **kwargs):
from django.contrib.gis.db.models.fields import GeometryField
# Drop spatial metadata (dropping the table does not automatically remove them)
for field in model._meta.local_fields:
if isinstance(field, GeometryField):
self.remove_geometry_metadata(model, field)
# Make sure all geom stuff is gone
for geom_table in self.geometry_tables:
try:
self.execute(
self.sql_discard_geometry_columns % {
"geom_table": geom_table,
"table": self.quote_name(model._meta.db_table),
}
)
except DatabaseError:
pass
super(SpatialiteSchemaEditor, self).delete_model(model, **kwargs)
def add_field(self, model, field):
from django.contrib.gis.db.models.fields import GeometryField
if isinstance(field, GeometryField):
# Populate self.geometry_sql
self.column_sql(model, field)
for sql in self.geometry_sql:
self.execute(sql)
self.geometry_sql = []
else:
super(SpatialiteSchemaEditor, self).add_field(model, field)
def remove_field(self, model, field):
from django.contrib.gis.db.models.fields import GeometryField
# NOTE: If the field is a geometry field, the table is just recreated;
# the parent's remove_field can't be used because it would skip the
# recreation if the field does not have a database type. Geometry fields
# do not have a db type because they are added and removed via stored
# procedures.
if isinstance(field, GeometryField):
self._remake_table(model, delete_fields=[field])
else:
super(SpatialiteSchemaEditor, self).remove_field(model, field)
def alter_db_table(self, model, old_db_table, new_db_table):
from django.contrib.gis.db.models.fields import GeometryField
# Remove geometry-ness from temp table
for field in model._meta.local_fields:
if isinstance(field, GeometryField):
self.execute(
self.sql_remove_geometry_metadata % {
"table": self.quote_name(old_db_table),
"column": self.quote_name(field.column),
}
)
# Alter table
super(SpatialiteSchemaEditor, self).alter_db_table(model, old_db_table, new_db_table)
# Repoint any straggler names
for geom_table in self.geometry_tables:
try:
self.execute(
self.sql_update_geometry_columns % {
"geom_table": geom_table,
"old_table": self.quote_name(old_db_table),
"new_table": self.quote_name(new_db_table),
}
)
except DatabaseError:
pass
# Re-add geometry-ness and rename spatial index tables
for field in model._meta.local_fields:
if isinstance(field, GeometryField):
self.execute(self.sql_add_geometry_column % {
"table": self.geo_quote_name(new_db_table),
"column": self.geo_quote_name(field.column),
"srid": field.srid,
"geom_type": self.geo_quote_name(field.geom_type),
"dim": field.dim,
"null": int(not field.null),
})
if getattr(field, 'spatial_index', False):
self.execute(self.sql_rename_table % {
"old_table": self.quote_name("idx_%s_%s" % (old_db_table, field.column)),
"new_table": self.quote_name("idx_%s_%s" % (new_db_table, field.column)),
})
|
bsd-3-clause
|
BlueBrain/ITK
|
Modules/ThirdParty/pygccxml/src/pygccxml/binary_parsers/undname.py
|
12
|
10352
|
# Copyright 2014 Insight Software Consortium.
# Copyright 2004-2008 Roman Yakovenko.
# Distributed under the Boost Software License, Version 1.0.
# See http://www.boost.org/LICENSE_1_0.txt
"""
provides low-level functionality needed to undecorate/demangle
compiler-generated unique names and map them to the declarations
On Windows:
ctypes package is used to call `UnDecorateSymbolName` function from
`dbghelp.dll`
On Linux:
"nm" utility is used.
"""
import os
import re
import ctypes
from .. import declarations
class UNDECORATE_NAME_OPTIONS:
"""defines few constants for `UnDecorateSymbolName` function"""
UNDNAME_COMPLETE = 0x0000 # Enables full undecoration.
# Removes leading underscores from Microsoft extended keywords.
UNDNAME_NO_LEADING_UNDERSCORES = 0x0001
# Disables expansion of Microsoft extended keywords.
UNDNAME_NO_MS_KEYWORDS = 0x0002
# Disables expansion of return type for primary declaration.
UNDNAME_NO_FUNCTION_RETURNS = 0x0004
# Disables expansion of the declaration model.
UNDNAME_NO_ALLOCATION_MODEL = 0x0008
# Disables expansion of the declaration language specifier.
UNDNAME_NO_ALLOCATION_LANGUAGE = 0x0010
UNDNAME_RESERVED1 = 0x0020 # RESERVED.
UNDNAME_RESERVED2 = 0x0040 # RESERVED.
UNDNAME_NO_THISTYPE = 0x0060 # Disables all modifiers on the this type.
# Disables expansion of access specifiers for members.
UNDNAME_NO_ACCESS_SPECIFIERS = 0x0080
# Disables expansion of "throw-signatures" for functions and pointers to
# functions.
UNDNAME_NO_THROW_SIGNATURES = 0x0100
# Disables expansion of static or virtual members.
UNDNAME_NO_MEMBER_TYPE = 0x0200
# Disables expansion of the Microsoft model for UDT returns.
UNDNAME_NO_RETURN_UDT_MODEL = 0x0400
UNDNAME_32_BIT_DECODE = 0x0800 # Undecorates 32-bit decorated names.
# Gets only the name for primary declaration; returns just [scope::]name.
# Expands template params.
UNDNAME_NAME_ONLY = 0x1000
# Input is just a type encoding; composes an abstract declarator.
UNDNAME_TYPE_ONLY = 0x2000
# The real template parameters are available.
UNDNAME_HAVE_PARAMETERS = 0x4000
UNDNAME_NO_ECSU = 0x8000 # Suppresses enum/class/struct/union.
# Suppresses check for valid identifier characters.
UNDNAME_NO_IDENT_CHAR_CHECK = 0x10000
UNDNAME_NO_PTR64 = 0x20000 # Does not include ptr64 in output.
UNDNAME_SCOPES_ONLY = UNDNAME_NO_LEADING_UNDERSCORES \
| UNDNAME_NO_MS_KEYWORDS \
| UNDNAME_NO_FUNCTION_RETURNS \
| UNDNAME_NO_ALLOCATION_MODEL \
| UNDNAME_NO_ALLOCATION_LANGUAGE \
| UNDNAME_NO_ACCESS_SPECIFIERS \
| UNDNAME_NO_THROW_SIGNATURES \
| UNDNAME_NO_MEMBER_TYPE \
| UNDNAME_NO_ECSU \
| UNDNAME_NO_IDENT_CHAR_CHECK
SHORT_UNIQUE_NAME = UNDNAME_NO_MS_KEYWORDS \
| UNDNAME_NO_ACCESS_SPECIFIERS | UNDNAME_NO_ECSU
class undname_creator_t:
"""implementation details - should not be used directly
formats declarations' string representation and exported symbols, so they
can be matched later.
The class formats variables, free and member functions, and symbols exported
from .dll, .map and .so files.
On Windows, the class works with the unique names produced by the MSVC
compiler and with the undecorated names produced by `dbghelp.dll`.
On Linux, the class works with the mangled names produced by the GCC-XML
( GCC 4.2 ) compiler and the demangled names produced by the "nm" utility.
"""
def __init__(self):
if 'nt' == os.name:
import ctypes.wintypes
self.__undname = ctypes.windll.dbghelp.UnDecorateSymbolName
self.__undname.argtypes = [
ctypes.c_char_p,
ctypes.c_char_p,
ctypes.c_uint,
ctypes.c_uint]
self.__clean_ecsu = (
re.compile(r'(?P<startswith>^|\W)(?:(class|enum|struct|union)\s)'),
'%(startswith)s')
self.__fundamental_types = (
('short unsigned int',
'unsigned short'),
('short int',
'short'),
('long int',
'long'),
('long unsigned int',
'unsigned long'))
self.__calling_conventions = (
re.compile((
r'(?P<startswith>^|\s)(?:__(cdecl|clrcall|stdcall|fastcall' +
'|thiscall)\s)')), '%(startswith)s')
def normalize_undecorated(self, undname, options=None):
if options is None:
options = UNDECORATE_NAME_OPTIONS.SHORT_UNIQUE_NAME
if UNDECORATE_NAME_OPTIONS.UNDNAME_NO_ECSU & options:
expr, substitute = self.__clean_ecsu
undname = expr.sub(lambda m: substitute % m.groupdict(), undname)
if UNDECORATE_NAME_OPTIONS.UNDNAME_NO_ACCESS_SPECIFIERS & options:
for prefix in ('public: ', 'private: ', 'protected: '):
if undname.startswith(prefix):
undname = undname[len(prefix):]
break
if UNDECORATE_NAME_OPTIONS.UNDNAME_NO_MS_KEYWORDS & options:
expr, substitute = self.__calling_conventions
undname = expr.sub(lambda m: substitute % m.groupdict(), undname)
return undname.strip()
def undecorate_blob(self, name, options=None):
if options is None:
options = UNDECORATE_NAME_OPTIONS.SHORT_UNIQUE_NAME
buffer = ctypes.create_string_buffer(1024 * 16)
res = self.__undname(str(name), buffer, ctypes.sizeof(buffer), options)
if res:
return self.normalize_undecorated(str(buffer[:res]))
else:
return name
def __remove_leading_scope(self, s):
if s and s.startswith('::'):
return s[2:]
else:
return s
def __format_type_as_undecorated(self, type_, is_argument, hint):
result = []
type_ = declarations.remove_alias(type_)
if declarations.is_array(type_):
result.append(declarations.array_item_type(type_).decl_string)
result.append('*')
if is_argument:
result.append('const')
else:
result.append(self.__remove_leading_scope(type_.decl_string))
result = ' '.join(result)
if hint == 'nm':
for x in ('*', '&'):
result = result.replace(' ' + x, x)
return result
def __normalize(self, name):
for what, with_ in self.__fundamental_types:
name = name.replace(what, with_)
name = name.replace(', ', ',')
return name
def format_argtypes(self, argtypes, hint):
if not argtypes:
if hint == 'msvc':
return 'void'
else:
return ''
else:
formater = lambda type_: self.__format_type_as_undecorated(
type_, True, hint)
argsep = ','
if hint == 'nm':
# ugly hack: ', ' is later replaced with ',', so a single
# space will still remain between arguments
argsep = ', '
return argsep.join(map(formater, argtypes))
def format_calldef(self, calldef, hint):
calldef_type = calldef.function_type()
result = []
is_mem_fun = isinstance(calldef, declarations.member_calldef_t)
if is_mem_fun and hint == 'msvc' and calldef.virtuality != \
declarations.VIRTUALITY_TYPES.NOT_VIRTUAL:
result.append('virtual ')
if is_mem_fun and hint == 'msvc' and calldef.has_static:
result.append('static ')
if hint == 'msvc' and calldef_type.return_type:
# nm doesn't dump return type information
result.append(
self.__format_type_as_undecorated(
calldef.return_type, False, hint))
result.append(' ')
if is_mem_fun:
result.append(
self.__remove_leading_scope(
calldef.parent.decl_string) + '::')
result.append(calldef.name)
if isinstance(
calldef, (declarations.constructor_t, declarations.destructor_t)) \
and declarations.templates.is_instantiation(calldef.parent.name):
if hint == 'msvc':
result.append('<%s>' % ','.join(
declarations.templates.args(calldef.parent.name)))
result.append('(%s)' % self.format_argtypes(
calldef_type.arguments_types, hint))
if is_mem_fun and calldef.has_const:
if hint == 'nm':
result.append(' ')
result.append('const')
return ''.join(result)
def format_var(self, decl, hint):
result = []
is_mem_var = isinstance(decl.parent, declarations.class_t)
if is_mem_var and decl.type_qualifiers.has_static and hint == 'msvc':
result.append('static ')
if hint == 'msvc':
result.append(
self.__format_type_as_undecorated(decl.type, False, hint))
result.append(' ')
if is_mem_var:
result.append(
self.__remove_leading_scope(decl.parent.decl_string) + '::')
result.append(decl.name)
return ''.join(result)
def format_decl(self, decl, hint=None):
"""returns string, which contains full function name formatted exactly
as the result of `dbghelp.UnDecorateSymbolName` with the
UNDNAME_NO_MS_KEYWORDS | UNDNAME_NO_ACCESS_SPECIFIERS | UNDNAME_NO_ECSU
options.
Different compilers/utilities undecorate/demangle mangled strings
( unique names ) in different ways.
The `hint` argument tells pygccxml how to format declarations, so they
can be mapped later to the blobs.
The valid options are: "msvc" and "nm".
"""
name = None
if hint is None:
if 'nt' == os.name:
hint = 'msvc'
else:
hint = 'nm'
if isinstance(decl, declarations.calldef_t):
name = self.format_calldef(decl, hint)
elif isinstance(decl, declarations.variable_t):
name = self.format_var(decl, hint)
else:
raise NotImplementedError()
return self.__normalize(name)
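# Minimal usage sketch (hypothetical; `decl` would come from a parsed pygccxml
# declaration tree, which is not shown here):
#
#     creator = undname_creator_t()
#     formatted = creator.format_decl(decl, hint='msvc')
#     unmangled = creator.undecorate_blob('?foo@@YAXXZ')  # Windows only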
|
apache-2.0
|
rail/treeherder
|
tests/webapp/api/test_note_api.py
|
11
|
5277
|
import json
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from rest_framework.test import APIClient
def test_note_list(webapp, sample_notes, jm):
"""
test retrieving a list of notes from the note-list endpoint
"""
job_id = jm.get_job_list(0, 1)[0]["id"]
resp = webapp.get(
reverse("note-list", kwargs={"project": jm.project}),
{"job_id": job_id}
)
assert resp.status_int == 200
assert isinstance(resp.json, list)
note_list = resp.json
assert set(note_list[0].keys()) == {
'note_timestamp',
'job_id',
'who',
'failure_classification_id',
'note',
'active_status',
'id'
}
# remove fields we don't want to compare
for note in note_list:
del(note["note_timestamp"])
del(note["id"])
assert len(note_list) == 2
exp_notes = [
{
"job_id": job_id,
"failure_classification_id": 1,
"who": "kellyclarkson",
"note": "you look like a man-o-lantern",
"active_status": "active",
},
{
"job_id": job_id,
"failure_classification_id": 0,
"who": "kellyclarkson",
"note": "you look like a man-o-lantern",
"active_status": "active",
}
]
import pprint
assert exp_notes == note_list, pprint.pformat({
"exp": exp_notes,
"act": note_list
})
jm.disconnect()
def test_note_detail(webapp, sample_notes, jm):
"""
test retrieving a single note from the notes-detail
endpoint.
"""
job = jm.get_job_list(0, 1)[0]
note = jm.get_job_note_list(job_id=job["id"])[0]
resp = webapp.get(
reverse("note-detail",
kwargs={"project": jm.project, "pk": int(note["id"])})
)
assert resp.status_int == 200
assert isinstance(resp.json, dict)
assert resp.json["id"] == note["id"]
assert set(resp.json.keys()) == set([
'note_timestamp',
'job_id',
'who',
'failure_classification_id',
'note',
'active_status',
'id'
])
jm.disconnect()
def test_note_detail_not_found(webapp, jm):
"""
test retrieving an HTTP 404 from the note-detail
endpoint.
"""
resp = webapp.get(
reverse("note-detail",
kwargs={"project": jm.project, "pk": -32767}),
expect_errors=True
)
assert resp.status_int == 404
jm.disconnect()
def test_note_detail_bad_project(webapp, jm):
"""
test retrieving an HTTP 404 from the note-detail
endpoint.
"""
resp = webapp.get(
reverse("note-detail",
kwargs={"project": "foo", "pk": -32767}),
expect_errors=True
)
assert resp.status_int == 404
assert resp.json == {"detail": "No project with name foo"}
jm.disconnect()
def test_create_note(webapp, eleven_jobs_stored, mock_message_broker, jm):
"""
test creating a single note via endpoint when authenticated
"""
client = APIClient()
user = User.objects.create(username="MyName", email="[email protected]")
client.force_authenticate(user=user)
job = jm.get_job_list(0, 1)[0]
resp = client.post(
reverse("note-list", kwargs={"project": jm.project}),
{
"job_id": job["id"],
"failure_classification_id": 2,
"who": "kelly clarkson",
"note": "you look like a man-o-lantern"
}
)
user.delete()
assert resp.status_code == 200
content = json.loads(resp.content)
assert content['message'] == 'note stored for job %s' % job["id"]
note_list = jm.get_job_note_list(job_id=job["id"])
del(note_list[0]["note_timestamp"])
assert note_list[0] == {
u'job_id': job["id"],
u'who': u'[email protected]',
u'failure_classification_id': 2L,
u'note': u'you look like a man-o-lantern',
u'active_status': u'active',
u'id': 1L
}
jm.disconnect()
def test_create_note_no_auth(eleven_jobs_stored, jm):
"""
test creating a single note via endpoint when not authenticated
gets a 403 Forbidden
"""
client = APIClient()
user = User.objects.create(username="MyName")
job = jm.get_job_list(0, 1)[0]
resp = client.post(
reverse("note-list", kwargs={"project": jm.project}),
{
"job_id": job["id"],
"failure_classification_id": 2,
"who": "kelly clarkson",
"note": "you look like a man-o-lantern"
}
)
user.delete()
assert resp.status_code == 403
def test_delete_note(webapp, sample_notes, mock_message_broker, jm):
"""
test deleting a single note via endpoint
"""
client = APIClient()
user = User.objects.create(username="MyName", is_staff=True)
client.force_authenticate(user=user)
notes = jm.get_job_note_list(job_id=1)
resp = client.delete(
reverse("note-detail", kwargs={"project": jm.project, "pk": notes[0]['id']}),
)
new_notes = jm.get_job_note_list(job_id=1)
user.delete()
assert resp.status_code == 200, resp
assert len(new_notes) == len(notes) - 1
jm.disconnect()
|
mpl-2.0
|
grschafer/alacrity
|
alacrity/spritesheets/hero_icons_to_sheet.py
|
1
|
1432
|
import os
from PIL import Image
import dota_file_parser
# data from dota2 files: resource/flash3/images/heroes
print 'Expecting hero icons to be in "heroes" (e.g. "heroes/antimage.png")'
print 'Expecting hero data to be in current folder (e.g. "npc_heroes.txt")'
hero_data = dota_file_parser.parse('npc_heroes.txt')
hero_list = {k:v['HeroID'] for k,v in hero_data['DOTAHeroes'].iteritems() \
if k.startswith('npc_dota_hero') and
'HeroID' in v}
width = 128/2 # fullsize: 128
height = 72/2 # fullsize: 72
filename = "small_hero_sheet"
images = []
for name,hero_id in hero_list.iteritems():
# npc_dota_hero_ = 14 characters
name = name[14:]
img_name = os.path.join('heroes', name + '.png')
try:
im = Image.open(img_name)
im.thumbnail((width, height), Image.ANTIALIAS)
images.append((hero_id, im))
except IOError as e:
print e
max_id = max(hero_list.values())
sheet_size = ((max_id + 1) * width, height)
sheet = Image.new("RGB", sheet_size)
for hero_id,img in images:
sheet.paste(img, (hero_id * width, 0))
sheet.save('{}.png'.format(filename))
# make CSS
with open('{}.css.erb'.format(filename), 'wb') as f:
for name,hero_id in hero_list.iteritems():
f.write(""".{}-icon {{ background: url('<%= asset_path "{}.png" %>') no-repeat {}px 0px; width: {}px; height: {}px; }}\n""".format(name, filename, -hero_id * width, width, height))
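# Illustrative output (assuming a hypothetical HeroID of 1 for
# npc_dota_hero_antimage): the loop above emits a rule roughly like
# .npc_dota_hero_antimage-icon { background: url('<%= asset_path "small_hero_sheet.png" %>') no-repeat -64px 0px; width: 64px; height: 36px; }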
|
mit
|
chaimleib/intervaltree
|
test/intervaltree_methods/restructure_test.py
|
1
|
17270
|
"""
intervaltree: A mutable, self-balancing interval tree for Python 2 and 3.
Queries may be by point, by range overlap, or by range envelopment.
Test module: IntervalTree, Special methods
Copyright 2013-2018 Chaim Leib Halbert
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
from intervaltree import Interval, IntervalTree
import pytest
from test import data
try:
import cPickle as pickle
except ImportError:
import pickle
# -----------------------------------------------------------------------------
# REMOVAL
# -----------------------------------------------------------------------------
def test_emptying_partial():
t = IntervalTree.from_tuples(data.ivs1.data)
assert t[7:]
t.remove_overlap(7, t.end())
assert not t[7:]
t = IntervalTree.from_tuples(data.ivs1.data)
assert t[:7]
t.remove_overlap(t.begin(), 7)
assert not t[:7]
def test_remove_overlap():
t = IntervalTree.from_tuples(data.ivs1.data)
assert t[1]
t.remove_overlap(1)
assert not t[1]
t.verify()
assert t[8]
t.remove_overlap(8)
assert not t[8]
t.verify()
# -----------------------------------------------------------------------------
# MERGE_OVERLAPS
# -----------------------------------------------------------------------------
def test_merge_overlaps_empty():
t = IntervalTree()
t.merge_overlaps()
t.verify()
assert len(t) == 0
def test_merge_overlaps_gapless():
# default strict=True
t = IntervalTree.from_tuples(data.ivs2.data)
t.merge_overlaps()
t.verify()
assert [(iv.begin, iv.end, iv.data) for iv in sorted(t)] == data.ivs2.data
# strict=False
t = IntervalTree.from_tuples(data.ivs2.data)
rng = t.range()
t.merge_overlaps(strict=False)
t.verify()
assert len(t) == 1
assert t.pop() == rng
def test_merge_overlaps_with_gap():
t = IntervalTree.from_tuples(data.ivs1.data)
t.merge_overlaps()
t.verify()
assert len(t) == 2
assert t == IntervalTree([Interval(1, 2, '[1,2)'), Interval(4, 15)])
def test_merge_overlaps_reducer_wo_initializer():
def reducer(old, new):
return "%s, %s" % (old, new)
# empty tree
e = IntervalTree()
e.merge_overlaps(data_reducer=reducer)
e.verify()
assert not e
# one Interval in tree
o = IntervalTree.from_tuples([(1, 2, 'hello')])
o.merge_overlaps(data_reducer=reducer)
o.verify()
assert len(o) == 1
assert sorted(o) == [Interval(1, 2, 'hello')]
# many Intervals in tree, with gap
t = IntervalTree.from_tuples(data.ivs1.data)
t.merge_overlaps(data_reducer=reducer)
t.verify()
assert len(t) == 2
assert sorted(t) == [
Interval(1, 2, '[1,2)'),
Interval(4, 15, '[4,7), [5,9), [6,10), [8,10), [8,15), [10,12), [12,14), [14,15)')
]
def test_merge_overlaps_reducer_with_initializer():
def reducer(old, new):
return old + [new]
# empty tree
e = IntervalTree()
e.merge_overlaps(data_reducer=reducer, data_initializer=[])
e.verify()
assert not e
# one Interval in tree
o = IntervalTree.from_tuples([(1, 2, 'hello')])
o.merge_overlaps(data_reducer=reducer, data_initializer=[])
o.verify()
assert len(o) == 1
assert sorted(o) == [Interval(1, 2, ['hello'])]
# many Intervals in tree, with gap
t = IntervalTree.from_tuples(data.ivs1.data)
t.merge_overlaps(data_reducer=reducer, data_initializer=[])
t.verify()
assert len(t) == 2
assert sorted(t) == [
Interval(1, 2, ['[1,2)']),
Interval(4, 15, [
'[4,7)',
'[5,9)',
'[6,10)',
'[8,10)',
'[8,15)',
'[10,12)',
'[12,14)',
'[14,15)',
])
]
# -----------------------------------------------------------------------------
# MERGE_EQUALS
# -----------------------------------------------------------------------------
def test_merge_equals_empty():
t = IntervalTree()
t.merge_equals()
t.verify()
assert len(t) == 0
def test_merge_equals_wo_dupes():
t = IntervalTree.from_tuples(data.ivs1.data)
orig = IntervalTree.from_tuples(data.ivs1.data)
assert orig == t
t.merge_equals()
t.verify()
assert orig == t
def test_merge_equals_with_dupes():
t = IntervalTree.from_tuples(data.ivs1.data)
orig = IntervalTree.from_tuples(data.ivs1.data)
assert orig == t
# one dupe
assert t.containsi(4, 7, '[4,7)')
t.addi(4, 7, 'foo')
assert len(t) == len(orig) + 1
assert orig != t
t.merge_equals()
t.verify()
assert t != orig
assert t.containsi(4, 7)
assert not t.containsi(4, 7, 'foo')
assert not t.containsi(4, 7, '[4,7)')
# two dupes
t = IntervalTree.from_tuples(data.ivs1.data)
t.addi(4, 7, 'foo')
assert t.containsi(10, 12, '[10,12)')
t.addi(10, 12, 'bar')
assert len(t) == len(orig) + 2
assert t != orig
t.merge_equals()
t.verify()
assert t != orig
assert t.containsi(4, 7)
assert not t.containsi(4, 7, 'foo')
assert not t.containsi(4, 7, '[4,7)')
assert t.containsi(10, 12)
assert not t.containsi(10, 12, 'bar')
assert not t.containsi(10, 12, '[10,12)')
def test_merge_equals_reducer_wo_initializer():
def reducer(old, new):
return "%s, %s" % (old, new)
# empty tree
e = IntervalTree()
e.merge_equals(data_reducer=reducer)
e.verify()
assert not e
# one Interval in tree, no change
o = IntervalTree.from_tuples([(1, 2, 'hello')])
o.merge_equals(data_reducer=reducer)
o.verify()
assert len(o) == 1
assert sorted(o) == [Interval(1, 2, 'hello')]
# many Intervals in tree, no change
t = IntervalTree.from_tuples(data.ivs1.data)
orig = IntervalTree.from_tuples(data.ivs1.data)
t.merge_equals(data_reducer=reducer)
t.verify()
assert len(t) == len(orig)
assert t == orig
# many Intervals in tree, with change
t = IntervalTree.from_tuples(data.ivs1.data)
orig = IntervalTree.from_tuples(data.ivs1.data)
t.addi(4, 7, 'foo')
t.merge_equals(data_reducer=reducer)
t.verify()
assert len(t) == len(orig)
assert t != orig
assert not t.containsi(4, 7, 'foo')
assert not t.containsi(4, 7, '[4,7)')
assert t.containsi(4, 7, '[4,7), foo')
def test_merge_equals_reducer_with_initializer():
def reducer(old, new):
return old + [new]
# empty tree
e = IntervalTree()
e.merge_equals(data_reducer=reducer, data_initializer=[])
e.verify()
assert not e
# one Interval in tree, no change
o = IntervalTree.from_tuples([(1, 2, 'hello')])
o.merge_equals(data_reducer=reducer, data_initializer=[])
o.verify()
assert len(o) == 1
assert sorted(o) == [Interval(1, 2, ['hello'])]
# many Intervals in tree, no change
t = IntervalTree.from_tuples(data.ivs1.data)
orig = IntervalTree.from_tuples(data.ivs1.data)
t.merge_equals(data_reducer=reducer, data_initializer=[])
t.verify()
assert len(t) == len(orig)
assert t != orig
assert sorted(t) == [Interval(b, e, [d]) for b, e, d in sorted(orig)]
# many Intervals in tree, with change
t = IntervalTree.from_tuples(data.ivs1.data)
orig = IntervalTree.from_tuples(data.ivs1.data)
t.addi(4, 7, 'foo')
t.merge_equals(data_reducer=reducer, data_initializer=[])
t.verify()
assert len(t) == len(orig)
assert t != orig
assert not t.containsi(4, 7, 'foo')
assert not t.containsi(4, 7, '[4,7)')
assert t.containsi(4, 7, ['[4,7)', 'foo'])
# ------------------------------------------------------------------------------
# MERGE_NEIGHBORS
# ------------------------------------------------------------------------------
def test_merge_neighbors_empty():
t = IntervalTree()
t.merge_neighbors()
t.verify()
assert len(t) == 0
def test_merge_neighbors_gapless():
t = IntervalTree.from_tuples(data.ivs2.data)
t.merge_neighbors()
t.verify()
assert len(t) == 1
for begin, end, _ in t.items():
assert begin == data.ivs2.data[0][0]
assert end == data.ivs2.data[-1][1]
def test_merge_neighbors_with_gap_strict():
def reducer(old, new):
return "%s, %s" % (old, new)
# default distance=1
t = IntervalTree.from_tuples(data.ivs1.data)
t.merge_neighbors(data_reducer=reducer, distance=1, strict=True)
t.verify()
assert len(t) == 7
assert sorted(t) == [
Interval(1, 2, '[1,2)'),
Interval(4, 7, '[4,7)'),
Interval(5, 9, '[5,9)'),
Interval(6, 10, '[6,10)'),
Interval(8, 10, '[8,10)'),
Interval(8, 15, '[8,15)'),
Interval(10, 15, '[10,12), [12,14), [14,15)'),
]
# distance=2
t = IntervalTree.from_tuples(data.ivs1.data)
t.merge_neighbors(data_reducer=reducer, distance=2, strict=True)
t.verify()
assert len(t) == 6
assert sorted(t) == [
Interval(1, 7, '[1,2), [4,7)'),
Interval(5, 9, '[5,9)'),
Interval(6, 10, '[6,10)'),
Interval(8, 10, '[8,10)'),
Interval(8, 15, '[8,15)'),
Interval(10, 15, '[10,12), [12,14), [14,15)'),
]
def test_merge_neighbors_with_gap_nonstrict():
def reducer(old, new):
return "%s, %s" % (old, new)
# default distance=1
t = IntervalTree.from_tuples(data.ivs1.data)
t.merge_neighbors(data_reducer=reducer, distance=1, strict=False)
t.verify()
assert len(t) == 2
assert sorted(t) == [
Interval(1, 2, '[1,2)'),
Interval(4, 15, '[4,7), [5,9), [6,10), [8,10), [8,15), [10,12), [12,14), [14,15)'),
]
# distance=2
t = IntervalTree.from_tuples(data.ivs1.data)
t.merge_neighbors(data_reducer=reducer, distance=2, strict=False)
t.verify()
assert len(t) == 1
assert sorted(t) == [
Interval(1, 15, '[1,2), [4,7), [5,9), [6,10), [8,10), [8,15), [10,12), [12,14), [14,15)')
]
def test_merge_neighbors_reducer_wo_initializer():
def reducer(old, new):
return "%s, %s" % (old, new)
# empty tree
e = IntervalTree()
e.merge_neighbors(data_reducer=reducer)
e.verify()
assert not e
# one Interval in tree
o = IntervalTree.from_tuples([(1, 2, 'hello')])
o.merge_neighbors(data_reducer=reducer)
o.verify()
assert len(o) == 1
assert sorted(o) == [Interval(1, 2, 'hello')]
# many Intervals in tree, without gap
_data_no_gap = (
(1, 2, '[1,2)'),
(2, 3, '[2,3)'),
(3, 4, '[3,4)'),
)
t = IntervalTree.from_tuples(_data_no_gap)
t.merge_neighbors(data_reducer=reducer)
t.verify()
assert len(t) == 1
for begin, end, _data in t.items():
assert begin == 1
assert end == 4
assert _data == '[1,2), [2,3), [3,4)'
# many Intervals in tree, with gap and distance=2
_data_gap = (
(1, 2, '[1,2)'),
(4, 6, '[4,6)'),
(5, 8, '[5,8)'),
(13, 15, '[13,15)'),
)
t = IntervalTree.from_tuples(_data_gap)
t.merge_neighbors(data_reducer=reducer, distance=2)
t.verify()
assert len(t) == 3
assert sorted(t) == [
Interval(1, 6, '[1,2), [4,6)'),
Interval(5, 8, '[5,8)'),
Interval(13, 15, '[13,15)'),
]
def test_merge_neighbors_reducer_with_initializer():
def reducer(old, new):
return old + [new]
# empty tree
e = IntervalTree()
e.merge_neighbors(data_reducer=reducer, data_initializer=[])
e.verify()
assert not e
# one Interval in tree
o = IntervalTree.from_tuples([(1, 2, 'hello')])
o.merge_neighbors(data_reducer=reducer, data_initializer=[])
o.verify()
assert len(o) == 1
assert sorted(o) == [Interval(1, 2, ['hello'])]
# many Intervals in tree, without gap
_data_no_gap = (
(1, 2, '[1,2)'),
(2, 3, '[2,3)'),
(3, 4, '[3,4)'),
)
t = IntervalTree.from_tuples(_data_no_gap)
t.merge_neighbors(data_reducer=reducer, data_initializer=[])
t.verify()
assert len(t) == 1
for begin, end, _data in t.items():
assert begin == 1
assert end == 4
assert _data == ['[1,2)', '[2,3)', '[3,4)']
# many Intervals in tree, with gap and distance=2
_data_gap = (
(1, 2, '[1,2)'),
(4, 6, '[4,6)'),
(5, 8, '[5,8)'),
(13, 15, '[13,15)'),
)
t = IntervalTree.from_tuples(_data_gap)
t.merge_neighbors(data_reducer=reducer, data_initializer=[], distance=2)
t.verify()
assert len(t) == 3
assert sorted(t) == [
Interval(1, 6, ['[1,2)', '[4,6)']),
Interval(5, 8, ['[5,8)']),
Interval(13, 15, ['[13,15)']),
]
# -----------------------------------------------------------------------------
# CHOP
# -----------------------------------------------------------------------------
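# chop(begin, end) deletes the region [begin, end) from the tree: intervals
# overlapping the region are trimmed and intervals spanning it are split in two.
# The optional datafunc(iv, islower) supplies the data for the remaining pieces.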
def test_chop():
t = IntervalTree([Interval(0, 10)])
t.chop(3, 7)
assert len(t) == 2
assert sorted(t)[0] == Interval(0, 3)
assert sorted(t)[1] == Interval(7, 10)
t = IntervalTree([Interval(0, 10)])
t.chop(0, 7)
assert len(t) == 1
assert sorted(t)[0] == Interval(7, 10)
t = IntervalTree([Interval(0, 10)])
t.chop(5, 10)
assert len(t) == 1
assert sorted(t)[0] == Interval(0, 5)
t = IntervalTree([Interval(0, 10)])
t.chop(-5, 15)
assert len(t) == 0
t = IntervalTree([Interval(0, 10)])
t.chop(0, 10)
assert len(t) == 0
def test_chop_datafunc():
def datafunc(iv, islower):
oldlimit = iv[islower]
return "oldlimit: {0}, islower: {1}".format(oldlimit, islower)
t = IntervalTree([Interval(0, 10)])
t.chop(3, 7, datafunc)
assert len(t) == 2
assert sorted(t)[0] == Interval(0, 3, 'oldlimit: 10, islower: True')
assert sorted(t)[1] == Interval(7, 10, 'oldlimit: 0, islower: False')
t = IntervalTree([Interval(0, 10)])
t.chop(0, 7, datafunc)
assert len(t) == 1
assert sorted(t)[0] == Interval(7, 10, 'oldlimit: 0, islower: False')
t = IntervalTree([Interval(0, 10)])
t.chop(5, 10, datafunc)
assert len(t) == 1
assert sorted(t)[0] == Interval(0, 5, 'oldlimit: 10, islower: True')
t = IntervalTree([Interval(0, 10)])
t.chop(-5, 15, datafunc)
assert len(t) == 0
t = IntervalTree([Interval(0, 10)])
t.chop(0, 10, datafunc)
assert len(t) == 0
# -----------------------------------------------------------------------------
# SLICE
# -----------------------------------------------------------------------------
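# slice(point) splits every interval containing `point` into two touching
# intervals at that point; datafunc, if given, builds the data for the new halves.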
def test_slice():
t = IntervalTree([Interval(5, 15)])
t.slice(10)
assert sorted(t)[0] == Interval(5, 10)
assert sorted(t)[1] == Interval(10, 15)
t = IntervalTree([Interval(5, 15)])
t.slice(5)
assert sorted(t)[0] == Interval(5, 15)
t.slice(15)
assert sorted(t)[0] == Interval(5, 15)
t.slice(0)
assert sorted(t)[0] == Interval(5, 15)
t.slice(20)
assert sorted(t)[0] == Interval(5, 15)
def test_slice_datafunc():
def datafunc(iv, islower):
oldlimit = iv[islower]
return "oldlimit: {0}, islower: {1}".format(oldlimit, islower)
t = IntervalTree([Interval(5, 15)])
t.slice(10, datafunc)
assert sorted(t)[0] == Interval(5, 10, 'oldlimit: 15, islower: True')
assert sorted(t)[1] == Interval(10, 15, 'oldlimit: 5, islower: False')
t = IntervalTree([Interval(5, 15)])
t.slice(5, datafunc)
assert sorted(t)[0] == Interval(5, 15)
t.slice(15, datafunc)
assert sorted(t)[0] == Interval(5, 15)
t.slice(0, datafunc)
assert sorted(t)[0] == Interval(5, 15)
t.slice(20, datafunc)
assert sorted(t)[0] == Interval(5, 15)
# -----------------------------------------------------------------------------
# SPLIT
# -----------------------------------------------------------------------------
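# split_overlaps() splits intervals at each other's boundaries, so that afterwards
# any two intervals in the tree either share the same range or do not overlap at all.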
def test_split_overlap_empty():
t = IntervalTree()
t.split_overlaps()
t.verify()
assert not t
def test_split_overlap_single_member():
t = IntervalTree([Interval(0, 1)])
t.split_overlaps()
t.verify()
assert len(t) == 1
def test_split_overlap():
t = IntervalTree.from_tuples(data.ivs1.data)
t.split_overlaps()
t.verify()
while t:
iv = set(t).pop()
t.remove(iv)
for other in t.overlap(iv):
assert other.begin == iv.begin
assert other.end == iv.end
# -----------------------------------------------------------------------------
# PICKLE
# -----------------------------------------------------------------------------
def test_pickle():
t = IntervalTree.from_tuples(data.ivs1.data)
p = pickle.dumps(t)
t2 = pickle.loads(p)
assert t == t2
t2.verify()
if __name__ == "__main__":
pytest.main([__file__, '-v'])
|
apache-2.0
|
science-is-the-new-black/bio_stuff
|
translate/translate.py
|
1
|
1635
|
"""Script for translating given sequence."""
from translate_dicts import translation_code
import regex as re
def translate_handler(sequence, start=False, Frame='all', code='translation_code', reverse=True):
    if reverse:
        # Return translations of both the forward and the reversed sequence.
        return (translate(sequence, start=start, Frame=Frame, code=code),
                translate(sequence[::-1], start=start, Frame=Frame, code=code))
    else:
        return translate(sequence, start=start, Frame=Frame, code=code)
def translate(sequence, start=False, Frame='all', code='translation_code'):
    if code == 'translation_code':
        code_dict = translation_code
    else:
        raise ValueError("Unknown translation code: %s" % code)
if Frame == 'all':
frames = [0, 1, 2]
else:
frames = [int(Frame)]
if start is True:
starts = re.finditer(r'ATG.*', sequence, overlapped=True)
results = [match.group(0) for match in starts]
# print(results)
prot_sequences = []
for j in results:
prot_sequences.append('')
            for i in range(0, len(j), 3):
                codon = j[i:i+3]
                if len(codon) < 3:
                    # Skip a trailing partial codon.
                    break
                prot_sequences[-1] += code_dict[codon]
else:
prot_sequences = ['', '', '']
for j in frames:
            for i in range(j, len(sequence), 3):
                codon = sequence[i:i+3]
                if len(codon) < 3:
                    # Skip a trailing partial codon in this frame.
                    break
                prot_sequences[j] += code_dict[codon]
return prot_sequences
# translate('ACTGGTCAGCATGGGCTATGA', Frame = 1, start=True)
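# Hypothetical usage sketch (not part of the original script); it assumes that
# translate_dicts.translation_code maps every codon string to an amino-acid letter.
if __name__ == '__main__':
    forward, reverse = translate_handler('ACTGGTCAGCATGGGCTATGA', start=True)
    print(forward)  # proteins read from ATG starts on the forward sequence
    print(reverse)  # proteins read from ATG starts on the reversed sequence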
|
agpl-3.0
|
asfaltboy/GitSavvy
|
core/commands/init.py
|
1
|
5716
|
import os
import sublime
from sublime_plugin import WindowCommand
from ..git_command import GitCommand
from ...common import util
NO_REPO_MESSAGE = ("It looks like you haven't initialized Git in this directory. "
"Would you like to?")
REPO_PATH_PROMPT = "Enter root path of new git repo:"
CONFIRM_REINITIALIZE = ("It looks like Git is already initialized here. "
"Would you like to re-initialize?")
NAME_MESSAGE = "Enter your first and last name:"
EMAIL_MESSAGE = "Enter your email address:"
NO_CONFIG_MESSAGE = ("It looks like you haven't configured Git yet. Would you "
"like to enter your name and email for Git to use?")
RECLONE_CANT_BE_DONE = ("It looks like Git is already initialized here. "
"You can not re-clone")
GIT_URL = "Enter git url:"
views_with_offer_made = set()
class GsOfferInit(WindowCommand):
"""
If a git command fails indicating no git repo was found, this
command will ask the user whether they'd like to init a new repo.
Offer only once per session for a given view.
"""
def run(self):
active_view_id = self.window.active_view().id()
if active_view_id not in views_with_offer_made and sublime.ok_cancel_dialog(NO_REPO_MESSAGE):
self.window.run_command("gs_init")
else:
views_with_offer_made.add(active_view_id)
class GsInit(WindowCommand, GitCommand):
"""
If the active Sublime window has folders added to the project (or if Sublime was
opened from the terminal with something like `subl .`), initialize a new Git repo
at that location. If that directory cannot be determined, use the open file's
directory. If there is no open file, prompt the user for the directory to use.
    If the selected directory has previously been initialized with Git, prompt the user
to confirm a re-initialize before proceeding.
"""
def run(self):
sublime.set_timeout_async(self.run_async, 0)
def run_async(self):
open_folders = self.window.folders()
if open_folders:
suggested_git_root = open_folders[0]
else:
file_path = self.window.active_view().file_name()
if file_path:
suggested_git_root = os.path.dirname(file_path)
else:
suggested_git_root = ""
if suggested_git_root and os.path.exists(os.path.join(suggested_git_root, ".git")):
if sublime.ok_cancel_dialog(CONFIRM_REINITIALIZE):
self.on_done(suggested_git_root, re_init=True)
return
self.window.show_input_panel(REPO_PATH_PROMPT, suggested_git_root, self.on_done, None, None)
def on_done(self, path, re_init=False):
self.git("init", working_dir=path)
sublime.status_message("{word_start}nitialized repo successfully.".format(
word_start="Re-i" if re_init else "I"))
util.view.refresh_gitsavvy(self.window.active_view())
class GsClone(WindowCommand, GitCommand):
"""
    Prompt the user for a remote git URL and a local directory (suggested from the
    window's open folders, or from the open file's directory), then clone the
    repository into that directory. If the chosen directory has previously been
    initialized with Git, inform the user that re-cloning cannot be done.
"""
def run(self):
sublime.set_timeout_async(self.run_async, 0)
def run_async(self):
self.window.show_input_panel(GIT_URL, '', self.on_enter_url, None, None)
def find_suggested_git_root(self):
open_folders = self.window.folders()
project = self.project_name_from_url(self.git_url)
if open_folders:
return "{}/{}".format(open_folders[0], project)
else:
file_path = self.window.active_view().file_name()
if file_path:
return "{}/{}".format(os.path.dirname(file_path), project)
else:
return ""
def on_enter_url(self, url):
self.git_url = url
self.suggested_git_root = self.find_suggested_git_root()
self.window.show_input_panel(REPO_PATH_PROMPT, self.suggested_git_root, self.on_enter_directory, None, None)
def on_enter_directory(self, path):
self.suggested_git_root = path
if self.suggested_git_root and os.path.exists(os.path.join(self.suggested_git_root, ".git")):
sublime.ok_cancel_dialog(RECLONE_CANT_BE_DONE)
return
self.do_clone()
def do_clone(self):
self.git("clone", self.git_url, self.suggested_git_root)
sublime.status_message("Cloned repo successfully.")
util.view.refresh_gitsavvy(self.window.active_view())
class GsSetupUserCommand(WindowCommand, GitCommand):
"""
Set user's name and email address in global Git config.
"""
def run(self):
sublime.set_timeout_async(self.run_async, 0)
def run_async(self):
if sublime.ok_cancel_dialog(NO_CONFIG_MESSAGE, "OK"):
self.get_name()
def get_name(self):
self.window.show_input_panel(NAME_MESSAGE, "", self.on_done_name, None, None)
def on_done_name(self, name):
self.git("config", "--global", "user.name", "\"{}\"".format(name))
self.get_email()
def get_email(self):
self.window.show_input_panel(EMAIL_MESSAGE, "", self.on_done_email, None, None)
def on_done_email(self, email):
self.git("config", "--global", "user.email", "\"{}\"".format(email))
|
mit
|
wifidog/wifidog-gateway
|
contrib/load-tester/common.py
|
13
|
1508
|
# -*- coding: utf-8 -*-
import socket
import fcntl
import struct
import argparse
# http://stackoverflow.com/questions/159137/getting-mac-address
def get_mac_address(ifname):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
info = fcntl.ioctl(s.fileno(), 0x8927, struct.pack('256s', ifname[:15]))
return ':'.join(['%02x' % ord(char) for char in info[18:24]])
# http://code.activestate.com/recipes/439094-get-the-ip-address-associated-with-a-network-inter/
def get_ip_address(ifname):
print "ifname: %s" % ifname
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
return socket.inet_ntoa(fcntl.ioctl(
s.fileno(),
0x8915, # SIOCGIFADDR
struct.pack('256s', ifname[:15])
)[20:24])
def get_argparser():
parser = argparse.ArgumentParser(description='Hammer a wifidog'
+ ' instance with requests')
parser.add_argument(
'--target-interface',
required=True,
help='Interface where Wifidog is listening')
parser.add_argument(
'--source-interface-prefix',
required=True,
help='Prefix of the virtual interfaces from which Wifidog' +
' is exercised.')
parser.add_argument(
'--source-interface-count',
required=True,
help='Number of virtual interfaces, where interface is prefix+index')
parser.add_argument(
'--process-count',
required=True,
help='How many processes to run')
return parser
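# Hypothetical usage sketch (not part of the original helpers): resolve the MAC
# and IP address of each virtual source interface named on the command line.
if __name__ == '__main__':
    args = get_argparser().parse_args()
    for index in range(int(args.source_interface_count)):
        ifname = args.source_interface_prefix + str(index)
        print "%s: mac=%s ip=%s" % (ifname,
                                    get_mac_address(ifname),
                                    get_ip_address(ifname))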
|
gpl-2.0
|
anielsen001/scipy
|
scipy/weave/accelerate_tools.py
|
97
|
12946
|
#**************************************************************************#
#* FILE ************** accelerate_tools.py ************************#
#**************************************************************************#
#* Author: Patrick Miller February 9 2002 *#
#**************************************************************************#
"""
accelerate_tools contains the interface for on-the-fly building of
C++ equivalents to Python functions.
"""
#**************************************************************************#
from __future__ import absolute_import, print_function
from types import InstanceType, XRangeType
import inspect
from hashlib import sha256
import scipy.weave as weave
from numpy.testing import assert_
from .bytecodecompiler import CXXCoder,Type_Descriptor,Function_Descriptor
def CStr(s):
"Hacky way to get legal C string from Python string"
if s is None:
return '""'
assert_(isinstance(s, str), msg="only None and string allowed")
r = repr('"'+s) # Better for embedded quotes
return '"'+r[2:-1]+'"'
##################################################################
# CLASS INSTANCE #
##################################################################
class Instance(Type_Descriptor):
cxxtype = 'PyObject*'
def __init__(self,prototype):
self.prototype = prototype
def check(self,s):
return "PyInstance_Check(%s)" % s
def inbound(self,s):
return s
def outbound(self,s):
return s,0
def get_attribute(self,name):
proto = getattr(self.prototype,name)
T = lookup_type(proto)
code = 'tempPY = PyObject_GetAttrString(%%(rhs)s,"%s");\n' % name
convert = T.inbound('tempPY')
code += '%%(lhsType)s %%(lhs)s = %s;\n' % convert
return T,code
def set_attribute(self,name):
proto = getattr(self.prototype,name)
T = lookup_type(proto)
convert,owned = T.outbound('%(rhs)s')
code = 'tempPY = %s;' % convert
if not owned:
code += ' Py_INCREF(tempPY);'
code += ' PyObject_SetAttrString(%%(lhs)s,"%s",tempPY);' % name
code += ' Py_DECREF(tempPY);\n'
return T,code
##################################################################
# CLASS BASIC #
##################################################################
class Basic(Type_Descriptor):
owned = 1
def check(self,s):
return "%s(%s)" % (self.checker,s)
def inbound(self,s):
return "%s(%s)" % (self.inbounder,s)
def outbound(self,s):
return "%s(%s)" % (self.outbounder,s),self.owned
class Basic_Number(Basic):
def literalizer(self,s):
return str(s)
def binop(self,symbol,a,b):
assert_(symbol in ['+','-','*','/'], msg=symbol)
return '%s %s %s' % (a,symbol,b),self
class Integer(Basic_Number):
cxxtype = "long"
checker = "PyInt_Check"
inbounder = "PyInt_AsLong"
outbounder = "PyInt_FromLong"
class Double(Basic_Number):
cxxtype = "double"
checker = "PyFloat_Check"
inbounder = "PyFloat_AsDouble"
outbounder = "PyFloat_FromDouble"
class String(Basic):
cxxtype = "char*"
checker = "PyString_Check"
inbounder = "PyString_AsString"
outbounder = "PyString_FromString"
def literalizer(self,s):
return CStr(s)
# -----------------------------------------------
# Singletonize the type names
# -----------------------------------------------
Integer = Integer()
Double = Double()
String = String()
import numpy as np
class Vector(Type_Descriptor):
cxxtype = 'PyArrayObject*'
refcount = 1
dims = 1
module_init_code = 'import_array();\n'
inbounder = "(PyArrayObject*)"
outbounder = "(PyObject*)"
owned = 0 # Conversion is by casting!
prerequisites = Type_Descriptor.prerequisites + \
['#include "numpy/arrayobject.h"']
dims = 1
def check(self,s):
return "PyArray_Check(%s) && ((PyArrayObject*)%s)->nd == %d && ((PyArrayObject*)%s)->descr->type_num == %s" % (
s,s,self.dims,s,self.typecode)
def inbound(self,s):
return "%s(%s)" % (self.inbounder,s)
def outbound(self,s):
return "%s(%s)" % (self.outbounder,s),self.owned
def getitem(self,A,v,t):
assert_(self.dims == len(v), msg='Expect dimension %d' % self.dims)
code = '*((%s*)(%s->data' % (self.cxxbase,A)
for i in range(self.dims):
# assert that ''t[i]'' is an integer
code += '+%s*%s->strides[%d]' % (v[i],A,i)
code += '))'
return code,self.pybase
def setitem(self,A,v,t):
return self.getitem(A,v,t)
class matrix(Vector):
dims = 2
class IntegerVector(Vector):
typecode = 'PyArray_INT'
cxxbase = 'int'
pybase = Integer
class Integermatrix(matrix):
typecode = 'PyArray_INT'
cxxbase = 'int'
pybase = Integer
class LongVector(Vector):
typecode = 'PyArray_LONG'
cxxbase = 'long'
pybase = Integer
class Longmatrix(matrix):
typecode = 'PyArray_LONG'
cxxbase = 'long'
pybase = Integer
class DoubleVector(Vector):
typecode = 'PyArray_DOUBLE'
cxxbase = 'double'
pybase = Double
class Doublematrix(matrix):
typecode = 'PyArray_DOUBLE'
cxxbase = 'double'
pybase = Double
##################################################################
# CLASS XRANGE #
##################################################################
class XRange(Type_Descriptor):
cxxtype = 'XRange'
prerequisites = ['''
class XRange {
public:
XRange(long aLow, long aHigh, long aStep=1)
: low(aLow),high(aHigh),step(aStep)
{
}
XRange(long aHigh)
: low(0),high(aHigh),step(1)
{
}
long low;
long high;
long step;
};''']
# -----------------------------------------------
# Singletonize the type names
# -----------------------------------------------
IntegerVector = IntegerVector()
Integermatrix = Integermatrix()
LongVector = LongVector()
Longmatrix = Longmatrix()
DoubleVector = DoubleVector()
Doublematrix = Doublematrix()
XRange = XRange()
typedefs = {
int: Integer,
float: Double,
str: String,
(np.ndarray,1,int): IntegerVector,
(np.ndarray,2,int): Integermatrix,
(np.ndarray,1,np.long): LongVector,
(np.ndarray,2,np.long): Longmatrix,
(np.ndarray,1,float): DoubleVector,
(np.ndarray,2,float): Doublematrix,
XRangeType: XRange,
}
import math
functiondefs = {
(len,(String,)):
Function_Descriptor(code='strlen(%s)',return_type=Integer),
(len,(LongVector,)):
Function_Descriptor(code='PyArray_Size((PyObject*)%s)',return_type=Integer),
(float,(Integer,)):
Function_Descriptor(code='(double)(%s)',return_type=Double),
(range,(Integer,Integer)):
Function_Descriptor(code='XRange(%s)',return_type=XRange),
(range,(Integer)):
Function_Descriptor(code='XRange(%s)',return_type=XRange),
(math.sin,(Double,)):
Function_Descriptor(code='sin(%s)',return_type=Double),
(math.cos,(Double,)):
Function_Descriptor(code='cos(%s)',return_type=Double),
(math.sqrt,(Double,)):
Function_Descriptor(code='sqrt(%s)',return_type=Double),
}
##################################################################
# FUNCTION LOOKUP_TYPE #
##################################################################
def lookup_type(x):
T = type(x)
try:
return typedefs[T]
except:
if isinstance(T,np.ndarray):
return typedefs[(T,len(x.shape),x.dtype.char)]
elif issubclass(T, InstanceType):
return Instance(x)
else:
raise NotImplementedError(T)
##################################################################
# class ACCELERATE #
##################################################################
class accelerate(object):
def __init__(self, function, *args, **kw):
assert_(inspect.isfunction(function))
self.function = function
self.module = inspect.getmodule(function)
if self.module is None:
import __main__
self.module = __main__
self.__call_map = {}
def __cache(self,*args):
raise TypeError
def __call__(self,*args):
try:
return self.__cache(*args)
except TypeError:
# Figure out type info -- Do as tuple so its hashable
signature = tuple(map(lookup_type,args))
# If we know the function, call it
try:
fast = self.__call_map[signature]
except:
fast = self.singleton(signature)
self.__cache = fast
self.__call_map[signature] = fast
return fast(*args)
def signature(self,*args):
# Figure out type info -- Do as tuple so its hashable
signature = tuple(map(lookup_type,args))
return self.singleton(signature)
def singleton(self,signature):
identifier = self.identifier(signature)
# Generate a new function, then call it
f = self.function
# See if we have an accelerated version of module
try:
print('lookup',self.module.__name__+'_weave')
accelerated_module = __import__(self.module.__name__+'_weave')
print('have accelerated',self.module.__name__+'_weave')
fast = getattr(accelerated_module,identifier)
return fast
except ImportError:
accelerated_module = None
except AttributeError:
pass
P = self.accelerate(signature,identifier)
E = weave.ext_tools.ext_module(self.module.__name__+'_weave')
E.add_function(P)
E.generate_file()
weave.build_tools.build_extension(self.module.__name__+'_weave.cpp',verbose=2)
if accelerated_module:
raise NotImplementedError('Reload')
else:
accelerated_module = __import__(self.module.__name__+'_weave')
fast = getattr(accelerated_module,identifier)
return fast
def identifier(self,signature):
# Build a (truncated, see gh-3216) SHA-256 checksum
f = self.function
co = f.func_code
identifier = str(signature) + \
str(co.co_argcount) + \
str(co.co_consts) + \
str(co.co_varnames) + \
co.co_code
return 'F' + sha256(identifier).hexdigest()[:32]
def accelerate(self,signature,identifier):
P = Python2CXX(self.function,signature,name=identifier)
return P
def code(self,*args):
if len(args) != self.function.func_code.co_argcount:
raise TypeError('%s() takes exactly %d arguments (%d given)' %
(self.function.__name__,
self.function.func_code.co_argcount,
len(args)))
signature = tuple(map(lookup_type,args))
ident = self.function.__name__
return self.accelerate(signature,ident).function_code()
##################################################################
# CLASS PYTHON2CXX #
##################################################################
class Python2CXX(CXXCoder):
def typedef_by_value(self,v):
T = lookup_type(v)
if T not in self.used:
self.used.append(T)
return T
def function_by_signature(self,signature):
descriptor = functiondefs[signature]
if descriptor.return_type not in self.used:
self.used.append(descriptor.return_type)
return descriptor
def __init__(self,f,signature,name=None):
# Make sure function is a function
assert_(inspect.isfunction(f))
# and check the input type signature
assert_(reduce(lambda x,y: x and y,
map(lambda x: isinstance(x,Type_Descriptor),
signature),
1), msg='%s not all type objects' % signature)
self.arg_specs = []
self.customize = weave.base_info.custom_info()
CXXCoder.__init__(self,f,signature,name)
return
def function_code(self):
code = self.wrapped_code()
for T in self.used:
if T is not None and T.module_init_code:
self.customize.add_module_init_code(T.module_init_code)
return code
def python_function_definition_code(self):
return '{ "%s", wrapper_%s, METH_VARARGS, %s },\n' % (
self.name,
self.name,
CStr(self.function.__doc__))
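# Hypothetical usage sketch (not part of the original module; Python 2 only, like
# the rest of scipy.weave): map sample Python values to their C++ type descriptors.
if __name__ == '__main__':
    print(lookup_type(3))      # Integer descriptor (C++ long)
    print(lookup_type(2.5))    # Double descriptor (C++ double)
    print(lookup_type('abc'))  # String descriptor (C++ char*)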
|
bsd-3-clause
|
V155/qutebrowser
|
qutebrowser/browser/webkit/rfc6266.py
|
5
|
9768
|
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2014-2018 Florian Bruhin (The Compiler) <[email protected]>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""pyPEG parsing for the RFC 6266 (Content-Disposition) header."""
import urllib.parse
import string
import re
import attr
import pypeg2 as peg
from qutebrowser.utils import utils
class UniqueNamespace(peg.Namespace):
"""A pyPEG2 namespace which prevents setting a value twice."""
def __setitem__(self, key, value):
if key in self:
raise DuplicateParamError(key)
super().__setitem__(key, value)
# RFC 2616
ctl_chars = ''.join(chr(i) for i in range(32)) + chr(127)
# RFC 5987
attr_chars_nonalnum = '!#$&+-.^_`|~'
attr_chars = string.ascii_letters + string.digits + attr_chars_nonalnum
# RFC 5987 gives this alternative construction of the token character class
token_chars = attr_chars + "*'%" # flake8: disable=S001
# Definitions from https://tools.ietf.org/html/rfc2616#section-2.2
# token was redefined from attr_chars to avoid using AnyBut,
# which might include non-ascii octets.
token_re = '[{}]+'.format(re.escape(token_chars))
class Token(str):
"""A token (RFC 2616, Section 2.2)."""
grammar = re.compile(token_re)
# RFC 2616 says some linear whitespace (LWS) is in fact allowed in text
# and qdtext; however it also mentions folding that whitespace into
# a single SP (which isn't in CTL) before interpretation.
# Assume the caller already did that folding when parsing headers.
# NOTE: qdtext also allows non-ascii, which we choose to parse
# as ISO-8859-1; rejecting it entirely would also be permitted.
# Some broken browsers attempt encoding-sniffing, which is broken
# because the spec only allows iso, and because encoding-sniffing
# can mangle valid values.
# Everything else in this grammar (including RFC 5987 ext values)
# is in an ascii-safe encoding.
qdtext_re = r'[^"{}]'.format(re.escape(ctl_chars))
quoted_pair_re = r'\\[{}]'.format(re.escape(
''.join(chr(i) for i in range(128))))
class QuotedString(str):
"""A quoted string (RFC 2616, Section 2.2)."""
grammar = re.compile(r'"({}|{})+"'.format(quoted_pair_re, qdtext_re))
def __str__(self):
s = super().__str__()
s = s[1:-1] # remove quotes
s = re.sub(r'\\(.)', r'\1', s) # drop backslashes
return s
class Value(str):
"""A value. (RFC 2616, Section 3.6)."""
grammar = [re.compile(token_re), QuotedString]
class Charset(str):
"""A charset (RFC5987, Section 3.2.1)."""
# Other charsets are forbidden, the spec reserves them
# for future evolutions.
grammar = re.compile('UTF-8|ISO-8859-1', re.I)
class Language(str):
"""A language-tag (RFC 5646, Section 2.1).
FIXME: This grammar is not 100% correct yet.
https://github.com/qutebrowser/qutebrowser/issues/105
"""
grammar = re.compile('[A-Za-z0-9-]+')
attr_char_re = '[{}]'.format(re.escape(attr_chars))
hex_digit_re = '%[' + string.hexdigits + ']{2}'
class ValueChars(str):
"""A value of an attribute.
FIXME: Can we merge this with Value?
https://github.com/qutebrowser/qutebrowser/issues/105
"""
grammar = re.compile('({}|{})*'.format(attr_char_re, hex_digit_re))
class ExtValue(peg.List):
"""An ext-value of an attribute (RFC 5987, Section 3.2)."""
grammar = peg.contiguous(Charset, "'", peg.optional(Language), "'",
ValueChars)
class ExtToken(peg.Symbol):
"""A token introducing an extended value (RFC 6266, Section 4.1)."""
regex = re.compile(token_re + r'\*')
def __str__(self):
return super().__str__().lower()
class NoExtToken(peg.Symbol):
"""A token introducing a normal value (RFC 6266, Section 4.1)."""
regex = re.compile(token_re + r'(?<!\*)')
def __str__(self):
return super().__str__().lower()
class DispositionParm(str):
"""A parameter for the Disposition-Type header (RFC6266, Section 4.1)."""
grammar = peg.attr('name', NoExtToken), '=', Value
class ExtDispositionParm:
"""An extended parameter (RFC6266, Section 4.1)."""
grammar = peg.attr('name', ExtToken), '=', ExtValue
def __init__(self, value, name=None):
self.name = name
self.value = value
class DispositionType(peg.List):
"""The disposition type (RFC6266, Section 4.1)."""
grammar = [re.compile('(inline|attachment)', re.I), Token]
class DispositionParmList(UniqueNamespace):
"""A list of disposition parameters (RFC6266, Section 4.1)."""
grammar = peg.maybe_some(';', [ExtDispositionParm, DispositionParm])
class ContentDispositionValue:
"""A complete Content-Disposition value (RFC 6266, Section 4.1)."""
# Allows nonconformant final semicolon
# I've seen it in the wild, and browsers accept it
# http://greenbytes.de/tech/tc2231/#attwithasciifilenamenqs
grammar = (peg.attr('dtype', DispositionType),
peg.attr('params', DispositionParmList),
peg.optional(';'))
@attr.s
class LangTagged:
"""A string with an associated language."""
string = attr.ib()
langtag = attr.ib()
class Error(Exception):
"""Base class for RFC6266 errors."""
class DuplicateParamError(Error):
"""Exception raised when a parameter has been given twice."""
class InvalidISO8859Error(Error):
"""Exception raised when a byte is invalid in ISO-8859-1."""
class _ContentDisposition:
"""Records various indications and hints about content disposition.
These can be used to know if a file should be downloaded or
displayed directly, and to hint what filename it should have
in the download case.
"""
def __init__(self, disposition, assocs):
"""Used internally after parsing the header."""
assert len(disposition) == 1
self.disposition = disposition[0]
self.assocs = dict(assocs) # So we can change values
if 'filename*' in self.assocs:
param = self.assocs['filename*']
assert isinstance(param, ExtDispositionParm)
self.assocs['filename*'] = parse_ext_value(param.value).string
def filename(self):
"""The filename from the Content-Disposition header or None.
On safety:
This property records the intent of the sender.
You shouldn't use this sender-controlled value as a filesystem path, it
can be insecure. Serving files with this filename can be dangerous as
well, due to a certain browser using the part after the dot for
mime-sniffing. Saving it to a database is fine by itself though.
"""
if 'filename*' in self.assocs:
return self.assocs['filename*']
elif 'filename' in self.assocs:
# XXX Reject non-ascii (parsed via qdtext) here?
return self.assocs['filename']
return None
def is_inline(self):
"""Return if the file should be handled inline.
If not, and unless your application supports other dispositions
than the standard inline and attachment, it should be handled
as an attachment.
"""
return self.disposition.lower() == 'inline'
def __repr__(self):
return utils.get_repr(self, constructor=True,
disposition=self.disposition, assocs=self.assocs)
def normalize_ws(text):
"""Do LWS (linear whitespace) folding."""
return ' '.join(text.split())
def parse_headers(content_disposition):
"""Build a _ContentDisposition from header values."""
# We allow non-ascii here (it will only be parsed inside of qdtext, and
# rejected by the grammar if it appears in other places), although parsing
# it can be ambiguous. Parsing it ensures that a non-ambiguous filename*
# value won't get dismissed because of an unrelated ambiguity in the
# filename parameter. But it does mean we occasionally give
# less-than-certain values for some legacy senders.
content_disposition = content_disposition.decode('iso-8859-1')
# Our parsing is relaxed in these regards:
# - The grammar allows a final ';' in the header;
# - We do LWS-folding, and possibly normalise other broken
# whitespace, instead of rejecting non-lws-safe text.
# XXX Would prefer to accept only the quoted whitespace
# case, rather than normalising everything.
content_disposition = normalize_ws(content_disposition)
parsed = peg.parse(content_disposition, ContentDispositionValue)
return _ContentDisposition(disposition=parsed.dtype, assocs=parsed.params)
def parse_ext_value(val):
"""Parse the value of an extended attribute."""
if len(val) == 3:
charset, langtag, coded = val
else:
charset, coded = val
langtag = None
decoded = urllib.parse.unquote(coded, charset, errors='strict')
if charset == 'iso-8859-1':
# Fail if the filename contains an invalid ISO-8859-1 char
for c in decoded:
if 0x7F <= ord(c) <= 0x9F:
raise InvalidISO8859Error(c)
return LangTagged(decoded, langtag)
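# Hypothetical usage sketch (not part of qutebrowser; requires pypeg2 and attrs):
# parse a raw header value and inspect the resulting _ContentDisposition.
if __name__ == '__main__':
    cd = parse_headers(b'attachment; filename="foo.html"')
    print(cd.is_inline())   # False
    print(cd.filename())    # the advertised filename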
|
gpl-3.0
|
mikewiebe-ansible/ansible
|
test/units/modules/network/check_point/test_cp_mgmt_service_icmp.py
|
19
|
3878
|
# Ansible module to manage CheckPoint Firewall (c) 2019
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import pytest
from units.modules.utils import set_module_args, exit_json, fail_json, AnsibleExitJson
from ansible.module_utils import basic
from ansible.modules.network.check_point import cp_mgmt_service_icmp
OBJECT = {
"name": "Icmp1",
"icmp_type": 5,
"icmp_code": 7
}
CREATE_PAYLOAD = {
"name": "Icmp1",
"icmp_type": 5,
"icmp_code": 7
}
UPDATE_PAYLOAD = {
"name": "Icmp1",
"icmp_type": 45,
"icmp_code": 13
}
OBJECT_AFTER_UPDATE = UPDATE_PAYLOAD
DELETE_PAYLOAD = {
"name": "Icmp1",
"state": "absent"
}
function_path = 'ansible.modules.network.check_point.cp_mgmt_service_icmp.api_call'
api_call_object = 'service-icmp'
class TestCheckpointServiceIcmp(object):
module = cp_mgmt_service_icmp
@pytest.fixture(autouse=True)
def module_mock(self, mocker):
return mocker.patch.multiple(basic.AnsibleModule, exit_json=exit_json, fail_json=fail_json)
@pytest.fixture
def connection_mock(self, mocker):
connection_class_mock = mocker.patch('ansible.module_utils.network.checkpoint.checkpoint.Connection')
return connection_class_mock.return_value
def test_create(self, mocker, connection_mock):
mock_function = mocker.patch(function_path)
mock_function.return_value = {'changed': True, api_call_object: OBJECT}
result = self._run_module(CREATE_PAYLOAD)
assert result['changed']
assert OBJECT.items() == result[api_call_object].items()
def test_create_idempotent(self, mocker, connection_mock):
mock_function = mocker.patch(function_path)
mock_function.return_value = {'changed': False, api_call_object: OBJECT}
result = self._run_module(CREATE_PAYLOAD)
assert not result['changed']
def test_update(self, mocker, connection_mock):
mock_function = mocker.patch(function_path)
mock_function.return_value = {'changed': True, api_call_object: OBJECT_AFTER_UPDATE}
result = self._run_module(UPDATE_PAYLOAD)
assert result['changed']
assert OBJECT_AFTER_UPDATE.items() == result[api_call_object].items()
def test_update_idempotent(self, mocker, connection_mock):
mock_function = mocker.patch(function_path)
mock_function.return_value = {'changed': False, api_call_object: OBJECT_AFTER_UPDATE}
result = self._run_module(UPDATE_PAYLOAD)
assert not result['changed']
def test_delete(self, mocker, connection_mock):
mock_function = mocker.patch(function_path)
mock_function.return_value = {'changed': True}
result = self._run_module(DELETE_PAYLOAD)
assert result['changed']
def test_delete_idempotent(self, mocker, connection_mock):
mock_function = mocker.patch(function_path)
mock_function.return_value = {'changed': False}
result = self._run_module(DELETE_PAYLOAD)
assert not result['changed']
def _run_module(self, module_args):
set_module_args(module_args)
with pytest.raises(AnsibleExitJson) as ex:
self.module.main()
return ex.value.args[0]
|
gpl-3.0
|
JakeTobin/kenthackenoughF2015
|
Pi/flask/test_flaskr.py
|
157
|
2059
|
# -*- coding: utf-8 -*-
"""
Flaskr Tests
~~~~~~~~~~~~
Tests the Flaskr application.
:copyright: (c) 2015 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import pytest
import os
import flaskr
import tempfile
@pytest.fixture
def client(request):
db_fd, flaskr.app.config['DATABASE'] = tempfile.mkstemp()
flaskr.app.config['TESTING'] = True
client = flaskr.app.test_client()
with flaskr.app.app_context():
flaskr.init_db()
def teardown():
os.close(db_fd)
os.unlink(flaskr.app.config['DATABASE'])
request.addfinalizer(teardown)
return client
def login(client, username, password):
return client.post('/login', data=dict(
username=username,
password=password
), follow_redirects=True)
def logout(client):
return client.get('/logout', follow_redirects=True)
def test_empty_db(client):
"""Start with a blank database."""
rv = client.get('/')
assert b'No entries here so far' in rv.data
def test_login_logout(client):
"""Make sure login and logout works"""
rv = login(client, flaskr.app.config['USERNAME'],
flaskr.app.config['PASSWORD'])
assert b'You were logged in' in rv.data
rv = logout(client)
assert b'You were logged out' in rv.data
rv = login(client, flaskr.app.config['USERNAME'] + 'x',
flaskr.app.config['PASSWORD'])
assert b'Invalid username' in rv.data
rv = login(client, flaskr.app.config['USERNAME'],
flaskr.app.config['PASSWORD'] + 'x')
assert b'Invalid password' in rv.data
def test_messages(client):
"""Test that messages work"""
login(client, flaskr.app.config['USERNAME'],
flaskr.app.config['PASSWORD'])
rv = client.post('/add', data=dict(
title='<Hello>',
text='<strong>HTML</strong> allowed here'
), follow_redirects=True)
assert b'No entries here so far' not in rv.data
assert b'<Hello>' in rv.data
assert b'<strong>HTML</strong> allowed here' in rv.data
|
mit
|
philanthropy-u/edx-platform
|
lms/djangoapps/mobile_api/users/views.py
|
2
|
15245
|
"""
Views for user API
"""
import json
from django.shortcuts import redirect
from django.utils import dateparse
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import UsageKey
from rest_framework import generics, views
from rest_framework.decorators import api_view
from rest_framework.response import Response
from xblock.fields import Scope
from xblock.runtime import KeyValueStore
from courseware.access import is_mobile_available_for_user
from courseware.courses import get_current_child
from courseware.model_data import FieldDataCache
from courseware.module_render import get_module_for_descriptor
from courseware.views.index import save_positions_recursively_up
from experiments.models import ExperimentData, ExperimentKeyValue
from lms.djangoapps.courseware.access_utils import ACCESS_GRANTED
from mobile_api.utils import API_V05
from openedx.features.course_duration_limits.access import check_course_expired
from student.models import CourseEnrollment, User
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.exceptions import ItemNotFoundError
from .. import errors
from ..decorators import mobile_course_access, mobile_view
from .serializers import CourseEnrollmentSerializer, CourseEnrollmentSerializerv05, UserSerializer
@mobile_view(is_user=True)
class UserDetail(generics.RetrieveAPIView):
"""
**Use Case**
Get information about the specified user and access other resources
the user has permissions for.
Users are redirected to this endpoint after they sign in.
You can use the **course_enrollments** value in the response to get a
list of courses the user is enrolled in.
**Example Request**
GET /api/mobile/{version}/users/{username}
**Response Values**
If the request is successful, the request returns an HTTP 200 "OK" response.
The HTTP 200 response has the following values.
* course_enrollments: The URI to list the courses the currently signed
in user is enrolled in.
* email: The email address of the currently signed in user.
* id: The ID of the user.
* name: The full name of the currently signed in user.
* username: The username of the currently signed in user.
"""
queryset = (
User.objects.all().select_related('profile')
)
serializer_class = UserSerializer
lookup_field = 'username'
def get_serializer_context(self):
context = super(UserDetail, self).get_serializer_context()
context['api_version'] = self.kwargs.get('api_version')
return context
@mobile_view(is_user=True)
class UserCourseStatus(views.APIView):
"""
**Use Cases**
Get or update the ID of the module that the specified user last
visited in the specified course.
**Example Requests**
GET /api/mobile/{version}/users/{username}/course_status_info/{course_id}
PATCH /api/mobile/{version}/users/{username}/course_status_info/{course_id}
**PATCH Parameters**
The body of the PATCH request can include the following parameters.
* last_visited_module_id={module_id}
* modification_date={date}
The modification_date parameter is optional. If it is present, the
update will only take effect if the modification_date in the
request is later than the modification_date saved on the server.
**Response Values**
If the request is successful, the request returns an HTTP 200 "OK" response.
The HTTP 200 response has the following values.
* last_visited_module_id: The ID of the last module that the user
visited in the course.
* last_visited_module_path: The ID of the modules in the path from the
last visited module to the course module.
"""
http_method_names = ["get", "patch"]
def _last_visited_module_path(self, request, course):
"""
Returns the path from the last module visited by the current user in the given course up to
the course module. If there is no such visit, the first item deep enough down the course
tree is used.
"""
field_data_cache = FieldDataCache.cache_for_descriptor_descendents(
course.id, request.user, course, depth=2)
course_module = get_module_for_descriptor(
request.user, request, course, field_data_cache, course.id, course=course
)
path = [course_module]
chapter = get_current_child(course_module, min_depth=2)
if chapter is not None:
path.append(chapter)
section = get_current_child(chapter, min_depth=1)
if section is not None:
path.append(section)
path.reverse()
return path
def _get_course_info(self, request, course):
"""
Returns the course status
"""
path = self._last_visited_module_path(request, course)
path_ids = [unicode(module.location) for module in path]
return Response({
"last_visited_module_id": path_ids[0],
"last_visited_module_path": path_ids,
})
def _update_last_visited_module_id(self, request, course, module_key, modification_date):
"""
Saves the module id if the found modification_date is less recent than the passed modification date
"""
field_data_cache = FieldDataCache.cache_for_descriptor_descendents(
course.id, request.user, course, depth=2)
try:
module_descriptor = modulestore().get_item(module_key)
except ItemNotFoundError:
return Response(errors.ERROR_INVALID_MODULE_ID, status=400)
module = get_module_for_descriptor(
request.user, request, module_descriptor, field_data_cache, course.id, course=course
)
if modification_date:
key = KeyValueStore.Key(
scope=Scope.user_state,
user_id=request.user.id,
block_scope_id=course.location,
field_name='position'
)
original_store_date = field_data_cache.last_modified(key)
if original_store_date is not None and modification_date < original_store_date:
# old modification date so skip update
return self._get_course_info(request, course)
save_positions_recursively_up(request.user, request, field_data_cache, module, course=course)
return self._get_course_info(request, course)
@mobile_course_access(depth=2)
def get(self, request, course, *args, **kwargs): # pylint: disable=unused-argument
"""
Get the ID of the module that the specified user last visited in the specified course.
"""
return self._get_course_info(request, course)
@mobile_course_access(depth=2)
def patch(self, request, course, *args, **kwargs): # pylint: disable=unused-argument
"""
Update the ID of the module that the specified user last visited in the specified course.
"""
module_id = request.data.get("last_visited_module_id")
modification_date_string = request.data.get("modification_date")
modification_date = None
if modification_date_string:
modification_date = dateparse.parse_datetime(modification_date_string)
if not modification_date or not modification_date.tzinfo:
return Response(errors.ERROR_INVALID_MODIFICATION_DATE, status=400)
if module_id:
try:
module_key = UsageKey.from_string(module_id)
except InvalidKeyError:
return Response(errors.ERROR_INVALID_MODULE_ID, status=400)
return self._update_last_visited_module_id(request, course, module_key, modification_date)
else:
# The arguments are optional, so if there's no argument just succeed
return self._get_course_info(request, course)
@mobile_view(is_user=True)
class UserCourseEnrollmentsList(generics.ListAPIView):
"""
**Use Case**
Get information about the courses that the currently signed in user is
enrolled in.
    v1 differs from v0.5 by returning ALL of a user's enrollments, rather than only
    the enrollments the user still has access to (those that haven't expired).
An additional attribute "expiration" has been added to the response, which lists the date
when access to the course will expire or null if it doesn't expire.
**Example Request**
GET /api/mobile/v1/users/{username}/course_enrollments/
**Response Values**
If the request for information about the user is successful, the
request returns an HTTP 200 "OK" response.
The HTTP 200 response has the following values.
* expiration: The course expiration date for given user course pair
or null if the course does not expire.
* certificate: Information about the user's earned certificate in the
course.
* course: A collection of the following data about the course.
* courseware_access: A JSON representation with access information for the course,
including any access errors.
* course_about: The URL to the course about page.
* course_sharing_utm_parameters: Encoded UTM parameters to be included in course sharing url
* course_handouts: The URI to get data for course handouts.
* course_image: The path to the course image.
* course_updates: The URI to get data for course updates.
* discussion_url: The URI to access data for course discussions if
it is enabled, otherwise null.
* end: The end date of the course.
* id: The unique ID of the course.
* name: The name of the course.
* number: The course number.
* org: The organization that created the course.
* start: The date and time when the course starts.
* start_display:
If start_type is a string, then the advertised_start date for the course.
If start_type is a timestamp, then a formatted date for the start of the course.
If start_type is empty, then the value is None and it indicates that the course has not yet started.
* start_type: One of either "string", "timestamp", or "empty"
* subscription_id: A unique "clean" (alphanumeric with '_') ID of
the course.
* video_outline: The URI to get the list of all videos that the user
can access in the course.
* created: The date the course was created.
* is_active: Whether the course is currently active. Possible values
are true or false.
* mode: The type of certificate registration for this course (honor or
certified).
* url: URL to the downloadable version of the certificate, if exists.
"""
queryset = CourseEnrollment.objects.all()
lookup_field = 'username'
# In Django Rest Framework v3, there is a default pagination
# class that transmutes the response data into a dictionary
# with pagination information. The original response data (a list)
# is stored in a "results" value of the dictionary.
# For backwards compatibility with the existing API, we disable
# the default behavior by setting the pagination_class to None.
pagination_class = None
def is_org(self, check_org, course_org):
"""
Check course org matches request org param or no param provided
"""
return check_org is None or (check_org.lower() == course_org.lower())
def hide_course_for_enrollment_fee_experiment(self, user, enrollment, experiment_id=9):
"""
Hide enrolled courses from mobile app as part of REV-73/REV-19
"""
course_key = enrollment.course_overview.id
try:
courses_excluded_from_mobile = ExperimentKeyValue.objects.get(
experiment_id=10,
key="mobile_app_exclusion"
).value
courses_excluded_from_mobile = json.loads(courses_excluded_from_mobile.replace('\r', '').replace('\n', ''))
if enrollment.mode == 'audit' and str(course_key) in courses_excluded_from_mobile.keys():
activationTime = dateparse.parse_datetime(courses_excluded_from_mobile[str(course_key)])
if activationTime and enrollment.created and enrollment.created > activationTime:
return True
except (ExperimentKeyValue.DoesNotExist, AttributeError):
pass
try:
ExperimentData.objects.get(
user=user,
experiment_id=experiment_id,
key='enrolled_{0}'.format(course_key),
)
except ExperimentData.DoesNotExist:
return False
try:
ExperimentData.objects.get(
user=user,
experiment_id=experiment_id,
key='paid_{0}'.format(course_key),
)
except ExperimentData.DoesNotExist:
return True
return False
def get_serializer_context(self):
context = super(UserCourseEnrollmentsList, self).get_serializer_context()
context['api_version'] = self.kwargs.get('api_version')
return context
def get_serializer_class(self):
api_version = self.kwargs.get('api_version')
if api_version == API_V05:
return CourseEnrollmentSerializerv05
return CourseEnrollmentSerializer
def get_queryset(self):
api_version = self.kwargs.get('api_version')
enrollments = self.queryset.filter(
user__username=self.kwargs['username'],
is_active=True
).order_by('created').reverse()
org = self.request.query_params.get('org', None)
same_org = (
enrollment for enrollment in enrollments
if enrollment.course_overview and self.is_org(org, enrollment.course_overview.org)
)
mobile_available = (
enrollment for enrollment in same_org
if is_mobile_available_for_user(self.request.user, enrollment.course_overview)
)
not_hidden_for_experiments = (
enrollment for enrollment in mobile_available
if not self.hide_course_for_enrollment_fee_experiment(self.request.user, enrollment)
)
not_duration_limited = (
enrollment for enrollment in not_hidden_for_experiments
if check_course_expired(self.request.user, enrollment.course) == ACCESS_GRANTED
)
if api_version == API_V05:
# for v0.5 don't return expired courses
return list(not_duration_limited)
else:
# return all courses, with associated expiration
return list(mobile_available)
@api_view(["GET"])
@mobile_view()
def my_user_info(request, api_version):
"""
Redirect to the currently-logged-in user's info page
"""
return redirect("user-detail", api_version=api_version, username=request.user.username)
|
agpl-3.0
|
xzturn/tensorflow
|
tensorflow/python/layers/pooling_test.py
|
21
|
8576
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.layers.pooling."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import test_util
from tensorflow.python.layers import pooling as pooling_layers
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.platform import test
class PoolingTest(test.TestCase):
def testInvalidDataFormat(self):
height, width = 7, 9
images = random_ops.random_uniform((5, height, width, 3), seed=1)
with self.assertRaisesRegexp(ValueError, 'data_format'):
pooling_layers.max_pooling2d(images, 3, strides=2, data_format='invalid')
def testInvalidStrides(self):
height, width = 7, 9
images = random_ops.random_uniform((5, height, width, 3), seed=1)
with self.assertRaisesRegexp(ValueError, 'strides'):
pooling_layers.max_pooling2d(images, 3, strides=(1, 2, 3))
with self.assertRaisesRegexp(ValueError, 'strides'):
pooling_layers.max_pooling2d(images, 3, strides=None)
def testInvalidPoolSize(self):
height, width = 7, 9
images = random_ops.random_uniform((5, height, width, 3), seed=1)
with self.assertRaisesRegexp(ValueError, 'pool_size'):
pooling_layers.max_pooling2d(images, (1, 2, 3), strides=2)
with self.assertRaisesRegexp(ValueError, 'pool_size'):
pooling_layers.max_pooling2d(images, None, strides=2)
def testCreateMaxPooling2D(self):
height, width = 7, 9
images = random_ops.random_uniform((5, height, width, 4))
layer = pooling_layers.MaxPooling2D([2, 2], strides=2)
output = layer.apply(images)
self.assertListEqual(output.get_shape().as_list(), [5, 3, 4, 4])
def testCreateAveragePooling2D(self):
height, width = 7, 9
images = random_ops.random_uniform((5, height, width, 4))
layer = pooling_layers.AveragePooling2D([2, 2], strides=2)
output = layer.apply(images)
self.assertListEqual(output.get_shape().as_list(), [5, 3, 4, 4])
@test_util.run_deprecated_v1
def testCreateMaxPooling2DChannelsFirst(self):
height, width = 7, 9
images = random_ops.random_uniform((5, 2, height, width))
layer = pooling_layers.MaxPooling2D([2, 2],
strides=1,
data_format='channels_first')
output = layer.apply(images)
self.assertListEqual(output.get_shape().as_list(), [5, 2, 6, 8])
@test_util.run_deprecated_v1
def testCreateAveragePooling2DChannelsFirst(self):
height, width = 5, 6
images = random_ops.random_uniform((3, 4, height, width))
layer = pooling_layers.AveragePooling2D((2, 2),
strides=(1, 1),
padding='valid',
data_format='channels_first')
output = layer.apply(images)
self.assertListEqual(output.get_shape().as_list(), [3, 4, 4, 5])
@test_util.run_deprecated_v1
def testCreateAveragePooling2DChannelsFirstWithNoneBatch(self):
height, width = 5, 6
images = array_ops.placeholder(dtype='float32',
shape=(None, 4, height, width))
layer = pooling_layers.AveragePooling2D((2, 2),
strides=(1, 1),
padding='valid',
data_format='channels_first')
output = layer.apply(images)
self.assertListEqual(output.get_shape().as_list(), [None, 4, 4, 5])
def testCreateMaxPooling1D(self):
width = 7
channels = 3
images = random_ops.random_uniform((5, width, channels))
layer = pooling_layers.MaxPooling1D(2, strides=2)
output = layer.apply(images)
self.assertListEqual(output.get_shape().as_list(),
[5, width // 2, channels])
def testCreateAveragePooling1D(self):
width = 7
channels = 3
images = random_ops.random_uniform((5, width, channels))
layer = pooling_layers.AveragePooling1D(2, strides=2)
output = layer.apply(images)
self.assertListEqual(output.get_shape().as_list(),
[5, width // 2, channels])
def testCreateMaxPooling1DChannelsFirst(self):
width = 7
channels = 3
images = random_ops.random_uniform((5, channels, width))
layer = pooling_layers.MaxPooling1D(
2, strides=2, data_format='channels_first')
output = layer.apply(images)
self.assertListEqual(output.get_shape().as_list(),
[5, channels, width // 2])
def testCreateAveragePooling1DChannelsFirst(self):
width = 7
channels = 3
images = random_ops.random_uniform((5, channels, width))
layer = pooling_layers.AveragePooling1D(
2, strides=2, data_format='channels_first')
output = layer.apply(images)
self.assertListEqual(output.get_shape().as_list(),
[5, channels, width // 2])
def testCreateMaxPooling3D(self):
depth, height, width = 6, 7, 9
images = random_ops.random_uniform((5, depth, height, width, 4))
layer = pooling_layers.MaxPooling3D([2, 2, 2], strides=2)
output = layer.apply(images)
self.assertListEqual(output.get_shape().as_list(), [5, 3, 3, 4, 4])
def testCreateAveragePooling3D(self):
depth, height, width = 6, 7, 9
images = random_ops.random_uniform((5, depth, height, width, 4))
layer = pooling_layers.AveragePooling3D([2, 2, 2], strides=2)
output = layer.apply(images)
self.assertListEqual(output.get_shape().as_list(), [5, 3, 3, 4, 4])
def testMaxPooling3DChannelsFirst(self):
depth, height, width = 6, 7, 9
images = random_ops.random_uniform((5, 2, depth, height, width))
layer = pooling_layers.MaxPooling3D(
[2, 2, 2], strides=2, data_format='channels_first')
output = layer.apply(images)
self.assertListEqual(output.get_shape().as_list(), [5, 2, 3, 3, 4])
def testAveragePooling3DChannelsFirst(self):
depth, height, width = 6, 7, 9
images = random_ops.random_uniform((5, 2, depth, height, width))
layer = pooling_layers.AveragePooling3D(
[2, 2, 2], strides=2, data_format='channels_first')
output = layer.apply(images)
self.assertListEqual(output.get_shape().as_list(), [5, 2, 3, 3, 4])
def testCreateMaxPooling2DIntegerPoolSize(self):
height, width = 7, 9
images = random_ops.random_uniform((5, height, width, 4))
layer = pooling_layers.MaxPooling2D(2, strides=2)
output = layer.apply(images)
self.assertListEqual(output.get_shape().as_list(), [5, 3, 4, 4])
def testMaxPooling2DPaddingSame(self):
height, width = 7, 9
images = random_ops.random_uniform((5, height, width, 4), seed=1)
layer = pooling_layers.MaxPooling2D(
images.get_shape()[1:3], strides=2, padding='same')
output = layer.apply(images)
self.assertListEqual(output.get_shape().as_list(), [5, 4, 5, 4])
def testCreatePooling2DWithStrides(self):
height, width = 6, 8
# Test strides tuple
images = random_ops.random_uniform((5, height, width, 3), seed=1)
layer = pooling_layers.MaxPooling2D([2, 2], strides=(2, 2), padding='same')
output = layer.apply(images)
self.assertListEqual(output.get_shape().as_list(),
[5, height / 2, width / 2, 3])
# Test strides integer
layer = pooling_layers.MaxPooling2D([2, 2], strides=2, padding='same')
output = layer.apply(images)
self.assertListEqual(output.get_shape().as_list(),
[5, height / 2, width / 2, 3])
# Test unequal strides
layer = pooling_layers.MaxPooling2D([2, 2], strides=(2, 1), padding='same')
output = layer.apply(images)
self.assertListEqual(output.get_shape().as_list(),
[5, height / 2, width, 3])
if __name__ == '__main__':
test.main()
|
apache-2.0
|
google/material-design-icons
|
update/venv/lib/python3.9/site-packages/pip/_internal/utils/unpacking.py
|
6
|
9032
|
"""Utilities related archives.
"""
import logging
import os
import shutil
import stat
import tarfile
import zipfile
from typing import Iterable, List, Optional
from zipfile import ZipInfo
from pip._internal.exceptions import InstallationError
from pip._internal.utils.filetypes import (
BZ2_EXTENSIONS,
TAR_EXTENSIONS,
XZ_EXTENSIONS,
ZIP_EXTENSIONS,
)
from pip._internal.utils.misc import ensure_dir
logger = logging.getLogger(__name__)
SUPPORTED_EXTENSIONS = ZIP_EXTENSIONS + TAR_EXTENSIONS
try:
import bz2 # noqa
SUPPORTED_EXTENSIONS += BZ2_EXTENSIONS
except ImportError:
logger.debug("bz2 module is not available")
try:
# Only for Python 3.3+
import lzma # noqa
SUPPORTED_EXTENSIONS += XZ_EXTENSIONS
except ImportError:
logger.debug("lzma module is not available")
def current_umask():
# type: () -> int
"""Get the current umask which involves having to set it temporarily."""
mask = os.umask(0)
os.umask(mask)
return mask
def split_leading_dir(path):
# type: (str) -> List[str]
path = path.lstrip("/").lstrip("\\")
if "/" in path and (
("\\" in path and path.find("/") < path.find("\\")) or "\\" not in path
):
return path.split("/", 1)
elif "\\" in path:
return path.split("\\", 1)
else:
return [path, ""]
def has_leading_dir(paths):
# type: (Iterable[str]) -> bool
"""Returns true if all the paths have the same leading path name
(i.e., everything is in one subdirectory in an archive)"""
common_prefix = None
for path in paths:
prefix, rest = split_leading_dir(path)
if not prefix:
return False
elif common_prefix is None:
common_prefix = prefix
elif prefix != common_prefix:
return False
return True
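# --- Hedged illustration (editor addition, not part of upstream pip) ---
# How the two helpers above interact: an archive whose members all share one
# leading directory ("pkg/") is considered flattenable on unpack.
def _example_leading_dir():
    assert split_leading_dir("pkg/setup.py") == ["pkg", "setup.py"]
    assert has_leading_dir(["pkg/setup.py", "pkg/README"])
    assert not has_leading_dir(["setup.py", "pkg/README"])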
def is_within_directory(directory, target):
# type: (str, str) -> bool
"""
Return true if the absolute path of target is within the directory
"""
abs_directory = os.path.abspath(directory)
abs_target = os.path.abspath(target)
prefix = os.path.commonprefix([abs_directory, abs_target])
return prefix == abs_directory
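# --- Hedged illustration (editor addition): the traversal check above resolves
# both paths with os.path.abspath, so ".." segments cannot escape the target
# directory. POSIX-style paths are assumed for this example.
def _example_traversal_check():
    assert is_within_directory("/srv/app", "/srv/app/sub/file.txt")
    assert not is_within_directory("/srv/app", "/srv/app/../etc/passwd")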
def set_extracted_file_to_default_mode_plus_executable(path):
# type: (str) -> None
"""
    Make the file at path have execute permission for user/group/world
    (chmod +x); this is a no-op on Windows per the Python docs
"""
os.chmod(path, (0o777 & ~current_umask() | 0o111))
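# --- Hedged worked example (editor addition): with a typical umask of 0o022 the
# expression above evaluates to 0o755 (rwxr-xr-x); the umask value is assumed.
def _example_mode_math():
    umask = 0o022
    assert (0o777 & ~umask) | 0o111 == 0o755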
def zip_item_is_executable(info):
# type: (ZipInfo) -> bool
mode = info.external_attr >> 16
# if mode and regular file and any execute permissions for
# user/group/world?
return bool(mode and stat.S_ISREG(mode) and mode & 0o111)
def unzip_file(filename, location, flatten=True):
# type: (str, str, bool) -> None
"""
Unzip the file (with path `filename`) to the destination `location`. All
files are written based on system defaults and umask (i.e. permissions are
not preserved), except that regular file members with any execute
permissions (user, group, or world) have "chmod +x" applied after being
written. Note that for windows, any execute changes using os.chmod are
no-ops per the python docs.
"""
ensure_dir(location)
zipfp = open(filename, "rb")
try:
zip = zipfile.ZipFile(zipfp, allowZip64=True)
leading = has_leading_dir(zip.namelist()) and flatten
for info in zip.infolist():
name = info.filename
fn = name
if leading:
fn = split_leading_dir(name)[1]
fn = os.path.join(location, fn)
dir = os.path.dirname(fn)
if not is_within_directory(location, fn):
message = (
"The zip file ({}) has a file ({}) trying to install "
"outside target directory ({})"
)
raise InstallationError(message.format(filename, fn, location))
if fn.endswith("/") or fn.endswith("\\"):
# A directory
ensure_dir(fn)
else:
ensure_dir(dir)
# Don't use read() to avoid allocating an arbitrarily large
# chunk of memory for the file's content
fp = zip.open(name)
try:
with open(fn, "wb") as destfp:
shutil.copyfileobj(fp, destfp)
finally:
fp.close()
if zip_item_is_executable(info):
set_extracted_file_to_default_mode_plus_executable(fn)
finally:
zipfp.close()
def untar_file(filename, location):
# type: (str, str) -> None
"""
Untar the file (with path `filename`) to the destination `location`.
All files are written based on system defaults and umask (i.e. permissions
are not preserved), except that regular file members with any execute
permissions (user, group, or world) have "chmod +x" applied after being
written. Note that for windows, any execute changes using os.chmod are
no-ops per the python docs.
"""
ensure_dir(location)
if filename.lower().endswith(".gz") or filename.lower().endswith(".tgz"):
mode = "r:gz"
elif filename.lower().endswith(BZ2_EXTENSIONS):
mode = "r:bz2"
elif filename.lower().endswith(XZ_EXTENSIONS):
mode = "r:xz"
elif filename.lower().endswith(".tar"):
mode = "r"
else:
logger.warning(
"Cannot determine compression type for file %s",
filename,
)
mode = "r:*"
tar = tarfile.open(filename, mode)
try:
leading = has_leading_dir([member.name for member in tar.getmembers()])
for member in tar.getmembers():
fn = member.name
if leading:
fn = split_leading_dir(fn)[1]
path = os.path.join(location, fn)
if not is_within_directory(location, path):
message = (
"The tar file ({}) has a file ({}) trying to install "
"outside target directory ({})"
)
raise InstallationError(message.format(filename, path, location))
if member.isdir():
ensure_dir(path)
elif member.issym():
try:
# https://github.com/python/typeshed/issues/2673
tar._extract_member(member, path) # type: ignore
except Exception as exc:
# Some corrupt tar files seem to produce this
# (specifically bad symlinks)
logger.warning(
"In the tar file %s the member %s is invalid: %s",
filename,
member.name,
exc,
)
continue
else:
try:
fp = tar.extractfile(member)
except (KeyError, AttributeError) as exc:
# Some corrupt tar files seem to produce this
# (specifically bad symlinks)
logger.warning(
"In the tar file %s the member %s is invalid: %s",
filename,
member.name,
exc,
)
continue
ensure_dir(os.path.dirname(path))
assert fp is not None
with open(path, "wb") as destfp:
shutil.copyfileobj(fp, destfp)
fp.close()
# Update the timestamp (useful for cython compiled files)
tar.utime(member, path)
                # does the member have any execute permissions for user/group/world?
if member.mode & 0o111:
set_extracted_file_to_default_mode_plus_executable(path)
finally:
tar.close()
def unpack_file(
filename, # type: str
location, # type: str
content_type=None, # type: Optional[str]
):
# type: (...) -> None
filename = os.path.realpath(filename)
if (
content_type == "application/zip"
or filename.lower().endswith(ZIP_EXTENSIONS)
or zipfile.is_zipfile(filename)
):
unzip_file(filename, location, flatten=not filename.endswith(".whl"))
elif (
content_type == "application/x-gzip"
or tarfile.is_tarfile(filename)
or filename.lower().endswith(TAR_EXTENSIONS + BZ2_EXTENSIONS + XZ_EXTENSIONS)
):
untar_file(filename, location)
else:
# FIXME: handle?
# FIXME: magic signatures?
logger.critical(
"Cannot unpack file %s (downloaded from %s, content-type: %s); "
"cannot detect archive format",
filename,
location,
content_type,
)
raise InstallationError(f"Cannot determine archive format of {location}")
|
apache-2.0
|
ferabra/edx-platform
|
lms/djangoapps/licenses/management/commands/import_serial_numbers.py
|
106
|
2288
|
import os.path
from django.utils.html import escape
from django.core.management.base import BaseCommand, CommandError
from xmodule.modulestore.django import modulestore
from licenses.models import CourseSoftware, UserLicense
from opaque_keys.edx.locations import SlashSeparatedCourseKey
class Command(BaseCommand):
help = """Imports serial numbers for software used in a course.
Usage: import_serial_numbers <course_id> <software_name> <file>
    <file> is a text file that lists one available serial number per line.
Example:
import_serial_numbers MITx/6.002x/2012_Fall matlab serials.txt
"""
args = "course_id software_id serial_file"
def handle(self, *args, **options):
course_id, software_name, filename = self._parse_arguments(args)
software, _ = CourseSoftware.objects.get_or_create(course_id=course_id,
name=software_name)
self._import_serials(software, filename)
def _parse_arguments(self, args):
if len(args) != 3:
raise CommandError("Incorrect number of arguments")
course_id = args[0]
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
if not modulestore().has_course(course_key):
raise CommandError("Unknown course_id")
software_name = escape(args[1].lower())
filename = os.path.abspath(args[2])
if not os.path.exists(filename):
raise CommandError("Cannot find filename {0}".format(filename))
return course_key, software_name, filename
def _import_serials(self, software, filename):
print "Importing serial numbers for {0}.".format(software)
serials = set(unicode(l.strip()) for l in open(filename))
# remove serial numbers we already have
licenses = UserLicense.objects.filter(software=software)
known_serials = set(l.serial for l in licenses)
if known_serials:
serials = serials.difference(known_serials)
        # add the new serial numbers to the database
for serial in serials:
license = UserLicense(software=software, serial=serial)
license.save()
print "{0} new serial numbers imported.".format(len(serials))
|
agpl-3.0
|
ktonon/GameSoup
|
gamesoup/expressions/context.py
|
1
|
1688
|
import UserDict
class TemplateContext(UserDict.UserDict):
def update_with_bindings(self, queryset):
'''
Given a queryset of bindings, update this template
context with any bindings that match parameters.
'''
from gamesoup.library.models import InterfaceTemplateParameterBinding
for param_name, expr in self.items():
try:
binding = queryset.get(parameter__name=param_name)
self[param_name] = binding.expr
except InterfaceTemplateParameterBinding.DoesNotExist:
pass # It's ok if the type doesn't provide a binding
def __str__(self):
return str(unicode(self))
def __unicode__(self):
'''
>>> from gamesoup.expressions.syntax import Expr
>>> e = Expr.parse('[]')
>>> c = TemplateContext({
... 'I.b': e,
... 'I.a': e,
... 'I.c': e,
... })
>>> print "%s" % c
a=[],b=[],c=[]
'''
return ','.join([
'%s=%r' % (k.split('.')[1], v)
for k, v in sorted(self.items())
])
def __repr__(self):
'''
>>> from gamesoup.expressions.syntax import Expr
>>> e = Expr.parse('[]')
>>> c = TemplateContext({
... 'I.b': e,
... 'I.a': e,
... 'I.c': e,
... })
>>> c
I.a : []
I.b : []
I.c : []
>>> TemplateContext({})
<BLANKLINE>
'''
# Items sorted by keys
return '\n'.join([
'%s : %r' % (k, v)
for k, v in sorted(self.items())
])
|
mit
|
freeflightsim/ffs-app-engine
|
ffs-cal.appspot.com/gdata/atom/http_core.py
|
8
|
18853
|
#!/usr/bin/env python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This module is used for version 2 of the Google Data APIs.
# TODO: add proxy handling.
__author__ = '[email protected] (Jeff Scudder)'
import os
import StringIO
import urlparse
import urllib
import httplib
ssl = None
try:
import ssl
except ImportError:
pass
class Error(Exception):
pass
class UnknownSize(Error):
pass
class ProxyError(Error):
pass
MIME_BOUNDARY = 'END_OF_PART'
class HttpRequest(object):
"""Contains all of the parameters for an HTTP 1.1 request.
The HTTP headers are represented by a dictionary, and it is the
responsibility of the user to ensure that duplicate field names are combined
into one header value according to the rules in section 4.2 of RFC 2616.
"""
method = None
uri = None
def __init__(self, uri=None, method=None, headers=None):
"""Construct an HTTP request.
Args:
uri: The full path or partial path as a Uri object or a string.
method: The HTTP method for the request, examples include 'GET', 'POST',
etc.
headers: dict of strings The HTTP headers to include in the request.
"""
self.headers = headers or {}
self._body_parts = []
if method is not None:
self.method = method
if isinstance(uri, (str, unicode)):
uri = Uri.parse_uri(uri)
self.uri = uri or Uri()
def add_body_part(self, data, mime_type, size=None):
"""Adds data to the HTTP request body.
If more than one part is added, this is assumed to be a mime-multipart
request. This method is designed to create MIME 1.0 requests as specified
in RFC 1341.
Args:
data: str or a file-like object containing a part of the request body.
mime_type: str The MIME type describing the data
size: int Required if the data is a file like object. If the data is a
string, the size is calculated so this parameter is ignored.
"""
if isinstance(data, str):
size = len(data)
if size is None:
# TODO: support chunked transfer if some of the body is of unknown size.
raise UnknownSize('Each part of the body must have a known size.')
if 'Content-Length' in self.headers:
content_length = int(self.headers['Content-Length'])
else:
content_length = 0
# If this is the first part added to the body, then this is not a multipart
# request.
if len(self._body_parts) == 0:
self.headers['Content-Type'] = mime_type
content_length = size
self._body_parts.append(data)
elif len(self._body_parts) == 1:
# This is the first member in a mime-multipart request, so change the
# _body_parts list to indicate a multipart payload.
self._body_parts.insert(0, 'Media multipart posting')
boundary_string = '\r\n--%s\r\n' % (MIME_BOUNDARY,)
content_length += len(boundary_string) + size
self._body_parts.insert(1, boundary_string)
content_length += len('Media multipart posting')
# Put the content type of the first part of the body into the multipart
# payload.
original_type_string = 'Content-Type: %s\r\n\r\n' % (
self.headers['Content-Type'],)
self._body_parts.insert(2, original_type_string)
content_length += len(original_type_string)
boundary_string = '\r\n--%s\r\n' % (MIME_BOUNDARY,)
self._body_parts.append(boundary_string)
content_length += len(boundary_string)
# Change the headers to indicate this is now a mime multipart request.
self.headers['Content-Type'] = 'multipart/related; boundary="%s"' % (
MIME_BOUNDARY,)
self.headers['MIME-version'] = '1.0'
# Include the mime type of this part.
type_string = 'Content-Type: %s\r\n\r\n' % (mime_type)
self._body_parts.append(type_string)
content_length += len(type_string)
self._body_parts.append(data)
ending_boundary_string = '\r\n--%s--' % (MIME_BOUNDARY,)
self._body_parts.append(ending_boundary_string)
content_length += len(ending_boundary_string)
else:
# This is a mime multipart request.
boundary_string = '\r\n--%s\r\n' % (MIME_BOUNDARY,)
self._body_parts.insert(-1, boundary_string)
content_length += len(boundary_string) + size
# Include the mime type of this part.
type_string = 'Content-Type: %s\r\n\r\n' % (mime_type)
self._body_parts.insert(-1, type_string)
content_length += len(type_string)
self._body_parts.insert(-1, data)
self.headers['Content-Length'] = str(content_length)
# I could add an "append_to_body_part" method as well.
AddBodyPart = add_body_part
def add_form_inputs(self, form_data,
mime_type='application/x-www-form-urlencoded'):
"""Form-encodes and adds data to the request body.
Args:
      form_data: dict or sequence of two-member tuples which contains the
form keys and values.
mime_type: str The MIME type of the form data being sent. Defaults
to 'application/x-www-form-urlencoded'.
"""
body = urllib.urlencode(form_data)
self.add_body_part(body, mime_type)
AddFormInputs = add_form_inputs
def _copy(self):
"""Creates a deep copy of this request."""
copied_uri = Uri(self.uri.scheme, self.uri.host, self.uri.port,
self.uri.path, self.uri.query.copy())
new_request = HttpRequest(uri=copied_uri, method=self.method,
headers=self.headers.copy())
new_request._body_parts = self._body_parts[:]
return new_request
def _dump(self):
"""Converts to a printable string for debugging purposes.
In order to preserve the request, it does not read from file-like objects
in the body.
"""
output = 'HTTP Request\n method: %s\n url: %s\n headers:\n' % (
self.method, str(self.uri))
for header, value in self.headers.iteritems():
output += ' %s: %s\n' % (header, value)
output += ' body sections:\n'
i = 0
for part in self._body_parts:
if isinstance(part, (str, unicode)):
output += ' %s: %s\n' % (i, part)
else:
output += ' %s: <file like object>\n' % i
i += 1
return output
def _apply_defaults(http_request):
if http_request.uri.scheme is None:
if http_request.uri.port == 443:
http_request.uri.scheme = 'https'
else:
http_request.uri.scheme = 'http'
class Uri(object):
"""A URI as used in HTTP 1.1"""
scheme = None
host = None
port = None
path = None
def __init__(self, scheme=None, host=None, port=None, path=None, query=None):
"""Constructor for a URI.
Args:
scheme: str This is usually 'http' or 'https'.
host: str The host name or IP address of the desired server.
      port: int The server's port number.
path: str The path of the resource following the host. This begins with
a /, example: '/calendar/feeds/default/allcalendars/full'
query: dict of strings The URL query parameters. The keys and values are
both escaped so this dict should contain the unescaped values.
For example {'my key': 'val', 'second': '!!!'} will become
'?my+key=val&second=%21%21%21' which is appended to the path.
"""
self.query = query or {}
if scheme is not None:
self.scheme = scheme
if host is not None:
self.host = host
if port is not None:
self.port = port
if path:
self.path = path
def _get_query_string(self):
param_pairs = []
for key, value in self.query.iteritems():
param_pairs.append('='.join((urllib.quote_plus(key),
urllib.quote_plus(str(value)))))
return '&'.join(param_pairs)
def _get_relative_path(self):
"""Returns the path with the query parameters escaped and appended."""
param_string = self._get_query_string()
if self.path is None:
path = '/'
else:
path = self.path
if param_string:
return '?'.join([path, param_string])
else:
return path
def _to_string(self):
if self.scheme is None and self.port == 443:
scheme = 'https'
elif self.scheme is None:
scheme = 'http'
else:
scheme = self.scheme
if self.path is None:
path = '/'
else:
path = self.path
if self.port is None:
return '%s://%s%s' % (scheme, self.host, self._get_relative_path())
else:
return '%s://%s:%s%s' % (scheme, self.host, str(self.port),
self._get_relative_path())
def __str__(self):
return self._to_string()
def modify_request(self, http_request=None):
"""Sets HTTP request components based on the URI."""
if http_request is None:
http_request = HttpRequest()
if http_request.uri is None:
http_request.uri = Uri()
# Determine the correct scheme.
if self.scheme:
http_request.uri.scheme = self.scheme
if self.port:
http_request.uri.port = self.port
if self.host:
http_request.uri.host = self.host
# Set the relative uri path
if self.path:
http_request.uri.path = self.path
if self.query:
http_request.uri.query = self.query.copy()
return http_request
ModifyRequest = modify_request
def parse_uri(uri_string):
"""Creates a Uri object which corresponds to the URI string.
This method can accept partial URIs, but it will leave missing
members of the Uri unset.
"""
parts = urlparse.urlparse(uri_string)
uri = Uri()
if parts[0]:
uri.scheme = parts[0]
if parts[1]:
host_parts = parts[1].split(':')
if host_parts[0]:
uri.host = host_parts[0]
if len(host_parts) > 1:
uri.port = int(host_parts[1])
if parts[2]:
uri.path = parts[2]
if parts[4]:
param_pairs = parts[4].split('&')
for pair in param_pairs:
pair_parts = pair.split('=')
if len(pair_parts) > 1:
uri.query[urllib.unquote_plus(pair_parts[0])] = (
urllib.unquote_plus(pair_parts[1]))
elif len(pair_parts) == 1:
uri.query[urllib.unquote_plus(pair_parts[0])] = None
return uri
parse_uri = staticmethod(parse_uri)
ParseUri = parse_uri
parse_uri = Uri.parse_uri
ParseUri = Uri.parse_uri
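# --- Hedged usage sketch (editor addition, not part of the original module) ---
# Building a form POST with the classes above; the host and form fields are
# placeholders. A single body part keeps the request non-multipart.
def _example_build_request():
  request = HttpRequest(uri='http://www.example.com/feeds?alt=json', method='POST')
  request.add_form_inputs({'title': 'hello', 'draft': 'true'})
  # Content-Type is now application/x-www-form-urlencoded and Content-Length is set.
  return request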
class HttpResponse(object):
status = None
reason = None
_body = None
def __init__(self, status=None, reason=None, headers=None, body=None):
self._headers = headers or {}
if status is not None:
self.status = status
if reason is not None:
self.reason = reason
if body is not None:
if hasattr(body, 'read'):
self._body = body
else:
self._body = StringIO.StringIO(body)
def getheader(self, name, default=None):
if name in self._headers:
return self._headers[name]
else:
return default
def getheaders(self):
return self._headers
def read(self, amt=None):
if self._body is None:
return None
if not amt:
return self._body.read()
else:
return self._body.read(amt)
def _dump_response(http_response):
"""Converts to a string for printing debug messages.
Does not read the body since that may consume the content.
"""
output = 'HttpResponse\n status: %s\n reason: %s\n headers:' % (
http_response.status, http_response.reason)
headers = http_response.getheaders()
if isinstance(headers, dict):
for header, value in headers.iteritems():
output += ' %s: %s\n' % (header, value)
else:
for pair in headers:
output += ' %s: %s\n' % (pair[0], pair[1])
return output
class HttpClient(object):
"""Performs HTTP requests using httplib."""
debug = None
def request(self, http_request):
return self._http_request(http_request.method, http_request.uri,
http_request.headers, http_request._body_parts)
Request = request
def _get_connection(self, uri, headers=None):
"""Opens a socket connection to the server to set up an HTTP request.
Args:
uri: The full URL for the request as a Uri object.
headers: A dict of string pairs containing the HTTP headers for the
request.
"""
connection = None
if uri.scheme == 'https':
if not uri.port:
connection = httplib.HTTPSConnection(uri.host)
else:
connection = httplib.HTTPSConnection(uri.host, int(uri.port))
else:
if not uri.port:
connection = httplib.HTTPConnection(uri.host)
else:
connection = httplib.HTTPConnection(uri.host, int(uri.port))
return connection
def _http_request(self, method, uri, headers=None, body_parts=None):
"""Makes an HTTP request using httplib.
Args:
method: str example: 'GET', 'POST', 'PUT', 'DELETE', etc.
uri: str or atom.http_core.Uri
headers: dict of strings mapping to strings which will be sent as HTTP
headers in the request.
body_parts: list of strings, objects with a read method, or objects
which can be converted to strings using str. Each of these
will be sent in order as the body of the HTTP request.
"""
if isinstance(uri, (str, unicode)):
uri = Uri.parse_uri(uri)
connection = self._get_connection(uri, headers=headers)
if self.debug:
connection.debuglevel = 1
if connection.host != uri.host:
connection.putrequest(method, str(uri))
else:
connection.putrequest(method, uri._get_relative_path())
# Overcome a bug in Python 2.4 and 2.5
# httplib.HTTPConnection.putrequest adding
# HTTP request header 'Host: www.google.com:443' instead of
# 'Host: www.google.com', and thus resulting the error message
# 'Token invalid - AuthSub token has wrong scope' in the HTTP response.
if (uri.scheme == 'https' and int(uri.port or 443) == 443 and
hasattr(connection, '_buffer') and
isinstance(connection._buffer, list)):
header_line = 'Host: %s:443' % uri.host
replacement_header_line = 'Host: %s' % uri.host
try:
connection._buffer[connection._buffer.index(header_line)] = (
replacement_header_line)
except ValueError: # header_line missing from connection._buffer
pass
# Send the HTTP headers.
for header_name, value in headers.iteritems():
connection.putheader(header_name, value)
connection.endheaders()
# If there is data, send it in the request.
if body_parts:
for part in body_parts:
_send_data_part(part, connection)
# Return the HTTP Response from the server.
return connection.getresponse()
def _send_data_part(data, connection):
if isinstance(data, (str, unicode)):
# I might want to just allow str, not unicode.
connection.send(data)
return
# Check to see if data is a file-like object that has a read method.
elif hasattr(data, 'read'):
# Read the file and send it a chunk at a time.
while 1:
binarydata = data.read(100000)
if binarydata == '': break
connection.send(binarydata)
return
else:
# The data object was not a file.
# Try to convert to a string and send the data.
connection.send(str(data))
return
class ProxiedHttpClient(HttpClient):
def _get_connection(self, uri, headers=None):
# Check to see if there are proxy settings required for this request.
proxy = None
if uri.scheme == 'https':
proxy = os.environ.get('https_proxy')
elif uri.scheme == 'http':
proxy = os.environ.get('http_proxy')
if not proxy:
return HttpClient._get_connection(self, uri, headers=headers)
# Now we have the URL of the appropriate proxy server.
# Get a username and password for the proxy if required.
proxy_auth = _get_proxy_auth()
if uri.scheme == 'https':
import socket
if proxy_auth:
proxy_auth = 'Proxy-authorization: %s' % proxy_auth
# Construct the proxy connect command.
port = uri.port
if not port:
port = 443
proxy_connect = 'CONNECT %s:%s HTTP/1.0\r\n' % (uri.host, port)
# Set the user agent to send to the proxy
user_agent = ''
if headers and 'User-Agent' in headers:
user_agent = 'User-Agent: %s\r\n' % (headers['User-Agent'])
proxy_pieces = '%s%s%s\r\n' % (proxy_connect, proxy_auth, user_agent)
# Find the proxy host and port.
proxy_uri = Uri.parse_uri(proxy)
if not proxy_uri.port:
proxy_uri.port = '80'
# Connect to the proxy server, very simple recv and error checking
p_sock = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
p_sock.connect((proxy_uri.host, int(proxy_uri.port)))
p_sock.sendall(proxy_pieces)
response = ''
# Wait for the full response.
while response.find("\r\n\r\n") == -1:
response += p_sock.recv(8192)
p_status = response.split()[1]
if p_status != str(200):
raise ProxyError('Error status=%s' % str(p_status))
# Trivial setup for ssl socket.
sslobj = None
if ssl is not None:
sslobj = ssl.wrap_socket(p_sock, None, None)
else:
        sock_ssl = socket.ssl(p_sock, None, None)
        sslobj = httplib.FakeSocket(p_sock, sock_ssl)
      # Initialize httplib and replace with the proxy socket.
connection = httplib.HTTPConnection(proxy_uri.host)
connection.sock = sslobj
return connection
elif uri.scheme == 'http':
proxy_uri = Uri.parse_uri(proxy)
if not proxy_uri.port:
proxy_uri.port = '80'
if proxy_auth:
headers['Proxy-Authorization'] = proxy_auth.strip()
return httplib.HTTPConnection(proxy_uri.host, int(proxy_uri.port))
return None
def _get_proxy_auth():
import base64
proxy_username = os.environ.get('proxy-username')
if not proxy_username:
proxy_username = os.environ.get('proxy_username')
proxy_password = os.environ.get('proxy-password')
if not proxy_password:
proxy_password = os.environ.get('proxy_password')
if proxy_username:
user_auth = base64.b64encode('%s:%s' % (proxy_username,
proxy_password))
return 'Basic %s\r\n' % (user_auth.strip())
else:
return ''
|
gpl-2.0
|
sourcegraph/pydep
|
pydep/vcs.py
|
3
|
1048
|
import re
repo_url_patterns = [
r'(?:git\+)?((?:https?|git)\://github.com/(?:[^/#]+)/(?:[^/#]+))(?:/.*)?',
r'(?:git\+|hg\+)?((?:https?|git|hg)\://bitbucket.org/(?:[^/#]+)/(?:[^/#]+))(?:/.*)?',
r'(?:git\+|hg\+)?((?:https?|git|hg)\://code.google.com/p/(?:[^/#]+))(?:/.*)?',
]
def parse_repo_url(url):
"""Returns the canonical repository clone URL from a string that contains it"""
for pattern in repo_url_patterns:
match = re.match(pattern, url)
if match is not None:
return match.group(1)
return None
def parse_repo_url_and_revision(url):
"""Returns the canonical repository clone URL and revision from a string that contains it"""
full_url = parse_repo_url(url)
if full_url is None:
return url, '' # fall back to returning the full URL
components = full_url.split('@')
if len(components) == 2:
return components[0], components[1]
elif len(components) == 1:
return components[0], ''
return full_url, '' # fall back to returning the full URL
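# --- Hedged usage sketch (editor addition): the patterns above strip VCS
# prefixes and trailing paths; the URLs below are examples only.
def _example_parse():
    assert parse_repo_url(
        'git+https://github.com/sourcegraph/pydep/archive/master.zip'
    ) == 'https://github.com/sourcegraph/pydep'
    repo, rev = parse_repo_url_and_revision('git+https://github.com/sourcegraph/[email protected]')
    assert (repo, rev) == ('https://github.com/sourcegraph/pydep', '0.1')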
|
bsd-3-clause
|
vrenaville/OCB
|
addons/account_followup/report/account_followup_print.py
|
63
|
5695
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from collections import defaultdict
from openerp.osv import osv
from openerp.report import report_sxw
from openerp.tools.translate import _
class report_rappel(report_sxw.rml_parse):
_name = "account_followup.report.rappel"
def __init__(self, cr, uid, name, context=None):
super(report_rappel, self).__init__(cr, uid, name, context=context)
self.localcontext.update({
'time': time,
'ids_to_objects': self._ids_to_objects,
'getLines': self._lines_get,
'get_text': self._get_text
})
def _ids_to_objects(self, ids):
all_lines = []
for line in self.pool['account_followup.stat.by.partner'].browse(self.cr, self.uid, ids):
if line not in all_lines:
all_lines.append(line)
return all_lines
def _lines_get(self, stat_by_partner_line):
return self._lines_get_with_partner(stat_by_partner_line.partner_id, stat_by_partner_line.company_id.id)
def _lines_get_with_partner(self, partner, company_id):
moveline_obj = self.pool['account.move.line']
moveline_ids = moveline_obj.search(self.cr, self.uid, [
('partner_id', '=', partner.id),
('account_id.type', '=', 'receivable'),
('reconcile_id', '=', False),
('state', '!=', 'draft'),
('company_id', '=', company_id),
])
# lines_per_currency = {currency: [line data, ...], ...}
lines_per_currency = defaultdict(list)
for line in moveline_obj.browse(self.cr, self.uid, moveline_ids):
currency = line.currency_id or line.company_id.currency_id
line_data = {
'name': line.move_id.name,
'ref': line.ref,
'date': line.date,
'date_maturity': line.date_maturity,
'balance': line.amount_currency if currency != line.company_id.currency_id else line.debit - line.credit,
'blocked': line.blocked,
'currency_id': currency,
}
lines_per_currency[currency].append(line_data)
return [{'line': lines, 'currency': currency} for currency, lines in lines_per_currency.items()]
def _get_text(self, stat_line, followup_id, context=None):
context = dict(context or {}, lang=stat_line.partner_id.lang)
fp_obj = self.pool['account_followup.followup']
fp_line = fp_obj.browse(self.cr, self.uid, followup_id, context=context).followup_line
if not fp_line:
raise osv.except_osv(_('Error!'),_("The followup plan defined for the current company does not have any followup action."))
#the default text will be the first fp_line in the sequence with a description.
default_text = ''
li_delay = []
for line in fp_line:
if not default_text and line.description:
default_text = line.description
li_delay.append(line.delay)
li_delay.sort(reverse=True)
a = {}
#look into the lines of the partner that already have a followup level, and take the description of the higher level for which it is available
partner_line_ids = self.pool['account.move.line'].search(self.cr, self.uid, [('partner_id','=',stat_line.partner_id.id),('reconcile_id','=',False),('company_id','=',stat_line.company_id.id),('blocked','=',False),('state','!=','draft'),('debit','!=',False),('account_id.type','=','receivable'),('followup_line_id','!=',False)])
partner_max_delay = 0
partner_max_text = ''
for i in self.pool['account.move.line'].browse(self.cr, self.uid, partner_line_ids, context=context):
if i.followup_line_id.delay > partner_max_delay and i.followup_line_id.description:
partner_max_delay = i.followup_line_id.delay
partner_max_text = i.followup_line_id.description
text = partner_max_delay and partner_max_text or default_text
if text:
text = text % {
'partner_name': stat_line.partner_id.name,
'date': time.strftime('%Y-%m-%d'),
'company_name': stat_line.company_id.name,
'user_signature': self.pool['res.users'].browse(self.cr, self.uid, self.uid, context).signature or '',
}
return text
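# --- Hedged illustration (editor addition): the selection rule implemented in
# report_rappel._get_text(), restated on plain (delay, description) tuples.
# The sample plan and partner history below are invented.
def _example_pick_followup_text():
    plan = [(10, 'first reminder'), (20, ''), (30, 'final notice')]
    default_text = ''
    for delay, description in plan:
        if description:
            default_text = description
            break
    reached = [(10, 'first reminder'), (30, 'final notice')]  # levels already assigned
    best_delay, best_text = 0, ''
    for delay, description in reached:
        if delay > best_delay and description:
            best_delay, best_text = delay, description
    return best_text or default_text  # -> 'final notice'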
class report_followup(osv.AbstractModel):
_name = 'report.account_followup.report_followup'
_inherit = 'report.abstract_report'
_template = 'account_followup.report_followup'
_wrapped_report_class = report_rappel
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
lukeburden/django-allauth
|
allauth/socialaccount/providers/basecamp/provider.py
|
10
|
1174
|
from allauth.socialaccount.providers.base import ProviderAccount
from allauth.socialaccount.providers.oauth2.provider import OAuth2Provider
class BasecampAccount(ProviderAccount):
def get_avatar_url(self):
return None
def to_str(self):
dflt = super(BasecampAccount, self).to_str()
return self.account.extra_data.get('name', dflt)
class BasecampProvider(OAuth2Provider):
id = 'basecamp'
name = 'Basecamp'
account_class = BasecampAccount
def get_auth_params(self, request, action):
data = super(BasecampProvider, self).get_auth_params(request, action)
data['type'] = 'web_server'
return data
def extract_uid(self, data):
data = data['identity']
return str(data['id'])
def extract_common_fields(self, data):
data = data['identity']
return dict(
email=data.get('email_address'),
username=data.get('email_address'),
first_name=data.get('first_name'),
last_name=data.get('last_name'),
name="%s %s" % (data.get('first_name'), data.get('last_name')),
)
provider_classes = [BasecampProvider]
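# --- Hedged illustration (editor addition): the shape of the Basecamp identity
# payload that extract_uid()/extract_common_fields() expect; values are invented.
EXAMPLE_IDENTITY_PAYLOAD = {
    'identity': {
        'id': 9999999,
        'email_address': '[email protected]',
        'first_name': 'Jane',
        'last_name': 'Doe',
    }
}
# Given this payload, extract_uid() returns '9999999' and extract_common_fields()
# reuses email_address for both email and username.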
|
mit
|
rkokkelk/CouchPotatoServer
|
libs/rtorrent/__init__.py
|
64
|
24837
|
# Copyright (c) 2013 Chris Lucas, <[email protected]>
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import urllib
import os.path
import time
import xmlrpclib
from rtorrent.common import find_torrent, join_uri, \
update_uri, is_valid_port, convert_version_tuple_to_str
from rtorrent.lib.torrentparser import TorrentParser
from rtorrent.lib.xmlrpc.http import HTTPServerProxy
from rtorrent.lib.xmlrpc.scgi import SCGIServerProxy
from rtorrent.rpc import Method
from rtorrent.lib.xmlrpc.basic_auth import BasicAuthTransport
from rtorrent.torrent import Torrent
from rtorrent.group import Group
import rtorrent.rpc # @UnresolvedImport
__version__ = "0.2.9"
__author__ = "Chris Lucas"
__contact__ = "[email protected]"
__license__ = "MIT"
MIN_RTORRENT_VERSION = (0, 8, 1)
MIN_RTORRENT_VERSION_STR = convert_version_tuple_to_str(MIN_RTORRENT_VERSION)
class RTorrent:
""" Create a new rTorrent connection """
rpc_prefix = None
def __init__(self, uri, username=None, password=None,
verify=False, sp=None, sp_kwargs=None):
self.uri = self._transform_uri(uri) # : From X{__init__(self, url)}
self.username = username
self.password = password
self.scheme = urllib.splittype(self.uri)[0]
if sp:
self.sp = sp
elif self.scheme in ['http', 'https']:
self.sp = HTTPServerProxy
elif self.scheme == 'scgi':
self.sp = SCGIServerProxy
else:
raise NotImplementedError()
self.sp_kwargs = sp_kwargs or {}
self.torrents = [] # : List of L{Torrent} instances
self._rpc_methods = [] # : List of rTorrent RPC methods
self._torrent_cache = []
self._client_version_tuple = ()
if verify is True:
self._verify_conn()
def _transform_uri(self, uri):
scheme = urllib.splittype(uri)[0]
if scheme == 'httprpc' or scheme.startswith('httprpc+'):
            # Try to find the HTTPRPC transport (token after '+' in 'httprpc+https'), otherwise assume HTTP
transport = scheme[scheme.index('+') + 1:] if '+' in scheme else 'http'
# Transform URI with new path and scheme
uri = join_uri(uri, 'plugins/httprpc/action.php', construct=False)
return update_uri(uri, scheme=transport)
return uri
def _get_conn(self):
"""Get ServerProxy instance"""
if self.username and self.password:
if self.scheme == 'scgi':
raise NotImplementedError()
secure = self.scheme == 'https'
return self.sp(
self.uri,
transport=BasicAuthTransport(secure, self.username, self.password),
**self.sp_kwargs
)
return self.sp(self.uri, **self.sp_kwargs)
def _verify_conn(self):
# check for rpc methods that should be available
assert "system.client_version" in self._get_rpc_methods(), "Required RPC method not available."
assert "system.library_version" in self._get_rpc_methods(), "Required RPC method not available."
# minimum rTorrent version check
assert self._meets_version_requirement() is True,\
"Error: Minimum rTorrent version required is {0}".format(
MIN_RTORRENT_VERSION_STR)
def test_connection(self):
try:
self._verify_conn()
except:
return False
return True
def _meets_version_requirement(self):
return self._get_client_version_tuple() >= MIN_RTORRENT_VERSION
def _get_client_version_tuple(self):
conn = self._get_conn()
if not self._client_version_tuple:
if not hasattr(self, "client_version"):
setattr(self, "client_version",
conn.system.client_version())
rtver = getattr(self, "client_version")
self._client_version_tuple = tuple([int(i) for i in
rtver.split(".")])
return self._client_version_tuple
def _update_rpc_methods(self):
self._rpc_methods = self._get_conn().system.listMethods()
return self._rpc_methods
def _get_rpc_methods(self):
""" Get list of raw RPC commands
@return: raw RPC commands
@rtype: list
"""
return(self._rpc_methods or self._update_rpc_methods())
def get_torrents(self, view="main"):
"""Get list of all torrents in specified view
@return: list of L{Torrent} instances
@rtype: list
@todo: add validity check for specified view
"""
self.torrents = []
methods = rtorrent.torrent.methods
retriever_methods = [m for m in methods
if m.is_retriever() and m.is_available(self)]
m = rtorrent.rpc.Multicall(self)
m.add("d.multicall", view, "d.get_hash=",
*[method.rpc_call + "=" for method in retriever_methods])
results = m.call()[0] # only sent one call, only need first result
for result in results:
results_dict = {}
# build results_dict
for m, r in zip(retriever_methods, result[1:]): # result[0] is the info_hash
results_dict[m.varname] = rtorrent.rpc.process_result(m, r)
self.torrents.append(
Torrent(self, info_hash=result[0], **results_dict)
)
self._manage_torrent_cache()
return(self.torrents)
def _manage_torrent_cache(self):
"""Carry tracker/peer/file lists over to new torrent list"""
for torrent in self._torrent_cache:
new_torrent = rtorrent.common.find_torrent(torrent.info_hash,
self.torrents)
if new_torrent is not None:
new_torrent.files = torrent.files
new_torrent.peers = torrent.peers
new_torrent.trackers = torrent.trackers
self._torrent_cache = self.torrents
def _get_load_function(self, file_type, start, verbose):
"""Determine correct "load torrent" RPC method"""
func_name = None
if file_type == "url":
# url strings can be input directly
if start and verbose:
func_name = "load_start_verbose"
elif start:
func_name = "load_start"
elif verbose:
func_name = "load_verbose"
else:
func_name = "load"
elif file_type in ["file", "raw"]:
if start and verbose:
func_name = "load_raw_start_verbose"
elif start:
func_name = "load_raw_start"
elif verbose:
func_name = "load_raw_verbose"
else:
func_name = "load_raw"
return(func_name)
def load_torrent(self, torrent, start=False, verbose=False, verify_load=True, verify_retries=3):
"""
Loads torrent into rTorrent (with various enhancements)
@param torrent: can be a url, a path to a local file, or the raw data
of a torrent file
@type torrent: str
@param start: start torrent when loaded
@type start: bool
@param verbose: print error messages to rTorrent log
@type verbose: bool
@param verify_load: verify that torrent was added to rTorrent successfully
@type verify_load: bool
@return: Depends on verify_load:
- if verify_load is True, (and the torrent was
loaded successfully), it'll return a L{Torrent} instance
- if verify_load is False, it'll return None
@rtype: L{Torrent} instance or None
@raise AssertionError: If the torrent wasn't successfully added to rTorrent
- Check L{TorrentParser} for the AssertionError's
it raises
@note: Because this function includes url verification (if a url was input)
as well as verification as to whether the torrent was successfully added,
this function doesn't execute instantaneously. If that's what you're
looking for, use load_torrent_simple() instead.
"""
p = self._get_conn()
tp = TorrentParser(torrent)
torrent = xmlrpclib.Binary(tp._raw_torrent)
info_hash = tp.info_hash
func_name = self._get_load_function("raw", start, verbose)
# load torrent
getattr(p, func_name)(torrent)
if verify_load:
i = 0
while i < verify_retries:
self.get_torrents()
if info_hash in [t.info_hash for t in self.torrents]:
break
# was still getting AssertionErrors, delay should help
time.sleep(1)
i += 1
assert info_hash in [t.info_hash for t in self.torrents],\
"Adding torrent was unsuccessful."
return(find_torrent(info_hash, self.torrents))
def load_torrent_simple(self, torrent, file_type,
start=False, verbose=False):
"""Loads torrent into rTorrent
@param torrent: can be a url, a path to a local file, or the raw data
of a torrent file
@type torrent: str
@param file_type: valid options: "url", "file", or "raw"
@type file_type: str
@param start: start torrent when loaded
@type start: bool
@param verbose: print error messages to rTorrent log
@type verbose: bool
@return: None
@raise AssertionError: if incorrect file_type is specified
        @note: This function was written for speed; it includes no enhancements.
If you input a url, it won't check if it's valid. You also can't get
verification that the torrent was successfully added to rTorrent.
Use load_torrent() if you would like these features.
"""
p = self._get_conn()
assert file_type in ["raw", "file", "url"], \
"Invalid file_type, options are: 'url', 'file', 'raw'."
func_name = self._get_load_function(file_type, start, verbose)
if file_type == "file":
# since we have to assume we're connected to a remote rTorrent
# client, we have to read the file and send it to rT as raw
assert os.path.isfile(torrent), \
"Invalid path: \"{0}\"".format(torrent)
torrent = open(torrent, "rb").read()
if file_type in ["raw", "file"]:
finput = xmlrpclib.Binary(torrent)
elif file_type == "url":
finput = torrent
getattr(p, func_name)(finput)
def get_views(self):
p = self._get_conn()
return p.view_list()
def create_group(self, name, persistent=True, view=None):
p = self._get_conn()
if persistent is True:
p.group.insert_persistent_view('', name)
else:
assert view is not None, "view parameter required on non-persistent groups"
p.group.insert('', name, view)
self._update_rpc_methods()
def get_group(self, name):
assert name is not None, "group name required"
group = Group(self, name)
group.update()
return group
def set_dht_port(self, port):
"""Set DHT port
@param port: port
@type port: int
@raise AssertionError: if invalid port is given
"""
assert is_valid_port(port), "Valid port range is 0-65535"
self.dht_port = self._p.set_dht_port(port)
def enable_check_hash(self):
"""Alias for set_check_hash(True)"""
self.set_check_hash(True)
def disable_check_hash(self):
"""Alias for set_check_hash(False)"""
self.set_check_hash(False)
def find_torrent(self, info_hash):
"""Frontend for rtorrent.common.find_torrent"""
return(rtorrent.common.find_torrent(info_hash, self.get_torrents()))
def poll(self):
""" poll rTorrent to get latest torrent/peer/tracker/file information
@note: This essentially refreshes every aspect of the rTorrent
connection, so it can be very slow if working with a remote
connection that has a lot of torrents loaded.
@return: None
"""
self.update()
torrents = self.get_torrents()
for t in torrents:
t.poll()
def update(self):
"""Refresh rTorrent client info
@note: All fields are stored as attributes to self.
@return: None
"""
multicall = rtorrent.rpc.Multicall(self)
retriever_methods = [m for m in methods
if m.is_retriever() and m.is_available(self)]
for method in retriever_methods:
multicall.add(method)
multicall.call()
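# --- Hedged usage sketch (editor addition, not part of the original module) ---
# Connecting to a client and loading a torrent by URL; the endpoint, credentials
# and torrent URL below are placeholders. 'httprpc' URIs would be rewritten by
# _transform_uri() to point at plugins/httprpc/action.php.
def _example_usage():
    client = RTorrent('https://seedbox.example.com/RPC2', username='user',
                      password='secret', verify=True)
    client.load_torrent_simple('http://example.com/a.torrent', 'url', start=True)
    for torrent in client.get_torrents():
        print(torrent.info_hash)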
def _build_class_methods(class_obj):
# multicall add class
caller = lambda self, multicall, method, *args:\
multicall.add(method, self.rpc_id, *args)
caller.__doc__ = """Same as Multicall.add(), but with automatic inclusion
of the rpc_id
@param multicall: A L{Multicall} instance
    @type multicall: Multicall
    @param method: L{Method} instance or raw rpc method
    @type method: Method or str
@param args: optional arguments to pass
"""
setattr(class_obj, "multicall_add", caller)
def __compare_rpc_methods(rt_new, rt_old):
from pprint import pprint
rt_new_methods = set(rt_new._get_rpc_methods())
rt_old_methods = set(rt_old._get_rpc_methods())
print("New Methods:")
pprint(rt_new_methods - rt_old_methods)
print("Methods not in new rTorrent:")
pprint(rt_old_methods - rt_new_methods)
def __check_supported_methods(rt):
from pprint import pprint
supported_methods = set([m.rpc_call for m in
methods +
rtorrent.file.methods +
rtorrent.torrent.methods +
rtorrent.tracker.methods +
rtorrent.peer.methods])
all_methods = set(rt._get_rpc_methods())
print("Methods NOT in supported methods")
pprint(all_methods - supported_methods)
print("Supported methods NOT in all methods")
pprint(supported_methods - all_methods)
methods = [
# RETRIEVERS
Method(RTorrent, 'get_xmlrpc_size_limit', 'get_xmlrpc_size_limit'),
Method(RTorrent, 'get_proxy_address', 'get_proxy_address'),
Method(RTorrent, 'get_split_suffix', 'get_split_suffix'),
Method(RTorrent, 'get_up_limit', 'get_upload_rate'),
Method(RTorrent, 'get_max_memory_usage', 'get_max_memory_usage'),
Method(RTorrent, 'get_max_open_files', 'get_max_open_files'),
Method(RTorrent, 'get_min_peers_seed', 'get_min_peers_seed'),
Method(RTorrent, 'get_use_udp_trackers', 'get_use_udp_trackers'),
Method(RTorrent, 'get_preload_min_size', 'get_preload_min_size'),
Method(RTorrent, 'get_max_uploads', 'get_max_uploads'),
Method(RTorrent, 'get_max_peers', 'get_max_peers'),
Method(RTorrent, 'get_timeout_sync', 'get_timeout_sync'),
Method(RTorrent, 'get_receive_buffer_size', 'get_receive_buffer_size'),
Method(RTorrent, 'get_split_file_size', 'get_split_file_size'),
Method(RTorrent, 'get_dht_throttle', 'get_dht_throttle'),
Method(RTorrent, 'get_max_peers_seed', 'get_max_peers_seed'),
Method(RTorrent, 'get_min_peers', 'get_min_peers'),
Method(RTorrent, 'get_tracker_numwant', 'get_tracker_numwant'),
Method(RTorrent, 'get_max_open_sockets', 'get_max_open_sockets'),
Method(RTorrent, 'get_session', 'get_session'),
Method(RTorrent, 'get_ip', 'get_ip'),
Method(RTorrent, 'get_scgi_dont_route', 'get_scgi_dont_route'),
Method(RTorrent, 'get_hash_read_ahead', 'get_hash_read_ahead'),
Method(RTorrent, 'get_http_cacert', 'get_http_cacert'),
Method(RTorrent, 'get_dht_port', 'get_dht_port'),
Method(RTorrent, 'get_handshake_log', 'get_handshake_log'),
Method(RTorrent, 'get_preload_type', 'get_preload_type'),
Method(RTorrent, 'get_max_open_http', 'get_max_open_http'),
Method(RTorrent, 'get_http_capath', 'get_http_capath'),
Method(RTorrent, 'get_max_downloads_global', 'get_max_downloads_global'),
Method(RTorrent, 'get_name', 'get_name'),
Method(RTorrent, 'get_session_on_completion', 'get_session_on_completion'),
Method(RTorrent, 'get_down_limit', 'get_download_rate'),
Method(RTorrent, 'get_down_total', 'get_down_total'),
Method(RTorrent, 'get_up_rate', 'get_up_rate'),
Method(RTorrent, 'get_hash_max_tries', 'get_hash_max_tries'),
Method(RTorrent, 'get_peer_exchange', 'get_peer_exchange'),
Method(RTorrent, 'get_down_rate', 'get_down_rate'),
Method(RTorrent, 'get_connection_seed', 'get_connection_seed'),
Method(RTorrent, 'get_http_proxy', 'get_http_proxy'),
Method(RTorrent, 'get_stats_preloaded', 'get_stats_preloaded'),
Method(RTorrent, 'get_timeout_safe_sync', 'get_timeout_safe_sync'),
Method(RTorrent, 'get_hash_interval', 'get_hash_interval'),
Method(RTorrent, 'get_port_random', 'get_port_random'),
Method(RTorrent, 'get_directory', 'get_directory'),
Method(RTorrent, 'get_port_open', 'get_port_open'),
Method(RTorrent, 'get_max_file_size', 'get_max_file_size'),
Method(RTorrent, 'get_stats_not_preloaded', 'get_stats_not_preloaded'),
Method(RTorrent, 'get_memory_usage', 'get_memory_usage'),
Method(RTorrent, 'get_connection_leech', 'get_connection_leech'),
Method(RTorrent, 'get_check_hash', 'get_check_hash',
boolean=True,
),
Method(RTorrent, 'get_session_lock', 'get_session_lock'),
Method(RTorrent, 'get_preload_required_rate', 'get_preload_required_rate'),
Method(RTorrent, 'get_max_uploads_global', 'get_max_uploads_global'),
Method(RTorrent, 'get_send_buffer_size', 'get_send_buffer_size'),
Method(RTorrent, 'get_port_range', 'get_port_range'),
Method(RTorrent, 'get_max_downloads_div', 'get_max_downloads_div'),
Method(RTorrent, 'get_max_uploads_div', 'get_max_uploads_div'),
Method(RTorrent, 'get_safe_sync', 'get_safe_sync'),
Method(RTorrent, 'get_bind', 'get_bind'),
Method(RTorrent, 'get_up_total', 'get_up_total'),
Method(RTorrent, 'get_client_version', 'system.client_version'),
Method(RTorrent, 'get_library_version', 'system.library_version'),
Method(RTorrent, 'get_api_version', 'system.api_version',
min_version=(0, 9, 1)
),
Method(RTorrent, "get_system_time", "system.time",
docstring="""Get the current time of the system rTorrent is running on
@return: time (posix)
@rtype: int""",
),
# MODIFIERS
Method(RTorrent, 'set_http_proxy', 'set_http_proxy'),
Method(RTorrent, 'set_max_memory_usage', 'set_max_memory_usage'),
Method(RTorrent, 'set_max_file_size', 'set_max_file_size'),
Method(RTorrent, 'set_bind', 'set_bind',
docstring="""Set address bind
@param arg: ip address
@type arg: str
""",
),
Method(RTorrent, 'set_up_limit', 'set_upload_rate',
docstring="""Set global upload limit (in bytes)
@param arg: speed limit
@type arg: int
""",
),
Method(RTorrent, 'set_port_random', 'set_port_random'),
Method(RTorrent, 'set_connection_leech', 'set_connection_leech'),
Method(RTorrent, 'set_tracker_numwant', 'set_tracker_numwant'),
Method(RTorrent, 'set_max_peers', 'set_max_peers'),
Method(RTorrent, 'set_min_peers', 'set_min_peers'),
Method(RTorrent, 'set_max_uploads_div', 'set_max_uploads_div'),
Method(RTorrent, 'set_max_open_files', 'set_max_open_files'),
Method(RTorrent, 'set_max_downloads_global', 'set_max_downloads_global'),
Method(RTorrent, 'set_session_lock', 'set_session_lock'),
Method(RTorrent, 'set_session', 'set_session'),
Method(RTorrent, 'set_split_suffix', 'set_split_suffix'),
Method(RTorrent, 'set_hash_interval', 'set_hash_interval'),
Method(RTorrent, 'set_handshake_log', 'set_handshake_log'),
Method(RTorrent, 'set_port_range', 'set_port_range'),
Method(RTorrent, 'set_min_peers_seed', 'set_min_peers_seed'),
Method(RTorrent, 'set_scgi_dont_route', 'set_scgi_dont_route'),
Method(RTorrent, 'set_preload_min_size', 'set_preload_min_size'),
Method(RTorrent, 'set_log.tracker', 'set_log.tracker'),
Method(RTorrent, 'set_max_uploads_global', 'set_max_uploads_global'),
Method(RTorrent, 'set_down_limit', 'set_download_rate',
docstring="""Set global download limit (in bytes)
@param arg: speed limit
@type arg: int
""",
),
Method(RTorrent, 'set_preload_required_rate', 'set_preload_required_rate'),
Method(RTorrent, 'set_hash_read_ahead', 'set_hash_read_ahead'),
Method(RTorrent, 'set_max_peers_seed', 'set_max_peers_seed'),
Method(RTorrent, 'set_max_uploads', 'set_max_uploads'),
Method(RTorrent, 'set_session_on_completion', 'set_session_on_completion'),
Method(RTorrent, 'set_max_open_http', 'set_max_open_http'),
Method(RTorrent, 'set_directory', 'set_directory'),
Method(RTorrent, 'set_http_cacert', 'set_http_cacert'),
Method(RTorrent, 'set_dht_throttle', 'set_dht_throttle'),
Method(RTorrent, 'set_hash_max_tries', 'set_hash_max_tries'),
Method(RTorrent, 'set_proxy_address', 'set_proxy_address'),
Method(RTorrent, 'set_split_file_size', 'set_split_file_size'),
Method(RTorrent, 'set_receive_buffer_size', 'set_receive_buffer_size'),
Method(RTorrent, 'set_use_udp_trackers', 'set_use_udp_trackers'),
Method(RTorrent, 'set_connection_seed', 'set_connection_seed'),
Method(RTorrent, 'set_xmlrpc_size_limit', 'set_xmlrpc_size_limit'),
Method(RTorrent, 'set_xmlrpc_dialect', 'set_xmlrpc_dialect'),
Method(RTorrent, 'set_safe_sync', 'set_safe_sync'),
Method(RTorrent, 'set_http_capath', 'set_http_capath'),
Method(RTorrent, 'set_send_buffer_size', 'set_send_buffer_size'),
Method(RTorrent, 'set_max_downloads_div', 'set_max_downloads_div'),
Method(RTorrent, 'set_name', 'set_name'),
Method(RTorrent, 'set_port_open', 'set_port_open'),
Method(RTorrent, 'set_timeout_sync', 'set_timeout_sync'),
Method(RTorrent, 'set_peer_exchange', 'set_peer_exchange'),
Method(RTorrent, 'set_ip', 'set_ip',
docstring="""Set IP
@param arg: ip address
@type arg: str
""",
),
Method(RTorrent, 'set_timeout_safe_sync', 'set_timeout_safe_sync'),
Method(RTorrent, 'set_preload_type', 'set_preload_type'),
Method(RTorrent, 'set_check_hash', 'set_check_hash',
docstring="""Enable/Disable hash checking on finished torrents
@param arg: True to enable, False to disable
@type arg: bool
""",
boolean=True,
),
]
_all_methods_list = [methods,
rtorrent.file.methods,
rtorrent.torrent.methods,
rtorrent.tracker.methods,
rtorrent.peer.methods,
]
class_methods_pair = {
RTorrent: methods,
rtorrent.file.File: rtorrent.file.methods,
rtorrent.torrent.Torrent: rtorrent.torrent.methods,
rtorrent.tracker.Tracker: rtorrent.tracker.methods,
rtorrent.peer.Peer: rtorrent.peer.methods,
}
for c in class_methods_pair.keys():
rtorrent.rpc._build_rpc_methods(c, class_methods_pair[c])
_build_class_methods(c)
|
gpl-3.0
|
danilito19/django
|
django/core/checks/security/base.py
|
341
|
6313
|
from django.conf import settings
from .. import Tags, Warning, register
SECRET_KEY_MIN_LENGTH = 50
SECRET_KEY_MIN_UNIQUE_CHARACTERS = 5
W001 = Warning(
"You do not have 'django.middleware.security.SecurityMiddleware' "
"in your MIDDLEWARE_CLASSES so the SECURE_HSTS_SECONDS, "
"SECURE_CONTENT_TYPE_NOSNIFF, "
"SECURE_BROWSER_XSS_FILTER, and SECURE_SSL_REDIRECT settings "
"will have no effect.",
id='security.W001',
)
W002 = Warning(
"You do not have "
"'django.middleware.clickjacking.XFrameOptionsMiddleware' in your "
"MIDDLEWARE_CLASSES, so your pages will not be served with an "
"'x-frame-options' header. Unless there is a good reason for your "
"site to be served in a frame, you should consider enabling this "
"header to help prevent clickjacking attacks.",
id='security.W002',
)
W004 = Warning(
"You have not set a value for the SECURE_HSTS_SECONDS setting. "
"If your entire site is served only over SSL, you may want to consider "
"setting a value and enabling HTTP Strict Transport Security. "
"Be sure to read the documentation first; enabling HSTS carelessly "
"can cause serious, irreversible problems.",
id='security.W004',
)
W005 = Warning(
"You have not set the SECURE_HSTS_INCLUDE_SUBDOMAINS setting to True. "
"Without this, your site is potentially vulnerable to attack "
"via an insecure connection to a subdomain. Only set this to True if "
"you are certain that all subdomains of your domain should be served "
"exclusively via SSL.",
id='security.W005',
)
W006 = Warning(
"Your SECURE_CONTENT_TYPE_NOSNIFF setting is not set to True, "
"so your pages will not be served with an "
"'x-content-type-options: nosniff' header. "
"You should consider enabling this header to prevent the "
"browser from identifying content types incorrectly.",
id='security.W006',
)
W007 = Warning(
"Your SECURE_BROWSER_XSS_FILTER setting is not set to True, "
"so your pages will not be served with an "
"'x-xss-protection: 1; mode=block' header. "
"You should consider enabling this header to activate the "
"browser's XSS filtering and help prevent XSS attacks.",
id='security.W007',
)
W008 = Warning(
"Your SECURE_SSL_REDIRECT setting is not set to True. "
"Unless your site should be available over both SSL and non-SSL "
"connections, you may want to either set this setting True "
"or configure a load balancer or reverse-proxy server "
"to redirect all connections to HTTPS.",
id='security.W008',
)
W009 = Warning(
"Your SECRET_KEY has less than %(min_length)s characters or less than "
"%(min_unique_chars)s unique characters. Please generate a long and random "
"SECRET_KEY, otherwise many of Django's security-critical features will be "
"vulnerable to attack." % {
'min_length': SECRET_KEY_MIN_LENGTH,
'min_unique_chars': SECRET_KEY_MIN_UNIQUE_CHARACTERS,
},
id='security.W009',
)
W018 = Warning(
"You should not have DEBUG set to True in deployment.",
id='security.W018',
)
W019 = Warning(
"You have "
"'django.middleware.clickjacking.XFrameOptionsMiddleware' in your "
"MIDDLEWARE_CLASSES, but X_FRAME_OPTIONS is not set to 'DENY'. "
"The default is 'SAMEORIGIN', but unless there is a good reason for "
"your site to serve other parts of itself in a frame, you should "
"change it to 'DENY'.",
id='security.W019',
)
W020 = Warning(
"ALLOWED_HOSTS must not be empty in deployment.",
id='security.W020',
)
def _security_middleware():
return "django.middleware.security.SecurityMiddleware" in settings.MIDDLEWARE_CLASSES
def _xframe_middleware():
return "django.middleware.clickjacking.XFrameOptionsMiddleware" in settings.MIDDLEWARE_CLASSES
@register(Tags.security, deploy=True)
def check_security_middleware(app_configs, **kwargs):
passed_check = _security_middleware()
return [] if passed_check else [W001]
@register(Tags.security, deploy=True)
def check_xframe_options_middleware(app_configs, **kwargs):
passed_check = _xframe_middleware()
return [] if passed_check else [W002]
@register(Tags.security, deploy=True)
def check_sts(app_configs, **kwargs):
passed_check = not _security_middleware() or settings.SECURE_HSTS_SECONDS
return [] if passed_check else [W004]
@register(Tags.security, deploy=True)
def check_sts_include_subdomains(app_configs, **kwargs):
passed_check = (
not _security_middleware() or
not settings.SECURE_HSTS_SECONDS or
settings.SECURE_HSTS_INCLUDE_SUBDOMAINS is True
)
return [] if passed_check else [W005]
@register(Tags.security, deploy=True)
def check_content_type_nosniff(app_configs, **kwargs):
passed_check = (
not _security_middleware() or
settings.SECURE_CONTENT_TYPE_NOSNIFF is True
)
return [] if passed_check else [W006]
@register(Tags.security, deploy=True)
def check_xss_filter(app_configs, **kwargs):
passed_check = (
not _security_middleware() or
settings.SECURE_BROWSER_XSS_FILTER is True
)
return [] if passed_check else [W007]
@register(Tags.security, deploy=True)
def check_ssl_redirect(app_configs, **kwargs):
passed_check = (
not _security_middleware() or
settings.SECURE_SSL_REDIRECT is True
)
return [] if passed_check else [W008]
@register(Tags.security, deploy=True)
def check_secret_key(app_configs, **kwargs):
passed_check = (
getattr(settings, 'SECRET_KEY', None) and
len(set(settings.SECRET_KEY)) >= SECRET_KEY_MIN_UNIQUE_CHARACTERS and
len(settings.SECRET_KEY) >= SECRET_KEY_MIN_LENGTH
)
return [] if passed_check else [W009]
@register(Tags.security, deploy=True)
def check_debug(app_configs, **kwargs):
passed_check = not settings.DEBUG
return [] if passed_check else [W018]
@register(Tags.security, deploy=True)
def check_xframe_deny(app_configs, **kwargs):
passed_check = (
not _xframe_middleware() or
settings.X_FRAME_OPTIONS == 'DENY'
)
return [] if passed_check else [W019]
@register(Tags.security, deploy=True)
def check_allowed_hosts(app_configs, **kwargs):
return [] if settings.ALLOWED_HOSTS else [W020]
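# --- Editor's note (illustrative, not part of the upstream module) ---
# These registered checks only run in "deploy" mode. A minimal way to
# exercise them against a project's production settings (assuming a
# standard Django project layout) is:
#
#   $ python manage.py check --deploy --settings=myproject.settings.production
#
# "myproject.settings.production" is a hypothetical settings module; use your
# own. Each check returns [] on success or a list containing the matching
# Warning (e.g. [W001]) on failure.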
|
bsd-3-clause
|
hiroakis/ansible
|
v1/tests/TestVault.py
|
118
|
5121
|
#!/usr/bin/env python
from unittest import TestCase
import getpass
import os
import shutil
import time
import tempfile
from binascii import unhexlify
from binascii import hexlify
from nose.plugins.skip import SkipTest
from ansible import errors
from ansible.utils.vault import VaultLib
# Counter import fails for 2.0.1, requires >= 2.6.1 from pip
try:
from Crypto.Util import Counter
HAS_COUNTER = True
except ImportError:
HAS_COUNTER = False
# KDF import fails for 2.0.1, requires >= 2.6.1 from pip
try:
from Crypto.Protocol.KDF import PBKDF2
HAS_PBKDF2 = True
except ImportError:
HAS_PBKDF2 = False
# AES IMPORTS
try:
from Crypto.Cipher import AES as AES
HAS_AES = True
except ImportError:
HAS_AES = False
class TestVaultLib(TestCase):
def _is_fips(self):
try:
data = open('/proc/sys/crypto/fips_enabled').read().strip()
except:
return False
if data != '1':
return False
return True
def test_methods_exist(self):
v = VaultLib('ansible')
slots = ['is_encrypted',
'encrypt',
'decrypt',
'_add_header',
'_split_header',]
for slot in slots:
assert hasattr(v, slot), "VaultLib is missing the %s method" % slot
def test_is_encrypted(self):
v = VaultLib(None)
assert not v.is_encrypted("foobar"), "encryption check on plaintext failed"
data = "$ANSIBLE_VAULT;9.9;TEST\n%s" % hexlify("ansible")
assert v.is_encrypted(data), "encryption check on headered text failed"
def test_add_header(self):
v = VaultLib('ansible')
v.cipher_name = "TEST"
sensitive_data = "ansible"
data = v._add_header(sensitive_data)
lines = data.split('\n')
assert len(lines) > 1, "failed to properly add header"
header = lines[0]
        assert header.endswith(';TEST'), "header does not end with cipher name"
header_parts = header.split(';')
assert len(header_parts) == 3, "header has the wrong number of parts"
assert header_parts[0] == '$ANSIBLE_VAULT', "header does not start with $ANSIBLE_VAULT"
assert header_parts[1] == v.version, "header version is incorrect"
        assert header_parts[2] == 'TEST', "header does not end with cipher name"
def test_split_header(self):
v = VaultLib('ansible')
data = "$ANSIBLE_VAULT;9.9;TEST\nansible"
rdata = v._split_header(data)
lines = rdata.split('\n')
assert lines[0] == "ansible"
assert v.cipher_name == 'TEST', "cipher name was not set"
assert v.version == "9.9"
def test_encrypt_decrypt_aes(self):
if self._is_fips():
raise SkipTest('MD5 not available on FIPS enabled systems')
if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2:
raise SkipTest
v = VaultLib('ansible')
v.cipher_name = 'AES'
enc_data = v.encrypt("foobar")
dec_data = v.decrypt(enc_data)
assert enc_data != "foobar", "encryption failed"
assert dec_data == "foobar", "decryption failed"
def test_encrypt_decrypt_aes256(self):
if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2:
raise SkipTest
v = VaultLib('ansible')
v.cipher_name = 'AES256'
enc_data = v.encrypt("foobar")
dec_data = v.decrypt(enc_data)
assert enc_data != "foobar", "encryption failed"
assert dec_data == "foobar", "decryption failed"
def test_encrypt_encrypted(self):
if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2:
raise SkipTest
v = VaultLib('ansible')
v.cipher_name = 'AES'
data = "$ANSIBLE_VAULT;9.9;TEST\n%s" % hexlify("ansible")
error_hit = False
try:
enc_data = v.encrypt(data)
except errors.AnsibleError, e:
error_hit = True
assert error_hit, "No error was thrown when trying to encrypt data with a header"
def test_decrypt_decrypted(self):
if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2:
raise SkipTest
v = VaultLib('ansible')
data = "ansible"
error_hit = False
try:
dec_data = v.decrypt(data)
except errors.AnsibleError, e:
error_hit = True
assert error_hit, "No error was thrown when trying to decrypt data without a header"
def test_cipher_not_set(self):
# not setting the cipher should default to AES256
if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2:
raise SkipTest
v = VaultLib('ansible')
data = "ansible"
error_hit = False
try:
enc_data = v.encrypt(data)
except errors.AnsibleError, e:
error_hit = True
assert not error_hit, "An error was thrown when trying to encrypt data without the cipher set"
assert v.cipher_name == "AES256", "cipher name is not set to AES256: %s" % v.cipher_name
|
gpl-3.0
|
pniedzielski/fb-hackathon-2013-11-21
|
src/repl.it/jsrepl/extern/python/unclosured/lib/python2.7/quopri.py
|
424
|
6969
|
#! /usr/bin/env python
"""Conversions to/from quoted-printable transport encoding as per RFC 1521."""
# (Dec 1991 version).
__all__ = ["encode", "decode", "encodestring", "decodestring"]
ESCAPE = '='
MAXLINESIZE = 76
HEX = '0123456789ABCDEF'
EMPTYSTRING = ''
try:
from binascii import a2b_qp, b2a_qp
except ImportError:
a2b_qp = None
b2a_qp = None
def needsquoting(c, quotetabs, header):
"""Decide whether a particular character needs to be quoted.
The 'quotetabs' flag indicates whether embedded tabs and spaces should be
quoted. Note that line-ending tabs and spaces are always encoded, as per
RFC 1521.
"""
if c in ' \t':
return quotetabs
# if header, we have to escape _ because _ is used to escape space
if c == '_':
return header
return c == ESCAPE or not (' ' <= c <= '~')
def quote(c):
"""Quote a single character."""
i = ord(c)
return ESCAPE + HEX[i//16] + HEX[i%16]
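# Editor's note (illustrative, not part of the stdlib module): '=' (ASCII 61,
# 0x3D) must always be escaped, while ordinary printable characters pass
# through untouched:
#
#   >>> needsquoting('=', 0, 0), needsquoting('a', 0, 0)
#   (True, False)
#   >>> quote('=')
#   '=3D'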
def encode(input, output, quotetabs, header = 0):
"""Read 'input', apply quoted-printable encoding, and write to 'output'.
'input' and 'output' are files with readline() and write() methods.
The 'quotetabs' flag indicates whether embedded tabs and spaces should be
quoted. Note that line-ending tabs and spaces are always encoded, as per
RFC 1521.
The 'header' flag indicates whether we are encoding spaces as _ as per
RFC 1522.
"""
if b2a_qp is not None:
data = input.read()
odata = b2a_qp(data, quotetabs = quotetabs, header = header)
output.write(odata)
return
def write(s, output=output, lineEnd='\n'):
# RFC 1521 requires that the line ending in a space or tab must have
# that trailing character encoded.
if s and s[-1:] in ' \t':
output.write(s[:-1] + quote(s[-1]) + lineEnd)
elif s == '.':
output.write(quote(s) + lineEnd)
else:
output.write(s + lineEnd)
prevline = None
while 1:
line = input.readline()
if not line:
break
outline = []
# Strip off any readline induced trailing newline
stripped = ''
if line[-1:] == '\n':
line = line[:-1]
stripped = '\n'
# Calculate the un-length-limited encoded line
for c in line:
if needsquoting(c, quotetabs, header):
c = quote(c)
if header and c == ' ':
outline.append('_')
else:
outline.append(c)
# First, write out the previous line
if prevline is not None:
write(prevline)
# Now see if we need any soft line breaks because of RFC-imposed
# length limitations. Then do the thisline->prevline dance.
thisline = EMPTYSTRING.join(outline)
while len(thisline) > MAXLINESIZE:
# Don't forget to include the soft line break `=' sign in the
# length calculation!
write(thisline[:MAXLINESIZE-1], lineEnd='=\n')
thisline = thisline[MAXLINESIZE-1:]
# Write out the current line
prevline = thisline
# Write out the last line, without a trailing newline
if prevline is not None:
write(prevline, lineEnd=stripped)
def encodestring(s, quotetabs = 0, header = 0):
if b2a_qp is not None:
return b2a_qp(s, quotetabs = quotetabs, header = header)
from cStringIO import StringIO
infp = StringIO(s)
outfp = StringIO()
encode(infp, outfp, quotetabs, header)
return outfp.getvalue()
def decode(input, output, header = 0):
"""Read 'input', apply quoted-printable decoding, and write to 'output'.
'input' and 'output' are files with readline() and write() methods.
If 'header' is true, decode underscore as space (per RFC 1522)."""
if a2b_qp is not None:
data = input.read()
odata = a2b_qp(data, header = header)
output.write(odata)
return
new = ''
while 1:
line = input.readline()
if not line: break
i, n = 0, len(line)
if n > 0 and line[n-1] == '\n':
partial = 0; n = n-1
# Strip trailing whitespace
while n > 0 and line[n-1] in " \t\r":
n = n-1
else:
partial = 1
while i < n:
c = line[i]
if c == '_' and header:
new = new + ' '; i = i+1
elif c != ESCAPE:
new = new + c; i = i+1
elif i+1 == n and not partial:
partial = 1; break
elif i+1 < n and line[i+1] == ESCAPE:
new = new + ESCAPE; i = i+2
elif i+2 < n and ishex(line[i+1]) and ishex(line[i+2]):
new = new + chr(unhex(line[i+1:i+3])); i = i+3
else: # Bad escape sequence -- leave it in
new = new + c; i = i+1
if not partial:
output.write(new + '\n')
new = ''
if new:
output.write(new)
def decodestring(s, header = 0):
if a2b_qp is not None:
return a2b_qp(s, header = header)
from cStringIO import StringIO
infp = StringIO(s)
outfp = StringIO()
decode(infp, outfp, header = header)
return outfp.getvalue()
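# Editor's note (illustrative round trip, not part of the stdlib module):
#
#   >>> encodestring('hello=world\n')
#   'hello=3Dworld\n'
#   >>> decodestring('hello=3Dworld\n')
#   'hello=world\n'
#
# With quotetabs=1, embedded spaces and tabs are encoded as =20 and =09 too.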
# Other helper functions
def ishex(c):
"""Return true if the character 'c' is a hexadecimal digit."""
return '0' <= c <= '9' or 'a' <= c <= 'f' or 'A' <= c <= 'F'
def unhex(s):
"""Get the integer value of a hexadecimal number."""
bits = 0
for c in s:
if '0' <= c <= '9':
i = ord('0')
elif 'a' <= c <= 'f':
i = ord('a')-10
elif 'A' <= c <= 'F':
i = ord('A')-10
else:
break
bits = bits*16 + (ord(c) - i)
return bits
def main():
import sys
import getopt
try:
opts, args = getopt.getopt(sys.argv[1:], 'td')
except getopt.error, msg:
sys.stdout = sys.stderr
print msg
print "usage: quopri [-t | -d] [file] ..."
print "-t: quote tabs"
print "-d: decode; default encode"
sys.exit(2)
deco = 0
tabs = 0
for o, a in opts:
if o == '-t': tabs = 1
if o == '-d': deco = 1
if tabs and deco:
sys.stdout = sys.stderr
print "-t and -d are mutually exclusive"
sys.exit(2)
if not args: args = ['-']
sts = 0
for file in args:
if file == '-':
fp = sys.stdin
else:
try:
fp = open(file)
except IOError, msg:
sys.stderr.write("%s: can't open (%s)\n" % (file, msg))
sts = 1
continue
if deco:
decode(fp, sys.stdout)
else:
encode(fp, sys.stdout, tabs)
if fp is not sys.stdin:
fp.close()
if sts:
sys.exit(sts)
if __name__ == '__main__':
main()
|
agpl-3.0
|
jriegel/FreeCAD
|
src/Mod/Path/PathScripts/PathSelection.py
|
3
|
8502
|
# -*- coding: utf-8 -*-
#***************************************************************************
#* *
#* Copyright (c) 2015 Dan Falck <[email protected]> *
#* *
#* This program is free software; you can redistribute it and/or modify *
#* it under the terms of the GNU Lesser General Public License (LGPL) *
#* as published by the Free Software Foundation; either version 2 of *
#* the License, or (at your option) any later version. *
#* for detail see the LICENCE text file. *
#* *
#* This program is distributed in the hope that it will be useful, *
#* but WITHOUT ANY WARRANTY; without even the implied warranty of *
#* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
#* GNU Library General Public License for more details. *
#* *
#* You should have received a copy of the GNU Library General Public *
#* License along with this program; if not, write to the Free Software *
#* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
#* USA *
#* *
#***************************************************************************
'''Path selection functions: select a face or faces, two edges, etc., to get a dictionary describing what was selected, in order.'''
import FreeCAD,FreeCADGui
import Part
from FreeCAD import Vector
def Sort2Edges(edgelist):
'''Sort2Edges(edgelist) simple function to reorder the start and end pts of two edges based on their selection order. Returns the list, the start point, and their common point, => edgelist, vertex, vertex'''
if len(edgelist)>=2:
vlist = []
e0 = edgelist[0]
e1=edgelist[1]
a0 = e0.Vertexes[0]
a1 = e0.Vertexes[1]
b0 = e1.Vertexes[0]
b1 = e1.Vertexes[1]
# comparison routine to order two edges:
if a1.isSame(b0):
vlist.append((a0.Point.x,a0.Point.y))
vlist.append((a1.Point.x,a1.Point.y))
vlist.append((b1.Point.x,b1.Point.y))
elif a0.isSame(b0):
vlist.append((a1.Point.x,a1.Point.y))
vlist.append((a0.Point.x,a0.Point.y))
vlist.append((b1.Point.x,b1.Point.y))
elif a0.isSame(b1):
vlist.append((a1.Point.x,a1.Point.y))
vlist.append((a0.Point.x,a0.Point.y))
vlist.append((b0.Point.x,b0.Point.y))
elif a1.isSame(b1):
vlist.append((a0.Point.x,a0.Point.y))
vlist.append((a1.Point.x,a1.Point.y))
vlist.append((b0.Point.x,b0.Point.y))
edgestart = Vector(vlist[0][0],vlist[0][1],e0.Vertexes[1].Z)
edgecommon = Vector(vlist[1][0],vlist[1][1],e0.Vertexes[1].Z)
return vlist,edgestart,edgecommon
def segments(poly):
    '''Return the polygon's segments as pairs of consecutive (x, y) coordinate points, closing back to the first point.'''
return zip(poly, poly[1:] + [poly[0]])
def check_clockwise(poly):
'''
    check_clockwise(poly): return True if the selected wire is clockwise, False if counter-clockwise,
    based on point order. poly = [(x1,y1),(x2,y2),(x3,y3)]
'''
clockwise = False
if (sum(x0*y1 - x1*y0 for ((x0, y0), (x1, y1)) in segments(poly))) < 0:
clockwise = not clockwise
return clockwise
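# Editor's note (illustrative, not part of the upstream module): with the
# usual Y-up convention a negative shoelace sum means clockwise order, e.g.
#
#   >>> check_clockwise([(0, 0), (1, 1), (1, 0)])
#   True
#   >>> check_clockwise([(0, 0), (1, 0), (1, 1)])
#   False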
def multiSelect():
'''
multiSelect() A function for selecting elements of an object for CNC path operations.
    Select just a face, an edge, or two edges to indicate direction, a vertex on the object, a point not on the object,
or some combination. Returns a dictionary.
'''
sel = FreeCADGui.Selection.getSelectionEx()
numobjs = len([selobj.Object for selobj in sel])
if numobjs == 0:
FreeCAD.Console.PrintError('Please select some objects and try again.\n')
return
goodselect = False
for s in sel:
for i in s.SubObjects:
if i.ShapeType == 'Face':
goodselect = True
if i.ShapeType == 'Edge':
goodselect = True
if i.ShapeType == 'Vertex':
goodselect = True
if not goodselect:
FreeCAD.Console.PrintError('Please select a face and/or edges along with points (optional) and try again.\n')
return
selItems = {}
selItems['objname']=None #the parent object name - a 3D solid
selItems['pointlist']=None #start and end points
selItems['pointnames']=None #names of points for document object
selItems['facenames']=None # the selected face name
selItems['facelist']=None #list of faces selected
selItems['edgelist']=None #some edges that could be selected along with points and faces
selItems['edgenames']=None
selItems['pathwire']=None #the whole wire around edges of the face
selItems['clockwise']=None
selItems['circles']=None
facenames = []
edgelist =[]
edgenames=[]
ptlist=[]
ptnames=[]
circlelist=[]
face = False
edges = False
points = False
wireobj = False
circles = False
facelist= []
for s in sel:
if s.Object.Shape.ShapeType in ['Solid','Compound','Wire','Vertex']:
if not (s.Object.Shape.ShapeType =='Vertex'):
objname = s.ObjectName
selItems['objname'] =objname
if s.Object.Shape.ShapeType == 'Wire':
wireobj = True
if s.Object.Shape.ShapeType == 'Vertex':
ptnames.append(s.ObjectName)
# ptlist.append(s.Object)
points = True
for sub in s.SubObjects:
if sub.ShapeType =='Face':
facelist.append(sub)
face = True
if sub.ShapeType =='Edge':
edge = sub
edgelist.append(edge)
edges = True
if isinstance(sub.Curve,Part.Circle):
circlelist.append(edge)
circles = True
if sub.ShapeType =='Vertex':
ptlist.append(sub)
points = True
for sub in s.SubElementNames:
if 'Face' in sub:
facename = sub
facenames.append(facename)
if 'Edge' in sub:
edgenames.append(sub)
# now indicate which wire is going to be processed, based on which edges are selected
if facelist:
selItems['facelist']=facelist
if edges:
if face:
selItems['edgelist'] =edgelist
for fw in facelist[0].Wires:
for e in fw.Edges:
if e.isSame(edge):
pathwire = fw
selItems['pathwire'] =pathwire
elif wireobj:
selItems['pathwire'] =s.Object.Shape
selItems['edgelist'] =edgelist
else:
for w in s.Object.Shape.Wires:
for e in w.Edges:
if e.BoundBox.ZMax == e.BoundBox.ZMin: #if they are on same plane in Z as sel edge
if e.isSame(edge):
pathwire = w
selItems['pathwire'] =pathwire
selItems['edgelist'] =edgelist
if not edges:
if face:
selItems['pathwire'] =facelist[0].OuterWire
if edges and (len(edgelist)>=2):
vlist,edgestart,edgecommon=Sort2Edges(edgelist)
edgepts ={}
edgepts['vlist'] = vlist
edgepts['edgestart']=edgestart # start point of edges selected
        edgepts['edgecommon']=edgecommon # point where two edges join - will be the last point in the first gcode line
selItems['edgepts']=edgepts
        selItems['clockwise'] = check_clockwise(vlist)  # check_clockwise returns a bool; store it directly
if points:
selItems['pointlist'] = ptlist
selItems['pointnames'] = ptnames
if edges:
selItems['edgenames']=edgenames
if face:
selItems['facenames'] = facenames
if circles:
selItems['circles'] = circlelist
return selItems
|
lgpl-2.1
|
jeffknupp/sandman2
|
sandman2/service.py
|
1
|
9904
|
"""Automatically generated REST API services from SQLAlchemy
ORM models or a database introspection."""
# Third-party imports
from flask import request, make_response
import flask
from flask.views import MethodView
from sqlalchemy import asc, desc
# Application imports
from sandman2.exception import NotFoundException, BadRequestException
from sandman2.model import db
from sandman2.decorators import etag, validate_fields
def add_link_headers(response, links):
"""Return *response* with the proper link headers set, based on the contents
of *links*.
:param response: :class:`flask.Response` response object for links to be
added
:param dict links: Dictionary of links to be added
    :rtype: :class:`flask.Response`
"""
link_string = '<{}>; rel=self'.format(links['self'])
for link in links.values():
link_string += ', <{}>; rel=related'.format(link)
response.headers['Link'] = link_string
return response
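# Editor's note (illustrative sketch, not part of upstream sandman2): for a
# hypothetical links dict such as {'self': '/user/1', 'posts': '/user/1/posts'},
# the loop above produces a single header roughly like
#
#   Link: </user/1>; rel=self, </user/1>; rel=related, </user/1/posts>; rel=related
#
# (the 'self' entry appears again with rel=related because every value in
# *links*, including 'self', is appended in the loop; ordering follows the
# dict's iteration order).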
def jsonify(resource):
"""Return a Flask ``Response`` object containing a
JSON representation of *resource*.
:param resource: The resource to act as the basis of the response
"""
response = flask.jsonify(resource.to_dict())
response = add_link_headers(response, resource.links())
return response
def is_valid_method(model, resource=None):
"""Return the error message to be sent to the client if the current
    request fails any user-defined validation."""
validation_function_name = 'is_valid_{}'.format(
request.method.lower())
if hasattr(model, validation_function_name):
return getattr(model, validation_function_name)(request, resource)
class Service(MethodView):
"""The *Service* class is a generic extension of Flask's *MethodView*,
providing default RESTful functionality for a given ORM resource.
Each service has an associated *__model__* attribute which represents the
ORM resource it exposes. Services are JSON-only. HTML-based representation
is available through the admin interface.
"""
#: The sandman2.model.Model-derived class to expose
__model__ = None
#: The string used to describe the elements when a collection is
#: returned.
__json_collection_name__ = 'resources'
def delete(self, resource_id):
"""Return an HTTP response object resulting from a HTTP DELETE call.
:param resource_id: The value of the resource's primary key
"""
resource = self._resource(resource_id)
error_message = is_valid_method(self.__model__, resource)
if error_message:
raise BadRequestException(error_message)
db.session().delete(resource)
db.session().commit()
return self._no_content_response()
@etag
def get(self, resource_id=None):
"""Return an HTTP response object resulting from an HTTP GET call.
If *resource_id* is provided, return just the single resource.
Otherwise, return the full collection.
:param resource_id: The value of the resource's primary key
"""
if request.path.endswith('meta'):
return self._meta()
if resource_id is None:
error_message = is_valid_method(self.__model__)
if error_message:
raise BadRequestException(error_message)
if 'export' in request.args:
return self._export(self._all_resources())
return flask.jsonify({
self.__json_collection_name__: self._all_resources()
})
else:
resource = self._resource(resource_id)
error_message = is_valid_method(self.__model__, resource)
if error_message:
raise BadRequestException(error_message)
return jsonify(resource)
def patch(self, resource_id):
"""Return an HTTP response object resulting from an HTTP PATCH call.
:returns: ``HTTP 200`` if the resource already exists
:returns: ``HTTP 400`` if the request is malformed
:returns: ``HTTP 404`` if the resource is not found
:param resource_id: The value of the resource's primary key
"""
resource = self._resource(resource_id)
error_message = is_valid_method(self.__model__, resource)
if error_message:
raise BadRequestException(error_message)
if not request.json:
raise BadRequestException('No JSON data received')
resource.update(request.json)
db.session().merge(resource)
db.session().commit()
return jsonify(resource)
@validate_fields
def post(self):
"""Return the JSON representation of a new resource created through
an HTTP POST call.
:returns: ``HTTP 201`` if a resource is properly created
:returns: ``HTTP 204`` if the resource already exists
:returns: ``HTTP 400`` if the request is malformed or missing data
"""
resource = self.__model__.query.filter_by(**request.json).first()
if resource:
error_message = is_valid_method(self.__model__, resource)
if error_message:
raise BadRequestException(error_message)
return self._no_content_response()
resource = self.__model__(**request.json) # pylint: disable=not-callable
error_message = is_valid_method(self.__model__, resource)
if error_message:
raise BadRequestException(error_message)
db.session().add(resource)
db.session().commit()
return self._created_response(resource)
def put(self, resource_id):
"""Return the JSON representation of a new resource created or updated
through an HTTP PUT call.
If resource_id is not provided, it is assumed the primary key field is
included and a totally new resource is created. Otherwise, the existing
resource referred to by *resource_id* is updated with the provided JSON
data. This method is idempotent.
:returns: ``HTTP 201`` if a new resource is created
:returns: ``HTTP 200`` if a resource is updated
:returns: ``HTTP 400`` if the request is malformed or missing data
"""
resource = self.__model__.query.get(resource_id)
if resource:
error_message = is_valid_method(self.__model__, resource)
if error_message:
raise BadRequestException(error_message)
resource.update(request.json)
db.session().merge(resource)
db.session().commit()
return jsonify(resource)
resource = self.__model__(**request.json) # pylint: disable=not-callable
error_message = is_valid_method(self.__model__, resource)
if error_message:
raise BadRequestException(error_message)
db.session().add(resource)
db.session().commit()
return self._created_response(resource)
def _meta(self):
"""Return a description of this resource as reported by the
database."""
return flask.jsonify(self.__model__.description())
def _resource(self, resource_id):
"""Return the ``sandman2.model.Model`` instance with the given
*resource_id*.
:rtype: :class:`sandman2.model.Model`
"""
resource = self.__model__.query.get(resource_id)
if not resource:
raise NotFoundException()
return resource
def _all_resources(self):
"""Return the complete collection of resources as a list of
dictionaries.
:rtype: :class:`sandman2.model.Model`
"""
queryset = self.__model__.query
args = {k: v for (k, v) in request.args.items() if k not in ('page', 'export')}
limit = None
if args:
filters = []
order = []
for key, value in args.items():
if value.startswith('%'):
filters.append(getattr(self.__model__, key).like(str(value), escape='/'))
elif key == 'sort':
direction = desc if value.startswith('-') else asc
order.append(direction(getattr(self.__model__, value.lstrip('-'))))
elif key == 'limit':
limit = int(value)
elif hasattr(self.__model__, key):
filters.append(getattr(self.__model__, key) == value)
else:
raise BadRequestException('Invalid field [{}]'.format(key))
queryset = queryset.filter(*filters).order_by(*order)
if 'page' in request.args:
resources = queryset.paginate(page=int(request.args['page']), per_page=limit).items
else:
queryset = queryset.limit(limit)
resources = queryset.all()
return [r.to_dict() for r in resources]
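    # Editor's note (illustrative, not part of upstream sandman2): the query
    # string handling above supports requests such as the following (the
    # resource and field names are hypothetical):
    #
    #   GET /user?name=%smith%         -> SQL LIKE filter on "name"
    #   GET /user?sort=-age&limit=10   -> order by age descending, 10 rows
    #   GET /user?page=2&limit=25      -> second page of 25 rows
    #   GET /user?unknown_field=1      -> 400 BadRequestException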
def _export(self, collection):
"""Return a CSV of the resources in *collection*.
:param list collection: A list of resources represented by dicts
"""
fieldnames = collection[0].keys()
faux_csv = ','.join(fieldnames) + '\r\n'
for resource in collection:
faux_csv += ','.join((str(x) for x in resource.values())) + '\r\n'
response = make_response(faux_csv)
response.mimetype = 'text/csv'
return response
@staticmethod
def _no_content_response():
"""Return an HTTP 204 "No Content" response.
:returns: HTTP Response
"""
response = make_response()
response.status_code = 204
return response
@staticmethod
def _created_response(resource):
"""Return an HTTP 201 "Created" response.
:returns: HTTP Response
"""
response = jsonify(resource)
response.status_code = 201
return response
|
apache-2.0
|
bliz937/kivy
|
kivy/core/camera/__init__.py
|
18
|
4533
|
'''
Camera
======
Core class for acquiring the camera and converting its input into a
:class:`~kivy.graphics.texture.Texture`.
.. versionchanged:: 1.8.0
    There are now 2 distinct Gstreamer implementations: one using Gi/Gst
    working for both Python 2+3 with Gstreamer 1.0, and one using PyGST
    working only for Python 2 + Gstreamer 0.10.
If you have issues with GStreamer, have a look at
:ref:`gstreamer-compatibility`
'''
__all__ = ('CameraBase', 'Camera')
from kivy.utils import platform
from kivy.event import EventDispatcher
from kivy.logger import Logger
from kivy.core import core_select_lib
class CameraBase(EventDispatcher):
'''Abstract Camera Widget class.
Concrete camera classes must implement initialization and
frame capturing to a buffer that can be uploaded to the gpu.
:Parameters:
`index`: int
Source index of the camera.
`size` : tuple (int, int)
Size at which the image is drawn. If no size is specified,
it defaults to the resolution of the camera image.
`resolution` : tuple (int, int)
Resolution to try to request from the camera.
Used in the gstreamer pipeline by forcing the appsink caps
            to this resolution. If the camera doesn't support the resolution,
a negotiation error might be thrown.
:Events:
`on_load`
Fired when the camera is loaded and the texture has become
available.
        `on_texture`
Fired each time the camera texture is updated.
'''
__events__ = ('on_load', 'on_texture')
def __init__(self, **kwargs):
kwargs.setdefault('stopped', False)
kwargs.setdefault('resolution', (640, 480))
kwargs.setdefault('index', 0)
self.stopped = kwargs.get('stopped')
self._resolution = kwargs.get('resolution')
self._index = kwargs.get('index')
self._buffer = None
self._format = 'rgb'
self._texture = None
self.capture_device = None
kwargs.setdefault('size', self._resolution)
super(CameraBase, self).__init__()
self.init_camera()
if not self.stopped:
self.start()
def _set_resolution(self, res):
self._resolution = res
self.init_camera()
def _get_resolution(self):
return self._resolution
resolution = property(lambda self: self._get_resolution(),
lambda self, x: self._set_resolution(x),
doc='Resolution of camera capture (width, height)')
def _set_index(self, x):
if x == self._index:
return
self._index = x
self.init_camera()
def _get_index(self):
        return self._index
index = property(lambda self: self._get_index(),
lambda self, x: self._set_index(x),
doc='Source index of the camera')
def _get_texture(self):
return self._texture
texture = property(lambda self: self._get_texture(),
doc='Return the camera texture with the latest capture')
def init_camera(self):
'''Initialise the camera (internal)'''
pass
def start(self):
        '''Start the camera acquisition'''
self.stopped = False
def stop(self):
'''Release the camera'''
self.stopped = True
def _update(self, dt):
'''Update the camera (internal)'''
pass
def _copy_to_gpu(self):
        '''Copy the buffer into the texture'''
if self._texture is None:
Logger.debug('Camera: copy_to_gpu() failed, _texture is None !')
return
self._texture.blit_buffer(self._buffer, colorfmt=self._format)
self._buffer = None
self.dispatch('on_texture')
def on_texture(self):
pass
def on_load(self):
pass
# Load the appropriate providers
providers = ()
if platform == 'win':
providers += (('videocapture', 'camera_videocapture',
'CameraVideoCapture'), )
elif platform == 'macosx':
providers += (('avfoundation', 'camera_avfoundation',
'CameraAVFoundation'), )
elif platform == 'android':
providers += (('android', 'camera_android', 'CameraAndroid'), )
else:
#providers += (('gi', 'camera_gi', 'CameraGi'), )
providers += (('pygst', 'camera_pygst', 'CameraPyGst'), )
providers += (('opencv', 'camera_opencv', 'CameraOpenCV'), )
Camera = core_select_lib('camera', (providers))
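# Editor's note (illustrative, not part of upstream Kivy): core_select_lib()
# walks the provider tuples above and binds the first backend that imports
# successfully, so the resulting class can then be used directly, e.g.
#
#   from kivy.core.camera import Camera
#   cam = Camera(index=0, resolution=(640, 480), stopped=True)
#   cam.start()
#
# (keyword arguments follow the CameraBase parameters documented above; a
# working backend must be available at runtime).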
|
mit
|
tumbl3w33d/ansible
|
lib/ansible/modules/network/check_point/cp_mgmt_threat_exception.py
|
20
|
6207
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Ansible module to manage Check Point Firewall (c) 2019
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: cp_mgmt_threat_exception
short_description: Manages threat-exception objects on Check Point over Web Services API
description:
- Manages threat-exception objects on Check Point devices including creating, updating and removing objects.
- All operations are performed over Web Services API.
version_added: "2.9"
author: "Or Soffer (@chkp-orso)"
options:
name:
description:
- The name of the exception.
type: str
required: True
position:
description:
- Position in the rulebase.
type: str
exception_group_uid:
description:
- The UID of the exception-group.
type: str
exception_group_name:
description:
- The name of the exception-group.
type: str
layer:
description:
- Layer that the rule belongs to identified by the name or UID.
type: str
rule_name:
description:
- The name of the parent rule.
type: str
action:
description:
- Action-the enforced profile.
type: str
destination:
description:
- Collection of Network objects identified by the name or UID.
type: list
destination_negate:
description:
- True if negate is set for destination.
type: bool
enabled:
description:
- Enable/Disable the rule.
type: bool
install_on:
description:
- Which Gateways identified by the name or UID to install the policy on.
type: list
protected_scope:
description:
- Collection of objects defining Protected Scope identified by the name or UID.
type: list
protected_scope_negate:
description:
- True if negate is set for Protected Scope.
type: bool
protection_or_site:
description:
- Name of the protection or site.
type: list
service:
description:
- Collection of Network objects identified by the name or UID.
type: list
service_negate:
description:
- True if negate is set for Service.
type: bool
source:
description:
- Collection of Network objects identified by the name or UID.
type: list
source_negate:
description:
- True if negate is set for source.
type: bool
track:
description:
- Packet tracking.
type: str
comments:
description:
- Comments string.
type: str
details_level:
description:
- The level of detail for some of the fields in the response can vary from showing only the UID value of the object to a fully detailed
representation of the object.
type: str
choices: ['uid', 'standard', 'full']
ignore_warnings:
description:
- Apply changes ignoring warnings.
type: bool
ignore_errors:
description:
      - Apply changes ignoring errors. You won't be able to publish such changes. If the ignore-warnings flag was omitted, warnings will also be ignored.
type: bool
extends_documentation_fragment: checkpoint_objects
"""
EXAMPLES = """
- name: add-threat-exception
cp_mgmt_threat_exception:
layer: New Layer 1
name: Exception Rule
position: 1
protected_scope: All_Internet
rule_name: Threat Rule 1
state: present
track: Log
- name: set-threat-exception
cp_mgmt_threat_exception:
layer: New Layer 1
name: Exception Rule
rule_name: Threat Rule 1
state: present
- name: delete-threat-exception
cp_mgmt_threat_exception:
name: Exception Rule
layer: New Layer 1
rule_name: Threat Rule 1
state: absent
"""
RETURN = """
cp_mgmt_threat_exception:
description: The checkpoint object created or updated.
returned: always, except when deleting the object.
type: dict
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.checkpoint.checkpoint import checkpoint_argument_spec_for_objects, api_call, api_call_for_rule
def main():
argument_spec = dict(
name=dict(type='str', required=True),
position=dict(type='str'),
exception_group_uid=dict(type='str'),
exception_group_name=dict(type='str'),
layer=dict(type='str'),
rule_name=dict(type='str'),
action=dict(type='str'),
destination=dict(type='list'),
destination_negate=dict(type='bool'),
enabled=dict(type='bool'),
install_on=dict(type='list'),
protected_scope=dict(type='list'),
protected_scope_negate=dict(type='bool'),
protection_or_site=dict(type='list'),
service=dict(type='list'),
service_negate=dict(type='bool'),
source=dict(type='list'),
source_negate=dict(type='bool'),
track=dict(type='str'),
comments=dict(type='str'),
details_level=dict(type='str', choices=['uid', 'standard', 'full']),
ignore_warnings=dict(type='bool'),
ignore_errors=dict(type='bool')
)
argument_spec.update(checkpoint_argument_spec_for_objects)
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
api_call_object = 'threat-exception'
if module.params['position'] is None:
result = api_call(module, api_call_object)
else:
result = api_call_for_rule(module, api_call_object)
module.exit_json(**result)
if __name__ == '__main__':
main()
|
gpl-3.0
|
repotvsupertuga/tvsupertuga.repository
|
script.vodextende/salts_lib/directstream.py
|
1
|
11260
|
# -*- coding: utf-8 -*-
"""
VOD TVsupertuga Add-on
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import re, os, urllib, urlparse, json, binascii
from salts_lib import client
def google(url):
try:
if any(x in url for x in ['youtube.', 'docid=']): url = 'https://drive.google.com/file/d/%s/view' % re.compile('docid=([\w-]+)').findall(url)[0]
netloc = urlparse.urlparse(url.strip().lower()).netloc
netloc = netloc.split('.google')[0]
if netloc == 'docs' or netloc == 'drive':
url = url.split('/preview', 1)[0]
url = url.replace('drive.google.com', 'docs.google.com')
headers = {'User-Agent': client.agent()}
result = client.request(url, output='extended', headers=headers)
try:
headers['Cookie'] = result[2]['Set-Cookie']
except:
pass
result = result[0]
if netloc == 'docs' or netloc == 'drive':
result = re.compile('"fmt_stream_map",(".+?")').findall(result)[0]
result = json.loads(result)
result = [i.split('|')[-1] for i in result.split(',')]
result = sum([googletag(i, append_height=True) for i in result], [])
elif netloc == 'photos':
result = result.replace('\r', '').replace('\n', '').replace('\t', '')
result = re.compile('"\d*/\d*x\d*.+?","(.+?)"').findall(result)[0]
result = result.replace('\\u003d', '=').replace('\\u0026', '&')
result = re.compile('url=(.+?)&').findall(result)
result = [urllib.unquote(i) for i in result]
result = sum([googletag(i, append_height=True) for i in result], [])
elif netloc == 'picasaweb':
id = re.compile('#(\d*)').findall(url)[0]
result = re.search('feedPreload:\s*(.*}]}})},', result, re.DOTALL).group(1)
result = json.loads(result)['feed']['entry']
if len(result) > 1:
result = [i for i in result if str(id) in i['link'][0]['href']][0]
elif len(result) == 1:
result = result[0]
result = result['media']['content']
result = [i['url'] for i in result if 'video' in i['type']]
result = sum([googletag(i, append_height=True) for i in result], [])
elif netloc == 'plus':
id = (urlparse.urlparse(url).path).split('/')[-1]
result = result.replace('\r', '').replace('\n', '').replace('\t', '')
result = result.split('"%s"' % id)[-1].split(']]')[0]
result = result.replace('\\u003d', '=').replace('\\u0026', '&')
result = re.compile('url=(.+?)&').findall(result)
result = [urllib.unquote(i) for i in result]
result = sum([googletag(i, append_height=True) for i in result], [])
result = sorted(result, key=lambda i: i.get('height', 0), reverse=True)
url = []
for q in ['4K', '1440p', '1080p', 'HD', 'SD']:
try:
url += [[i for i in result if i.get('quality') == q][0]]
except:
pass
for i in url:
i.pop('height', None)
i.update({'url': i['url'] + '|%s' % urllib.urlencode(headers)})
if not url: return
return url
except:
return
def googletag(url, append_height=False):
quality = re.compile('itag=(\d*)').findall(url)
quality += re.compile('=m(\d*)$').findall(url)
try:
quality = quality[0]
except:
return []
itag_map = {'151': {'quality': 'SD', 'height': 72}, '212': {'quality': 'SD', 'height': 480}, '313': {'quality': '4K', 'height': 2160},
'242': {'quality': 'SD', 'height': 240}, '315': {'quality': '4K', 'height': 2160}, '219': {'quality': 'SD', 'height': 480},
'133': {'quality': 'SD', 'height': 240}, '271': {'quality': '1440p', 'height': 1440}, '272': {'quality': '4K', 'height': 2160},
'137': {'quality': '1080p', 'height': 1080}, '136': {'quality': 'HD', 'height': 720}, '135': {'quality': 'SD', 'height': 480},
'134': {'quality': 'SD', 'height': 360}, '82': {'quality': 'SD', 'height': 360}, '83': {'quality': 'SD', 'height': 480},
'218': {'quality': 'SD', 'height': 480}, '93': {'quality': 'SD', 'height': 360}, '84': {'quality': 'HD', 'height': 720},
'170': {'quality': '1080p', 'height': 1080}, '167': {'quality': 'SD', 'height': 360}, '22': {'quality': 'HD', 'height': 720},
'46': {'quality': '1080p', 'height': 1080}, '160': {'quality': 'SD', 'height': 144}, '44': {'quality': 'SD', 'height': 480},
'45': {'quality': 'HD', 'height': 720}, '43': {'quality': 'SD', 'height': 360}, '94': {'quality': 'SD', 'height': 480},
'5': {'quality': 'SD', 'height': 240}, '6': {'quality': 'SD', 'height': 270}, '92': {'quality': 'SD', 'height': 240},
'85': {'quality': '1080p', 'height': 1080}, '308': {'quality': '1440p', 'height': 1440}, '278': {'quality': 'SD', 'height': 144},
'78': {'quality': 'SD', 'height': 480}, '302': {'quality': 'HD', 'height': 720}, '303': {'quality': '1080p', 'height': 1080},
'245': {'quality': 'SD', 'height': 480}, '244': {'quality': 'SD', 'height': 480}, '247': {'quality': 'HD', 'height': 720},
'246': {'quality': 'SD', 'height': 480}, '168': {'quality': 'SD', 'height': 480}, '266': {'quality': '4K', 'height': 2160},
'243': {'quality': 'SD', 'height': 360}, '264': {'quality': '1440p', 'height': 1440}, '102': {'quality': 'HD', 'height': 720},
'100': {'quality': 'SD', 'height': 360}, '101': {'quality': 'SD', 'height': 480}, '95': {'quality': 'HD', 'height': 720},
'248': {'quality': '1080p', 'height': 1080}, '96': {'quality': '1080p', 'height': 1080}, '91': {'quality': 'SD', 'height': 144},
'38': {'quality': '4K', 'height': 3072}, '59': {'quality': 'SD', 'height': 480}, '17': {'quality': 'SD', 'height': 144},
'132': {'quality': 'SD', 'height': 240}, '18': {'quality': 'SD', 'height': 360}, '37': {'quality': '1080p', 'height': 1080},
'35': {'quality': 'SD', 'height': 480}, '34': {'quality': 'SD', 'height': 360}, '298': {'quality': 'HD', 'height': 720},
'299': {'quality': '1080p', 'height': 1080}, '169': {'quality': 'HD', 'height': 720}}
if quality in itag_map:
quality = itag_map[quality]
if append_height:
return [{'quality': quality['quality'], 'height': quality['height'], 'url': url}]
else:
return [{'quality': quality['quality'], 'url': url}]
else:
return []
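# Editor's note (illustrative, not part of the upstream add-on): googletag()
# only inspects the itag (or trailing "=m<id>") token, so a hypothetical URL
# such as 'http://example.com/videoplayback?itag=22' maps through itag_map to
#
#   [{'quality': 'HD', 'height': 720, 'url': ...}]   (append_height=True)
#   [{'quality': 'HD', 'url': ...}]                  (default)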
def googlepass(url):
try:
try:
headers = dict(urlparse.parse_qsl(url.rsplit('|', 1)[1]))
except:
headers = None
url = url.split('|')[0].replace('\\', '')
url = client.request(url, headers=headers, output='geturl')
if 'requiressl=yes' in url:
url = url.replace('http://', 'https://')
else:
url = url.replace('https://', 'http://')
if headers: url += '|%s' % urllib.urlencode(headers)
return url
except:
return
def vk(url):
try:
query = urlparse.parse_qs(urlparse.urlparse(url).query)
try:
oid, video_id = query['oid'][0], query['id'][0]
except:
oid, video_id = re.findall('\/video(.*)_(.*)', url)[0]
sources_url = 'http://vk.com/al_video.php?act=show_inline&al=1&video=%s_%s' % (oid, video_id)
html = client.request(sources_url)
html = re.sub(r'[^\x00-\x7F]+', ' ', html)
sources = re.findall('(\d+)x\d+.+?(http.+?\.m3u8.+?)n', html)
if not sources:
sources = re.findall('"url(\d+)"\s*:\s*"(.+?)"', html)
sources = [(i[0], i[1].replace('\\', '')) for i in sources]
sources = dict(sources)
url = []
try:
url += [{'quality': 'HD', 'url': sources['720']}]
except:
pass
try:
url += [{'quality': 'SD', 'url': sources['540']}]
except:
pass
try:
url += [{'quality': 'SD', 'url': sources['480']}]
except:
pass
if not url == []: return url
try:
url += [{'quality': 'SD', 'url': sources['360']}]
except:
pass
if not url == []: return url
try:
url += [{'quality': 'SD', 'url': sources['240']}]
except:
pass
if not url == []: return url
except:
return
def odnoklassniki(url):
try:
media_id = re.compile('//.+?/.+?/([\w]+)').findall(url)[0]
result = client.request('http://ok.ru/dk', post={'cmd': 'videoPlayerMetadata', 'mid': media_id})
result = re.sub(r'[^\x00-\x7F]+', ' ', result)
result = json.loads(result).get('videos', [])
hd = []
for name, quali in {'ultra': '4K', 'quad': '1440p', 'full': '1080p', 'hd': 'HD'}.items():
hd += [{'quality': quali, 'url': i.get('url')} for i in result if i.get('name').lower() == name]
sd = []
for name, quali in {'sd': 'SD', 'low': 'SD', 'lowest': 'SD', 'mobile': 'SD'}.items():
sd += [{'quality': quali, 'url': i.get('url')} for i in result if i.get('name').lower() == name]
url = hd + sd[:1]
if not url == []: return url
except:
return
def cldmailru(url):
try:
v = url.split('public')[-1]
r = client.request(url)
r = re.sub(r'[^\x00-\x7F]+', ' ', r)
tok = re.findall('"tokens"\s*:\s*{\s*"download"\s*:\s*"([^"]+)', r)[0]
url = re.findall('"weblink_get"\s*:\s*\[.+?"url"\s*:\s*"([^"]+)', r)[0]
url = '%s%s?key=%s' % (url, v, tok)
return url
except:
return
def yandex(url):
try:
cookie = client.request(url, output='cookie')
r = client.request(url, cookie=cookie)
r = re.sub(r'[^\x00-\x7F]+', ' ', r)
sk = re.findall('"sk"\s*:\s*"([^"]+)', r)[0]
idstring = re.findall('"id"\s*:\s*"([^"]+)', r)[0]
idclient = binascii.b2a_hex(os.urandom(16))
post = {'idClient': idclient, 'version': '3.9.2', 'sk': sk, '_model.0': 'do-get-resource-url', 'id.0': idstring}
post = urllib.urlencode(post)
r = client.request('https://yadi.sk/models/?_m=do-get-resource-url', post=post, cookie=cookie)
r = json.loads(r)
url = r['models'][0]['data']['file']
return url
except:
return
|
gpl-2.0
|
nirmeshk/oh-mainline
|
mysite/profile/migrations/0016_asheesh_make_project_and_project_name_unique_in_projectexp.py
|
17
|
6100
|
# This file is part of OpenHatch.
# Copyright (C) 2009 OpenHatch, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from south.db import db
from django.db import models
from mysite.profile.models import *
class Migration:
def forwards(self, orm):
# Changing field 'Person.time_record_was_created'
db.alter_column('profile_person', 'time_record_was_created', models.DateTimeField(default=datetime.datetime(2009, 6, 21, 17, 33, 16, 586743)))
# Changing field 'Link_ProjectExp_Tag.time_record_was_created'
db.alter_column('profile_link_projectexp_tag', 'time_record_was_created', models.DateTimeField(default=datetime.datetime(2009, 6, 21, 17, 33, 16, 854235)))
# Changing field 'Link_Project_Tag.time_record_was_created'
db.alter_column('profile_link_project_tag', 'time_record_was_created', models.DateTimeField(default=datetime.datetime(2009, 6, 21, 17, 33, 16, 911541)))
# Creating unique_together for [person, project] on ProjectExp.
db.create_unique('profile_projectexp', ['person_id', 'project_id'])
def backwards(self, orm):
# Changing field 'Person.time_record_was_created'
db.alter_column('profile_person', 'time_record_was_created', models.DateTimeField(default=datetime.datetime(2009, 6, 21, 12, 45, 32, 593402)))
# Changing field 'Link_ProjectExp_Tag.time_record_was_created'
db.alter_column('profile_link_projectexp_tag', 'time_record_was_created', models.DateTimeField(default=datetime.datetime(2009, 6, 21, 12, 45, 32, 288061)))
# Changing field 'Link_Project_Tag.time_record_was_created'
db.alter_column('profile_link_project_tag', 'time_record_was_created', models.DateTimeField(default=datetime.datetime(2009, 6, 21, 12, 45, 32, 536696)))
# Deleting unique_together for [person, project] on ProjectExp.
db.delete_unique('profile_projectexp', ['person_id', 'project_id'])
models = {
'profile.person': {
'id': ('models.AutoField', [], {'primary_key': 'True'}),
'interested_in_working_on': ('models.CharField', [], {'default': "''", 'max_length': '1024'}),
'last_polled': ('models.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'last_touched': ('models.DateTimeField', [], {'null': 'True'}),
'name': ('models.CharField', [], {'max_length': '200'}),
'password_hash_md5': ('models.CharField', [], {'max_length': '200'}),
'poll_on_next_web_view': ('models.BooleanField', [], {'default': 'True'}),
'time_record_was_created': ('models.DateTimeField', [], {'default': 'datetime.datetime(2009, 6, 21, 17, 33, 17, 302056)'}),
'username': ('models.CharField', [], {'max_length': '200'})
},
'profile.tag': {
'id': ('models.AutoField', [], {'primary_key': 'True'}),
'tag_type': ('models.ForeignKey', ["orm['profile.TagType']"], {}),
'text': ('models.CharField', [], {'max_length': '50'})
},
'profile.link_projectexp_tag': {
'id': ('models.AutoField', [], {'primary_key': 'True'}),
'project_exp': ('models.ForeignKey', ["orm['profile.ProjectExp']"], {}),
'source': ('models.CharField', [], {'max_length': '200'}),
'tag': ('models.ForeignKey', ["orm['profile.Tag']"], {}),
'time_record_was_created': ('models.DateTimeField', [], {'default': 'datetime.datetime(2009, 6, 21, 17, 33, 16, 984494)'})
},
'search.project': {
'_stub': True,
'id': ('models.AutoField', [], {'primary_key': 'True'})
},
'profile.link_project_tag': {
'id': ('models.AutoField', [], {'primary_key': 'True'}),
'project': ('models.ForeignKey', ["orm['search.Project']"], {}),
'source': ('models.CharField', [], {'max_length': '200'}),
'tag': ('models.ForeignKey', ["orm['profile.Tag']"], {}),
'time_record_was_created': ('models.DateTimeField', [], {'default': 'datetime.datetime(2009, 6, 21, 17, 33, 17, 194256)'})
},
'profile.tagtype': {
'id': ('models.AutoField', [], {'primary_key': 'True'}),
'name': ('models.CharField', [], {'max_length': '100'}),
'prefix': ('models.CharField', [], {'max_length': '20'})
},
'profile.projectexp': {
'Meta': {'unique_together': "[('person','project'),]"},
'description': ('models.TextField', [], {}),
'id': ('models.AutoField', [], {'primary_key': 'True'}),
'last_touched': ('models.DateTimeField', [], {'null': 'True'}),
'man_months': ('models.PositiveIntegerField', [], {'null': 'True'}),
'person': ('models.ForeignKey', ["orm['profile.Person']"], {}),
'person_role': ('models.CharField', [], {'max_length': '200'}),
'primary_language': ('models.CharField', [], {'max_length': '200', 'null': 'True'}),
'project': ('models.ForeignKey', ["orm['search.Project']"], {}),
'source': ('models.CharField', [], {'max_length': '100', 'null': 'True'}),
'time_record_was_created': ('models.DateTimeField', [], {'null': 'True'}),
'url': ('models.URLField', [], {'max_length': '200', 'null': 'True'})
}
}
complete_apps = ['profile']
|
agpl-3.0
|
nin042/phantomjs
|
src/qt/qtwebkit/Tools/Scripts/webkitpy/common/checkout/diff_parser_unittest.py
|
124
|
8145
|
# Copyright (C) 2009 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import cStringIO as StringIO
import unittest2 as unittest
import diff_parser
import re
from webkitpy.common.checkout.diff_test_data import DIFF_TEST_DATA
class DiffParserTest(unittest.TestCase):
maxDiff = None
def test_diff_parser(self, parser = None):
if not parser:
parser = diff_parser.DiffParser(DIFF_TEST_DATA.splitlines())
self.assertEqual(3, len(parser.files))
self.assertTrue('WebCore/rendering/style/StyleFlexibleBoxData.h' in parser.files)
diff = parser.files['WebCore/rendering/style/StyleFlexibleBoxData.h']
self.assertEqual(7, len(diff.lines))
        # The first two unchanged lines.
self.assertEqual((47, 47), diff.lines[0][0:2])
self.assertEqual('', diff.lines[0][2])
self.assertEqual((48, 48), diff.lines[1][0:2])
self.assertEqual(' unsigned align : 3; // EBoxAlignment', diff.lines[1][2])
# The deleted line
self.assertEqual((50, 0), diff.lines[3][0:2])
self.assertEqual(' unsigned orient: 1; // EBoxOrient', diff.lines[3][2])
# The first file looks OK. Let's check the next, more complicated file.
self.assertTrue('WebCore/rendering/style/StyleRareInheritedData.cpp' in parser.files)
diff = parser.files['WebCore/rendering/style/StyleRareInheritedData.cpp']
# There are 3 chunks.
self.assertEqual(7 + 7 + 9, len(diff.lines))
# Around an added line.
self.assertEqual((60, 61), diff.lines[9][0:2])
self.assertEqual((0, 62), diff.lines[10][0:2])
self.assertEqual((61, 63), diff.lines[11][0:2])
        # Look through the last chunk, which contains both adds and deletes.
self.assertEqual((81, 83), diff.lines[14][0:2])
self.assertEqual((82, 84), diff.lines[15][0:2])
self.assertEqual((83, 85), diff.lines[16][0:2])
self.assertEqual((84, 0), diff.lines[17][0:2])
self.assertEqual((0, 86), diff.lines[18][0:2])
self.assertEqual((0, 87), diff.lines[19][0:2])
self.assertEqual((85, 88), diff.lines[20][0:2])
self.assertEqual((86, 89), diff.lines[21][0:2])
self.assertEqual((87, 90), diff.lines[22][0:2])
# Check if a newly added file is correctly handled.
diff = parser.files['LayoutTests/platform/mac/fast/flexbox/box-orient-button-expected.checksum']
self.assertEqual(1, len(diff.lines))
self.assertEqual((0, 1), diff.lines[0][0:2])
def test_diff_converter(self):
comment_lines = [
"Hey guys,\n",
"\n",
"See my awesome patch below!\n",
"\n",
" - Cool Hacker\n",
"\n",
]
revision_lines = [
"Subversion Revision 289799\n",
]
svn_diff_lines = [
"Index: Tools/Scripts/webkitpy/common/checkout/diff_parser.py\n",
"===================================================================\n",
"--- Tools/Scripts/webkitpy/common/checkout/diff_parser.py\n",
"+++ Tools/Scripts/webkitpy/common/checkout/diff_parser.py\n",
"@@ -59,6 +59,7 @@ def git_diff_to_svn_diff(line):\n",
]
self.assertEqual(diff_parser.get_diff_converter(svn_diff_lines), diff_parser.svn_diff_to_svn_diff)
self.assertEqual(diff_parser.get_diff_converter(comment_lines + svn_diff_lines), diff_parser.svn_diff_to_svn_diff)
self.assertEqual(diff_parser.get_diff_converter(revision_lines + svn_diff_lines), diff_parser.svn_diff_to_svn_diff)
git_diff_lines = [
"diff --git a/Tools/Scripts/webkitpy/common/checkout/diff_parser.py b/Tools/Scripts/webkitpy/common/checkout/diff_parser.py\n",
"index 3c5b45b..0197ead 100644\n",
"--- a/Tools/Scripts/webkitpy/common/checkout/diff_parser.py\n",
"+++ b/Tools/Scripts/webkitpy/common/checkout/diff_parser.py\n",
"@@ -59,6 +59,7 @@ def git_diff_to_svn_diff(line):\n",
]
self.assertEqual(diff_parser.get_diff_converter(git_diff_lines), diff_parser.git_diff_to_svn_diff)
self.assertEqual(diff_parser.get_diff_converter(comment_lines + git_diff_lines), diff_parser.git_diff_to_svn_diff)
self.assertEqual(diff_parser.get_diff_converter(revision_lines + git_diff_lines), diff_parser.git_diff_to_svn_diff)
def test_git_mnemonicprefix(self):
p = re.compile(r' ([ab])/')
prefixes = [
{ 'a' : 'i', 'b' : 'w' }, # git-diff (compares the (i)ndex and the (w)ork tree)
{ 'a' : 'c', 'b' : 'w' }, # git-diff HEAD (compares a (c)ommit and the (w)ork tree)
{ 'a' : 'c', 'b' : 'i' }, # git diff --cached (compares a (c)ommit and the (i)ndex)
{ 'a' : 'o', 'b' : 'w' }, # git-diff HEAD:file1 file2 (compares an (o)bject and a (w)ork tree entity)
{ 'a' : '1', 'b' : '2' }, # git diff --no-index a b (compares two non-git things (1) and (2))
]
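# Illustrative sketch (an assumption, not part of the original test): under the
# first prefix pair {'a': 'i', 'b': 'w'}, the substitution below rewrites a
# header line such as
#   "--- a/path/to/file.h"
# into
#   "--- i/path/to/file.h"
# so every mnemonic-prefix variant is run through the same parser checks.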
for prefix in prefixes:
patch = p.sub(lambda x: " %s/" % prefix[x.group(1)], DIFF_TEST_DATA)
self.test_diff_parser(diff_parser.DiffParser(patch.splitlines()))
def test_git_diff_to_svn_diff(self):
output = """\
Index: Tools/Scripts/webkitpy/common/checkout/diff_parser.py
===================================================================
--- Tools/Scripts/webkitpy/common/checkout/diff_parser.py
+++ Tools/Scripts/webkitpy/common/checkout/diff_parser.py
@@ -59,6 +59,7 @@ def git_diff_to_svn_diff(line):
A
B
C
+D
E
F
"""
inputfmt = StringIO.StringIO("""\
diff --git a/Tools/Scripts/webkitpy/common/checkout/diff_parser.py b/Tools/Scripts/webkitpy/common/checkout/diff_parser.py
index 2ed552c4555db72df16b212547f2c125ae301a04..72870482000c0dba64ce4300ed782c03ee79b74f 100644
--- a/Tools/Scripts/webkitpy/common/checkout/diff_parser.py
+++ b/Tools/Scripts/webkitpy/common/checkout/diff_parser.py
@@ -59,6 +59,7 @@ def git_diff_to_svn_diff(line):
A
B
C
+D
E
F
""")
shortfmt = StringIO.StringIO("""\
diff --git a/Tools/Scripts/webkitpy/common/checkout/diff_parser.py b/Tools/Scripts/webkitpy/common/checkout/diff_parser.py
index b48b162..f300960 100644
--- a/Tools/Scripts/webkitpy/common/checkout/diff_parser.py
+++ b/Tools/Scripts/webkitpy/common/checkout/diff_parser.py
@@ -59,6 +59,7 @@ def git_diff_to_svn_diff(line):
A
B
C
+D
E
F
""")
self.assertMultiLineEqual(output, ''.join(diff_parser.git_diff_to_svn_diff(x) for x in shortfmt.readlines()))
self.assertMultiLineEqual(output, ''.join(diff_parser.git_diff_to_svn_diff(x) for x in inputfmt.readlines()))
|
bsd-3-clause
|
mitsuhiko/sentry
|
src/sentry/south_migrations/0247_migrate_file_blobs.py
|
7
|
50599
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import DataMigration
from django.db import IntegrityError, models, transaction
class Migration(DataMigration):
def forwards(self, orm):
from sentry.utils.query import RangeQuerySetWrapper
FileBlobIndex = orm['sentry.FileBlobIndex']
queryset = orm['sentry.File'].objects.select_related('blob')
for file in RangeQuerySetWrapper(queryset):
if not file.blob_id:
continue
try:
with transaction.atomic():
FileBlobIndex.objects.create(
file=file,
blob=file.blob,
offset=0,
)
except IntegrityError:
# already exists
pass
def backwards(self, orm):
pass
models = {
'sentry.activity': {
'Meta': {'object_name': 'Activity'},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'})
},
'sentry.apikey': {
'Meta': {'object_name': 'ApiKey'},
'allowed_origins': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}),
'label': ('django.db.models.fields.CharField', [], {'default': "'Default'", 'max_length': '64', 'blank': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'key_set'", 'to': "orm['sentry.Organization']"}),
'scopes': ('django.db.models.fields.BigIntegerField', [], {'default': 'None'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
'sentry.auditlogentry': {
'Meta': {'object_name': 'AuditLogEntry'},
'actor': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'audit_actors'", 'null': 'True', 'to': "orm['sentry.User']"}),
'actor_key': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ApiKey']", 'null': 'True', 'blank': 'True'}),
'actor_label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ip_address': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39', 'null': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'target_object': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'target_user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'audit_targets'", 'null': 'True', 'to': "orm['sentry.User']"})
},
'sentry.authidentity': {
'Meta': {'unique_together': "(('auth_provider', 'ident'), ('auth_provider', 'user'))", 'object_name': 'AuthIdentity'},
'auth_provider': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.AuthProvider']"}),
'data': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'last_synced': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_verified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.authprovider': {
'Meta': {'object_name': 'AuthProvider'},
'config': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'default_global_access': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'default_role': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '50'}),
'default_teams': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Team']", 'symmetrical': 'False', 'blank': 'True'}),
'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '0'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_sync': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']", 'unique': 'True'}),
'provider': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'sync_time': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'})
},
'sentry.broadcast': {
'Meta': {'object_name': 'Broadcast'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_expires': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2016, 4, 29, 0, 0)', 'null': 'True', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'link': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'upstream_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'})
},
'sentry.broadcastseen': {
'Meta': {'unique_together': "(('broadcast', 'user'),)", 'object_name': 'BroadcastSeen'},
'broadcast': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Broadcast']"}),
'date_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.counter': {
'Meta': {'object_name': 'Counter', 'db_table': "'sentry_projectcounter'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'unique': 'True'}),
'value': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.dsymbundle': {
'Meta': {'object_name': 'DSymBundle'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'object': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.DSymObject']"}),
'sdk': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.DSymSDK']"})
},
'sentry.dsymobject': {
'Meta': {'object_name': 'DSymObject'},
'cpu_name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'object_path': ('django.db.models.fields.TextField', [], {'db_index': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'db_index': 'True'}),
'vmaddr': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'vmsize': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'})
},
'sentry.dsymsdk': {
'Meta': {'object_name': 'DSymSDK', 'index_together': "[('version_major', 'version_minor', 'version_patchlevel', 'version_build')]"},
'dsym_type': ('django.db.models.fields.CharField', [], {'max_length': '20', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'sdk_name': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'version_build': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'version_major': ('django.db.models.fields.IntegerField', [], {}),
'version_minor': ('django.db.models.fields.IntegerField', [], {}),
'version_patchlevel': ('django.db.models.fields.IntegerField', [], {})
},
'sentry.dsymsymbol': {
'Meta': {'unique_together': "[('object', 'address')]", 'object_name': 'DSymSymbol'},
'address': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'object': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.DSymObject']"}),
'symbol': ('django.db.models.fields.TextField', [], {})
},
'sentry.event': {
'Meta': {'unique_together': "(('project_id', 'event_id'),)", 'object_name': 'Event', 'db_table': "'sentry_message'", 'index_together': "(('group_id', 'datetime'),)"},
'data': ('sentry.db.models.fields.node.NodeField', [], {'null': 'True', 'blank': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'db_column': "'message_id'"}),
'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'time_spent': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'null': 'True'})
},
'sentry.eventmapping': {
'Meta': {'unique_together': "(('project_id', 'event_id'),)", 'object_name': 'EventMapping'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.eventtag': {
'Meta': {'unique_together': "(('event_id', 'key_id', 'value_id'),)", 'object_name': 'EventTag'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'value_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.eventuser': {
'Meta': {'unique_together': "(('project', 'ident'), ('project', 'hash'))", 'object_name': 'EventUser', 'index_together': "(('project', 'email'), ('project', 'username'), ('project', 'ip_address'))"},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'ip_address': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'})
},
'sentry.file': {
'Meta': {'object_name': 'File'},
'blob': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'legacy_blob'", 'null': 'True', 'to': "orm['sentry.FileBlob']"}),
'blobs': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.FileBlob']", 'through': "orm['sentry.FileBlobIndex']", 'symmetrical': 'False'}),
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True'}),
'headers': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'path': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'size': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'sentry.fileblob': {
'Meta': {'object_name': 'FileBlob'},
'checksum': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'path': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'size': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'})
},
'sentry.fileblobindex': {
'Meta': {'unique_together': "(('file', 'blob', 'offset'),)", 'object_name': 'FileBlobIndex'},
'blob': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.FileBlob']"}),
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'offset': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {})
},
'sentry.globaldsymfile': {
'Meta': {'object_name': 'GlobalDSymFile'},
'cpu_name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'object_name': ('django.db.models.fields.TextField', [], {}),
'uuid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '36'})
},
'sentry.group': {
'Meta': {'unique_together': "(('project', 'short_id'),)", 'object_name': 'Group', 'db_table': "'sentry_groupedmessage'", 'index_together': "(('project', 'first_release'),)"},
'active_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True', 'blank': 'True'}),
'first_release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']", 'null': 'True', 'on_delete': 'models.PROTECT'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'level': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'num_comments': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'null': 'True'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'resolved_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'score': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '0'}),
'short_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'time_spent_count': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '0'}),
'time_spent_total': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '0'}),
'times_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '1', 'db_index': 'True'})
},
'sentry.groupassignee': {
'Meta': {'object_name': 'GroupAssignee', 'db_table': "'sentry_groupasignee'"},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'assignee_set'", 'unique': 'True', 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'assignee_set'", 'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'sentry_assignee_set'", 'to': "orm['sentry.User']"})
},
'sentry.groupbookmark': {
'Meta': {'unique_together': "(('project', 'user', 'group'),)", 'object_name': 'GroupBookmark'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'sentry_bookmark_set'", 'to': "orm['sentry.User']"})
},
'sentry.groupemailthread': {
'Meta': {'unique_together': "(('email', 'group'), ('email', 'msgid'))", 'object_name': 'GroupEmailThread'},
'date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'groupemail_set'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'msgid': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'groupemail_set'", 'to': "orm['sentry.Project']"})
},
'sentry.grouphash': {
'Meta': {'unique_together': "(('project', 'hash'),)", 'object_name': 'GroupHash'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'})
},
'sentry.groupmeta': {
'Meta': {'unique_together': "(('group', 'key'),)", 'object_name': 'GroupMeta'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'value': ('django.db.models.fields.TextField', [], {})
},
'sentry.groupredirect': {
'Meta': {'object_name': 'GroupRedirect'},
'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'previous_group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'unique': 'True'})
},
'sentry.groupresolution': {
'Meta': {'object_name': 'GroupResolution'},
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'unique': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.grouprulestatus': {
'Meta': {'unique_together': "(('rule', 'group'),)", 'object_name': 'GroupRuleStatus'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_active': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'rule': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Rule']"}),
'status': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'})
},
'sentry.groupseen': {
'Meta': {'unique_together': "(('user', 'group'),)", 'object_name': 'GroupSeen'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'db_index': 'False'})
},
'sentry.groupsnooze': {
'Meta': {'object_name': 'GroupSnooze'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'unique': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'until': ('django.db.models.fields.DateTimeField', [], {})
},
'sentry.grouptagkey': {
'Meta': {'unique_together': "(('project', 'group', 'key'),)", 'object_name': 'GroupTagKey'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'values_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.grouptagvalue': {
'Meta': {'unique_together': "(('project', 'key', 'value', 'group'),)", 'object_name': 'GroupTagValue', 'db_table': "'sentry_messagefiltervalue'"},
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'grouptag'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'grouptag'", 'null': 'True', 'to': "orm['sentry.Project']"}),
'times_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.helppage': {
'Meta': {'object_name': 'HelpPage'},
'content': ('django.db.models.fields.TextField', [], {}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_visible': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64', 'unique': 'True', 'null': 'True'}),
'priority': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '50'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'sentry.lostpasswordhash': {
'Meta': {'object_name': 'LostPasswordHash'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'unique': 'True'})
},
'sentry.option': {
'Meta': {'object_name': 'Option'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.organization': {
'Meta': {'object_name': 'Organization'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'default_role': ('django.db.models.fields.CharField', [], {'default': "'member'", 'max_length': '32'}),
'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '1'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'org_memberships'", 'symmetrical': 'False', 'through': "orm['sentry.OrganizationMember']", 'to': "orm['sentry.User']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.organizationaccessrequest': {
'Meta': {'unique_together': "(('team', 'member'),)", 'object_name': 'OrganizationAccessRequest'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'member': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.OrganizationMember']"}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"})
},
'sentry.organizationmember': {
'Meta': {'unique_together': "(('organization', 'user'), ('organization', 'email'))", 'object_name': 'OrganizationMember'},
'counter': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '0'}),
'has_global_access': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'member_set'", 'to': "orm['sentry.Organization']"}),
'role': ('django.db.models.fields.CharField', [], {'default': "'member'", 'max_length': '32'}),
'teams': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Team']", 'symmetrical': 'False', 'through': "orm['sentry.OrganizationMemberTeam']", 'blank': 'True'}),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '50', 'blank': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'sentry_orgmember_set'", 'null': 'True', 'to': "orm['sentry.User']"})
},
'sentry.organizationmemberteam': {
'Meta': {'unique_together': "(('team', 'organizationmember'),)", 'object_name': 'OrganizationMemberTeam', 'db_table': "'sentry_organizationmember_teams'"},
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'organizationmember': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.OrganizationMember']"}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"})
},
'sentry.organizationonboardingtask': {
'Meta': {'unique_together': "(('organization', 'task'),)", 'object_name': 'OrganizationOnboardingTask'},
'data': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'date_completed': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'task': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'})
},
'sentry.organizationoption': {
'Meta': {'unique_together': "(('organization', 'key'),)", 'object_name': 'OrganizationOption', 'db_table': "'sentry_organizationoptions'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.project': {
'Meta': {'unique_together': "(('team', 'slug'), ('organization', 'slug'))", 'object_name': 'Project'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'first_event': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'forced_color': ('django.db.models.fields.CharField', [], {'max_length': '6', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'null': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"})
},
'sentry.projectbookmark': {
'Meta': {'unique_together': "(('project_id', 'user'),)", 'object_name': 'ProjectBookmark'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.projectdsymfile': {
'Meta': {'unique_together': "(('project', 'uuid'),)", 'object_name': 'ProjectDSymFile'},
'cpu_name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'object_name': ('django.db.models.fields.TextField', [], {}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36'})
},
'sentry.projectkey': {
'Meta': {'object_name': 'ProjectKey'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'key_set'", 'to': "orm['sentry.Project']"}),
'public_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'roles': ('django.db.models.fields.BigIntegerField', [], {'default': '1'}),
'secret_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
'sentry.projectoption': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'ProjectOption', 'db_table': "'sentry_projectoptions'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.release': {
'Meta': {'unique_together': "(('project', 'version'),)", 'object_name': 'Release'},
'data': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_released': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_started': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'new_groups': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'owner': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True', 'blank': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'ref': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'sentry.releasefile': {
'Meta': {'unique_together': "(('release', 'ident'),)", 'object_name': 'ReleaseFile'},
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'name': ('django.db.models.fields.TextField', [], {}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"})
},
'sentry.rule': {
'Meta': {'object_name': 'Rule'},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.savedsearch': {
'Meta': {'unique_together': "(('project', 'name'),)", 'object_name': 'SavedSearch'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'query': ('django.db.models.fields.TextField', [], {})
},
'sentry.savedsearchuserdefault': {
'Meta': {'unique_together': "(('project', 'user'),)", 'object_name': 'SavedSearchUserDefault', 'db_table': "'sentry_savedsearch_userdefault'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'savedsearch': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.SavedSearch']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.tagkey': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'TagKey', 'db_table': "'sentry_filterkey'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'values_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.tagvalue': {
'Meta': {'unique_together': "(('project', 'key', 'value'),)", 'object_name': 'TagValue', 'db_table': "'sentry_filtervalue'"},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True', 'blank': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'times_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.team': {
'Meta': {'unique_together': "(('organization', 'slug'),)", 'object_name': 'Team'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.user': {
'Meta': {'object_name': 'User', 'db_table': "'auth_user'"},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_managed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'db_column': "'first_name'", 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'})
},
'sentry.useroption': {
'Meta': {'unique_together': "(('user', 'project', 'key'),)", 'object_name': 'UserOption'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.userreport': {
'Meta': {'object_name': 'UserReport', 'index_together': "(('project', 'event_id'), ('project', 'date_added'))"},
'comments': ('django.db.models.fields.TextField', [], {}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
}
}
complete_apps = ['sentry']
symmetrical = True
|
bsd-3-clause
|
juhnowski/FishingRod
|
production/pygsl-0.9.5/examples/bspline.py
|
1
|
1107
|
from pygsl import bspline, multifit, rng
import pygsl._numobj as numx
N = 200
ncoeffs = 8
nbreak = ncoeffs - 2
bspline = bspline.bspline
def run():
r = rng.rng()
bw = bspline(4, nbreak)
# Data to be fitted
x = 15. / (N-1) * numx.arange(N)
y = numx.cos(x) * numx.exp(0.1 * x)
sigma = .1
w = 1.0 / sigma**2 * numx.ones(N)
dy = r.gaussian(sigma, N)
y = y + dy
# use uniform breakpoints on [0, 15]
bw.knots_uniform(0.0, 15.0)
X = numx.zeros((N, ncoeffs))
for i in range(N):
B = bw.eval(x[i])
X[i,:] = B
# do the fit
c, cov, chisq = multifit.wlinear(X, w, y, multifit.linear_workspace(N, ncoeffs))
# output the smoothed curve
res_y = []
res_y_err = []
for i in range(N):
B = bw.eval(x[i])
yi, yi_err = multifit.linear_est(B, c, cov)
res_y.append(yi)
res_y_err.append(yi_err)
# print(yi, yi_err)
res_y = numx.array(res_y)
res_y_err = numx.array(res_y_err)
return (x, y,), (x, res_y), res_y_err
if __name__ == '__main__':
run()
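# Minimal sketch of inspecting the result (pylab/matplotlib is an assumption
# here, not a dependency of the original example):
#   (xd, yd), (xs, ys), yerr = run()
#   import pylab
#   pylab.plot(xd, yd, '.', xs, ys, '-')
#   pylab.show()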
|
mit
|
ytjiang/django
|
django/contrib/auth/management/__init__.py
|
91
|
6477
|
"""
Creates permissions for all installed apps that need permissions.
"""
from __future__ import unicode_literals
import getpass
import unicodedata
from django.apps import apps
from django.contrib.auth import get_permission_codename
from django.core import exceptions
from django.core.management.base import CommandError
from django.db import DEFAULT_DB_ALIAS, router
from django.utils.encoding import DEFAULT_LOCALE_ENCODING
from django.utils import six
def _get_all_permissions(opts, ctype):
"""
Returns (codename, name) for all permissions in the given opts.
"""
builtin = _get_builtin_permissions(opts)
custom = list(opts.permissions)
_check_permission_clashing(custom, builtin, ctype)
return builtin + custom
def _get_builtin_permissions(opts):
"""
Returns (codename, name) for all autogenerated permissions.
By default, this is ('add', 'change', 'delete')
"""
perms = []
for action in opts.default_permissions:
perms.append((get_permission_codename(action, opts),
'Can %s %s' % (action, opts.verbose_name_raw)))
return perms
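# Illustrative sketch (not part of the original module): for a model whose
# verbose_name_raw is 'poll' and whose default_permissions are
# ('add', 'change', 'delete'), _get_builtin_permissions would return
#   [('add_poll', 'Can add poll'),
#    ('change_poll', 'Can change poll'),
#    ('delete_poll', 'Can delete poll')]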
def _check_permission_clashing(custom, builtin, ctype):
"""
Check that permissions for a model do not clash. Raises CommandError if
there are duplicate permissions.
"""
pool = set()
builtin_codenames = set(p[0] for p in builtin)
for codename, _name in custom:
if codename in pool:
raise CommandError(
"The permission codename '%s' is duplicated for model '%s.%s'." %
(codename, ctype.app_label, ctype.model_class().__name__))
elif codename in builtin_codenames:
raise CommandError(
"The permission codename '%s' clashes with a builtin permission "
"for model '%s.%s'." %
(codename, ctype.app_label, ctype.model_class().__name__))
pool.add(codename)
def create_permissions(app_config, verbosity=2, interactive=True, using=DEFAULT_DB_ALIAS, **kwargs):
if not app_config.models_module:
return
try:
Permission = apps.get_model('auth', 'Permission')
except LookupError:
return
if not router.allow_migrate_model(using, Permission):
return
from django.contrib.contenttypes.models import ContentType
# This will hold the permissions we're looking for as
# (content_type, (codename, name))
searched_perms = list()
# The codenames and ctypes that should exist.
ctypes = set()
for klass in app_config.get_models():
# Force looking up the content types in the current database
# before creating foreign keys to them.
ctype = ContentType.objects.db_manager(using).get_for_model(klass)
ctypes.add(ctype)
for perm in _get_all_permissions(klass._meta, ctype):
searched_perms.append((ctype, perm))
# Find all the Permissions that have a content_type for a model we're
# looking for. We don't need to check for codenames since we already have
# a list of the ones we're going to create.
all_perms = set(Permission.objects.using(using).filter(
content_type__in=ctypes,
).values_list(
"content_type", "codename"
))
perms = [
Permission(codename=codename, name=name, content_type=ct)
for ct, (codename, name) in searched_perms
if (ct.pk, codename) not in all_perms
]
# Validate the permissions before bulk creation to avoid a cryptic
# database error when the verbose_name is longer than 50 characters.
permission_name_max_length = Permission._meta.get_field('name').max_length
verbose_name_max_length = permission_name_max_length - 11 # len('Can change ') prefix
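# For example, with Permission.name capped at 50 characters (as the comment
# above notes), a verbose_name longer than 39 characters would overflow once
# the 'Can change ' prefix is added, so it is rejected below before
# bulk_create runs.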
for perm in perms:
if len(perm.name) > permission_name_max_length:
raise exceptions.ValidationError(
"The verbose_name of %s.%s is longer than %s characters" % (
perm.content_type.app_label,
perm.content_type.model,
verbose_name_max_length,
)
)
Permission.objects.using(using).bulk_create(perms)
if verbosity >= 2:
for perm in perms:
print("Adding permission '%s'" % perm)
def get_system_username():
"""
Try to determine the current system user's username.
:returns: The username as a unicode string, or an empty string if the
username could not be determined.
"""
try:
result = getpass.getuser()
except (ImportError, KeyError):
# KeyError will be raised by os.getpwuid() (called by getuser())
# if there is no corresponding entry in the /etc/passwd file
# (a very restricted chroot environment, for example).
return ''
if six.PY2:
try:
result = result.decode(DEFAULT_LOCALE_ENCODING)
except UnicodeDecodeError:
# UnicodeDecodeError - preventive treatment for non-latin Windows.
return ''
return result
def get_default_username(check_db=True):
"""
Try to determine the current system user's username to use as a default.
:param check_db: If ``True``, requires that the username does not match an
existing ``auth.User`` (otherwise returns an empty string).
:returns: The username, or an empty string if no username can be
determined.
"""
# This file is used in apps.py; it should not trigger a models import.
from django.contrib.auth import models as auth_app
# If the User model has been swapped out, we can't make any assumptions
# about the default user name.
if auth_app.User._meta.swapped:
return ''
default_username = get_system_username()
try:
default_username = (unicodedata.normalize('NFKD', default_username)
.encode('ascii', 'ignore').decode('ascii')
.replace(' ', '').lower())
except UnicodeDecodeError:
return ''
# Run the username validator
try:
auth_app.User._meta.get_field('username').run_validators(default_username)
except exceptions.ValidationError:
return ''
# Don't return the default username if it is already taken.
if check_db and default_username:
try:
auth_app.User._default_manager.get(username=default_username)
except auth_app.User.DoesNotExist:
pass
else:
return ''
return default_username
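# Worked example (an assumption for illustration): a system username of
# 'José García' is NFKD-normalised, stripped to ASCII, has its space removed
# and is lowercased by get_default_username, yielding 'josegarcia', provided
# it passes the username validator and no existing auth.User already uses it.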
|
bsd-3-clause
|
shakamunyi/beam
|
sdks/python/apache_beam/metrics/metric_test.py
|
6
|
5205
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
from apache_beam.metrics.cells import DistributionData
from apache_beam.metrics.execution import MetricKey
from apache_beam.metrics.execution import MetricsContainer
from apache_beam.metrics.execution import MetricsEnvironment
from apache_beam.metrics.metric import MetricResults
from apache_beam.metrics.metric import Metrics
from apache_beam.metrics.metric import MetricsFilter
from apache_beam.metrics.metricbase import MetricName
class NameTest(unittest.TestCase):
def test_basic_metric_name(self):
name = MetricName('namespace1', 'name1')
self.assertEqual(name.namespace, 'namespace1')
self.assertEqual(name.name, 'name1')
self.assertEqual(name, MetricName('namespace1', 'name1'))
key = MetricKey('step1', name)
self.assertEqual(key.step, 'step1')
self.assertEqual(key.metric.namespace, 'namespace1')
self.assertEqual(key.metric.name, 'name1')
self.assertEqual(key, MetricKey('step1', MetricName('namespace1', 'name1')))
class MetricResultsTest(unittest.TestCase):
def test_metric_filter_namespace_matching(self):
filter = MetricsFilter().with_namespace('ns1')
name = MetricName('ns1', 'name1')
key = MetricKey('step1', name)
self.assertTrue(MetricResults.matches(filter, key))
def test_metric_filter_name_matching(self):
filter = MetricsFilter().with_name('name1').with_namespace('ns1')
name = MetricName('ns1', 'name1')
key = MetricKey('step1', name)
self.assertTrue(MetricResults.matches(filter, key))
filter = MetricsFilter().with_name('name1')
name = MetricName('ns1', 'name1')
key = MetricKey('step1', name)
self.assertTrue(MetricResults.matches(filter, key))
def test_metric_filter_step_matching(self):
filter = MetricsFilter().with_step('Top1/Outer1/Inner1')
name = MetricName('ns1', 'name1')
key = MetricKey('Top1/Outer1/Inner1', name)
self.assertTrue(MetricResults.matches(filter, key))
filter = MetricsFilter().with_step('step1')
name = MetricName('ns1', 'name1')
key = MetricKey('step1', name)
self.assertTrue(MetricResults.matches(filter, key))
filter = MetricsFilter().with_step('Top1/Outer1')
name = MetricName('ns1', 'name1')
key = MetricKey('Top1/Outer1/Inner1', name)
self.assertTrue(MetricResults.matches(filter, key))
filter = MetricsFilter().with_step('Top1/Inner1')
name = MetricName('ns1', 'name1')
key = MetricKey('Top1/Outer1/Inner1', name)
self.assertFalse(MetricResults.matches(filter, key))
class MetricsTest(unittest.TestCase):
def test_get_namespace_class(self):
class MyClass(object):
pass
self.assertEqual('{}.{}'.format(MyClass.__module__, MyClass.__name__),
Metrics.get_namespace(MyClass))
def test_get_namespace_string(self):
namespace = 'MyNamespace'
self.assertEqual(namespace, Metrics.get_namespace(namespace))
def test_get_namespace_error(self):
with self.assertRaises(ValueError):
Metrics.get_namespace(object())
def test_counter_empty_name(self):
with self.assertRaises(ValueError):
Metrics.counter("namespace", "")
def test_counter_empty_namespace(self):
with self.assertRaises(ValueError):
Metrics.counter("", "names")
def test_distribution_empty_name(self):
with self.assertRaises(ValueError):
Metrics.distribution("namespace", "")
def test_distribution_empty_namespace(self):
with self.assertRaises(ValueError):
Metrics.distribution("", "names")
def test_create_counter_distribution(self):
MetricsEnvironment.set_current_container(MetricsContainer('mystep'))
counter_ns = 'aCounterNamespace'
distro_ns = 'aDistributionNamespace'
name = 'a_name'
counter = Metrics.counter(counter_ns, name)
distro = Metrics.distribution(distro_ns, name)
counter.inc(10)
counter.dec(3)
distro.update(10)
distro.update(2)
self.assertTrue(isinstance(counter, Metrics.DelegatingCounter))
self.assertTrue(isinstance(distro, Metrics.DelegatingDistribution))
del distro
del counter
container = MetricsEnvironment.current_container()
self.assertEqual(
container.counters[MetricName(counter_ns, name)].get_cumulative(),
7)
self.assertEqual(
container.distributions[MetricName(distro_ns, name)].get_cumulative(),
DistributionData(12, 2, 2, 10))
if __name__ == '__main__':
unittest.main()
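# --- Illustrative sketch, not part of the original tests --------------------
# The Metrics API exercised above is normally used from inside pipeline code.
# The DoFn below is a made-up example for illustration only; `beam` refers to
# the top-level apache_beam package.
#
#   import apache_beam as beam
#
#   class FilterEmptyLines(beam.DoFn):
#     def __init__(self):
#       super(FilterEmptyLines, self).__init__()
#       self.empty_lines = Metrics.counter(self.__class__, 'empty_lines')
#
#     def process(self, line):
#       if not line.strip():
#         self.empty_lines.inc()
#       else:
#         yield line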
|
apache-2.0
|
grapesmoker/regulations-parser
|
regparser/tree/depth/rules.py
|
7
|
6355
|
"""Namespace for constraints on paragraph depth discovery"""
from regparser.tree.depth import markers
def must_be(value):
"""A constraint that the given variable must matches the value."""
def inner(var):
return var == value
return inner
def type_match(marker):
"""The type of the associated variable must match its marker. Lambda
explanation as in the above rule."""
return lambda typ, idx, m=marker: idx < len(typ) and typ[idx] == m
def same_type(typ, idx, depth, *all_prev):
"""Constraints on sequential markers with the same marker type"""
# Group (type, idx, depth) per marker
all_prev = [tuple(all_prev[i:i+3]) for i in range(0, len(all_prev), 3)]
if all_prev:
prev_typ, prev_idx, prev_depth = all_prev[-1]
# Rule isn't relevant because it's the first marker ...
if not all_prev:
return True
# ... or the previous marker's type doesn't match (see diff_type)
elif typ != prev_typ:
return True
# Stars can't be on the same level in sequence. Can only start a new
# level if the preceding wasn't inline
elif typ == markers.stars:
return depth < prev_depth or (prev_idx == 1
and depth == prev_depth + 1)
# If this marker matches *any* previous marker, we may be continuing
    # its sequence
else:
for prev_type, prev_idx, prev_depth in _ancestors(all_prev):
if (prev_type == typ and prev_depth == depth
and idx == prev_idx + 1):
return True
return False
def diff_type(typ, idx, depth, *all_prev):
"""Constraints on sequential markers with differing types"""
all_prev = [tuple(all_prev[i:i+3]) for i in range(0, len(all_prev), 3)]
# Rule isn't relevant because it's the first marker ...
if not all_prev:
return True
# ... or the previous marker's type matches (see same_type)
elif typ == all_prev[-1][0]:
return True
# Starting a new type
elif idx == 0 and depth == all_prev[-1][2] + 1:
return True
# Stars can't skip levels forward (e.g. _ *, _ _ _ *)
elif typ == markers.stars:
return all_prev[-1][2] - depth >= -1
# If following stars and on the same level, we're good
elif all_prev[-1][0] == markers.stars and depth == all_prev[-1][2]:
return True # Stars
# If this marker matches *any* previous marker, we may be continuing
    # its sequence
else:
for prev_type, prev_idx, prev_depth in _ancestors(all_prev):
if (prev_type == typ and prev_depth == depth
and idx == prev_idx + 1):
return True
return False
def same_depth_same_type(*all_vars):
"""All markers in the same level (with the same parent) should have the
same marker type"""
elements = [tuple(all_vars[i:i+3]) for i in range(0, len(all_vars), 3)]
def per_level(elements, last_type=None):
level, grouped_children = _level_and_children(elements)
if not level:
return True # Base Case
types = set(el[0] for el in level)
types = list(sorted(types, key=lambda t: t == markers.stars))
if len(types) > 2:
return False
if len(types) == 2 and markers.stars not in types:
return False
if last_type in types and last_type != markers.stars:
return False
for children in grouped_children: # Recurse
if not per_level(children, types[0]):
return False
return True
return per_level(elements)
def stars_occupy_space(*all_vars):
"""Star markers can't be ignored in sequence, so 1, *, 2 doesn't make
sense for a single level, unless it's an inline star. In the inline
case, we can think of it as 1, intro-text-to-1, 2"""
elements = [tuple(all_vars[i:i+3]) for i in range(0, len(all_vars), 3)]
def per_level(elements):
level, grouped_children = _level_and_children(elements)
if not level:
return True # Base Case
last_idx = -1
for typ, idx, _ in level:
if typ == markers.stars:
if idx == 0: # STARS_TAG, not INLINE_STARS
last_idx += 1
elif last_idx >= idx:
return False
else:
last_idx = idx
for children in grouped_children: # Recurse
if not per_level(children):
return False
return True
return per_level(elements)
def depth_type_order(order):
"""Create a function which constrains paragraphs depths to a particular
type sequence. For example, we know a priori what regtext and
    interpretation markers' order should be. Adding this constraint speeds up
solution finding."""
order = list(order) # defensive copy
def inner(constrain, all_variables):
for i in range(0, len(all_variables) / 3):
constrain(lambda t, d: (d < len(order)
and (t in (markers.stars, order[d])
or t in order[d])),
('type' + str(i), 'depth' + str(i)))
return inner
def _ancestors(all_prev):
"""Given an assignment of values, construct a list of the relevant
parents, e.g. 1, i, a, ii, A gives us 1, ii, A"""
result = [None]*10
for prev_type, prev_idx, prev_depth in all_prev:
result[prev_depth] = (prev_type, prev_idx, prev_depth)
result[prev_depth + 1:] = [None]*(10 - prev_depth)
result = filter(bool, result)
return result
def _level_and_children(elements):
"""Split a list of elements into elements on the current level (i.e.
that share the same depth as the first element) and segmented children
(children of each of those elements)"""
if not elements: # Base Case
return [], []
depth = elements[0][2]
level = []
grouped_children = []
children = []
for el in elements:
if el[2] == depth:
level.append(el)
if children:
grouped_children.append(children)
children = []
else:
children.append(el)
if children:
grouped_children.append(children)
return level, grouped_children
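# --- Illustrative sketch, not part of the original module -------------------
# The constraint helpers above all consume flattened (type, idx, depth)
# triples. The demo below is made up: `fake_type` stands in for a real marker
# list; only `markers.stars` comes from this codebase.
if __name__ == '__main__':
    fake_type = ('a', 'b', 'c')
    # "a", a STARS_TAG, then "c": the star occupies the "b" slot, so this is
    # a legal sequence.
    print(stars_occupy_space(fake_type, 0, 0, markers.stars, 0, 0,
                             fake_type, 2, 0))   # True
    # "a", a STARS_TAG, then "b": the star already used that slot.
    print(stars_occupy_space(fake_type, 0, 0, markers.stars, 0, 0,
                             fake_type, 1, 0))   # False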
|
cc0-1.0
|
jankeromnes/depot_tools
|
testing_support/super_mox.py
|
3
|
5349
|
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Simplify unit tests based on pymox."""
import os
import random
import shutil
import string
import StringIO
import subprocess
import sys
sys.path.append(os.path.dirname(os.path.dirname(__file__)))
from third_party.pymox import mox
class IsOneOf(mox.Comparator):
def __init__(self, keys):
self._keys = keys
def equals(self, rhs):
return rhs in self._keys
def __repr__(self):
return '<sequence or map containing \'%s\'>' % str(self._keys)
class TestCaseUtils(object):
"""Base class with some additional functionalities. People will usually want
to use SuperMoxTestBase instead."""
# Backup the separator in case it gets mocked
_OS_SEP = os.sep
_RANDOM_CHOICE = random.choice
_RANDOM_RANDINT = random.randint
_STRING_LETTERS = string.letters
## Some utilities for generating arbitrary arguments.
def String(self, max_length):
return ''.join([self._RANDOM_CHOICE(self._STRING_LETTERS)
for _ in xrange(self._RANDOM_RANDINT(1, max_length))])
def Strings(self, max_arg_count, max_arg_length):
return [self.String(max_arg_length) for _ in xrange(max_arg_count)]
def Args(self, max_arg_count=8, max_arg_length=16):
return self.Strings(max_arg_count,
self._RANDOM_RANDINT(1, max_arg_length))
def _DirElts(self, max_elt_count=4, max_elt_length=8):
return self._OS_SEP.join(self.Strings(max_elt_count, max_elt_length))
def Dir(self, max_elt_count=4, max_elt_length=8):
return (self._RANDOM_CHOICE((self._OS_SEP, '')) +
self._DirElts(max_elt_count, max_elt_length))
def SvnUrl(self, max_elt_count=4, max_elt_length=8):
return ('svn://random_host:port/a' +
self._DirElts(max_elt_count, max_elt_length
).replace(self._OS_SEP, '/'))
def RootDir(self, max_elt_count=4, max_elt_length=8):
return self._OS_SEP + self._DirElts(max_elt_count, max_elt_length)
def compareMembers(self, obj, members):
"""If you add a member, be sure to add the relevant test!"""
# Skip over members starting with '_' since they are usually not meant to
# be for public use.
actual_members = [x for x in sorted(dir(obj))
if not x.startswith('_')]
expected_members = sorted(members)
if actual_members != expected_members:
diff = ([i for i in actual_members if i not in expected_members] +
[i for i in expected_members if i not in actual_members])
print >> sys.stderr, diff
# pylint: disable=E1101
self.assertEqual(actual_members, expected_members)
def setUp(self):
self.root_dir = self.Dir()
self.args = self.Args()
self.relpath = self.String(200)
def tearDown(self):
pass
class StdoutCheck(object):
def setUp(self):
# Override the mock with a StringIO, it's much less painful to test.
self._old_stdout = sys.stdout
stdout = StringIO.StringIO()
stdout.flush = lambda: None
sys.stdout = stdout
def tearDown(self):
try:
# If sys.stdout was used, self.checkstdout() must be called.
# pylint: disable=E1101
if not sys.stdout.closed:
self.assertEquals('', sys.stdout.getvalue())
except AttributeError:
pass
sys.stdout = self._old_stdout
def checkstdout(self, expected):
value = sys.stdout.getvalue()
sys.stdout.close()
# pylint: disable=E1101
self.assertEquals(expected, value)
class SuperMoxTestBase(TestCaseUtils, StdoutCheck, mox.MoxTestBase):
def setUp(self):
"""Patch a few functions with know side-effects."""
TestCaseUtils.setUp(self)
mox.MoxTestBase.setUp(self)
os_to_mock = ('chdir', 'chown', 'close', 'closerange', 'dup', 'dup2',
'fchdir', 'fchmod', 'fchown', 'fdopen', 'getcwd', 'getpid', 'lseek',
'makedirs', 'mkdir', 'open', 'popen', 'popen2', 'popen3', 'popen4',
'read', 'remove', 'removedirs', 'rename', 'renames', 'rmdir', 'symlink',
'system', 'tmpfile', 'walk', 'write')
self.MockList(os, os_to_mock)
os_path_to_mock = ('abspath', 'exists', 'getsize', 'isdir', 'isfile',
'islink', 'ismount', 'lexists', 'realpath', 'samefile', 'walk')
self.MockList(os.path, os_path_to_mock)
    self.MockList(shutil, ('rmtree',))
self.MockList(subprocess, ('call', 'Popen'))
# Don't mock stderr since it confuses unittests.
    self.MockList(sys, ('stdin',))
StdoutCheck.setUp(self)
def tearDown(self):
StdoutCheck.tearDown(self)
TestCaseUtils.tearDown(self)
mox.MoxTestBase.tearDown(self)
def MockList(self, parent, items_to_mock):
for item in items_to_mock:
# Skip over items not present because of OS-specific implementation,
# implemented only in later python version, etc.
if hasattr(parent, item):
try:
self.mox.StubOutWithMock(parent, item)
except TypeError, e:
raise TypeError(
'Couldn\'t mock %s in %s: %s' % (item, parent.__name__, e))
def UnMock(self, obj, name):
"""Restore an object inside a test."""
for (parent, old_child, child_name) in self.mox.stubs.cache:
if parent == obj and child_name == name:
setattr(parent, child_name, old_child)
break
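# --- Illustrative sketch, not from this file --------------------------------
# A hypothetical test built on SuperMoxTestBase; `my_tool` and its `clean()`
# function are invented names used purely for illustration.
#
#   class CleanTest(SuperMoxTestBase):
#     def testClean(self):
#       # Record the expected call on the already-mocked shutil.rmtree.
#       shutil.rmtree('/tmp/build')
#       self.mox.ReplayAll()
#       my_tool.clean('/tmp/build')  # code under test
#       self.checkstdout('')         # nothing should have been printed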
|
bsd-3-clause
|
kaplun/ops
|
modules/websubmit/lib/functions/Retrieve_Data.py
|
35
|
1426
|
## This file is part of Invenio.
## Copyright (C) 2004, 2005, 2006, 2007, 2008, 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
__revision__ = "$Id$"
## Description: function Get_Field
## This function returns the value of the specified field
## from the specified document
## Author: T.Baron
##
## PARAMETERS: fieldname: marc21 code
## bibrec: system number of the bibliographic record
import string
from invenio.search_engine import print_record
def Get_Field(fieldname,bibrec):
"""
This function returns the value of the specified field
from the specified document
"""
value = string.strip(print_record(int(bibrec),'tm',[fieldname]))
return value
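# Illustrative usage only; the tag and record number below are made up:
# Get_Field('245__a', 1234) would return the content of MARC field 245__a
# (typically the title) of bibliographic record 1234.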
|
gpl-2.0
|
caphrim007/ansible
|
lib/ansible/plugins/inventory/script.py
|
6
|
8443
|
# Copyright (c) 2012-2014, Michael DeHaan <[email protected]>
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
inventory: script
version_added: "2.4"
short_description: Executes an inventory script that returns JSON
options:
cache:
description: Toggle the usage of the configured Cache plugin.
default: False
type: boolean
ini:
- section: inventory_plugin_script
key: cache
env:
- name: ANSIBLE_INVENTORY_PLUGIN_SCRIPT_CACHE
always_show_stderr:
description: Toggle display of stderr even when script was successful
version_added: "2.5.1"
default: True
type: boolean
ini:
- section: inventory_plugin_script
key: always_show_stderr
env:
- name: ANSIBLE_INVENTORY_PLUGIN_SCRIPT_STDERR
description:
- The source provided must be an executable that returns Ansible inventory JSON
- The source must accept C(--list) and C(--host <hostname>) as arguments.
C(--host) will only be used if no C(_meta) key is present.
This is a performance optimization as the script would be called per host otherwise.
notes:
- It takes the place of the previously hardcoded script inventory.
- In order to function, it requires being whitelisted in configuration, which is true by default.
'''
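# Illustrative only: an inventory script satisfying the contract described in
# DOCUMENTATION above might print JSON like the following for `--list`
# (group and host names are made up):
#
#   {
#       "web": {"hosts": ["web1.example.com"], "vars": {"http_port": 80}},
#       "_meta": {"hostvars": {"web1.example.com": {"ansible_user": "admin"}}}
#   }
#
# When "_meta" with "hostvars" is present, this plugin never invokes the
# script with `--host <hostname>` for individual hosts.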
import os
import subprocess
from collections import Mapping
from ansible.errors import AnsibleError, AnsibleParserError
from ansible.module_utils.basic import json_dict_bytes_to_unicode
from ansible.module_utils.six import iteritems
from ansible.module_utils._text import to_native, to_text
from ansible.plugins.inventory import BaseInventoryPlugin, Cacheable
class InventoryModule(BaseInventoryPlugin, Cacheable):
''' Host inventory parser for ansible using external inventory scripts. '''
NAME = 'script'
def __init__(self):
super(InventoryModule, self).__init__()
self._hosts = set()
def verify_file(self, path):
''' Verify if file is usable by this plugin, base does minimal accessibility check '''
valid = super(InventoryModule, self).verify_file(path)
if valid:
# not only accessible, file must be executable and/or have shebang
shebang_present = False
try:
with open(path, 'rb') as inv_file:
initial_chars = inv_file.read(2)
if initial_chars.startswith(b'#!'):
shebang_present = True
except Exception:
pass
if not os.access(path, os.X_OK) and not shebang_present:
valid = False
return valid
def parse(self, inventory, loader, path, cache=None):
super(InventoryModule, self).parse(inventory, loader, path)
self.set_options()
if cache is None:
cache = self.get_option('cache')
# Support inventory scripts that are not prefixed with some
# path information but happen to be in the current working
# directory when '.' is not in PATH.
cmd = [path, "--list"]
try:
cache_key = self._get_cache_prefix(path)
if not cache or cache_key not in self._cache:
try:
sp = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
except OSError as e:
raise AnsibleParserError("problem running %s (%s)" % (' '.join(cmd), to_native(e)))
(stdout, stderr) = sp.communicate()
path = to_native(path)
err = to_native(stderr or "")
if err and not err.endswith('\n'):
err += '\n'
if sp.returncode != 0:
raise AnsibleError("Inventory script (%s) had an execution error: %s " % (path, err))
# make sure script output is unicode so that json loader will output unicode strings itself
try:
data = to_text(stdout, errors="strict")
except Exception as e:
raise AnsibleError("Inventory {0} contained characters that cannot be interpreted as UTF-8: {1}".format(path, to_native(e)))
try:
self._cache[cache_key] = self.loader.load(data)
except Exception as e:
raise AnsibleError("failed to parse executable inventory script results from {0}: {1}\n{2}".format(path, to_native(e), err))
# if no other errors happened and you want to force displaying stderr, do so now
if stderr and self.get_option('always_show_stderr'):
self.display.error(msg=to_text(err))
processed = self._cache[cache_key]
if not isinstance(processed, Mapping):
raise AnsibleError("failed to parse executable inventory script results from {0}: needs to be a json dict\n{1}".format(path, err))
group = None
data_from_meta = None
# A "_meta" subelement may contain a variable "hostvars" which contains a hash for each host
# if this "hostvars" exists at all then do not call --host for each # host.
# This is for efficiency and scripts should still return data
# if called with --host for backwards compat with 1.2 and earlier.
for (group, gdata) in processed.items():
if group == '_meta':
if 'hostvars' in gdata:
data_from_meta = gdata['hostvars']
else:
self._parse_group(group, gdata)
for host in self._hosts:
got = {}
if data_from_meta is None:
got = self.get_host_variables(path, host)
else:
try:
got = data_from_meta.get(host, {})
except AttributeError as e:
raise AnsibleError("Improperly formatted host information for %s: %s" % (host, to_native(e)))
self._populate_host_vars([host], got)
except Exception as e:
raise AnsibleParserError(to_native(e))
def _parse_group(self, group, data):
self.inventory.add_group(group)
if not isinstance(data, dict):
data = {'hosts': data}
        # if it has none of those subkeys, it is the simplified syntax: a single host with vars
elif not any(k in data for k in ('hosts', 'vars', 'children')):
data = {'hosts': [group], 'vars': data}
if 'hosts' in data:
if not isinstance(data['hosts'], list):
raise AnsibleError("You defined a group '%s' with bad data for the host list:\n %s" % (group, data))
for hostname in data['hosts']:
self._hosts.add(hostname)
self.inventory.add_host(hostname, group)
if 'vars' in data:
if not isinstance(data['vars'], dict):
raise AnsibleError("You defined a group '%s' with bad data for variables:\n %s" % (group, data))
for k, v in iteritems(data['vars']):
self.inventory.set_variable(group, k, v)
if group != '_meta' and isinstance(data, dict) and 'children' in data:
for child_name in data['children']:
self.inventory.add_group(child_name)
self.inventory.add_child(group, child_name)
def get_host_variables(self, path, host):
""" Runs <script> --host <hostname>, to determine additional host variables """
cmd = [path, "--host", host]
try:
sp = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
except OSError as e:
raise AnsibleError("problem running %s (%s)" % (' '.join(cmd), e))
(out, err) = sp.communicate()
if out.strip() == '':
return {}
try:
return json_dict_bytes_to_unicode(self.loader.load(out, file_name=path))
except ValueError:
raise AnsibleError("could not parse post variable response: %s, %s" % (cmd, out))
|
gpl-3.0
|
raymondnoonan/Mpropulator
|
MPropulator/readConfig.py
|
1
|
1536
|
import pandas as pd
import os
from MPropulator import validations as vd
def readConfig(config):
'''
Reads in the config file as a dataframe
and validates the inputs and outputs of
this file.
args: config is the path to the config file csv
output: pandas dataframe that is a parsed and prepped
config file.
'''
vd.validateConfigPath(config)
parseConfig = pd.read_csv(config)
#parseConfig.fillna("", inplace=True)
# users enter rows to skip as 1,2,3,4 and cols to skip as A,B,C
# we need to parse these into lists
split = str.split
def splitFunc(val):
        arr = split(str(val).strip(" "), ",")
arr = [x for x in arr if x != 'nan']
return arr
#splitFunc = lambda x: split(str(x).strip(" "), ",")
def assertList(x):
assert(isinstance(x,list))
parseConfig['skiprows'] = map(splitFunc, parseConfig['skiprows'])
parseConfig['skipcols'] = map(splitFunc, parseConfig['skipcols'])
map(assertList, parseConfig['skiprows'])
map(assertList, parseConfig['skipcols'])
# in addition, for the skiprows, we want these as ints, not strings
def makeInt(array):
intArray = [int(x) for x in array]
return intArray
try:
parseConfig['skiprows'] = map(makeInt, parseConfig['skiprows'])
except:
raise ValueError("Cannot convert some values in skiprows to ints")
parseConfig['ignore'].fillna(False, inplace=True)
vd.validateConfigRead(parseConfig)
return parseConfig
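# Illustrative usage only; the column layout below is an assumption about the
# config CSV, not something taken from the validations module:
#
#   tab,skiprows,skipcols,ignore
#   Summary,"1,2,3","A,B",
#
#   parsed = readConfig('config.csv')
#   parsed['skiprows'][0]   # -> [1, 2, 3]
#   parsed['skipcols'][0]   # -> ['A', 'B']
#   parsed['ignore'][0]     # -> False (NaN filled with False)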
|
mit
|
PythonNut/servo
|
tests/wpt/web-platform-tests/tools/pywebsocket/src/example/echo_client.py
|
442
|
44484
|
#!/usr/bin/env python
#
# Copyright 2011, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Simple WebSocket client named echo_client just because of historical reason.
mod_pywebsocket directory must be in PYTHONPATH.
Example Usage:
# server setup
% cd $pywebsocket
% PYTHONPATH=$cwd/src python ./mod_pywebsocket/standalone.py -p 8880 \
-d $cwd/src/example
# run client
% PYTHONPATH=$cwd/src python ./src/example/echo_client.py -p 8880 \
-s localhost \
-o http://localhost -r /echo -m test
or
# run echo client to test IETF HyBi 00 protocol
run with --protocol-version=hybi00
"""
import base64
import codecs
import logging
from optparse import OptionParser
import os
import random
import re
import socket
import struct
import sys
from mod_pywebsocket import common
from mod_pywebsocket.extensions import DeflateFrameExtensionProcessor
from mod_pywebsocket.extensions import PerMessageDeflateExtensionProcessor
from mod_pywebsocket.extensions import _PerMessageDeflateFramer
from mod_pywebsocket.extensions import _parse_window_bits
from mod_pywebsocket.stream import Stream
from mod_pywebsocket.stream import StreamHixie75
from mod_pywebsocket.stream import StreamOptions
from mod_pywebsocket import util
_TIMEOUT_SEC = 10
_UNDEFINED_PORT = -1
_UPGRADE_HEADER = 'Upgrade: websocket\r\n'
_UPGRADE_HEADER_HIXIE75 = 'Upgrade: WebSocket\r\n'
_CONNECTION_HEADER = 'Connection: Upgrade\r\n'
# Special message that tells the echo server to start closing handshake
_GOODBYE_MESSAGE = 'Goodbye'
_PROTOCOL_VERSION_HYBI13 = 'hybi13'
_PROTOCOL_VERSION_HYBI08 = 'hybi08'
_PROTOCOL_VERSION_HYBI00 = 'hybi00'
_PROTOCOL_VERSION_HIXIE75 = 'hixie75'
# Constants for the --tls_module flag.
_TLS_BY_STANDARD_MODULE = 'ssl'
_TLS_BY_PYOPENSSL = 'pyopenssl'
# Values used by the --tls-version flag.
_TLS_VERSION_SSL23 = 'ssl23'
_TLS_VERSION_SSL3 = 'ssl3'
_TLS_VERSION_TLS1 = 'tls1'
class ClientHandshakeError(Exception):
pass
def _build_method_line(resource):
return 'GET %s HTTP/1.1\r\n' % resource
def _origin_header(header, origin):
# 4.1 13. concatenation of the string "Origin:", a U+0020 SPACE character,
# and the /origin/ value, converted to ASCII lowercase, to /fields/.
return '%s: %s\r\n' % (header, origin.lower())
def _format_host_header(host, port, secure):
# 4.1 9. Let /hostport/ be an empty string.
# 4.1 10. Append the /host/ value, converted to ASCII lowercase, to
# /hostport/
hostport = host.lower()
# 4.1 11. If /secure/ is false, and /port/ is not 80, or if /secure/
# is true, and /port/ is not 443, then append a U+003A COLON character
# (:) followed by the value of /port/, expressed as a base-ten integer,
# to /hostport/
if ((not secure and port != common.DEFAULT_WEB_SOCKET_PORT) or
(secure and port != common.DEFAULT_WEB_SOCKET_SECURE_PORT)):
hostport += ':' + str(port)
# 4.1 12. concatenation of the string "Host:", a U+0020 SPACE
# character, and /hostport/, to /fields/.
return '%s: %s\r\n' % (common.HOST_HEADER, hostport)
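# Illustrative only (assuming common.HOST_HEADER == 'Host' and the default
# non-secure port is 80):
#   _format_host_header('Example.COM', 8880, False) -> 'Host: example.com:8880\r\n'
#   _format_host_header('example.com', 80, False)   -> 'Host: example.com\r\n'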
def _receive_bytes(socket, length):
bytes = []
remaining = length
while remaining > 0:
received_bytes = socket.recv(remaining)
if not received_bytes:
raise IOError(
'Connection closed before receiving requested length '
'(requested %d bytes but received only %d bytes)' %
(length, length - remaining))
bytes.append(received_bytes)
remaining -= len(received_bytes)
return ''.join(bytes)
def _get_mandatory_header(fields, name):
"""Gets the value of the header specified by name from fields.
This function expects that there's only one header with the specified name
    in fields. Otherwise, raises a ClientHandshakeError.
"""
values = fields.get(name.lower())
if values is None or len(values) == 0:
raise ClientHandshakeError(
'%s header not found: %r' % (name, values))
if len(values) > 1:
raise ClientHandshakeError(
'Multiple %s headers found: %r' % (name, values))
return values[0]
def _validate_mandatory_header(fields, name,
expected_value, case_sensitive=False):
"""Gets and validates the value of the header specified by name from
fields.
If expected_value is specified, compares expected value and actual value
    and raises a ClientHandshakeError on failure. You can specify case
sensitiveness in this comparison by case_sensitive parameter. This function
expects that there's only one header with the specified name in fields.
    Otherwise, raises a ClientHandshakeError.
"""
value = _get_mandatory_header(fields, name)
if ((case_sensitive and value != expected_value) or
(not case_sensitive and value.lower() != expected_value.lower())):
raise ClientHandshakeError(
'Illegal value for header %s: %r (expected) vs %r (actual)' %
(name, expected_value, value))
class _TLSSocket(object):
"""Wrapper for a TLS connection."""
def __init__(self,
raw_socket, tls_module, tls_version, disable_tls_compression):
self._logger = util.get_class_logger(self)
if tls_module == _TLS_BY_STANDARD_MODULE:
if tls_version == _TLS_VERSION_SSL23:
version = ssl.PROTOCOL_SSLv23
elif tls_version == _TLS_VERSION_SSL3:
version = ssl.PROTOCOL_SSLv3
elif tls_version == _TLS_VERSION_TLS1:
version = ssl.PROTOCOL_TLSv1
else:
raise ValueError(
'Invalid --tls-version flag: %r' % tls_version)
if disable_tls_compression:
raise ValueError(
'--disable-tls-compression is not available for ssl '
'module')
self._tls_socket = ssl.wrap_socket(raw_socket, ssl_version=version)
# Print cipher in use. Handshake is done on wrap_socket call.
self._logger.info("Cipher: %s", self._tls_socket.cipher())
elif tls_module == _TLS_BY_PYOPENSSL:
if tls_version == _TLS_VERSION_SSL23:
version = OpenSSL.SSL.SSLv23_METHOD
elif tls_version == _TLS_VERSION_SSL3:
version = OpenSSL.SSL.SSLv3_METHOD
elif tls_version == _TLS_VERSION_TLS1:
version = OpenSSL.SSL.TLSv1_METHOD
else:
raise ValueError(
'Invalid --tls-version flag: %r' % tls_version)
context = OpenSSL.SSL.Context(version)
if disable_tls_compression:
# OP_NO_COMPRESSION is not defined in OpenSSL module.
context.set_options(0x00020000)
self._tls_socket = OpenSSL.SSL.Connection(context, raw_socket)
# Client mode.
self._tls_socket.set_connect_state()
self._tls_socket.setblocking(True)
# Do handshake now (not necessary).
self._tls_socket.do_handshake()
else:
raise ValueError('No TLS support module is available')
def send(self, data):
return self._tls_socket.write(data)
def sendall(self, data):
return self._tls_socket.sendall(data)
def recv(self, size=-1):
return self._tls_socket.read(size)
def close(self):
return self._tls_socket.close()
def getpeername(self):
return self._tls_socket.getpeername()
class ClientHandshakeBase(object):
"""A base class for WebSocket opening handshake processors for each
protocol version.
"""
def __init__(self):
self._logger = util.get_class_logger(self)
def _read_fields(self):
# 4.1 32. let /fields/ be a list of name-value pairs, initially empty.
fields = {}
while True: # "Field"
# 4.1 33. let /name/ and /value/ be empty byte arrays
name = ''
value = ''
# 4.1 34. read /name/
name = self._read_name()
if name is None:
break
# 4.1 35. read spaces
# TODO(tyoshino): Skip only one space as described in the spec.
ch = self._skip_spaces()
# 4.1 36. read /value/
value = self._read_value(ch)
# 4.1 37. read a byte from the server
ch = _receive_bytes(self._socket, 1)
if ch != '\n': # 0x0A
raise ClientHandshakeError(
'Expected LF but found %r while reading value %r for '
'header %r' % (ch, value, name))
self._logger.debug('Received %r header', name)
# 4.1 38. append an entry to the /fields/ list that has the name
# given by the string obtained by interpreting the /name/ byte
# array as a UTF-8 stream and the value given by the string
# obtained by interpreting the /value/ byte array as a UTF-8 byte
# stream.
fields.setdefault(name, []).append(value)
# 4.1 39. return to the "Field" step above
return fields
def _read_name(self):
# 4.1 33. let /name/ be empty byte arrays
name = ''
while True:
# 4.1 34. read a byte from the server
ch = _receive_bytes(self._socket, 1)
if ch == '\r': # 0x0D
return None
elif ch == '\n': # 0x0A
raise ClientHandshakeError(
'Unexpected LF when reading header name %r' % name)
elif ch == ':': # 0x3A
return name
            elif ch >= 'A' and ch <= 'Z': # Range 0x41 to 0x5A
ch = chr(ord(ch) + 0x20)
name += ch
else:
name += ch
def _skip_spaces(self):
# 4.1 35. read a byte from the server
while True:
ch = _receive_bytes(self._socket, 1)
if ch == ' ': # 0x20
continue
return ch
def _read_value(self, ch):
# 4.1 33. let /value/ be empty byte arrays
value = ''
# 4.1 36. read a byte from server.
while True:
if ch == '\r': # 0x0D
return value
elif ch == '\n': # 0x0A
raise ClientHandshakeError(
'Unexpected LF when reading header value %r' % value)
else:
value += ch
ch = _receive_bytes(self._socket, 1)
def _get_permessage_deflate_framer(extension_response):
"""Validate the response and return a framer object using the parameters in
the response. This method doesn't accept the server_.* parameters.
"""
client_max_window_bits = None
client_no_context_takeover = None
client_max_window_bits_name = (
PerMessageDeflateExtensionProcessor.
_CLIENT_MAX_WINDOW_BITS_PARAM)
client_no_context_takeover_name = (
PerMessageDeflateExtensionProcessor.
_CLIENT_NO_CONTEXT_TAKEOVER_PARAM)
# We didn't send any server_.* parameter.
# Handle those parameters as invalid if found in the response.
for param_name, param_value in extension_response.get_parameters():
if param_name == client_max_window_bits_name:
if client_max_window_bits is not None:
raise ClientHandshakeError(
'Multiple %s found' % client_max_window_bits_name)
parsed_value = _parse_window_bits(param_value)
if parsed_value is None:
raise ClientHandshakeError(
'Bad %s: %r' %
(client_max_window_bits_name, param_value))
client_max_window_bits = parsed_value
elif param_name == client_no_context_takeover_name:
if client_no_context_takeover is not None:
raise ClientHandshakeError(
'Multiple %s found' % client_no_context_takeover_name)
if param_value is not None:
raise ClientHandshakeError(
'Bad %s: Has value %r' %
(client_no_context_takeover_name, param_value))
client_no_context_takeover = True
if client_no_context_takeover is None:
client_no_context_takeover = False
return _PerMessageDeflateFramer(client_max_window_bits,
client_no_context_takeover)
class ClientHandshakeProcessor(ClientHandshakeBase):
"""WebSocket opening handshake processor for
draft-ietf-hybi-thewebsocketprotocol-06 and later.
"""
def __init__(self, socket, options):
super(ClientHandshakeProcessor, self).__init__()
self._socket = socket
self._options = options
self._logger = util.get_class_logger(self)
def handshake(self):
"""Performs opening handshake on the specified socket.
Raises:
ClientHandshakeError: handshake failed.
"""
request_line = _build_method_line(self._options.resource)
self._logger.debug('Client\'s opening handshake Request-Line: %r',
request_line)
self._socket.sendall(request_line)
fields = []
fields.append(_format_host_header(
self._options.server_host,
self._options.server_port,
self._options.use_tls))
fields.append(_UPGRADE_HEADER)
fields.append(_CONNECTION_HEADER)
if self._options.origin is not None:
if self._options.protocol_version == _PROTOCOL_VERSION_HYBI08:
fields.append(_origin_header(
common.SEC_WEBSOCKET_ORIGIN_HEADER,
self._options.origin))
else:
fields.append(_origin_header(common.ORIGIN_HEADER,
self._options.origin))
original_key = os.urandom(16)
self._key = base64.b64encode(original_key)
self._logger.debug(
'%s: %r (%s)',
common.SEC_WEBSOCKET_KEY_HEADER,
self._key,
util.hexify(original_key))
fields.append(
'%s: %s\r\n' % (common.SEC_WEBSOCKET_KEY_HEADER, self._key))
if self._options.version_header > 0:
fields.append('%s: %d\r\n' % (common.SEC_WEBSOCKET_VERSION_HEADER,
self._options.version_header))
elif self._options.protocol_version == _PROTOCOL_VERSION_HYBI08:
fields.append('%s: %d\r\n' % (common.SEC_WEBSOCKET_VERSION_HEADER,
common.VERSION_HYBI08))
else:
fields.append('%s: %d\r\n' % (common.SEC_WEBSOCKET_VERSION_HEADER,
common.VERSION_HYBI_LATEST))
extensions_to_request = []
if self._options.deflate_frame:
extensions_to_request.append(
common.ExtensionParameter(common.DEFLATE_FRAME_EXTENSION))
if self._options.use_permessage_deflate:
extension = common.ExtensionParameter(
common.PERMESSAGE_DEFLATE_EXTENSION)
# Accept the client_max_window_bits extension parameter by default.
extension.add_parameter(
PerMessageDeflateExtensionProcessor.
_CLIENT_MAX_WINDOW_BITS_PARAM,
None)
extensions_to_request.append(extension)
if len(extensions_to_request) != 0:
fields.append(
'%s: %s\r\n' %
(common.SEC_WEBSOCKET_EXTENSIONS_HEADER,
common.format_extensions(extensions_to_request)))
for field in fields:
self._socket.sendall(field)
self._socket.sendall('\r\n')
self._logger.debug('Sent client\'s opening handshake headers: %r',
fields)
self._logger.debug('Start reading Status-Line')
status_line = ''
while True:
ch = _receive_bytes(self._socket, 1)
status_line += ch
if ch == '\n':
break
m = re.match('HTTP/\\d+\.\\d+ (\\d\\d\\d) .*\r\n', status_line)
if m is None:
raise ClientHandshakeError(
'Wrong status line format: %r' % status_line)
status_code = m.group(1)
if status_code != '101':
self._logger.debug('Unexpected status code %s with following '
'headers: %r', status_code, self._read_fields())
raise ClientHandshakeError(
'Expected HTTP status code 101 but found %r' % status_code)
self._logger.debug('Received valid Status-Line')
self._logger.debug('Start reading headers until we see an empty line')
fields = self._read_fields()
ch = _receive_bytes(self._socket, 1)
if ch != '\n': # 0x0A
raise ClientHandshakeError(
                'Expected LF but found %r while reading the blank line that '
                'terminates the header fields' % (ch,))
self._logger.debug('Received an empty line')
self._logger.debug('Server\'s opening handshake headers: %r', fields)
_validate_mandatory_header(
fields,
common.UPGRADE_HEADER,
common.WEBSOCKET_UPGRADE_TYPE,
False)
_validate_mandatory_header(
fields,
common.CONNECTION_HEADER,
common.UPGRADE_CONNECTION_TYPE,
False)
accept = _get_mandatory_header(
fields, common.SEC_WEBSOCKET_ACCEPT_HEADER)
# Validate
try:
binary_accept = base64.b64decode(accept)
except TypeError, e:
            raise ClientHandshakeError(
'Illegal value for header %s: %r' %
(common.SEC_WEBSOCKET_ACCEPT_HEADER, accept))
if len(binary_accept) != 20:
raise ClientHandshakeError(
'Decoded value of %s is not 20-byte long' %
common.SEC_WEBSOCKET_ACCEPT_HEADER)
self._logger.debug(
'Response for challenge : %r (%s)',
accept, util.hexify(binary_accept))
binary_expected_accept = util.sha1_hash(
self._key + common.WEBSOCKET_ACCEPT_UUID).digest()
expected_accept = base64.b64encode(binary_expected_accept)
self._logger.debug(
'Expected response for challenge: %r (%s)',
expected_accept, util.hexify(binary_expected_accept))
if accept != expected_accept:
raise ClientHandshakeError(
'Invalid %s header: %r (expected: %s)' %
(common.SEC_WEBSOCKET_ACCEPT_HEADER, accept, expected_accept))
deflate_frame_accepted = False
permessage_deflate_accepted = False
extensions_header = fields.get(
common.SEC_WEBSOCKET_EXTENSIONS_HEADER.lower())
accepted_extensions = []
if extensions_header is not None and len(extensions_header) != 0:
accepted_extensions = common.parse_extensions(extensions_header[0])
# TODO(bashi): Support the new style perframe compression extension.
for extension in accepted_extensions:
extension_name = extension.name()
if (extension_name == common.DEFLATE_FRAME_EXTENSION and
self._options.deflate_frame):
deflate_frame_accepted = True
processor = DeflateFrameExtensionProcessor(extension)
unused_extension_response = processor.get_extension_response()
self._options.deflate_frame = processor
continue
elif (extension_name == common.PERMESSAGE_DEFLATE_EXTENSION and
self._options.use_permessage_deflate):
permessage_deflate_accepted = True
framer = _get_permessage_deflate_framer(extension)
framer.set_compress_outgoing_enabled(True)
self._options.use_permessage_deflate = framer
continue
raise ClientHandshakeError(
'Unexpected extension %r' % extension_name)
if (self._options.deflate_frame and not deflate_frame_accepted):
raise ClientHandshakeError(
'Requested %s, but the server rejected it' %
common.DEFLATE_FRAME_EXTENSION)
if (self._options.use_permessage_deflate and
not permessage_deflate_accepted):
raise ClientHandshakeError(
'Requested %s, but the server rejected it' %
common.PERMESSAGE_DEFLATE_EXTENSION)
# TODO(tyoshino): Handle Sec-WebSocket-Protocol
# TODO(tyoshino): Handle Cookie, etc.
class ClientHandshakeProcessorHybi00(ClientHandshakeBase):
"""WebSocket opening handshake processor for
draft-ietf-hybi-thewebsocketprotocol-00 (equivalent to
draft-hixie-thewebsocketprotocol-76).
"""
def __init__(self, socket, options):
super(ClientHandshakeProcessorHybi00, self).__init__()
self._socket = socket
self._options = options
self._logger = util.get_class_logger(self)
if (self._options.deflate_frame or
self._options.use_permessage_deflate):
logging.critical('HyBi 00 doesn\'t support extensions.')
sys.exit(1)
def handshake(self):
"""Performs opening handshake on the specified socket.
Raises:
ClientHandshakeError: handshake failed.
"""
# 4.1 5. send request line.
self._socket.sendall(_build_method_line(self._options.resource))
# 4.1 6. Let /fields/ be an empty list of strings.
fields = []
# 4.1 7. Add the string "Upgrade: WebSocket" to /fields/.
fields.append(_UPGRADE_HEADER_HIXIE75)
# 4.1 8. Add the string "Connection: Upgrade" to /fields/.
fields.append(_CONNECTION_HEADER)
# 4.1 9-12. Add Host: field to /fields/.
fields.append(_format_host_header(
self._options.server_host,
self._options.server_port,
self._options.use_tls))
# 4.1 13. Add Origin: field to /fields/.
if not self._options.origin:
raise ClientHandshakeError(
'Specify the origin of the connection by --origin flag')
fields.append(_origin_header(common.ORIGIN_HEADER,
self._options.origin))
# TODO: 4.1 14 Add Sec-WebSocket-Protocol: field to /fields/.
# TODO: 4.1 15 Add cookie headers to /fields/.
# 4.1 16-23. Add Sec-WebSocket-Key<n> to /fields/.
self._number1, key1 = self._generate_sec_websocket_key()
self._logger.debug('Number1: %d', self._number1)
fields.append('%s: %s\r\n' % (common.SEC_WEBSOCKET_KEY1_HEADER, key1))
self._number2, key2 = self._generate_sec_websocket_key()
self._logger.debug('Number2: %d', self._number2)
fields.append('%s: %s\r\n' % (common.SEC_WEBSOCKET_KEY2_HEADER, key2))
fields.append('%s: 0\r\n' % common.SEC_WEBSOCKET_DRAFT_HEADER)
# 4.1 24. For each string in /fields/, in a random order: send the
# string, encoded as UTF-8, followed by a UTF-8 encoded U+000D CARRIAGE
# RETURN U+000A LINE FEED character pair (CRLF).
random.shuffle(fields)
for field in fields:
self._socket.sendall(field)
# 4.1 25. send a UTF-8-encoded U+000D CARRIAGE RETURN U+000A LINE FEED
# character pair (CRLF).
self._socket.sendall('\r\n')
# 4.1 26. let /key3/ be a string consisting of eight random bytes (or
# equivalently, a random 64 bit integer encoded in a big-endian order).
self._key3 = self._generate_key3()
# 4.1 27. send /key3/ to the server.
self._socket.sendall(self._key3)
self._logger.debug(
'Key3: %r (%s)', self._key3, util.hexify(self._key3))
self._logger.info('Sent handshake')
# 4.1 28. Read bytes from the server until either the connection
# closes, or a 0x0A byte is read. let /field/ be these bytes, including
# the 0x0A bytes.
field = ''
while True:
ch = _receive_bytes(self._socket, 1)
field += ch
if ch == '\n':
break
# if /field/ is not at least seven bytes long, or if the last
# two bytes aren't 0x0D and 0x0A respectively, or if it does not
# contain at least two 0x20 bytes, then fail the WebSocket connection
# and abort these steps.
if len(field) < 7 or not field.endswith('\r\n'):
raise ClientHandshakeError('Wrong status line: %r' % field)
m = re.match('[^ ]* ([^ ]*) .*', field)
if m is None:
raise ClientHandshakeError(
'No HTTP status code found in status line: %r' % field)
# 4.1 29. let /code/ be the substring of /field/ that starts from the
# byte after the first 0x20 byte, and ends with the byte before the
# second 0x20 byte.
code = m.group(1)
# 4.1 30. if /code/ is not three bytes long, or if any of the bytes in
        # /code/ are not in the range 0x30 to 0x39, then fail the WebSocket
# connection and abort these steps.
if not re.match('[0-9][0-9][0-9]', code):
raise ClientHandshakeError(
'HTTP status code %r is not three digit in status line: %r' %
(code, field))
# 4.1 31. if /code/, interpreted as UTF-8, is "101", then move to the
# next step.
if code != '101':
raise ClientHandshakeError(
'Expected HTTP status code 101 but found %r in status line: '
'%r' % (code, field))
# 4.1 32-39. read fields into /fields/
fields = self._read_fields()
# 4.1 40. _Fields processing_
# read a byte from server
ch = _receive_bytes(self._socket, 1)
if ch != '\n': # 0x0A
raise ClientHandshakeError('Expected LF but found %r' % ch)
# 4.1 41. check /fields/
# TODO(ukai): protocol
# if the entry's name is "upgrade"
# if the value is not exactly equal to the string "WebSocket",
# then fail the WebSocket connection and abort these steps.
_validate_mandatory_header(
fields,
common.UPGRADE_HEADER,
common.WEBSOCKET_UPGRADE_TYPE_HIXIE75,
True)
# if the entry's name is "connection"
# if the value, converted to ASCII lowercase, is not exactly equal
# to the string "upgrade", then fail the WebSocket connection and
# abort these steps.
_validate_mandatory_header(
fields,
common.CONNECTION_HEADER,
common.UPGRADE_CONNECTION_TYPE,
False)
origin = _get_mandatory_header(
fields, common.SEC_WEBSOCKET_ORIGIN_HEADER)
location = _get_mandatory_header(
fields, common.SEC_WEBSOCKET_LOCATION_HEADER)
# TODO(ukai): check origin, location, cookie, ..
# 4.1 42. let /challenge/ be the concatenation of /number_1/,
# expressed as a big endian 32 bit integer, /number_2/, expressed
# as big endian 32 bit integer, and the eight bytes of /key_3/ in the
# order they were sent on the wire.
challenge = struct.pack('!I', self._number1)
challenge += struct.pack('!I', self._number2)
challenge += self._key3
self._logger.debug(
'Challenge: %r (%s)', challenge, util.hexify(challenge))
# 4.1 43. let /expected/ be the MD5 fingerprint of /challenge/ as a
# big-endian 128 bit string.
expected = util.md5_hash(challenge).digest()
self._logger.debug(
'Expected challenge response: %r (%s)',
expected, util.hexify(expected))
# 4.1 44. read sixteen bytes from the server.
# let /reply/ be those bytes.
reply = _receive_bytes(self._socket, 16)
self._logger.debug(
'Actual challenge response: %r (%s)', reply, util.hexify(reply))
# 4.1 45. if /reply/ does not exactly equal /expected/, then fail
# the WebSocket connection and abort these steps.
if expected != reply:
raise ClientHandshakeError(
'Bad challenge response: %r (expected) != %r (actual)' %
(expected, reply))
# 4.1 46. The *WebSocket connection is established*.
def _generate_sec_websocket_key(self):
# 4.1 16. let /spaces_n/ be a random integer from 1 to 12 inclusive.
spaces = random.randint(1, 12)
# 4.1 17. let /max_n/ be the largest integer not greater than
# 4,294,967,295 divided by /spaces_n/.
maxnum = 4294967295 / spaces
# 4.1 18. let /number_n/ be a random integer from 0 to /max_n/
# inclusive.
number = random.randint(0, maxnum)
# 4.1 19. let /product_n/ be the result of multiplying /number_n/ and
# /spaces_n/ together.
product = number * spaces
# 4.1 20. let /key_n/ be a string consisting of /product_n/, expressed
# in base ten using the numerals in the range U+0030 DIGIT ZERO (0) to
# U+0039 DIGIT NINE (9).
key = str(product)
# 4.1 21. insert between one and twelve random characters from the
# range U+0021 to U+002F and U+003A to U+007E into /key_n/ at random
# positions.
available_chars = range(0x21, 0x2f + 1) + range(0x3a, 0x7e + 1)
n = random.randint(1, 12)
for _ in xrange(n):
ch = random.choice(available_chars)
pos = random.randint(0, len(key))
key = key[0:pos] + chr(ch) + key[pos:]
# 4.1 22. insert /spaces_n/ U+0020 SPACE characters into /key_n/ at
# random positions other than start or end of the string.
for _ in xrange(spaces):
pos = random.randint(1, len(key) - 1)
key = key[0:pos] + ' ' + key[pos:]
return number, key
def _generate_key3(self):
# 4.1 26. let /key3/ be a string consisting of eight random bytes (or
# equivalently, a random 64 bit integer encoded in a big-endian order).
return ''.join([chr(random.randint(0, 255)) for _ in xrange(8)])
class ClientConnection(object):
"""A wrapper for socket object to provide the mp_conn interface.
    mod_pywebsocket library is designed to work with Apache mod_python's
    mp_conn object.
"""
def __init__(self, socket):
self._socket = socket
def write(self, data):
self._socket.sendall(data)
def read(self, n):
return self._socket.recv(n)
def get_remote_addr(self):
return self._socket.getpeername()
remote_addr = property(get_remote_addr)
class ClientRequest(object):
"""A wrapper class just to make it able to pass a socket object to
functions that expect a mp_request object.
"""
def __init__(self, socket):
self._logger = util.get_class_logger(self)
self._socket = socket
self.connection = ClientConnection(socket)
def _import_ssl():
global ssl
try:
import ssl
return True
except ImportError:
return False
def _import_pyopenssl():
global OpenSSL
try:
import OpenSSL.SSL
return True
except ImportError:
return False
class EchoClient(object):
"""WebSocket echo client."""
def __init__(self, options):
self._options = options
self._socket = None
self._logger = util.get_class_logger(self)
def run(self):
"""Run the client.
Shake hands and then repeat sending message and receiving its echo.
"""
self._socket = socket.socket()
self._socket.settimeout(self._options.socket_timeout)
try:
self._socket.connect((self._options.server_host,
self._options.server_port))
if self._options.use_tls:
self._socket = _TLSSocket(
self._socket,
self._options.tls_module,
self._options.tls_version,
self._options.disable_tls_compression)
version = self._options.protocol_version
if (version == _PROTOCOL_VERSION_HYBI08 or
version == _PROTOCOL_VERSION_HYBI13):
self._handshake = ClientHandshakeProcessor(
self._socket, self._options)
elif version == _PROTOCOL_VERSION_HYBI00:
self._handshake = ClientHandshakeProcessorHybi00(
self._socket, self._options)
else:
raise ValueError(
'Invalid --protocol-version flag: %r' % version)
self._handshake.handshake()
self._logger.info('Connection established')
request = ClientRequest(self._socket)
version_map = {
_PROTOCOL_VERSION_HYBI08: common.VERSION_HYBI08,
_PROTOCOL_VERSION_HYBI13: common.VERSION_HYBI13,
_PROTOCOL_VERSION_HYBI00: common.VERSION_HYBI00}
request.ws_version = version_map[version]
if (version == _PROTOCOL_VERSION_HYBI08 or
version == _PROTOCOL_VERSION_HYBI13):
stream_option = StreamOptions()
stream_option.mask_send = True
stream_option.unmask_receive = False
if self._options.deflate_frame is not False:
processor = self._options.deflate_frame
processor.setup_stream_options(stream_option)
if self._options.use_permessage_deflate is not False:
framer = self._options.use_permessage_deflate
framer.setup_stream_options(stream_option)
self._stream = Stream(request, stream_option)
elif version == _PROTOCOL_VERSION_HYBI00:
self._stream = StreamHixie75(request, True)
for line in self._options.message.split(','):
self._stream.send_message(line)
if self._options.verbose:
print 'Send: %s' % line
try:
received = self._stream.receive_message()
if self._options.verbose:
print 'Recv: %s' % received
except Exception, e:
if self._options.verbose:
print 'Error: %s' % e
raise
self._do_closing_handshake()
finally:
self._socket.close()
def _do_closing_handshake(self):
"""Perform closing handshake using the specified closing frame."""
if self._options.message.split(',')[-1] == _GOODBYE_MESSAGE:
# requested server initiated closing handshake, so
# expecting closing handshake message from server.
self._logger.info('Wait for server-initiated closing handshake')
message = self._stream.receive_message()
if message is None:
print 'Recv close'
print 'Send ack'
self._logger.info(
'Received closing handshake and sent ack')
return
print 'Send close'
self._stream.close_connection()
self._logger.info('Sent closing handshake')
print 'Recv ack'
self._logger.info('Received ack')
def main():
sys.stdout = codecs.getwriter('utf-8')(sys.stdout)
parser = OptionParser()
# We accept --command_line_flag style flags which is the same as Google
# gflags in addition to common --command-line-flag style flags.
parser.add_option('-s', '--server-host', '--server_host',
dest='server_host', type='string',
default='localhost', help='server host')
parser.add_option('-p', '--server-port', '--server_port',
dest='server_port', type='int',
default=_UNDEFINED_PORT, help='server port')
parser.add_option('-o', '--origin', dest='origin', type='string',
default=None, help='origin')
parser.add_option('-r', '--resource', dest='resource', type='string',
default='/echo', help='resource path')
parser.add_option('-m', '--message', dest='message', type='string',
help=('comma-separated messages to send. '
'%s will force close the connection from server.' %
_GOODBYE_MESSAGE))
parser.add_option('-q', '--quiet', dest='verbose', action='store_false',
default=True, help='suppress messages')
parser.add_option('-t', '--tls', dest='use_tls', action='store_true',
default=False, help='use TLS (wss://). By default, '
'it looks for ssl and pyOpenSSL module and uses found '
'one. Use --tls-module option to specify which module '
'to use')
parser.add_option('--tls-module', '--tls_module', dest='tls_module',
type='choice',
choices=[_TLS_BY_STANDARD_MODULE, _TLS_BY_PYOPENSSL],
help='Use ssl module if "%s" is specified. '
'Use pyOpenSSL module if "%s" is specified' %
(_TLS_BY_STANDARD_MODULE, _TLS_BY_PYOPENSSL))
parser.add_option('--tls-version', '--tls_version',
dest='tls_version',
type='string', default=_TLS_VERSION_SSL23,
help='TLS/SSL version to use. One of \'' +
_TLS_VERSION_SSL23 + '\' (SSL version 2 or 3), \'' +
_TLS_VERSION_SSL3 + '\' (SSL version 3), \'' +
_TLS_VERSION_TLS1 + '\' (TLS version 1)')
parser.add_option('--disable-tls-compression', '--disable_tls_compression',
dest='disable_tls_compression',
action='store_true', default=False,
help='Disable TLS compression. Available only when '
'pyOpenSSL module is used.')
parser.add_option('-k', '--socket-timeout', '--socket_timeout',
dest='socket_timeout', type='int', default=_TIMEOUT_SEC,
help='Timeout(sec) for sockets')
parser.add_option('--draft75', dest='draft75',
action='store_true', default=False,
help='Obsolete option. Don\'t use this.')
parser.add_option('--protocol-version', '--protocol_version',
dest='protocol_version',
type='string', default=_PROTOCOL_VERSION_HYBI13,
help='WebSocket protocol version to use. One of \'' +
_PROTOCOL_VERSION_HYBI13 + '\', \'' +
_PROTOCOL_VERSION_HYBI08 + '\', \'' +
_PROTOCOL_VERSION_HYBI00 + '\'')
parser.add_option('--version-header', '--version_header',
dest='version_header',
type='int', default=-1,
help='Specify Sec-WebSocket-Version header value')
parser.add_option('--deflate-frame', '--deflate_frame',
dest='deflate_frame',
action='store_true', default=False,
help='Use the deflate-frame extension.')
parser.add_option('--use-permessage-deflate', '--use_permessage_deflate',
dest='use_permessage_deflate',
action='store_true', default=False,
help='Use the permessage-deflate extension.')
parser.add_option('--log-level', '--log_level', type='choice',
dest='log_level', default='warn',
choices=['debug', 'info', 'warn', 'error', 'critical'],
help='Log level.')
(options, unused_args) = parser.parse_args()
logging.basicConfig(level=logging.getLevelName(options.log_level.upper()))
if options.draft75:
logging.critical('--draft75 option is obsolete.')
sys.exit(1)
if options.protocol_version == _PROTOCOL_VERSION_HIXIE75:
logging.critical(
'Value %s is obsolete for --protocol_version options' %
_PROTOCOL_VERSION_HIXIE75)
sys.exit(1)
if options.use_tls:
if options.tls_module is None:
if _import_ssl():
options.tls_module = _TLS_BY_STANDARD_MODULE
logging.debug('Using ssl module')
elif _import_pyopenssl():
options.tls_module = _TLS_BY_PYOPENSSL
logging.debug('Using pyOpenSSL module')
else:
logging.critical(
'TLS support requires ssl or pyOpenSSL module.')
sys.exit(1)
elif options.tls_module == _TLS_BY_STANDARD_MODULE:
if not _import_ssl():
logging.critical('ssl module is not available')
sys.exit(1)
elif options.tls_module == _TLS_BY_PYOPENSSL:
if not _import_pyopenssl():
logging.critical('pyOpenSSL module is not available')
sys.exit(1)
else:
logging.critical('Invalid --tls-module option: %r',
options.tls_module)
sys.exit(1)
if (options.disable_tls_compression and
options.tls_module != _TLS_BY_PYOPENSSL):
logging.critical('You can disable TLS compression only when '
'pyOpenSSL module is used.')
sys.exit(1)
else:
if options.tls_module is not None:
logging.critical('Use --tls-module option only together with '
'--use-tls option.')
sys.exit(1)
if options.disable_tls_compression:
logging.critical('Use --disable-tls-compression only together '
'with --use-tls option.')
sys.exit(1)
# Default port number depends on whether TLS is used.
if options.server_port == _UNDEFINED_PORT:
if options.use_tls:
options.server_port = common.DEFAULT_WEB_SOCKET_SECURE_PORT
else:
options.server_port = common.DEFAULT_WEB_SOCKET_PORT
# optparse doesn't seem to handle non-ascii default values.
# Set default message here.
if not options.message:
options.message = u'Hello,\u65e5\u672c' # "Japan" in Japanese
EchoClient(options).run()
if __name__ == '__main__':
main()
# vi:sts=4 sw=4 et
|
mpl-2.0
|
cloudera/hue
|
desktop/core/ext-py/odfpy-1.4.1/tests/testxmlgen.py
|
4
|
5376
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2009 Søren Roug, European Environment Agency
#
# This is free software. You may redistribute it under the terms
# of the Apache license and the GNU General Public License Version
# 2 or at your option any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Contributor(s):
#
import xml.sax, xml.sax.saxutils
import io
import tempfile
import unittest
import sys
class MyGen(xml.sax.saxutils.XMLGenerator):
def _qname(self, name):
"""Builds a qualified name from a (ns_url, localname) pair"""
if name[0]:
if name[0] == u'http://www.w3.org/XML/1998/namespace':
return u'xml' + ":" + name[1]
# The name is in a non-empty namespace
prefix = self._current_context[name[0]]
if prefix:
# If it is not the default namespace, prepend the prefix
return prefix + ":" + name[1]
# Return the unqualified name
return name[1]
class TestXMLGenerator(unittest.TestCase):
def test_xmlgenerator(self):
""" Test that the xml namespace is understood by XMLGenerator """
outfp = tempfile.TemporaryFile()
c = xml.sax.saxutils.XMLGenerator(outfp,'utf-8')
parser = xml.sax.make_parser()
parser.setFeature(xml.sax.handler.feature_namespaces, 1)
parser.setContentHandler(c)
testcontent="""<?xml version="1.0"?>
<a:greetings xmlns:a="http://example.com/ns" xmlns:xml="http://www.w3.org/XML/1998/namespace">
<a:greet xml:lang="en">Hello world</a:greet>
</a:greetings>"""
parser.feed(testcontent)
parser.close()
expectedresult = """<?xml version="1.0" encoding="utf-8"?>
<a:greetings xmlns:a="http://example.com/ns" xmlns:xml="http://www.w3.org/XML/1998/namespace">
<a:greet xml:lang="en">Hello world</a:greet>
</a:greetings>"""
outfp.seek(0)
self.assertEqual( outfp.read().decode('utf-8'), expectedresult)
outfp.close()
def test_xmlgenerator_wo_ns(self):
""" Test that the missing xml namespace is understood by XMLGenerator """
outfp = tempfile.TemporaryFile()
c = xml.sax.saxutils.XMLGenerator(outfp,'utf-8')
parser = xml.sax.make_parser()
parser.setFeature(xml.sax.handler.feature_namespaces, 1)
parser.setContentHandler(c)
testcontent="""<?xml version="1.0"?>
<a:greetings xmlns:a="http://example.com/ns">
<a:greet xml:lang="en">Hello world</a:greet>
</a:greetings>"""
# There is a bug in older versions of saxutils
if sys.version_info[0] == 2 and sys.version_info[1] == 6:
self.assertRaises(KeyError, parser.feed, testcontent)
else:
parser.feed(testcontent)
parser.close()
expectedresult="""<?xml version="1.0" encoding="utf-8"?>
<a:greetings xmlns:a="http://example.com/ns">
<a:greet xml:lang="en">Hello world</a:greet>
</a:greetings>"""
outfp.seek(0)
self.assertEqual( outfp.read().decode('utf-8'), expectedresult)
outfp.close()
def test_myxml(self):
""" Test that my patch works """
outfp = tempfile.TemporaryFile()
c = MyGen(outfp,'utf-8')
parser = xml.sax.make_parser()
parser.setFeature(xml.sax.handler.feature_namespaces, 1)
parser.setContentHandler(c)
testcontent="""<?xml version="1.0"?>
<a:greetings xmlns:a="http://example.com/ns" xmlns:xml="http://www.w3.org/XML/1998/namespace">
<a:greet xml:lang="en">Hello world</a:greet>
</a:greetings>"""
parser.feed(testcontent)
parser.close()
outfp.seek(0)
expectedresult = """<?xml version="1.0" encoding="utf-8"?>
<a:greetings xmlns:a="http://example.com/ns" xmlns:xml="http://www.w3.org/XML/1998/namespace">
<a:greet xml:lang="en">Hello world</a:greet>
</a:greetings>"""
self.assertEqual( outfp.read().decode('utf-8'), expectedresult)
outfp.close()
def test_myxml_wo_xml(self):
""" Test that my patch understands the missing xml namespace """
outfp = tempfile.TemporaryFile()
c = MyGen(outfp,'utf-8')
parser = xml.sax.make_parser()
parser.setFeature(xml.sax.handler.feature_namespaces, 1)
parser.setContentHandler(c)
testcontent="""<?xml version="1.0"?>
<a:greetings xmlns:a="http://example.com/ns" xmlns:xml="http://www.w3.org/XML/1998/namespace">
<a:greet xml:lang="en">Hello world</a:greet>
</a:greetings>"""
parser.feed(testcontent)
parser.close()
outfp.seek(0)
expectedresult = """<?xml version="1.0" encoding="utf-8"?>
<a:greetings xmlns:a="http://example.com/ns" xmlns:xml="http://www.w3.org/XML/1998/namespace">
<a:greet xml:lang="en">Hello world</a:greet>
</a:greetings>"""
self.assertEqual( outfp.read().decode('utf-8'), expectedresult)
outfp.close()
if __name__ == '__main__':
unittest.main()
|
apache-2.0
|
kontrafiktion/ansible
|
docsite/build-site.py
|
56
|
2986
|
#!/usr/bin/env python
# (c) 2012, Michael DeHaan <[email protected]>
#
# This file is part of the Ansible Documentation
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import print_function
__docformat__ = 'restructuredtext'
import os
import sys
import traceback
try:
from sphinx.application import Sphinx
except ImportError:
print("#################################")
print("Dependency missing: Python Sphinx")
print("#################################")
sys.exit(1)
class SphinxBuilder(object):
"""
Creates HTML documentation using Sphinx.
"""
def __init__(self):
"""
        Set up Sphinx and build the HTML documentation.
"""
print("Creating html documentation ...")
try:
buildername = 'html'
outdir = os.path.abspath(os.path.join(os.getcwd(), "htmlout"))
# Create the output directory if it doesn't exist
if not os.access(outdir, os.F_OK):
os.mkdir(outdir)
doctreedir = os.path.join('./', '.doctrees')
confdir = os.path.abspath('./')
srcdir = os.path.abspath('rst')
freshenv = True
# Create the builder
app = Sphinx(srcdir,
confdir,
outdir,
doctreedir,
buildername,
{},
sys.stdout,
sys.stderr,
freshenv)
            self.app = app  # keep a reference so build_docs() can rebuild later
            self.app.builder.build_all()
except ImportError:
traceback.print_exc()
except Exception as ex:
print("FAIL! exiting ... (%s)" % ex, file=sys.stderr)
def build_docs(self):
self.app.builder.build_all()
def build_rst_docs():
docgen = SphinxBuilder()
if __name__ == '__main__':
if '-h' in sys.argv or '--help' in sys.argv:
print("This script builds the html documentation from rst/asciidoc sources.\n")
print(" Run 'make docs' to build everything.")
print(" Run 'make viewdocs' to build and then preview in a web browser.")
sys.exit(0)
build_rst_docs()
if "view" in sys.argv:
import webbrowser
if not webbrowser.open('htmlout/index.html'):
print("Could not open on your webbrowser.", file=sys.stderr)
|
gpl-3.0
|
TeachAtTUM/edx-platform
|
openedx/core/djangoapps/oauth_dispatch/tests/test_dop_adapter.py
|
50
|
2439
|
"""
Tests for DOP Adapter
"""
from datetime import timedelta
import ddt
from django.test import TestCase
from django.utils.timezone import now
from provider.oauth2 import models
from provider import constants
from student.tests.factories import UserFactory
from ..adapters import DOPAdapter
from .constants import DUMMY_REDIRECT_URL
@ddt.ddt
class DOPAdapterTestCase(TestCase):
"""
Test class for DOPAdapter.
"""
adapter = DOPAdapter()
def setUp(self):
super(DOPAdapterTestCase, self).setUp()
self.user = UserFactory()
self.public_client = self.adapter.create_public_client(
name='public client',
user=self.user,
redirect_uri=DUMMY_REDIRECT_URL,
client_id='public-client-id',
)
self.confidential_client = self.adapter.create_confidential_client(
name='confidential client',
user=self.user,
redirect_uri=DUMMY_REDIRECT_URL,
client_id='confidential-client-id',
)
@ddt.data(
('confidential', constants.CONFIDENTIAL),
('public', constants.PUBLIC),
)
@ddt.unpack
def test_create_client(self, client_name, client_type):
client = getattr(self, '{}_client'.format(client_name))
self.assertIsInstance(client, models.Client)
self.assertEqual(client.client_id, '{}-client-id'.format(client_name))
self.assertEqual(client.client_type, client_type)
def test_get_client(self):
client = self.adapter.get_client(client_type=constants.CONFIDENTIAL)
self.assertIsInstance(client, models.Client)
self.assertEqual(client.client_type, constants.CONFIDENTIAL)
def test_get_client_not_found(self):
with self.assertRaises(models.Client.DoesNotExist):
self.adapter.get_client(client_id='not-found')
def test_get_client_for_token(self):
token = models.AccessToken(
user=self.user,
client=self.public_client,
)
self.assertEqual(self.adapter.get_client_for_token(token), self.public_client)
def test_get_access_token(self):
token = models.AccessToken.objects.create(
token='token-id',
client=self.public_client,
user=self.user,
expires=now() + timedelta(days=30),
)
self.assertEqual(self.adapter.get_access_token(token_string='token-id'), token)
|
agpl-3.0
|
librasungirl/openthread
|
tools/harness-automation/autothreadharness/__init__.py
|
36
|
1580
|
#!/usr/bin/env python
#
# Copyright (c) 2016, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
|
bsd-3-clause
|
DefyVentures/edx-platform
|
lms/djangoapps/shoppingcart/migrations/0005_auto__add_paidcourseregistrationannotation__add_field_orderitem_report.py
|
114
|
9808
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'PaidCourseRegistrationAnnotation'
db.create_table('shoppingcart_paidcourseregistrationannotation', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('course_id', self.gf('django.db.models.fields.CharField')(unique=True, max_length=128, db_index=True)),
('annotation', self.gf('django.db.models.fields.TextField')(null=True)),
))
db.send_create_signal('shoppingcart', ['PaidCourseRegistrationAnnotation'])
# Adding field 'OrderItem.report_comments'
db.add_column('shoppingcart_orderitem', 'report_comments',
self.gf('django.db.models.fields.TextField')(default=''),
keep_default=False)
def backwards(self, orm):
# Deleting model 'PaidCourseRegistrationAnnotation'
db.delete_table('shoppingcart_paidcourseregistrationannotation')
# Deleting field 'OrderItem.report_comments'
db.delete_column('shoppingcart_orderitem', 'report_comments')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'shoppingcart.certificateitem': {
'Meta': {'object_name': 'CertificateItem', '_ormbases': ['shoppingcart.OrderItem']},
'course_enrollment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['student.CourseEnrollment']"}),
'course_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
'mode': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'orderitem_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['shoppingcart.OrderItem']", 'unique': 'True', 'primary_key': 'True'})
},
'shoppingcart.order': {
'Meta': {'object_name': 'Order'},
'bill_to_cardtype': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'bill_to_ccnum': ('django.db.models.fields.CharField', [], {'max_length': '8', 'blank': 'True'}),
'bill_to_city': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'bill_to_country': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'bill_to_first': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'bill_to_last': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'bill_to_postalcode': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
'bill_to_state': ('django.db.models.fields.CharField', [], {'max_length': '8', 'blank': 'True'}),
'bill_to_street1': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'bill_to_street2': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'currency': ('django.db.models.fields.CharField', [], {'default': "'usd'", 'max_length': '8'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'processor_reply_dump': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'purchase_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'cart'", 'max_length': '32'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'shoppingcart.orderitem': {
'Meta': {'object_name': 'OrderItem'},
'currency': ('django.db.models.fields.CharField', [], {'default': "'usd'", 'max_length': '8'}),
'fulfilled_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'line_desc': ('django.db.models.fields.CharField', [], {'default': "'Misc. Item'", 'max_length': '1024'}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shoppingcart.Order']"}),
'qty': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'report_comments': ('django.db.models.fields.TextField', [], {'default': "''"}),
'status': ('django.db.models.fields.CharField', [], {'default': "'cart'", 'max_length': '32'}),
'unit_cost': ('django.db.models.fields.DecimalField', [], {'default': '0.0', 'max_digits': '30', 'decimal_places': '2'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'shoppingcart.paidcourseregistration': {
'Meta': {'object_name': 'PaidCourseRegistration', '_ormbases': ['shoppingcart.OrderItem']},
'course_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
'mode': ('django.db.models.fields.SlugField', [], {'default': "'honor'", 'max_length': '50'}),
'orderitem_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['shoppingcart.OrderItem']", 'unique': 'True', 'primary_key': 'True'})
},
'shoppingcart.paidcourseregistrationannotation': {
'Meta': {'object_name': 'PaidCourseRegistrationAnnotation'},
'annotation': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'course_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'student.courseenrollment': {
'Meta': {'ordering': "('user', 'course_id')", 'unique_together': "(('user', 'course_id'),)", 'object_name': 'CourseEnrollment'},
'course_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'mode': ('django.db.models.fields.CharField', [], {'default': "'honor'", 'max_length': '100'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
}
}
complete_apps = ['shoppingcart']
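# --- Editor's illustrative note (not part of the original migration) ---
# This South schema migration is applied and reverted with the usual
# manage.py commands, e.g.:
#   ./manage.py migrate shoppingcart 0005   # apply (forwards)
#   ./manage.py migrate shoppingcart 0004   # roll back (backwards)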
|
agpl-3.0
|
solvebio/xmltodict
|
tests/test_xmltodict.py
|
2
|
8864
|
import threading
import xml.parsers.expat
from xmltodict import parse, ParsingInterrupted
try:
import unittest2 as unittest
except ImportError:
import unittest
try:
from io import BytesIO as StringIO
except ImportError:
from xmltodict import StringIO
def _encode(s):
try:
return bytes(s, 'ascii')
except (NameError, TypeError):
return s
class XMLToDictTestCase(unittest.TestCase):
def test_string_vs_file(self):
xml = '<a>data</a>'
self.assertEqual(parse(xml),
parse(StringIO(_encode(xml))))
def test_minimal(self):
self.assertEqual(parse('<a/>'),
{'a': None})
self.assertEqual(parse('<a/>', force_cdata=True),
{'a': None})
def test_simple(self):
self.assertEqual(parse('<a>data</a>'),
{'a': 'data'})
def test_force_cdata(self):
self.assertEqual(parse('<a>data</a>', force_cdata=True),
{'a': {'#text': 'data'}})
def test_custom_cdata(self):
self.assertEqual(parse('<a>data</a>',
force_cdata=True,
cdata_key='_CDATA_'),
{'a': {'_CDATA_': 'data'}})
def test_list(self):
self.assertEqual(parse('<a><b>1</b><b>2</b><b>3</b></a>'),
{'a': {'b': ['1', '2', '3']}})
def test_attrib(self):
self.assertEqual(parse('<a href="xyz"/>'),
{'a': {'@href': 'xyz'}})
def test_skip_attrib(self):
self.assertEqual(parse('<a href="xyz"/>', xml_attribs=False),
{'a': None})
def test_custom_attrib(self):
self.assertEqual(parse('<a href="xyz"/>',
attr_prefix='!'),
{'a': {'!href': 'xyz'}})
def test_attrib_and_cdata(self):
self.assertEqual(parse('<a href="xyz">123</a>'),
{'a': {'@href': 'xyz', '#text': '123'}})
def test_semi_structured(self):
self.assertEqual(parse('<a>abc<b/>def</a>'),
{'a': {'b': None, '#text': 'abcdef'}})
self.assertEqual(parse('<a>abc<b/>def</a>',
cdata_separator='\n'),
{'a': {'b': None, '#text': 'abc\ndef'}})
def test_nested_semi_structured(self):
self.assertEqual(parse('<a>abc<b>123<c/>456</b>def</a>'),
{'a': {'#text': 'abcdef', 'b': {
'#text': '123456', 'c': None}}})
def test_skip_whitespace(self):
xml = """
<root>
<emptya> </emptya>
<emptyb attr="attrvalue">
</emptyb>
<value>hello</value>
</root>
"""
self.assertEqual(
parse(xml),
{'root': {'emptya': None,
'emptyb': {'@attr': 'attrvalue'},
'value': 'hello'}})
def test_keep_whitespace(self):
xml = "<root> </root>"
self.assertEqual(parse(xml), dict(root=None))
self.assertEqual(parse(xml, strip_whitespace=False),
dict(root=' '))
def test_streaming(self):
def cb(path, item):
cb.count += 1
self.assertEqual(path, [('a', {'x': 'y'}), ('b', None)])
self.assertEqual(item, str(cb.count))
return True
cb.count = 0
parse('<a x="y"><b>1</b><b>2</b><b>3</b></a>',
item_depth=2, item_callback=cb)
self.assertEqual(cb.count, 3)
def test_streaming_interrupt(self):
cb = lambda path, item: False
self.assertRaises(ParsingInterrupted,
parse, '<a>x</a>',
item_depth=1, item_callback=cb)
def test_postprocessor(self):
def postprocessor(path, key, value):
try:
return key + ':int', int(value)
except (ValueError, TypeError):
return key, value
self.assertEqual({'a': {'b:int': [1, 2], 'b': 'x'}},
parse('<a><b>1</b><b>2</b><b>x</b></a>',
postprocessor=postprocessor))
def test_postprocessor_skip(self):
def postprocessor(path, key, value):
if key == 'b':
value = int(value)
if value == 3:
return None
return key, value
self.assertEqual({'a': {'b': [1, 2]}},
parse('<a><b>1</b><b>2</b><b>3</b></a>',
postprocessor=postprocessor))
def test_unicode(self):
try:
value = unichr(39321)
except NameError:
value = chr(39321)
self.assertEqual({'a': value},
parse('<a>%s</a>' % value))
def test_encoded_string(self):
try:
value = unichr(39321)
except NameError:
value = chr(39321)
xml = '<a>%s</a>' % value
self.assertEqual(parse(xml),
parse(xml.encode('utf-8')))
def test_namespace_support(self):
xml = """
<root xmlns="http://defaultns.com/"
xmlns:a="http://a.com/"
xmlns:b="http://b.com/">
<x a:attr="val">1</x>
<a:y>2</a:y>
<b:z>3</b:z>
</root>
"""
d = {
'http://defaultns.com/:root': {
'http://defaultns.com/:x': {
'@http://a.com/:attr': 'val',
'#text': '1',
},
'http://a.com/:y': '2',
'http://b.com/:z': '3',
}
}
self.assertEqual(parse(xml, process_namespaces=True), d)
def test_namespace_collapse(self):
xml = """
<root xmlns="http://defaultns.com/"
xmlns:a="http://a.com/"
xmlns:b="http://b.com/">
<x a:attr="val">1</x>
<a:y>2</a:y>
<b:z>3</b:z>
</root>
"""
namespaces = {
'http://defaultns.com/': None,
'http://a.com/': 'ns_a',
}
d = {
'root': {
'x': {
'@ns_a:attr': 'val',
'#text': '1',
},
'ns_a:y': '2',
'http://b.com/:z': '3',
},
}
self.assertEqual(
parse(xml, process_namespaces=True, namespaces=namespaces), d)
def test_namespace_ignore(self):
xml = """
<root xmlns="http://defaultns.com/"
xmlns:a="http://a.com/"
xmlns:b="http://b.com/">
<x>1</x>
<a:y>2</a:y>
<b:z>3</b:z>
</root>
"""
d = {
'root': {
'@xmlns': 'http://defaultns.com/',
'@xmlns:a': 'http://a.com/',
'@xmlns:b': 'http://b.com/',
'x': '1',
'a:y': '2',
'b:z': '3',
},
}
self.assertEqual(parse(xml), d)
class StreamingGeneratorTests(unittest.TestCase):
def test_generator(self):
active_threads = threading.active_count()
data = '<a x="y"><b>1</b><b>2</b><b>3</b></a>'
with parse(data, item_depth=2) as gen:
result = [item for path, item in gen]
self.assertEqual(result, ['1', '2', '3'])
self.assertEqual(threading.active_count(), active_threads)
def test_empty_generator(self):
active_threads = threading.active_count()
data = '<a x="y"></a>'
with parse(data, item_depth=2) as gen:
result = [item for path, item in gen]
self.assertEqual(result, [])
self.assertEqual(threading.active_count(), active_threads)
def test_exception_handling(self):
active_threads = threading.active_count()
def test():
with parse('', item_depth=2) as gen:
list(gen)
self.assertRaises(xml.parsers.expat.ExpatError, test)
self.assertEqual(threading.active_count(), active_threads)
def test_incomplete_iteration(self):
active_threads = threading.active_count()
data = '<a x="y"><b>1</b><b>2</b><b>3</b></a>'
with parse(data, item_depth=2) as gen:
for path, item in gen:
if item == '2':
break
self.assertEqual(threading.active_count(), active_threads)
def test_incomplete_iteration_without_close(self):
active_threads = threading.active_count()
data = '<a x="y"><b>1</b><b>2</b><b>3</b></a>'
with parse(data, item_depth=2) as gen:
next(gen)
self.assertEqual(threading.active_count(), active_threads)
|
mit
|
embeddedarm/android_external_chromium_org
|
tools/telemetry/telemetry/value/value_backcompat.py
|
36
|
1984
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Backward compatibility for old results API.
This module helps convert the old PageMeasurementResults API into the new
style one. This exists as a bridging solution so we can change the underlying
implementation and update the PageMeasurementResults API once we know the
underlying implementation is solid.
"""
from telemetry import value as value_module
from telemetry.value import histogram
from telemetry.value import list_of_scalar_values
from telemetry.value import scalar
def ConvertOldCallingConventionToValue(page, trace_name, units,
value, chart_name, data_type):
value_name = value_module.ValueNameFromTraceAndChartName(
trace_name, chart_name)
if data_type == 'default':
if isinstance(value, list):
return list_of_scalar_values.ListOfScalarValues(
page, value_name, units, value, important=True)
else:
return scalar.ScalarValue(page, value_name, units,
value, important=True)
elif data_type == 'unimportant':
if isinstance(value, list):
return list_of_scalar_values.ListOfScalarValues(
page, value_name, units, value, important=False)
else:
return scalar.ScalarValue(page, value_name, units,
value, important=False)
elif data_type == 'histogram':
assert isinstance(value, basestring)
return histogram.HistogramValue(
page, value_name, units, raw_value_json=value, important=True)
elif data_type == 'unimportant-histogram':
assert isinstance(value, basestring)
return histogram.HistogramValue(
page, value_name, units, raw_value_json=value, important=False)
elif data_type == 'informational':
raise NotImplementedError()
else:
    raise ValueError('Unrecognized data type %s' % data_type)
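# --- Editor's illustrative sketch (not part of the original module) ---
# How the old calling convention maps onto Value objects; `page` stands for a
# telemetry page object and the names/numbers are hypothetical:
#
#   ConvertOldCallingConventionToValue(page, 'warm_load', 'ms', 42,
#                                      'load_times', 'default')
#   # -> scalar.ScalarValue(page, <name built from trace/chart>, 'ms', 42,
#   #                       important=True)
#
#   ConvertOldCallingConventionToValue(page, 'warm_load', 'ms', [40, 41, 42],
#                                      'load_times', 'unimportant')
#   # -> list_of_scalar_values.ListOfScalarValues(..., important=False)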
|
bsd-3-clause
|
wskplho/sl4a
|
python/gdata/src/gdata/urlfetch.py
|
276
|
9330
|
#!/usr/bin/python
#
# Copyright (C) 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Provides HTTP functions for gdata.service to use on Google App Engine
AppEngineHttpClient: Provides an HTTP request method which uses App Engine's
urlfetch API. Set the http_client member of a GDataService object to an
instance of an AppEngineHttpClient to allow the gdata library to run on
Google App Engine.
run_on_appengine: Function which will modify an existing GDataService object
to allow it to run on App Engine. It works by creating a new instance of
the AppEngineHttpClient and replacing the GDataService object's
http_client.
HttpRequest: Function that wraps google.appengine.api.urlfetch.Fetch in a
common interface which is used by gdata.service.GDataService. In other
words, this module can be used as the gdata service request handler so
that all HTTP requests will be performed by the hosting Google App Engine
server.
"""
__author__ = 'api.jscudder (Jeff Scudder)'
import StringIO
import atom.service
import atom.http_interface
from google.appengine.api import urlfetch
def run_on_appengine(gdata_service):
"""Modifies a GDataService object to allow it to run on App Engine.
Args:
gdata_service: An instance of AtomService, GDataService, or any
of their subclasses which has an http_client member.
"""
gdata_service.http_client = AppEngineHttpClient()
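# --- Editor's illustrative sketch (not part of the original module) ---
# Typical use from App Engine application code; the service class is whichever
# gdata service the application already uses:
#
#   import gdata.service
#   import gdata.urlfetch
#   client = gdata.service.GDataService()
#   gdata.urlfetch.run_on_appengine(client)
#   # Every request made through `client` is now performed with
#   # google.appengine.api.urlfetch via AppEngineHttpClient.request().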
class AppEngineHttpClient(atom.http_interface.GenericHttpClient):
def __init__(self, headers=None):
self.debug = False
self.headers = headers or {}
def request(self, operation, url, data=None, headers=None):
"""Performs an HTTP call to the server, supports GET, POST, PUT, and
DELETE.
    Usage example, perform an HTTP GET on http://www.google.com/:
import atom.http
client = atom.http.HttpClient()
http_response = client.request('GET', 'http://www.google.com/')
Args:
operation: str The HTTP operation to be performed. This is usually one
of 'GET', 'POST', 'PUT', or 'DELETE'
data: filestream, list of parts, or other object which can be converted
to a string. Should be set to None when performing a GET or DELETE.
If data is a file-like object which can be read, this method will
read a chunk of 100K bytes at a time and send them.
If the data is a list of parts to be sent, each part will be
evaluated and sent.
url: The full URL to which the request should be sent. Can be a string
or atom.url.Url.
headers: dict of strings. HTTP headers which should be sent
in the request.
"""
all_headers = self.headers.copy()
if headers:
all_headers.update(headers)
# Construct the full payload.
# Assume that data is None or a string.
data_str = data
if data:
if isinstance(data, list):
# If data is a list of different objects, convert them all to strings
# and join them together.
        converted_parts = [_ConvertDataPart(x) for x in data]
data_str = ''.join(converted_parts)
else:
        data_str = _ConvertDataPart(data)
# If the list of headers does not include a Content-Length, attempt to
# calculate it based on the data object.
if data and 'Content-Length' not in all_headers:
all_headers['Content-Length'] = len(data_str)
# Set the content type to the default value if none was set.
if 'Content-Type' not in all_headers:
all_headers['Content-Type'] = 'application/atom+xml'
# Lookup the urlfetch operation which corresponds to the desired HTTP verb.
if operation == 'GET':
method = urlfetch.GET
elif operation == 'POST':
method = urlfetch.POST
elif operation == 'PUT':
method = urlfetch.PUT
elif operation == 'DELETE':
method = urlfetch.DELETE
else:
method = None
return HttpResponse(urlfetch.Fetch(url=str(url), payload=data_str,
method=method, headers=all_headers))
def HttpRequest(service, operation, data, uri, extra_headers=None,
url_params=None, escape_params=True, content_type='application/atom+xml'):
"""Performs an HTTP call to the server, supports GET, POST, PUT, and DELETE.
This function is deprecated, use AppEngineHttpClient.request instead.
To use this module with gdata.service, you can set this module to be the
http_request_handler so that HTTP requests use Google App Engine's urlfetch.
import gdata.service
import gdata.urlfetch
gdata.service.http_request_handler = gdata.urlfetch
Args:
service: atom.AtomService object which contains some of the parameters
needed to make the request. The following members are used to
construct the HTTP call: server (str), additional_headers (dict),
port (int), and ssl (bool).
operation: str The HTTP operation to be performed. This is usually one of
'GET', 'POST', 'PUT', or 'DELETE'
data: filestream, list of parts, or other object which can be
converted to a string.
      Should be set to None when performing a GET or DELETE.
If data is a file-like object which can be read, this method will read
a chunk of 100K bytes at a time and send them.
If the data is a list of parts to be sent, each part will be evaluated
and sent.
uri: The beginning of the URL to which the request should be sent.
Examples: '/', '/base/feeds/snippets',
'/m8/feeds/contacts/default/base'
extra_headers: dict of strings. HTTP headers which should be sent
in the request. These headers are in addition to those stored in
service.additional_headers.
url_params: dict of strings. Key value pairs to be added to the URL as
URL parameters. For example {'foo':'bar', 'test':'param'} will
become ?foo=bar&test=param.
escape_params: bool default True. If true, the keys and values in
url_params will be URL escaped when the form is constructed
(Special characters converted to %XX form.)
content_type: str The MIME type for the data being sent. Defaults to
'application/atom+xml', this is only used if data is set.
"""
full_uri = atom.service.BuildUri(uri, url_params, escape_params)
(server, port, ssl, partial_uri) = atom.service.ProcessUrl(service, full_uri)
# Construct the full URL for the request.
if ssl:
full_url = 'https://%s%s' % (server, partial_uri)
else:
full_url = 'http://%s%s' % (server, partial_uri)
# Construct the full payload.
# Assume that data is None or a string.
data_str = data
if data:
if isinstance(data, list):
# If data is a list of different objects, convert them all to strings
# and join them together.
      converted_parts = [_ConvertDataPart(x) for x in data]
data_str = ''.join(converted_parts)
else:
      data_str = _ConvertDataPart(data)
# Construct the dictionary of HTTP headers.
headers = {}
if isinstance(service.additional_headers, dict):
headers = service.additional_headers.copy()
if isinstance(extra_headers, dict):
for header, value in extra_headers.iteritems():
headers[header] = value
# Add the content type header (we don't need to calculate content length,
# since urlfetch.Fetch will calculate for us).
if content_type:
headers['Content-Type'] = content_type
# Lookup the urlfetch operation which corresponds to the desired HTTP verb.
if operation == 'GET':
method = urlfetch.GET
elif operation == 'POST':
method = urlfetch.POST
elif operation == 'PUT':
method = urlfetch.PUT
elif operation == 'DELETE':
method = urlfetch.DELETE
else:
method = None
return HttpResponse(urlfetch.Fetch(url=full_url, payload=data_str,
method=method, headers=headers))
# Note: a single leading underscore (rather than two) is used so this helper
# can be called from inside AppEngineHttpClient without being rewritten by
# Python's class-private name mangling.
def _ConvertDataPart(data):
if not data or isinstance(data, str):
return data
elif hasattr(data, 'read'):
# data is a file like object, so read it completely.
return data.read()
# The data object was not a file.
# Try to convert to a string and send the data.
return str(data)
class HttpResponse(object):
"""Translates a urlfetch resoinse to look like an hhtplib resoinse.
Used to allow the resoinse from HttpRequest to be usable by gdata.service
methods.
"""
def __init__(self, urlfetch_response):
self.body = StringIO.StringIO(urlfetch_response.content)
self.headers = urlfetch_response.headers
self.status = urlfetch_response.status_code
self.reason = ''
def read(self, length=None):
if not length:
return self.body.read()
else:
return self.body.read(length)
def getheader(self, name):
if not self.headers.has_key(name):
return self.headers[name.lower()]
return self.headers[name]
|
apache-2.0
|
slightstone/SickRage
|
lib/unidecode/x0c4.py
|
253
|
5024
|
data = (
'sswals', # 0x00
'sswalt', # 0x01
'sswalp', # 0x02
'sswalh', # 0x03
'sswam', # 0x04
'sswab', # 0x05
'sswabs', # 0x06
'sswas', # 0x07
'sswass', # 0x08
'sswang', # 0x09
'sswaj', # 0x0a
'sswac', # 0x0b
'sswak', # 0x0c
'sswat', # 0x0d
'sswap', # 0x0e
'sswah', # 0x0f
'sswae', # 0x10
'sswaeg', # 0x11
'sswaegg', # 0x12
'sswaegs', # 0x13
'sswaen', # 0x14
'sswaenj', # 0x15
'sswaenh', # 0x16
'sswaed', # 0x17
'sswael', # 0x18
'sswaelg', # 0x19
'sswaelm', # 0x1a
'sswaelb', # 0x1b
'sswaels', # 0x1c
'sswaelt', # 0x1d
'sswaelp', # 0x1e
'sswaelh', # 0x1f
'sswaem', # 0x20
'sswaeb', # 0x21
'sswaebs', # 0x22
'sswaes', # 0x23
'sswaess', # 0x24
'sswaeng', # 0x25
'sswaej', # 0x26
'sswaec', # 0x27
'sswaek', # 0x28
'sswaet', # 0x29
'sswaep', # 0x2a
'sswaeh', # 0x2b
'ssoe', # 0x2c
'ssoeg', # 0x2d
'ssoegg', # 0x2e
'ssoegs', # 0x2f
'ssoen', # 0x30
'ssoenj', # 0x31
'ssoenh', # 0x32
'ssoed', # 0x33
'ssoel', # 0x34
'ssoelg', # 0x35
'ssoelm', # 0x36
'ssoelb', # 0x37
'ssoels', # 0x38
'ssoelt', # 0x39
'ssoelp', # 0x3a
'ssoelh', # 0x3b
'ssoem', # 0x3c
'ssoeb', # 0x3d
'ssoebs', # 0x3e
'ssoes', # 0x3f
'ssoess', # 0x40
'ssoeng', # 0x41
'ssoej', # 0x42
'ssoec', # 0x43
'ssoek', # 0x44
'ssoet', # 0x45
'ssoep', # 0x46
'ssoeh', # 0x47
'ssyo', # 0x48
'ssyog', # 0x49
'ssyogg', # 0x4a
'ssyogs', # 0x4b
'ssyon', # 0x4c
'ssyonj', # 0x4d
'ssyonh', # 0x4e
'ssyod', # 0x4f
'ssyol', # 0x50
'ssyolg', # 0x51
'ssyolm', # 0x52
'ssyolb', # 0x53
'ssyols', # 0x54
'ssyolt', # 0x55
'ssyolp', # 0x56
'ssyolh', # 0x57
'ssyom', # 0x58
'ssyob', # 0x59
'ssyobs', # 0x5a
'ssyos', # 0x5b
'ssyoss', # 0x5c
'ssyong', # 0x5d
'ssyoj', # 0x5e
'ssyoc', # 0x5f
'ssyok', # 0x60
'ssyot', # 0x61
'ssyop', # 0x62
'ssyoh', # 0x63
'ssu', # 0x64
'ssug', # 0x65
'ssugg', # 0x66
'ssugs', # 0x67
'ssun', # 0x68
'ssunj', # 0x69
'ssunh', # 0x6a
'ssud', # 0x6b
'ssul', # 0x6c
'ssulg', # 0x6d
'ssulm', # 0x6e
'ssulb', # 0x6f
'ssuls', # 0x70
'ssult', # 0x71
'ssulp', # 0x72
'ssulh', # 0x73
'ssum', # 0x74
'ssub', # 0x75
'ssubs', # 0x76
'ssus', # 0x77
'ssuss', # 0x78
'ssung', # 0x79
'ssuj', # 0x7a
'ssuc', # 0x7b
'ssuk', # 0x7c
'ssut', # 0x7d
'ssup', # 0x7e
'ssuh', # 0x7f
'ssweo', # 0x80
'ssweog', # 0x81
'ssweogg', # 0x82
'ssweogs', # 0x83
'ssweon', # 0x84
'ssweonj', # 0x85
'ssweonh', # 0x86
'ssweod', # 0x87
'ssweol', # 0x88
'ssweolg', # 0x89
'ssweolm', # 0x8a
'ssweolb', # 0x8b
'ssweols', # 0x8c
'ssweolt', # 0x8d
'ssweolp', # 0x8e
'ssweolh', # 0x8f
'ssweom', # 0x90
'ssweob', # 0x91
'ssweobs', # 0x92
'ssweos', # 0x93
'ssweoss', # 0x94
'ssweong', # 0x95
'ssweoj', # 0x96
'ssweoc', # 0x97
'ssweok', # 0x98
'ssweot', # 0x99
'ssweop', # 0x9a
'ssweoh', # 0x9b
'sswe', # 0x9c
'ssweg', # 0x9d
'sswegg', # 0x9e
'sswegs', # 0x9f
'sswen', # 0xa0
'sswenj', # 0xa1
'sswenh', # 0xa2
'sswed', # 0xa3
'sswel', # 0xa4
'sswelg', # 0xa5
'sswelm', # 0xa6
'sswelb', # 0xa7
'sswels', # 0xa8
'sswelt', # 0xa9
'sswelp', # 0xaa
'sswelh', # 0xab
'sswem', # 0xac
'ssweb', # 0xad
'sswebs', # 0xae
'sswes', # 0xaf
'sswess', # 0xb0
'ssweng', # 0xb1
'sswej', # 0xb2
'sswec', # 0xb3
'sswek', # 0xb4
'sswet', # 0xb5
'sswep', # 0xb6
'ssweh', # 0xb7
'sswi', # 0xb8
'sswig', # 0xb9
'sswigg', # 0xba
'sswigs', # 0xbb
'sswin', # 0xbc
'sswinj', # 0xbd
'sswinh', # 0xbe
'sswid', # 0xbf
'sswil', # 0xc0
'sswilg', # 0xc1
'sswilm', # 0xc2
'sswilb', # 0xc3
'sswils', # 0xc4
'sswilt', # 0xc5
'sswilp', # 0xc6
'sswilh', # 0xc7
'sswim', # 0xc8
'sswib', # 0xc9
'sswibs', # 0xca
'sswis', # 0xcb
'sswiss', # 0xcc
'sswing', # 0xcd
'sswij', # 0xce
'sswic', # 0xcf
'sswik', # 0xd0
'sswit', # 0xd1
'sswip', # 0xd2
'sswih', # 0xd3
'ssyu', # 0xd4
'ssyug', # 0xd5
'ssyugg', # 0xd6
'ssyugs', # 0xd7
'ssyun', # 0xd8
'ssyunj', # 0xd9
'ssyunh', # 0xda
'ssyud', # 0xdb
'ssyul', # 0xdc
'ssyulg', # 0xdd
'ssyulm', # 0xde
'ssyulb', # 0xdf
'ssyuls', # 0xe0
'ssyult', # 0xe1
'ssyulp', # 0xe2
'ssyulh', # 0xe3
'ssyum', # 0xe4
'ssyub', # 0xe5
'ssyubs', # 0xe6
'ssyus', # 0xe7
'ssyuss', # 0xe8
'ssyung', # 0xe9
'ssyuj', # 0xea
'ssyuc', # 0xeb
'ssyuk', # 0xec
'ssyut', # 0xed
'ssyup', # 0xee
'ssyuh', # 0xef
'sseu', # 0xf0
'sseug', # 0xf1
'sseugg', # 0xf2
'sseugs', # 0xf3
'sseun', # 0xf4
'sseunj', # 0xf5
'sseunh', # 0xf6
'sseud', # 0xf7
'sseul', # 0xf8
'sseulg', # 0xf9
'sseulm', # 0xfa
'sseulb', # 0xfb
'sseuls', # 0xfc
'sseult', # 0xfd
'sseulp', # 0xfe
'sseulh', # 0xff
)
|
gpl-3.0
|
frostynova/calico-docker
|
calico_containers/adapter/docker_restart.py
|
1
|
5239
|
# Copyright 2015 Metaswitch Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
import sh
import fileinput
import sys
"""
Update docker to use a different unix socket, so powerstrip can run
its proxy on the "normal" one. This provides simple access for
existing tools to the powerstrip proxy.
Set the docker daemon to listen on the docker.real.sock by updating
the config, clearing old sockets and restarting.
Currently "support" upstart (Debian/Ubuntu) and systemd (Redhat/Centos) but
the logic for detecting and modifying config is brittle and lightly tested.
Use with caution...
"""
REAL_SOCK = "/var/run/docker.real.sock"
POWERSTRIP_SOCK = "/var/run/docker.sock"
def _replace_all(filename, search, replace):
for line in fileinput.input(filename, inplace=1):
if search in line:
line = line.replace(search, replace)
sys.stdout.write(line)
def create_restarter():
"""
Detect what init system is being used and return the appropriate handler.
:return: A "restarter" object.
"""
if os.path.exists(SystemdRestarter.DOCKER_SYSTEMD_SERVICE):
return SystemdRestarter()
elif os.path.exists(UpstartRestarter.DOCKER_DEFAULT_FILENAME):
return UpstartRestarter()
else:
return NullRestarter()
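# --- Editor's illustrative sketch (not part of the original module) ---
# Typical use: detect the init system, then move docker onto the alternative
# socket so powerstrip can own /var/run/docker.sock:
#
#   restarter = create_restarter()
#   if not restarter.is_using_alternative_socket():
#       restarter.restart_docker_with_alternative_unix_socket()
#
# On an unsupported init system create_restarter() returns a NullRestarter,
# whose restart methods simply print "Unsupported".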
class NullRestarter():
def is_using_alternative_socket(self):
return False
def restart_docker_with_alternative_unix_socket(self):
print "Unsupported"
def restart_docker_without_alternative_unix_socket(self):
print "Unsupported"
def _clean_socks():
if os.path.exists(REAL_SOCK):
os.remove(REAL_SOCK)
if os.path.exists(POWERSTRIP_SOCK):
os.remove(POWERSTRIP_SOCK)
class SystemdRestarter():
DOCKER_SYSTEMD_SERVICE = "/usr/lib/systemd/system/docker.service"
SYSTEMD_DEFAULT = "ExecStart=/usr/bin/docker -d $OPTIONS \\"
SYSTEMD_MODIFIED = "ExecStart=/usr/bin/docker -d $OPTIONS " \
"-H unix://%s \\" % REAL_SOCK
def _clean_restart_docker(self, sock_to_wait_on):
_clean_socks()
systemctl = sh.Command._create("systemctl")
systemctl("daemon-reload")
systemctl("restart", "docker.service")
# Wait for docker to create the socket
while not os.path.exists(sock_to_wait_on):
time.sleep(0.1)
def is_using_alternative_socket(self):
if self.SYSTEMD_MODIFIED in open(self.DOCKER_SYSTEMD_SERVICE).read():
return True
def restart_docker_with_alternative_unix_socket(self):
if not self.is_using_alternative_socket():
_replace_all(self.DOCKER_SYSTEMD_SERVICE,
self.SYSTEMD_DEFAULT,
self.SYSTEMD_MODIFIED)
self._clean_restart_docker(REAL_SOCK)
# Always remove the socket that powerstrip will use, as it gets upset
# otherwise.
if os.path.exists(POWERSTRIP_SOCK):
os.remove(POWERSTRIP_SOCK)
def restart_docker_without_alternative_unix_socket(self):
if self.is_using_alternative_socket():
_replace_all(self.DOCKER_SYSTEMD_SERVICE,
self.SYSTEMD_MODIFIED,
self.SYSTEMD_DEFAULT)
self._clean_restart_docker(POWERSTRIP_SOCK)
class UpstartRestarter():
DOCKER_DEFAULT_FILENAME = "/etc/default/docker"
DOCKER_OPTIONS = 'DOCKER_OPTS="-H unix://%s"' % REAL_SOCK
def _clean_restart_docker(self, sock_to_wait_on):
_clean_socks()
restart = sh.Command._create("restart")
restart("docker")
# Wait for docker to create the socket
while not os.path.exists(sock_to_wait_on):
time.sleep(0.1)
def is_using_alternative_socket(self):
if self.DOCKER_OPTIONS in open(self.DOCKER_DEFAULT_FILENAME).read():
return True
def restart_docker_with_alternative_unix_socket(self):
if not self.is_using_alternative_socket():
with open(self.DOCKER_DEFAULT_FILENAME, "a") as docker_config:
docker_config.write(self.DOCKER_OPTIONS)
self._clean_restart_docker(REAL_SOCK)
# Always remove the socket that powerstrip will use, as it gets upset
# otherwise.
if os.path.exists(POWERSTRIP_SOCK):
os.remove(POWERSTRIP_SOCK)
def restart_docker_without_alternative_unix_socket(self):
if self.is_using_alternative_socket():
good_lines = [line for line in open(
self.DOCKER_DEFAULT_FILENAME)
if self.DOCKER_OPTIONS not in line]
open(self.DOCKER_DEFAULT_FILENAME, 'w').writelines(good_lines)
self._clean_restart_docker(POWERSTRIP_SOCK)
|
apache-2.0
|
DanteOnline/free-art
|
venv/lib/python3.4/site-packages/django/contrib/postgres/aggregates/statistics.py
|
493
|
2033
|
from django.db.models import FloatField, IntegerField
from django.db.models.aggregates import Aggregate
__all__ = [
'CovarPop', 'Corr', 'RegrAvgX', 'RegrAvgY', 'RegrCount', 'RegrIntercept',
'RegrR2', 'RegrSlope', 'RegrSXX', 'RegrSXY', 'RegrSYY', 'StatAggregate',
]
class StatAggregate(Aggregate):
def __init__(self, y, x, output_field=FloatField()):
if not x or not y:
raise ValueError('Both y and x must be provided.')
super(StatAggregate, self).__init__(y=y, x=x, output_field=output_field)
self.x = x
self.y = y
self.source_expressions = self._parse_expressions(self.y, self.x)
def get_source_expressions(self):
return self.y, self.x
def set_source_expressions(self, exprs):
self.y, self.x = exprs
def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
return super(Aggregate, self).resolve_expression(query, allow_joins, reuse, summarize)
class Corr(StatAggregate):
function = 'CORR'
class CovarPop(StatAggregate):
def __init__(self, y, x, sample=False):
self.function = 'COVAR_SAMP' if sample else 'COVAR_POP'
super(CovarPop, self).__init__(y, x)
class RegrAvgX(StatAggregate):
function = 'REGR_AVGX'
class RegrAvgY(StatAggregate):
function = 'REGR_AVGY'
class RegrCount(StatAggregate):
function = 'REGR_COUNT'
def __init__(self, y, x):
super(RegrCount, self).__init__(y=y, x=x, output_field=IntegerField())
def convert_value(self, value, expression, connection, context):
if value is None:
return 0
return int(value)
class RegrIntercept(StatAggregate):
function = 'REGR_INTERCEPT'
class RegrR2(StatAggregate):
function = 'REGR_R2'
class RegrSlope(StatAggregate):
function = 'REGR_SLOPE'
class RegrSXX(StatAggregate):
function = 'REGR_SXX'
class RegrSXY(StatAggregate):
function = 'REGR_SXY'
class RegrSYY(StatAggregate):
function = 'REGR_SYY'
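# --- Editor's illustrative sketch (not part of the original module) ---
# These aggregates are used like any other Django aggregate against a
# PostgreSQL database; the model and field names below are hypothetical:
#
#   from django.contrib.postgres.aggregates import Corr, RegrSlope
#   Measurement.objects.aggregate(
#       correlation=Corr(y='response_ms', x='payload_bytes'),
#       slope=RegrSlope(y='response_ms', x='payload_bytes'),
#   )
#   # -> {'correlation': <float or None>, 'slope': <float or None>}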
|
gpl-3.0
|
slyphon/pants
|
tests/python/pants_test/base/test_build_environment.py
|
33
|
1581
|
# coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
import unittest
from pants.base.build_environment import get_pants_cachedir, get_pants_configdir
from pants.util.contextutil import environment_as
from pants.util.fileutil import temporary_file
class TestBuildEnvironment(unittest.TestCase):
"""Test class for pants.base.build_environment."""
def test_get_configdir(self):
with environment_as(XDG_CONFIG_HOME=''):
self.assertEquals(os.path.expanduser('~/.config/pants'), get_pants_configdir())
def test_get_cachedir(self):
with environment_as(XDG_CACHE_HOME=''):
self.assertEquals(os.path.expanduser('~/.cache/pants'), get_pants_cachedir())
def test_set_configdir(self):
with temporary_file() as temp:
with environment_as(XDG_CONFIG_HOME=temp.name):
self.assertEquals(os.path.join(temp.name, 'pants'), get_pants_configdir())
def test_set_cachedir(self):
with temporary_file() as temp:
with environment_as(XDG_CACHE_HOME=temp.name):
self.assertEquals(os.path.join(temp.name, 'pants'), get_pants_cachedir())
def test_expand_home_configdir(self):
with environment_as(XDG_CONFIG_HOME='~/somewhere/in/home'):
self.assertEquals(os.path.expanduser(os.path.join('~/somewhere/in/home', 'pants')),
get_pants_configdir())
|
apache-2.0
|
jlcarmic/producthunt_simulator
|
venv/lib/python2.7/site-packages/pip/locations.py
|
173
|
5632
|
"""Locations where we look for configs, install stuff, etc"""
from __future__ import absolute_import
import os
import os.path
import site
import sys
from distutils import sysconfig
from distutils.command.install import install, SCHEME_KEYS # noqa
from pip.compat import WINDOWS, expanduser
from pip.utils import appdirs
# Application Directories
USER_CACHE_DIR = appdirs.user_cache_dir("pip")
DELETE_MARKER_MESSAGE = '''\
This file is placed here by pip to indicate the source was put
here by pip.
Once this package is successfully installed this source code will be
deleted (unless you remove this file).
'''
PIP_DELETE_MARKER_FILENAME = 'pip-delete-this-directory.txt'
def write_delete_marker_file(directory):
"""
Write the pip delete marker file into this directory.
"""
filepath = os.path.join(directory, PIP_DELETE_MARKER_FILENAME)
with open(filepath, 'w') as marker_fp:
marker_fp.write(DELETE_MARKER_MESSAGE)
def running_under_virtualenv():
"""
Return True if we're running inside a virtualenv, False otherwise.
"""
if hasattr(sys, 'real_prefix'):
return True
elif sys.prefix != getattr(sys, "base_prefix", sys.prefix):
return True
return False
def virtualenv_no_global():
"""
Return True if in a venv and no system site packages.
"""
# this mirrors the logic in virtualenv.py for locating the
# no-global-site-packages.txt file
site_mod_dir = os.path.dirname(os.path.abspath(site.__file__))
no_global_file = os.path.join(site_mod_dir, 'no-global-site-packages.txt')
if running_under_virtualenv() and os.path.isfile(no_global_file):
return True
if running_under_virtualenv():
src_prefix = os.path.join(sys.prefix, 'src')
else:
# FIXME: keep src in cwd for now (it is not a temporary folder)
try:
src_prefix = os.path.join(os.getcwd(), 'src')
except OSError:
# In case the current working directory has been renamed or deleted
sys.exit(
"The folder you are executing pip from can no longer be found."
)
# under Mac OS X + virtualenv sys.prefix is not properly resolved
# it is something like /path/to/python/bin/..
# Note: using realpath due to tmp dirs on OSX being symlinks
src_prefix = os.path.abspath(src_prefix)
# FIXME doesn't account for venv linked to global site-packages
site_packages = sysconfig.get_python_lib()
user_site = site.USER_SITE
user_dir = expanduser('~')
if WINDOWS:
bin_py = os.path.join(sys.prefix, 'Scripts')
bin_user = os.path.join(user_site, 'Scripts')
# buildout uses 'bin' on Windows too?
if not os.path.exists(bin_py):
bin_py = os.path.join(sys.prefix, 'bin')
bin_user = os.path.join(user_site, 'bin')
config_basename = 'pip.ini'
legacy_storage_dir = os.path.join(user_dir, 'pip')
legacy_config_file = os.path.join(
legacy_storage_dir,
config_basename,
)
else:
bin_py = os.path.join(sys.prefix, 'bin')
bin_user = os.path.join(user_site, 'bin')
config_basename = 'pip.conf'
legacy_storage_dir = os.path.join(user_dir, '.pip')
legacy_config_file = os.path.join(
legacy_storage_dir,
config_basename,
)
# Forcing to use /usr/local/bin for standard Mac OS X framework installs
# Also log to ~/Library/Logs/ for use with the Console.app log viewer
if sys.platform[:6] == 'darwin' and sys.prefix[:16] == '/System/Library/':
bin_py = '/usr/local/bin'
site_config_files = [
os.path.join(path, config_basename)
for path in appdirs.site_config_dirs('pip')
]
def distutils_scheme(dist_name, user=False, home=None, root=None,
isolated=False, prefix=None):
"""
Return a distutils install scheme
"""
from distutils.dist import Distribution
scheme = {}
if isolated:
extra_dist_args = {"script_args": ["--no-user-cfg"]}
else:
extra_dist_args = {}
dist_args = {'name': dist_name}
dist_args.update(extra_dist_args)
d = Distribution(dist_args)
d.parse_config_files()
i = d.get_command_obj('install', create=True)
# NOTE: setting user or home has the side-effect of creating the home dir
# or user base for installations during finalize_options()
# ideally, we'd prefer a scheme class that has no side-effects.
assert not (user and prefix), "user={0} prefix={1}".format(user, prefix)
i.user = user or i.user
if user:
i.prefix = ""
i.prefix = prefix or i.prefix
i.home = home or i.home
i.root = root or i.root
i.finalize_options()
for key in SCHEME_KEYS:
scheme[key] = getattr(i, 'install_' + key)
# install_lib specified in setup.cfg should install *everything*
# into there (i.e. it takes precedence over both purelib and
# platlib). Note, i.install_lib is *always* set after
# finalize_options(); we only want to override here if the user
# has explicitly requested it hence going back to the config
if 'install_lib' in d.get_option_dict('install'):
scheme.update(dict(purelib=i.install_lib, platlib=i.install_lib))
if running_under_virtualenv():
scheme['headers'] = os.path.join(
sys.prefix,
'include',
'site',
'python' + sys.version[:3],
dist_name,
)
if root is not None:
path_no_drive = os.path.splitdrive(
os.path.abspath(scheme["headers"]))[1]
scheme["headers"] = os.path.join(
root,
path_no_drive[1:],
)
return scheme
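# --- Editor's illustrative sketch (not part of the original module) ---
# distutils_scheme() returns one path per SCHEME_KEYS entry; the package name
# below is hypothetical and the exact paths depend on platform and interpreter:
#
#   scheme = distutils_scheme('example-package', user=True)
#   scheme['purelib']   # e.g. ~/.local/lib/pythonX.Y/site-packages
#   scheme['scripts']   # e.g. ~/.local/bin
#   scheme['headers']   # include directory for the named distribution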
|
mit
|
angdraug/nova
|
nova/api/ec2/cloud.py
|
4
|
86495
|
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Cloud Controller: Implementation of EC2 REST API calls, which are
dispatched to other nodes via AMQP RPC. State is maintained in a
distributed datastore.
"""
import base64
import time
from oslo.config import cfg
from oslo.utils import timeutils
from nova.api.ec2 import ec2utils
from nova.api.ec2 import inst_state
from nova.api.metadata import password
from nova.api.openstack import extensions
from nova.api import validator
from nova import availability_zones
from nova import block_device
from nova.cloudpipe import pipelib
from nova import compute
from nova.compute import api as compute_api
from nova.compute import vm_states
from nova import exception
from nova.i18n import _
from nova.i18n import _LW
from nova.image import s3
from nova import network
from nova.network.security_group import neutron_driver
from nova import objects
from nova.objects import base as obj_base
from nova.openstack.common import log as logging
from nova import quota
from nova import servicegroup
from nova import utils
from nova import volume
ec2_opts = [
cfg.StrOpt('ec2_host',
default='$my_ip',
help='The IP address of the EC2 API server'),
cfg.StrOpt('ec2_dmz_host',
default='$my_ip',
help='The internal IP address of the EC2 API server'),
cfg.IntOpt('ec2_port',
default=8773,
help='The port of the EC2 API server'),
cfg.StrOpt('ec2_scheme',
default='http',
help='The protocol to use when connecting to the EC2 API '
'server (http, https)'),
cfg.StrOpt('ec2_path',
default='/services/Cloud',
help='The path prefix used to call the ec2 API server'),
cfg.ListOpt('region_list',
default=[],
help='List of region=fqdn pairs separated by commas'),
]
CONF = cfg.CONF
CONF.register_opts(ec2_opts)
CONF.import_opt('my_ip', 'nova.netconf')
CONF.import_opt('vpn_key_suffix', 'nova.cloudpipe.pipelib')
CONF.import_opt('internal_service_availability_zone',
'nova.availability_zones')
LOG = logging.getLogger(__name__)
QUOTAS = quota.QUOTAS
# EC2 ID can return the following error codes:
# http://docs.aws.amazon.com/AWSEC2/latest/APIReference/api-error-codes.html
# Validate methods are split to return valid EC2 error codes for different
# resource types
def _validate_ec2_id(val):
if not validator.validate_str()(val):
raise exception.InvalidEc2Id(ec2_id=val)
ec2utils.ec2_id_to_id(val)
def validate_volume_id(volume_id):
try:
_validate_ec2_id(volume_id)
except exception.InvalidEc2Id:
raise exception.InvalidVolumeIDMalformed(volume_id=volume_id)
def validate_instance_id(instance_id):
try:
_validate_ec2_id(instance_id)
except exception.InvalidEc2Id:
raise exception.InvalidInstanceIDMalformed(instance_id=instance_id)
# EC2 API can return the following values as documented in the EC2 API
# http://docs.amazonwebservices.com/AWSEC2/latest/APIReference/
# ApiReference-ItemType-InstanceStateType.html
# pending 0 | running 16 | shutting-down 32 | terminated 48 | stopping 64 |
# stopped 80
_STATE_DESCRIPTION_MAP = {
None: inst_state.PENDING,
vm_states.ACTIVE: inst_state.RUNNING,
vm_states.BUILDING: inst_state.PENDING,
vm_states.DELETED: inst_state.TERMINATED,
vm_states.SOFT_DELETED: inst_state.TERMINATED,
vm_states.STOPPED: inst_state.STOPPED,
vm_states.PAUSED: inst_state.PAUSE,
vm_states.SUSPENDED: inst_state.SUSPEND,
vm_states.RESCUED: inst_state.RESCUE,
vm_states.RESIZED: inst_state.RESIZE,
}
def _state_description(vm_state, _shutdown_terminate):
"""Map the vm state to the server status string."""
# Note(maoy): We do not provide EC2 compatibility
# in shutdown_terminate flag behavior. So we ignore
# it here.
name = _STATE_DESCRIPTION_MAP.get(vm_state, vm_state)
return {'code': inst_state.name_to_code(name),
'name': name}
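# Illustrative sketch, assuming the usual EC2 state codes exposed by
# inst_state (running == 16, stopped == 80, per the table above):
#     _state_description(vm_states.ACTIVE, False)
#     # => {'code': 16, 'name': 'running'}
#     _state_description(vm_states.STOPPED, False)
#     # => {'code': 80, 'name': 'stopped'}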
def _parse_block_device_mapping(bdm):
"""Parse BlockDeviceMappingItemType into flat hash
BlockDevicedMapping.<N>.DeviceName
BlockDevicedMapping.<N>.Ebs.SnapshotId
BlockDevicedMapping.<N>.Ebs.VolumeSize
BlockDevicedMapping.<N>.Ebs.DeleteOnTermination
BlockDevicedMapping.<N>.Ebs.NoDevice
BlockDevicedMapping.<N>.VirtualName
=> remove .Ebs and allow volume id in SnapshotId
"""
ebs = bdm.pop('ebs', None)
if ebs:
ec2_id = ebs.pop('snapshot_id', None)
if ec2_id:
if ec2_id.startswith('snap-'):
bdm['snapshot_id'] = ec2utils.ec2_snap_id_to_uuid(ec2_id)
elif ec2_id.startswith('vol-'):
bdm['volume_id'] = ec2utils.ec2_vol_id_to_uuid(ec2_id)
ebs.setdefault('delete_on_termination', True)
bdm.update(ebs)
return bdm
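# Illustrative sketch (values assumed): given the nested EC2 form
#     {'device_name': '/dev/sdb',
#      'ebs': {'snapshot_id': 'snap-0000000c', 'volume_size': 10}}
# _parse_block_device_mapping flattens it into roughly
#     {'device_name': '/dev/sdb',
#      'snapshot_id': <uuid from ec2utils.ec2_snap_id_to_uuid('snap-0000000c')>,
#      'volume_size': 10,
#      'delete_on_termination': True}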
def _properties_get_mappings(properties):
return block_device.mappings_prepend_dev(properties.get('mappings', []))
def _format_block_device_mapping(bdm):
"""Construct BlockDeviceMappingItemType
{'device_name': '...', 'snapshot_id': , ...}
=> BlockDeviceMappingItemType
"""
keys = (('deviceName', 'device_name'),
('virtualName', 'virtual_name'))
item = {}
for name, k in keys:
if k in bdm:
item[name] = bdm[k]
if bdm.get('no_device'):
item['noDevice'] = True
if ('snapshot_id' in bdm) or ('volume_id' in bdm):
ebs_keys = (('snapshotId', 'snapshot_id'),
('snapshotId', 'volume_id'), # snapshotId is abused
('volumeSize', 'volume_size'),
('deleteOnTermination', 'delete_on_termination'))
ebs = {}
for name, k in ebs_keys:
if bdm.get(k) is not None:
if k == 'snapshot_id':
ebs[name] = ec2utils.id_to_ec2_snap_id(bdm[k])
elif k == 'volume_id':
ebs[name] = ec2utils.id_to_ec2_vol_id(bdm[k])
else:
ebs[name] = bdm[k]
assert 'snapshotId' in ebs
item['ebs'] = ebs
return item
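# Illustrative sketch (values assumed) of the reverse direction: a flat
# internal mapping such as
#     {'device_name': '/dev/sdb', 'volume_id': <uuid>,
#      'volume_size': 10, 'delete_on_termination': False}
# is rendered by _format_block_device_mapping as roughly
#     {'deviceName': '/dev/sdb',
#      'ebs': {'snapshotId': <EC2 volume id>,  # snapshotId is reused for volumes
#              'volumeSize': 10,
#              'deleteOnTermination': False}}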
def _format_mappings(properties, result):
"""Format multiple BlockDeviceMappingItemType."""
mappings = [{'virtualName': m['virtual'], 'deviceName': m['device']}
for m in _properties_get_mappings(properties)
if block_device.is_swap_or_ephemeral(m['virtual'])]
block_device_mapping = [_format_block_device_mapping(bdm) for bdm in
properties.get('block_device_mapping', [])]
# NOTE(yamahata): overwrite mappings with block_device_mapping
for bdm in block_device_mapping:
for i in range(len(mappings)):
if bdm.get('deviceName') == mappings[i].get('deviceName'):
del mappings[i]
break
mappings.append(bdm)
# NOTE(yamahata): trim ebs.no_device == true. Is this necessary?
mappings = [bdm for bdm in mappings if not (bdm.get('noDevice', False))]
if mappings:
result['blockDeviceMapping'] = mappings
class CloudController(object):
"""CloudController provides the critical dispatch between
inbound API calls through the endpoint and messages
sent to the other nodes.
"""
def __init__(self):
self.image_service = s3.S3ImageService()
self.network_api = network.API()
self.volume_api = volume.API()
self.security_group_api = get_cloud_security_group_api()
self.compute_api = compute.API(network_api=self.network_api,
volume_api=self.volume_api,
security_group_api=self.security_group_api)
self.keypair_api = compute_api.KeypairAPI()
self.servicegroup_api = servicegroup.API()
def __str__(self):
return 'CloudController'
def _enforce_valid_instance_ids(self, context, instance_ids):
# NOTE(mikal): Amazon's implementation of the EC2 API requires that
# _all_ instance ids passed in be valid.
instances = {}
if instance_ids:
for ec2_id in instance_ids:
instance_uuid = ec2utils.ec2_inst_id_to_uuid(context, ec2_id)
instance = self.compute_api.get(context, instance_uuid)
instances[ec2_id] = instance
return instances
def _get_image_state(self, image):
# NOTE(vish): fallback status if image_state isn't set
state = image.get('status')
if state == 'active':
state = 'available'
return image['properties'].get('image_state', state)
def describe_availability_zones(self, context, **kwargs):
if ('zone_name' in kwargs and
'verbose' in kwargs['zone_name'] and
context.is_admin):
return self._describe_availability_zones_verbose(context,
**kwargs)
else:
return self._describe_availability_zones(context, **kwargs)
def _describe_availability_zones(self, context, **kwargs):
ctxt = context.elevated()
available_zones, not_available_zones = \
availability_zones.get_availability_zones(ctxt)
result = []
for zone in available_zones:
# Hide internal_service_availability_zone
if zone == CONF.internal_service_availability_zone:
continue
result.append({'zoneName': zone,
'zoneState': "available"})
for zone in not_available_zones:
result.append({'zoneName': zone,
'zoneState': "not available"})
return {'availabilityZoneInfo': result}
def _describe_availability_zones_verbose(self, context, **kwargs):
ctxt = context.elevated()
available_zones, not_available_zones = \
availability_zones.get_availability_zones(ctxt)
# Available services
enabled_services = objects.ServiceList.get_all(context,
disabled=False, set_zones=True)
zone_hosts = {}
host_services = {}
for service in enabled_services:
zone_hosts.setdefault(service.availability_zone, [])
if service.host not in zone_hosts[service.availability_zone]:
zone_hosts[service.availability_zone].append(
service.host)
host_services.setdefault(service.availability_zone +
service.host, [])
host_services[service.availability_zone + service.host].\
append(service)
result = []
for zone in available_zones:
result.append({'zoneName': zone,
'zoneState': "available"})
for host in zone_hosts[zone]:
result.append({'zoneName': '|- %s' % host,
'zoneState': ''})
for service in host_services[zone + host]:
alive = self.servicegroup_api.service_is_up(service)
art = (alive and ":-)") or "XXX"
active = 'enabled'
if service.disabled:
active = 'disabled'
result.append({'zoneName': '| |- %s' % service.binary,
'zoneState': ('%s %s %s'
% (active, art,
service.updated_at))})
for zone in not_available_zones:
result.append({'zoneName': zone,
'zoneState': "not available"})
return {'availabilityZoneInfo': result}
def describe_regions(self, context, region_name=None, **kwargs):
if CONF.region_list:
regions = []
for region in CONF.region_list:
name, _sep, host = region.partition('=')
endpoint = '%s://%s:%s%s' % (CONF.ec2_scheme,
host,
CONF.ec2_port,
CONF.ec2_path)
regions.append({'regionName': name,
'regionEndpoint': endpoint})
else:
regions = [{'regionName': 'nova',
'regionEndpoint': '%s://%s:%s%s' % (CONF.ec2_scheme,
CONF.ec2_host,
CONF.ec2_port,
CONF.ec2_path)}]
return {'regionInfo': regions}
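    # Illustrative sketch (hostname assumed): with
    #     region_list = ['us-east=ec2.example.com']
    # and the default ec2_scheme/ec2_port/ec2_path registered above,
    # describe_regions would return
    #     {'regionInfo': [{'regionName': 'us-east',
    #                      'regionEndpoint':
    #                          'http://ec2.example.com:8773/services/Cloud'}]}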
def describe_snapshots(self,
context,
snapshot_id=None,
owner=None,
restorable_by=None,
**kwargs):
if snapshot_id:
snapshots = []
for ec2_id in snapshot_id:
internal_id = ec2utils.ec2_snap_id_to_uuid(ec2_id)
snapshot = self.volume_api.get_snapshot(
context,
snapshot_id=internal_id)
snapshots.append(snapshot)
else:
snapshots = self.volume_api.get_all_snapshots(context)
formatted_snapshots = []
for s in snapshots:
formatted = self._format_snapshot(context, s)
if formatted:
formatted_snapshots.append(formatted)
return {'snapshotSet': formatted_snapshots}
def _format_snapshot(self, context, snapshot):
# NOTE(mikal): this is just a set of strings in cinder. If they
# implement an enum, then we should move this code to use it. The
# valid ec2 statuses are "pending", "completed", and "error".
status_map = {'new': 'pending',
'creating': 'pending',
'available': 'completed',
'active': 'completed',
'deleting': 'pending',
'deleted': None,
'error': 'error'}
mapped_status = status_map.get(snapshot['status'], snapshot['status'])
if not mapped_status:
return None
s = {}
s['snapshotId'] = ec2utils.id_to_ec2_snap_id(snapshot['id'])
s['volumeId'] = ec2utils.id_to_ec2_vol_id(snapshot['volume_id'])
s['status'] = mapped_status
s['startTime'] = snapshot['created_at']
s['progress'] = snapshot['progress']
s['ownerId'] = snapshot['project_id']
s['volumeSize'] = snapshot['volume_size']
s['description'] = snapshot['display_description']
return s
def create_snapshot(self, context, volume_id, **kwargs):
validate_volume_id(volume_id)
LOG.audit(_("Create snapshot of volume %s"), volume_id,
context=context)
volume_id = ec2utils.ec2_vol_id_to_uuid(volume_id)
args = (context, volume_id, kwargs.get('name'),
kwargs.get('description'))
if kwargs.get('force', False):
snapshot = self.volume_api.create_snapshot_force(*args)
else:
snapshot = self.volume_api.create_snapshot(*args)
smap = objects.EC2SnapshotMapping(context, uuid=snapshot['id'])
smap.create()
return self._format_snapshot(context, snapshot)
def delete_snapshot(self, context, snapshot_id, **kwargs):
snapshot_id = ec2utils.ec2_snap_id_to_uuid(snapshot_id)
self.volume_api.delete_snapshot(context, snapshot_id)
return True
def describe_key_pairs(self, context, key_name=None, **kwargs):
key_pairs = self.keypair_api.get_key_pairs(context, context.user_id)
if key_name is not None:
key_pairs = [x for x in key_pairs if x['name'] in key_name]
        # If looking for a non-existent key pair
if key_name is not None and not key_pairs:
msg = _('Could not find key pair(s): %s') % ','.join(key_name)
raise exception.KeypairNotFound(message=msg)
result = []
for key_pair in key_pairs:
# filter out the vpn keys
suffix = CONF.vpn_key_suffix
if context.is_admin or not key_pair['name'].endswith(suffix):
result.append({
'keyName': key_pair['name'],
'keyFingerprint': key_pair['fingerprint'],
})
return {'keySet': result}
def create_key_pair(self, context, key_name, **kwargs):
LOG.audit(_("Create key pair %s"), key_name, context=context)
keypair, private_key = self.keypair_api.create_key_pair(
context, context.user_id, key_name)
return {'keyName': key_name,
'keyFingerprint': keypair['fingerprint'],
'keyMaterial': private_key}
# TODO(vish): when context is no longer an object, pass it here
def import_key_pair(self, context, key_name, public_key_material,
**kwargs):
LOG.audit(_("Import key %s"), key_name, context=context)
public_key = base64.b64decode(public_key_material)
keypair = self.keypair_api.import_key_pair(context,
context.user_id,
key_name,
public_key)
return {'keyName': key_name,
'keyFingerprint': keypair['fingerprint']}
def delete_key_pair(self, context, key_name, **kwargs):
LOG.audit(_("Delete key pair %s"), key_name, context=context)
try:
self.keypair_api.delete_key_pair(context, context.user_id,
key_name)
except exception.NotFound:
# aws returns true even if the key doesn't exist
pass
return True
def describe_security_groups(self, context, group_name=None, group_id=None,
**kwargs):
search_opts = ec2utils.search_opts_from_filters(kwargs.get('filter'))
raw_groups = self.security_group_api.list(context,
group_name,
group_id,
context.project_id,
search_opts=search_opts)
groups = [self._format_security_group(context, g) for g in raw_groups]
return {'securityGroupInfo':
list(sorted(groups,
key=lambda k: (k['ownerId'], k['groupName'])))}
def _format_security_group(self, context, group):
g = {}
g['groupDescription'] = group['description']
g['groupName'] = group['name']
g['ownerId'] = group['project_id']
g['ipPermissions'] = []
for rule in group['rules']:
r = {}
r['groups'] = []
r['ipRanges'] = []
if rule['group_id']:
if rule.get('grantee_group'):
source_group = rule['grantee_group']
r['groups'] += [{'groupName': source_group['name'],
'userId': source_group['project_id']}]
else:
# rule is not always joined with grantee_group
# for example when using neutron driver.
source_group = self.security_group_api.get(
context, id=rule['group_id'])
r['groups'] += [{'groupName': source_group.get('name'),
'userId': source_group.get('project_id')}]
if rule['protocol']:
r['ipProtocol'] = rule['protocol'].lower()
r['fromPort'] = rule['from_port']
r['toPort'] = rule['to_port']
g['ipPermissions'] += [dict(r)]
else:
for protocol, min_port, max_port in (('icmp', -1, -1),
('tcp', 1, 65535),
('udp', 1, 65535)):
r['ipProtocol'] = protocol
r['fromPort'] = min_port
r['toPort'] = max_port
g['ipPermissions'] += [dict(r)]
else:
r['ipProtocol'] = rule['protocol']
r['fromPort'] = rule['from_port']
r['toPort'] = rule['to_port']
r['ipRanges'] += [{'cidrIp': rule['cidr']}]
g['ipPermissions'] += [r]
return g
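    # Illustrative sketch (values assumed) of the structure built above for a
    # single TCP rule that is open to a CIDR:
    #     {'groupDescription': 'web servers',
    #      'groupName': 'web',
    #      'ownerId': '<project id>',
    #      'ipPermissions': [{'ipProtocol': 'tcp',
    #                         'fromPort': 80,
    #                         'toPort': 80,
    #                         'groups': [],
    #                         'ipRanges': [{'cidrIp': '0.0.0.0/0'}]}]}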
def _rule_args_to_dict(self, context, kwargs):
rules = []
if 'groups' not in kwargs and 'ip_ranges' not in kwargs:
rule = self._rule_dict_last_step(context, **kwargs)
if rule:
rules.append(rule)
return rules
if 'ip_ranges' in kwargs:
rules = self._cidr_args_split(kwargs)
else:
rules = [kwargs]
finalset = []
for rule in rules:
if 'groups' in rule:
groups_values = self._groups_args_split(rule)
for groups_value in groups_values:
final = self._rule_dict_last_step(context, **groups_value)
finalset.append(final)
else:
final = self._rule_dict_last_step(context, **rule)
finalset.append(final)
return finalset
def _cidr_args_split(self, kwargs):
cidr_args_split = []
cidrs = kwargs['ip_ranges']
for key, cidr in cidrs.iteritems():
mykwargs = kwargs.copy()
del mykwargs['ip_ranges']
mykwargs['cidr_ip'] = cidr['cidr_ip']
cidr_args_split.append(mykwargs)
return cidr_args_split
def _groups_args_split(self, kwargs):
groups_args_split = []
groups = kwargs['groups']
for key, group in groups.iteritems():
mykwargs = kwargs.copy()
del mykwargs['groups']
if 'group_name' in group:
mykwargs['source_security_group_name'] = group['group_name']
if 'user_id' in group:
mykwargs['source_security_group_owner_id'] = group['user_id']
if 'group_id' in group:
mykwargs['source_security_group_id'] = group['group_id']
groups_args_split.append(mykwargs)
return groups_args_split
def _rule_dict_last_step(self, context, to_port=None, from_port=None,
ip_protocol=None, cidr_ip=None, user_id=None,
source_security_group_name=None,
source_security_group_owner_id=None):
if source_security_group_name:
source_project_id = self._get_source_project_id(context,
source_security_group_owner_id)
source_security_group = objects.SecurityGroup.get_by_name(
context.elevated(),
source_project_id,
source_security_group_name)
notfound = exception.SecurityGroupNotFound
if not source_security_group:
raise notfound(security_group_id=source_security_group_name)
group_id = source_security_group.id
return self.security_group_api.new_group_ingress_rule(
group_id, ip_protocol, from_port, to_port)
else:
cidr = self.security_group_api.parse_cidr(cidr_ip)
return self.security_group_api.new_cidr_ingress_rule(
cidr, ip_protocol, from_port, to_port)
def _validate_group_identifier(self, group_name, group_id):
if not group_name and not group_id:
err = _("need group_name or group_id")
raise exception.MissingParameter(reason=err)
def _validate_rulevalues(self, rulesvalues):
if not rulesvalues:
err = _("can't build a valid rule")
raise exception.MissingParameter(reason=err)
def _validate_security_group_protocol(self, values):
validprotocols = ['tcp', 'udp', 'icmp', '6', '17', '1']
if 'ip_protocol' in values and \
values['ip_protocol'] not in validprotocols:
protocol = values['ip_protocol']
err = _("Invalid IP protocol %(protocol)s") % \
{'protocol': protocol}
raise exception.InvalidParameterValue(message=err)
def revoke_security_group_ingress(self, context, group_name=None,
group_id=None, **kwargs):
self._validate_group_identifier(group_name, group_id)
security_group = self.security_group_api.get(context, group_name,
group_id)
extensions.check_compute_policy(context, 'security_groups',
security_group, 'compute_extension')
prevalues = kwargs.get('ip_permissions', [kwargs])
rule_ids = []
for values in prevalues:
rulesvalues = self._rule_args_to_dict(context, values)
self._validate_rulevalues(rulesvalues)
for values_for_rule in rulesvalues:
values_for_rule['parent_group_id'] = security_group['id']
rule_ids.append(self.security_group_api.rule_exists(
security_group, values_for_rule))
rule_ids = [id for id in rule_ids if id]
if rule_ids:
self.security_group_api.remove_rules(context, security_group,
rule_ids)
return True
msg = _("No rule for the specified parameters.")
raise exception.InvalidParameterValue(message=msg)
# TODO(soren): This has only been tested with Boto as the client.
# Unfortunately, it seems Boto is using an old API
# for these operations, so support for newer API versions
# is sketchy.
def authorize_security_group_ingress(self, context, group_name=None,
group_id=None, **kwargs):
self._validate_group_identifier(group_name, group_id)
security_group = self.security_group_api.get(context, group_name,
group_id)
extensions.check_compute_policy(context, 'security_groups',
security_group, 'compute_extension')
prevalues = kwargs.get('ip_permissions', [kwargs])
postvalues = []
for values in prevalues:
self._validate_security_group_protocol(values)
rulesvalues = self._rule_args_to_dict(context, values)
self._validate_rulevalues(rulesvalues)
for values_for_rule in rulesvalues:
values_for_rule['parent_group_id'] = security_group['id']
if self.security_group_api.rule_exists(security_group,
values_for_rule):
raise exception.SecurityGroupRuleExists(
rule=values_for_rule)
postvalues.append(values_for_rule)
if postvalues:
self.security_group_api.add_rules(context, security_group['id'],
security_group['name'], postvalues)
return True
msg = _("No rule for the specified parameters.")
raise exception.InvalidParameterValue(message=msg)
def _get_source_project_id(self, context, source_security_group_owner_id):
if source_security_group_owner_id:
# Parse user:project for source group.
source_parts = source_security_group_owner_id.split(':')
# If no project name specified, assume it's same as user name.
# Since we're looking up by project name, the user name is not
# used here. It's only read for EC2 API compatibility.
if len(source_parts) == 2:
source_project_id = source_parts[1]
else:
source_project_id = source_parts[0]
else:
source_project_id = context.project_id
return source_project_id
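    # Illustrative sketch (names assumed): the owner id may carry an optional
    # "user:project" prefix and only the project part is used:
    #     _get_source_project_id(ctx, 'alice:proj-123')  # => 'proj-123'
    #     _get_source_project_id(ctx, 'proj-123')        # => 'proj-123'
    #     _get_source_project_id(ctx, None)              # => ctx.project_id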
def create_security_group(self, context, group_name, group_description):
if isinstance(group_name, unicode):
group_name = utils.utf8(group_name)
if CONF.ec2_strict_validation:
# EC2 specification gives constraints for name and description:
# Accepts alphanumeric characters, spaces, dashes, and underscores
allowed = '^[a-zA-Z0-9_\- ]+$'
self.security_group_api.validate_property(group_name, 'name',
allowed)
self.security_group_api.validate_property(group_description,
'description', allowed)
else:
# Amazon accepts more symbols.
# So, allow POSIX [:print:] characters.
allowed = r'^[\x20-\x7E]+$'
self.security_group_api.validate_property(group_name, 'name',
allowed)
group_ref = self.security_group_api.create_security_group(
context, group_name, group_description)
return {'securityGroupSet': [self._format_security_group(context,
group_ref)]}
def delete_security_group(self, context, group_name=None, group_id=None,
**kwargs):
if not group_name and not group_id:
err = _("need group_name or group_id")
raise exception.MissingParameter(reason=err)
security_group = self.security_group_api.get(context, group_name,
group_id)
extensions.check_compute_policy(context, 'security_groups',
security_group, 'compute_extension')
self.security_group_api.destroy(context, security_group)
return True
def get_password_data(self, context, instance_id, **kwargs):
# instance_id may be passed in as a list of instances
if isinstance(instance_id, list):
ec2_id = instance_id[0]
else:
ec2_id = instance_id
validate_instance_id(ec2_id)
instance_uuid = ec2utils.ec2_inst_id_to_uuid(context, ec2_id)
instance = self.compute_api.get(context, instance_uuid)
output = password.extract_password(instance)
        # NOTE(vish): this should be the timestamp from the metadata fields,
        #             but it isn't important enough to implement properly
now = timeutils.utcnow()
return {"InstanceId": ec2_id,
"Timestamp": now,
"passwordData": output}
def get_console_output(self, context, instance_id, **kwargs):
LOG.audit(_("Get console output for instance %s"), instance_id,
context=context)
# instance_id may be passed in as a list of instances
if isinstance(instance_id, list):
ec2_id = instance_id[0]
else:
ec2_id = instance_id
validate_instance_id(ec2_id)
instance_uuid = ec2utils.ec2_inst_id_to_uuid(context, ec2_id)
instance = self.compute_api.get(context, instance_uuid,
want_objects=True)
output = self.compute_api.get_console_output(context, instance)
now = timeutils.utcnow()
return {"InstanceId": ec2_id,
"Timestamp": now,
"output": base64.b64encode(output)}
def describe_volumes(self, context, volume_id=None, **kwargs):
if volume_id:
volumes = []
for ec2_id in volume_id:
validate_volume_id(ec2_id)
internal_id = ec2utils.ec2_vol_id_to_uuid(ec2_id)
volume = self.volume_api.get(context, internal_id)
volumes.append(volume)
else:
volumes = self.volume_api.get_all(context)
volumes = [self._format_volume(context, v) for v in volumes]
return {'volumeSet': volumes}
def _format_volume(self, context, volume):
valid_ec2_api_volume_status_map = {
'attaching': 'in-use',
'detaching': 'in-use'}
instance_ec2_id = None
if volume.get('instance_uuid', None):
instance_uuid = volume['instance_uuid']
# Make sure instance exists
objects.Instance.get_by_uuid(context.elevated(), instance_uuid)
instance_ec2_id = ec2utils.id_to_ec2_inst_id(instance_uuid)
v = {}
v['volumeId'] = ec2utils.id_to_ec2_vol_id(volume['id'])
v['status'] = valid_ec2_api_volume_status_map.get(volume['status'],
volume['status'])
v['size'] = volume['size']
v['availabilityZone'] = volume['availability_zone']
v['createTime'] = volume['created_at']
if volume['attach_status'] == 'attached':
v['attachmentSet'] = [{'attachTime': volume['attach_time'],
'deleteOnTermination': False,
'device': volume['mountpoint'],
'instanceId': instance_ec2_id,
'status': 'attached',
'volumeId': v['volumeId']}]
else:
v['attachmentSet'] = [{}]
if volume.get('snapshot_id') is not None:
v['snapshotId'] = ec2utils.id_to_ec2_snap_id(volume['snapshot_id'])
else:
v['snapshotId'] = None
return v
def create_volume(self, context, **kwargs):
snapshot_ec2id = kwargs.get('snapshot_id', None)
if snapshot_ec2id is not None:
snapshot_id = ec2utils.ec2_snap_id_to_uuid(kwargs['snapshot_id'])
snapshot = self.volume_api.get_snapshot(context, snapshot_id)
LOG.audit(_("Create volume from snapshot %s"), snapshot_ec2id,
context=context)
else:
snapshot = None
LOG.audit(_("Create volume of %s GB"),
kwargs.get('size'),
context=context)
create_kwargs = dict(snapshot=snapshot,
volume_type=kwargs.get('volume_type'),
metadata=kwargs.get('metadata'),
availability_zone=kwargs.get('availability_zone'))
volume = self.volume_api.create(context,
kwargs.get('size'),
kwargs.get('name'),
kwargs.get('description'),
**create_kwargs)
vmap = objects.EC2VolumeMapping(context)
vmap.uuid = volume['id']
vmap.create()
# TODO(vish): Instance should be None at db layer instead of
# trying to lazy load, but for now we turn it into
# a dict to avoid an error.
return self._format_volume(context, dict(volume))
def delete_volume(self, context, volume_id, **kwargs):
validate_volume_id(volume_id)
volume_id = ec2utils.ec2_vol_id_to_uuid(volume_id)
self.volume_api.delete(context, volume_id)
return True
def attach_volume(self, context,
volume_id,
instance_id,
device, **kwargs):
validate_instance_id(instance_id)
validate_volume_id(volume_id)
volume_id = ec2utils.ec2_vol_id_to_uuid(volume_id)
instance_uuid = ec2utils.ec2_inst_id_to_uuid(context, instance_id)
instance = self.compute_api.get(context, instance_uuid,
want_objects=True)
LOG.audit(_('Attach volume %(volume_id)s to instance %(instance_id)s '
'at %(device)s'),
{'volume_id': volume_id,
'instance_id': instance_id,
'device': device},
context=context)
self.compute_api.attach_volume(context, instance, volume_id, device)
volume = self.volume_api.get(context, volume_id)
ec2_attach_status = ec2utils.status_to_ec2_attach_status(volume)
return {'attachTime': volume['attach_time'],
'device': volume['mountpoint'],
'instanceId': ec2utils.id_to_ec2_inst_id(instance_uuid),
'requestId': context.request_id,
'status': ec2_attach_status,
'volumeId': ec2utils.id_to_ec2_vol_id(volume_id)}
def _get_instance_from_volume(self, context, volume):
if volume.get('instance_uuid'):
try:
inst_uuid = volume['instance_uuid']
return objects.Instance.get_by_uuid(context, inst_uuid)
except exception.InstanceNotFound:
pass
raise exception.VolumeUnattached(volume_id=volume['id'])
def detach_volume(self, context, volume_id, **kwargs):
validate_volume_id(volume_id)
volume_id = ec2utils.ec2_vol_id_to_uuid(volume_id)
LOG.audit(_("Detach volume %s"), volume_id, context=context)
volume = self.volume_api.get(context, volume_id)
instance = self._get_instance_from_volume(context, volume)
self.compute_api.detach_volume(context, instance, volume)
resp_volume = self.volume_api.get(context, volume_id)
ec2_attach_status = ec2utils.status_to_ec2_attach_status(resp_volume)
return {'attachTime': volume['attach_time'],
'device': volume['mountpoint'],
'instanceId': ec2utils.id_to_ec2_inst_id(
volume['instance_uuid']),
'requestId': context.request_id,
'status': ec2_attach_status,
'volumeId': ec2utils.id_to_ec2_vol_id(volume_id)}
def _format_kernel_id(self, context, instance_ref, result, key):
kernel_uuid = instance_ref['kernel_id']
if kernel_uuid is None or kernel_uuid == '':
return
result[key] = ec2utils.glance_id_to_ec2_id(context, kernel_uuid, 'aki')
def _format_ramdisk_id(self, context, instance_ref, result, key):
ramdisk_uuid = instance_ref['ramdisk_id']
if ramdisk_uuid is None or ramdisk_uuid == '':
return
result[key] = ec2utils.glance_id_to_ec2_id(context, ramdisk_uuid,
'ari')
def describe_instance_attribute(self, context, instance_id, attribute,
**kwargs):
def _unsupported_attribute(instance, result):
raise exception.InvalidAttribute(attr=attribute)
def _format_attr_block_device_mapping(instance, result):
tmp = {}
self._format_instance_root_device_name(instance, tmp)
self._format_instance_bdm(context, instance['uuid'],
tmp['rootDeviceName'], result)
def _format_attr_disable_api_termination(instance, result):
result['disableApiTermination'] = instance['disable_terminate']
def _format_attr_group_set(instance, result):
CloudController._format_group_set(instance, result)
def _format_attr_instance_initiated_shutdown_behavior(instance,
result):
if instance['shutdown_terminate']:
result['instanceInitiatedShutdownBehavior'] = 'terminate'
else:
result['instanceInitiatedShutdownBehavior'] = 'stop'
def _format_attr_instance_type(instance, result):
self._format_instance_type(instance, result)
def _format_attr_kernel(instance, result):
self._format_kernel_id(context, instance, result, 'kernel')
def _format_attr_ramdisk(instance, result):
self._format_ramdisk_id(context, instance, result, 'ramdisk')
def _format_attr_root_device_name(instance, result):
self._format_instance_root_device_name(instance, result)
def _format_attr_source_dest_check(instance, result):
_unsupported_attribute(instance, result)
def _format_attr_user_data(instance, result):
result['userData'] = base64.b64decode(instance['user_data'])
attribute_formatter = {
'blockDeviceMapping': _format_attr_block_device_mapping,
'disableApiTermination': _format_attr_disable_api_termination,
'groupSet': _format_attr_group_set,
'instanceInitiatedShutdownBehavior':
_format_attr_instance_initiated_shutdown_behavior,
'instanceType': _format_attr_instance_type,
'kernel': _format_attr_kernel,
'ramdisk': _format_attr_ramdisk,
'rootDeviceName': _format_attr_root_device_name,
'sourceDestCheck': _format_attr_source_dest_check,
'userData': _format_attr_user_data,
}
fn = attribute_formatter.get(attribute)
if fn is None:
raise exception.InvalidAttribute(attr=attribute)
validate_instance_id(instance_id)
instance_uuid = ec2utils.ec2_inst_id_to_uuid(context, instance_id)
instance = self.compute_api.get(context, instance_uuid,
want_objects=True)
result = {'instance_id': instance_id}
fn(instance, result)
return result
def describe_instances(self, context, **kwargs):
# Optional DescribeInstances argument
instance_id = kwargs.get('instance_id', None)
filters = kwargs.get('filter', None)
instances = self._enforce_valid_instance_ids(context, instance_id)
        return self._format_describe_instances(context,
                                               instance_id=instance_id,
                                               instances_cache=instances,
                                               filter=filters)
def describe_instances_v6(self, context, **kwargs):
# Optional DescribeInstancesV6 argument
instance_id = kwargs.get('instance_id', None)
filters = kwargs.get('filter', None)
instances = self._enforce_valid_instance_ids(context, instance_id)
        return self._format_describe_instances(context,
                                               instance_id=instance_id,
                                               instances_cache=instances,
                                               filter=filters,
                                               use_v6=True)
def _format_describe_instances(self, context, **kwargs):
return {'reservationSet': self._format_instances(context, **kwargs)}
def _format_run_instances(self, context, reservation_id):
i = self._format_instances(context, reservation_id=reservation_id)
assert len(i) == 1
return i[0]
def _format_terminate_instances(self, context, instance_id,
previous_states):
instances_set = []
for (ec2_id, previous_state) in zip(instance_id, previous_states):
i = {}
i['instanceId'] = ec2_id
i['previousState'] = _state_description(previous_state['vm_state'],
previous_state['shutdown_terminate'])
try:
instance_uuid = ec2utils.ec2_inst_id_to_uuid(context, ec2_id)
instance = self.compute_api.get(context, instance_uuid)
i['currentState'] = _state_description(instance['vm_state'],
instance['shutdown_terminate'])
except exception.NotFound:
i['currentState'] = _state_description(
inst_state.SHUTTING_DOWN, True)
instances_set.append(i)
return {'instancesSet': instances_set}
def _format_stop_instances(self, context, instance_ids, previous_states):
instances_set = []
for (ec2_id, previous_state) in zip(instance_ids, previous_states):
i = {}
i['instanceId'] = ec2_id
i['previousState'] = _state_description(previous_state['vm_state'],
previous_state['shutdown_terminate'])
i['currentState'] = _state_description(inst_state.STOPPING, True)
instances_set.append(i)
return {'instancesSet': instances_set}
def _format_start_instances(self, context, instance_id, previous_states):
instances_set = []
for (ec2_id, previous_state) in zip(instance_id, previous_states):
i = {}
i['instanceId'] = ec2_id
i['previousState'] = _state_description(previous_state['vm_state'],
previous_state['shutdown_terminate'])
i['currentState'] = _state_description(None, True)
instances_set.append(i)
return {'instancesSet': instances_set}
def _format_instance_bdm(self, context, instance_uuid, root_device_name,
result):
"""Format InstanceBlockDeviceMappingResponseItemType."""
root_device_type = 'instance-store'
root_device_short_name = block_device.strip_dev(root_device_name)
if root_device_name == root_device_short_name:
root_device_name = block_device.prepend_dev(root_device_name)
mapping = []
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance_uuid)
for bdm in bdms:
volume_id = bdm.volume_id
if volume_id is None or bdm.no_device:
continue
if (bdm.is_volume and
(bdm.device_name == root_device_name or
bdm.device_name == root_device_short_name)):
root_device_type = 'ebs'
vol = self.volume_api.get(context, volume_id)
LOG.debug("vol = %s\n", vol)
# TODO(yamahata): volume attach time
ebs = {'volumeId': ec2utils.id_to_ec2_vol_id(volume_id),
'deleteOnTermination': bdm.delete_on_termination,
'attachTime': vol['attach_time'] or '',
'status': vol['attach_status'], }
res = {'deviceName': bdm.device_name,
'ebs': ebs, }
mapping.append(res)
if mapping:
result['blockDeviceMapping'] = mapping
result['rootDeviceType'] = root_device_type
@staticmethod
def _format_instance_root_device_name(instance, result):
result['rootDeviceName'] = (instance.get('root_device_name') or
block_device.DEFAULT_ROOT_DEV_NAME)
@staticmethod
def _format_instance_type(instance, result):
flavor = instance.get_flavor()
result['instanceType'] = flavor.name
@staticmethod
def _format_group_set(instance, result):
security_group_names = []
if instance.get('security_groups'):
for security_group in instance['security_groups']:
security_group_names.append(security_group['name'])
result['groupSet'] = utils.convert_to_list_dict(
security_group_names, 'groupId')
def _format_instances(self, context, instance_id=None, use_v6=False,
instances_cache=None, **search_opts):
# TODO(termie): this method is poorly named as its name does not imply
# that it will be making a variety of database calls
# rather than simply formatting a bunch of instances that
# were handed to it
reservations = {}
if not instances_cache:
instances_cache = {}
# NOTE(vish): instance_id is an optional list of ids to filter by
if instance_id:
instances = []
for ec2_id in instance_id:
if ec2_id in instances_cache:
instances.append(instances_cache[ec2_id])
else:
try:
instance_uuid = ec2utils.ec2_inst_id_to_uuid(context,
ec2_id)
instance = self.compute_api.get(context, instance_uuid,
want_objects=True)
except exception.NotFound:
continue
instances.append(instance)
else:
try:
# always filter out deleted instances
search_opts['deleted'] = False
instances = self.compute_api.get_all(context,
search_opts=search_opts,
sort_dir='asc',
want_objects=True)
except exception.NotFound:
instances = []
for instance in instances:
if not context.is_admin:
if pipelib.is_vpn_image(instance['image_ref']):
continue
i = {}
instance_uuid = instance['uuid']
ec2_id = ec2utils.id_to_ec2_inst_id(instance_uuid)
i['instanceId'] = ec2_id
image_uuid = instance['image_ref']
i['imageId'] = ec2utils.glance_id_to_ec2_id(context, image_uuid)
self._format_kernel_id(context, instance, i, 'kernelId')
self._format_ramdisk_id(context, instance, i, 'ramdiskId')
i['instanceState'] = _state_description(
instance['vm_state'], instance['shutdown_terminate'])
fixed_ip = None
floating_ip = None
ip_info = ec2utils.get_ip_info_for_instance(context, instance)
if ip_info['fixed_ips']:
fixed_ip = ip_info['fixed_ips'][0]
if ip_info['floating_ips']:
floating_ip = ip_info['floating_ips'][0]
if ip_info['fixed_ip6s']:
i['dnsNameV6'] = ip_info['fixed_ip6s'][0]
if CONF.ec2_private_dns_show_ip:
i['privateDnsName'] = fixed_ip
else:
i['privateDnsName'] = instance['hostname']
i['privateIpAddress'] = fixed_ip
if floating_ip is not None:
i['ipAddress'] = floating_ip
i['dnsName'] = floating_ip
i['keyName'] = instance['key_name']
i['tagSet'] = []
for k, v in utils.instance_meta(instance).iteritems():
i['tagSet'].append({'key': k, 'value': v})
client_token = self._get_client_token(context, instance_uuid)
if client_token:
i['clientToken'] = client_token
if context.is_admin:
i['keyName'] = '%s (%s, %s)' % (i['keyName'],
instance['project_id'],
instance['host'])
i['productCodesSet'] = utils.convert_to_list_dict([],
'product_codes')
self._format_instance_type(instance, i)
i['launchTime'] = instance['created_at']
i['amiLaunchIndex'] = instance['launch_index']
self._format_instance_root_device_name(instance, i)
self._format_instance_bdm(context, instance['uuid'],
i['rootDeviceName'], i)
host = instance['host']
zone = ec2utils.get_availability_zone_by_host(host)
i['placement'] = {'availabilityZone': zone}
if instance['reservation_id'] not in reservations:
r = {}
r['reservationId'] = instance['reservation_id']
r['ownerId'] = instance['project_id']
self._format_group_set(instance, r)
r['instancesSet'] = []
reservations[instance['reservation_id']] = r
reservations[instance['reservation_id']]['instancesSet'].append(i)
return list(reservations.values())
def describe_addresses(self, context, public_ip=None, **kwargs):
if public_ip:
floatings = []
for address in public_ip:
floating = self.network_api.get_floating_ip_by_address(context,
address)
floatings.append(floating)
else:
floatings = self.network_api.get_floating_ips_by_project(context)
addresses = [self._format_address(context, f) for f in floatings]
return {'addressesSet': addresses}
def _format_address(self, context, floating_ip):
ec2_id = None
if floating_ip['fixed_ip_id']:
if utils.is_neutron():
fixed_vm_uuid = floating_ip['instance']['uuid']
if fixed_vm_uuid is not None:
ec2_id = ec2utils.id_to_ec2_inst_id(fixed_vm_uuid)
else:
fixed_id = floating_ip['fixed_ip_id']
fixed = self.network_api.get_fixed_ip(context, fixed_id)
if fixed['instance_uuid'] is not None:
ec2_id = ec2utils.id_to_ec2_inst_id(fixed['instance_uuid'])
address = {'public_ip': floating_ip['address'],
'instance_id': ec2_id}
if context.is_admin:
details = "%s (%s)" % (address['instance_id'],
floating_ip['project_id'])
address['instance_id'] = details
return address
def allocate_address(self, context, **kwargs):
LOG.audit(_("Allocate address"), context=context)
public_ip = self.network_api.allocate_floating_ip(context)
return {'publicIp': public_ip}
def release_address(self, context, public_ip, **kwargs):
LOG.audit(_('Release address %s'), public_ip, context=context)
self.network_api.release_floating_ip(context, address=public_ip)
return {'return': "true"}
def associate_address(self, context, instance_id, public_ip, **kwargs):
LOG.audit(_("Associate address %(public_ip)s to instance "
"%(instance_id)s"),
{'public_ip': public_ip, 'instance_id': instance_id},
context=context)
instance_uuid = ec2utils.ec2_inst_id_to_uuid(context, instance_id)
instance = self.compute_api.get(context, instance_uuid)
cached_ipinfo = ec2utils.get_ip_info_for_instance(context, instance)
fixed_ips = cached_ipinfo['fixed_ips'] + cached_ipinfo['fixed_ip6s']
if not fixed_ips:
msg = _('Unable to associate IP Address, no fixed_ips.')
raise exception.NoMoreFixedIps(message=msg)
# TODO(tr3buchet): this will associate the floating IP with the
# first fixed_ip an instance has. This should be
# changed to support specifying a particular fixed_ip if
        #                  multiple exist, but this may not apply to ec2.
if len(fixed_ips) > 1:
LOG.warn(_LW('multiple fixed_ips exist, using the first: %s'),
fixed_ips[0])
self.network_api.associate_floating_ip(context, instance,
floating_address=public_ip,
fixed_address=fixed_ips[0])
return {'return': 'true'}
def disassociate_address(self, context, public_ip, **kwargs):
instance_id = self.network_api.get_instance_id_by_floating_address(
context, public_ip)
if instance_id:
instance = self.compute_api.get(context, instance_id)
LOG.audit(_("Disassociate address %s"), public_ip, context=context)
self.network_api.disassociate_floating_ip(context, instance,
address=public_ip)
else:
msg = _('Floating ip is not associated.')
raise exception.InvalidAssociation(message=msg)
return {'return': "true"}
def run_instances(self, context, **kwargs):
min_count = int(kwargs.get('min_count', 1))
max_count = int(kwargs.get('max_count', min_count))
try:
min_count = utils.validate_integer(
min_count, "min_count", min_value=1)
max_count = utils.validate_integer(
max_count, "max_count", min_value=1)
except exception.InvalidInput as e:
raise exception.InvalidInput(message=e.format_message())
if min_count > max_count:
msg = _('min_count must be <= max_count')
raise exception.InvalidInput(message=msg)
client_token = kwargs.get('client_token')
if client_token:
resv_id = self._resv_id_from_token(context, client_token)
if resv_id:
# since this client_token already corresponds to a reservation
# id, this returns a proper response without creating a new
# instance
return self._format_run_instances(context, resv_id)
if kwargs.get('kernel_id'):
kernel = self._get_image(context, kwargs['kernel_id'])
kwargs['kernel_id'] = ec2utils.id_to_glance_id(context,
kernel['id'])
if kwargs.get('ramdisk_id'):
ramdisk = self._get_image(context, kwargs['ramdisk_id'])
kwargs['ramdisk_id'] = ec2utils.id_to_glance_id(context,
ramdisk['id'])
for bdm in kwargs.get('block_device_mapping', []):
_parse_block_device_mapping(bdm)
image = self._get_image(context, kwargs['image_id'])
image_uuid = ec2utils.id_to_glance_id(context, image['id'])
if image:
image_state = self._get_image_state(image)
else:
raise exception.ImageNotFoundEC2(image_id=kwargs['image_id'])
if image_state != 'available':
msg = _('Image must be available')
raise exception.ImageNotActive(message=msg)
iisb = kwargs.get('instance_initiated_shutdown_behavior', 'stop')
shutdown_terminate = (iisb == 'terminate')
flavor = objects.Flavor.get_by_name(context,
kwargs.get('instance_type', None))
(instances, resv_id) = self.compute_api.create(context,
instance_type=obj_base.obj_to_primitive(flavor),
image_href=image_uuid,
max_count=int(kwargs.get('max_count', min_count)),
min_count=min_count,
kernel_id=kwargs.get('kernel_id'),
ramdisk_id=kwargs.get('ramdisk_id'),
key_name=kwargs.get('key_name'),
user_data=kwargs.get('user_data'),
security_group=kwargs.get('security_group'),
availability_zone=kwargs.get('placement', {}).get(
'availability_zone'),
block_device_mapping=kwargs.get('block_device_mapping', {}),
shutdown_terminate=shutdown_terminate)
instances = self._format_run_instances(context, resv_id)
if instances:
instance_ids = [i['instanceId'] for i in instances['instancesSet']]
self._add_client_token(context, client_token, instance_ids)
return instances
def _add_client_token(self, context, client_token, instance_ids):
"""Add client token to reservation ID mapping."""
if client_token:
for ec2_id in instance_ids:
instance_uuid = ec2utils.ec2_inst_id_to_uuid(context, ec2_id)
instance = objects.Instance.get_by_uuid(context,
instance_uuid, expected_attrs=['system_metadata'])
instance.system_metadata.update(
{'EC2_client_token': client_token})
instance.save()
def _get_client_token(self, context, instance_uuid):
"""Get client token for a given instance."""
instance = objects.Instance.get_by_uuid(context,
instance_uuid, expected_attrs=['system_metadata'])
return instance.system_metadata.get('EC2_client_token')
def _remove_client_token(self, context, instance_ids):
"""Remove client token to reservation ID mapping."""
for ec2_id in instance_ids:
instance_uuid = ec2utils.ec2_inst_id_to_uuid(context, ec2_id)
instance = objects.Instance.get_by_uuid(context,
instance_uuid, expected_attrs=['system_metadata'])
instance.system_metadata.pop('EC2_client_token', None)
instance.save()
def _resv_id_from_token(self, context, client_token):
"""Get reservation ID from db."""
resv_id = None
sys_metas = self.compute_api.get_all_system_metadata(
context, search_filts=[{'key': ['EC2_client_token']},
{'value': [client_token]}])
for sys_meta in sys_metas:
if sys_meta and sys_meta.get('value') == client_token:
instance = objects.Instance.get_by_uuid(
context, sys_meta['instance_id'], expected_attrs=None)
resv_id = instance.get('reservation_id')
break
return resv_id
def _ec2_ids_to_instances(self, context, instance_id):
"""Get all instances first, to prevent partial executions."""
instances = []
extra = ['system_metadata', 'metadata', 'info_cache']
for ec2_id in instance_id:
validate_instance_id(ec2_id)
instance_uuid = ec2utils.ec2_inst_id_to_uuid(context, ec2_id)
instance = objects.Instance.get_by_uuid(
context, instance_uuid, expected_attrs=extra)
instances.append(instance)
return instances
def terminate_instances(self, context, instance_id, **kwargs):
"""Terminate each instance in instance_id, which is a list of ec2 ids.
instance_id is a kwarg so its name cannot be modified.
"""
previous_states = self._ec2_ids_to_instances(context, instance_id)
self._remove_client_token(context, instance_id)
LOG.debug("Going to start terminating instances")
for instance in previous_states:
self.compute_api.delete(context, instance)
return self._format_terminate_instances(context,
instance_id,
previous_states)
def reboot_instances(self, context, instance_id, **kwargs):
"""instance_id is a list of instance ids."""
instances = self._ec2_ids_to_instances(context, instance_id)
LOG.audit(_("Reboot instance %r"), instance_id, context=context)
for instance in instances:
self.compute_api.reboot(context, instance, 'HARD')
return True
def stop_instances(self, context, instance_id, **kwargs):
"""Stop each instances in instance_id.
Here instance_id is a list of instance ids
"""
instances = self._ec2_ids_to_instances(context, instance_id)
LOG.debug("Going to stop instances")
for instance in instances:
extensions.check_compute_policy(context, 'stop', instance)
self.compute_api.stop(context, instance)
return self._format_stop_instances(context, instance_id,
instances)
def start_instances(self, context, instance_id, **kwargs):
"""Start each instances in instance_id.
Here instance_id is a list of instance ids
"""
instances = self._ec2_ids_to_instances(context, instance_id)
LOG.debug("Going to start instances")
for instance in instances:
extensions.check_compute_policy(context, 'start', instance)
self.compute_api.start(context, instance)
return self._format_start_instances(context, instance_id,
instances)
def _get_image(self, context, ec2_id):
try:
internal_id = ec2utils.ec2_id_to_id(ec2_id)
image = self.image_service.show(context, internal_id)
except (exception.InvalidEc2Id, exception.ImageNotFound):
filters = {'name': ec2_id}
images = self.image_service.detail(context, filters=filters)
try:
return images[0]
except IndexError:
raise exception.ImageNotFound(image_id=ec2_id)
image_type = ec2_id.split('-')[0]
if ec2utils.image_type(image.get('container_format')) != image_type:
raise exception.ImageNotFound(image_id=ec2_id)
return image
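    # Illustrative sketch (ids assumed): _get_image resolves either an EC2
    # image id or a bare image name, then checks the id prefix against the
    # image's container format:
    #     self._get_image(context, 'ami-00000001')   # lookup by internal id 1
    #     self._get_image(context, 'my-image-name')  # falls back to name filter
    #     self._get_image(context, 'aki-00000001')   # must resolve to a kernel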
def _format_image(self, image):
"""Convert from format defined by GlanceImageService to S3 format."""
i = {}
image_type = ec2utils.image_type(image.get('container_format'))
ec2_id = ec2utils.image_ec2_id(image.get('id'), image_type)
name = image.get('name')
i['imageId'] = ec2_id
kernel_id = image['properties'].get('kernel_id')
if kernel_id:
i['kernelId'] = ec2utils.image_ec2_id(kernel_id, 'aki')
ramdisk_id = image['properties'].get('ramdisk_id')
if ramdisk_id:
i['ramdiskId'] = ec2utils.image_ec2_id(ramdisk_id, 'ari')
i['imageOwnerId'] = image.get('owner')
img_loc = image['properties'].get('image_location')
if img_loc:
i['imageLocation'] = img_loc
else:
i['imageLocation'] = "%s (%s)" % (img_loc, name)
i['name'] = name
if not name and img_loc:
# This should only occur for images registered with ec2 api
# prior to that api populating the glance name
i['name'] = img_loc
i['imageState'] = self._get_image_state(image)
i['description'] = image.get('description')
display_mapping = {'aki': 'kernel',
'ari': 'ramdisk',
'ami': 'machine'}
i['imageType'] = display_mapping.get(image_type)
i['isPublic'] = not not image.get('is_public')
i['architecture'] = image['properties'].get('architecture')
properties = image['properties']
root_device_name = block_device.properties_root_device_name(properties)
root_device_type = 'instance-store'
for bdm in properties.get('block_device_mapping', []):
if (block_device.strip_dev(bdm.get('device_name')) ==
block_device.strip_dev(root_device_name) and
('snapshot_id' in bdm or 'volume_id' in bdm) and
not bdm.get('no_device')):
root_device_type = 'ebs'
i['rootDeviceName'] = (root_device_name or
block_device.DEFAULT_ROOT_DEV_NAME)
i['rootDeviceType'] = root_device_type
_format_mappings(properties, i)
return i
def describe_images(self, context, image_id=None, **kwargs):
# NOTE: image_id is a list!
if image_id:
images = []
for ec2_id in image_id:
try:
image = self._get_image(context, ec2_id)
except exception.NotFound:
raise exception.ImageNotFound(image_id=ec2_id)
images.append(image)
else:
images = self.image_service.detail(context)
images = [self._format_image(i) for i in images]
return {'imagesSet': images}
def deregister_image(self, context, image_id, **kwargs):
LOG.audit(_("De-registering image %s"), image_id, context=context)
image = self._get_image(context, image_id)
internal_id = image['id']
self.image_service.delete(context, internal_id)
return True
def _register_image(self, context, metadata):
image = self.image_service.create(context, metadata)
image_type = ec2utils.image_type(image.get('container_format'))
image_id = ec2utils.image_ec2_id(image['id'], image_type)
return image_id
def register_image(self, context, image_location=None, **kwargs):
if image_location is None and kwargs.get('name'):
image_location = kwargs['name']
if image_location is None:
msg = _('imageLocation is required')
raise exception.MissingParameter(reason=msg)
metadata = {'properties': {'image_location': image_location}}
if kwargs.get('name'):
metadata['name'] = kwargs['name']
else:
metadata['name'] = image_location
if 'root_device_name' in kwargs:
metadata['properties']['root_device_name'] = kwargs.get(
'root_device_name')
mappings = [_parse_block_device_mapping(bdm) for bdm in
kwargs.get('block_device_mapping', [])]
if mappings:
metadata['properties']['block_device_mapping'] = mappings
image_id = self._register_image(context, metadata)
LOG.audit(_('Registered image %(image_location)s with id '
'%(image_id)s'),
{'image_location': image_location, 'image_id': image_id},
context=context)
return {'imageId': image_id}
def describe_image_attribute(self, context, image_id, attribute, **kwargs):
def _block_device_mapping_attribute(image, result):
_format_mappings(image['properties'], result)
def _launch_permission_attribute(image, result):
result['launchPermission'] = []
if image['is_public']:
result['launchPermission'].append({'group': 'all'})
def _root_device_name_attribute(image, result):
_prop_root_dev_name = block_device.properties_root_device_name
result['rootDeviceName'] = _prop_root_dev_name(image['properties'])
if result['rootDeviceName'] is None:
result['rootDeviceName'] = block_device.DEFAULT_ROOT_DEV_NAME
def _kernel_attribute(image, result):
kernel_id = image['properties'].get('kernel_id')
if kernel_id:
result['kernel'] = {
'value': ec2utils.image_ec2_id(kernel_id, 'aki')
}
def _ramdisk_attribute(image, result):
ramdisk_id = image['properties'].get('ramdisk_id')
if ramdisk_id:
result['ramdisk'] = {
'value': ec2utils.image_ec2_id(ramdisk_id, 'ari')
}
supported_attributes = {
'blockDeviceMapping': _block_device_mapping_attribute,
'launchPermission': _launch_permission_attribute,
'rootDeviceName': _root_device_name_attribute,
'kernel': _kernel_attribute,
'ramdisk': _ramdisk_attribute,
}
fn = supported_attributes.get(attribute)
if fn is None:
raise exception.InvalidAttribute(attr=attribute)
try:
image = self._get_image(context, image_id)
except exception.NotFound:
raise exception.ImageNotFound(image_id=image_id)
result = {'imageId': image_id}
fn(image, result)
return result
def modify_image_attribute(self, context, image_id, attribute,
operation_type, **kwargs):
# TODO(devcamcar): Support users and groups other than 'all'.
if attribute != 'launchPermission':
raise exception.InvalidAttribute(attr=attribute)
if 'user_group' not in kwargs:
msg = _('user or group not specified')
raise exception.MissingParameter(reason=msg)
if len(kwargs['user_group']) != 1 and kwargs['user_group'][0] != 'all':
msg = _('only group "all" is supported')
raise exception.InvalidParameterValue(message=msg)
if operation_type not in ['add', 'remove']:
msg = _('operation_type must be add or remove')
raise exception.InvalidParameterValue(message=msg)
LOG.audit(_("Updating image %s publicity"), image_id, context=context)
try:
image = self._get_image(context, image_id)
except exception.NotFound:
raise exception.ImageNotFound(image_id=image_id)
internal_id = image['id']
del(image['id'])
image['is_public'] = (operation_type == 'add')
try:
return self.image_service.update(context, internal_id, image)
except exception.ImageNotAuthorized:
msg = _('Not allowed to modify attributes for image %s') % image_id
raise exception.Forbidden(message=msg)
def update_image(self, context, image_id, **kwargs):
internal_id = ec2utils.ec2_id_to_id(image_id)
result = self.image_service.update(context, internal_id, dict(kwargs))
return result
# TODO(yamahata): race condition
# At the moment there is no way to prevent others from
# manipulating instances/volumes/snapshots.
    # As other code doesn't take it into consideration, we don't
    # handle it here for now (ostrich algorithm).
# TODO(mriedem): Consider auto-locking the instance when stopping it and
# doing the snapshot, then unlock it when that is done. Locking the
# instance in the database would prevent other APIs from changing the state
# of the instance during this operation for non-admin users.
def create_image(self, context, instance_id, **kwargs):
# NOTE(yamahata): name/description are ignored by register_image(),
# do so here
no_reboot = kwargs.get('no_reboot', False)
name = kwargs.get('name')
validate_instance_id(instance_id)
ec2_instance_id = instance_id
instance_uuid = ec2utils.ec2_inst_id_to_uuid(context, ec2_instance_id)
instance = self.compute_api.get(context, instance_uuid,
want_objects=True)
        # CreateImage is only supported for the analogue of EBS-backed
        # instances
if not self.compute_api.is_volume_backed_instance(context, instance):
msg = _("Invalid value '%(ec2_instance_id)s' for instanceId. "
"Instance does not have a volume attached at root "
"(%(root)s)") % {'root': instance.root_device_name,
'ec2_instance_id': ec2_instance_id}
raise exception.InvalidParameterValue(err=msg)
# stop the instance if necessary
restart_instance = False
if not no_reboot:
vm_state = instance.vm_state
# if the instance is in subtle state, refuse to proceed.
if vm_state not in (vm_states.ACTIVE, vm_states.STOPPED):
raise exception.InstanceNotRunning(instance_id=ec2_instance_id)
if vm_state == vm_states.ACTIVE:
restart_instance = True
# NOTE(mriedem): We do a call here so that we're sure the
# stop request is complete before we begin polling the state.
self.compute_api.stop(context, instance, do_cast=False)
            # wait until the instance is really stopped (no transitioning task)
start_time = time.time()
while (vm_state != vm_states.STOPPED and
instance.task_state is not None):
time.sleep(1)
instance.refresh()
vm_state = instance.vm_state
# NOTE(yamahata): timeout and error. 1 hour for now for safety.
# Is it too short/long?
# Or is there any better way?
timeout = 1 * 60 * 60
if time.time() > start_time + timeout:
err = (_("Couldn't stop instance %(instance)s within "
"1 hour. Current vm_state: %(vm_state)s, "
"current task_state: %(task_state)s") %
{'instance': instance_uuid,
'vm_state': vm_state,
'task_state': instance.task_state})
raise exception.InternalError(message=err)
glance_uuid = instance.image_ref
ec2_image_id = ec2utils.glance_id_to_ec2_id(context, glance_uuid)
src_image = self._get_image(context, ec2_image_id)
image_meta = dict(src_image)
def _unmap_id_property(properties, name):
if properties[name]:
properties[name] = ec2utils.id_to_glance_id(context,
properties[name])
# ensure the ID properties are unmapped back to the glance UUID
_unmap_id_property(image_meta['properties'], 'kernel_id')
_unmap_id_property(image_meta['properties'], 'ramdisk_id')
# meaningful image name
name_map = dict(instance=instance_uuid, now=timeutils.isotime())
name = name or _('image of %(instance)s at %(now)s') % name_map
new_image = self.compute_api.snapshot_volume_backed(context,
instance,
image_meta,
name)
ec2_id = ec2utils.glance_id_to_ec2_id(context, new_image['id'])
if restart_instance:
self.compute_api.start(context, instance)
return {'imageId': ec2_id}
def create_tags(self, context, **kwargs):
"""Add tags to a resource
Returns True on success, error on failure.
:param context: context under which the method is called
"""
resources = kwargs.get('resource_id', None)
tags = kwargs.get('tag', None)
if resources is None or tags is None:
msg = _('resource_id and tag are required')
raise exception.MissingParameter(reason=msg)
if not isinstance(resources, (tuple, list, set)):
msg = _('Expecting a list of resources')
raise exception.InvalidParameterValue(message=msg)
for r in resources:
if ec2utils.resource_type_from_id(context, r) != 'instance':
msg = _('Only instances implemented')
raise exception.InvalidParameterValue(message=msg)
if not isinstance(tags, (tuple, list, set)):
msg = _('Expecting a list of tagSets')
raise exception.InvalidParameterValue(message=msg)
metadata = {}
for tag in tags:
if not isinstance(tag, dict):
err = _('Expecting tagSet to be key/value pairs')
raise exception.InvalidParameterValue(message=err)
key = tag.get('key', None)
val = tag.get('value', None)
if key is None or val is None:
err = _('Expecting both key and value to be set')
raise exception.InvalidParameterValue(message=err)
metadata[key] = val
for ec2_id in resources:
instance_uuid = ec2utils.ec2_inst_id_to_uuid(context, ec2_id)
instance = self.compute_api.get(context, instance_uuid,
want_objects=True)
self.compute_api.update_instance_metadata(context,
instance, metadata)
return True
def delete_tags(self, context, **kwargs):
"""Delete tags
Returns True on success, error on failure.
:param context: context under which the method is called
"""
resources = kwargs.get('resource_id', None)
tags = kwargs.get('tag', None)
if resources is None or tags is None:
msg = _('resource_id and tag are required')
raise exception.MissingParameter(reason=msg)
if not isinstance(resources, (tuple, list, set)):
msg = _('Expecting a list of resources')
raise exception.InvalidParameterValue(message=msg)
for r in resources:
if ec2utils.resource_type_from_id(context, r) != 'instance':
msg = _('Only instances implemented')
raise exception.InvalidParameterValue(message=msg)
if not isinstance(tags, (tuple, list, set)):
msg = _('Expecting a list of tagSets')
raise exception.InvalidParameterValue(message=msg)
for ec2_id in resources:
instance_uuid = ec2utils.ec2_inst_id_to_uuid(context, ec2_id)
instance = self.compute_api.get(context, instance_uuid,
want_objects=True)
for tag in tags:
if not isinstance(tag, dict):
msg = _('Expecting tagSet to be key/value pairs')
raise exception.InvalidParameterValue(message=msg)
key = tag.get('key', None)
if key is None:
msg = _('Expecting key to be set')
raise exception.InvalidParameterValue(message=msg)
self.compute_api.delete_instance_metadata(context,
instance, key)
return True
def describe_tags(self, context, **kwargs):
"""List tags
Returns a dict with a single key 'tagSet' on success, error on failure.
:param context: context under which the method is called
"""
filters = kwargs.get('filter', None)
search_filts = []
if filters:
for filter_block in filters:
key_name = filter_block.get('name', None)
val = filter_block.get('value', None)
if val:
if isinstance(val, dict):
val = val.values()
if not isinstance(val, (tuple, list, set)):
val = (val,)
if key_name:
search_block = {}
if key_name in ('resource_id', 'resource-id'):
search_block['resource_id'] = []
for res_id in val:
search_block['resource_id'].append(
ec2utils.ec2_inst_id_to_uuid(context, res_id))
elif key_name in ['key', 'value']:
search_block[key_name] = \
[ec2utils.regex_from_ec2_regex(v) for v in val]
elif key_name in ('resource_type', 'resource-type'):
for res_type in val:
if res_type != 'instance':
raise exception.InvalidParameterValue(
message=_('Only instances implemented'))
search_block[key_name] = 'instance'
if len(search_block.keys()) > 0:
search_filts.append(search_block)
ts = []
for tag in self.compute_api.get_all_instance_metadata(context,
search_filts):
ts.append({
'resource_id': ec2utils.id_to_ec2_inst_id(tag['instance_id']),
'resource_type': 'instance',
'key': tag['key'],
'value': tag['value']
})
return {"tagSet": ts}
class EC2SecurityGroupExceptions(object):
@staticmethod
def raise_invalid_property(msg):
raise exception.InvalidParameterValue(message=msg)
@staticmethod
def raise_group_already_exists(msg):
raise exception.SecurityGroupExists(message=msg)
@staticmethod
def raise_invalid_group(msg):
raise exception.InvalidGroup(reason=msg)
@staticmethod
def raise_invalid_cidr(cidr, decoding_exception=None):
if decoding_exception:
raise decoding_exception
else:
raise exception.InvalidParameterValue(message=_("Invalid CIDR"))
@staticmethod
def raise_over_quota(msg):
raise exception.SecurityGroupLimitExceeded(msg)
@staticmethod
def raise_not_found(msg):
pass
class CloudSecurityGroupNovaAPI(EC2SecurityGroupExceptions,
compute_api.SecurityGroupAPI):
pass
class CloudSecurityGroupNeutronAPI(EC2SecurityGroupExceptions,
neutron_driver.SecurityGroupAPI):
pass
def get_cloud_security_group_api():
if cfg.CONF.security_group_api.lower() == 'nova':
return CloudSecurityGroupNovaAPI()
elif cfg.CONF.security_group_api.lower() in ('neutron', 'quantum'):
return CloudSecurityGroupNeutronAPI()
else:
raise NotImplementedError()
|
apache-2.0
|
openstack/horizon
|
openstack_dashboard/test/unit/api/rest/test_utils.py
|
4
|
8967
|
# Copyright 2014, Rackspace, US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from openstack_dashboard.api.rest import json_encoder
from openstack_dashboard.api.rest import utils
from openstack_dashboard.test import helpers as test
class RestUtilsTestCase(test.TestCase):
def test_api_success(self):
@utils.ajax()
def f(self, request):
return 'ok'
request = self.mock_rest_request()
response = f(None, request)
self.assertStatusCode(response, 200)
self.assertEqual("ok", response.json)
def test_api_success_no_auth_ok(self):
@utils.ajax(authenticated=False)
def f(self, request):
return 'ok'
request = self.mock_rest_request()
response = f(None, request)
self.assertStatusCode(response, 200)
self.assertEqual("ok", response.json)
def test_api_auth_required(self):
@utils.ajax()
def f(self, request):
return 'ok'
request = self.mock_rest_request(**{
'user.is_authenticated': False
})
response = f(None, request)
self.assertStatusCode(response, 401)
self.assertEqual("not logged in", response.json)
def test_api_success_204(self):
@utils.ajax()
def f(self, request):
pass
request = self.mock_rest_request()
response = f(None, request)
self.assertStatusCode(response, 204)
self.assertEqual(b'', response.content)
def test_api_error(self):
@utils.ajax()
def f(self, request):
raise utils.AjaxError(500, 'b0rk')
request = self.mock_rest_request()
response = f(None, request)
self.assertStatusCode(response, 500)
self.assertEqual("b0rk", response.json)
def test_api_malformed_json(self):
@utils.ajax()
def f(self, request):
assert False, "don't get here"
request = self.mock_rest_request(**{'body': 'spam'})
response = f(None, request)
self.assertStatusCode(response, 400)
self.assertIn(b'"malformed JSON request: ', response.content)
def test_api_not_found(self):
@utils.ajax()
def f(self, request):
raise utils.AjaxError(404, 'b0rk')
request = self.mock_rest_request()
response = f(None, request)
self.assertStatusCode(response, 404)
self.assertEqual("b0rk", response.json)
def test_data_required_with_no_data(self):
@utils.ajax(data_required=True)
def f(self, request):
assert False, "don't get here"
request = self.mock_rest_request()
response = f(None, request)
self.assertStatusCode(response, 400)
self.assertEqual("request requires JSON body", response.json)
def test_valid_data_required(self):
@utils.ajax(data_required=True)
def f(self, request):
return 'OK'
request = self.mock_rest_request(**{'body': '''
{"current": true, "update": true}
'''})
response = f(None, request)
self.assertStatusCode(response, 200)
self.assertEqual("OK", response.json)
def test_api_created_response(self):
@utils.ajax()
def f(self, request):
return utils.CreatedResponse('/api/spam/spam123')
request = self.mock_rest_request()
response = f(None, request)
self.assertStatusCode(response, 201)
self.assertEqual('/api/spam/spam123', response['location'])
self.assertEqual(b'', response.content)
def test_api_created_response_content(self):
@utils.ajax()
def f(self, request):
return utils.CreatedResponse('/api/spam/spam123', 'spam!')
request = self.mock_rest_request()
response = f(None, request)
self.assertStatusCode(response, 201)
self.assertEqual('/api/spam/spam123', response['location'])
self.assertEqual("spam!", response.json)
def test_parse_filters_keywords(self):
kwargs = {
'sort_dir': '1',
'sort_key': '2',
}
filters = {
'filter1': '1',
'filter2': '2',
}
# Combined
request_params = dict(kwargs)
request_params.update(filters)
request = self.mock_rest_request(**{'GET': dict(request_params)})
output_filters, output_kwargs = utils.parse_filters_kwargs(
request, kwargs)
self.assertDictEqual(kwargs, output_kwargs)
self.assertDictEqual(filters, output_filters)
# Empty Filters
request = self.mock_rest_request(**{'GET': dict(kwargs)})
output_filters, output_kwargs = utils.parse_filters_kwargs(
request, kwargs)
self.assertDictEqual(kwargs, output_kwargs)
self.assertDictEqual({}, output_filters)
# Empty keywords
request = self.mock_rest_request(**{'GET': dict(filters)})
output_filters, output_kwargs = utils.parse_filters_kwargs(
request)
self.assertDictEqual({}, output_kwargs)
self.assertDictEqual(filters, output_filters)
# Empty both
request = self.mock_rest_request(**{'GET': dict()})
output_filters, output_kwargs = utils.parse_filters_kwargs(
request)
self.assertDictEqual({}, output_kwargs)
self.assertDictEqual({}, output_filters)
class JSONEncoderTestCase(test.TestCase):
    # NOTE(tsufiev): the NaN numeric is "conventional" in the sense that the
    # custom NaNJSONEncoder translates it to the same token as the standard
    # JSONEncoder does
conventional_data = {'key1': 'string', 'key2': 10, 'key4': [1, 'some'],
'key5': {'subkey': 7}, 'nanKey': float('nan')}
data_nan = float('nan')
data_inf = float('inf')
data_neginf = -float('inf')
def test_custom_encoder_on_nan(self):
@utils.ajax(json_encoder=json_encoder.NaNJSONEncoder)
def f(self, request):
return self.data_nan
request = self.mock_rest_request()
response = f(self, request)
self.assertStatusCode(response, 200)
self.assertEqual('application/json', response['content-type'])
self.assertEqual(b'NaN', response.content)
def test_custom_encoder_on_infinity(self):
@utils.ajax(json_encoder=json_encoder.NaNJSONEncoder)
def f(self, request):
return self.data_inf
request = self.mock_rest_request()
response = f(self, request)
self.assertStatusCode(response, 200)
self.assertEqual('application/json', response['content-type'])
self.assertEqual(b'1e+999', response.content)
def test_custom_encoder_on_negative_infinity(self):
@utils.ajax(json_encoder=json_encoder.NaNJSONEncoder)
def f(self, request):
return self.data_neginf
request = self.mock_rest_request()
response = f(self, request)
self.assertStatusCode(response, 200)
self.assertEqual('application/json', response['content-type'])
self.assertEqual(b'-1e+999', response.content)
def test_custom_encoder_yields_standard_json_for_conventional_data(self):
@utils.ajax()
def f(self, request):
return self.conventional_data
@utils.ajax(json_encoder=json_encoder.NaNJSONEncoder)
def g(self, request):
return self.conventional_data
request = self.mock_rest_request()
default_encoder_response = f(self, request)
custom_encoder_response = g(self, request)
self.assertEqual(default_encoder_response.content,
custom_encoder_response.content)
def test_custom_encoder_yields_different_json_for_enhanced_data(self):
@utils.ajax()
def f(self, request):
return dict(tuple(self.conventional_data.items()) +
(('key3', self.data_inf),))
@utils.ajax(json_encoder=json_encoder.NaNJSONEncoder)
def g(self, request):
return dict(tuple(self.conventional_data.items()) +
(('key3', self.data_inf),))
request = self.mock_rest_request()
default_encoder_response = f(self, request)
custom_encoder_response = g(self, request)
self.assertNotEqual(default_encoder_response.content,
custom_encoder_response.content)
|
apache-2.0
|
sparkslabs/kamaelia
|
Code/Python/Kamaelia/Kamaelia/Visualisation/ER/ERLaws.py
|
6
|
4531
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -------------------------------------------------------------------------
import Kamaelia.Visualisation
from Kamaelia.Visualisation.PhysicsGraph import TopologyViewerServer, BaseParticle
from Kamaelia.Support.Particles import SimpleLaws, MultipleLaws
from pygame.locals import *
_COMPONENT_RADIUS = 32
class AxonLaws(MultipleLaws):
def __init__(self, relationBondLength = 100):
"""x.__init__(...) initializes x; see x.__class__.__doc__ for signature"""
damp = 1.0 - 0.8
dampcutoff = 0.4
maxvel = 32
forceScaler = 1.0
entity_entity = SimpleLaws( bondLength = relationBondLength,
maxRepelRadius = 2.3 * relationBondLength,
repulsionStrength = 10.0 * forceScaler,
maxBondForce = 0.0 * forceScaler,
damp = damp,
dampcutoff = dampcutoff,
maxVelocity = maxvel
)
relation_relation = SimpleLaws( bondLength = relationBondLength,
maxRepelRadius = _COMPONENT_RADIUS * 2.0,
repulsionStrength = 1 * forceScaler,
maxBondForce = 3.0 * forceScaler,
damp = damp,
dampcutoff = dampcutoff,
maxVelocity = maxvel
)
entity_attribute = SimpleLaws( bondLength = _COMPONENT_RADIUS*2,
maxRepelRadius = _COMPONENT_RADIUS*2,
repulsionStrength = 2.0 * forceScaler,
maxBondForce = 10.0 * forceScaler,
damp = damp,
dampcutoff = dampcutoff,
maxVelocity = maxvel
)
entity_relation = SimpleLaws( bondLength = _COMPONENT_RADIUS*3,
maxRepelRadius = _COMPONENT_RADIUS*3,
repulsionStrength = 2.0 * forceScaler,
maxBondForce = 10.0 * forceScaler,
damp = damp,
dampcutoff = dampcutoff,
maxVelocity = maxvel
)
typesToLaws = { ("entity", "entity") : entity_entity,
("relation", "relation") : relation_relation,
("isa", "relation") : relation_relation,
("relation", "isa") : relation_relation,
("isa", "isa") : relation_relation,
("entity", "relation") : entity_relation,
("entity", "isa") : entity_relation,
("relation", "entity") : entity_relation,
("isa", "entity") : entity_relation,
("entity", "attribute") : entity_attribute,
}
super(AxonLaws, self).__init__( typesToLaws = typesToLaws,defaultLaw = entity_relation )
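# Usage sketch (added for illustration; how the laws object is wired into a topology
# viewer is an assumption here, only the constructor is taken from this module):
#   laws = AxonLaws(relationBondLength=120)  # longer bonds spread entities further apart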
|
apache-2.0
|
zurcoin/zurcoin
|
qa/rpc-tests/rest.py
|
45
|
15163
|
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test REST interface
#
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from struct import *
from io import BytesIO
from codecs import encode
import http.client
import urllib.parse
def deser_uint256(f):
r = 0
for i in range(8):
t = unpack(b"<I", f.read(4))[0]
r += t << (i * 32)
return r
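# Illustrative check (not part of the original test): deser_uint256(BytesIO(b'\x01' + b'\x00' * 31)) == 1,
# since the 32 bytes are read as eight little-endian 32-bit words, least-significant word first.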
#allows simple http get calls
def http_get_call(host, port, path, response_object = 0):
conn = http.client.HTTPConnection(host, port)
conn.request('GET', path)
if response_object:
return conn.getresponse()
return conn.getresponse().read().decode('utf-8')
#allows simple http post calls with a request body
def http_post_call(host, port, path, requestdata = '', response_object = 0):
conn = http.client.HTTPConnection(host, port)
conn.request('POST', path, requestdata)
if response_object:
return conn.getresponse()
return conn.getresponse().read()
class RESTTest (BitcoinTestFramework):
FORMAT_SEPARATOR = "."
def __init__(self):
super().__init__()
self.setup_clean_chain = True
self.num_nodes = 3
def setup_network(self, split=False):
self.nodes = start_nodes(self.num_nodes, self.options.tmpdir)
connect_nodes_bi(self.nodes,0,1)
connect_nodes_bi(self.nodes,1,2)
connect_nodes_bi(self.nodes,0,2)
self.is_network_split=False
self.sync_all()
def run_test(self):
url = urllib.parse.urlparse(self.nodes[0].url)
print("Mining blocks...")
self.nodes[0].generate(1)
self.sync_all()
self.nodes[2].generate(100)
self.sync_all()
assert_equal(self.nodes[0].getbalance(), 50)
txid = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.1)
self.sync_all()
self.nodes[2].generate(1)
self.sync_all()
bb_hash = self.nodes[0].getbestblockhash()
assert_equal(self.nodes[1].getbalance(), Decimal("0.1")) #balance now should be 0.1 on node 1
# load the latest 0.1 tx over the REST API
json_string = http_get_call(url.hostname, url.port, '/rest/tx/'+txid+self.FORMAT_SEPARATOR+"json")
json_obj = json.loads(json_string)
vintx = json_obj['vin'][0]['txid'] # get the vin to later check for utxo (should be spent by then)
# get n of 0.1 outpoint
n = 0
for vout in json_obj['vout']:
if vout['value'] == 0.1:
n = vout['n']
        #######################################
        # GETUTXOS: query an unspent outpoint #
        #######################################
json_request = '/checkmempool/'+txid+'-'+str(n)
json_string = http_get_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json')
json_obj = json.loads(json_string)
#check chainTip response
assert_equal(json_obj['chaintipHash'], bb_hash)
#make sure there is one utxo
assert_equal(len(json_obj['utxos']), 1)
assert_equal(json_obj['utxos'][0]['value'], 0.1)
        #################################################
        # GETUTXOS: now query an already spent outpoint #
        #################################################
json_request = '/checkmempool/'+vintx+'-0'
json_string = http_get_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json')
json_obj = json.loads(json_string)
#check chainTip response
assert_equal(json_obj['chaintipHash'], bb_hash)
        #make sure there is no utxo in the response because this outpoint has been spent
assert_equal(len(json_obj['utxos']), 0)
#check bitmap
assert_equal(json_obj['bitmap'], "0")
##################################################
# GETUTXOS: now check both with the same request #
##################################################
json_request = '/checkmempool/'+txid+'-'+str(n)+'/'+vintx+'-0'
json_string = http_get_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json')
json_obj = json.loads(json_string)
assert_equal(len(json_obj['utxos']), 1)
assert_equal(json_obj['bitmap'], "10")
#test binary response
bb_hash = self.nodes[0].getbestblockhash()
binaryRequest = b'\x01\x02'
binaryRequest += hex_str_to_bytes(txid)
binaryRequest += pack("i", n)
binaryRequest += hex_str_to_bytes(vintx)
binaryRequest += pack("i", 0)
bin_response = http_post_call(url.hostname, url.port, '/rest/getutxos'+self.FORMAT_SEPARATOR+'bin', binaryRequest)
output = BytesIO()
output.write(bin_response)
output.seek(0)
chainHeight = unpack("i", output.read(4))[0]
hashFromBinResponse = hex(deser_uint256(output))[2:].zfill(64)
assert_equal(bb_hash, hashFromBinResponse) #check if getutxo's chaintip during calculation was fine
assert_equal(chainHeight, 102) #chain height must be 102
############################
# GETUTXOS: mempool checks #
############################
# do a tx and don't sync
txid = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.1)
json_string = http_get_call(url.hostname, url.port, '/rest/tx/'+txid+self.FORMAT_SEPARATOR+"json")
json_obj = json.loads(json_string)
vintx = json_obj['vin'][0]['txid'] # get the vin to later check for utxo (should be spent by then)
# get n of 0.1 outpoint
n = 0
for vout in json_obj['vout']:
if vout['value'] == 0.1:
n = vout['n']
json_request = '/'+txid+'-'+str(n)
json_string = http_get_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json')
json_obj = json.loads(json_string)
        assert_equal(len(json_obj['utxos']), 0) #there should be no outpoint because the tx is only in the mempool and checkmempool was not used
json_request = '/checkmempool/'+txid+'-'+str(n)
json_string = http_get_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json')
json_obj = json.loads(json_string)
        assert_equal(len(json_obj['utxos']), 1) #there should be an outpoint because the tx has just been added to the mempool
#do some invalid requests
json_request = '{"checkmempool'
response = http_post_call(url.hostname, url.port, '/rest/getutxos'+self.FORMAT_SEPARATOR+'json', json_request, True)
        assert_equal(response.status, 500) #must be a 500 because we send an invalid json request
json_request = '{"checkmempool'
response = http_post_call(url.hostname, url.port, '/rest/getutxos'+self.FORMAT_SEPARATOR+'bin', json_request, True)
        assert_equal(response.status, 500) #must be a 500 because we send an invalid bin request
response = http_post_call(url.hostname, url.port, '/rest/getutxos/checkmempool'+self.FORMAT_SEPARATOR+'bin', '', True)
        assert_equal(response.status, 500) #must be a 500 because we send an invalid bin request
#test limits
json_request = '/checkmempool/'
for x in range(0, 20):
json_request += txid+'-'+str(n)+'/'
json_request = json_request.rstrip("/")
response = http_post_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json', '', True)
        assert_equal(response.status, 500) #must be a 500 because we are exceeding the limits
json_request = '/checkmempool/'
for x in range(0, 15):
json_request += txid+'-'+str(n)+'/'
json_request = json_request.rstrip("/")
response = http_post_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json', '', True)
        assert_equal(response.status, 200) #must be a 200 because we are within the limits
self.nodes[0].generate(1) #generate block to not affect upcoming tests
self.sync_all()
################
# /rest/block/ #
################
# check binary format
response = http_get_call(url.hostname, url.port, '/rest/block/'+bb_hash+self.FORMAT_SEPARATOR+"bin", True)
assert_equal(response.status, 200)
assert_greater_than(int(response.getheader('content-length')), 80)
response_str = response.read()
# compare with block header
response_header = http_get_call(url.hostname, url.port, '/rest/headers/1/'+bb_hash+self.FORMAT_SEPARATOR+"bin", True)
assert_equal(response_header.status, 200)
assert_equal(int(response_header.getheader('content-length')), 80)
response_header_str = response_header.read()
assert_equal(response_str[0:80], response_header_str)
# check block hex format
response_hex = http_get_call(url.hostname, url.port, '/rest/block/'+bb_hash+self.FORMAT_SEPARATOR+"hex", True)
assert_equal(response_hex.status, 200)
assert_greater_than(int(response_hex.getheader('content-length')), 160)
response_hex_str = response_hex.read()
assert_equal(encode(response_str, "hex_codec")[0:160], response_hex_str[0:160])
# compare with hex block header
response_header_hex = http_get_call(url.hostname, url.port, '/rest/headers/1/'+bb_hash+self.FORMAT_SEPARATOR+"hex", True)
assert_equal(response_header_hex.status, 200)
assert_greater_than(int(response_header_hex.getheader('content-length')), 160)
response_header_hex_str = response_header_hex.read()
assert_equal(response_hex_str[0:160], response_header_hex_str[0:160])
assert_equal(encode(response_header_str, "hex_codec")[0:160], response_header_hex_str[0:160])
# check json format
block_json_string = http_get_call(url.hostname, url.port, '/rest/block/'+bb_hash+self.FORMAT_SEPARATOR+'json')
block_json_obj = json.loads(block_json_string)
assert_equal(block_json_obj['hash'], bb_hash)
# compare with json block header
response_header_json = http_get_call(url.hostname, url.port, '/rest/headers/1/'+bb_hash+self.FORMAT_SEPARATOR+"json", True)
assert_equal(response_header_json.status, 200)
response_header_json_str = response_header_json.read().decode('utf-8')
json_obj = json.loads(response_header_json_str, parse_float=Decimal)
assert_equal(len(json_obj), 1) #ensure that there is one header in the json response
assert_equal(json_obj[0]['hash'], bb_hash) #request/response hash should be the same
#compare with normal RPC block response
rpc_block_json = self.nodes[0].getblock(bb_hash)
assert_equal(json_obj[0]['hash'], rpc_block_json['hash'])
assert_equal(json_obj[0]['confirmations'], rpc_block_json['confirmations'])
assert_equal(json_obj[0]['height'], rpc_block_json['height'])
assert_equal(json_obj[0]['version'], rpc_block_json['version'])
assert_equal(json_obj[0]['merkleroot'], rpc_block_json['merkleroot'])
assert_equal(json_obj[0]['time'], rpc_block_json['time'])
assert_equal(json_obj[0]['nonce'], rpc_block_json['nonce'])
assert_equal(json_obj[0]['bits'], rpc_block_json['bits'])
assert_equal(json_obj[0]['difficulty'], rpc_block_json['difficulty'])
assert_equal(json_obj[0]['chainwork'], rpc_block_json['chainwork'])
assert_equal(json_obj[0]['previousblockhash'], rpc_block_json['previousblockhash'])
#see if we can get 5 headers in one response
self.nodes[1].generate(5)
self.sync_all()
response_header_json = http_get_call(url.hostname, url.port, '/rest/headers/5/'+bb_hash+self.FORMAT_SEPARATOR+"json", True)
assert_equal(response_header_json.status, 200)
response_header_json_str = response_header_json.read().decode('utf-8')
json_obj = json.loads(response_header_json_str)
assert_equal(len(json_obj), 5) #now we should have 5 header objects
# do tx test
tx_hash = block_json_obj['tx'][0]['txid']
json_string = http_get_call(url.hostname, url.port, '/rest/tx/'+tx_hash+self.FORMAT_SEPARATOR+"json")
json_obj = json.loads(json_string)
assert_equal(json_obj['txid'], tx_hash)
# check hex format response
hex_string = http_get_call(url.hostname, url.port, '/rest/tx/'+tx_hash+self.FORMAT_SEPARATOR+"hex", True)
assert_equal(hex_string.status, 200)
        assert_greater_than(int(hex_string.getheader('content-length')), 10)
# check block tx details
# let's make 3 tx and mine them on node 1
txs = []
txs.append(self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 11))
txs.append(self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 11))
txs.append(self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 11))
self.sync_all()
# check that there are exactly 3 transactions in the TX memory pool before generating the block
json_string = http_get_call(url.hostname, url.port, '/rest/mempool/info'+self.FORMAT_SEPARATOR+'json')
json_obj = json.loads(json_string)
assert_equal(json_obj['size'], 3)
# the size of the memory pool should be greater than 3x ~100 bytes
assert_greater_than(json_obj['bytes'], 300)
# check that there are our submitted transactions in the TX memory pool
json_string = http_get_call(url.hostname, url.port, '/rest/mempool/contents'+self.FORMAT_SEPARATOR+'json')
json_obj = json.loads(json_string)
for tx in txs:
assert_equal(tx in json_obj, True)
# now mine the transactions
newblockhash = self.nodes[1].generate(1)
self.sync_all()
#check if the 3 tx show up in the new block
json_string = http_get_call(url.hostname, url.port, '/rest/block/'+newblockhash[0]+self.FORMAT_SEPARATOR+'json')
json_obj = json.loads(json_string)
for tx in json_obj['tx']:
if not 'coinbase' in tx['vin'][0]: #exclude coinbase
assert_equal(tx['txid'] in txs, True)
#check the same but without tx details
json_string = http_get_call(url.hostname, url.port, '/rest/block/notxdetails/'+newblockhash[0]+self.FORMAT_SEPARATOR+'json')
json_obj = json.loads(json_string)
for tx in txs:
assert_equal(tx in json_obj['tx'], True)
#test rest bestblock
bb_hash = self.nodes[0].getbestblockhash()
json_string = http_get_call(url.hostname, url.port, '/rest/chaininfo.json')
json_obj = json.loads(json_string)
assert_equal(json_obj['bestblockhash'], bb_hash)
if __name__ == '__main__':
RESTTest ().main ()
|
mit
|
yanheven/nova
|
nova/tests/unit/image/test_s3.py
|
69
|
11230
|
# Copyright 2011 Isaku Yamahata
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import binascii
import os
import tempfile
import eventlet
import fixtures
from mox3 import mox
from nova.api.ec2 import ec2utils
from nova import context
from nova import db
from nova import exception
from nova.image import s3
from nova import test
from nova.tests.unit.image import fake
ami_manifest_xml = """<?xml version="1.0" ?>
<manifest>
<version>2011-06-17</version>
<bundler>
<name>test-s3</name>
<version>0</version>
<release>0</release>
</bundler>
<machine_configuration>
<architecture>x86_64</architecture>
<block_device_mapping>
<mapping>
<virtual>ami</virtual>
<device>sda1</device>
</mapping>
<mapping>
<virtual>root</virtual>
<device>/dev/sda1</device>
</mapping>
<mapping>
<virtual>ephemeral0</virtual>
<device>sda2</device>
</mapping>
<mapping>
<virtual>swap</virtual>
<device>sda3</device>
</mapping>
</block_device_mapping>
<kernel_id>aki-00000001</kernel_id>
<ramdisk_id>ari-00000001</ramdisk_id>
</machine_configuration>
</manifest>
"""
file_manifest_xml = """<?xml version="1.0" ?>
<manifest>
<image>
<ec2_encrypted_key>foo</ec2_encrypted_key>
<user_encrypted_key>foo</user_encrypted_key>
<ec2_encrypted_iv>foo</ec2_encrypted_iv>
<parts count="1">
<part index="0">
<filename>foo</filename>
</part>
</parts>
</image>
</manifest>
"""
class TestS3ImageService(test.TestCase):
def setUp(self):
super(TestS3ImageService, self).setUp()
self.context = context.RequestContext(None, None)
self.useFixture(fixtures.FakeLogger('boto'))
# set up 3 fixtures to test shows, should have id '1', '2', and '3'
db.s3_image_create(self.context,
'155d900f-4e14-4e4c-a73d-069cbf4541e6')
db.s3_image_create(self.context,
'a2459075-d96c-40d5-893e-577ff92e721c')
db.s3_image_create(self.context,
'76fa36fc-c930-4bf3-8c8a-ea2a2420deb6')
fake.stub_out_image_service(self.stubs)
self.image_service = s3.S3ImageService()
ec2utils.reset_cache()
def tearDown(self):
super(TestS3ImageService, self).tearDown()
fake.FakeImageService_reset()
def _assertEqualList(self, list0, list1, keys):
self.assertEqual(len(list0), len(list1))
key = keys[0]
for x in list0:
self.assertEqual(len(x), len(keys))
self.assertIn(key, x)
for y in list1:
self.assertIn(key, y)
if x[key] == y[key]:
for k in keys:
self.assertEqual(x[k], y[k])
def test_show_cannot_use_uuid(self):
self.assertRaises(exception.ImageNotFound,
self.image_service.show, self.context,
'155d900f-4e14-4e4c-a73d-069cbf4541e6')
def test_show_translates_correctly(self):
self.image_service.show(self.context, '1')
def test_show_translates_image_state_correctly(self):
def my_fake_show(self, context, image_id, **kwargs):
fake_state_map = {
'155d900f-4e14-4e4c-a73d-069cbf4541e6': 'downloading',
'a2459075-d96c-40d5-893e-577ff92e721c': 'failed_decrypt',
'76fa36fc-c930-4bf3-8c8a-ea2a2420deb6': 'available'}
return {'id': image_id,
'name': 'fakeimage123456',
'deleted_at': None,
'deleted': False,
'status': 'active',
'is_public': False,
'container_format': 'raw',
'disk_format': 'raw',
'size': '25165824',
'properties': {'image_state': fake_state_map[image_id]}}
# Override part of the fake image service as well just for
# this test so we can set the image_state to various values
# and test that S3ImageService does the correct mapping for
# us. We can't put fake bad or pending states in the real fake
# image service as it causes other tests to fail
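        # Mapping exercised below (derived from the assertions that follow):
        # 'downloading' -> 'pending', 'failed_decrypt' -> 'failed', 'available' -> 'available'.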
self.stubs.Set(fake._FakeImageService, 'show', my_fake_show)
ret_image = self.image_service.show(self.context, '1')
self.assertEqual(ret_image['properties']['image_state'], 'pending')
ret_image = self.image_service.show(self.context, '2')
self.assertEqual(ret_image['properties']['image_state'], 'failed')
ret_image = self.image_service.show(self.context, '3')
self.assertEqual(ret_image['properties']['image_state'], 'available')
def test_detail(self):
self.image_service.detail(self.context)
def test_s3_create(self):
metadata = {'properties': {
'root_device_name': '/dev/sda1',
'block_device_mapping': [
{'device_name': '/dev/sda1',
'snapshot_id': 'snap-12345678',
'delete_on_termination': True},
{'device_name': '/dev/sda2',
'virtual_name': 'ephemeral0'},
{'device_name': '/dev/sdb0',
'no_device': True}]}}
_manifest, image, image_uuid = self.image_service._s3_parse_manifest(
self.context, metadata, ami_manifest_xml)
ret_image = self.image_service.show(self.context, image['id'])
self.assertIn('properties', ret_image)
properties = ret_image['properties']
self.assertIn('mappings', properties)
mappings = properties['mappings']
expected_mappings = [
{"device": "sda1", "virtual": "ami"},
{"device": "/dev/sda1", "virtual": "root"},
{"device": "sda2", "virtual": "ephemeral0"},
{"device": "sda3", "virtual": "swap"}]
self._assertEqualList(mappings, expected_mappings,
['device', 'virtual'])
self.assertIn('block_device_mapping', properties)
block_device_mapping = properties['block_device_mapping']
expected_bdm = [
{'device_name': '/dev/sda1',
'snapshot_id': 'snap-12345678',
'delete_on_termination': True},
{'device_name': '/dev/sda2',
'virtual_name': 'ephemeral0'},
{'device_name': '/dev/sdb0',
'no_device': True}]
self.assertEqual(block_device_mapping, expected_bdm)
def _initialize_mocks(self):
handle, tempf = tempfile.mkstemp(dir='/tmp')
ignore = mox.IgnoreArg()
mockobj = self.mox.CreateMockAnything()
self.stubs.Set(self.image_service, '_conn', mockobj)
mockobj(ignore).AndReturn(mockobj)
self.stubs.Set(mockobj, 'get_bucket', mockobj)
mockobj(ignore).AndReturn(mockobj)
self.stubs.Set(mockobj, 'get_key', mockobj)
mockobj(ignore).AndReturn(mockobj)
self.stubs.Set(mockobj, 'get_contents_as_string', mockobj)
mockobj().AndReturn(file_manifest_xml)
self.stubs.Set(self.image_service, '_download_file', mockobj)
mockobj(ignore, ignore, ignore).AndReturn(tempf)
self.stubs.Set(binascii, 'a2b_hex', mockobj)
mockobj(ignore).AndReturn('foo')
mockobj(ignore).AndReturn('foo')
self.stubs.Set(self.image_service, '_decrypt_image', mockobj)
mockobj(ignore, ignore, ignore, ignore, ignore).AndReturn(mockobj)
self.stubs.Set(self.image_service, '_untarzip_image', mockobj)
mockobj(ignore, ignore).AndReturn(tempf)
self.mox.ReplayAll()
def test_s3_create_image_locations(self):
image_location_1 = 'testbucket_1/test.img.manifest.xml'
# Use another location that starts with a '/'
image_location_2 = '/testbucket_2/test.img.manifest.xml'
metadata = [{'properties': {'image_location': image_location_1}},
{'properties': {'image_location': image_location_2}}]
for mdata in metadata:
self._initialize_mocks()
image = self.image_service._s3_create(self.context, mdata)
eventlet.sleep()
translated = self.image_service._translate_id_to_uuid(self.context,
image)
uuid = translated['id']
image_service = fake.FakeImageService()
updated_image = image_service.update(self.context, uuid,
{'properties': {'image_state': 'available'}},
purge_props=False)
self.assertEqual(updated_image['properties']['image_state'],
'available')
def test_s3_create_is_public(self):
self._initialize_mocks()
metadata = {'properties': {
'image_location': 'mybucket/my.img.manifest.xml'},
'name': 'mybucket/my.img'}
img = self.image_service._s3_create(self.context, metadata)
eventlet.sleep()
translated = self.image_service._translate_id_to_uuid(self.context,
img)
uuid = translated['id']
image_service = fake.FakeImageService()
updated_image = image_service.update(self.context, uuid,
{'is_public': True}, purge_props=False)
self.assertTrue(updated_image['is_public'])
self.assertEqual(updated_image['status'], 'active')
self.assertEqual(updated_image['properties']['image_state'],
'available')
def test_s3_malicious_tarballs(self):
self.assertRaises(exception.NovaException,
self.image_service._test_for_malicious_tarball,
"/unused", os.path.join(os.path.dirname(__file__), 'abs.tar.gz'))
self.assertRaises(exception.NovaException,
self.image_service._test_for_malicious_tarball,
"/unused", os.path.join(os.path.dirname(__file__), 'rel.tar.gz'))
|
apache-2.0
|
vnsofthe/odoo-dev
|
addons/gbsoft_weixin/WXBizMsgCrypt.py
|
6
|
10637
|
#!/usr/bin/env python
#-*- encoding:utf-8 -*-
""" 对公众平台发送给公众账号的消息加解密示例代码.
@copyright: Copyright (c) 1998-2014 Tencent Inc.
"""
# ------------------------------------------------------------------------
import base64
import string
import random
import hashlib
import time
import struct
from Crypto.Cipher import AES
import xml.etree.cElementTree as ET
import sys
import socket
reload(sys)
import ierror
sys.setdefaultencoding('utf-8')
"""
If importing the Crypto.Cipher module raises ImportError: No module named 'Crypto',
download pycrypto from the official site https://www.dlitz.net/software/pycrypto/
and install it following the "Installation" section of its README.
"""
class FormatException(Exception):
pass
def throw_exception(message, exception_class=FormatException):
"""my define raise exception function"""
raise exception_class(message)
class SHA1:
"""计算公众平台的消息签名接口"""
def getSHA1(self, token, timestamp, nonce, encrypt):
"""用SHA1算法生成安全签名
@param token: 票据
@param timestamp: 时间戳
@param encrypt: 密文
@param nonce: 随机字符串
@return: 安全签名
"""
try:
sortlist = [token, timestamp, nonce, encrypt]
sortlist.sort()
sha = hashlib.sha1()
sha.update("".join(sortlist))
return ierror.WXBizMsgCrypt_OK, sha.hexdigest()
except Exception,e:
print e
return ierror.WXBizMsgCrypt_ComputeSignature_Error, None
class XMLParse:
"""提供提取消息格式中的密文及生成回复消息格式的接口"""
# xml消息模板
AES_TEXT_RESPONSE_TEMPLATE = """<xml>
<Encrypt><![CDATA[%(msg_encrypt)s]]></Encrypt>
<MsgSignature><![CDATA[%(msg_signaturet)s]]></MsgSignature>
<TimeStamp>%(timestamp)s</TimeStamp>
<Nonce><![CDATA[%(nonce)s]]></Nonce>
</xml>"""
def extract(self, xmltext):
"""提取出xml数据包中的加密消息
@param xmltext: 待提取的xml字符串
@return: 提取出的加密消息字符串
"""
try:
xml_tree = ET.fromstring(xmltext)
encrypt = xml_tree.find("Encrypt")
touser_name = xml_tree.find("ToUserName")
return ierror.WXBizMsgCrypt_OK, encrypt.text, touser_name.text
except Exception,e:
print e
return ierror.WXBizMsgCrypt_ParseXml_Error,None,None
def generate(self, encrypt, signature, timestamp, nonce):
"""生成xml消息
@param encrypt: 加密后的消息密文
@param signature: 安全签名
@param timestamp: 时间戳
@param nonce: 随机字符串
@return: 生成的xml字符串
"""
resp_dict = {
'msg_encrypt' : encrypt,
'msg_signaturet': signature,
'timestamp' : timestamp,
'nonce' : nonce,
}
resp_xml = self.AES_TEXT_RESPONSE_TEMPLATE % resp_dict
return resp_xml
class PKCS7Encoder():
"""提供基于PKCS7算法的加解密接口"""
block_size = 32
def encode(self, text):
""" 对需要加密的明文进行填充补位
@param text: 需要进行填充补位操作的明文
@return: 补齐明文字符串
"""
text_length = len(text)
        # work out how many bytes of padding are needed
amount_to_pad = self.block_size - (text_length % self.block_size)
if amount_to_pad == 0:
amount_to_pad = self.block_size
        # character used for the padding
pad = chr(amount_to_pad)
return text + pad * amount_to_pad
def decode(self, decrypted):
"""删除解密后明文的补位字符
@param decrypted: 解密后的明文
@return: 删除补位字符后的明文
"""
pad = ord(decrypted[-1])
if pad<1 or pad >32:
pad = 0
return decrypted[:-pad]
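# Worked example (illustrative, not part of the original file): with block_size = 32,
# a 10-character plaintext needs 22 bytes of padding, so encode('0123456789') appends
# chr(22) * 22; decode() then reads the final byte to know how many characters to strip.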
class Prpcrypt(object):
"""提供接收和推送给公众平台消息的加解密接口"""
def __init__(self,key):
#self.key = base64.b64decode(key+"=")
self.key = key
        # use AES in CBC mode for encryption and decryption
self.mode = AES.MODE_CBC
def encrypt(self,text,corpid):
"""对明文进行加密
@param text: 需要加密的明文
@return: 加密得到的字符串
"""
        # prepend a 16-byte random string to the plaintext
text = self.get_random_str() + struct.pack("I",socket.htonl(len(text))) + text + corpid
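        # Plaintext layout assembled on the line above (before padding):
        # 16 random bytes | 4-byte message length in network byte order | message | corpid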
        # pad the plaintext with the custom PKCS7 padding
pkcs7 = PKCS7Encoder()
text = pkcs7.encode(text)
        # encrypt
cryptor = AES.new(self.key,self.mode,self.key[:16])
try:
ciphertext = cryptor.encrypt(text)
            # base64-encode the encrypted string
return ierror.WXBizMsgCrypt_OK, base64.b64encode(ciphertext)
except Exception,e:
print e
return ierror.WXBizMsgCrypt_EncryptAES_Error,None
def decrypt(self,text,corpid):
"""对解密后的明文进行补位删除
@param text: 密文
@return: 删除填充补位后的明文
"""
try:
cryptor = AES.new(self.key,self.mode,self.key[:16])
            # base64-decode the ciphertext, then decrypt with AES-CBC
plain_text = cryptor.decrypt(base64.b64decode(text))
except Exception,e:
print e
return ierror.WXBizMsgCrypt_DecryptAES_Error,None
try:
pad = ord(plain_text[-1])
            # strip the padding
#pkcs7 = PKCS7Encoder()
#plain_text = pkcs7.encode(plain_text)
            # drop the 16-byte random prefix
content = plain_text[16:-pad]
xml_len = socket.ntohl(struct.unpack("I",content[ : 4])[0])
xml_content = content[4 : xml_len+4]
from_corpid = content[xml_len+4:]
except Exception,e:
print e
return ierror.WXBizMsgCrypt_IllegalBuffer,None
if from_corpid != corpid:
return ierror.WXBizMsgCrypt_ValidateCorpid_Error,None
return 0,xml_content
def get_random_str(self):
""" 随机生成16位字符串
@return: 16位字符串
"""
rule = string.letters + string.digits
str = random.sample(rule, 16)
return "".join(str)
class WXBizMsgCrypt(object):
    # constructor
    # @param sToken: the Token configured by the developer on the public platform
    # @param sEncodingAESKey: the EncodingAESKey configured by the developer on the public platform
    # @param sCorpId: the CorpId of the enterprise account
def __init__(self,sToken,sEncodingAESKey,sCorpId):
try:
self.key = base64.b64decode(sEncodingAESKey+"=")
assert len(self.key) == 32
except:
throw_exception("[error]: EncodingAESKey unvalid !", FormatException)
#return ierror.WXBizMsgCrypt_IllegalAesKey)
self.m_sToken = sToken
self.m_sCorpid = sCorpId
    # verify the URL
    # @param sMsgSignature: signature string, the msg_signature URL parameter
    # @param sTimeStamp: timestamp, the timestamp URL parameter
    # @param sNonce: random string, the nonce URL parameter
    # @param sEchoStr: random string, the echostr URL parameter
    # @param sReplyEchoStr: the decrypted echostr, valid when the return code is 0
    # @return: 0 on success, otherwise the corresponding error code
def VerifyURL(self, sMsgSignature, sTimeStamp, sNonce, sEchoStr):
sha1 = SHA1()
ret,signature = sha1.getSHA1(self.m_sToken, sTimeStamp, sNonce, sEchoStr)
if ret != 0:
return ret, None
if not signature == sMsgSignature:
return ierror.WXBizMsgCrypt_ValidateSignature_Error, None
pc = Prpcrypt(self.key)
ret,sReplyEchoStr = pc.decrypt(sEchoStr,self.m_sCorpid)
return ret,sReplyEchoStr
def EncryptMsg(self, sReplyMsg, sNonce, timestamp = None):
        # encrypt and package the message that the official account replies to the user
        # @param sReplyMsg: the message to reply to the user, as an XML string
        # @param sTimeStamp: timestamp; may be generated locally or taken from the timestamp URL parameter; if None the current time is used
        # @param sNonce: random string; may be generated locally or taken from the nonce URL parameter
        # sEncryptMsg: the encrypted reply that can be sent to the user directly, an XML string containing msg_signature, timestamp, nonce and encrypt,
        # return: 0 and sEncryptMsg on success, otherwise the corresponding error code and None
pc = Prpcrypt(self.key)
ret,encrypt = pc.encrypt(sReplyMsg, self.m_sCorpid)
if ret != 0:
return ret,None
if timestamp is None:
timestamp = str(int(time.time()))
        # generate the secure signature
sha1 = SHA1()
ret,signature = sha1.getSHA1(self.m_sToken, timestamp, sNonce, encrypt)
if ret != 0:
return ret,None
xmlParse = XMLParse()
return ret,xmlParse.generate(encrypt, signature, timestamp, sNonce)
def DecryptMsg(self, sPostData, sMsgSignature, sTimeStamp, sNonce):
        # verify the authenticity of the message and obtain the decrypted plaintext
        # @param sMsgSignature: signature string, the msg_signature URL parameter
        # @param sTimeStamp: timestamp, the timestamp URL parameter
        # @param sNonce: random string, the nonce URL parameter
        # @param sPostData: ciphertext, the body of the POST request
        # xml_content: the decrypted original text, valid when the return code is 0
        # @return: 0 on success, otherwise the corresponding error code
        # verify the secure signature
xmlParse = XMLParse()
ret,encrypt,touser_name = xmlParse.extract(sPostData)
if ret != 0:
return ret, None
sha1 = SHA1()
ret,signature = sha1.getSHA1(self.m_sToken, sTimeStamp, sNonce, encrypt)
if ret != 0:
return ret, None
if not signature == sMsgSignature:
return ierror.WXBizMsgCrypt_ValidateSignature_Error, None
pc = Prpcrypt(self.key)
ret,xml_content = pc.decrypt(encrypt,self.m_sCorpid)
return ret,xml_content
|
agpl-3.0
|
kapilt/ansible
|
test/units/playbook/test_play.py
|
61
|
4064
|
# (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.compat.tests import unittest
from ansible.compat.tests.mock import patch, MagicMock
from ansible.errors import AnsibleError, AnsibleParserError
from ansible.playbook.block import Block
from ansible.playbook.play import Play
from ansible.playbook.role import Role
from units.mock.loader import DictDataLoader
class TestPlay(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_empty_play(self):
p = Play.load(dict())
self.assertEqual(str(p), '')
def test_basic_play(self):
p = Play.load(dict(
name="test play",
hosts=['foo'],
gather_facts=False,
connection='local',
remote_user="root",
sudo=True,
sudo_user="testing",
))
    def test_play_with_user(self):
p = Play.load(dict(
name="test play",
hosts=['foo'],
user="testing",
gather_facts=False,
))
self.assertEqual(p.remote_user, "testing")
def test_play_with_user_conflict(self):
play_data = dict(
name="test play",
hosts=['foo'],
user="testing",
remote_user="testing",
)
self.assertRaises(AnsibleParserError, Play.load, play_data)
def test_play_with_tasks(self):
p = Play.load(dict(
name="test play",
hosts=['foo'],
gather_facts=False,
tasks=[dict(action='shell echo "hello world"')],
))
def test_play_with_handlers(self):
p = Play.load(dict(
name="test play",
hosts=['foo'],
gather_facts=False,
handlers=[dict(action='shell echo "hello world"')],
))
def test_play_with_pre_tasks(self):
p = Play.load(dict(
name="test play",
hosts=['foo'],
gather_facts=False,
pre_tasks=[dict(action='shell echo "hello world"')],
))
def test_play_with_post_tasks(self):
p = Play.load(dict(
name="test play",
hosts=['foo'],
gather_facts=False,
post_tasks=[dict(action='shell echo "hello world"')],
))
def test_play_with_roles(self):
fake_loader = DictDataLoader({
'/etc/ansible/roles/foo/tasks.yml': """
- name: role task
shell: echo "hello world"
""",
})
mock_var_manager = MagicMock()
mock_var_manager.get_vars.return_value = dict()
p = Play.load(dict(
name="test play",
hosts=['foo'],
gather_facts=False,
roles=['foo'],
), loader=fake_loader, variable_manager=mock_var_manager)
blocks = p.compile()
def test_play_compile(self):
p = Play.load(dict(
name="test play",
hosts=['foo'],
gather_facts=False,
tasks=[dict(action='shell echo "hello world"')],
))
blocks = p.compile()
# with a single block, there will still be three
# implicit meta flush_handler blocks inserted
self.assertEqual(len(blocks), 4)
|
gpl-3.0
|
xodus7/tensorflow
|
tensorflow/contrib/distributions/python/kernel_tests/sample_stats_test.py
|
24
|
18846
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Sample Stats Ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.distributions.python.ops import sample_stats
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import spectral_ops_test_util
from tensorflow.python.platform import test
rng = np.random.RandomState(0)
class _AutoCorrelationTest(object):
@property
def use_static_shape(self):
raise NotImplementedError("Subclass failed to implement `use_static_shape`")
@property
def dtype(self):
raise NotImplementedError("Subclass failed to implement `dtype`.")
def test_constant_sequence_axis_0_max_lags_none_center_false(self):
x_ = np.array([[0., 0., 0.],
[1., 1., 1.]]).astype(self.dtype)
x_ph = array_ops.placeholder_with_default(
input=x_,
shape=x_.shape if self.use_static_shape else None)
with spectral_ops_test_util.fft_kernel_label_map():
with self.cached_session() as sess:
# Setting normalize = True means we divide by zero.
auto_corr = sample_stats.auto_correlation(
x_ph, axis=1, center=False, normalize=False)
if self.use_static_shape:
self.assertEqual((2, 3), auto_corr.shape)
auto_corr_ = sess.run(auto_corr)
self.assertAllClose(
[[0., 0., 0.],
[1., 1., 1.]], auto_corr_)
def test_constant_sequence_axis_0_max_lags_none_center_true(self):
x_ = np.array([[0., 0., 0.],
[1., 1., 1.]]).astype(self.dtype)
x_ph = array_ops.placeholder_with_default(
input=x_,
shape=x_.shape if self.use_static_shape else None)
with spectral_ops_test_util.fft_kernel_label_map():
with self.cached_session() as sess:
# Setting normalize = True means we divide by zero.
auto_corr = sample_stats.auto_correlation(
x_ph, axis=1, normalize=False, center=True)
if self.use_static_shape:
self.assertEqual((2, 3), auto_corr.shape)
auto_corr_ = sess.run(auto_corr)
self.assertAllClose(
[[0., 0., 0.],
[0., 0., 0.]], auto_corr_)
def check_results_versus_brute_force(
self, x, axis, max_lags, center, normalize):
"""Compute auto-correlation by brute force, then compare to tf result."""
    # Brute-force auto-corr -- avoiding fft and transpositions.
axis_len = x.shape[axis]
if max_lags is None:
max_lags = axis_len - 1
else:
max_lags = min(axis_len - 1, max_lags)
auto_corr_at_lag = []
if center:
x -= x.mean(axis=axis, keepdims=True)
for m in range(max_lags + 1):
auto_corr_at_lag.append((
np.take(x, indices=range(0, axis_len - m), axis=axis) *
np.conj(np.take(x, indices=range(m, axis_len), axis=axis))
).mean(axis=axis, keepdims=True))
rxx = np.concatenate(auto_corr_at_lag, axis=axis)
if normalize:
rxx /= np.take(rxx, [0], axis=axis)
x_ph = array_ops.placeholder_with_default(
x, shape=x.shape if self.use_static_shape else None)
with spectral_ops_test_util.fft_kernel_label_map():
with self.cached_session():
auto_corr = sample_stats.auto_correlation(
x_ph, axis=axis, max_lags=max_lags, center=center,
normalize=normalize)
if self.use_static_shape:
output_shape = list(x.shape)
output_shape[axis] = max_lags + 1
self.assertAllEqual(output_shape, auto_corr.shape)
self.assertAllClose(rxx, auto_corr.eval(), rtol=1e-5, atol=1e-5)
def test_axis_n1_center_false_max_lags_none(self):
x = rng.randn(2, 3, 4).astype(self.dtype)
if self.dtype in [np.complex64]:
x = 1j * rng.randn(2, 3, 4).astype(self.dtype)
self.check_results_versus_brute_force(
x, axis=-1, max_lags=None, center=False, normalize=False)
def test_axis_n2_center_false_max_lags_none(self):
x = rng.randn(3, 4, 5).astype(self.dtype)
if self.dtype in [np.complex64]:
x = 1j * rng.randn(3, 4, 5).astype(self.dtype)
self.check_results_versus_brute_force(
x, axis=-2, max_lags=None, center=False, normalize=False)
def test_axis_n1_center_false_max_lags_none_normalize_true(self):
x = rng.randn(2, 3, 4).astype(self.dtype)
if self.dtype in [np.complex64]:
x = 1j * rng.randn(2, 3, 4).astype(self.dtype)
self.check_results_versus_brute_force(
x, axis=-1, max_lags=None, center=False, normalize=True)
def test_axis_n2_center_false_max_lags_none_normalize_true(self):
x = rng.randn(3, 4, 5).astype(self.dtype)
if self.dtype in [np.complex64]:
x = 1j * rng.randn(3, 4, 5).astype(self.dtype)
self.check_results_versus_brute_force(
x, axis=-2, max_lags=None, center=False, normalize=True)
def test_axis_0_center_true_max_lags_none(self):
x = rng.randn(3, 4, 5).astype(self.dtype)
if self.dtype in [np.complex64]:
x = 1j * rng.randn(3, 4, 5).astype(self.dtype)
self.check_results_versus_brute_force(
x, axis=0, max_lags=None, center=True, normalize=False)
def test_axis_2_center_true_max_lags_1(self):
x = rng.randn(3, 4, 5).astype(self.dtype)
if self.dtype in [np.complex64]:
x = 1j * rng.randn(3, 4, 5).astype(self.dtype)
self.check_results_versus_brute_force(
x, axis=2, max_lags=1, center=True, normalize=False)
def test_axis_2_center_true_max_lags_100(self):
    # There are fewer than 100 elements in axis 2, so expect we get back an array
# the same size as x, despite having asked for 100 lags.
x = rng.randn(3, 4, 5).astype(self.dtype)
if self.dtype in [np.complex64]:
x = 1j * rng.randn(3, 4, 5).astype(self.dtype)
self.check_results_versus_brute_force(
x, axis=2, max_lags=100, center=True, normalize=False)
def test_long_orthonormal_sequence_has_corr_length_0(self):
l = 10000
x = rng.randn(l).astype(self.dtype)
x_ph = array_ops.placeholder_with_default(
x, shape=(l,) if self.use_static_shape else None)
with spectral_ops_test_util.fft_kernel_label_map():
with self.cached_session():
rxx = sample_stats.auto_correlation(
x_ph, max_lags=l // 2, center=True, normalize=False)
if self.use_static_shape:
self.assertAllEqual((l // 2 + 1,), rxx.shape)
rxx_ = rxx.eval()
        # OSS CPU FFT has some accuracy issues and is not the most accurate,
        # so this tolerance is a bit loose.
self.assertAllClose(1., rxx_[0], rtol=0.05)
# The maximal error in the rest of the sequence is not great.
self.assertAllClose(np.zeros(l // 2), rxx_[1:], atol=0.1)
# The mean error in the rest is ok, actually 0.008 when I tested it.
self.assertLess(np.abs(rxx_[1:]).mean(), 0.02)
def test_step_function_sequence(self):
# x jumps to new random value every 10 steps. So correlation length = 10.
x = (rng.randint(-10, 10, size=(1000, 1))
* np.ones((1, 10))).ravel().astype(self.dtype)
x_ph = array_ops.placeholder_with_default(
x, shape=(1000 * 10,) if self.use_static_shape else None)
with spectral_ops_test_util.fft_kernel_label_map():
with self.cached_session():
rxx = sample_stats.auto_correlation(
x_ph, max_lags=1000 * 10 // 2, center=True, normalize=False)
if self.use_static_shape:
self.assertAllEqual((1000 * 10 // 2 + 1,), rxx.shape)
rxx_ = rxx.eval()
rxx_ /= rxx_[0]
# Expect positive correlation for the first 10 lags, then significantly
# smaller negative.
self.assertGreater(rxx_[:10].min(), 0)
self.assertGreater(rxx_[9], 5 * rxx_[10:20].mean())
# RXX should be decreasing for the first 10 lags.
diff = np.diff(rxx_)
self.assertLess(diff[:10].max(), 0)
def test_normalization(self):
l = 10000
x = 3 * rng.randn(l).astype(self.dtype)
x_ph = array_ops.placeholder_with_default(
x, shape=(l,) if self.use_static_shape else None)
with spectral_ops_test_util.fft_kernel_label_map():
with self.cached_session():
rxx = sample_stats.auto_correlation(
x_ph, max_lags=l // 2, center=True, normalize=True)
if self.use_static_shape:
self.assertAllEqual((l // 2 + 1,), rxx.shape)
rxx_ = rxx.eval()
# Note that RXX[0] = 1, despite the fact that E[X^2] = 9, and this is
# due to normalize=True.
        # OSS CPU FFT has some accuracy issues and is not the most accurate,
        # so this tolerance is a bit loose.
self.assertAllClose(1., rxx_[0], rtol=0.05)
# The maximal error in the rest of the sequence is not great.
self.assertAllClose(np.zeros(l // 2), rxx_[1:], atol=0.1)
# The mean error in the rest is ok, actually 0.008 when I tested it.
self.assertLess(np.abs(rxx_[1:]).mean(), 0.02)
class AutoCorrelationTestStaticShapeFloat32(test.TestCase,
_AutoCorrelationTest):
@property
def dtype(self):
return np.float32
@property
def use_static_shape(self):
return True
class AutoCorrelationTestStaticShapeComplex64(test.TestCase,
_AutoCorrelationTest):
@property
def dtype(self):
return np.complex64
@property
def use_static_shape(self):
return True
class AutoCorrelationTestDynamicShapeFloat32(test.TestCase,
_AutoCorrelationTest):
@property
def dtype(self):
return np.float32
@property
def use_static_shape(self):
return False
class PercentileTestWithLowerInterpolation(test.TestCase):
_interpolation = "lower"
def test_one_dim_odd_input(self):
x = [1., 5., 3., 2., 4.]
for q in [0, 10, 25, 49.9, 50, 50.01, 90, 95, 100]:
expected_percentile = np.percentile(
x, q=q, interpolation=self._interpolation, axis=0)
with self.cached_session():
pct = sample_stats.percentile(
x, q=q, interpolation=self._interpolation, axis=[0])
self.assertAllEqual((), pct.get_shape())
self.assertAllClose(expected_percentile, pct.eval())
def test_one_dim_even_input(self):
x = [1., 5., 3., 2., 4., 5.]
for q in [0, 10, 25, 49.9, 50, 50.01, 90, 95, 100]:
expected_percentile = np.percentile(
x, q=q, interpolation=self._interpolation)
with self.cached_session():
pct = sample_stats.percentile(x, q=q, interpolation=self._interpolation)
self.assertAllEqual((), pct.get_shape())
self.assertAllClose(expected_percentile, pct.eval())
def test_two_dim_odd_input_axis_0(self):
x = np.array([[-1., 50., -3.5, 2., -1], [0., 0., 3., 2., 4.]]).T
for q in [0, 10, 25, 49.9, 50, 50.01, 90, 95, 100]:
expected_percentile = np.percentile(
x, q=q, interpolation=self._interpolation, axis=0)
with self.cached_session():
# Get dim 1 with negative and positive indices.
pct_neg_index = sample_stats.percentile(
x, q=q, interpolation=self._interpolation, axis=[0])
pct_pos_index = sample_stats.percentile(
x, q=q, interpolation=self._interpolation, axis=[0])
self.assertAllEqual((2,), pct_neg_index.get_shape())
self.assertAllEqual((2,), pct_pos_index.get_shape())
self.assertAllClose(expected_percentile, pct_neg_index.eval())
self.assertAllClose(expected_percentile, pct_pos_index.eval())
def test_two_dim_even_axis_0(self):
x = np.array([[1., 2., 4., 50.], [1., 2., -4., 5.]]).T
for q in [0, 10, 25, 49.9, 50, 50.01, 90, 95, 100]:
expected_percentile = np.percentile(
x, q=q, interpolation=self._interpolation, axis=0)
with self.cached_session():
pct = sample_stats.percentile(
x, q=q, interpolation=self._interpolation, axis=[0])
self.assertAllEqual((2,), pct.get_shape())
self.assertAllClose(expected_percentile, pct.eval())
def test_two_dim_even_input_and_keep_dims_true(self):
x = np.array([[1., 2., 4., 50.], [1., 2., -4., 5.]]).T
for q in [0, 10, 25, 49.9, 50, 50.01, 90, 95, 100]:
expected_percentile = np.percentile(
x, q=q, interpolation=self._interpolation, keepdims=True, axis=0)
with self.cached_session():
pct = sample_stats.percentile(
x,
q=q,
interpolation=self._interpolation,
keep_dims=True,
axis=[0])
self.assertAllEqual((1, 2), pct.get_shape())
self.assertAllClose(expected_percentile, pct.eval())
def test_four_dimensional_input(self):
x = rng.rand(2, 3, 4, 5)
for axis in [None, 0, 1, -2, (0,), (-1,), (-1, 1), (3, 1), (-3, 0)]:
expected_percentile = np.percentile(
x, q=0.77, interpolation=self._interpolation, axis=axis)
with self.cached_session():
pct = sample_stats.percentile(
x,
q=0.77,
interpolation=self._interpolation,
axis=axis)
self.assertAllEqual(expected_percentile.shape, pct.get_shape())
self.assertAllClose(expected_percentile, pct.eval())
def test_four_dimensional_input_and_keepdims(self):
x = rng.rand(2, 3, 4, 5)
for axis in [None, 0, 1, -2, (0,), (-1,), (-1, 1), (3, 1), (-3, 0)]:
expected_percentile = np.percentile(
x,
q=0.77,
interpolation=self._interpolation,
axis=axis,
keepdims=True)
with self.cached_session():
pct = sample_stats.percentile(
x,
q=0.77,
interpolation=self._interpolation,
axis=axis,
keep_dims=True)
self.assertAllEqual(expected_percentile.shape, pct.get_shape())
self.assertAllClose(expected_percentile, pct.eval())
def test_four_dimensional_input_x_static_ndims_but_dynamic_sizes(self):
x = rng.rand(2, 3, 4, 5)
x_ph = array_ops.placeholder(dtypes.float64, shape=[None, None, None, None])
for axis in [None, 0, 1, -2, (0,), (-1,), (-1, 1), (3, 1), (-3, 0)]:
expected_percentile = np.percentile(
x, q=0.77, interpolation=self._interpolation, axis=axis)
with self.cached_session():
pct = sample_stats.percentile(
x_ph,
q=0.77,
interpolation=self._interpolation,
axis=axis)
self.assertAllClose(expected_percentile, pct.eval(feed_dict={x_ph: x}))
def test_four_dimensional_input_and_keepdims_x_static_ndims_dynamic_sz(self):
x = rng.rand(2, 3, 4, 5)
x_ph = array_ops.placeholder(dtypes.float64, shape=[None, None, None, None])
for axis in [None, 0, 1, -2, (0,), (-1,), (-1, 1), (3, 1), (-3, 0)]:
expected_percentile = np.percentile(
x,
q=0.77,
interpolation=self._interpolation,
axis=axis,
keepdims=True)
with self.cached_session():
pct = sample_stats.percentile(
x_ph,
q=0.77,
interpolation=self._interpolation,
axis=axis,
keep_dims=True)
self.assertAllClose(expected_percentile, pct.eval(feed_dict={x_ph: x}))
def test_with_integer_dtype(self):
x = [1, 5, 3, 2, 4]
for q in [0, 10, 25, 49.9, 50, 50.01, 90, 95, 100]:
expected_percentile = np.percentile(
x, q=q, interpolation=self._interpolation)
with self.cached_session():
pct = sample_stats.percentile(x, q=q, interpolation=self._interpolation)
self.assertEqual(dtypes.int32, pct.dtype)
self.assertAllEqual((), pct.get_shape())
self.assertAllClose(expected_percentile, pct.eval())
class PercentileTestWithHigherInterpolation(
PercentileTestWithLowerInterpolation):
_interpolation = "higher"
class PercentileTestWithNearestInterpolation(test.TestCase):
"""Test separately because np.round and tf.round make different choices."""
_interpolation = "nearest"
def test_one_dim_odd_input(self):
x = [1., 5., 3., 2., 4.]
for q in [0, 10.1, 25.1, 49.9, 50.1, 50.01, 89, 100]:
expected_percentile = np.percentile(
x, q=q, interpolation=self._interpolation)
with self.cached_session():
pct = sample_stats.percentile(x, q=q, interpolation=self._interpolation)
self.assertAllEqual((), pct.get_shape())
self.assertAllClose(expected_percentile, pct.eval())
def test_one_dim_even_input(self):
x = [1., 5., 3., 2., 4., 5.]
for q in [0, 10.1, 25.1, 49.9, 50.1, 50.01, 89, 100]:
expected_percentile = np.percentile(
x, q=q, interpolation=self._interpolation)
with self.cached_session():
pct = sample_stats.percentile(x, q=q, interpolation=self._interpolation)
self.assertAllEqual((), pct.get_shape())
self.assertAllClose(expected_percentile, pct.eval())
def test_invalid_interpolation_raises(self):
x = [1., 5., 3., 2., 4.]
with self.assertRaisesRegexp(ValueError, "interpolation"):
sample_stats.percentile(x, q=0.5, interpolation="bad")
def test_vector_q_raises_static(self):
x = [1., 5., 3., 2., 4.]
with self.assertRaisesRegexp(ValueError, "Expected.*ndims"):
sample_stats.percentile(x, q=[0.5])
def test_vector_q_raises_dynamic(self):
x = [1., 5., 3., 2., 4.]
q_ph = array_ops.placeholder(dtypes.float32)
pct = sample_stats.percentile(x, q=q_ph, validate_args=True)
with self.cached_session():
with self.assertRaisesOpError("rank"):
pct.eval(feed_dict={q_ph: [0.5]})
def test_finds_max_of_long_array(self):
# d - 1 == d in float32 and d = 3e7.
# So this test only passes if we use double for the percentile indices.
# If float is used, it fails with InvalidArgumentError about an index out of
# bounds.
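    # Concretely, np.float32(3e7) - np.float32(1) == np.float32(3e7), since the
    # float32 spacing near 3e7 is 2.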
x = math_ops.linspace(0., 3e7, num=int(3e7))
with self.cached_session():
minval = sample_stats.percentile(x, q=0, validate_args=True)
self.assertAllEqual(0, minval.eval())
if __name__ == "__main__":
test.main()
|
apache-2.0
|
MobinRanjbar/hue
|
desktop/core/ext-py/requests-2.6.0/requests/__init__.py
|
151
|
1861
|
# -*- coding: utf-8 -*-
# __
# /__) _ _ _ _ _/ _
# / ( (- (/ (/ (- _) / _)
# /
"""
requests HTTP library
~~~~~~~~~~~~~~~~~~~~~
Requests is an HTTP library, written in Python, for human beings. Basic GET
usage:
>>> import requests
>>> r = requests.get('https://www.python.org')
>>> r.status_code
200
>>> 'Python is a programming language' in r.content
True
... or POST:
>>> payload = dict(key1='value1', key2='value2')
>>> r = requests.post('http://httpbin.org/post', data=payload)
>>> print(r.text)
{
...
"form": {
"key2": "value2",
"key1": "value1"
},
...
}
The other HTTP methods are supported - see `requests.api`. Full documentation
is at <http://python-requests.org>.
:copyright: (c) 2015 by Kenneth Reitz.
:license: Apache 2.0, see LICENSE for more details.
"""
__title__ = 'requests'
__version__ = '2.6.0'
__build__ = 0x020503
__author__ = 'Kenneth Reitz'
__license__ = 'Apache 2.0'
__copyright__ = 'Copyright 2015 Kenneth Reitz'
# Attempt to enable urllib3's SNI support, if possible
try:
from .packages.urllib3.contrib import pyopenssl
pyopenssl.inject_into_urllib3()
except ImportError:
pass
from . import utils
from .models import Request, Response, PreparedRequest
from .api import request, get, head, post, patch, put, delete, options
from .sessions import session, Session
from .status_codes import codes
from .exceptions import (
RequestException, Timeout, URLRequired,
TooManyRedirects, HTTPError, ConnectionError
)
# Set default logging handler to avoid "No handler found" warnings.
import logging
try: # Python 2.7+
from logging import NullHandler
except ImportError:
class NullHandler(logging.Handler):
def emit(self, record):
pass
logging.getLogger(__name__).addHandler(NullHandler())
|
apache-2.0
|
unseenlaser/machinekit
|
src/emc/usr_intf/gscreen/mdi.py
|
36
|
13854
|
# Touchy is Copyright (c) 2009 Chris Radek <[email protected]>
#
# Touchy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# Touchy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# self.mcodes = (0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 30, 48, 49, 50, 51,
# 52, 53, 60, 61, 62, 63, 64, 65, 66, 67, 68)
#
# self.gcodes = (0, 10, 20, 30, 40, 50, 51, 52, 53, 70, 80, 100,
# 170, 171, 180, 181, 190, 191, 200, 210, 280, 281,
# 300, 301, 330, 331, 382, 383, 384, 385, 400, 410,
# 411, 420, 421, 430, 431, 490, 530, 540, 550, 560,
# 570, 580, 590, 591, 592, 593, 610, 611, 640, 730,
# 760, 800, 810, 820, 830, 840, 850, 860, 870, 880,
# 890, 900, 901, 910, 911, 920, 921, 922, 923, 930,
# 940, 950, 960, 970, 980, 990)
class mdi:
def __init__(self, emc):
self.clear()
self.emc = emc
self.emcstat = emc.stat()
self.emccommand = emc.command()
self.emcstat.poll()
am = self.emcstat.axis_mask
self.axes = []
self.polar = 0
axisnames = ['X', 'Y', 'Z', 'A', 'B', 'C', 'U', 'V', 'W']
for i in range(9):
if am & (1<<i):
self.axes.append(axisnames[i])
self.gcode = 'M2'
self.codes = {
'M3' : [_('Spindle CW'), 'S'],
'M4' : [_('Spindle CCW'), 'S'],
'M6' : [_('Tool change'), 'T'],
'M66' : [_('Input control'), 'P', 'E', 'L', 'Q'],
# 'A' means 'the axes'
'G0' : [_('Straight rapid'), 'A'],
'G00' : [_('Straight rapid'), 'A'],
'G1' : [_('Straight feed'), 'A', 'F'],
'G01' : [_('Straight feed'), 'A', 'F'],
'G2' : [_('Arc CW'), 'A', 'I', 'J', 'K', 'R', 'F'],
'G02' : [_('Arc CW'), 'A', 'I', 'J', 'K', 'R', 'F'],
'G3' : [_('Arc CCW'), 'A', 'I', 'J', 'K', 'R', 'F'],
'G03' : [_('Arc CCW'), 'A', 'I', 'J', 'K', 'R', 'F'],
'G4' : [_('Dwell'), 'P'],
'G04' : [_('Dwell'), 'P'],
'G10' : [_('Setup'), 'L', 'P', 'A', 'Q', 'R'],
'G33' : [_('Spindle synchronized feed'), 'A', 'K'],
'G33.1' : [_('Rigid tap'), 'Z', 'K'],
'G38.2' : [_('Probe'), 'A', 'F'],
'G38.3' : [_('Probe'), 'A', 'F'],
'G38.4' : [_('Probe'), 'A', 'F'],
'G38.5' : [_('Probe'), 'A', 'F'],
'G41' : [_('Radius compensation left'), 'D'],
'G42' : [_('Radius compensation right'), 'D'],
'G41.1' : [_('Radius compensation left, immediate'), 'D', 'L'],
'G42.1' : [_('Radius compensation right, immediate'), 'D', 'L'],
'G43' : [_('Tool length offset'), 'H'],
'G43.1' : [_('Tool length offset immediate'), 'I', 'K'],
'G53' : [_('Motion in unoffset coordinates'), 'G', 'A', 'F'],
'G64' : [_('Continuous mode'), 'P'],
'G76' : [_('Thread'), 'Z', 'P', 'I', 'J', 'K', 'R', 'Q', 'H', 'E', 'L'],
'G81' : [_('Drill'), 'A', 'R', 'L', 'F'],
'G82' : [_('Drill with dwell'), 'A', 'R', 'L', 'P', 'F'],
'G83' : [_('Peck drill'), 'A', 'R', 'L', 'Q', 'F'],
'G73' : [_('Chip-break drill'), 'A', 'R', 'L', 'Q', 'F'],
'G85' : [_('Bore'), 'A', 'R', 'L', 'F'],
'G89' : [_('Bore with dwell'), 'A', 'R', 'L', 'P', 'F'],
'G92' : [_('Offset all coordinate systems'), 'A'],
'G96' : [_('CSS Mode'), 'S', 'D'],
}
self.ocodes = []
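    # Macros are exposed as O-word subroutine calls of the form
    # "O<name> call [arg1] [arg2] ...".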
def add_macros(self, macros):
for m in macros:
words = m.split()
call = "O<%s> call" % words[0]
args = [''] + [w + ' ' for w in words[1:]]
self.ocodes.append(call)
self.codes[call] = args
def get_description(self, gcode):
return self.codes[gcode][0]
def get_words(self, gcode):
self.gcode = gcode
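        # User-defined M-codes (M100-M199) always take the P and Q words.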
if gcode[0] == 'M' and gcode.find(".") == -1 and int(gcode[1:]) >= 100 and int(gcode[1:]) <= 199:
return ['P', 'Q']
        if gcode not in self.codes:
return []
# strip description
words = self.codes[gcode][1:]
# replace A with the real axis names
if 'A' in words:
i = words.index('A')
words = words[:i] + self.axes + words[i+1:]
if self.polar and 'X' in self.axes and 'Y' in self.axes:
words[self.axes.index('X')] = '@'
words[self.axes.index('Y')] = '^'
return words
def clear(self):
self.words = {}
def set_word(self, word, value):
self.words[word] = value
def set_polar(self, p):
        self.polar = p
def issue(self):
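        # Assemble the full MDI command from the selected code and any entered
        # word values, then send it in MDI mode.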
m = self.gcode
if m.lower().startswith('o'):
codes = self.codes[m]
for code in self.codes[m][1:]:
v = self.words[code] or "0"
m = m + " [%s]" % v
else:
w = [i for i in self.words if len(self.words.get(i)) > 0]
if '@' in w:
m += '@' + self.words.get('@')
w.remove('@')
if '^' in w:
m += '^' + self.words.get('^')
w.remove('^')
for i in w:
if len(self.words.get(i)) > 0:
m += i + self.words.get(i)
self.emcstat.poll()
if self.emcstat.task_mode != self.emc.MODE_MDI:
self.emccommand.mode(self.emc.MODE_MDI)
self.emccommand.wait_complete()
self.emccommand.mdi(m)
def set_tool_touchoff(self,tool,axis,value):
m = "G10 L10 P%d %s%f"%(tool,axis,value)
self.emcstat.poll()
if self.emcstat.task_mode != self.emc.MODE_MDI:
self.emccommand.mode(self.emc.MODE_MDI)
self.emccommand.wait_complete()
self.emccommand.mdi(m)
self.emccommand.wait_complete()
self.emccommand.mdi("g43")
def set_axis_origin(self,axis,value):
m = "G10 L20 P0 %s%f"%(axis,value)
self.emcstat.poll()
if self.emcstat.task_mode != self.emc.MODE_MDI:
self.emccommand.mode(self.emc.MODE_MDI)
self.emccommand.wait_complete()
self.emccommand.mdi(m)
def go_to_position(self,axis,position,feedrate):
m = "G1 %s %f F%f"%(axis,position,feedrate)
self.emcstat.poll()
if self.emcstat.task_mode != self.emc.MODE_MDI:
self.emccommand.mode(self.emc.MODE_MDI)
self.emccommand.wait_complete()
self.emccommand.mdi(m)
def set_spindle_speed(self,value):
m = "s %f"%(value)
self.emcstat.poll()
if self.emcstat.task_mode != self.emc.MODE_MDI:
self.emccommand.mode(self.emc.MODE_MDI)
self.emccommand.wait_complete()
self.emccommand.mdi(m)
def set_user_system(self,value):
m = "g %f"%(value)
self.emcstat.poll()
if self.emcstat.task_mode != self.emc.MODE_MDI:
self.emccommand.mode(self.emc.MODE_MDI)
self.emccommand.wait_complete()
self.emccommand.mdi(m)
def index_tool(self,toolnumber):
m = "T %f M6"%(toolnumber)
self.emcstat.poll()
if self.emcstat.task_mode != self.emc.MODE_MDI:
self.emccommand.mode(self.emc.MODE_MDI)
self.emccommand.wait_complete()
self.emccommand.mdi("m6 T %f"%(toolnumber))
self.emccommand.mdi("g43 h%f"%(toolnumber))
def arbitrary_mdi(self,command):
if self.emcstat.task_mode != self.emc.MODE_MDI:
self.emccommand.mode(self.emc.MODE_MDI)
self.emccommand.wait_complete()
self.emccommand.mdi(command)
class mdi_control:
def __init__(self, gtk, emc, labels, eventboxes):
self.labels = labels
self.eventboxes = eventboxes
self.numlabels = len(labels)
self.numwords = 1
self.selected = 0
self.gtk = gtk
self.mdi = mdi(emc)
#for i in range(self.numlabels):
# self.not_editing(i)
#self.editing(self.selected)
#self.set_text("G")
def mdi_is_reading(self):
self.mdi.emcstat.poll()
if self.mdi.emcstat.interp_state == self.mdi.emc.INTERP_READING:
return True
return False
def set_mdi_mode(self):
self.mdi.emcstat.poll()
if self.mdi.emcstat.task_mode != self.mdi.emc.MODE_MDI:
self.mdi.emccommand.mode(self.mdi.emc.MODE_MDI)
self.mdi.emccommand.wait_complete()
def set_axis(self,axis,value):
premode = self.mdi.emcstat.task_mode
self.mdi.set_axis_origin(axis,value)
self.mdi.emccommand.mode(premode)
self.mdi.emccommand.wait_complete()
def touchoff(self,tool,axis,value):
premode = self.mdi.emcstat.task_mode
self.mdi.set_tool_touchoff(tool,axis,value)
self.mdi.emccommand.mode(premode)
self.mdi.emccommand.wait_complete()
def set_spindle_speed(self,value):
self.mdi.set_spindle_speed(value)
def go_to_position(self,axis,position,feedrate):
self.mdi.go_to_position(axis,position,feedrate)
def set_user_system(self,system):
print "set user system to :G",system
premode = self.mdi.emcstat.task_mode
self.mdi.set_user_system(system)
self.mdi.emccommand.mode(premode)
self.mdi.emccommand.wait_complete()
def index_tool(self,toolnumber):
print "set tool number to :T",toolnumber
premode = self.mdi.emcstat.task_mode
self.mdi.index_tool(toolnumber)
#self.mdi.emccommand.mode(premode)
#self.mdi.emccommand.wait_complete()
def user_command(self,command):
premode = self.mdi.emcstat.task_mode
self.mdi.arbitrary_mdi(command)
#self.mdi.emccommand.mode(premode)
#self.mdi.emccommand.wait_complete()
def not_editing(self, n):
e = self.eventboxes[n]
e.modify_bg(self.gtk.STATE_NORMAL, self.gtk.gdk.color_parse("#ccc"))
def editing(self, n):
self.not_editing(self.selected)
self.selected = n
e = self.eventboxes[n]
e.modify_bg(self.gtk.STATE_NORMAL, self.gtk.gdk.color_parse("#fff"))
def get_text(self):
w = self.labels[self.selected]
return w.get_text()
def set_text(self, t, n = -1):
if n == -1: n = self.selected
w = self.labels[n]
w.set_text(t)
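        # For word labels (n > 0), split the leading letter(s) from the numeric
        # tail and record the tail as the word's value.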
if n > 0:
head = t.rstrip("0123456789.-")
tail = t[len(head):]
self.mdi.set_word(head, tail)
if len(t) < 2:
w.set_alignment(1.0, 0.5)
else:
w.set_alignment(0.0, 0.5)
def clear(self, b):
t = self.get_text()
self.set_text(t.rstrip("0123456789.-"))
def back(self, b):
t = self.get_text()
if t[-1:] in "0123456789.-":
self.set_text(t[:-1])
def fill_out(self):
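        # When the code cell is selected, look up the words for the entered code
        # and populate the remaining labels with them.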
if self.selected == 0:
w = self.mdi.get_words(self.get_text())
self.numwords = len(w)
for i in range(1,self.numlabels):
if i <= len(w):
self.set_text(w[i-1], i)
else:
self.set_text("", i)
def next(self, b):
        self.fill_out()
if self.numwords > 0:
self.editing(max(1,(self.selected+1) % (self.numwords+1)))
def ok(self, b):
        self.fill_out()
self.mdi.issue()
def decimal(self, b):
t = self.get_text()
if t.find(".") == -1:
self.set_text(t + ".")
def minus(self, b):
t = self.get_text()
if self.selected > 0:
head = t.rstrip("0123456789.-")
tail = t[len(head):]
if tail.find("-") == -1:
self.set_text(head + "-" + tail)
else:
self.set_text(head + tail[1:])
def keypad(self, b):
t = self.get_text()
num = b.get_name()
self.set_text(t + num)
def gp(self, b):
self.g(b, "G", 1)
def g(self, b, code="G", polar=0):
self.mdi.set_polar(polar)
self.set_text(code, 0)
for i in range(1, self.numlabels):
self.set_text("", i)
self.editing(0)
self.mdi.clear()
def m(self, b):
self.g(b, "M")
def t(self, b):
self.g(b, "T")
def o(self, b):
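        # Cycle to the next registered macro O-code on each press of the O button.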
old_code = self.labels[0].get_text()
ocodes = self.mdi.ocodes
if old_code in ocodes:
j = (ocodes.index(old_code) + 1) % len(ocodes)
else:
j = 0
self.g(b, ocodes[j])
self.next(b)
def select(self, eventbox, event):
n = int(eventbox.get_name()[12:])
if self.selected == 0:
self.fill_out()
if n <= self.numwords:
self.editing(n)
def set_tool(self, tool, g10l11):
self.g(0)
self.set_text("G10", 0)
self.next(0)
if g10l11:
self.set_text("L11", 1)
else:
self.set_text("L10", 1)
self.next(0)
self.set_text("P%d" % tool, 2)
self.next(0)
self.next(0)
self.next(0)
def set_origin(self, system):
self.g(0)
self.set_text("G10", 0)
self.next(0)
self.set_text("L20", 1)
self.next(0)
self.set_text("P%d" % system, 2)
self.next(0)
|
lgpl-2.1
|
tseaver/google-cloud-python
|
tasks/google/cloud/tasks_v2beta2/gapic/cloud_tasks_client_config.py
|
4
|
4855
|
config = {
"interfaces": {
"google.cloud.tasks.v2beta2.CloudTasks": {
"retry_codes": {
"idempotent": ["DEADLINE_EXCEEDED", "UNAVAILABLE"],
"non_idempotent": [],
},
"retry_params": {
"default": {
"initial_retry_delay_millis": 100,
"retry_delay_multiplier": 1.3,
"max_retry_delay_millis": 60000,
"initial_rpc_timeout_millis": 20000,
"rpc_timeout_multiplier": 1.0,
"max_rpc_timeout_millis": 20000,
"total_timeout_millis": 600000,
}
},
"methods": {
"ListQueues": {
"timeout_millis": 15000,
"retry_codes_name": "idempotent",
"retry_params_name": "default",
},
"GetQueue": {
"timeout_millis": 10000,
"retry_codes_name": "idempotent",
"retry_params_name": "default",
},
"CreateQueue": {
"timeout_millis": 10000,
"retry_codes_name": "non_idempotent",
"retry_params_name": "default",
},
"UpdateQueue": {
"timeout_millis": 10000,
"retry_codes_name": "non_idempotent",
"retry_params_name": "default",
},
"DeleteQueue": {
"timeout_millis": 10000,
"retry_codes_name": "idempotent",
"retry_params_name": "default",
},
"PurgeQueue": {
"timeout_millis": 10000,
"retry_codes_name": "non_idempotent",
"retry_params_name": "default",
},
"PauseQueue": {
"timeout_millis": 10000,
"retry_codes_name": "non_idempotent",
"retry_params_name": "default",
},
"ResumeQueue": {
"timeout_millis": 10000,
"retry_codes_name": "non_idempotent",
"retry_params_name": "default",
},
"GetIamPolicy": {
"timeout_millis": 10000,
"retry_codes_name": "idempotent",
"retry_params_name": "default",
},
"SetIamPolicy": {
"timeout_millis": 10000,
"retry_codes_name": "non_idempotent",
"retry_params_name": "default",
},
"TestIamPermissions": {
"timeout_millis": 10000,
"retry_codes_name": "idempotent",
"retry_params_name": "default",
},
"ListTasks": {
"timeout_millis": 10000,
"retry_codes_name": "idempotent",
"retry_params_name": "default",
},
"GetTask": {
"timeout_millis": 10000,
"retry_codes_name": "idempotent",
"retry_params_name": "default",
},
"CreateTask": {
"timeout_millis": 10000,
"retry_codes_name": "non_idempotent",
"retry_params_name": "default",
},
"DeleteTask": {
"timeout_millis": 10000,
"retry_codes_name": "idempotent",
"retry_params_name": "default",
},
"LeaseTasks": {
"timeout_millis": 10000,
"retry_codes_name": "non_idempotent",
"retry_params_name": "default",
},
"AcknowledgeTask": {
"timeout_millis": 10000,
"retry_codes_name": "non_idempotent",
"retry_params_name": "default",
},
"RenewLease": {
"timeout_millis": 10000,
"retry_codes_name": "non_idempotent",
"retry_params_name": "default",
},
"CancelLease": {
"timeout_millis": 10000,
"retry_codes_name": "non_idempotent",
"retry_params_name": "default",
},
"RunTask": {
"timeout_millis": 10000,
"retry_codes_name": "non_idempotent",
"retry_params_name": "default",
},
},
}
}
}
|
apache-2.0
|
AntonKhorev/spb-budget-xls
|
src/testRecord.py
|
1
|
23498
|
#!/usr/bin/env python3
import unittest
import record,number
class TestRecordBuilder(unittest.TestCase):
def setUp(self):
self.text1=[
"1. АДМИНИСТРАЦИЯ ГУБЕРНАТОРА САНКТ- 2 114 774,1",
"ПЕТЕРБУРГА (801)",
"1.1. Расходы на содержание главы Правительства 0102 0010008 2 026,0",
"Санкт-Петербурга",
"1.1.1. Выполнение функций государственными органами 0102 0010008 012 2 026,0",
"1.2. Содержание исполнительного органа 0114 0010009 984 695,5",
]
self.lr1=record.RecordBuilder(1)
self.lr2=record.RecordBuilder(2)
def doTestName(self,lr,lines,name):
rows=[None]
for i in range(len(lines)-1):
lines[i+1]=lr.read(rows,lines[i],lines[i+1])
self.assertEqual(rows[-1]['name'],name)
def test1(self):
rows=[None]
nextLine=self.lr1.read(rows,self.text1[0],self.text1[1])
self.assertEqual(nextLine,self.text1[1])
self.assertEqual(rows,[None,
{'number':'1.','name':'АДМИНИСТРАЦИЯ ГУБЕРНАТОРА САНКТ-','amounts':[21147741]},
])
def test1next(self):
rows=[None,
{'number':'1.','name':'АДМИНИСТРАЦИЯ ГУБЕРНАТОРА САНКТ-','amounts':[21147741]},
]
nextLine=self.lr1.read(rows,self.text1[1],self.text1[2])
self.assertEqual(nextLine,self.text1[2])
self.assertEqual(rows,[None,
{'number':'1.','name':'АДМИНИСТРАЦИЯ ГУБЕРНАТОРА САНКТ-ПЕТЕРБУРГА (801)','amounts':[21147741]},
])
def test11(self):
rows=[None,
{'number':'1.','name':'АДМИНИСТРАЦИЯ ГУБЕРНАТОРА САНКТ-ПЕТЕРБУРГА (801)','amounts':[21147741]},
]
nextLine=self.lr1.read(rows,self.text1[2],self.text1[3])
self.assertEqual(nextLine,self.text1[3])
self.assertEqual(rows,[None,
{'number':'1.','name':'АДМИНИСТРАЦИЯ ГУБЕРНАТОРА САНКТ-ПЕТЕРБУРГА (801)','amounts':[21147741]},
{'number':'1.1.','name':'Расходы на содержание главы Правительства','section':'0102','article':'0010008','amounts':[20260]},
])
def test11next(self):
rows=[None,
{'number':'1.','name':'АДМИНИСТРАЦИЯ ГУБЕРНАТОРА САНКТ-ПЕТЕРБУРГА (801)','amounts':[21147741]},
{'number':'1.1.','name':'Расходы на содержание главы Правительства','section':'0102','article':'0010008','amounts':[20260]},
]
nextLine=self.lr1.read(rows,self.text1[3],self.text1[4])
self.assertEqual(nextLine,self.text1[4])
self.assertEqual(rows,[None,
{'number':'1.','name':'АДМИНИСТРАЦИЯ ГУБЕРНАТОРА САНКТ-ПЕТЕРБУРГА (801)','amounts':[21147741]},
{'number':'1.1.','name':'Расходы на содержание главы Правительства Санкт-Петербурга','section':'0102','article':'0010008','amounts':[20260]},
])
def test111(self):
rows=[None,
{'number':'1.','name':'АДМИНИСТРАЦИЯ ГУБЕРНАТОРА САНКТ-ПЕТЕРБУРГА (801)','amounts':[21147741]},
{'number':'1.1.','name':'Расходы на содержание главы Правительства Санкт-Петербурга','section':'0102','article':'0010008','amounts':[20260]},
]
nextLine=self.lr1.read(rows,self.text1[4],self.text1[5])
self.assertEqual(nextLine,self.text1[5])
self.assertEqual(rows,[None,
{'number':'1.','name':'АДМИНИСТРАЦИЯ ГУБЕРНАТОРА САНКТ-ПЕТЕРБУРГА (801)','amounts':[21147741]},
{'number':'1.1.','name':'Расходы на содержание главы Правительства Санкт-Петербурга','section':'0102','article':'0010008','amounts':[20260]},
{'number':'1.1.1.','name':'Выполнение функций государственными органами','section':'0102','article':'0010008','type':'012','amounts':[20260]},
])
def testNumberPartOnNextLine(self):
rows=[None]
nextLine=self.lr2.read(rows,
'12.12. Выполнение функций государственными 0707 4320024 012 -668 622.3 -670 998.3',
'1. органами'
)
self.assertEqual(nextLine,'органами')
self.assertEqual(rows,[None,
{'number':'12.12.1.','name':'Выполнение функций государственными','section':'0707','article':'4320024','type':'012','amounts':[-6686223,-6709983]}
])
def testSpacesAroundQuote(self):
rows=[None]
line='63.18. Расходы на выполнение мероприятий по 0501 3500910 808.0'
nextLine='обследованию и сносу " деревьев-угроз" , '
line=self.lr1.read(rows,line,nextLine)
self.assertEqual(line,nextLine)
self.assertEqual(rows,[None,
{'number':'63.18.','name':'Расходы на выполнение мероприятий по','section':'0501','article':'3500910','amounts':[8080]},
])
nextLine='находящихся на придомовой территории, не '
line=self.lr1.read(rows,line,nextLine)
self.assertEqual(line,nextLine)
self.assertEqual(rows,[None,
{'number':'63.18.','name':'Расходы на выполнение мероприятий по обследованию и сносу "деревьев-угроз",','section':'0501','article':'3500910','amounts':[8080]},
])
def testDoubleSpace(self):
rows=[None]
line="1.3. Расходы на материальное обеспечение 0103 0011201 149.0"
nextLine="деятельности членов Совета Федерации и их "
line=self.lr1.read(rows,line,nextLine)
self.assertEqual(line,nextLine)
self.assertEqual(rows,[None,
{'number':'1.3.','name':'Расходы на материальное обеспечение','section':'0103','article':'0011201','amounts':[1490]},
])
nextLine="помощников за счет средств федерального "
line=self.lr1.read(rows,line,nextLine)
self.assertEqual(line,nextLine)
self.assertEqual(rows,[None,
{'number':'1.3.','name':'Расходы на материальное обеспечение деятельности членов Совета Федерации и их','section':'0103','article':'0011201','amounts':[1490]},
])
def testYPrirodopolzovanie(self):
rows=[None]
line='24. КОМИТЕТ ПО 1 963 666.1 1 467 692.3'
nextLine='ПРИРОДОПОЛЬЗОВАНИЮ, ОХРАНЕ '
line=self.lr2.read(rows,line,nextLine)
self.assertEqual(line,nextLine)
self.assertEqual(rows,[None,
{'number':'24.','name':'КОМИТЕТ ПО','amounts':[19636661,14676923]},
])
nextLine='ОКРУЖАЮЩЕЙ СРЕДЫ И '
line=self.lr2.read(rows,line,nextLine)
self.assertEqual(line,nextLine)
self.assertEqual(rows,[None,
{'number':'24.','name':'КОМИТЕТ ПО ПРИРОДОПОЛЬЗОВАНИЮ, ОХРАНЕ','amounts':[19636661,14676923]},
])
nextLine='ОБЕСПЕЧЕНИЮ ЭКОЛОГИЧЕСКОЙ '
line=self.lr2.read(rows,line,nextLine)
self.assertEqual(line,nextLine)
self.assertEqual(rows,[None,
{'number':'24.','name':'КОМИТЕТ ПО ПРИРОДОПОЛЬЗОВАНИЮ, ОХРАНЕ ОКРУЖАЮЩЕЙ СРЕДЫ И','amounts':[19636661,14676923]},
])
nextLine='БЕЗОПАСНОСТИ (825)'
line=self.lr2.read(rows,line,nextLine)
self.assertEqual(line,nextLine)
self.assertEqual(rows,[None,
{'number':'24.','name':'КОМИТЕТ ПО ПРИРОДОПОЛЬЗОВАНИЮ, ОХРАНЕ ОКРУЖАЮЩЕЙ СРЕДЫ И ОБЕСПЕЧЕНИЮ ЭКОЛОГИЧЕСКОЙ','amounts':[19636661,14676923]},
])
def testYArkhivnyi(self):
rows=[None]
line='2. АРХИВНЫ Й КОМИТЕТ САНКТ-ПЕТЕРБУРГА 335 899.9'
nextLine='(803)'
line=self.lr1.read(rows,line,nextLine)
self.assertEqual(line,nextLine)
self.assertEqual(rows,[None,
{'number':'2.','name':'АРХИВНЫЙ КОМИТЕТ САНКТ-ПЕТЕРБУРГА','amounts':[3358999]},
])
def testYStroitelnyNadzor(self):
self.doTestName(self.lr1,[
'43. СЛУЖБА ГОСУДАРСТВЕННОГО 160 509.6',
'СТРОИТЕЛЬНОГО НАДЗОРА И ',
'ЭКСПЕРТИЗЫ САНКТ-ПЕТЕРБУРГА (839)',
'43.1. Содержание исполнительного органа 01 13 0010009 129 469.9',
],'СЛУЖБА ГОСУДАРСТВЕННОГО СТРОИТЕЛЬНОГО НАДЗОРА И ЭКСПЕРТИЗЫ САНКТ-ПЕТЕРБУРГА (839)')
def testZnaniye(self):
self.doTestName(self.lr1,[
'19.39. Субсидия Межрегиональной общественной 0801 4400210 200.0',
'организации " Общество " Знание" Санкт-',
'Петербурга и Ленинградской области" на ',
'проведение культурно-образовательных и ',
],'Субсидия Межрегиональной общественной организации "Общество "Знание" Санкт-Петербурга и Ленинградской области" на')
def testFund(self):
self.doTestName(self.lr1,[
'32.13. Субсидия ОАО "Фонд имущества Санкт- 0114 0921401 10 000,0',
'Петербурга" на возмещ-е затрат,связан.с осущ-ем ',
'функций по сопров-ю сделок по приобрет-ю ',
],'Субсидия ОАО "Фонд имущества Санкт-Петербурга" на возмещ-е затрат,связан.с осущ-ем')
def testLeadingDash(self):
self.doTestName(self.lr2,[
'19. КОМИТЕТ ФИНАНСОВ САНКТ +863 051.1 -2 210 922.6',
'-ПЕТЕРБУРГА (833)',
'19.1. Расходы на оплату услуг 01 13 0301020 +0.0 +0.0',
],'КОМИТЕТ ФИНАНСОВ САНКТ-ПЕТЕРБУРГА (833)')
def testIspolnenieEntry(self):
lr=record.RecordBuilder(3,2)
rows=[None]
line='1. АДМИНИСТРАЦИЯ ГУБЕРНАТОРА САНКТ- 2 112 960.4 2 143 756.6 2 062 085.8 97.59 96.19'
nextLine='ПЕТЕРБУРГА'
line=lr.read(rows,line,nextLine)
self.assertEqual(rows,[None,
{'number':'1.','name':'АДМИНИСТРАЦИЯ ГУБЕРНАТОРА САНКТ-','amounts':[21129604,21437566,20620858]},
])
def testIspolnenieTotal(self):
lr=record.RecordBuilder(3,2)
rows=[{}]
line=' Итого: 431 939 763.4 442 067 920.3 404 032 373.1 93.54 91.40'
nextLine=' '
line=lr.read(rows,line,nextLine)
self.assertEqual(rows,[
{'name':'Итого','amounts':[4319397634,4420679203,4040323731]},
])
def testIspolneniePageBreak(self):
lr=record.RecordBuilder(3,2)
rows=[None]
line='7. ЖИЛИЩНЫЙ КОМИТЕТ 15 624 229.3 15 794 610.8 14 855 482.4 95.08 94.05'
nextLine='Показатели расходов бюджета Санкт-Петербурга за 2011 год'
line=lr.read(rows,line,nextLine)
self.assertEqual(rows,[
None,
{'number':'7.','name':'ЖИЛИЩНЫЙ КОМИТЕТ','amounts':[156242293,157946108,148554824]},
])
nextLine='по ведомственной структуре расходов бюджета Санкт-Петербурга'
line=lr.read(rows,line,nextLine)
self.assertEqual(rows,[
None,
{'number':'7.','name':'ЖИЛИЩНЫЙ КОМИТЕТ','amounts':[156242293,157946108,148554824]},
None,
])
def testUnmarkedTotal(self):
lr=record.RecordBuilder(1,quirks={'unmarkedTotal'})
rows=[{}]
line='323 653 884.8'
nextLine='226 Приложение 3'
line=lr.read(rows,line,nextLine)
self.assertEqual(rows,[
{'name':'Итого','amounts':[3236538848]},
])
def testUnmarkedTotalWithUndottedNumbersEnabled(self):
lr=record.RecordBuilder(1,quirks={'unmarkedTotal','undottedNumbers'})
rows=[{}]
line='323 653 884.8'
nextLine='226 Приложение 3'
line=lr.read(rows,line,nextLine)
self.assertEqual(rows,[
{'name':'Итого','amounts':[3236538848]},
])
def testUndottedNumber(self):
lr=record.RecordBuilder(1,quirks={'undottedNumbers'})
rows=[None]
line='1 АДМИНИСТРАЦИЯ ГУБЕРНАТОРА САНКТ- 1 501 819.1'
nextLine='ПЕТЕРБУРГА (801)'
line=lr.read(rows,line,nextLine)
self.assertEqual(rows,[None,
{'number':'1.','name':'АДМИНИСТРАЦИЯ ГУБЕРНАТОРА САНКТ-','amounts':[15018191]},
])
def testUndottedNumberPartOnNextLine(self):
lr=record.RecordBuilder(2,quirks={'undottedNumbers'})
rows=[None]
line='18.43. Мероприятия в области здравоохранения, 0904 5220086 455 1 494 500.0 1 499 700.0'
nextLine='1 спорта и физической культуры, туризма'
line=lr.read(rows,line,nextLine)
self.assertEqual(line,'спорта и физической культуры, туризма')
self.assertEqual(rows,[None,
{'number':'18.43.1.','name':'Мероприятия в области здравоохранения,','section':'0904','article':'5220086','type':'455','amounts':[14945000,14997000]},
])
def testUndottedSplitNumber(self):
lr=record.RecordBuilder(2,quirks={'undottedNumbers'})
rows=[None]
line='17.4.1 Осуществление расходов Российской 0401 5190010 282 173 584.8 172 367.1'
nextLine='1 Федерации по управлению в области занятости'
line=lr.read(rows,line,nextLine)
self.assertEqual(line,'Федерации по управлению в области занятости')
self.assertEqual(rows,[None,
{'number':'17.4.11.','name':'Осуществление расходов Российской','section':'0401','article':'5190010','type':'282','amounts':[1735848,1723671]},
])
def testUndottedNonsplitNumberFollowedByDigits(self):
lr=record.RecordBuilder(1,quirks={'undottedNumbers'})
rows=[None]
line='45.2 Расходы на реализацию Федерального Закона от 0105 5190009 410.3'
nextLine='20.08.2004 № 113-ФЗ "О присяжных заседателях'
line=lr.read(rows,line,nextLine)
self.assertEqual(rows,[None,
{'number':'45.2.','name':'Расходы на реализацию Федерального Закона от','section':'0105','article':'5190009','amounts':[4103]},
])
def testUndottedNonsplitNumberFollowedByPageBreak(self):
lr=record.RecordBuilder(1,quirks={'undottedNumbers'})
rows=[None]
line='9.9.1 Мероприятия в области жилищного хозяйства 0501 3500001 410 204 843.9'
nextLine='10 Приложение 3'
line=lr.read(rows,line,nextLine)
self.assertEqual(rows,[None,
{'number':'9.9.1.','name':'Мероприятия в области жилищного хозяйства','section':'0501','article':'3500001','type':'410','amounts':[2048439]},
])
def testTwoCodesQuadrupleDepth(self):
lr=record.RecordBuilder(3,2,quirks={'OSGUcode','depth4'})
rows=[None]
line='1.1.1. Оплата труда и начисления на выплаты по 0102 0010008 012 210 1 721.0 1 721.0 1 459.1 84.78 84.78'
nextLine='оплате труда'
line=lr.read(rows,line,nextLine)
self.assertEqual(rows,[None,
{'number':'1.1.1.','name':'Оплата труда и начисления на выплаты по','section':'0102','article':'0010008','type':'012','OSGU':'210','amounts':[17210,17210,14591]},
])
nextLine='1.1.1.1. Заработная плата 0102 0010008 012 211 1 434.2 1 434.2 1 434.2 100.00 100.00'
line=lr.read(rows,line,nextLine)
self.assertEqual(rows,[None,
{'number':'1.1.1.','name':'Оплата труда и начисления на выплаты по оплате труда','section':'0102','article':'0010008','type':'012','OSGU':'210','amounts':[17210,17210,14591]},
])
nextLine='1.1.1.2. Начисления на выплаты по оплате труда 0102 0010008 012 213 286.8 286.8 24.9 8.68 8.68'
line=lr.read(rows,line,nextLine)
self.assertEqual(rows,[None,
{'number':'1.1.1.','name':'Оплата труда и начисления на выплаты по оплате труда','section':'0102','article':'0010008','type':'012','OSGU':'210','amounts':[17210,17210,14591]},
{'number':'1.1.1.1.','name':'Заработная плата','section':'0102','article':'0010008','type':'012','OSGU':'211','amounts':[14342,14342,14342]},
])
def testQuadDepthWithDotOnNextLine(self):
lr=record.RecordBuilder(3,2,quirks={'OSGUcode','depth4'})
rows=[None]
line='10.11.1.1 Безвозмездные перечисления 0409 3510444 006 241 1 112 801.0 1 112 801.0 1 112 801.0 100.00 100.00'
nextLine='. государственным и муниципальным '
line=lr.read(rows,line,nextLine)
self.assertEqual(rows,[None,
{'number':'10.11.1.1.','name':'Безвозмездные перечисления','section':'0409','article':'3510444','type':'006','OSGU':'241','amounts':[11128010,11128010,11128010]},
])
def testQuadDepthWithOnlyDotOnNextLine(self):
lr=record.RecordBuilder(3,2,quirks={'econcode','depth4'})
rows=[None]
line='10.10.1.1 Работы, услуги по содержанию имущества 0409 3510205 012 225 5 302 000.0 5 524 316.2 5 404 167.3 101.93 97.83'
nextLine='.'
line=lr.read(rows,line,nextLine)
self.assertEqual(rows,[None,
{'number':'10.10.1.1.','name':'Работы, услуги по содержанию имущества','section':'0409','article':'3510205','type':'012','econ':'225','amounts':[53020000,55243162,54041673]},
])
def testCantTellIfNumberOnNextLine(self):
lr=record.RecordBuilder(1,quirks={'undottedNumbers','econcode','depth4'})
nsc=number.NumberSequenceChecker(3,4)
rows=[None,
{'number':'49.38.1.1.','name':'Услуги по содержанию имущества','section':'0501','article':'3500682','type':'410','econ':'225','amounts':[755550]},
]
line='49.39 Расходы на благоустройство кварталов 16, 16а, 0501 3500683 2 500.0'
nextLine='55 Гражданки Калининского района'
line=lr.read(rows,line,nextLine,nsc)
self.assertEqual(rows,[None,
{'number':'49.38.1.1.','name':'Услуги по содержанию имущества','section':'0501','article':'3500682','type':'410','econ':'225','amounts':[755550]},
{'number':'49.39.','name':'Расходы на благоустройство кварталов 16, 16а,','section':'0501','article':'3500683','amounts':[25000]},
])
def testNegativeExecution(self):
lr=record.RecordBuilder(3,2,quirks={'econcode','depth4'})
rows=[None]
line='12.7.3. Прочие расходы 0115 0010009 005 290 0.0 2.0 -0.4 0.00 -20.00'
nextLine='12.7.4. Увеличение стоимости основных средств 0115 0010009 005 310 300.0 320.0 319.4 106.47 99.81 '
line=lr.read(rows,line,nextLine)
self.assertEqual(rows,[None,
{'number':'12.7.3.','name':'Прочие расходы','section':'0115','article':'0010009','type':'005','econ':'290','amounts':[0,20,-4]},
])
def testSplitSection(self):
lr=record.RecordBuilder(1,quirks={'splitSection'})
rows=[None]
line='1.1. Расходы на содержание главы Правительства 01 02 0010008 2 862.8'
nextLine='Санкт-Петербурга'
line=lr.read(rows,line,nextLine)
self.assertEqual(rows,[None,
{'number':'1.1.','name':'Расходы на содержание главы Правительства','section':'0102','article':'0010008','amounts':[28628]},
])
def testLetterArticle(self):
lr=record.RecordBuilder(1,quirks={'splitSection'})
rows=[None]
line='29.7. Расходы на реализацию мероприятий 04 09 24Б2060 500 000.0'
nextLine='подпрограммы " Автомобильные дороги" '
line=lr.read(rows,line,nextLine)
self.assertEqual(rows,[None,
{'number':'29.7.','name':'Расходы на реализацию мероприятий','section':'0409','article':'24Б2060','amounts':[5000000]},
])
def testLetterArticle2(self):
lr=record.RecordBuilder(1,quirks={'splitSection'})
rows=[None]
line='16.43. Расходы на осуществление полномочий в сфере 09 09 33159Б0 4 168.1'
nextLine='охраны здоровья граждан за счет единой '
line=lr.read(rows,line,nextLine)
self.assertEqual(rows,[None,
{'number':'16.43.','name':'Расходы на осуществление полномочий в сфере','section':'0909','article':'33159Б0','amounts':[41681]},
])
def testPageBreak(self):
lr=record.RecordBuilder(2,quirks={'splitSection'})
rows=[None]
line='1.3.3. Иные бюджетные ассигнования 01 13 1519612 800 3 672.6 931.6'
nextLine='2Приложение 4'
line=lr.read(rows,line,nextLine)
self.assertEqual(rows,[
None,
{'number':'1.3.3.','name':'Иные бюджетные ассигнования','section':'0113','article':'1519612','type':'800','amounts':[36726,9316]},
])
nextLine='к Закону Санкт-Петербурга "О бюджете'
line=lr.read(rows,line,nextLine)
self.assertEqual(rows,[
None,
{'number':'1.3.3.','name':'Иные бюджетные ассигнования','section':'0113','article':'1519612','type':'800','amounts':[36726,9316]},
None,
])
def testPageBreakBroken(self):
lr=record.RecordBuilder(2,quirks={'splitSection','brokenNewLine'})
rows=[None]
line='4.1. Содержание исполнительного 01 13 9910009 238 419.3 252 577.5'
nextLine='органа государственной власти '
line=lr.read(rows,line,nextLine)
nextLine='Санкт-Петербурга'
line=lr.read(rows,line,nextLine)
nextLine='10'
line=lr.read(rows,line,nextLine)
nextLine='Приложение 4'
line=lr.read(rows,line,nextLine)
self.assertEqual(rows,[
None,
{'number':'4.1.','name':'Содержание исполнительного органа государственной власти Санкт-Петербурга','section':'0113','article':'9910009','amounts':[2384193,2525775]},
None,
])
# quirk accounted for in spreadsheet.py
# def testInternationalCommittee(self):
# lr=record.RecordBuilder(1,quirks={'splitSection','missingCommitteeCode875'})
# self.doTestName(lr,[
# '21. КОМИТЕТ ПО МЕЖНАЦИОНАЛЬНЫМ 107 075.2',
# 'ОТНОШЕНИЯМ И РЕАЛИЗАЦИИ ',
# 'МИГРАЦИОННОЙ ПОЛИТИКИ В САНКТ-',
# 'ПЕТЕРБУРГЕ',
# '21.1. Содержание исполнительного органа 01 13 0010009 29 579.1',
# ],'КОМИТЕТ ПО МЕЖНАЦИОНАЛЬНЫМ ОТНОШЕНИЯМ И РЕАЛИЗАЦИИ МИГРАЦИОННОЙ ПОЛИТИКИ В САНКТ-ПЕТЕРБУРГЕ (875)')
if __name__=='__main__':
unittest.main()
|
bsd-2-clause
|
hahaps/openstack-project-generator
|
template/<project_name>/tests/unit/api/middleware/test_faults.py
|
1
|
11029
|
# Copyright 2010 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from xml.dom import minidom
import mock
from oslo_i18n import fixture as i18n_fixture
from oslo_serialization import jsonutils
import webob.dec
from <project_name>.api import common
from <project_name>.api.openstack import wsgi
from <project_name>.i18n import _
from <project_name> import test
class TestFaults(test.TestCase):
"""Tests covering `<project_name>.api.openstack.faults:Fault` class."""
def setUp(self):
super(TestFaults, self).setUp()
self.useFixture(i18n_fixture.ToggleLazy(True))
def _prepare_xml(self, xml_string):
"""Remove characters from string which hinder XML equality testing."""
xml_string = xml_string.replace(" ", "")
xml_string = xml_string.replace("\n", "")
xml_string = xml_string.replace("\t", "")
return xml_string
def test_400_fault_json(self):
"""Test fault serialized to JSON via file-extension and/or header."""
requests = [
webob.Request.blank('/.json'),
webob.Request.blank('/', headers={"Accept": "application/json"}),
]
for request in requests:
fault = wsgi.Fault(webob.exc.HTTPBadRequest(explanation='scram'))
response = request.get_response(fault)
expected = {
"badRequest": {
"message": "scram",
"code": 400,
},
}
actual = jsonutils.loads(response.body)
self.assertEqual("application/json", response.content_type)
self.assertEqual(expected, actual)
def test_413_fault_json(self):
"""Test fault serialized to JSON via file-extension and/or header."""
requests = [
webob.Request.blank('/.json'),
webob.Request.blank('/', headers={"Accept": "application/json"}),
]
for request in requests:
exc = webob.exc.HTTPRequestEntityTooLarge
fault = wsgi.Fault(exc(explanation='sorry',
headers={'Retry-After': 4}))
response = request.get_response(fault)
expected = {
"overLimit": {
"message": "sorry",
"code": 413,
"retryAfter": 4,
},
}
actual = jsonutils.loads(response.body)
self.assertEqual("application/json", response.content_type)
self.assertEqual(expected, actual)
def test_raise(self):
"""Ensure the ability to raise :class:`Fault` in WSGI-ified methods."""
@webob.dec.wsgify
def raiser(req):
raise wsgi.Fault(webob.exc.HTTPNotFound(explanation='whut?'))
req = webob.Request.blank('/.xml')
resp = req.get_response(raiser)
self.assertEqual("application/xml", resp.content_type)
self.assertEqual(404, resp.status_int)
self.assertIn('whut?', resp.body)
def test_raise_403(self):
"""Ensure the ability to raise :class:`Fault` in WSGI-ified methods."""
@webob.dec.wsgify
def raiser(req):
raise wsgi.Fault(webob.exc.HTTPForbidden(explanation='whut?'))
req = webob.Request.blank('/.xml')
resp = req.get_response(raiser)
self.assertEqual("application/xml", resp.content_type)
self.assertEqual(403, resp.status_int)
self.assertNotIn('resizeNotAllowed', resp.body)
self.assertIn('forbidden', resp.body)
@mock.patch('<project_name>.api.openstack.wsgi.i18n.translate')
def test_raise_http_with_localized_explanation(self, mock_translate):
params = ('blah', )
expl = _("String with params: %s") % params
def _mock_translation(msg, locale):
return "Mensaje traducido"
mock_translate.side_effect = _mock_translation
@webob.dec.wsgify
def raiser(req):
raise wsgi.Fault(webob.exc.HTTPNotFound(explanation=expl))
req = webob.Request.blank('/.xml')
resp = req.get_response(raiser)
self.assertEqual("application/xml", resp.content_type)
self.assertEqual(404, resp.status_int)
self.assertIn(("Mensaje traducido"), resp.body)
self.stubs.UnsetAll()
def test_fault_has_status_int(self):
"""Ensure the status_int is set correctly on faults."""
fault = wsgi.Fault(webob.exc.HTTPBadRequest(explanation='what?'))
self.assertEqual(400, fault.status_int)
def test_xml_serializer(self):
"""Ensure that a v1 request responds with a v1 xmlns."""
request = webob.Request.blank('/v1',
headers={"Accept": "application/xml"})
fault = wsgi.Fault(webob.exc.HTTPBadRequest(explanation='scram'))
response = request.get_response(fault)
self.assertIn(common.XML_NS_V1, response.body)
self.assertEqual("application/xml", response.content_type)
self.assertEqual(400, response.status_int)
class FaultsXMLSerializationTestV11(test.TestCase):
"""Tests covering `<project_name>.api.openstack.faults:Fault` class."""
def _prepare_xml(self, xml_string):
xml_string = xml_string.replace(" ", "")
xml_string = xml_string.replace("\n", "")
xml_string = xml_string.replace("\t", "")
return xml_string
def test_400_fault(self):
metadata = {'attributes': {"badRequest": 'code'}}
serializer = wsgi.XMLDictSerializer(metadata=metadata,
xmlns=common.XML_NS_V1)
fixture = {
"badRequest": {
"message": "scram",
"code": 400,
},
}
output = serializer.serialize(fixture)
actual = minidom.parseString(self._prepare_xml(output))
expected = minidom.parseString(self._prepare_xml("""
<badRequest code="400" xmlns="%s">
<message>scram</message>
</badRequest>
""") % common.XML_NS_V1)
self.assertEqual(expected.toxml(), actual.toxml())
def test_413_fault(self):
metadata = {'attributes': {"overLimit": 'code'}}
serializer = wsgi.XMLDictSerializer(metadata=metadata,
xmlns=common.XML_NS_V1)
fixture = {
"overLimit": {
"message": "sorry",
"code": 413,
"retryAfter": 4,
},
}
output = serializer.serialize(fixture)
actual = minidom.parseString(self._prepare_xml(output))
expected = minidom.parseString(self._prepare_xml("""
<overLimit code="413" xmlns="%s">
<message>sorry</message>
<retryAfter>4</retryAfter>
</overLimit>
""") % common.XML_NS_V1)
self.assertEqual(expected.toxml(), actual.toxml())
def test_404_fault(self):
metadata = {'attributes': {"itemNotFound": 'code'}}
serializer = wsgi.XMLDictSerializer(metadata=metadata,
xmlns=common.XML_NS_V1)
fixture = {
"itemNotFound": {
"message": "sorry",
"code": 404,
},
}
output = serializer.serialize(fixture)
actual = minidom.parseString(self._prepare_xml(output))
expected = minidom.parseString(self._prepare_xml("""
<itemNotFound code="404" xmlns="%s">
<message>sorry</message>
</itemNotFound>
""") % common.XML_NS_V1)
self.assertEqual(expected.toxml(), actual.toxml())
class FaultsXMLSerializationTestV2(test.TestCase):
"""Tests covering `<project_name>.api.openstack.faults:Fault` class."""
def _prepare_xml(self, xml_string):
xml_string = xml_string.replace(" ", "")
xml_string = xml_string.replace("\n", "")
xml_string = xml_string.replace("\t", "")
return xml_string
def test_400_fault(self):
metadata = {'attributes': {"badRequest": 'code'}}
serializer = wsgi.XMLDictSerializer(metadata=metadata,
xmlns=common.XML_NS_V1)
fixture = {
"badRequest": {
"message": "scram",
"code": 400,
},
}
output = serializer.serialize(fixture)
actual = minidom.parseString(self._prepare_xml(output))
expected = minidom.parseString(self._prepare_xml("""
<badRequest code="400" xmlns="%s">
<message>scram</message>
</badRequest>
""") % common.XML_NS_V1)
self.assertEqual(expected.toxml(), actual.toxml())
def test_413_fault(self):
metadata = {'attributes': {"overLimit": 'code'}}
serializer = wsgi.XMLDictSerializer(metadata=metadata,
xmlns=common.XML_NS_V1)
fixture = {
"overLimit": {
"message": "sorry",
"code": 413,
"retryAfter": 4,
},
}
output = serializer.serialize(fixture)
actual = minidom.parseString(self._prepare_xml(output))
expected = minidom.parseString(self._prepare_xml("""
<overLimit code="413" xmlns="%s">
<message>sorry</message>
<retryAfter>4</retryAfter>
</overLimit>
""") % common.XML_NS_V1)
self.assertEqual(expected.toxml(), actual.toxml())
def test_404_fault(self):
metadata = {'attributes': {"itemNotFound": 'code'}}
serializer = wsgi.XMLDictSerializer(metadata=metadata,
xmlns=common.XML_NS_V1)
fixture = {
"itemNotFound": {
"message": "sorry",
"code": 404,
},
}
output = serializer.serialize(fixture)
actual = minidom.parseString(self._prepare_xml(output))
expected = minidom.parseString(self._prepare_xml("""
<itemNotFound code="404" xmlns="%s">
<message>sorry</message>
</itemNotFound>
""") % common.XML_NS_V1)
self.assertEqual(expected.toxml(), actual.toxml())
|
apache-2.0
|
Ohjel/wood-process
|
base/hero.py
|
1
|
1239
|
import pygame
from entity import Entity
from animation import Animation
HERO_INERTIA = 0.95
HERO_ACCELERATION = 0.1
HERO_HITBOX = pygame.Rect(0, 50, 64, 64)
HERO_ANIMATION = (0, 0)
class Hero(Entity):
def __init__(self, nb, img):
#Call the parent constructor
Entity.__init__(self, HERO_HITBOX, True, (145, 75, 120))
#Attributes
self.id = nb
self.x = float(self.hitbox.left)
self.y = float(self.hitbox.top)
self.dx = 0.0
self.dy = 0.0
self.anim = Animation(img, HERO_ANIMATION)
def __del__(self):
pass
def update(self, events, pastDelay):
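        # Apply inertia and acceleration once per elapsed tick, presumably so
        # movement stays frame-rate independent.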
for i in range(0, pastDelay):
#Update speed
self.dx += events[self.id]['axisX']
self.dy += events[self.id]['axisY']
self.dx *= HERO_INERTIA
self.dy *= HERO_INERTIA
#Update coordinates
self.x += self.dx * HERO_ACCELERATION
self.y += self.dy * HERO_ACCELERATION
#Set the good coordinates
self.hitbox.left = int(self.x)
self.hitbox.top = int(self.y)
def draw(self, screen):
if self.visible:
self.anim.draw(screen, (self.hitbox.left, self.hitbox.top))
|
mit
|
martynovp/edx-platform
|
common/djangoapps/student/migrations/0002_text_to_varchar_and_indexes.py
|
188
|
9581
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'UserProfile.name'
db.alter_column('auth_userprofile', 'name', self.gf('django.db.models.fields.CharField')(max_length=255))
# Adding index on 'UserProfile', fields ['name']
db.create_index('auth_userprofile', ['name'])
# Changing field 'UserProfile.language'
db.alter_column('auth_userprofile', 'language', self.gf('django.db.models.fields.CharField')(max_length=255))
# Adding index on 'UserProfile', fields ['language']
db.create_index('auth_userprofile', ['language'])
# Changing field 'UserProfile.courseware'
db.alter_column('auth_userprofile', 'courseware', self.gf('django.db.models.fields.CharField')(max_length=255))
# Changing field 'UserProfile.meta'
db.alter_column('auth_userprofile', 'meta', self.gf('django.db.models.fields.CharField')(max_length=255))
# Changing field 'UserProfile.location'
db.alter_column('auth_userprofile', 'location', self.gf('django.db.models.fields.CharField')(max_length=255))
# Adding index on 'UserProfile', fields ['location']
db.create_index('auth_userprofile', ['location'])
def backwards(self, orm):
# Removing index on 'UserProfile', fields ['location']
db.delete_index('auth_userprofile', ['location'])
# Removing index on 'UserProfile', fields ['language']
db.delete_index('auth_userprofile', ['language'])
# Removing index on 'UserProfile', fields ['name']
db.delete_index('auth_userprofile', ['name'])
# Changing field 'UserProfile.name'
db.alter_column('auth_userprofile', 'name', self.gf('django.db.models.fields.TextField')())
# Changing field 'UserProfile.language'
db.alter_column('auth_userprofile', 'language', self.gf('django.db.models.fields.TextField')())
# Changing field 'UserProfile.courseware'
db.alter_column('auth_userprofile', 'courseware', self.gf('django.db.models.fields.TextField')())
# Changing field 'UserProfile.meta'
db.alter_column('auth_userprofile', 'meta', self.gf('django.db.models.fields.TextField')())
# Changing field 'UserProfile.location'
db.alter_column('auth_userprofile', 'location', self.gf('django.db.models.fields.TextField')())
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'about': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'avatar_type': ('django.db.models.fields.CharField', [], {'default': "'n'", 'max_length': '1'}),
'bronze': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'consecutive_days_visit_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'country': ('django_countries.fields.CountryField', [], {'max_length': '2', 'blank': 'True'}),
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_of_birth': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'display_tag_filter_strategy': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'email_isvalid': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'email_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'email_tag_filter_strategy': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'gold': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'gravatar': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ignored_tags': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'interesting_tags': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'location': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'new_response_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'questions_per_page': ('django.db.models.fields.SmallIntegerField', [], {'default': '10'}),
'real_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'reputation': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'seen_response_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'show_country': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'silver': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'w'", 'max_length': '2'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'student.registration': {
'Meta': {'object_name': 'Registration', 'db_table': "'auth_registration'"},
'activation_key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'unique': 'True'})
},
'student.userprofile': {
'Meta': {'object_name': 'UserProfile', 'db_table': "'auth_userprofile'"},
'courseware': ('django.db.models.fields.CharField', [], {'default': "'course.xml'", 'max_length': '255', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'location': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'meta': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'unique': 'True'})
}
}
complete_apps = ['student']
|
agpl-3.0
|
chengduoZH/Paddle
|
python/paddle/fluid/tests/unittests/test_square_error_cost.py
|
2
|
2032
|
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
import sys
import paddle.fluid.core as core
import paddle.fluid as fluid
import paddle.fluid.layers as layers
from paddle.fluid.executor import Executor
class TestSquareErrorCost(unittest.TestCase):
def test_square_error_cost(self):
input_val = np.random.uniform(0.1, 0.5, (2, 3)).astype("float32")
label_val = np.random.uniform(0.1, 0.5, (2, 3)).astype("float32")
sub = input_val - label_val
np_result = sub * sub
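        # The NumPy reference above is just the elementwise squared difference,
        # which is what square_error_cost is expected to return for these inputs.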
input_var = layers.create_tensor(dtype="float32", name="input")
label_var = layers.create_tensor(dtype="float32", name="label")
layers.assign(input=input_val, output=input_var)
layers.assign(input=label_val, output=label_var)
output = layers.square_error_cost(input=input_var, label=label_var)
for use_cuda in ([False, True]
if core.is_compiled_with_cuda() else [False]):
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
exe = Executor(place)
result = exe.run(fluid.default_main_program(),
feed={"input": input_var,
"label": label_var},
fetch_list=[output])
self.assertTrue(np.isclose(np_result, result).all())
if __name__ == "__main__":
unittest.main()
|
apache-2.0
|
adoosii/edx-platform
|
lms/envs/bok_choy.py
|
1
|
6517
|
"""
Settings for Bok Choy tests that are used when running LMS.
Bok Choy uses two different settings files:
1. test_static_optimized is used when invoking collectstatic
2. bok_choy is used when running the tests
Note: it isn't possible to have a single settings file, because Django doesn't
support both generating static assets to a directory and also serving static
files from the same directory.
"""
import os
from path import Path as path
from tempfile import mkdtemp
# Pylint gets confused by path.py instances, which report themselves as class
# objects. As a result, pylint applies the wrong regex in validating names,
# and throws spurious errors. Therefore, we disable invalid-name checking.
# pylint: disable=invalid-name
CONFIG_ROOT = path(__file__).abspath().dirname()
TEST_ROOT = CONFIG_ROOT.dirname().dirname() / "test_root"
########################## Prod-like settings ###################################
# These should be as close as possible to the settings we use in production.
# As in prod, we read in environment and auth variables from JSON files.
# Unlike in prod, we use the JSON files stored in this repo.
# This is a convenience for ensuring (a) that we can consistently find the files
# and (b) that the files are the same in Jenkins as in local dev.
os.environ['SERVICE_VARIANT'] = 'bok_choy'
os.environ['CONFIG_ROOT'] = CONFIG_ROOT
from .aws import * # pylint: disable=wildcard-import, unused-wildcard-import
######################### Testing overrides ####################################
# Needed for the reset database management command
INSTALLED_APPS += ('django_extensions',)
# Redirect to the test_root folder within the repo
GITHUB_REPO_ROOT = (TEST_ROOT / "data").abspath()
LOG_DIR = (TEST_ROOT / "log").abspath()
# Configure modulestore to use the test folder within the repo
update_module_store_settings(
MODULESTORE,
module_store_options={
'fs_root': (TEST_ROOT / "data").abspath(),
},
xml_store_options={
'data_dir': (TEST_ROOT / "data").abspath(),
},
default_store=os.environ.get('DEFAULT_STORE', 'draft'),
)
############################ STATIC FILES #############################
# Enable debug so that static assets are served by Django
DEBUG = True
# Serve static files at /static directly from the staticfiles directory under test root
# Note: optimized files for testing are generated with settings from test_static_optimized
STATIC_URL = "/static/"
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
)
STATICFILES_DIRS = (
(TEST_ROOT / "staticfiles" / "lms").abspath(),
)
DEFAULT_FILE_STORAGE = 'django.core.files.storage.FileSystemStorage'
MEDIA_ROOT = TEST_ROOT / "uploads"
MEDIA_URL = "/static/uploads/"
# Don't use compression during tests
PIPELINE_JS_COMPRESSOR = None
################################# CELERY ######################################
CELERY_ALWAYS_EAGER = True
CELERY_RESULT_BACKEND = 'djcelery.backends.cache:CacheBackend'
###################### Grade Downloads ######################
GRADES_DOWNLOAD = {
'STORAGE_TYPE': 'localfs',
'BUCKET': 'edx-grades',
'ROOT_PATH': os.path.join(mkdtemp(), 'edx-s3', 'grades'),
}
# Configure the LMS to use our stub XQueue implementation
XQUEUE_INTERFACE['url'] = 'http://localhost:8040'
# Configure the LMS to use our stub ORA implementation
OPEN_ENDED_GRADING_INTERFACE['url'] = 'http://localhost:8041/'
# Configure the LMS to use our stub EdxNotes implementation
EDXNOTES_PUBLIC_API = 'http://localhost:8042/api/v1'
EDXNOTES_INTERNAL_API = 'http://localhost:8042/api/v1'
# Silence noisy logs
import logging
LOG_OVERRIDES = [
('track.middleware', logging.CRITICAL),
('edxmako.shortcuts', logging.ERROR),
('dd.dogapi', logging.ERROR),
('edx.discussion', logging.CRITICAL),
]
for log_name, log_level in LOG_OVERRIDES:
logging.getLogger(log_name).setLevel(log_level)
# Enable milestones app
FEATURES['MILESTONES_APP'] = True
# Enable pre-requisite course
FEATURES['ENABLE_PREREQUISITE_COURSES'] = True
# Enable Course Discovery
FEATURES['ENABLE_COURSE_DISCOVERY'] = True
# Enable student notes
FEATURES['ENABLE_EDXNOTES'] = True
# Enable teams feature
FEATURES['ENABLE_TEAMS'] = True
# Enable custom content licensing
FEATURES['LICENSING'] = True
########################### Entrance Exams #################################
FEATURES['MILESTONES_APP'] = True
FEATURES['ENTRANCE_EXAMS'] = True
FEATURES['ENABLE_PROCTORED_EXAMS'] = True
# Point the URL used to test YouTube availability to our stub YouTube server
YOUTUBE_PORT = 9080
YOUTUBE['API'] = "http://127.0.0.1:{0}/get_youtube_api/".format(YOUTUBE_PORT)
YOUTUBE['METADATA_URL'] = "http://127.0.0.1:{0}/test_youtube/".format(YOUTUBE_PORT)
YOUTUBE['TEXT_API']['url'] = "127.0.0.1:{0}/test_transcripts_youtube/".format(YOUTUBE_PORT)
############################# SECURITY SETTINGS ################################
# Default to advanced security in common.py, so tests can reset here to use
# a simpler security model
FEATURES['ENFORCE_PASSWORD_POLICY'] = False
FEATURES['ENABLE_MAX_FAILED_LOGIN_ATTEMPTS'] = False
FEATURES['SQUELCH_PII_IN_LOGS'] = False
FEATURES['PREVENT_CONCURRENT_LOGINS'] = False
FEATURES['ADVANCED_SECURITY'] = False
FEATURES['ENABLE_MOBILE_REST_API'] = True  # Enable the mobile REST API
FEATURES['ENABLE_VIDEO_BUMPER'] = True # Show video bumper in LMS
FEATURES['SHOW_BUMPER_PERIODICITY'] = 1
PASSWORD_MIN_LENGTH = None
PASSWORD_COMPLEXITY = {}
# Enable courseware search for tests
FEATURES['ENABLE_COURSEWARE_SEARCH'] = True
# Enable dashboard search for tests
FEATURES['ENABLE_DASHBOARD_SEARCH'] = True
# Use MockSearchEngine as the search engine for test scenario
SEARCH_ENGINE = "search.tests.mock_search_engine.MockSearchEngine"
# Path at which to store the mock index
MOCK_SEARCH_BACKING_FILE = (
TEST_ROOT / "index_file.dat"
).abspath()
# Generate a random UUID so that different runs of acceptance tests don't break each other
import uuid
SECRET_KEY = uuid.uuid4().hex
# Set dummy values for profile image settings.
PROFILE_IMAGE_BACKEND = {
'class': 'storages.backends.overwrite.OverwriteStorage',
'options': {
'location': os.path.join(MEDIA_ROOT, 'profile-images/'),
'base_url': os.path.join(MEDIA_URL, 'profile-images/'),
},
}
#####################################################################
# Lastly, see if the developer has any local overrides.
try:
from .private import * # pylint: disable=import-error
except ImportError:
pass
|
agpl-3.0
|
pwaller/pgi
|
examples/cairo/shapes.py
|
2
|
1555
|
#!/usr/bin/python
'''
ZetCode PyCairo tutorial
This code example draws another
three shapes in PyCairo.
author: Jan Bodnar
website: zetcode.com
last edited: August 2012
'''
import sys
sys.path.insert(0, '../..')
import pgi
pgi.install_as_gi()
from gi.repository import Gtk
class cv(object):
points = (
( 0, 85 ),
( 75, 75 ),
( 100, 10 ),
( 125, 75 ),
( 200, 85 ),
( 150, 125 ),
( 160, 190 ),
( 100, 150 ),
( 40, 190 ),
( 50, 125 ),
( 0, 85 )
)
class Example(Gtk.Window):
def __init__(self):
super(Example, self).__init__()
self.init_ui()
def init_ui(self):
darea = Gtk.DrawingArea()
darea.connect("draw", self.on_draw)
self.add(darea)
self.set_title("Complex shapes")
self.resize(460, 240)
self.set_position(Gtk.WindowPosition.CENTER)
self.connect("delete-event", Gtk.main_quit)
self.show_all()
def on_draw(self, wid, cr):
cr.set_source_rgb(0.6, 0.6, 0.6)
cr.set_line_width(1)
for i in range(10):
cr.line_to(cv.points[i][0], cv.points[i][1])
cr.fill()
cr.move_to(240, 40)
cr.line_to(240, 160)
cr.line_to(350, 160)
cr.fill()
cr.move_to(380, 40)
cr.line_to(380, 160)
cr.line_to(450, 160)
cr.curve_to(440, 155, 380, 145, 380, 40)
cr.fill()
def main():
app = Example()
Gtk.main()
if __name__ == "__main__":
main()
|
lgpl-2.1
|
euphorie/Euphorie
|
src/euphorie/deployment/commands/xmlimport.py
|
1
|
7808
|
from AccessControl.SecurityManagement import getSecurityManager
from AccessControl.SecurityManagement import newSecurityManager
from AccessControl.SecurityManagement import setSecurityManager
from euphorie.client import publish
from euphorie.content.upload import SurveyImporter
from euphorie.content.user import UserProvider
from optparse import OptionParser
from plone.namedfile.file import NamedBlobImage
from Products.CMFPlone.utils import safe_unicode
from Testing.makerequest import makerequest
from zope.site import hooks
import logging
import lxml.etree
import lxml.objectify
import os.path
import six
import sys
import transaction
import zExceptions
log = logging.getLogger(__name__)
class Abort(RuntimeError):
def __init__(self, message, exitcode=1):
self.message = message
self.exitcode = exitcode
def GetCountry(plone, options):
sectors = plone.sectors
if not hasattr(sectors, options.country):
log.info("Creating missing country %s", options.country)
sectors.invokeFactory(
"euphorie.country", options.country, title=options.country
)
return getattr(sectors, options.country)
def GetSector(country, xml_sector, options):
if options.sector is None:
if not hasattr(xml_sector, "account"):
return None
login = xml_sector.account.get("login").lower()
password = xml_sector.account.get("password")
else:
password = None
login = options.sector.lower()
if options.login is not None:
login = options.login
if password is None:
password = login
sector = getattr(country, login, None)
if sector is not None:
return sector
log.info("Creating new sector '%s' with password '%s'", login, password)
id = country.invokeFactory(
"euphorie.sector", login, title=options.sector or xml_sector.title.text.strip()
)
sector = getattr(country, id)
sector.login = login
sector.password = password
if hasattr(xml_sector, "contact"):
xml_contact = xml_sector.contact
if hasattr(xml_contact, "name"):
sector.contact_name = six.text_type(xml_contact.name.text)
if hasattr(xml_contact, "email"):
sector.contact_email = six.text_type(xml_contact.email.text)
if options.logo is not None:
sector.logo = NamedBlobImage(
            data=open(options.logo, "rb").read(),
filename=safe_unicode(os.path.basename(options.logo)),
)
if options.main_colour:
sector.main_colour = options.main_colour
if options.support_colour:
sector.support_colour = options.support_colour
return sector
def ImportSector(plone, options, filename):
input = open(filename, "r")
dom = lxml.objectify.parse(input)
xml_sector = dom.getroot()
country = GetCountry(plone, options)
if not hasattr(xml_sector, "survey"):
return
sector = GetSector(country, xml_sector, options)
if sector is None:
raise Abort("No sector specified and no account information found.")
# Login as the sector
sup = UserProvider(sector)
sectoruser = plone.acl_users.getUserById(sup.getUserId())
sm = getSecurityManager()
try:
newSecurityManager(None, sectoruser)
name = options.name or six.text_type(xml_sector.survey.title.text)
if hasattr(sector, name):
raise Abort("There is already a survey named '%s'" % name)
log.info(u"Importing survey '%s' with version '%s'", name, options.version)
importer = SurveyImporter(sector)
survey = importer(xml_sector, name, options.version)
if options.publish:
log.info("Publishing survey")
publisher = publish.PublishSurvey(survey, None)
publisher.publish()
finally:
setSecurityManager(sm)
def main(app, args):
parser = OptionParser(usage="Usage: bin/instance xmlimport [options] <XML-files>")
parser.add_option(
"-p",
"--publish",
help="Publish the imported sector.",
action="store_true",
dest="publish",
default=False,
)
parser.add_option(
"-S",
"--site",
help="id of the Plone site. Defaults to Plone",
action="store",
type="string",
dest="site",
default="Plone",
)
parser.add_option(
"-L",
"--logo",
help="Filename for the sector logo.",
action="store",
type="string",
dest="logo",
)
parser.add_option(
"--main-colour",
help="Main colour used for client pages.",
action="store",
type="string",
dest="main_colour",
)
parser.add_option(
"--support-colour",
help="Support colour used for client pages.",
action="store",
type="string",
dest="support_colour",
)
parser.add_option(
"-c",
"--country",
help="The country where the branch/model should be created. " "Defaults to nl.",
action="store",
type="string",
dest="country",
default="nl",
)
parser.add_option(
"-s",
"--sector",
help="The name of the sector where the survey should be created.",
action="store",
type="string",
dest="sector",
)
parser.add_option(
"-l",
"--login",
help="Login name for the sector. Also used as sector id.",
action="store",
type="string",
dest="login",
)
parser.add_option(
"-n",
"--name",
help="Override name for the imported survey.",
action="store",
type="string",
dest="name",
)
parser.add_option(
"-v",
"--version-name",
help="Name of the new survey version. Defaults to 'default'.",
action="store",
type="string",
dest="version",
default="default",
)
(options, args) = parser.parse_args(args)
if not args:
raise Abort("Please specify a (single) XML file to import.")
# The magic Zope2 setup dance
zope2 = makerequest(app)
hooks.setHooks()
plone = getattr(zope2, options.site)
hooks.setSite(plone)
# Login as admin
admin = zope2.acl_users.getUserById("admin")
newSecurityManager(None, admin)
for arg in args:
transaction.begin()
try:
log.info("Importing %s", arg)
ImportSector(plone, options, arg)
trans = transaction.get()
trans.setUser("-commandline-")
trans.note("Import of %s" % arg)
trans.commit()
except lxml.etree.XMLSyntaxError as e:
transaction.abort()
log.error(e.message)
log.error("Invalid input file")
except RuntimeError as e:
transaction.abort()
log.error(e.message)
except zExceptions.Unauthorized as e:
transaction.abort()
log.error(e.message)
log.error("This is mostly likely due to too deep nesting " "in the survey.")
except zExceptions.BadRequest as e:
transaction.abort()
log.error(e.message)
log.error("This is mostly likely due to illegal input data.")
except Exception:
transaction.abort()
raise
if __name__ == "__main__":
# We can not use logging.basicConfig since Zope2 has already configured
# things.
rootlog = logging.getLogger()
rootlog.setLevel(logging.INFO)
formatter = logging.Formatter("[%(levelname)s] %(message)s")
for handler in rootlog.handlers:
handler.setFormatter(formatter)
handler.setLevel(logging.INFO)
main(app, sys.argv[1:]) # noqa: F821
|
gpl-2.0
|
Radagast-red/golem
|
tests/golem/task/dummy/runner.py
|
2
|
8435
|
"""Test script for running a single instance of a dummy task.
The task simply computes hashes of some random data and requires
no external tools. The amount of data processed (i.e. hashed) and the computational
difficulty are configurable; see the comments in DummyTaskParameters.
"""
import atexit
import logging
import os
import re
import shutil
import subprocess
import sys
import tempfile
import time
from os import path
from threading import Thread
from twisted.internet import reactor
from golem.environments.environment import Environment
from golem.resource.dirmanager import DirManager
from golem.network.transport.tcpnetwork import SocketAddress
from task import DummyTask, DummyTaskParameters
REQUESTING_NODE_KIND = "requestor"
COMPUTING_NODE_KIND = "computer"
def format_msg(kind, pid, msg):
return "[{} {:>5}] {}".format(kind, pid, msg)
node_kind = ""
def report(msg):
print format_msg(node_kind, os.getpid(), msg)
def override_ip_info(*_, **__):
from stun import OpenInternet
return OpenInternet, '1.2.3.4', 40102
def create_client(datadir):
# executed in a subprocess
import stun
stun.get_ip_info = override_ip_info
from golem.client import Client
return Client(datadir=datadir,
use_monitor=False,
transaction_system=False,
connect_to_known_hosts=False,
use_docker_machine_manager=False,
estimated_lux_performance=1000.0,
estimated_blender_performance=1000.0)
def run_requesting_node(datadir, num_subtasks=3):
client = None
def shutdown():
client and client.quit()
logging.shutdown()
atexit.register(shutdown)
global node_kind
node_kind = "REQUESTOR"
start_time = time.time()
report("Starting in {}".format(datadir))
client = create_client(datadir)
client.start()
report("Started in {:.1f} s".format(time.time() - start_time))
params = DummyTaskParameters(1024, 2048, 256, 0x0001ffff)
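    # See DummyTaskParameters in task.py for what each value controls (data sizes
    # and computational difficulty, per the module docstring above).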
task = DummyTask(client.get_node_name(), params, num_subtasks)
task.initialize(DirManager(datadir))
client.enqueue_new_task(task)
port = client.p2pservice.cur_port
requestor_addr = "{}:{}".format(client.node.prv_addr, port)
report("Listening on {}".format(requestor_addr))
def report_status():
while True:
time.sleep(1)
if task.finished_computation():
report("Task finished")
shutdown()
return
reactor.callInThread(report_status)
reactor.run()
return client # Used in tests, with mocked reactor
def run_computing_node(datadir, peer_address, fail_after=None):
client = None
def shutdown():
client and client.quit()
logging.shutdown()
atexit.register(shutdown)
global node_kind
node_kind = "COMPUTER "
start_time = time.time()
report("Starting in {}".format(datadir))
client = create_client(datadir)
client.start()
client.task_server.task_computer.support_direct_computation = True
report("Started in {:.1f} s".format(time.time() - start_time))
class DummyEnvironment(Environment):
@classmethod
def get_id(cls):
return DummyTask.ENVIRONMENT_NAME
def __init__(self):
super(DummyEnvironment, self).__init__()
self.allow_custom_main_program_file = True
dummy_env = DummyEnvironment()
dummy_env.accept_tasks = True
client.environments_manager.add_environment(dummy_env)
report("Connecting to requesting node at {}:{} ..."
.format(peer_address.address, peer_address.port))
client.connect(peer_address)
def report_status(fail_after=None):
t0 = time.time()
while True:
if fail_after and time.time() - t0 > fail_after:
report("Failure!")
reactor.callFromThread(reactor.stop)
shutdown()
return
time.sleep(1)
reactor.callInThread(report_status, fail_after)
reactor.run()
return client # Used in tests, with mocked reactor
# Global var set by a thread monitoring the status of the requestor node
task_finished = False
def run_simulation(num_computing_nodes=2, num_subtasks=3, timeout=120,
node_failure_times=None):
# We need to pass the PYTHONPATH to the child processes
pythonpath = "".join(dir + os.pathsep for dir in sys.path)
env = os.environ.copy()
env["PYTHONPATH"] = pythonpath
datadir = tempfile.mkdtemp(prefix='golem_dummy_simulation_')
start_time = time.time()
# Start the requesting node in a separate process
reqdir = path.join(datadir, REQUESTING_NODE_KIND)
requesting_proc = subprocess.Popen(
["python", "-u", __file__, REQUESTING_NODE_KIND, reqdir, str(num_subtasks)],
bufsize=1, # line buffered
env=env,
stdout=subprocess.PIPE)
# Scan the requesting node's stdout for the address
address_re = re.compile(".+REQUESTOR.+Listening on (.+)")
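    # A line such as "[REQUESTOR 12345] Listening on 10.0.0.1:40102" (hypothetical
    # address) matches, capturing "10.0.0.1:40102" as the requestor address.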
while True:
line = requesting_proc.stdout.readline().strip()
if line:
print line
m = address_re.match(line)
if m:
requestor_address = m.group(1)
break
    # Start computing nodes in separate processes
computing_procs = []
for n in range(0, num_computing_nodes):
compdir = path.join(datadir, COMPUTING_NODE_KIND + str(n))
cmdline = [
"python", "-u", __file__, COMPUTING_NODE_KIND, compdir, requestor_address]
if node_failure_times and len(node_failure_times) > n:
# Simulate failure of a computing node
cmdline.append(str(node_failure_times[n]))
proc = subprocess.Popen(
cmdline,
bufsize=1,
env=env,
stdout=subprocess.PIPE)
computing_procs.append(proc)
all_procs = computing_procs + [requesting_proc]
task_finished_status = format_msg(
"REQUESTOR", requesting_proc.pid, "Task finished")
global task_finished
task_finished = False
def monitor_subprocess(proc):
global task_finished
while proc.returncode is None:
line = proc.stdout.readline().strip()
if line:
print line
if line == task_finished_status:
task_finished = True
monitor_threads = [Thread(target=monitor_subprocess,
name="monitor {}".format(p.pid),
args=(p,))
for p in all_procs]
for th in monitor_threads:
th.setDaemon(True)
th.start()
# Wait until timeout elapses or the task is computed
try:
while not task_finished:
if time.time() - start_time > timeout:
return "Computation timed out"
# Check if all subprocesses are alive
for proc in all_procs:
if proc.poll() is not None:
return "Node exited with return code {}".format(
proc.returncode)
time.sleep(1)
return None
finally:
print "Stopping nodes..."
for proc in all_procs:
if proc.poll() is None:
proc.kill()
proc.wait()
del proc
time.sleep(1)
shutil.rmtree(datadir)
def dispatch(args):
if len(args) == 4 and args[1] == REQUESTING_NODE_KIND:
# I'm a requesting node,
# second arg is the data dir,
# third arg is the number of subtasks.
run_requesting_node(args[2], int(args[3]))
elif len(args) in [4, 5] and args[1] == COMPUTING_NODE_KIND:
# I'm a computing node,
# second arg is the data dir,
# third arg is the address to connect to,
        # fourth arg is the timeout (optional).
fail_after = float(args[4]) if len(args) == 5 else None
run_computing_node(args[2], SocketAddress.parse(args[3]), fail_after=fail_after)
elif len(args) == 1:
# I'm the main script, run simulation
error_msg = run_simulation(num_computing_nodes=2, num_subtasks=4,
timeout=120)
if error_msg:
print "Dummy task computation failed:", error_msg
sys.exit(1)
if __name__ == "__main__":
dispatch(sys.argv)
|
gpl-3.0
|
surgebiswas/poker
|
PokerBots_2017/Johnny/theano/sandbox/cuda/tests/test_memory.py
|
6
|
7415
|
from __future__ import print_function
import copy
import gc
import numpy as np
import theano
from theano import tensor
from theano.sandbox import cuda
from theano import ifelse
# Skip test if cuda_ndarray is not available.
from nose.plugins.skip import SkipTest
if cuda.cuda_available == False:
raise SkipTest('Optional package cuda disabled')
if theano.config.mode == 'FAST_COMPILE':
mode_with_gpu = theano.compile.mode.get_mode('FAST_RUN').including('gpu')
else:
mode_with_gpu = theano.compile.mode.get_default_mode().including('gpu')
# The GC need to be enabled for those tests to work correctly.
if not getattr(mode_with_gpu.linker, 'allow_gc', False):
mode_with_gpu.linker = copy.copy(mode_with_gpu.linker)
mode_with_gpu.linker.allow_gc = True
def freemem(extra_alloc=0):
"""
Return the free memory on the gpu in megabytes.
"""
gc.collect()
gc.collect()
gc.collect()
n_mallocs = cuda.cuda_ndarray.cuda_ndarray.outstanding_mallocs()
if hasattr(cuda.cuda_ndarray.cuda_ndarray, "theano_allocated"):
theano_alloc = cuda.cuda_ndarray.cuda_ndarray.theano_allocated()
return ("(n malloc/theano mem allocated in KB)",
n_mallocs + extra_alloc,
                int(theano_alloc / 1024) + extra_alloc)
return ("n malloc on the gpu", n_mallocs + extra_alloc)
# I don't use the following by default as if there is other stuff running
# on the GPU, this won't work.
mem_info = cuda.cuda_ndarray.cuda_ndarray.mem_info()
gpu_used = (mem_info[1] - mem_info[0]) / 1024 ** 2
mem_info_msg = "(n malloc/gpu mem used in MB)"
return (mem_info_msg, n_mallocs, int(gpu_used))
def test_memory():
"""
    We test that we do not keep links to memory between Theano function calls
    and during Theano compilation.
    The origin of this code comes from Aaron Vandenoord and Sander Dieleman.
    I have their authorisation to put this in Theano with the Theano license.
note::
This test can fail if there is other process running on the gpu.
"""
shapes = (200, 100)
    # more_alloc1 was different for each dtype in the past.
    # more_alloc2 is still currently not the same for both dtypes.
    # When dtype is float32, the computation is done on the gpu.
    # This inserts constants on the gpu during compilation,
    # which raises the number of allocs.
    # When dtype is float64, only the shared variable is on the gpu and it is
    # transferred to the cpu for computation. So no extra alloc after compilation.
    # more_alloc1 is checked after the first compilation, more_alloc2 after the second.
for dtype, more_alloc1, more_alloc2 in [("float32", 0, 3),
("float64", 0, 0)]:
print(dtype)
test_params = np.asarray(np.random.randn(np.prod(shapes)), dtype)
some_vector = tensor.vector('some_vector', dtype=dtype)
some_matrix = some_vector.reshape(shapes)
mem1 = freemem()
print("Before shared variable", mem1)
variables = cuda.shared_constructor(np.ones((shapes[1],),
dtype='float32'))
derp = tensor.sum(tensor.dot(some_matrix[:shapes[0]], variables))
print("Shared took ", np.prod(variables.get_value(
borrow=True,
return_internal_type=True).shape) * 4 / 1024, "kB")
mem2 = freemem()
print("Before compilation", mem2)
mem2_1 = freemem(extra_alloc=more_alloc1)
mem2_2 = freemem(extra_alloc=more_alloc2)
obj = theano.function([some_vector], derp, mode=mode_with_gpu)
mem3 = freemem()
print("After function compilation 1", mem3)
assert mem2_1 == mem3, (mem2_1, mem3, dtype)
grad_derp = tensor.grad(derp, some_vector)
grad = theano.function([some_vector], grad_derp, mode=mode_with_gpu)
mem4 = freemem()
print("After function compilation 2", mem4)
assert mem2_2 == mem4, (mem2_2, mem4, dtype)
for i in range(3):
obj(test_params)
print("After function evaluation 1", freemem())
assert mem2_2 == freemem(), (mem2_2, freemem())
grad(test_params)
print("After function evaluation 2", freemem())
assert mem2_2 == freemem(), (mem2_2, freemem())
del obj
# print "After deleting function 1", freemem()
#assert mem2 == freemem(), (mem2, freemem())
del grad
print("After deleting function 2", freemem())
assert mem2 == freemem(), (mem2, freemem())
del derp, variables, grad_derp
print("After deleting shared variable and ref to it", freemem())
assert mem1 == freemem(), (mem1, freemem())
@theano.configparser.change_flags(**{'vm.lazy': True})
def test_memory_lazy():
"""As test_memory, but with the ifelse op.
    We need to test it because with the ifelse op the [c]vm creates ops that are
    not executed in the graph. This messes with the [c]vm gc implementation.
"""
shapes = (50, 100)
    # more_alloc1 is not the same for both dtypes.
    # When dtype is float32, the computation is done on the gpu.
    # This inserts constants on the gpu during compilation,
    # which raises the number of allocs.
    # When dtype is float64, only the shared variable is on the gpu and it is
    # transferred to the cpu for computation. So no extra alloc after compilation.
    # more_alloc1 is checked after the first compilation.
for dtype, more_alloc1 in [("float32", 1),
("float64", 0)]:
print(dtype)
test_params = np.asarray(np.random.randn(np.prod(shapes)), dtype)
some_vector = tensor.vector('some_vector', dtype=dtype)
some_matrix = some_vector.reshape(shapes)
branch_select = tensor.iscalar()
mem1 = freemem()
print("Before shared variable", mem1)
variables = cuda.shared_constructor(np.ones((shapes[1],),
dtype='float32'))
derp = tensor.sum(tensor.dot(some_matrix[:shapes[0]], variables))
derp = ifelse.IfElse(1)(branch_select,
derp, some_matrix[:shapes[0]].sum())
derp += 1
print("Shared took ", np.prod(variables.get_value(
borrow=True,
return_internal_type=True).shape) * 4 / 1024, "kB")
mem2 = freemem()
print("Before compilation", mem2)
mem2_1 = freemem(extra_alloc=more_alloc1)
obj = theano.function([some_vector, branch_select], derp,
mode=mode_with_gpu)
#theano.printing.debugprint(obj, print_type=True)
mem3 = freemem()
print("After function compilation 1", mem3)
assert mem2_1 == mem3, (mem2_1, mem3)
for i in range(3):
obj(test_params, 1)
print("After function evaluation branch true", freemem())
assert mem2_1 == freemem(), (mem2_1, freemem())
obj(test_params, 0)
print("After function evaluation branch false", freemem())
assert mem2_1 == freemem(), (mem2_1, freemem())
del obj
print("After deleting function 1", freemem())
assert mem2 == freemem(), (mem2, freemem())
del derp, variables
print("After deleting shared variable and ref to it", freemem())
assert mem1 == freemem(), (mem1, freemem())
|
mit
|
s40523117/2016fallcp_hw
|
plugin/liquid_tags/test_flickr.py
|
26
|
2545
|
from . import flickr
try:
from unittest.mock import patch
except ImportError:
from mock import patch
import os
import pytest
import re
PLUGIN_DIR = os.path.dirname(__file__)
TEST_DATA_DIR = os.path.join(PLUGIN_DIR, 'test_data')
@pytest.mark.parametrize('input,expected', [
('18873146680 large "test 1"',
dict(photo_id='18873146680',
size='large',
alt='test 1')),
('18873146680 large \'test 1\'',
dict(photo_id='18873146680',
size='large',
alt='test 1')),
('18873143536360 medium "test number two"',
dict(photo_id='18873143536360',
size='medium',
alt='test number two')),
('18873143536360 small "test number 3"',
dict(photo_id='18873143536360',
size='small',
alt='test number 3')),
('18873143536360 "test 4"',
dict(photo_id='18873143536360',
size=None,
alt='test 4')),
('18873143536360',
dict(photo_id='18873143536360',
size=None,
alt=None)),
('123456 small',
dict(photo_id='123456',
size='small',
alt=None))
])
def test_regex(input, expected):
assert re.match(flickr.PARSE_SYNTAX, input).groupdict() == expected
@pytest.mark.parametrize('input,expected', [
(['1', 'server1', '1', 'secret1', 'small'],
'https://farm1.staticflickr.com/server1/1_secret1_n.jpg'),
(['2', 'server2', '2', 'secret2', 'medium'],
'https://farm2.staticflickr.com/server2/2_secret2_c.jpg'),
(['3', 'server3', '3', 'secret3', 'large'],
'https://farm3.staticflickr.com/server3/3_secret3_b.jpg')
])
def test_source_url(input, expected):
assert flickr.source_url(
input[0], input[1], input[2], input[3], input[4]) == expected
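# The cases above reflect the size-suffix mapping used by source_url:
# small -> _n, medium -> _c, large -> _b (see the expected staticflickr URLs).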
@patch('liquid_tags.flickr.urlopen')
def test_generate_html(mock_urlopen):
# mock the return to deliver the flickr.json file instead
with open(TEST_DATA_DIR + '/flickr.json', 'rb') as f:
mock_urlopen.return_value.read.return_value = f.read()
attrs = dict(
photo_id='1234567',
size='large',
alt='this is a test'
)
expected = ('<a href="https://www.flickr.com/photos/'
'marvinxsteadfast/18841055371/">'
'<img src="https://farm6.staticflickr.com/5552/1234567_'
'17ac287217_b.jpg" alt="this is a test"></a>')
assert flickr.generate_html(attrs, 'abcdef') == expected
|
agpl-3.0
|
dustcloud/dustlink
|
views/web/dustWeb/viz/VizForm.py
|
2
|
9108
|
import logging
class NullHandler(logging.Handler):
def emit(self, record):
pass
log = logging.getLogger('VizForm')
log.setLevel(logging.ERROR)
log.addHandler(NullHandler())
import VizjQuery
class VizForm(VizjQuery.VizjQuery):
#======================== header ==========================================
templateHeader = '''
<style type="text/css">
</style>
'''
#======================== body ============================================
templateBody = '''
<script type='text/javascript'>
// wait for the page to be loaded, then create the form (once)
$(document).ready(getData_{VIZID});
//======================= get form ========================================
function getData_{VIZID}() {{
var statusDivId;
// update the status message
statusDivId = 'status_div_{VIZID}';
updateStatus(statusDivId,'busy','');
// get updated data from the server and execute
jQuery.ajax({{
type: 'GET',
url: '/{RESOURCE}/',
timeout: 5*1000,
statusCode: {{
200: function(response) {{
try {{
drawForm_{VIZID}(response);
}} catch(err) {{
throw err;
}}
updateStatus(statusDivId,'success','');
}},
400: function() {{
updateStatus(statusDivId,'failure','Malformed.');
}},
401: function() {{
updateStatus(statusDivId,'failure','Access denied.');
}},
404: function() {{
updateStatus(statusDivId,'failure','Resource not found.');
}},
500: function() {{
updateStatus(statusDivId,'failure','Internal server error.');
}}
}},
error: function(jqXHR, textStatus, errorThrown) {{
if (textStatus=='timeout') {{
updateStatus(statusDivId,'failure','Server unreachable.');
}}
}}
}});
}}
function drawForm_{VIZID}(data) {{
var cells,
thisCell,
fieldId;
// clear old contents
document.getElementById('chart_div_{VIZID}').innerHTML = '';
// draw new table
$('<table/>', {{
'class': 'formTable_{VIZID}'
}}).appendTo('#chart_div_{VIZID}');
for (var i = 0; i < data.length; i++) {{
cells = [];
// name
thisCell = '';
thisCell += '<td>';
thisCell += data[i].name;
thisCell += '</td>';
cells.push(thisCell);
// field
fieldId = 'fieldTable_{VIZID}_'+data[i].name
if (data[i].type=='text') {{
thisCell = '';
thisCell += '<td>';
thisCell += '<input type="text"';
thisCell += ' id="'+fieldId+'"';
thisCell += ' name="'+data[i].name+'"';
thisCell += ' value="'+data[i].value+'"';
thisCell += ' class="formElems_{VIZID}"';
thisCell += '/>';
thisCell += '</td>';
}} else if (data[i].type=='password') {{
thisCell = '';
thisCell += '<td>';
thisCell += '<input type="password"';
thisCell += ' id="'+fieldId+'"';
thisCell += ' name="'+data[i].name+'"';
thisCell += ' value="'+data[i].value+'"';
thisCell += ' class="formElems_{VIZID}"';
thisCell += '/>';
thisCell += '</td>';
}} else if (data[i].type=='boolean') {{
thisCell = '';
thisCell += '<td>';
thisCell += '<input type="checkbox"';
thisCell += ' id="'+fieldId+'"';
thisCell += ' name="'+data[i].name+'"';
thisCell += ' class="formElems_{VIZID}"';
if (data[i].value==true) {{
thisCell += ' checked ';
}}
thisCell += '/>';
thisCell += '</td>';
}} else if (data[i].type=='select') {{
thisCell = '';
thisCell += '<td>';
thisCell += '<select';
thisCell += ' id="'+fieldId+'"';
thisCell += ' name="'+data[i].name+'"';
thisCell += ' class="formElems_{VIZID}"';
thisCell += '>';
for (var optidx = 0; optidx < data[i].optionDisplay.length; optidx++) {{
thisCell += '<option value="'+data[i].optionValue[optidx]+'"';
if (data[i].optionValue[optidx]==data[i].value) {{
thisCell += ' selected="selected"';
}}
thisCell += '>';
thisCell += data[i].optionDisplay[optidx];
thisCell += '</option>';
}}
thisCell += '</select>';
thisCell += '</td>';
}} else {{
thisCell = '';
thisCell += '<td>';
thisCell += 'WARNING unknown type: '+data[i].type;
thisCell += '</td>';
}}
cells.push(thisCell);
// status
thisCell = '';
thisCell += '<td>';
thisCell += '<div id="'+fieldId+'_status"></div>';
thisCell += '</td>';
cells.push(thisCell);
$('<tr/>', {{
html: cells.join('')
}}).appendTo('.formTable_{VIZID}');
}}
$('<tr/>', {{
html: '<button onclick="postFormData_{VIZID}()">Submit</button>'
}}).appendTo('.formTable_{VIZID}');
}}
//======================= post from data ==================================
function postFormData_{VIZID}() {{
var statusDivId,
formElems,
dataToSend,
i,
fieldName,
fieldValue;
// update the status message
statusDivId = 'status_div_{VIZID}';
updateStatus(statusDivId,'busy', '');
// build data to send
formElems = document.getElementsByClassName('formElems_{VIZID}');
dataToSend = {{}};
for (i=0; i<formElems.length; i++) {{
fieldName = formElems[i].name;
if (formElems[i].type=='text') {{
fieldValue = formElems[i].value;
}} else if (formElems[i].type=='password') {{
fieldValue = formElems[i].value;
}} else if (formElems[i].type=='checkbox') {{
fieldValue = formElems[i].checked;
}} else if (formElems[i].type=='select-one') {{
fieldValue = formElems[i].options[formElems[i].selectedIndex].value;
}} else {{
console.log('WARNING: in post, unexpected type '+formElems[i].type);
}}
dataToSend[fieldName] = fieldValue;
}}
jQuery.ajax({{
type: 'POST',
url: '/{RESOURCE}/',
timeout: 5*1000,
data: JSON.stringify(dataToSend),
statusCode: {{
200: function() {{
updateStatus(statusDivId,'success', '');
location.reload();
}},
400: function() {{
updateStatus(statusDivId,'failure','Malformed.');
}},
401: function() {{
updateStatus(statusDivId,'failure','Access denied.');
}},
404: function() {{
updateStatus(statusDivId,'failure','Resource not found.');
}},
500: function() {{
updateStatus(statusDivId,'failure','Internal server error.');
}}
}},
error: function(jqXHR, textStatus, errorThrown) {{
if (textStatus=='timeout') {{
updateStatus(statusDivId,'failure','Server unreachable.');
}}
}}
}});
}}
</script>
'''
def __init__(self, **kw):
super(VizForm, self).__init__(forbidAutorefresh=True, autorefresh=False, **kw)
|
bsd-3-clause
|
nvoron23/avos
|
horizon/utils/filters.py
|
86
|
2017
|
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import iso8601
from django.template.defaultfilters import register # noqa
from django.template.defaultfilters import timesince # noqa
from django.utils.safestring import mark_safe
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
@register.filter
def replace_underscores(string):
return string.replace("_", " ")
@register.filter
def parse_isotime(timestr, default=None):
"""This duplicates oslo timeutils parse_isotime but with a
@register.filter annotation and a silent fallback on error.
"""
try:
return iso8601.parse_date(timestr)
except (iso8601.ParseError, TypeError):
return default or ''
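# A rough usage sketch (hypothetical input): {{ "2014-01-01T00:00:00Z"|parse_isotime }}
# yields an aware datetime, while an unparsable value falls back to the default ('').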
@register.filter
def timesince_or_never(dt, default=None):
"""Call the Django ``timesince`` filter, but return the string
*default* if *dt* is not a valid ``date`` or ``datetime`` object.
When *default* is None, "Never" is returned.
"""
if default is None:
default = _("Never")
if isinstance(dt, datetime.date):
return timesince(dt)
else:
return default
@register.filter
def timesince_sortable(dt):
delta = timezone.now() - dt
# timedelta.total_seconds() not supported on python < 2.7
seconds = delta.seconds + (delta.days * 24 * 3600)
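    # This mirrors timedelta.total_seconds() (ignoring microseconds), which is
    # only available from Python 2.7 onwards.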
return mark_safe("<span data-seconds=\"%d\">%s</span>" %
(seconds, timesince(dt)))
|
apache-2.0
|
barnabytprowe/great3-public
|
validation/plot_variable_submission.py
|
2
|
3710
|
#!/usr/bin/env python
# Copyright (c) 2014, the GREAT3 executive committee (http://www.great3challenge.info/?q=contacts)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted
# provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions
# and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of
# conditions and the following disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to
# endorse or promote products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
# FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
# IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
# OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""@file plot_variable_submission.py
Handy command line executable script for plotting up a GREAT3 variable shear submission.
"""
# Constants
NFIELDS = 10
NBINS_THETA = 15
YLIM_EMODE = 2.e-5
YLIM_BMODE = 2.e-5
def plot(submission_filename, output_filename, nfields=NFIELDS, nbins_theta=NBINS_THETA,
ylim_emode=YLIM_EMODE, ylim_bmode=YLIM_BMODE):
"""Plot a submission.
"""
import numpy as np
import matplotlib.pyplot as plt
# Load the data from the input submission
data = np.loadtxt(submission_filename)
field, theta, map_E, map_B, maperr = (
data[:, 0].astype(int), data[:, 1], data[:, 2], data[:, 3], data[:, 4])
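    # The submission columns are assumed to be: field index, theta [degrees],
    # aperture-mass dispersion E-mode, B-mode, and an error estimate (unused here).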
# Then plot (largely borrowed from the code in server/great3/evaluate.py)
plt.figure(figsize=(10, 8))
plt.subplot(211)
for ifield in range(nfields):
plt.semilogx(
theta[ifield * nbins_theta: (ifield + 1) * nbins_theta],
map_E[ifield * nbins_theta: (ifield + 1) * nbins_theta], label="Field "+str(ifield))
plt.ylim(-ylim_emode, ylim_emode)
plt.title(submission_filename+" E-mode")
plt.ylabel("Ap. Mass Dispersion")
plt.axhline(ls="--", color="k")
plt.legend()
plt.subplot(212)
for ifield in range(nfields):
plt.semilogx(
theta[ifield * nbins_theta: (ifield + 1) * nbins_theta],
map_B[ifield * nbins_theta: (ifield + 1) * nbins_theta], label="Field "+str(ifield))
plt.ylim(-ylim_bmode, ylim_bmode)
plt.title(submission_filename+" B-mode")
plt.xlabel("Theta [degrees]")
plt.ylabel("Ap. Mass Dispersion")
plt.axhline(ls="--", color="k")
plt.legend()
plt.savefig(output_filename)
return
if __name__ == "__main__":
import sys
# Get the input and output filenames from the command line
if len(sys.argv) != 3:
print "plot_variable_submission.py"
print "usage: ./plot_variable_submission.py input_submission output_filename"
sys.exit(1)
submission_filename = sys.argv[1]
output_filename = sys.argv[2]
plot(submission_filename, output_filename)
|
bsd-3-clause
|
morreene/tradenews
|
venv/Lib/encodings/undefined.py
|
860
|
1299
|
""" Python 'undefined' Codec
This codec will always raise a ValueError exception when being
used. It is intended for use by the site.py file to switch off
automatic string to Unicode coercion.
Written by Marc-Andre Lemburg ([email protected]).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
"""
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
raise UnicodeError("undefined encoding")
def decode(self,input,errors='strict'):
raise UnicodeError("undefined encoding")
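# Rough illustration (not part of this module): codecs.encode(u"abc", "undefined")
# ends up in Codec.encode above and therefore raises UnicodeError.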
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
raise UnicodeError("undefined encoding")
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
raise UnicodeError("undefined encoding")
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='undefined',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamwriter=StreamWriter,
streamreader=StreamReader,
)
|
bsd-3-clause
|
dilee/carbon-device-mgt-plugins
|
features/device-types-feature/raspberrypi-plugin-feature/org.wso2.carbon.device.mgt.iot.raspberrypi.backend.feature/src/main/resources/agent/src/httpServer.py
|
23
|
4485
|
#!/usr/bin/env python
"""
/**
* Copyright (c) 2015, WSO2 Inc. (http://www.wso2.org) All Rights Reserved.
*
* WSO2 Inc. licenses this file to you under the Apache License,
* Version 2.0 (the "License"); you may not use this file except
* in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
**/
"""
import time
import BaseHTTPServer
import iotUtils
import running_mode
import os
import subprocess
import re
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Class that handles HTTP GET requests for operations on the RPi
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class OnRequestListener(BaseHTTPServer.BaseHTTPRequestHandler):
def do_GET(request):
# """Respond to a GET request."""
if not processURLPath(request.path):
return
print request.path.split("/")[1].upper()
resource = request.path.split("/")[1].upper()
state = request.path.split("/")[2].upper()
print "HTTP_SERVER: Resource - " + resource
if resource == "TEMPERATURE":
request.send_response(200)
request.send_header('Content-Type', 'application/json')
request.send_header('Authorization', 'Bearer ' + iotUtils.AUTH_TOKEN)
request.end_headers()
request.wfile.write(iotUtils.LAST_TEMP)
elif resource == "BULB":
iotUtils.switchBulb(state)
print "HTTP_SERVER: Requested Switch State - " + state
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Check the URL string of the request and validate
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def processURLPath(path):
if path.count("/") != 2 and not "favicon" in path:
print "HTTP_SERVER: Invalid URL String: " + path
return False
resource = path.split("/")[1]
if not iequal("BULB", resource) and not iequal("TEMPERATURE", resource):
if not "favicon" in resource:
print "HTTP_SERVER: Invalid resource - " + resource + " to execute operation"
return False
return True
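# Examples (hypothetical request paths): "/BULB/ON" and "/TEMPERATURE/READ" are
# accepted, while "/BULB" (missing state) and "/FAN/ON" (unknown resource) are not.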
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Case-insensitive check on whether two strings are equal
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def iequal(a, b):
try:
return a.upper() == b.upper()
except AttributeError:
return a == b
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# The Main method of the server script
# This method is invoked from RaspberryStats.py on a new thread
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def main():
HOST_NAME = iotUtils.getDeviceIP()
HTTP_SERVER_PORT = iotUtils.getHTTPServerPort()
server_class = BaseHTTPServer.HTTPServer
while True:
try:
httpd = server_class((HOST_NAME, HTTP_SERVER_PORT), OnRequestListener)
print "HTTP_SERVER: " + time.asctime(), "Server Starts - %s:%s" % (HOST_NAME, HTTP_SERVER_PORT)
httpd.serve_forever()
except (KeyboardInterrupt, Exception) as e:
print "HTTP_SERVER: Exception in HttpServerThread (either KeyboardInterrupt or Other)"
print ("HTTP_SERVER: " + str(e))
if running_mode.RUNNING_MODE == "N":
iotUtils.switchBulb("OFF")
else :
iotUtils.switchBulb("OFF")
httpd.server_close()
print "HTTP_SERVER: " + time.asctime(), "Server Stops - %s:%s" % (HOST_NAME, HTTP_SERVER_PORT)
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
pass
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if __name__ == '__main__':
main()
|
apache-2.0
|